| Instruction (string, length 362–7.83k) | output_code (string, length 1–945) |
|---|---|
Here is a snippet: <|code_start|> same data. It doesn't have sense to make multiple instances
"""
_instance = None
_instance_lock = threading.Lock()
@staticmethod
def get_instance():
with XklWrapper._instance_lock:
if not XklWrapper._instance:
XklWrapper._instance = XklWrapper()
return XklWrapper._instance
def __init__(self):
#initialize Xkl-related stuff
display = GdkX11.x11_get_default_xdisplay()
self._engine = Xkl.Engine.get_instance(display)
self._rec = Xkl.ConfigRec()
if not self._rec.get_from_server(self._engine):
raise XklWrapperError("Failed to get configuration from server")
#X is probably initialized to the 'us' layout without any variant and
#since we want to add layouts with variants we need the layouts and
#variants lists to have the same length. Add "" padding to variants.
#See docstring of the add_layout method for details.
diff = len(self._rec.layouts) - len(self._rec.variants)
<|code_end|>
. Write the next line using the current file imports:
import types
import os
import re
import shutil
import ctypes
import gettext
import threading
import logging
from collections import namedtuple
from pyanaconda import iutil
from pyanaconda import flags
from pyanaconda.safe_dbus import dbus_call_safe_sync, dbus_get_property_safe_sync
from pyanaconda.safe_dbus import DBUS_SYSTEM_BUS_ADDR, DBusPropertyError
from pyanaconda.constants import DEFAULT_VC_FONT, DEFAULT_KEYBOARD, THREAD_XKL_WRAPPER_INIT
from pyanaconda.threads import threadMgr, AnacondaThread
from gi.repository import Xkl, Gio, GLib
from gi.repository import GdkX11
and context from other files:
# Path: pyanaconda/iutil.py
# def augmentEnv():
# def _run_program(argv, root='/', stdin=None, stdout=None, env_prune=None):
# def chroot():
# def execWithRedirect(command, argv, stdin=None, stdout=None,
# root='/', env_prune=None):
# def execWithCapture(command, argv, stdin=None, root='/'):
# def execReadlines(command, argv, stdin=None, root='/', env_prune=None):
# def queue_lines(out, queue):
# def chroot():
# def execConsole():
# def getDirSize(directory):
# def getSubdirSize(directory):
# def mkdirChain(directory):
# def get_active_console(dev="console"):
# def isConsoleOnVirtualTerminal(dev="console"):
# def strip_markup(text):
# def reIPL(ipldev):
# def resetRpmDb():
# def parseNfsUrl(nfsurl):
# def add_po_path(module, directory):
# def setup_translations(module):
# def fork_orphan():
# def _run_systemctl(command, service):
# def start_service(service):
# def stop_service(service):
# def restart_service(service):
# def service_running(service):
# def dracut_eject(device):
# def vtActivate(num):
# def __init__(self, url=None, protocol="http://", host=None, port="3128",
# username=None, password=None):
# def parse_url(self):
# def parse_components(self):
# def dict(self):
# def __str__(self):
# def getdeepattr(obj, name):
# def setdeepattr(obj, name, value):
# def strip_accents(s):
# def cmp_obj_attrs(obj1, obj2, attr_list):
# def dir_tree_map(root, func, files=True, dirs=True):
# def chown_dir_tree(root, uid, gid, from_uid_only=None, from_gid_only=None):
# def conditional_chown(path, uid, gid, from_uid=None, from_gid=None):
# def is_unsupported_hw():
# def _toASCII(s):
# def upperASCII(s):
# def lowerASCII(s):
# def upcase_first_letter(text):
# class ProxyStringError(Exception):
# class ProxyString(object):
#
# Path: pyanaconda/flags.py
# class Flags(object):
# class BootArgs(OrderedDict):
# def __setattr__(self, attr, val):
# def get(self, attr, val=None):
# def set_cmdline_bool(self, flag):
# def __init__(self, read_cmdline=True):
# def read_cmdline(self):
# def __init__(self, cmdline=None, files=None):
# def read(self, filenames):
# def readstr(self, cmdline):
# def getbool(self, arg, default=False):
# def can_touch_runtime_system(msg, touch_live=False):
#
# Path: pyanaconda/constants.py
# DEFAULT_VC_FONT = "latarcyrheb-sun16"
#
# DEFAULT_KEYBOARD = "us"
#
# THREAD_XKL_WRAPPER_INIT = "AnaXklWrapperInitThread"
#
# Path: pyanaconda/threads.py
# class ThreadManager(object):
# class AnacondaThread(threading.Thread):
# def __init__(self):
# def __call__(self):
# def add(self, obj):
# def remove(self, name):
# def exists(self, name):
# def get(self, name):
# def wait(self, name):
# def wait_all(self):
# def set_error(self, name, *exc_info):
# def get_error(self, name):
# def any_errors(self):
# def raise_error(self, name):
# def in_main_thread(self):
# def running(self):
# def names(self):
# def __init__(self, *args, **kwargs):
# def run(self, *args, **kwargs):
# def initThreading():
, which may include functions, classes, or code. Output only the next line. | if diff > 0 and flags.can_touch_runtime_system("activate layouts"): |
Continue the code snippet: <|code_start|> shutil.copy2(xconf_file_path,
os.path.normpath(root + "/" + xconf_file_path))
except IOError:
# The file may not exist (eg. text install) so don't raise
pass
# restore the original values
localed_wrapper.set_layouts(layouts_variants,
options)
else:
try:
# just let systemd-localed write out the conf file
localed_wrapper.set_layouts(keyboard.x_layouts,
keyboard.switch_options)
except InvalidLayoutVariantSpec as ilvs:
# some weird value appeared as a requested X layout
log.error("Failed to write out config file: %s", ilvs)
# try default
keyboard.x_layouts = [DEFAULT_KEYBOARD]
localed_wrapper.set_layouts(keyboard.x_layouts,
keyboard.switch_options)
if keyboard.vc_keymap:
try:
with open(os.path.join(vcconf_dir, vcconf_file), "w") as fobj:
fobj.write('KEYMAP="%s"\n' % keyboard.vc_keymap)
# systemd now defaults to a font that cannot display non-ascii
# characters, so we have to tell it to use a better one
<|code_end|>
. Use current file imports:
import types
import os
import re
import shutil
import ctypes
import gettext
import threading
import logging
from collections import namedtuple
from pyanaconda import iutil
from pyanaconda import flags
from pyanaconda.safe_dbus import dbus_call_safe_sync, dbus_get_property_safe_sync
from pyanaconda.safe_dbus import DBUS_SYSTEM_BUS_ADDR, DBusPropertyError
from pyanaconda.constants import DEFAULT_VC_FONT, DEFAULT_KEYBOARD, THREAD_XKL_WRAPPER_INIT
from pyanaconda.threads import threadMgr, AnacondaThread
from gi.repository import Xkl, Gio, GLib
from gi.repository import GdkX11
and context (classes, functions, or code) from other files:
# Path: pyanaconda/iutil.py
# def augmentEnv():
# def _run_program(argv, root='/', stdin=None, stdout=None, env_prune=None):
# def chroot():
# def execWithRedirect(command, argv, stdin=None, stdout=None,
# root='/', env_prune=None):
# def execWithCapture(command, argv, stdin=None, root='/'):
# def execReadlines(command, argv, stdin=None, root='/', env_prune=None):
# def queue_lines(out, queue):
# def chroot():
# def execConsole():
# def getDirSize(directory):
# def getSubdirSize(directory):
# def mkdirChain(directory):
# def get_active_console(dev="console"):
# def isConsoleOnVirtualTerminal(dev="console"):
# def strip_markup(text):
# def reIPL(ipldev):
# def resetRpmDb():
# def parseNfsUrl(nfsurl):
# def add_po_path(module, directory):
# def setup_translations(module):
# def fork_orphan():
# def _run_systemctl(command, service):
# def start_service(service):
# def stop_service(service):
# def restart_service(service):
# def service_running(service):
# def dracut_eject(device):
# def vtActivate(num):
# def __init__(self, url=None, protocol="http://", host=None, port="3128",
# username=None, password=None):
# def parse_url(self):
# def parse_components(self):
# def dict(self):
# def __str__(self):
# def getdeepattr(obj, name):
# def setdeepattr(obj, name, value):
# def strip_accents(s):
# def cmp_obj_attrs(obj1, obj2, attr_list):
# def dir_tree_map(root, func, files=True, dirs=True):
# def chown_dir_tree(root, uid, gid, from_uid_only=None, from_gid_only=None):
# def conditional_chown(path, uid, gid, from_uid=None, from_gid=None):
# def is_unsupported_hw():
# def _toASCII(s):
# def upperASCII(s):
# def lowerASCII(s):
# def upcase_first_letter(text):
# class ProxyStringError(Exception):
# class ProxyString(object):
#
# Path: pyanaconda/flags.py
# class Flags(object):
# class BootArgs(OrderedDict):
# def __setattr__(self, attr, val):
# def get(self, attr, val=None):
# def set_cmdline_bool(self, flag):
# def __init__(self, read_cmdline=True):
# def read_cmdline(self):
# def __init__(self, cmdline=None, files=None):
# def read(self, filenames):
# def readstr(self, cmdline):
# def getbool(self, arg, default=False):
# def can_touch_runtime_system(msg, touch_live=False):
#
# Path: pyanaconda/constants.py
# DEFAULT_VC_FONT = "latarcyrheb-sun16"
#
# DEFAULT_KEYBOARD = "us"
#
# THREAD_XKL_WRAPPER_INIT = "AnaXklWrapperInitThread"
#
# Path: pyanaconda/threads.py
# class ThreadManager(object):
# class AnacondaThread(threading.Thread):
# def __init__(self):
# def __call__(self):
# def add(self, obj):
# def remove(self, name):
# def exists(self, name):
# def get(self, name):
# def wait(self, name):
# def wait_all(self):
# def set_error(self, name, *exc_info):
# def get_error(self, name):
# def any_errors(self):
# def raise_error(self, name):
# def in_main_thread(self):
# def running(self):
# def names(self):
# def __init__(self, *args, **kwargs):
# def run(self, *args, **kwargs):
# def initThreading():
. Output only the next line. | fobj.write('FONT="%s"\n' % DEFAULT_VC_FONT) |
Given the code snippet: <|code_start|> """
Normalize keyboard layout and variant specification given as a single
string. E.g. for a 'layout(variant) string missing the space between the
left parenthesis return 'layout (variant)' which is a proper layout and
variant specification we use.
:param layout_str: a string specifying keyboard layout and its variant
:type layout_str: string
"""
layout, variant = parse_layout_variant(layout_str)
return _join_layout_variant(layout, variant)
def populate_missing_items(keyboard):
"""
Function that populates keyboard.vc_keymap and keyboard.x_layouts if
they are missing. By invoking LocaledWrapper's methods this function
MODIFIES CONFIGURATION FILES.
:type keyboard: ksdata.keyboard object
"""
localed = LocaledWrapper()
if keyboard.x_layouts and not keyboard.vc_keymap:
keyboard.vc_keymap = localed.set_and_convert_layout(keyboard.x_layouts[0])
if not keyboard.vc_keymap:
<|code_end|>
, generate the next line using the imports in this file:
import types
import os
import re
import shutil
import ctypes
import gettext
import threading
import logging
from collections import namedtuple
from pyanaconda import iutil
from pyanaconda import flags
from pyanaconda.safe_dbus import dbus_call_safe_sync, dbus_get_property_safe_sync
from pyanaconda.safe_dbus import DBUS_SYSTEM_BUS_ADDR, DBusPropertyError
from pyanaconda.constants import DEFAULT_VC_FONT, DEFAULT_KEYBOARD, THREAD_XKL_WRAPPER_INIT
from pyanaconda.threads import threadMgr, AnacondaThread
from gi.repository import Xkl, Gio, GLib
from gi.repository import GdkX11
and context (functions, classes, or occasionally code) from other files:
# Path: pyanaconda/iutil.py
# def augmentEnv():
# def _run_program(argv, root='/', stdin=None, stdout=None, env_prune=None):
# def chroot():
# def execWithRedirect(command, argv, stdin=None, stdout=None,
# root='/', env_prune=None):
# def execWithCapture(command, argv, stdin=None, root='/'):
# def execReadlines(command, argv, stdin=None, root='/', env_prune=None):
# def queue_lines(out, queue):
# def chroot():
# def execConsole():
# def getDirSize(directory):
# def getSubdirSize(directory):
# def mkdirChain(directory):
# def get_active_console(dev="console"):
# def isConsoleOnVirtualTerminal(dev="console"):
# def strip_markup(text):
# def reIPL(ipldev):
# def resetRpmDb():
# def parseNfsUrl(nfsurl):
# def add_po_path(module, directory):
# def setup_translations(module):
# def fork_orphan():
# def _run_systemctl(command, service):
# def start_service(service):
# def stop_service(service):
# def restart_service(service):
# def service_running(service):
# def dracut_eject(device):
# def vtActivate(num):
# def __init__(self, url=None, protocol="http://", host=None, port="3128",
# username=None, password=None):
# def parse_url(self):
# def parse_components(self):
# def dict(self):
# def __str__(self):
# def getdeepattr(obj, name):
# def setdeepattr(obj, name, value):
# def strip_accents(s):
# def cmp_obj_attrs(obj1, obj2, attr_list):
# def dir_tree_map(root, func, files=True, dirs=True):
# def chown_dir_tree(root, uid, gid, from_uid_only=None, from_gid_only=None):
# def conditional_chown(path, uid, gid, from_uid=None, from_gid=None):
# def is_unsupported_hw():
# def _toASCII(s):
# def upperASCII(s):
# def lowerASCII(s):
# def upcase_first_letter(text):
# class ProxyStringError(Exception):
# class ProxyString(object):
#
# Path: pyanaconda/flags.py
# class Flags(object):
# class BootArgs(OrderedDict):
# def __setattr__(self, attr, val):
# def get(self, attr, val=None):
# def set_cmdline_bool(self, flag):
# def __init__(self, read_cmdline=True):
# def read_cmdline(self):
# def __init__(self, cmdline=None, files=None):
# def read(self, filenames):
# def readstr(self, cmdline):
# def getbool(self, arg, default=False):
# def can_touch_runtime_system(msg, touch_live=False):
#
# Path: pyanaconda/constants.py
# DEFAULT_VC_FONT = "latarcyrheb-sun16"
#
# DEFAULT_KEYBOARD = "us"
#
# THREAD_XKL_WRAPPER_INIT = "AnaXklWrapperInitThread"
#
# Path: pyanaconda/threads.py
# class ThreadManager(object):
# class AnacondaThread(threading.Thread):
# def __init__(self):
# def __call__(self):
# def add(self, obj):
# def remove(self, name):
# def exists(self, name):
# def get(self, name):
# def wait(self, name):
# def wait_all(self):
# def set_error(self, name, *exc_info):
# def get_error(self, name):
# def any_errors(self):
# def raise_error(self, name):
# def in_main_thread(self):
# def running(self):
# def names(self):
# def __init__(self, *args, **kwargs):
# def run(self, *args, **kwargs):
# def initThreading():
. Output only the next line. | keyboard.vc_keymap = DEFAULT_KEYBOARD |
Given the code snippet: <|code_start|> else:
# activate VConsole keymap and get converted layout and variant
c_lay_var = localed.set_and_convert_keymap(keyboard.vc_keymap)
if not keyboard.x_layouts:
if c_lay_var:
# suggested by systemd-localed for a requested VConsole keymap
keyboard.x_layouts.append(c_lay_var)
if keyboard.vc_keymap:
# nothing suggested by systemd-localed, but we may try to use the
# same string for both VConsole keymap and X layout (will fail
# safely if it doesn't work)
keyboard.x_layouts.append(keyboard.vc_keymap)
if keyboard.x_layouts:
c_keymap = localed.set_and_convert_layout(keyboard.x_layouts[0])
if not keyboard.vc_keymap:
keyboard.vc_keymap = c_keymap
# write out keyboard configuration for the X session
write_keyboard_config(keyboard, root="/", convert=False)
def background_XklWrapper_initialize():
"""
Create the XklWrapper singleton instance in a separate thread to save time
when it's really needed.
"""
<|code_end|>
, generate the next line using the imports in this file:
import types
import os
import re
import shutil
import ctypes
import gettext
import threading
import logging
from collections import namedtuple
from pyanaconda import iutil
from pyanaconda import flags
from pyanaconda.safe_dbus import dbus_call_safe_sync, dbus_get_property_safe_sync
from pyanaconda.safe_dbus import DBUS_SYSTEM_BUS_ADDR, DBusPropertyError
from pyanaconda.constants import DEFAULT_VC_FONT, DEFAULT_KEYBOARD, THREAD_XKL_WRAPPER_INIT
from pyanaconda.threads import threadMgr, AnacondaThread
from gi.repository import Xkl, Gio, GLib
from gi.repository import GdkX11
and context (functions, classes, or occasionally code) from other files:
# Path: pyanaconda/iutil.py
# def augmentEnv():
# def _run_program(argv, root='/', stdin=None, stdout=None, env_prune=None):
# def chroot():
# def execWithRedirect(command, argv, stdin=None, stdout=None,
# root='/', env_prune=None):
# def execWithCapture(command, argv, stdin=None, root='/'):
# def execReadlines(command, argv, stdin=None, root='/', env_prune=None):
# def queue_lines(out, queue):
# def chroot():
# def execConsole():
# def getDirSize(directory):
# def getSubdirSize(directory):
# def mkdirChain(directory):
# def get_active_console(dev="console"):
# def isConsoleOnVirtualTerminal(dev="console"):
# def strip_markup(text):
# def reIPL(ipldev):
# def resetRpmDb():
# def parseNfsUrl(nfsurl):
# def add_po_path(module, directory):
# def setup_translations(module):
# def fork_orphan():
# def _run_systemctl(command, service):
# def start_service(service):
# def stop_service(service):
# def restart_service(service):
# def service_running(service):
# def dracut_eject(device):
# def vtActivate(num):
# def __init__(self, url=None, protocol="http://", host=None, port="3128",
# username=None, password=None):
# def parse_url(self):
# def parse_components(self):
# def dict(self):
# def __str__(self):
# def getdeepattr(obj, name):
# def setdeepattr(obj, name, value):
# def strip_accents(s):
# def cmp_obj_attrs(obj1, obj2, attr_list):
# def dir_tree_map(root, func, files=True, dirs=True):
# def chown_dir_tree(root, uid, gid, from_uid_only=None, from_gid_only=None):
# def conditional_chown(path, uid, gid, from_uid=None, from_gid=None):
# def is_unsupported_hw():
# def _toASCII(s):
# def upperASCII(s):
# def lowerASCII(s):
# def upcase_first_letter(text):
# class ProxyStringError(Exception):
# class ProxyString(object):
#
# Path: pyanaconda/flags.py
# class Flags(object):
# class BootArgs(OrderedDict):
# def __setattr__(self, attr, val):
# def get(self, attr, val=None):
# def set_cmdline_bool(self, flag):
# def __init__(self, read_cmdline=True):
# def read_cmdline(self):
# def __init__(self, cmdline=None, files=None):
# def read(self, filenames):
# def readstr(self, cmdline):
# def getbool(self, arg, default=False):
# def can_touch_runtime_system(msg, touch_live=False):
#
# Path: pyanaconda/constants.py
# DEFAULT_VC_FONT = "latarcyrheb-sun16"
#
# DEFAULT_KEYBOARD = "us"
#
# THREAD_XKL_WRAPPER_INIT = "AnaXklWrapperInitThread"
#
# Path: pyanaconda/threads.py
# class ThreadManager(object):
# class AnacondaThread(threading.Thread):
# def __init__(self):
# def __call__(self):
# def add(self, obj):
# def remove(self, name):
# def exists(self, name):
# def get(self, name):
# def wait(self, name):
# def wait_all(self):
# def set_error(self, name, *exc_info):
# def get_error(self, name):
# def any_errors(self):
# def raise_error(self, name):
# def in_main_thread(self):
# def running(self):
# def names(self):
# def __init__(self, *args, **kwargs):
# def run(self, *args, **kwargs):
# def initThreading():
. Output only the next line. | threadMgr.add(AnacondaThread(name=THREAD_XKL_WRAPPER_INIT, target=XklWrapper.get_instance)) |
Given the code snippet: <|code_start|> else:
# activate VConsole keymap and get converted layout and variant
c_lay_var = localed.set_and_convert_keymap(keyboard.vc_keymap)
if not keyboard.x_layouts:
if c_lay_var:
# suggested by systemd-localed for a requested VConsole keymap
keyboard.x_layouts.append(c_lay_var)
if keyboard.vc_keymap:
# nothing suggested by systemd-localed, but we may try to use the
# same string for both VConsole keymap and X layout (will fail
# safely if it doesn't work)
keyboard.x_layouts.append(keyboard.vc_keymap)
if keyboard.x_layouts:
c_keymap = localed.set_and_convert_layout(keyboard.x_layouts[0])
if not keyboard.vc_keymap:
keyboard.vc_keymap = c_keymap
# write out keyboard configuration for the X session
write_keyboard_config(keyboard, root="/", convert=False)
def background_XklWrapper_initialize():
"""
Create the XklWrapper singleton instance in a separate thread to save time
when it's really needed.
"""
<|code_end|>
, generate the next line using the imports in this file:
import types
import os
import re
import shutil
import ctypes
import gettext
import threading
import logging
from collections import namedtuple
from pyanaconda import iutil
from pyanaconda import flags
from pyanaconda.safe_dbus import dbus_call_safe_sync, dbus_get_property_safe_sync
from pyanaconda.safe_dbus import DBUS_SYSTEM_BUS_ADDR, DBusPropertyError
from pyanaconda.constants import DEFAULT_VC_FONT, DEFAULT_KEYBOARD, THREAD_XKL_WRAPPER_INIT
from pyanaconda.threads import threadMgr, AnacondaThread
from gi.repository import Xkl, Gio, GLib
from gi.repository import GdkX11
and context (functions, classes, or occasionally code) from other files:
# Path: pyanaconda/iutil.py
# def augmentEnv():
# def _run_program(argv, root='/', stdin=None, stdout=None, env_prune=None):
# def chroot():
# def execWithRedirect(command, argv, stdin=None, stdout=None,
# root='/', env_prune=None):
# def execWithCapture(command, argv, stdin=None, root='/'):
# def execReadlines(command, argv, stdin=None, root='/', env_prune=None):
# def queue_lines(out, queue):
# def chroot():
# def execConsole():
# def getDirSize(directory):
# def getSubdirSize(directory):
# def mkdirChain(directory):
# def get_active_console(dev="console"):
# def isConsoleOnVirtualTerminal(dev="console"):
# def strip_markup(text):
# def reIPL(ipldev):
# def resetRpmDb():
# def parseNfsUrl(nfsurl):
# def add_po_path(module, directory):
# def setup_translations(module):
# def fork_orphan():
# def _run_systemctl(command, service):
# def start_service(service):
# def stop_service(service):
# def restart_service(service):
# def service_running(service):
# def dracut_eject(device):
# def vtActivate(num):
# def __init__(self, url=None, protocol="http://", host=None, port="3128",
# username=None, password=None):
# def parse_url(self):
# def parse_components(self):
# def dict(self):
# def __str__(self):
# def getdeepattr(obj, name):
# def setdeepattr(obj, name, value):
# def strip_accents(s):
# def cmp_obj_attrs(obj1, obj2, attr_list):
# def dir_tree_map(root, func, files=True, dirs=True):
# def chown_dir_tree(root, uid, gid, from_uid_only=None, from_gid_only=None):
# def conditional_chown(path, uid, gid, from_uid=None, from_gid=None):
# def is_unsupported_hw():
# def _toASCII(s):
# def upperASCII(s):
# def lowerASCII(s):
# def upcase_first_letter(text):
# class ProxyStringError(Exception):
# class ProxyString(object):
#
# Path: pyanaconda/flags.py
# class Flags(object):
# class BootArgs(OrderedDict):
# def __setattr__(self, attr, val):
# def get(self, attr, val=None):
# def set_cmdline_bool(self, flag):
# def __init__(self, read_cmdline=True):
# def read_cmdline(self):
# def __init__(self, cmdline=None, files=None):
# def read(self, filenames):
# def readstr(self, cmdline):
# def getbool(self, arg, default=False):
# def can_touch_runtime_system(msg, touch_live=False):
#
# Path: pyanaconda/constants.py
# DEFAULT_VC_FONT = "latarcyrheb-sun16"
#
# DEFAULT_KEYBOARD = "us"
#
# THREAD_XKL_WRAPPER_INIT = "AnaXklWrapperInitThread"
#
# Path: pyanaconda/threads.py
# class ThreadManager(object):
# class AnacondaThread(threading.Thread):
# def __init__(self):
# def __call__(self):
# def add(self, obj):
# def remove(self, name):
# def exists(self, name):
# def get(self, name):
# def wait(self, name):
# def wait_all(self):
# def set_error(self, name, *exc_info):
# def get_error(self, name):
# def any_errors(self):
# def raise_error(self, name):
# def in_main_thread(self):
# def running(self):
# def names(self):
# def __init__(self, *args, **kwargs):
# def run(self, *args, **kwargs):
# def initThreading():
. Output only the next line. | threadMgr.add(AnacondaThread(name=THREAD_XKL_WRAPPER_INIT, target=XklWrapper.get_instance)) |
Using the snippet: <|code_start|> else:
# activate VConsole keymap and get converted layout and variant
c_lay_var = localed.set_and_convert_keymap(keyboard.vc_keymap)
if not keyboard.x_layouts:
if c_lay_var:
# suggested by systemd-localed for a requested VConsole keymap
keyboard.x_layouts.append(c_lay_var)
if keyboard.vc_keymap:
# nothing suggested by systemd-localed, but we may try to use the
# same string for both VConsole keymap and X layout (will fail
# safely if it doesn't work)
keyboard.x_layouts.append(keyboard.vc_keymap)
if keyboard.x_layouts:
c_keymap = localed.set_and_convert_layout(keyboard.x_layouts[0])
if not keyboard.vc_keymap:
keyboard.vc_keymap = c_keymap
# write out keyboard configuration for the X session
write_keyboard_config(keyboard, root="/", convert=False)
def background_XklWrapper_initialize():
"""
Create the XklWrapper singleton instance in a separate thread to save time
when it's really needed.
"""
<|code_end|>
, determine the next line of code. You have imports:
import types
import os
import re
import shutil
import ctypes
import gettext
import threading
import logging
from collections import namedtuple
from pyanaconda import iutil
from pyanaconda import flags
from pyanaconda.safe_dbus import dbus_call_safe_sync, dbus_get_property_safe_sync
from pyanaconda.safe_dbus import DBUS_SYSTEM_BUS_ADDR, DBusPropertyError
from pyanaconda.constants import DEFAULT_VC_FONT, DEFAULT_KEYBOARD, THREAD_XKL_WRAPPER_INIT
from pyanaconda.threads import threadMgr, AnacondaThread
from gi.repository import Xkl, Gio, GLib
from gi.repository import GdkX11
and context (class names, function names, or code) available:
# Path: pyanaconda/iutil.py
# def augmentEnv():
# def _run_program(argv, root='/', stdin=None, stdout=None, env_prune=None):
# def chroot():
# def execWithRedirect(command, argv, stdin=None, stdout=None,
# root='/', env_prune=None):
# def execWithCapture(command, argv, stdin=None, root='/'):
# def execReadlines(command, argv, stdin=None, root='/', env_prune=None):
# def queue_lines(out, queue):
# def chroot():
# def execConsole():
# def getDirSize(directory):
# def getSubdirSize(directory):
# def mkdirChain(directory):
# def get_active_console(dev="console"):
# def isConsoleOnVirtualTerminal(dev="console"):
# def strip_markup(text):
# def reIPL(ipldev):
# def resetRpmDb():
# def parseNfsUrl(nfsurl):
# def add_po_path(module, directory):
# def setup_translations(module):
# def fork_orphan():
# def _run_systemctl(command, service):
# def start_service(service):
# def stop_service(service):
# def restart_service(service):
# def service_running(service):
# def dracut_eject(device):
# def vtActivate(num):
# def __init__(self, url=None, protocol="http://", host=None, port="3128",
# username=None, password=None):
# def parse_url(self):
# def parse_components(self):
# def dict(self):
# def __str__(self):
# def getdeepattr(obj, name):
# def setdeepattr(obj, name, value):
# def strip_accents(s):
# def cmp_obj_attrs(obj1, obj2, attr_list):
# def dir_tree_map(root, func, files=True, dirs=True):
# def chown_dir_tree(root, uid, gid, from_uid_only=None, from_gid_only=None):
# def conditional_chown(path, uid, gid, from_uid=None, from_gid=None):
# def is_unsupported_hw():
# def _toASCII(s):
# def upperASCII(s):
# def lowerASCII(s):
# def upcase_first_letter(text):
# class ProxyStringError(Exception):
# class ProxyString(object):
#
# Path: pyanaconda/flags.py
# class Flags(object):
# class BootArgs(OrderedDict):
# def __setattr__(self, attr, val):
# def get(self, attr, val=None):
# def set_cmdline_bool(self, flag):
# def __init__(self, read_cmdline=True):
# def read_cmdline(self):
# def __init__(self, cmdline=None, files=None):
# def read(self, filenames):
# def readstr(self, cmdline):
# def getbool(self, arg, default=False):
# def can_touch_runtime_system(msg, touch_live=False):
#
# Path: pyanaconda/constants.py
# DEFAULT_VC_FONT = "latarcyrheb-sun16"
#
# DEFAULT_KEYBOARD = "us"
#
# THREAD_XKL_WRAPPER_INIT = "AnaXklWrapperInitThread"
#
# Path: pyanaconda/threads.py
# class ThreadManager(object):
# class AnacondaThread(threading.Thread):
# def __init__(self):
# def __call__(self):
# def add(self, obj):
# def remove(self, name):
# def exists(self, name):
# def get(self, name):
# def wait(self, name):
# def wait_all(self):
# def set_error(self, name, *exc_info):
# def get_error(self, name):
# def any_errors(self):
# def raise_error(self, name):
# def in_main_thread(self):
# def running(self):
# def names(self):
# def __init__(self, *args, **kwargs):
# def run(self, *args, **kwargs):
# def initThreading():
. Output only the next line. | threadMgr.add(AnacondaThread(name=THREAD_XKL_WRAPPER_INIT, target=XklWrapper.get_instance)) |
Next line prediction: <|code_start|> # Add a handler for the duped stuff. No fancy formatting, thanks.
self.addFileHandler(sys.stdout, stdoutLogger,
fmtStr=STDOUT_FORMAT, minLevel=logging.INFO)
# Stderr logger
stderrLogger = logging.getLogger("anaconda.stderr")
stderrLogger.setLevel(logging.INFO)
self.addFileHandler(sys.stderr, stderrLogger,
fmtStr=STDOUT_FORMAT, minLevel=logging.INFO)
# Add a simple handler - file or stream, depending on what we're given.
def addFileHandler (self, dest, addToLogger, minLevel=DEFAULT_TTY_LEVEL,
fmtStr=ENTRY_FORMAT,
autoLevel=False):
try:
if isinstance(dest, types.StringTypes):
logfileHandler = logging.FileHandler(dest)
else:
logfileHandler = logging.StreamHandler(dest)
logfileHandler.setLevel(minLevel)
logfileHandler.setFormatter(logging.Formatter(fmtStr, DATE_FORMAT))
autoSetLevel(logfileHandler, autoLevel)
addToLogger.addHandler(logfileHandler)
except IOError:
pass
def forwardToSyslog(self, logr):
"""Forward everything that goes in the logger to the syslog daemon.
"""
<|code_end|>
. Use current file imports:
(import logging
import os
import sys
import types
import warnings
from logging.handlers import SysLogHandler, SYSLOG_UDP_PORT
from pyanaconda.flags import flags
from threading import Lock)
and context including class names, function names, or small code snippets from other files:
# Path: pyanaconda/flags.py
# class Flags(object):
# class BootArgs(OrderedDict):
# def __setattr__(self, attr, val):
# def get(self, attr, val=None):
# def set_cmdline_bool(self, flag):
# def __init__(self, read_cmdline=True):
# def read_cmdline(self):
# def __init__(self, cmdline=None, files=None):
# def read(self, filenames):
# def readstr(self, cmdline):
# def getbool(self, arg, default=False):
# def can_touch_runtime_system(msg, touch_live=False):
. Output only the next line. | if flags.imageInstall: |
Predict the next line after this snippet: <|code_start|># GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Matt Wilson <msw@redhat.com>
# Chris Lumens <clumens@redhat.com>
#
log = logging.getLogger("anaconda")
class Desktop(object):
def __init__(self):
self._runlevel = 3
self.desktop = None
@property
def runlevel(self):
return self._runlevel
@runlevel.setter
def runlevel(self, runlevel):
if int(runlevel) not in RUNLEVELS:
raise RuntimeError("Desktop::setDefaultRunLevel() - Must specify runlevel as one of %s" % RUNLEVELS.keys())
self._runlevel = runlevel
def write(self):
if self.desktop:
<|code_end|>
using the current file's imports:
import os
import logging
from pyanaconda.constants import ROOT_PATH, RUNLEVELS
from pyanaconda import iutil
and any relevant context from other files:
# Path: pyanaconda/constants.py
# ROOT_PATH = "/mnt/sysimage"
#
# RUNLEVELS = {3: 'multi-user.target', 5: 'graphical.target'}
#
# Path: pyanaconda/iutil.py
# def augmentEnv():
# def _run_program(argv, root='/', stdin=None, stdout=None, env_prune=None):
# def chroot():
# def execWithRedirect(command, argv, stdin=None, stdout=None,
# root='/', env_prune=None):
# def execWithCapture(command, argv, stdin=None, root='/'):
# def execReadlines(command, argv, stdin=None, root='/', env_prune=None):
# def queue_lines(out, queue):
# def chroot():
# def execConsole():
# def getDirSize(directory):
# def getSubdirSize(directory):
# def mkdirChain(directory):
# def get_active_console(dev="console"):
# def isConsoleOnVirtualTerminal(dev="console"):
# def strip_markup(text):
# def reIPL(ipldev):
# def resetRpmDb():
# def parseNfsUrl(nfsurl):
# def add_po_path(module, directory):
# def setup_translations(module):
# def fork_orphan():
# def _run_systemctl(command, service):
# def start_service(service):
# def stop_service(service):
# def restart_service(service):
# def service_running(service):
# def dracut_eject(device):
# def vtActivate(num):
# def __init__(self, url=None, protocol="http://", host=None, port="3128",
# username=None, password=None):
# def parse_url(self):
# def parse_components(self):
# def dict(self):
# def __str__(self):
# def getdeepattr(obj, name):
# def setdeepattr(obj, name, value):
# def strip_accents(s):
# def cmp_obj_attrs(obj1, obj2, attr_list):
# def dir_tree_map(root, func, files=True, dirs=True):
# def chown_dir_tree(root, uid, gid, from_uid_only=None, from_gid_only=None):
# def conditional_chown(path, uid, gid, from_uid=None, from_gid=None):
# def is_unsupported_hw():
# def _toASCII(s):
# def upperASCII(s):
# def lowerASCII(s):
# def upcase_first_letter(text):
# class ProxyStringError(Exception):
# class ProxyString(object):
. Output only the next line. | with open(ROOT_PATH + "/etc/sysconfig/desktop", "w") as f: |
Given the following code snippet before the placeholder: <|code_start|># it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Matt Wilson <msw@redhat.com>
# Chris Lumens <clumens@redhat.com>
#
log = logging.getLogger("anaconda")
class Desktop(object):
def __init__(self):
self._runlevel = 3
self.desktop = None
@property
def runlevel(self):
return self._runlevel
@runlevel.setter
def runlevel(self, runlevel):
<|code_end|>
, predict the next line using imports from the current file:
import os
import logging
from pyanaconda.constants import ROOT_PATH, RUNLEVELS
from pyanaconda import iutil
and context including class names, function names, and sometimes code from other files:
# Path: pyanaconda/constants.py
# ROOT_PATH = "/mnt/sysimage"
#
# RUNLEVELS = {3: 'multi-user.target', 5: 'graphical.target'}
#
# Path: pyanaconda/iutil.py
# def augmentEnv():
# def _run_program(argv, root='/', stdin=None, stdout=None, env_prune=None):
# def chroot():
# def execWithRedirect(command, argv, stdin=None, stdout=None,
# root='/', env_prune=None):
# def execWithCapture(command, argv, stdin=None, root='/'):
# def execReadlines(command, argv, stdin=None, root='/', env_prune=None):
# def queue_lines(out, queue):
# def chroot():
# def execConsole():
# def getDirSize(directory):
# def getSubdirSize(directory):
# def mkdirChain(directory):
# def get_active_console(dev="console"):
# def isConsoleOnVirtualTerminal(dev="console"):
# def strip_markup(text):
# def reIPL(ipldev):
# def resetRpmDb():
# def parseNfsUrl(nfsurl):
# def add_po_path(module, directory):
# def setup_translations(module):
# def fork_orphan():
# def _run_systemctl(command, service):
# def start_service(service):
# def stop_service(service):
# def restart_service(service):
# def service_running(service):
# def dracut_eject(device):
# def vtActivate(num):
# def __init__(self, url=None, protocol="http://", host=None, port="3128",
# username=None, password=None):
# def parse_url(self):
# def parse_components(self):
# def dict(self):
# def __str__(self):
# def getdeepattr(obj, name):
# def setdeepattr(obj, name, value):
# def strip_accents(s):
# def cmp_obj_attrs(obj1, obj2, attr_list):
# def dir_tree_map(root, func, files=True, dirs=True):
# def chown_dir_tree(root, uid, gid, from_uid_only=None, from_gid_only=None):
# def conditional_chown(path, uid, gid, from_uid=None, from_gid=None):
# def is_unsupported_hw():
# def _toASCII(s):
# def upperASCII(s):
# def lowerASCII(s):
# def upcase_first_letter(text):
# class ProxyStringError(Exception):
# class ProxyString(object):
. Output only the next line. | if int(runlevel) not in RUNLEVELS: |
Based on the snippet: <|code_start|>
class Desktop(object):
def __init__(self):
self._runlevel = 3
self.desktop = None
@property
def runlevel(self):
return self._runlevel
@runlevel.setter
def runlevel(self, runlevel):
if int(runlevel) not in RUNLEVELS:
raise RuntimeError("Desktop::setDefaultRunLevel() - Must specify runlevel as one of %s" % RUNLEVELS.keys())
self._runlevel = runlevel
def write(self):
if self.desktop:
with open(ROOT_PATH + "/etc/sysconfig/desktop", "w") as f:
f.write("DESKTOP=%s\n" % self.desktop)
if not os.path.isdir(ROOT_PATH + '/etc/systemd/system'):
log.warning("there is no /etc/systemd/system directory, cannot update default.target!")
return
default_target = ROOT_PATH + '/etc/systemd/system/default.target'
if os.path.islink(default_target):
os.unlink(default_target)
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import logging
from pyanaconda.constants import ROOT_PATH, RUNLEVELS
from pyanaconda import iutil
and context (classes, functions, sometimes code) from other files:
# Path: pyanaconda/constants.py
# ROOT_PATH = "/mnt/sysimage"
#
# RUNLEVELS = {3: 'multi-user.target', 5: 'graphical.target'}
#
# Path: pyanaconda/iutil.py
# def augmentEnv():
# def _run_program(argv, root='/', stdin=None, stdout=None, env_prune=None):
# def chroot():
# def execWithRedirect(command, argv, stdin=None, stdout=None,
# root='/', env_prune=None):
# def execWithCapture(command, argv, stdin=None, root='/'):
# def execReadlines(command, argv, stdin=None, root='/', env_prune=None):
# def queue_lines(out, queue):
# def chroot():
# def execConsole():
# def getDirSize(directory):
# def getSubdirSize(directory):
# def mkdirChain(directory):
# def get_active_console(dev="console"):
# def isConsoleOnVirtualTerminal(dev="console"):
# def strip_markup(text):
# def reIPL(ipldev):
# def resetRpmDb():
# def parseNfsUrl(nfsurl):
# def add_po_path(module, directory):
# def setup_translations(module):
# def fork_orphan():
# def _run_systemctl(command, service):
# def start_service(service):
# def stop_service(service):
# def restart_service(service):
# def service_running(service):
# def dracut_eject(device):
# def vtActivate(num):
# def __init__(self, url=None, protocol="http://", host=None, port="3128",
# username=None, password=None):
# def parse_url(self):
# def parse_components(self):
# def dict(self):
# def __str__(self):
# def getdeepattr(obj, name):
# def setdeepattr(obj, name, value):
# def strip_accents(s):
# def cmp_obj_attrs(obj1, obj2, attr_list):
# def dir_tree_map(root, func, files=True, dirs=True):
# def chown_dir_tree(root, uid, gid, from_uid_only=None, from_gid_only=None):
# def conditional_chown(path, uid, gid, from_uid=None, from_gid=None):
# def is_unsupported_hw():
# def _toASCII(s):
# def upperASCII(s):
# def lowerASCII(s):
# def upcase_first_letter(text):
# class ProxyStringError(Exception):
# class ProxyString(object):
. Output only the next line. | sd_prefix = iutil.execWithCapture( |
Here is a snippet: <|code_start|> locales configuration.
"""
def __init__(self):
# the class inheriting from this class is responsible for populating
# these items with actual objects
self._languageStore = None
self._languageStoreFilter = None
self._languageEntry = None
self._langSelection = None
self._langSelectedRenderer = None
self._langSelectedColumn = None
self._langView = None
self._localeView = None
self._localeStore = None
self._localeSelection = None
self._arrow = None
def initialize(self):
# Render an arrow for the chosen language
if get_default_widget_direction() == Gtk.TextDirection.LTR:
self._arrow = Gtk.Image.new_from_file("/usr/share/anaconda/pixmaps/right-arrow-icon.png")
else:
self._arrow = Gtk.Image.new_from_file("/usr/share/anaconda/pixmaps/left-arrow-icon.png")
self._langSelectedColumn.set_cell_data_func(self._langSelectedRenderer,
self._render_lang_selected)
# fill the list with available translations
<|code_end|>
. Write the next line using the current file imports:
from gi.repository import Gtk, Pango
from pyanaconda import localization
from pyanaconda.iutil import strip_accents
from pyanaconda.ui.gui.utils import set_treeview_selection, get_default_widget_direction
and context from other files:
# Path: pyanaconda/localization.py
# LOCALE_CONF_FILE_PATH = "/etc/locale.conf"
# LANGCODE_RE = re.compile(r'(?P<language>[A-Za-z]+)'
# r'(_(?P<territory>[A-Za-z]+))?'
# r'(\.(?P<encoding>[-A-Za-z0-9]+))?'
# r'(@(?P<script>[-A-Za-z0-9]+))?')
# class LocalizationConfigError(Exception):
# class InvalidLocaleSpec(LocalizationConfigError):
# def parse_langcode(langcode):
# def is_supported_locale(locale):
# def langcode_matches_locale(langcode, locale):
# def find_best_locale_match(locale, langcodes):
# def get_match_score(locale, langcode):
# def setup_locale(locale, lang=None):
# def get_english_name(locale):
# def get_native_name(locale):
# def get_available_translations(localedir=None):
# def get_language_locales(lang):
# def get_territory_locales(territory):
# def get_locale_keyboards(locale):
# def get_locale_timezones(locale):
# def get_locale_territory(locale):
# def write_language_configuration(lang, root):
# def load_firmware_language(lang):
#
# Path: pyanaconda/iutil.py
# def strip_accents(s):
# """This function takes arbitrary unicode string
# and returns it with all the diacritics removed.
#
# :param s: arbitrary string
# :type s: unicode
#
# :return: s with diacritics removed
# :rtype: unicode
#
# """
# return ''.join((c for c in unicodedata.normalize('NFD', s)
# if unicodedata.category(c) != 'Mn'))
#
# Path: pyanaconda/ui/gui/utils.py
# def set_treeview_selection(treeview, item, col=0):
# """
# Select the given item in the given treeview and scroll to it.
#
# :param treeview: treeview to select and item in
# :type treeview: GtkTreeView
# :param item: item to be selected
# :type item: str
# :param col: column to search for the item in
# :type col: int
# :return: selected iterator or None if item was not found
# :rtype: GtkTreeIter or None
#
# """
#
# model = treeview.get_model()
# itr = model.get_iter_first()
# while itr and not model[itr][col] == item:
# itr = model.iter_next(itr)
#
# if not itr:
# # item not found, cannot be selected
# return None
#
# # otherwise select the item and scroll to it
# selection = treeview.get_selection()
# selection.select_iter(itr)
# path = model.get_path(itr)
#
# # row_align=0.5 tells GTK to move the cell to the middle of the
# # treeview viewport (0.0 should align it with the top, 1.0 with bottom)
# # If the cell is the uppermost one, it should align it with the top
# # of the viewport.
# #
# # Unfortunately, this does not work as expected due to a bug in GTK.
# # (see rhbz#970048)
# treeview.scroll_to_cell(path, use_align=True, row_align=0.5)
#
# return itr
#
# def get_default_widget_direction():
# """
# Function to get default widget direction (RTL/LTR) for the current language
# configuration.
#
# XXX: this should be provided by the Gtk itself (#1008821)
#
# :return: either Gtk.TextDirection.LTR or Gtk.TextDirection.RTL
# :rtype: GtkTextDirection
#
# """
#
# # this is quite a hack, but it's exactly the same check Gtk uses internally
# xlated = gettext.ldgettext("gtk30", "default:LTR")
# if xlated == "default:LTR":
# return Gtk.TextDirection.LTR
# else:
# return Gtk.TextDirection.RTL
, which may include functions, classes, or code. Output only the next line. | for lang in localization.get_available_translations(): |
Given the following code snippet before the placeholder: <|code_start|> else:
self._arrow = Gtk.Image.new_from_file("/usr/share/anaconda/pixmaps/left-arrow-icon.png")
self._langSelectedColumn.set_cell_data_func(self._langSelectedRenderer,
self._render_lang_selected)
# fill the list with available translations
for lang in localization.get_available_translations():
self._add_language(self._languageStore,
localization.get_native_name(lang),
localization.get_english_name(lang), lang)
# make filtering work
self._languageStoreFilter.set_visible_func(self._matches_entry, None)
def _matches_entry(self, model, itr, *args):
# Nothing in the text entry? Display everything.
entry = self._languageEntry.get_text().strip()
if not entry:
return True
# Need to strip out the pango markup before attempting to match.
# Otherwise, starting to type "span" for "spanish" will match everything
# due to the enclosing span tag.
# (success, attrs, native, accel)
native = Pango.parse_markup(model[itr][0], -1, "_")[2]
english = model[itr][1]
# Otherwise, filter the list showing only what is matched by the
# text entry. Either the English or native names can match.
lowered = entry.lower()
<|code_end|>
, predict the next line using imports from the current file:
from gi.repository import Gtk, Pango
from pyanaconda import localization
from pyanaconda.iutil import strip_accents
from pyanaconda.ui.gui.utils import set_treeview_selection, get_default_widget_direction
and context including class names, function names, and sometimes code from other files:
# Path: pyanaconda/localization.py
# LOCALE_CONF_FILE_PATH = "/etc/locale.conf"
# LANGCODE_RE = re.compile(r'(?P<language>[A-Za-z]+)'
# r'(_(?P<territory>[A-Za-z]+))?'
# r'(\.(?P<encoding>[-A-Za-z0-9]+))?'
# r'(@(?P<script>[-A-Za-z0-9]+))?')
# class LocalizationConfigError(Exception):
# class InvalidLocaleSpec(LocalizationConfigError):
# def parse_langcode(langcode):
# def is_supported_locale(locale):
# def langcode_matches_locale(langcode, locale):
# def find_best_locale_match(locale, langcodes):
# def get_match_score(locale, langcode):
# def setup_locale(locale, lang=None):
# def get_english_name(locale):
# def get_native_name(locale):
# def get_available_translations(localedir=None):
# def get_language_locales(lang):
# def get_territory_locales(territory):
# def get_locale_keyboards(locale):
# def get_locale_timezones(locale):
# def get_locale_territory(locale):
# def write_language_configuration(lang, root):
# def load_firmware_language(lang):
#
# Path: pyanaconda/iutil.py
# def strip_accents(s):
# """This function takes arbitrary unicode string
# and returns it with all the diacritics removed.
#
# :param s: arbitrary string
# :type s: unicode
#
# :return: s with diacritics removed
# :rtype: unicode
#
# """
# return ''.join((c for c in unicodedata.normalize('NFD', s)
# if unicodedata.category(c) != 'Mn'))
#
# Path: pyanaconda/ui/gui/utils.py
# def set_treeview_selection(treeview, item, col=0):
# """
# Select the given item in the given treeview and scroll to it.
#
# :param treeview: treeview to select and item in
# :type treeview: GtkTreeView
# :param item: item to be selected
# :type item: str
# :param col: column to search for the item in
# :type col: int
# :return: selected iterator or None if item was not found
# :rtype: GtkTreeIter or None
#
# """
#
# model = treeview.get_model()
# itr = model.get_iter_first()
# while itr and not model[itr][col] == item:
# itr = model.iter_next(itr)
#
# if not itr:
# # item not found, cannot be selected
# return None
#
# # otherwise select the item and scroll to it
# selection = treeview.get_selection()
# selection.select_iter(itr)
# path = model.get_path(itr)
#
# # row_align=0.5 tells GTK to move the cell to the middle of the
# # treeview viewport (0.0 should align it with the top, 1.0 with bottom)
# # If the cell is the uppermost one, it should align it with the top
# # of the viewport.
# #
# # Unfortunately, this does not work as expected due to a bug in GTK.
# # (see rhbz#970048)
# treeview.scroll_to_cell(path, use_align=True, row_align=0.5)
#
# return itr
#
# def get_default_widget_direction():
# """
# Function to get default widget direction (RTL/LTR) for the current language
# configuration.
#
# XXX: this should be provided by the Gtk itself (#1008821)
#
# :return: either Gtk.TextDirection.LTR or Gtk.TextDirection.RTL
# :rtype: GtkTextDirection
#
# """
#
# # this is quite a hack, but it's exactly the same check Gtk uses internally
# xlated = gettext.ldgettext("gtk30", "default:LTR")
# if xlated == "default:LTR":
# return Gtk.TextDirection.LTR
# else:
# return Gtk.TextDirection.RTL
. Output only the next line. | translit = strip_accents(unicode(native, "utf-8")).lower() |
Based on the snippet: <|code_start|> else:
renderer.set_property("pixbuf", None)
def _add_language(self, store, native, english, lang):
"""Override this method with a valid implementation"""
raise NotImplementedError()
def _add_locale(self, store, native, locale):
"""Override this method with a valid implementation"""
raise NotImplementedError()
def _select_locale(self, locale):
"""
Try to select the given locale in the language and locale
treeviews. This method tries to find the best match for the given
locale.
:return: a pair of selected iterators (language and locale)
:rtype: a pair of GtkTreeIter or None objects
"""
# get lang and select it
parts = localization.parse_langcode(locale)
if "language" not in parts:
# invalid locale, cannot select
return (None, None)
<|code_end|>
, predict the immediate next line with the help of imports:
from gi.repository import Gtk, Pango
from pyanaconda import localization
from pyanaconda.iutil import strip_accents
from pyanaconda.ui.gui.utils import set_treeview_selection, get_default_widget_direction
and context (classes, functions, sometimes code) from other files:
# Path: pyanaconda/localization.py
# LOCALE_CONF_FILE_PATH = "/etc/locale.conf"
# LANGCODE_RE = re.compile(r'(?P<language>[A-Za-z]+)'
# r'(_(?P<territory>[A-Za-z]+))?'
# r'(\.(?P<encoding>[-A-Za-z0-9]+))?'
# r'(@(?P<script>[-A-Za-z0-9]+))?')
# class LocalizationConfigError(Exception):
# class InvalidLocaleSpec(LocalizationConfigError):
# def parse_langcode(langcode):
# def is_supported_locale(locale):
# def langcode_matches_locale(langcode, locale):
# def find_best_locale_match(locale, langcodes):
# def get_match_score(locale, langcode):
# def setup_locale(locale, lang=None):
# def get_english_name(locale):
# def get_native_name(locale):
# def get_available_translations(localedir=None):
# def get_language_locales(lang):
# def get_territory_locales(territory):
# def get_locale_keyboards(locale):
# def get_locale_timezones(locale):
# def get_locale_territory(locale):
# def write_language_configuration(lang, root):
# def load_firmware_language(lang):
#
# Path: pyanaconda/iutil.py
# def strip_accents(s):
# """This function takes arbitrary unicode string
# and returns it with all the diacritics removed.
#
# :param s: arbitrary string
# :type s: unicode
#
# :return: s with diacritics removed
# :rtype: unicode
#
# """
# return ''.join((c for c in unicodedata.normalize('NFD', s)
# if unicodedata.category(c) != 'Mn'))
#
# Path: pyanaconda/ui/gui/utils.py
# def set_treeview_selection(treeview, item, col=0):
# """
# Select the given item in the given treeview and scroll to it.
#
# :param treeview: treeview to select and item in
# :type treeview: GtkTreeView
# :param item: item to be selected
# :type item: str
# :param col: column to search for the item in
# :type col: int
# :return: selected iterator or None if item was not found
# :rtype: GtkTreeIter or None
#
# """
#
# model = treeview.get_model()
# itr = model.get_iter_first()
# while itr and not model[itr][col] == item:
# itr = model.iter_next(itr)
#
# if not itr:
# # item not found, cannot be selected
# return None
#
# # otherwise select the item and scroll to it
# selection = treeview.get_selection()
# selection.select_iter(itr)
# path = model.get_path(itr)
#
# # row_align=0.5 tells GTK to move the cell to the middle of the
# # treeview viewport (0.0 should align it with the top, 1.0 with bottom)
# # If the cell is the uppermost one, it should align it with the top
# # of the viewport.
# #
# # Unfortunately, this does not work as expected due to a bug in GTK.
# # (see rhbz#970048)
# treeview.scroll_to_cell(path, use_align=True, row_align=0.5)
#
# return itr
#
# def get_default_widget_direction():
# """
# Function to get default widget direction (RTL/LTR) for the current language
# configuration.
#
# XXX: this should be provided by the Gtk itself (#1008821)
#
# :return: either Gtk.TextDirection.LTR or Gtk.TextDirection.RTL
# :rtype: GtkTextDirection
#
# """
#
# # this is quite a hack, but it's exactly the same check Gtk uses internally
# xlated = gettext.ldgettext("gtk30", "default:LTR")
# if xlated == "default:LTR":
# return Gtk.TextDirection.LTR
# else:
# return Gtk.TextDirection.RTL
. Output only the next line. | lang_itr = set_treeview_selection(self._langView, parts["language"], col=2) |
Given snippet: <|code_start|>screens handling languages or locales configuration.
"""
class LangLocaleHandler(object):
"""
Class that could be used as a mixin for screens handling languages or
locales configuration.
"""
def __init__(self):
# the class inheriting from this class is responsible for populating
# these items with actual objects
self._languageStore = None
self._languageStoreFilter = None
self._languageEntry = None
self._langSelection = None
self._langSelectedRenderer = None
self._langSelectedColumn = None
self._langView = None
self._localeView = None
self._localeStore = None
self._localeSelection = None
self._arrow = None
def initialize(self):
# Render an arrow for the chosen language
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from gi.repository import Gtk, Pango
from pyanaconda import localization
from pyanaconda.iutil import strip_accents
from pyanaconda.ui.gui.utils import set_treeview_selection, get_default_widget_direction
and context:
# Path: pyanaconda/localization.py
# LOCALE_CONF_FILE_PATH = "/etc/locale.conf"
# LANGCODE_RE = re.compile(r'(?P<language>[A-Za-z]+)'
# r'(_(?P<territory>[A-Za-z]+))?'
# r'(\.(?P<encoding>[-A-Za-z0-9]+))?'
# r'(@(?P<script>[-A-Za-z0-9]+))?')
# class LocalizationConfigError(Exception):
# class InvalidLocaleSpec(LocalizationConfigError):
# def parse_langcode(langcode):
# def is_supported_locale(locale):
# def langcode_matches_locale(langcode, locale):
# def find_best_locale_match(locale, langcodes):
# def get_match_score(locale, langcode):
# def setup_locale(locale, lang=None):
# def get_english_name(locale):
# def get_native_name(locale):
# def get_available_translations(localedir=None):
# def get_language_locales(lang):
# def get_territory_locales(territory):
# def get_locale_keyboards(locale):
# def get_locale_timezones(locale):
# def get_locale_territory(locale):
# def write_language_configuration(lang, root):
# def load_firmware_language(lang):
#
# Path: pyanaconda/iutil.py
# def strip_accents(s):
# """This function takes arbitrary unicode string
# and returns it with all the diacritics removed.
#
# :param s: arbitrary string
# :type s: unicode
#
# :return: s with diacritics removed
# :rtype: unicode
#
# """
# return ''.join((c for c in unicodedata.normalize('NFD', s)
# if unicodedata.category(c) != 'Mn'))
#
# Path: pyanaconda/ui/gui/utils.py
# def set_treeview_selection(treeview, item, col=0):
# """
# Select the given item in the given treeview and scroll to it.
#
# :param treeview: treeview to select and item in
# :type treeview: GtkTreeView
# :param item: item to be selected
# :type item: str
# :param col: column to search for the item in
# :type col: int
# :return: selected iterator or None if item was not found
# :rtype: GtkTreeIter or None
#
# """
#
# model = treeview.get_model()
# itr = model.get_iter_first()
# while itr and not model[itr][col] == item:
# itr = model.iter_next(itr)
#
# if not itr:
# # item not found, cannot be selected
# return None
#
# # otherwise select the item and scroll to it
# selection = treeview.get_selection()
# selection.select_iter(itr)
# path = model.get_path(itr)
#
# # row_align=0.5 tells GTK to move the cell to the middle of the
# # treeview viewport (0.0 should align it with the top, 1.0 with bottom)
# # If the cell is the uppermost one, it should align it with the top
# # of the viewport.
# #
# # Unfortunately, this does not work as expected due to a bug in GTK.
# # (see rhbz#970048)
# treeview.scroll_to_cell(path, use_align=True, row_align=0.5)
#
# return itr
#
# def get_default_widget_direction():
# """
# Function to get default widget direction (RTL/LTR) for the current language
# configuration.
#
# XXX: this should be provided by the Gtk itself (#1008821)
#
# :return: either Gtk.TextDirection.LTR or Gtk.TextDirection.RTL
# :rtype: GtkTextDirection
#
# """
#
# # this is quite a hack, but it's exactly the same check Gtk uses internally
# xlated = gettext.ldgettext("gtk30", "default:LTR")
# if xlated == "default:LTR":
# return Gtk.TextDirection.LTR
# else:
# return Gtk.TextDirection.RTL
which might include code, classes, or functions. Output only the next line. | if get_default_widget_direction() == Gtk.TextDirection.LTR: |
Given the following code snippet before the placeholder: <|code_start|>def gtk_call_once(func, *args):
"""Wrapper for GLib.idle_add call that ensures the func is called
only once.
"""
def wrap(args):
func(*args)
return False
GLib.idle_add(wrap, args)
def gtk_action_wait(func):
"""Decorator method which ensures every call of the decorated function to be
executed in the context of Gtk main loop even if called from a non-main
thread and returns the ret value after the decorated method finishes.
"""
queue = Queue.Queue()
def _idle_method(q_args):
"""This method contains the code for the main loop to execute.
"""
queue, args = q_args
ret = func(*args)
queue.put(ret)
return False
def _call_method(*args):
"""The new body for the decorated method. If needed, it uses closure
bound queue variable which is valid until the reference to this
method is destroyed."""
<|code_end|>
, predict the next line using imports from the current file:
from pyanaconda.threads import threadMgr
from contextlib import contextmanager
from gi.repository import Gtk, GLib, AnacondaWidgets
from pyanaconda.ui.gui import ANACONDA_WINDOW_GROUP
import Queue
import gettext
and context including class names, function names, and sometimes code from other files:
# Path: pyanaconda/threads.py
# class ThreadManager(object):
# class AnacondaThread(threading.Thread):
# def __init__(self):
# def __call__(self):
# def add(self, obj):
# def remove(self, name):
# def exists(self, name):
# def get(self, name):
# def wait(self, name):
# def wait_all(self):
# def set_error(self, name, *exc_info):
# def get_error(self, name):
# def any_errors(self):
# def raise_error(self, name):
# def in_main_thread(self):
# def running(self):
# def names(self):
# def __init__(self, *args, **kwargs):
# def run(self, *args, **kwargs):
# def initThreading():
. Output only the next line. | if threadMgr.in_main_thread(): |
Continue the code snippet: <|code_start|> stderr=subprocess.STDOUT,
preexec_fn=chroot, cwd=root, env=env)
out = proc.communicate()[0]
if out:
for line in out.splitlines():
program_log.info(line)
if stdout:
stdout.write(line)
stdout.write("\n")
except OSError as e:
program_log.error("Error running %s: %s", argv[0], e.strerror)
raise
program_log.debug("Return code: %d", proc.returncode)
return (proc.returncode, out)
def execWithRedirect(command, argv, stdin=None, stdout=None,
root='/', env_prune=None):
""" Run an external program and redirect the output to a file.
@param command The command to run
@param argv The argument list
@param stdin The file object to read stdin from.
@param stdout Optional file object to redirect stdout and stderr to.
@param root The directory to chroot to before running command.
@param env_prune environment variable to remove before execution
@return The return code of the command
"""
<|code_end|>
. Use current file imports:
import glob
import os
import stat
import os.path
import errno
import subprocess
import unicodedata
import string
import types
import logging
from threading import Thread
from Queue import Queue, Empty
from pyanaconda.flags import flags
from pyanaconda.constants import DRACUT_SHUTDOWN_EJECT, ROOT_PATH, TRANSLATIONS_UPDATE_DIR, UNSUPPORTED_HW
from pyanaconda.regexes import PROXY_URL_PARSE
from pyanaconda.anaconda_log import program_log_lock
and context (classes, functions, or code) from other files:
# Path: pyanaconda/flags.py
# class Flags(object):
# class BootArgs(OrderedDict):
# def __setattr__(self, attr, val):
# def get(self, attr, val=None):
# def set_cmdline_bool(self, flag):
# def __init__(self, read_cmdline=True):
# def read_cmdline(self):
# def __init__(self, cmdline=None, files=None):
# def read(self, filenames):
# def readstr(self, cmdline):
# def getbool(self, arg, default=False):
# def can_touch_runtime_system(msg, touch_live=False):
#
# Path: pyanaconda/constants.py
# DRACUT_SHUTDOWN_EJECT = "/run/initramfs/usr/lib/dracut/hooks/shutdown/99anaconda-eject.sh"
#
# ROOT_PATH = "/mnt/sysimage"
#
# TRANSLATIONS_UPDATE_DIR="/tmp/updates/po"
#
# UNSUPPORTED_HW = 1 << 28
#
# Path: pyanaconda/anaconda_log.py
# DEFAULT_TTY_LEVEL = logging.INFO
# ENTRY_FORMAT = "%(asctime)s,%(msecs)03d %(levelname)s %(name)s: %(message)s"
# TTY_FORMAT = "%(levelname)s %(name)s: %(message)s"
# STDOUT_FORMAT = "%(asctime)s %(message)s"
# DATE_FORMAT = "%H:%M:%S"
# MAIN_LOG_FILE = "/tmp/anaconda.log"
# MAIN_LOG_TTY = "/dev/tty3"
# PROGRAM_LOG_FILE = "/tmp/program.log"
# PROGRAM_LOG_TTY = "/dev/tty5"
# STORAGE_LOG_FILE = "/tmp/storage.log"
# PACKAGING_LOG_FILE = "/tmp/packaging.log"
# SENSITIVE_INFO_LOG_FILE = "/tmp/sensitive-info.log"
# ANACONDA_SYSLOG_FACILITY = SysLogHandler.LOG_LOCAL1
# SYSLOG_CFGFILE = "/etc/rsyslog.conf"
# VIRTIO_PORT = "/dev/virtio-ports/org.fedoraproject.anaconda.log.0"
# TEMPLATE = "*.* @@%s\n"
# TEMPLATE = "*.* %s;anaconda_syslog\n"
# def autoSetLevel(handler, value):
# def setHandlersLevel(logr, level):
# def __init__(self,
# address=('localhost', SYSLOG_UDP_PORT),
# facility=SysLogHandler.LOG_USER,
# tag=''):
# def emit(self, record):
# def __init__ (self):
# def addFileHandler (self, dest, addToLogger, minLevel=DEFAULT_TTY_LEVEL,
# fmtStr=ENTRY_FORMAT,
# autoLevel=False):
# def forwardToSyslog(self, logr):
# def showwarning(self, message, category, filename, lineno,
# file=sys.stderr, line=None):
# def restartSyslog(self):
# def updateRemote(self, remote_syslog):
# def setupVirtio(self):
# def init():
# class AnacondaSyslogHandler(SysLogHandler):
# class AnacondaLog:
. Output only the next line. | if flags.testing: |
Continue the code snippet: <|code_start|>
service_name = service + ".service"
ret = execWithRedirect("systemctl", [command, service_name])
return ret
def start_service(service):
return _run_systemctl("start", service)
def stop_service(service):
return _run_systemctl("stop", service)
def restart_service(service):
return _run_systemctl("restart", service)
def service_running(service):
ret = _run_systemctl("status", service)
return ret == 0
def dracut_eject(device):
"""
Use dracut shutdown hook to eject media after the system is shutdown.
This is needed because we are running from the squashfs.img on the media
so ejecting too early will crash the installer.
"""
if not device:
return
try:
<|code_end|>
. Use current file imports:
import glob
import os
import stat
import os.path
import errno
import subprocess
import unicodedata
import string
import types
import logging
from threading import Thread
from Queue import Queue, Empty
from pyanaconda.flags import flags
from pyanaconda.constants import DRACUT_SHUTDOWN_EJECT, ROOT_PATH, TRANSLATIONS_UPDATE_DIR, UNSUPPORTED_HW
from pyanaconda.regexes import PROXY_URL_PARSE
from pyanaconda.anaconda_log import program_log_lock
and context (classes, functions, or code) from other files:
# Path: pyanaconda/flags.py
# class Flags(object):
# class BootArgs(OrderedDict):
# def __setattr__(self, attr, val):
# def get(self, attr, val=None):
# def set_cmdline_bool(self, flag):
# def __init__(self, read_cmdline=True):
# def read_cmdline(self):
# def __init__(self, cmdline=None, files=None):
# def read(self, filenames):
# def readstr(self, cmdline):
# def getbool(self, arg, default=False):
# def can_touch_runtime_system(msg, touch_live=False):
#
# Path: pyanaconda/constants.py
# DRACUT_SHUTDOWN_EJECT = "/run/initramfs/usr/lib/dracut/hooks/shutdown/99anaconda-eject.sh"
#
# ROOT_PATH = "/mnt/sysimage"
#
# TRANSLATIONS_UPDATE_DIR="/tmp/updates/po"
#
# UNSUPPORTED_HW = 1 << 28
#
# Path: pyanaconda/anaconda_log.py
# DEFAULT_TTY_LEVEL = logging.INFO
# ENTRY_FORMAT = "%(asctime)s,%(msecs)03d %(levelname)s %(name)s: %(message)s"
# TTY_FORMAT = "%(levelname)s %(name)s: %(message)s"
# STDOUT_FORMAT = "%(asctime)s %(message)s"
# DATE_FORMAT = "%H:%M:%S"
# MAIN_LOG_FILE = "/tmp/anaconda.log"
# MAIN_LOG_TTY = "/dev/tty3"
# PROGRAM_LOG_FILE = "/tmp/program.log"
# PROGRAM_LOG_TTY = "/dev/tty5"
# STORAGE_LOG_FILE = "/tmp/storage.log"
# PACKAGING_LOG_FILE = "/tmp/packaging.log"
# SENSITIVE_INFO_LOG_FILE = "/tmp/sensitive-info.log"
# ANACONDA_SYSLOG_FACILITY = SysLogHandler.LOG_LOCAL1
# SYSLOG_CFGFILE = "/etc/rsyslog.conf"
# VIRTIO_PORT = "/dev/virtio-ports/org.fedoraproject.anaconda.log.0"
# TEMPLATE = "*.* @@%s\n"
# TEMPLATE = "*.* %s;anaconda_syslog\n"
# def autoSetLevel(handler, value):
# def setHandlersLevel(logr, level):
# def __init__(self,
# address=('localhost', SYSLOG_UDP_PORT),
# facility=SysLogHandler.LOG_USER,
# tag=''):
# def emit(self, record):
# def __init__ (self):
# def addFileHandler (self, dest, addToLogger, minLevel=DEFAULT_TTY_LEVEL,
# fmtStr=ENTRY_FORMAT,
# autoLevel=False):
# def forwardToSyslog(self, logr):
# def showwarning(self, message, category, filename, lineno,
# file=sys.stderr, line=None):
# def restartSyslog(self):
# def updateRemote(self, remote_syslog):
# def setupVirtio(self):
# def init():
# class AnacondaSyslogHandler(SysLogHandler):
# class AnacondaLog:
. Output only the next line. | if not os.path.exists(DRACUT_SHUTDOWN_EJECT): |
Continue the code snippet: <|code_start|># iutil.py - generic install utility functions
#
# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
# Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Erik Troan <ewt@redhat.com>
#
log = logging.getLogger("anaconda")
program_log = logging.getLogger("program")
def augmentEnv():
env = os.environ.copy()
env.update({"LC_ALL": "C",
<|code_end|>
. Use current file imports:
import glob
import os
import stat
import os.path
import errno
import subprocess
import unicodedata
import string
import types
import logging
from threading import Thread
from Queue import Queue, Empty
from pyanaconda.flags import flags
from pyanaconda.constants import DRACUT_SHUTDOWN_EJECT, ROOT_PATH, TRANSLATIONS_UPDATE_DIR, UNSUPPORTED_HW
from pyanaconda.regexes import PROXY_URL_PARSE
from pyanaconda.anaconda_log import program_log_lock
and context (classes, functions, or code) from other files:
# Path: pyanaconda/flags.py
# class Flags(object):
# class BootArgs(OrderedDict):
# def __setattr__(self, attr, val):
# def get(self, attr, val=None):
# def set_cmdline_bool(self, flag):
# def __init__(self, read_cmdline=True):
# def read_cmdline(self):
# def __init__(self, cmdline=None, files=None):
# def read(self, filenames):
# def readstr(self, cmdline):
# def getbool(self, arg, default=False):
# def can_touch_runtime_system(msg, touch_live=False):
#
# Path: pyanaconda/constants.py
# DRACUT_SHUTDOWN_EJECT = "/run/initramfs/usr/lib/dracut/hooks/shutdown/99anaconda-eject.sh"
#
# ROOT_PATH = "/mnt/sysimage"
#
# TRANSLATIONS_UPDATE_DIR="/tmp/updates/po"
#
# UNSUPPORTED_HW = 1 << 28
#
# Path: pyanaconda/anaconda_log.py
# DEFAULT_TTY_LEVEL = logging.INFO
# ENTRY_FORMAT = "%(asctime)s,%(msecs)03d %(levelname)s %(name)s: %(message)s"
# TTY_FORMAT = "%(levelname)s %(name)s: %(message)s"
# STDOUT_FORMAT = "%(asctime)s %(message)s"
# DATE_FORMAT = "%H:%M:%S"
# MAIN_LOG_FILE = "/tmp/anaconda.log"
# MAIN_LOG_TTY = "/dev/tty3"
# PROGRAM_LOG_FILE = "/tmp/program.log"
# PROGRAM_LOG_TTY = "/dev/tty5"
# STORAGE_LOG_FILE = "/tmp/storage.log"
# PACKAGING_LOG_FILE = "/tmp/packaging.log"
# SENSITIVE_INFO_LOG_FILE = "/tmp/sensitive-info.log"
# ANACONDA_SYSLOG_FACILITY = SysLogHandler.LOG_LOCAL1
# SYSLOG_CFGFILE = "/etc/rsyslog.conf"
# VIRTIO_PORT = "/dev/virtio-ports/org.fedoraproject.anaconda.log.0"
# TEMPLATE = "*.* @@%s\n"
# TEMPLATE = "*.* %s;anaconda_syslog\n"
# def autoSetLevel(handler, value):
# def setHandlersLevel(logr, level):
# def __init__(self,
# address=('localhost', SYSLOG_UDP_PORT),
# facility=SysLogHandler.LOG_USER,
# tag=''):
# def emit(self, record):
# def __init__ (self):
# def addFileHandler (self, dest, addToLogger, minLevel=DEFAULT_TTY_LEVEL,
# fmtStr=ENTRY_FORMAT,
# autoLevel=False):
# def forwardToSyslog(self, logr):
# def showwarning(self, message, category, filename, lineno,
# file=sys.stderr, line=None):
# def restartSyslog(self):
# def updateRemote(self, remote_syslog):
# def setupVirtio(self):
# def init():
# class AnacondaSyslogHandler(SysLogHandler):
# class AnacondaLog:
. Output only the next line. | "ANA_INSTALL_PATH": ROOT_PATH |
Given the following code snippet before the placeholder: <|code_start|> options = ''
host = ''
path = ''
if nfsurl:
s = nfsurl.split(":")
s.pop(0)
if len(s) >= 3:
(options, host, path) = s[:3]
elif len(s) == 2:
(host, path) = s
else:
host = s[0]
return (options, host, path)
def add_po_path(module, directory):
""" Looks to see what translations are under a given path and tells
the gettext module to use that path as the base dir """
for d in os.listdir(directory):
if not os.path.isdir("%s/%s" %(directory,d)):
continue
if not os.path.exists("%s/%s/LC_MESSAGES" %(directory,d)):
continue
for basename in os.listdir("%s/%s/LC_MESSAGES" %(directory,d)):
if not basename.endswith(".mo"):
continue
log.info("setting %s as translation source for %s", directory, basename[:-3])
module.bindtextdomain(basename[:-3], directory)
def setup_translations(module):
<|code_end|>
, predict the next line using imports from the current file:
import glob
import os
import stat
import os.path
import errno
import subprocess
import unicodedata
import string
import types
import logging
from threading import Thread
from Queue import Queue, Empty
from pyanaconda.flags import flags
from pyanaconda.constants import DRACUT_SHUTDOWN_EJECT, ROOT_PATH, TRANSLATIONS_UPDATE_DIR, UNSUPPORTED_HW
from pyanaconda.regexes import PROXY_URL_PARSE
from pyanaconda.anaconda_log import program_log_lock
and context including class names, function names, and sometimes code from other files:
# Path: pyanaconda/flags.py
# class Flags(object):
# class BootArgs(OrderedDict):
# def __setattr__(self, attr, val):
# def get(self, attr, val=None):
# def set_cmdline_bool(self, flag):
# def __init__(self, read_cmdline=True):
# def read_cmdline(self):
# def __init__(self, cmdline=None, files=None):
# def read(self, filenames):
# def readstr(self, cmdline):
# def getbool(self, arg, default=False):
# def can_touch_runtime_system(msg, touch_live=False):
#
# Path: pyanaconda/constants.py
# DRACUT_SHUTDOWN_EJECT = "/run/initramfs/usr/lib/dracut/hooks/shutdown/99anaconda-eject.sh"
#
# ROOT_PATH = "/mnt/sysimage"
#
# TRANSLATIONS_UPDATE_DIR="/tmp/updates/po"
#
# UNSUPPORTED_HW = 1 << 28
#
# Path: pyanaconda/anaconda_log.py
# DEFAULT_TTY_LEVEL = logging.INFO
# ENTRY_FORMAT = "%(asctime)s,%(msecs)03d %(levelname)s %(name)s: %(message)s"
# TTY_FORMAT = "%(levelname)s %(name)s: %(message)s"
# STDOUT_FORMAT = "%(asctime)s %(message)s"
# DATE_FORMAT = "%H:%M:%S"
# MAIN_LOG_FILE = "/tmp/anaconda.log"
# MAIN_LOG_TTY = "/dev/tty3"
# PROGRAM_LOG_FILE = "/tmp/program.log"
# PROGRAM_LOG_TTY = "/dev/tty5"
# STORAGE_LOG_FILE = "/tmp/storage.log"
# PACKAGING_LOG_FILE = "/tmp/packaging.log"
# SENSITIVE_INFO_LOG_FILE = "/tmp/sensitive-info.log"
# ANACONDA_SYSLOG_FACILITY = SysLogHandler.LOG_LOCAL1
# SYSLOG_CFGFILE = "/etc/rsyslog.conf"
# VIRTIO_PORT = "/dev/virtio-ports/org.fedoraproject.anaconda.log.0"
# TEMPLATE = "*.* @@%s\n"
# TEMPLATE = "*.* %s;anaconda_syslog\n"
# def autoSetLevel(handler, value):
# def setHandlersLevel(logr, level):
# def __init__(self,
# address=('localhost', SYSLOG_UDP_PORT),
# facility=SysLogHandler.LOG_USER,
# tag=''):
# def emit(self, record):
# def __init__ (self):
# def addFileHandler (self, dest, addToLogger, minLevel=DEFAULT_TTY_LEVEL,
# fmtStr=ENTRY_FORMAT,
# autoLevel=False):
# def forwardToSyslog(self, logr):
# def showwarning(self, message, category, filename, lineno,
# file=sys.stderr, line=None):
# def restartSyslog(self):
# def updateRemote(self, remote_syslog):
# def setupVirtio(self):
# def init():
# class AnacondaSyslogHandler(SysLogHandler):
# class AnacondaLog:
. Output only the next line. | if os.path.isdir(TRANSLATIONS_UPDATE_DIR): |
Based on the snippet: <|code_start|> def conditional_chown(path, uid, gid, from_uid=None, from_gid=None):
stats = os.stat(path)
if (from_uid and stats.st_uid != from_uid) or \
(from_gid and stats.st_gid != from_gid):
# owner UID or GID not matching, do nothing
return
# UID and GID matching or not required
os.chown(path, uid, gid)
if not from_uid_only and not from_gid_only:
# the easy way
dir_tree_map(root, lambda path: os.chown(path, uid, gid))
else:
# conditional chown
dir_tree_map(root, lambda path: conditional_chown(path, uid, gid,
from_uid_only,
from_gid_only))
def is_unsupported_hw():
""" Check to see if the hardware is supported or not.
:returns: True if this is unsupported hardware, False otherwise
:rtype: bool
"""
try:
tainted = long(open("/proc/sys/kernel/tainted").read())
except (IOError, ValueError):
tainted = 0L
<|code_end|>
, predict the immediate next line with the help of imports:
import glob
import os
import stat
import os.path
import errno
import subprocess
import unicodedata
import string
import types
import logging
from threading import Thread
from Queue import Queue, Empty
from pyanaconda.flags import flags
from pyanaconda.constants import DRACUT_SHUTDOWN_EJECT, ROOT_PATH, TRANSLATIONS_UPDATE_DIR, UNSUPPORTED_HW
from pyanaconda.regexes import PROXY_URL_PARSE
from pyanaconda.anaconda_log import program_log_lock
and context (classes, functions, sometimes code) from other files:
# Path: pyanaconda/flags.py
# class Flags(object):
# class BootArgs(OrderedDict):
# def __setattr__(self, attr, val):
# def get(self, attr, val=None):
# def set_cmdline_bool(self, flag):
# def __init__(self, read_cmdline=True):
# def read_cmdline(self):
# def __init__(self, cmdline=None, files=None):
# def read(self, filenames):
# def readstr(self, cmdline):
# def getbool(self, arg, default=False):
# def can_touch_runtime_system(msg, touch_live=False):
#
# Path: pyanaconda/constants.py
# DRACUT_SHUTDOWN_EJECT = "/run/initramfs/usr/lib/dracut/hooks/shutdown/99anaconda-eject.sh"
#
# ROOT_PATH = "/mnt/sysimage"
#
# TRANSLATIONS_UPDATE_DIR="/tmp/updates/po"
#
# UNSUPPORTED_HW = 1 << 28
#
# Path: pyanaconda/anaconda_log.py
# DEFAULT_TTY_LEVEL = logging.INFO
# ENTRY_FORMAT = "%(asctime)s,%(msecs)03d %(levelname)s %(name)s: %(message)s"
# TTY_FORMAT = "%(levelname)s %(name)s: %(message)s"
# STDOUT_FORMAT = "%(asctime)s %(message)s"
# DATE_FORMAT = "%H:%M:%S"
# MAIN_LOG_FILE = "/tmp/anaconda.log"
# MAIN_LOG_TTY = "/dev/tty3"
# PROGRAM_LOG_FILE = "/tmp/program.log"
# PROGRAM_LOG_TTY = "/dev/tty5"
# STORAGE_LOG_FILE = "/tmp/storage.log"
# PACKAGING_LOG_FILE = "/tmp/packaging.log"
# SENSITIVE_INFO_LOG_FILE = "/tmp/sensitive-info.log"
# ANACONDA_SYSLOG_FACILITY = SysLogHandler.LOG_LOCAL1
# SYSLOG_CFGFILE = "/etc/rsyslog.conf"
# VIRTIO_PORT = "/dev/virtio-ports/org.fedoraproject.anaconda.log.0"
# TEMPLATE = "*.* @@%s\n"
# TEMPLATE = "*.* %s;anaconda_syslog\n"
# def autoSetLevel(handler, value):
# def setHandlersLevel(logr, level):
# def __init__(self,
# address=('localhost', SYSLOG_UDP_PORT),
# facility=SysLogHandler.LOG_USER,
# tag=''):
# def emit(self, record):
# def __init__ (self):
# def addFileHandler (self, dest, addToLogger, minLevel=DEFAULT_TTY_LEVEL,
# fmtStr=ENTRY_FORMAT,
# autoLevel=False):
# def forwardToSyslog(self, logr):
# def showwarning(self, message, category, filename, lineno,
# file=sys.stderr, line=None):
# def restartSyslog(self):
# def updateRemote(self, remote_syslog):
# def setupVirtio(self):
# def init():
# class AnacondaSyslogHandler(SysLogHandler):
# class AnacondaLog:
. Output only the next line. | status = bool(tainted & UNSUPPORTED_HW) |
Here is a snippet: <|code_start|>
log = logging.getLogger("anaconda")
program_log = logging.getLogger("program")
def augmentEnv():
env = os.environ.copy()
env.update({"LC_ALL": "C",
"ANA_INSTALL_PATH": ROOT_PATH
})
return env
def _run_program(argv, root='/', stdin=None, stdout=None, env_prune=None):
""" Run an external program, log the output and return it to the caller
@param argv The command to run and argument
@param root The directory to chroot to before running command.
@param stdin The file object to read stdin from.
@param stdout Optional file object to write stdout and stderr to.
@param env_prune environment variable to remove before execution
@return The return code of the command and the output
"""
if env_prune is None:
env_prune = []
def chroot():
if root and root != '/':
os.chroot(root)
os.chdir("/")
<|code_end|>
. Write the next line using the current file imports:
import glob
import os
import stat
import os.path
import errno
import subprocess
import unicodedata
import string
import types
import logging
from threading import Thread
from Queue import Queue, Empty
from pyanaconda.flags import flags
from pyanaconda.constants import DRACUT_SHUTDOWN_EJECT, ROOT_PATH, TRANSLATIONS_UPDATE_DIR, UNSUPPORTED_HW
from pyanaconda.regexes import PROXY_URL_PARSE
from pyanaconda.anaconda_log import program_log_lock
and context from other files:
# Path: pyanaconda/flags.py
# class Flags(object):
# class BootArgs(OrderedDict):
# def __setattr__(self, attr, val):
# def get(self, attr, val=None):
# def set_cmdline_bool(self, flag):
# def __init__(self, read_cmdline=True):
# def read_cmdline(self):
# def __init__(self, cmdline=None, files=None):
# def read(self, filenames):
# def readstr(self, cmdline):
# def getbool(self, arg, default=False):
# def can_touch_runtime_system(msg, touch_live=False):
#
# Path: pyanaconda/constants.py
# DRACUT_SHUTDOWN_EJECT = "/run/initramfs/usr/lib/dracut/hooks/shutdown/99anaconda-eject.sh"
#
# ROOT_PATH = "/mnt/sysimage"
#
# TRANSLATIONS_UPDATE_DIR="/tmp/updates/po"
#
# UNSUPPORTED_HW = 1 << 28
#
# Path: pyanaconda/anaconda_log.py
# DEFAULT_TTY_LEVEL = logging.INFO
# ENTRY_FORMAT = "%(asctime)s,%(msecs)03d %(levelname)s %(name)s: %(message)s"
# TTY_FORMAT = "%(levelname)s %(name)s: %(message)s"
# STDOUT_FORMAT = "%(asctime)s %(message)s"
# DATE_FORMAT = "%H:%M:%S"
# MAIN_LOG_FILE = "/tmp/anaconda.log"
# MAIN_LOG_TTY = "/dev/tty3"
# PROGRAM_LOG_FILE = "/tmp/program.log"
# PROGRAM_LOG_TTY = "/dev/tty5"
# STORAGE_LOG_FILE = "/tmp/storage.log"
# PACKAGING_LOG_FILE = "/tmp/packaging.log"
# SENSITIVE_INFO_LOG_FILE = "/tmp/sensitive-info.log"
# ANACONDA_SYSLOG_FACILITY = SysLogHandler.LOG_LOCAL1
# SYSLOG_CFGFILE = "/etc/rsyslog.conf"
# VIRTIO_PORT = "/dev/virtio-ports/org.fedoraproject.anaconda.log.0"
# TEMPLATE = "*.* @@%s\n"
# TEMPLATE = "*.* %s;anaconda_syslog\n"
# def autoSetLevel(handler, value):
# def setHandlersLevel(logr, level):
# def __init__(self,
# address=('localhost', SYSLOG_UDP_PORT),
# facility=SysLogHandler.LOG_USER,
# tag=''):
# def emit(self, record):
# def __init__ (self):
# def addFileHandler (self, dest, addToLogger, minLevel=DEFAULT_TTY_LEVEL,
# fmtStr=ENTRY_FORMAT,
# autoLevel=False):
# def forwardToSyslog(self, logr):
# def showwarning(self, message, category, filename, lineno,
# file=sys.stderr, line=None):
# def restartSyslog(self):
# def updateRemote(self, remote_syslog):
# def setupVirtio(self):
# def init():
# class AnacondaSyslogHandler(SysLogHandler):
# class AnacondaLog:
, which may include functions, classes, or code. Output only the next line. | with program_log_lock: |
Using the snippet: <|code_start|> """
pass
@property
def window(self):
"""Return an object with show_all and hide methods that is to be used
to display this UI object.
"""
raise TypeError("UIObject.window has to be overriden")
@property
def data(self):
return self._data
class FirstbootSpokeMixIn(object):
"""This MixIn class marks Spokes as usable for Firstboot
and Anaconda.
"""
@classmethod
def should_run(cls, environment, data):
"""This method is responsible for beginning Spoke initialization
in the firstboot environment (even before __init__).
It should return True if the spoke is to be shown
and False if it should be skipped.
It might be called multiple times, with or without (None)
the data argument.
"""
<|code_end|>
, determine the next line of code. You have imports:
import os
import imp
import inspect
import copy
import sys
import types
import logging
from pyanaconda.constants import ANACONDA_ENVIRON, FIRSTBOOT_ENVIRON
from pyanaconda.errors import RemovedModuleError
from pykickstart.constants import FIRSTBOOT_RECONFIG
and context (class names, function names, or code) available:
# Path: pyanaconda/constants.py
# ANACONDA_ENVIRON = "anaconda"
#
# FIRSTBOOT_ENVIRON = "firstboot"
#
# Path: pyanaconda/errors.py
# class RemovedModuleError(ImportError):
# pass
. Output only the next line. | if environment == ANACONDA_ENVIRON: |
Here is a snippet: <|code_start|>
@property
def window(self):
"""Return an object with show_all and hide methods that is to be used
to display this UI object.
"""
raise TypeError("UIObject.window has to be overriden")
@property
def data(self):
return self._data
class FirstbootSpokeMixIn(object):
"""This MixIn class marks Spokes as usable for Firstboot
and Anaconda.
"""
@classmethod
def should_run(cls, environment, data):
"""This method is responsible for beginning Spoke initialization
in the firstboot environment (even before __init__).
It should return True if the spoke is to be shown
and False if it should be skipped.
It might be called multiple times, with or without (None)
the data argument.
"""
if environment == ANACONDA_ENVIRON:
return True
<|code_end|>
. Write the next line using the current file imports:
import os
import imp
import inspect
import copy
import sys
import types
import logging
from pyanaconda.constants import ANACONDA_ENVIRON, FIRSTBOOT_ENVIRON
from pyanaconda.errors import RemovedModuleError
from pykickstart.constants import FIRSTBOOT_RECONFIG
and context from other files:
# Path: pyanaconda/constants.py
# ANACONDA_ENVIRON = "anaconda"
#
# FIRSTBOOT_ENVIRON = "firstboot"
#
# Path: pyanaconda/errors.py
# class RemovedModuleError(ImportError):
# pass
, which may include functions, classes, or code. Output only the next line. | elif environment == FIRSTBOOT_ENVIRON and data is None: |
Predict the next line after this snippet: <|code_start|> fo, module_path, module_flags)
# get the filenames without the extensions so we can compare those
# with the .py[co]? equivalence in mind
# - we do not have to care about files without extension as the
# condition at the beginning of the for loop filters out those
# - module_flags[0] contains the extension of the module imp found
candidate_name = module_path[:module_path.rindex(module_flags[0])]
loaded_name, loaded_ext = module.__file__.rsplit(".", 1)
# restore the extension dot eaten by split
loaded_ext = "." + loaded_ext
# do not collect classes when the module is already imported
# from different path than we are traversing
# this condition checks the module name without file extension
if candidate_name != loaded_name:
continue
# if the candidate file is .py[co]? and the loaded is not (.so)
# skip the file as well
if module_flags[0].startswith(".py") and not loaded_ext.startswith(".py"):
continue
# if the candidate file is not .py[co]? and the loaded is
# skip the file as well
if not module_flags[0].startswith(".py") and loaded_ext.startswith(".py"):
continue
<|code_end|>
using the current file's imports:
import os
import imp
import inspect
import copy
import sys
import types
import logging
from pyanaconda.constants import ANACONDA_ENVIRON, FIRSTBOOT_ENVIRON
from pyanaconda.errors import RemovedModuleError
from pykickstart.constants import FIRSTBOOT_RECONFIG
and any relevant context from other files:
# Path: pyanaconda/constants.py
# ANACONDA_ENVIRON = "anaconda"
#
# FIRSTBOOT_ENVIRON = "firstboot"
#
# Path: pyanaconda/errors.py
# class RemovedModuleError(ImportError):
# pass
. Output only the next line. | except RemovedModuleError: |
Based on the snippet: <|code_start|>
log = logging.getLogger("anaconda")
# A lot of effort, but it only allows a limited set of flags to be referenced
class Flags(object):
def __setattr__(self, attr, val):
# pylint: disable-msg=E1101
if attr not in self.__dict__ and not self._in_init:
raise AttributeError(attr)
else:
self.__dict__[attr] = val
def get(self, attr, val=None):
return getattr(self, attr, val)
def set_cmdline_bool(self, flag):
if flag in self.cmdline:
setattr(self, flag, self.cmdline.getbool(flag))
def __init__(self, read_cmdline=True):
self.__dict__['_in_init'] = True
self.livecdInstall = 0
self.dlabel = 0
self.ibft = 1
self.iscsi = 0
self.usevnc = 0
self.vncquestion = True
self.mpath = 1
self.dmraid = 1
<|code_end|>
, predict the immediate next line with the help of imports:
import selinux
import shlex
import glob
import logging
from pyanaconda.constants import SELINUX_DEFAULT
from collections import OrderedDict
and context (classes, functions, sometimes code) from other files:
# Path: pyanaconda/constants.py
# SELINUX_DEFAULT = 1
. Output only the next line. | self.selinux = SELINUX_DEFAULT |
Given snippet: <|code_start|>
class UnknownMethodGetError(Exception):
"""Object does not have Get, most probably being invalid"""
def __str__(self):
return self.__repr__()
def _get_proxy(bus_type=Gio.BusType.SYSTEM,
flags=Gio.DBusProxyFlags.NONE,
info=None,
name="org.freedesktop.NetworkManager",
object_path="/org/freedesktop/NetworkManager",
interface_name="org.freedesktop.NetworkManager",
cancellable=None):
proxy = Gio.DBusProxy.new_for_bus_sync(bus_type,
flags,
info,
name,
object_path,
interface_name,
cancellable)
return proxy
def _get_property(object_path, prop, interface_name_suffix=""):
interface_name = "org.freedesktop.NetworkManager" + interface_name_suffix
proxy = _get_proxy(object_path=object_path, interface_name="org.freedesktop.DBus.Properties")
args = GLib.Variant('(ss)', (interface_name, prop))
try:
prop = proxy.call_sync("Get",
args,
Gio.DBusCallFlags.NONE,
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from gi.repository import Gio, GLib
from gi.repository import NetworkManager
from pyanaconda.constants import DEFAULT_DBUS_TIMEOUT
import IPy
import struct
import socket
import re
and context:
# Path: pyanaconda/constants.py
# DEFAULT_DBUS_TIMEOUT = -1 # use default
which might include code, classes, or functions. Output only the next line. | DEFAULT_DBUS_TIMEOUT, |
Continue the code snippet: <|code_start|>
class CharacterAdmin(admin.ModelAdmin):
list_display = ('name', 'level', 'race', 'cclass', 'player',
'created', 'modified')
list_filter = ('cclass__name', 'race__name', 'created', 'modified')
search_fields = ['name', 'background', 'race__name', 'cclass__name',
'player__username']
readonly_fields = ('created', 'modified')
fieldsets = [
(None, {'fields': ['player', 'name', 'level', 'race', 'cclass',
'background']}),
('Hit Points', {'fields': ['max_hit_points', 'current_hit_points']}),
('Stats', {'fields': ['strength', 'dexterity', 'constitution',
'intelligence', 'wisdom', 'charisma']}),
(None, {'fields': ['experience_points', 'inventory']}),
('Date Information', {'fields': ['created', 'modified']}),
]
<|code_end|>
. Use current file imports:
from django.contrib import admin
from characters.models import Character, Race, Class, Item
and context (classes, functions, or code) from other files:
# Path: characters/models.py
# class Character(models.Model):
#
# objects = CharacterManager()
#
# player = models.ForeignKey(User)
#
# name = models.CharField(max_length=200)
# background = models.TextField()
#
# race = models.ForeignKey(Race)
# # This name isn't ideal, but 'class' is a Python builtin, so here we are...
# # I went with 'cclass' as short for 'character class'.
# cclass = models.ForeignKey(Class, verbose_name="class")
#
# alignment = models.CharField(max_length=2, choices=Alignment.CHOICES,
# default=Alignment.NEUTRAL)
#
# level = models.IntegerField(default=1)
# experience_points = models.IntegerField(default=0)
#
# max_hit_points = models.IntegerField(default=10)
# current_hit_points = models.IntegerField(default=10)
#
# strength = models.IntegerField(default=generate_stat)
# dexterity = models.IntegerField(default=generate_stat)
# constitution = models.IntegerField(default=generate_stat)
# intelligence = models.IntegerField(default=generate_stat)
# wisdom = models.IntegerField(default=generate_stat)
# charisma = models.IntegerField(default=generate_stat)
#
# inventory = models.ManyToManyField(Item, null=True, blank=True)
#
# created = models.DateTimeField(auto_now_add=True)
# modified = models.DateTimeField(auto_now=True)
#
# def __str__(self):
# return self.name
#
# class Race(models.Model):
#
# name = models.CharField('race', max_length=200)
# description = models.TextField()
#
# def __str__(self):
# return self.name
#
# class Class(models.Model):
#
# name = models.CharField('class', max_length=200)
# description = models.TextField()
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = "Classes"
#
# class Item(models.Model):
#
# name = models.CharField(max_length=200)
# description = models.TextField()
#
# value = models.IntegerField(default=0)
# weight = models.IntegerField(default=1)
#
# def __str__(self):
# return self.name
. Output only the next line. | admin.site.register(Character, CharacterAdmin) |
Given the following code snippet before the placeholder: <|code_start|>
class CharacterAdmin(admin.ModelAdmin):
list_display = ('name', 'level', 'race', 'cclass', 'player',
'created', 'modified')
list_filter = ('cclass__name', 'race__name', 'created', 'modified')
search_fields = ['name', 'background', 'race__name', 'cclass__name',
'player__username']
readonly_fields = ('created', 'modified')
fieldsets = [
(None, {'fields': ['player', 'name', 'level', 'race', 'cclass',
'background']}),
('Hit Points', {'fields': ['max_hit_points', 'current_hit_points']}),
('Stats', {'fields': ['strength', 'dexterity', 'constitution',
'intelligence', 'wisdom', 'charisma']}),
(None, {'fields': ['experience_points', 'inventory']}),
('Date Information', {'fields': ['created', 'modified']}),
]
admin.site.register(Character, CharacterAdmin)
<|code_end|>
, predict the next line using imports from the current file:
from django.contrib import admin
from characters.models import Character, Race, Class, Item
and context including class names, function names, and sometimes code from other files:
# Path: characters/models.py
# class Character(models.Model):
#
# objects = CharacterManager()
#
# player = models.ForeignKey(User)
#
# name = models.CharField(max_length=200)
# background = models.TextField()
#
# race = models.ForeignKey(Race)
# # This name isn't ideal, but 'class' is a Python builtin, so here we are...
# # I went with 'cclass' as short for 'character class'.
# cclass = models.ForeignKey(Class, verbose_name="class")
#
# alignment = models.CharField(max_length=2, choices=Alignment.CHOICES,
# default=Alignment.NEUTRAL)
#
# level = models.IntegerField(default=1)
# experience_points = models.IntegerField(default=0)
#
# max_hit_points = models.IntegerField(default=10)
# current_hit_points = models.IntegerField(default=10)
#
# strength = models.IntegerField(default=generate_stat)
# dexterity = models.IntegerField(default=generate_stat)
# constitution = models.IntegerField(default=generate_stat)
# intelligence = models.IntegerField(default=generate_stat)
# wisdom = models.IntegerField(default=generate_stat)
# charisma = models.IntegerField(default=generate_stat)
#
# inventory = models.ManyToManyField(Item, null=True, blank=True)
#
# created = models.DateTimeField(auto_now_add=True)
# modified = models.DateTimeField(auto_now=True)
#
# def __str__(self):
# return self.name
#
# class Race(models.Model):
#
# name = models.CharField('race', max_length=200)
# description = models.TextField()
#
# def __str__(self):
# return self.name
#
# class Class(models.Model):
#
# name = models.CharField('class', max_length=200)
# description = models.TextField()
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = "Classes"
#
# class Item(models.Model):
#
# name = models.CharField(max_length=200)
# description = models.TextField()
#
# value = models.IntegerField(default=0)
# weight = models.IntegerField(default=1)
#
# def __str__(self):
# return self.name
. Output only the next line. | admin.site.register(Race) |
Continue the code snippet: <|code_start|>
class CharacterAdmin(admin.ModelAdmin):
list_display = ('name', 'level', 'race', 'cclass', 'player',
'created', 'modified')
list_filter = ('cclass__name', 'race__name', 'created', 'modified')
search_fields = ['name', 'background', 'race__name', 'cclass__name',
'player__username']
readonly_fields = ('created', 'modified')
fieldsets = [
(None, {'fields': ['player', 'name', 'level', 'race', 'cclass',
'background']}),
('Hit Points', {'fields': ['max_hit_points', 'current_hit_points']}),
('Stats', {'fields': ['strength', 'dexterity', 'constitution',
'intelligence', 'wisdom', 'charisma']}),
(None, {'fields': ['experience_points', 'inventory']}),
('Date Information', {'fields': ['created', 'modified']}),
]
admin.site.register(Character, CharacterAdmin)
admin.site.register(Race)
<|code_end|>
. Use current file imports:
from django.contrib import admin
from characters.models import Character, Race, Class, Item
and context (classes, functions, or code) from other files:
# Path: characters/models.py
# class Character(models.Model):
#
# objects = CharacterManager()
#
# player = models.ForeignKey(User)
#
# name = models.CharField(max_length=200)
# background = models.TextField()
#
# race = models.ForeignKey(Race)
# # This name isn't ideal, but 'class' is a Python builtin, so here we are...
# # I went with 'cclass' as short for 'character class'.
# cclass = models.ForeignKey(Class, verbose_name="class")
#
# alignment = models.CharField(max_length=2, choices=Alignment.CHOICES,
# default=Alignment.NEUTRAL)
#
# level = models.IntegerField(default=1)
# experience_points = models.IntegerField(default=0)
#
# max_hit_points = models.IntegerField(default=10)
# current_hit_points = models.IntegerField(default=10)
#
# strength = models.IntegerField(default=generate_stat)
# dexterity = models.IntegerField(default=generate_stat)
# constitution = models.IntegerField(default=generate_stat)
# intelligence = models.IntegerField(default=generate_stat)
# wisdom = models.IntegerField(default=generate_stat)
# charisma = models.IntegerField(default=generate_stat)
#
# inventory = models.ManyToManyField(Item, null=True, blank=True)
#
# created = models.DateTimeField(auto_now_add=True)
# modified = models.DateTimeField(auto_now=True)
#
# def __str__(self):
# return self.name
#
# class Race(models.Model):
#
# name = models.CharField('race', max_length=200)
# description = models.TextField()
#
# def __str__(self):
# return self.name
#
# class Class(models.Model):
#
# name = models.CharField('class', max_length=200)
# description = models.TextField()
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = "Classes"
#
# class Item(models.Model):
#
# name = models.CharField(max_length=200)
# description = models.TextField()
#
# value = models.IntegerField(default=0)
# weight = models.IntegerField(default=1)
#
# def __str__(self):
# return self.name
. Output only the next line. | admin.site.register(Class) |
Continue the code snippet: <|code_start|>
class CharacterAdmin(admin.ModelAdmin):
list_display = ('name', 'level', 'race', 'cclass', 'player',
'created', 'modified')
list_filter = ('cclass__name', 'race__name', 'created', 'modified')
search_fields = ['name', 'background', 'race__name', 'cclass__name',
'player__username']
readonly_fields = ('created', 'modified')
fieldsets = [
(None, {'fields': ['player', 'name', 'level', 'race', 'cclass',
'background']}),
('Hit Points', {'fields': ['max_hit_points', 'current_hit_points']}),
('Stats', {'fields': ['strength', 'dexterity', 'constitution',
'intelligence', 'wisdom', 'charisma']}),
(None, {'fields': ['experience_points', 'inventory']}),
('Date Information', {'fields': ['created', 'modified']}),
]
admin.site.register(Character, CharacterAdmin)
admin.site.register(Race)
admin.site.register(Class)
<|code_end|>
. Use current file imports:
from django.contrib import admin
from characters.models import Character, Race, Class, Item
and context (classes, functions, or code) from other files:
# Path: characters/models.py
# class Character(models.Model):
#
# objects = CharacterManager()
#
# player = models.ForeignKey(User)
#
# name = models.CharField(max_length=200)
# background = models.TextField()
#
# race = models.ForeignKey(Race)
# # This name isn't ideal, but 'class' is a Python builtin, so here we are...
# # I went with 'cclass' as short for 'character class'.
# cclass = models.ForeignKey(Class, verbose_name="class")
#
# alignment = models.CharField(max_length=2, choices=Alignment.CHOICES,
# default=Alignment.NEUTRAL)
#
# level = models.IntegerField(default=1)
# experience_points = models.IntegerField(default=0)
#
# max_hit_points = models.IntegerField(default=10)
# current_hit_points = models.IntegerField(default=10)
#
# strength = models.IntegerField(default=generate_stat)
# dexterity = models.IntegerField(default=generate_stat)
# constitution = models.IntegerField(default=generate_stat)
# intelligence = models.IntegerField(default=generate_stat)
# wisdom = models.IntegerField(default=generate_stat)
# charisma = models.IntegerField(default=generate_stat)
#
# inventory = models.ManyToManyField(Item, null=True, blank=True)
#
# created = models.DateTimeField(auto_now_add=True)
# modified = models.DateTimeField(auto_now=True)
#
# def __str__(self):
# return self.name
#
# class Race(models.Model):
#
# name = models.CharField('race', max_length=200)
# description = models.TextField()
#
# def __str__(self):
# return self.name
#
# class Class(models.Model):
#
# name = models.CharField('class', max_length=200)
# description = models.TextField()
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = "Classes"
#
# class Item(models.Model):
#
# name = models.CharField(max_length=200)
# description = models.TextField()
#
# value = models.IntegerField(default=0)
# weight = models.IntegerField(default=1)
#
# def __str__(self):
# return self.name
. Output only the next line. | admin.site.register(Item) |
Predict the next line after this snippet: <|code_start|>
class CharacterIndexView(ListView):
template_name = 'characters/index.html'
context_object_name = 'all_characters' # better than 'object_list'
def get_queryset(self):
return Character.objects.all().order_by('name')
class CharacterDetailView(DetailView):
model = Character
template_name = 'characters/view_character.html'
class CharacterCreationView(CreateView):
model = Character
template_name = 'characters/create_character.html'
<|code_end|>
using the current file's imports:
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from characters.forms import CharacterForm
from characters.models import Character, Class, Race
and any relevant context from other files:
# Path: characters/forms.py
# class CharacterForm(forms.ModelForm):
#
# class Meta:
# model = Character
# fields = ['name', 'background', 'race', 'cclass', 'alignment',
# 'strength', 'dexterity', 'constitution', 'intelligence',
# 'wisdom', 'charisma', 'max_hit_points']
#
# def __init__(self, *args, **kwargs):
# super(CharacterForm, self).__init__(*args, **kwargs)
# for field in ['strength', 'dexterity', 'constitution', 'intelligence',
# 'wisdom', 'charisma', 'max_hit_points']:
# self.fields[field].widget.attrs['readonly'] = True
#
# Path: characters/models.py
# class Character(models.Model):
#
# objects = CharacterManager()
#
# player = models.ForeignKey(User)
#
# name = models.CharField(max_length=200)
# background = models.TextField()
#
# race = models.ForeignKey(Race)
# # This name isn't ideal, but 'class' is a Python builtin, so here we are...
# # I went with 'cclass' as short for 'character class'.
# cclass = models.ForeignKey(Class, verbose_name="class")
#
# alignment = models.CharField(max_length=2, choices=Alignment.CHOICES,
# default=Alignment.NEUTRAL)
#
# level = models.IntegerField(default=1)
# experience_points = models.IntegerField(default=0)
#
# max_hit_points = models.IntegerField(default=10)
# current_hit_points = models.IntegerField(default=10)
#
# strength = models.IntegerField(default=generate_stat)
# dexterity = models.IntegerField(default=generate_stat)
# constitution = models.IntegerField(default=generate_stat)
# intelligence = models.IntegerField(default=generate_stat)
# wisdom = models.IntegerField(default=generate_stat)
# charisma = models.IntegerField(default=generate_stat)
#
# inventory = models.ManyToManyField(Item, null=True, blank=True)
#
# created = models.DateTimeField(auto_now_add=True)
# modified = models.DateTimeField(auto_now=True)
#
# def __str__(self):
# return self.name
#
# class Class(models.Model):
#
# name = models.CharField('class', max_length=200)
# description = models.TextField()
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = "Classes"
#
# class Race(models.Model):
#
# name = models.CharField('race', max_length=200)
# description = models.TextField()
#
# def __str__(self):
# return self.name
. Output only the next line. | form_class = CharacterForm |
Continue the code snippet: <|code_start|>
class CharacterIndexView(ListView):
template_name = 'characters/index.html'
context_object_name = 'all_characters' # better than 'object_list'
def get_queryset(self):
<|code_end|>
. Use current file imports:
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from characters.forms import CharacterForm
from characters.models import Character, Class, Race
and context (classes, functions, or code) from other files:
# Path: characters/forms.py
# class CharacterForm(forms.ModelForm):
#
# class Meta:
# model = Character
# fields = ['name', 'background', 'race', 'cclass', 'alignment',
# 'strength', 'dexterity', 'constitution', 'intelligence',
# 'wisdom', 'charisma', 'max_hit_points']
#
# def __init__(self, *args, **kwargs):
# super(CharacterForm, self).__init__(*args, **kwargs)
# for field in ['strength', 'dexterity', 'constitution', 'intelligence',
# 'wisdom', 'charisma', 'max_hit_points']:
# self.fields[field].widget.attrs['readonly'] = True
#
# Path: characters/models.py
# class Character(models.Model):
#
# objects = CharacterManager()
#
# player = models.ForeignKey(User)
#
# name = models.CharField(max_length=200)
# background = models.TextField()
#
# race = models.ForeignKey(Race)
# # This name isn't ideal, but 'class' is a Python builtin, so here we are...
# # I went with 'cclass' as short for 'character class'.
# cclass = models.ForeignKey(Class, verbose_name="class")
#
# alignment = models.CharField(max_length=2, choices=Alignment.CHOICES,
# default=Alignment.NEUTRAL)
#
# level = models.IntegerField(default=1)
# experience_points = models.IntegerField(default=0)
#
# max_hit_points = models.IntegerField(default=10)
# current_hit_points = models.IntegerField(default=10)
#
# strength = models.IntegerField(default=generate_stat)
# dexterity = models.IntegerField(default=generate_stat)
# constitution = models.IntegerField(default=generate_stat)
# intelligence = models.IntegerField(default=generate_stat)
# wisdom = models.IntegerField(default=generate_stat)
# charisma = models.IntegerField(default=generate_stat)
#
# inventory = models.ManyToManyField(Item, null=True, blank=True)
#
# created = models.DateTimeField(auto_now_add=True)
# modified = models.DateTimeField(auto_now=True)
#
# def __str__(self):
# return self.name
#
# class Class(models.Model):
#
# name = models.CharField('class', max_length=200)
# description = models.TextField()
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = "Classes"
#
# class Race(models.Model):
#
# name = models.CharField('race', max_length=200)
# description = models.TextField()
#
# def __str__(self):
# return self.name
. Output only the next line. | return Character.objects.all().order_by('name') |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
def load_data(apps, schema_editor):
Race(name="Human", description="Enh.").save()
<|code_end|>
. Use current file imports:
(from django.db import models, migrations
from characters.models import Race, Class)
and context including class names, function names, or small code snippets from other files:
# Path: characters/models.py
# class Race(models.Model):
#
# name = models.CharField('race', max_length=200)
# description = models.TextField()
#
# def __str__(self):
# return self.name
#
# class Class(models.Model):
#
# name = models.CharField('class', max_length=200)
# description = models.TextField()
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = "Classes"
. Output only the next line. | Class(name="Generic", description="Meh.").save() |
Given the code snippet: <|code_start|> it would be possible to get almost any object through a series of nested joins!
"""
super(MongoQuerySettingsDict, self).__init__()
self.update({k: v
for k, v in locals().items()
if k not in {'__class__', 'self'}})
# NOTE: before you say your BOO at me for using locals(), consider the following...
# we have 20+ variables we have to pass to the update() function, and we have to make sure we've forgotten none.
# At the same time, this method is only called ONCE per model, during the initialization phase.
# All of this tells me that this is just the right situation to summon locals() to our assitance.
# Otherwise, we'll have a large, long, ugly list of variables, and we'll have to keep it updated every time
# there is a new setting in town.
# Therefore, locals().
def and_more(self, **settings):
""" Copy the object and add more settings to it """
return self.__class__(**{**self, **settings})
@classmethod
def pluck_from(cls, dict, skip=('max_items',)):
""" Initialize the class by plucking kwargs from a dictionary.
This is useful when you have a dict with configuration for multiple classes, and you want to initialize
this one by getting only the keys you need.
Example: pluck MongoQuerySettingsDict from a StrictCrudHelperSettingsDict.
Args:
skip: List of key names to skip when copying. Sometimes it just does not make sense to copy all values.
"""
<|code_end|>
, generate the next line using the imports in this file:
from typing import *
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.sql.elements import BinaryExpression
from .inspect import pluck_kwargs_from
and context (functions, classes, or occasionally code) from other files:
# Path: mongosql/util/inspect.py
# def pluck_kwargs_from(dct: Mapping, for_func: Callable, skip: Tuple[str] = ()) -> dict:
# """ Analyze a function, pluck the arguments it needs from a dict """
# defaults = get_function_defaults(for_func)
#
# # Get the values for these kwargs
# kwargs = {k: dct.get(k, defaults[k])
# for k in defaults.keys()
# if k not in skip}
#
# # Done
# return kwargs
. Output only the next line. | kwargs = pluck_kwargs_from(dict, |
Here is a snippet: <|code_start|>```
#### Syntax
The Group Operator, as you have seen, receives an array of column names.
* Array syntax.
List of column names, optionally suffixed by the sort direction: `-` for `DESC`, `+` for `ASC`.
The default is `+`.
Example:
```javascript
{ group: [ 'a+', 'b-', 'c' ] } // -> a ASC, b DESC, c DESC
```
* String syntax
List of columns, with optional `+` / `-`, separated by whitespace.
Example:
```javascript
{ group: 'a+ b- c' }
```
"""
<|code_end|>
. Write the next line using the current file imports:
from .sort import MongoSort
and context from other files:
# Path: mongosql/handlers/sort.py
# class MongoSort(MongoQueryHandlerBase):
# """ MongoDB sorting
#
# * None: no sorting
# * OrderedDict({ a: +1, b: -1 })
# * [ 'a+', 'b-', 'c' ] - array of strings '<column>[<+|->]'. default direction = +1
# * dict({a: +1}) -- you can only use a dict with ONE COLUMN (because of its unstable order)
#
# Supports: Columns, hybrid properties
# """
#
# query_object_section_name = 'sort'
#
# def __init__(self, model, bags, legacy_fields=None):
# # Legacy fields
# self.legacy_fields = frozenset(legacy_fields or ())
#
# # Parent
# super(MongoSort, self).__init__(model, bags)
#
# # On input
# #: OderedDict() of a sort spec: {key: +1|-1}
# self.sort_spec = None
#
# def _get_supported_bags(self):
# return CombinedBag(
# col=self.bags.columns,
# colp=self.bags.column_properties,
# hybrid=self.bags.hybrid_properties,
# assocproxy=self.bags.association_proxies,
# legacy=FakeBag({n: None for n in self.legacy_fields}),
# )
#
# def _input(self, spec):
# """ Reusable method: fits both MongoSort and MongoGroup """
#
# # Empty
# if not spec:
# spec = []
#
# # String syntax
# if isinstance(spec, str):
# # Split by whitespace and convert to a list
# spec = spec.split()
#
# # List
# if isinstance(spec, (list, tuple)):
# # Strings: convert "column[+-]" into an ordered dict
# if all(isinstance(v, str) for v in spec):
# spec = OrderedDict([
# [v[:-1], -1 if v[-1] == '-' else +1]
# if v[-1] in {'+', '-'}
# else [v, +1]
# for v in spec
# ])
#
# # Dict
# if isinstance(spec, OrderedDict):
# pass # nothing to do here
# elif isinstance(spec, dict):
# if len(spec) > 1:
# raise InvalidQueryError('{} is a plain object; can only have 1 column '
# 'because of unstable ordering of object keys; '
# 'use list syntax instead'
# .format(self.query_object_section_name))
# spec = OrderedDict(spec)
# else:
# raise InvalidQueryError('{name} must be either a list, a string, or an object; {type} provided.'
# .format(name=self.query_object_section_name, type=type(spec)))
#
# # Validate directions: +1 or -1
# if not all(dir in {-1, +1} for field, dir in spec.items()):
# raise InvalidQueryError('{} direction can be either +1 or -1'.format(self.query_object_section_name))
#
# # Validate columns
# self.validate_properties(spec.keys())
# return spec
#
# def input(self, sort_spec):
# super(MongoSort, self).input(sort_spec)
# self.sort_spec = self._input(sort_spec)
# return self
#
# def merge(self, sort_spec):
# self.sort_spec.update(self._input(sort_spec))
# return self
#
# def compile_columns(self):
# return [
# self.supported_bags.get(name).desc() if d == -1 else self.supported_bags.get(name)
# for name, d in self.sort_spec.items()
# if name not in self.supported_bags.bag('legacy') # remove fake items
# ]
#
# # Not Implemented for this Query Object handler
# compile_options = NotImplemented
# compile_statement = NotImplemented
# compile_statements = NotImplemented
#
# def alter_query(self, query, as_relation=None):
# if not self.sort_spec:
# return query # short-circuit
# return query.order_by(*self.compile_columns())
#
# def get_final_input_value(self):
# return [f'{name}{"-" if d == -1 else ""}'
# for name, d in self.sort_spec.items()]
#
# # Extra stuff
#
# def undefer_columns_involved_in_sorting(self, as_relation):
# """ undefer() columns required for this sort """
# # Get the names of the columns
# order_by_column_names = [c.key or c.element.key
# for c in self.compile_columns()]
#
# # Return options: undefer() every column
# return (as_relation.undefer(column_name)
# for column_name in order_by_column_names)
, which may include functions, classes, or code. Output only the next line. | class MongoGroup(MongoSort): |
Using the snippet: <|code_start|>
class method_decorator_test(unittest.TestCase):
def test_method_decorators(self):
# === Test: method decorator
# Create a class
class A:
@method_decorator_1(1)
def a(self): pass
@method_decorator_1(2)
def b(self): pass
@method_decorator_2(3)
def c(self): pass
# isinstance() checks
<|code_end|>
, determine the next line of code. You have imports:
import unittest
from functools import wraps
from mongosql.util.method_decorator import method_decorator
and context (class names, function names, or code) available:
# Path: mongosql/util/method_decorator.py
# class method_decorator(metaclass=method_decorator_meta):
# """ A decorator that marks a method, receives arguments, adds metadata, and provides custom group behavior
#
# Sometimes in Python there's a need to mark some methods of a class and then use them for some sort
# of special processing.
#
# The important goals here are:
# 1) to be able to mark methods,
# 2) to be able to execute them transparently,
# 3) to be able to collect them and get their names,
# 4) to be able to store metadata on them (by receiving arguments)
#
# I've found out that a good solution would be to implement a class decorator,
# which is also a descriptor. This way, we'll have an attribute that lets you transparently use the method,
# but also knows some metadata about it.
#
# This wrapper can also contain some business-logic related to this decorator,
# which lets us keep all the relevant logic in one place.
# """
#
# # The name of the property to install onto every wrapped method
# # Please override, or set `None` if this behavior is undesired
# METHOD_PROPERTY_NAME = 'method_decorator'
#
# def __init__(self): # override me to receive arguments
# # Handler method
# self.method = None
# # Handler method function name
# self.method_name = None
#
# def __call__(self, handler_method):
# # Make sure the object itself is callable only once
# if self.method is not None:
# raise RuntimeError("@{decorator}, when used, is not itself callable".format(decorator=self.__class__.__name__))
#
# # The handler method to use for saving the field's data
# self.method = handler_method
# self.method_name = handler_method.__name__
#
# # Store ourselves as a property of the wrapped function :)
# if self.METHOD_PROPERTY_NAME:
# setattr(self.method, self.METHOD_PROPERTY_NAME, self)
#
# # Use the proper update_wrapper() for we are a decorator
# update_wrapper(self, self.method)
#
# # Done
# return self # This is what is saved on the class' __dict__
#
# def __get__(self, instance, owner):
# """ Magic descriptor: return the wrapped method when accessed """
# # This descriptor magic makes the decorated method accessible directly, even though it's wrapped.
# # This is how it works:
# # whenever a method is wrapped with @saves_relations, there is this decorator class standing in the object's
# # dict instead of the method. The decorator is not callable anymore.
# # However, because it's a descriptor (has the __get__ method), when you access this method
# # (by using class.method or object.method), it will hide itself and give you the wrapped method instead.
#
# # We, however, will have to pass the `self` argument manually, because this descriptor magic
# # breaks python's passing of `self` to the method
# if instance is None:
# # Accessing a class attribute directly
# # We return the decorator object. It's callable.
# return self
#
# # Old behavior:
# # # Accessing a class attribute directly
# # # We return the method function, so that subclasses can actually call invoke it unwrapped.
# # return self.method # got from the class
# else:
# # Accessing an object's attribute
# # We prepare for calling the method.
# return partial(self.method, instance) # pass the `self`
#
# def __repr__(self):
# return '@{decorator}({func})'.format(decorator=self.__class__.__name__, func=self.method_name)
#
# # region: Usage API
#
# @classmethod
# def is_decorated(cls, method) -> bool:
# """ Check whether the given method is decorated with @cls()
#
# It also supports detecting methods wrapped with multiple decorators, one of them being @cls.
# Note that it works only when update_wrapper() was properly used.
# """
# return isinstance(method, cls)
#
# @classmethod
# def get_method_decorator(cls, Klass: type, name: str) -> 'method_decorator':
# """ Get the decorator object, stored as `METHOD_PROPERTY_NAME` on the wrapped method """
# return getattr(getattr(Klass, name), cls.METHOD_PROPERTY_NAME)
#
# @classmethod
# @lru_cache(256) # can't be too many views out there.. :)
# def all_decorators_from(cls, Klass: type) -> Iterable['method_decorator']:
# """ Get all decorator objects from a class (cached)
#
# Note that it won't collect any inherited handler methods:
# only those declared directly on this class.
# """
# if not isinstance(Klass, type):
# raise ValueError('Can only collect decorators from a class, not from an object {}'.format(Klass))
#
# return tuple(
# attr
# for attr in Klass.__dict__.values()
# if cls.is_decorated(attr))
#
# # endregion
. Output only the next line. | self.assertTrue(isinstance(method_decorator.get_method_decorator(A, 'a'), method_decorator_1)) |
Continue the code snippet: <|code_start|> .format(list(invalid_keys)))
# Validated "related_models": all keys must be models, not names
invalid_keys = set(v
for v in self._nested_model_settings.keys()
if not isinstance(v, DeclarativeMeta))
invalid_keys -= {'*'}
if invalid_keys:
raise KeyError('Invalid related model object provided to "related_models": {!r}'
.format(list(invalid_keys)))
def get_settings(self, handler_name: str, handler_cls: type) -> dict:
""" Get settings for the given handler
Because we do not know in advance how many handlers we will have, what their names will be,
and what classes implement them, we have to handle them one by one.
Every time a class is given us, we analyze its __init__() method in order to know its kwargs and its default values.
Then, we take the matching keys from the settings dict, we take defaults from the argument defaults,
and make it all into `kwargs` that will be given to the class.
In addition to that, if the settings contain `handler_name=False`, then it means it's disabled.
is_handler_enabled() method will later tell that to MongoQuery.
"""
# Now we know the handler name
# See if it's actually disabled
if not self._settings.get('{}_enabled'.format(handler_name), True):
self._disabled_handlers.add(handler_name)
# Analyze a function, pluck the arguments that it needs
<|code_end|>
. Use current file imports:
from typing import Union
from sqlalchemy.ext.declarative import DeclarativeMeta
from mongosql.util.inspect import pluck_kwargs_from
from ..exc import DisabledError
import mongosql
and context (classes, functions, or code) from other files:
# Path: mongosql/util/inspect.py
# def pluck_kwargs_from(dct: Mapping, for_func: Callable, skip: Tuple[str] = ()) -> dict:
# """ Analyze a function, pluck the arguments it needs from a dict """
# defaults = get_function_defaults(for_func)
#
# # Get the values for these kwargs
# kwargs = {k: dct.get(k, defaults[k])
# for k in defaults.keys()
# if k not in skip}
#
# # Done
# return kwargs
#
# Path: mongosql/exc.py
# class DisabledError(InvalidQueryError):
# """ The feature is disabled """
. Output only the next line. | kwargs = pluck_kwargs_from(self._settings, for_func=handler_cls.__init__) |
Based on the snippet: <|code_start|> Then, we take the matching keys from the settings dict, we take defaults from the argument defaults,
and make it all into `kwargs` that will be given to the class.
In addition to that, if the settings contain `handler_name=False`, then it means it's disabled.
is_handler_enabled() method will later tell that to MongoQuery.
"""
# Now we know the handler name
# See if it's actually disabled
if not self._settings.get('{}_enabled'.format(handler_name), True):
self._disabled_handlers.add(handler_name)
# Analyze a function, pluck the arguments that it needs
kwargs = pluck_kwargs_from(self._settings, for_func=handler_cls.__init__)
kwargs_names = kwargs.keys() # always all of them
# Store the data that we'll need
self._handler_kwargs_names[handler_name] = set(kwargs_names)
self._handler_names.add(handler_name)
self._all_known_kwargs_names.update(kwargs_names)
# Done
return kwargs # for the handler's __init__()
def is_handler_enabled(self, handler_name: str) -> bool:
""" Test if the handler is enabled in the configuration """
return handler_name not in self._disabled_handlers
def raise_if_not_handler_enabled(self, model_name: str, handler_name: str):
""" Raise an error if the handler is not enabled """
if not self.is_handler_enabled(handler_name):
<|code_end|>
, predict the immediate next line with the help of imports:
from typing import Union
from sqlalchemy.ext.declarative import DeclarativeMeta
from mongosql.util.inspect import pluck_kwargs_from
from ..exc import DisabledError
import mongosql
and context (classes, functions, sometimes code) from other files:
# Path: mongosql/util/inspect.py
# def pluck_kwargs_from(dct: Mapping, for_func: Callable, skip: Tuple[str] = ()) -> dict:
# """ Analyze a function, pluck the arguments it needs from a dict """
# defaults = get_function_defaults(for_func)
#
# # Get the values for these kwargs
# kwargs = {k: dct.get(k, defaults[k])
# for k in defaults.keys()
# if k not in skip}
#
# # Done
# return kwargs
#
# Path: mongosql/exc.py
# class DisabledError(InvalidQueryError):
# """ The feature is disabled """
. Output only the next line. | raise DisabledError('Query handler "{}" is disabled for "{}"' |
Predict the next line after this snippet: <|code_start|> 'articles', {JSON_AGG}(a)),
{JSON_AGG}(c)
FROM u
LEFT JOIN a ON(u.id=a.uid)
LEFT JOIN c ON (a.id=c.aid)
GROUP BY u.id;
"""
def test_semisingle_line_agg__json(n):
""" Aggregate only 1st level objects; things that are nested deeper are expelled to the outskirts """
query = SEMILINE_AGG_QUERY_TEMPLATE.format(
JSON_BUILD_OBJECT='json_build_object',
JSON_AGG='json_agg',
)
for i in range(n):
rows = ssn.execute(query).fetchall()
def test_semisingle_line_agg__jsonb(n):
""" Aggregate only 1st level objects; things that are nested deeper are expelled to the outskirts """
query = SEMILINE_AGG_QUERY_TEMPLATE.format(
JSON_BUILD_OBJECT='jsonb_build_object',
JSON_AGG='jsonb_agg',
)
for i in range(n):
rows = ssn.execute(query).fetchall()
# Run
<|code_end|>
using the current file's imports:
from sqlalchemy.orm import selectinload, joinedload
from tests.benchmarks.benchmark_utils import benchmark_parallel_funcs
from tests.models import get_big_db_for_benchmarks, User, Article
and any relevant context from other files:
# Path: tests/benchmarks/benchmark_utils.py
# def benchmark_parallel_funcs(n_iterations, n_parts, *funcs):
# """ Run the given `funcs` test functions `n_iterations` times.
#
# Every function receives the `n` argument and is supposed to do its job `n` times in a loop.
# This is to reduce the impact of a repeated function call, and to let your tests initialize before they run.
#
# Names of those functions are used in displaying results.
# """
# return benchmark_parallel(
# n_iterations,
# n_parts,
# **{f.__name__: f for f in funcs})
#
# Path: tests/models.py
# def get_big_db_for_benchmarks(n_users, n_articles_per_user, n_comments_per_article):
# # Connect, create tables
# engine, Session = init_database(autoflush=True)
# drop_all(engine)
# create_all(engine)
#
# # Fill DB
# ssn = Session()
# ssn.begin()
# ssn.add_all(content_samples_random(n_users, n_articles_per_user, n_comments_per_article))
# ssn.commit()
#
# # Done
# return engine, Session
#
# class User(Base):
# __tablename__ = 'u'
#
# id = Column(Integer, primary_key=True)
# name = Column(String)
# tags = Column(pg.ARRAY(String)) # ARRAY field
# age = Column(Integer)
#
# # Maps an SQL expression as a column
# age_in_10 = column_property(age + 10, deferred=True)
#
# master_id = Column(ForeignKey('u.id', ondelete='SET NULL'), nullable=True)
# master = relationship(lambda: User, remote_side=lambda: User.id, foreign_keys=master_id)
#
# @property
# def user_calculated(self):
# return self.age + 10
#
# def __repr__(self):
# return 'User(id={}, name={!r})'.format(self.id, self.name)
#
# class Article(Base):
# __tablename__ = 'a'
#
# id = Column(Integer, primary_key=True)
# uid = Column(Integer, ForeignKey(User.id))
# title = Column(String)
# theme = Column(String)
# data = Column(pg.JSON) # JSON field
#
# user = relationship(User, backref=backref('articles'))
#
# @property
# def calculated(self):
# return len(self.title) + self.uid
#
# @calculated.setter
# def calculated(self, value):
# self.title += value
#
# @hybrid_property
# def hybrid(self):
# return self.id > 10 and self.user.age > 18
#
# @hybrid.expression
# def hybrid(cls):
# return and_(cls.id > 10, cls.user.has(User.age > 18))
#
# def __repr__(self):
# return 'Article(id={}, uid={!r}, title={!r})'.format(self.id, self.uid, self.title)
. Output only the next line. | res = benchmark_parallel_funcs( |
Next line prediction: <|code_start|>That's 500% overhead!
That's alright if you load just one object. But when all we need is load a bunch of objects and immediately convert
them to JSON, that's a huge, huge overhead for no added benefit. We don't need no ORM features for this task.
Sidenote: joinedload() is somehow 20% faster than selectinload(). Surprise!
But that is probably because we didn't have many fields.
Now, @vdmit11 has suggested a crazy idea: what if we make JSON object in
Postgres? `jsonb_agg()` is what he has suggested. I've tested different ways to do it, and discovered that it really
is faster.
Using `json_agg()` is somehow 2x faster than `jsonb_agg()`, both in Postgres 9.6 and 11.5.
We can also win an additional 2x by not using `to_json()` on rows, but return tuples.
Both this techniques let us fetch the results 3.5x faster than `selectinload()`, 2.7x faster than `joinedload()`.
But this will give us tuples.
If we try to use `to_json()` and fetch keyed objects, it's more convenient, but reduces the performance improvement
to just 1.5x, which brings it close to what SqlAlchemy does.
Conclusion: forming JSON directly in Postgres can potentially speed up some queries 3x. But this is only applicable
to those queries that feed the data to JSON immediately. It's worth doing, but is rather complicated.
The problems with this approach: we'll have to make sure that `@property` fields are included into the results if
they were specified in a projection.
"""
# Init DB
<|code_end|>
. Use current file imports:
(from sqlalchemy.orm import selectinload, joinedload
from tests.benchmarks.benchmark_utils import benchmark_parallel_funcs
from tests.models import get_big_db_for_benchmarks, User, Article)
and context including class names, function names, or small code snippets from other files:
# Path: tests/benchmarks/benchmark_utils.py
# def benchmark_parallel_funcs(n_iterations, n_parts, *funcs):
# """ Run the given `funcs` test functions `n_iterations` times.
#
# Every function receives the `n` argument and is supposed to do its job `n` times in a loop.
# This is to reduce the impact of a repeated function call, and to let your tests initialize before they run.
#
# Names of those functions are used in displaying results.
# """
# return benchmark_parallel(
# n_iterations,
# n_parts,
# **{f.__name__: f for f in funcs})
#
# Path: tests/models.py
# def get_big_db_for_benchmarks(n_users, n_articles_per_user, n_comments_per_article):
# # Connect, create tables
# engine, Session = init_database(autoflush=True)
# drop_all(engine)
# create_all(engine)
#
# # Fill DB
# ssn = Session()
# ssn.begin()
# ssn.add_all(content_samples_random(n_users, n_articles_per_user, n_comments_per_article))
# ssn.commit()
#
# # Done
# return engine, Session
#
# class User(Base):
# __tablename__ = 'u'
#
# id = Column(Integer, primary_key=True)
# name = Column(String)
# tags = Column(pg.ARRAY(String)) # ARRAY field
# age = Column(Integer)
#
# # Maps an SQL expression as a column
# age_in_10 = column_property(age + 10, deferred=True)
#
# master_id = Column(ForeignKey('u.id', ondelete='SET NULL'), nullable=True)
# master = relationship(lambda: User, remote_side=lambda: User.id, foreign_keys=master_id)
#
# @property
# def user_calculated(self):
# return self.age + 10
#
# def __repr__(self):
# return 'User(id={}, name={!r})'.format(self.id, self.name)
#
# class Article(Base):
# __tablename__ = 'a'
#
# id = Column(Integer, primary_key=True)
# uid = Column(Integer, ForeignKey(User.id))
# title = Column(String)
# theme = Column(String)
# data = Column(pg.JSON) # JSON field
#
# user = relationship(User, backref=backref('articles'))
#
# @property
# def calculated(self):
# return len(self.title) + self.uid
#
# @calculated.setter
# def calculated(self, value):
# self.title += value
#
# @hybrid_property
# def hybrid(self):
# return self.id > 10 and self.user.age > 18
#
# @hybrid.expression
# def hybrid(cls):
# return and_(cls.id > 10, cls.user.has(User.age > 18))
#
# def __repr__(self):
# return 'Article(id={}, uid={!r}, title={!r})'.format(self.id, self.uid, self.title)
. Output only the next line. | engine, Session = get_big_db_for_benchmarks(n_users=100, |
Given snippet: <|code_start|>We can also win an additional 2x by not using `to_json()` on rows, but return tuples.
Both this techniques let us fetch the results 3.5x faster than `selectinload()`, 2.7x faster than `joinedload()`.
But this will give us tuples.
If we try to use `to_json()` and fetch keyed objects, it's more convenient, but reduces the performance improvement
to just 1.5x, which brings it close to what SqlAlchemy does.
Conclusion: forming JSON directly in Postgres can potentially speed up some queries 3x. But this is only applicable
to those queries that feed the data to JSON immediately. It's worth doing, but is rather complicated.
The problems with this approach: we'll have to make sure that `@property` fields are included into the results if
they were specified in a projection.
"""
# Init DB
engine, Session = get_big_db_for_benchmarks(n_users=100,
n_articles_per_user=5,
n_comments_per_article=3)
# Prepare
N_REPEATS = 500
ssn = Session()
# Tests
def test_selectinload(n):
""" Load Users+Articles+Comments, with selectinload() """
for i in range(n):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from sqlalchemy.orm import selectinload, joinedload
from tests.benchmarks.benchmark_utils import benchmark_parallel_funcs
from tests.models import get_big_db_for_benchmarks, User, Article
and context:
# Path: tests/benchmarks/benchmark_utils.py
# def benchmark_parallel_funcs(n_iterations, n_parts, *funcs):
# """ Run the given `funcs` test functions `n_iterations` times.
#
# Every function receives the `n` argument and is supposed to do its job `n` times in a loop.
# This is to reduce the impact of a repeated function call, and to let your tests initialize before they run.
#
# Names of those functions are used in displaying results.
# """
# return benchmark_parallel(
# n_iterations,
# n_parts,
# **{f.__name__: f for f in funcs})
#
# Path: tests/models.py
# def get_big_db_for_benchmarks(n_users, n_articles_per_user, n_comments_per_article):
# # Connect, create tables
# engine, Session = init_database(autoflush=True)
# drop_all(engine)
# create_all(engine)
#
# # Fill DB
# ssn = Session()
# ssn.begin()
# ssn.add_all(content_samples_random(n_users, n_articles_per_user, n_comments_per_article))
# ssn.commit()
#
# # Done
# return engine, Session
#
# class User(Base):
# __tablename__ = 'u'
#
# id = Column(Integer, primary_key=True)
# name = Column(String)
# tags = Column(pg.ARRAY(String)) # ARRAY field
# age = Column(Integer)
#
# # Maps an SQL expression as a column
# age_in_10 = column_property(age + 10, deferred=True)
#
# master_id = Column(ForeignKey('u.id', ondelete='SET NULL'), nullable=True)
# master = relationship(lambda: User, remote_side=lambda: User.id, foreign_keys=master_id)
#
# @property
# def user_calculated(self):
# return self.age + 10
#
# def __repr__(self):
# return 'User(id={}, name={!r})'.format(self.id, self.name)
#
# class Article(Base):
# __tablename__ = 'a'
#
# id = Column(Integer, primary_key=True)
# uid = Column(Integer, ForeignKey(User.id))
# title = Column(String)
# theme = Column(String)
# data = Column(pg.JSON) # JSON field
#
# user = relationship(User, backref=backref('articles'))
#
# @property
# def calculated(self):
# return len(self.title) + self.uid
#
# @calculated.setter
# def calculated(self, value):
# self.title += value
#
# @hybrid_property
# def hybrid(self):
# return self.id > 10 and self.user.age > 18
#
# @hybrid.expression
# def hybrid(cls):
# return and_(cls.id > 10, cls.user.has(User.age > 18))
#
# def __repr__(self):
# return 'Article(id={}, uid={!r}, title={!r})'.format(self.id, self.uid, self.title)
which might include code, classes, or functions. Output only the next line. | users = ssn.query(User).options( |
Here is a snippet: <|code_start|>Both this techniques let us fetch the results 3.5x faster than `selectinload()`, 2.7x faster than `joinedload()`.
But this will give us tuples.
If we try to use `to_json()` and fetch keyed objects, it's more convenient, but reduces the performance improvement
to just 1.5x, which brings it close to what SqlAlchemy does.
Conclusion: forming JSON directly in Postgres can potentially speed up some queries 3x. But this is only applicable
to those queries that feed the data to JSON immediately. It's worth doing, but is rather complicated.
The problems with this approach: we'll have to make sure that `@property` fields are included into the results if
they were specified in a projection.
"""
# Init DB
engine, Session = get_big_db_for_benchmarks(n_users=100,
n_articles_per_user=5,
n_comments_per_article=3)
# Prepare
N_REPEATS = 500
ssn = Session()
# Tests
def test_selectinload(n):
""" Load Users+Articles+Comments, with selectinload() """
for i in range(n):
users = ssn.query(User).options(
<|code_end|>
. Write the next line using the current file imports:
from sqlalchemy.orm import selectinload, joinedload
from tests.benchmarks.benchmark_utils import benchmark_parallel_funcs
from tests.models import get_big_db_for_benchmarks, User, Article
and context from other files:
# Path: tests/benchmarks/benchmark_utils.py
# def benchmark_parallel_funcs(n_iterations, n_parts, *funcs):
# """ Run the given `funcs` test functions `n_iterations` times.
#
# Every function receives the `n` argument and is supposed to do its job `n` times in a loop.
# This is to reduce the impact of a repeated function call, and to let your tests initialize before they run.
#
# Names of those functions are used in displaying results.
# """
# return benchmark_parallel(
# n_iterations,
# n_parts,
# **{f.__name__: f for f in funcs})
#
# Path: tests/models.py
# def get_big_db_for_benchmarks(n_users, n_articles_per_user, n_comments_per_article):
# # Connect, create tables
# engine, Session = init_database(autoflush=True)
# drop_all(engine)
# create_all(engine)
#
# # Fill DB
# ssn = Session()
# ssn.begin()
# ssn.add_all(content_samples_random(n_users, n_articles_per_user, n_comments_per_article))
# ssn.commit()
#
# # Done
# return engine, Session
#
# class User(Base):
# __tablename__ = 'u'
#
# id = Column(Integer, primary_key=True)
# name = Column(String)
# tags = Column(pg.ARRAY(String)) # ARRAY field
# age = Column(Integer)
#
# # Maps an SQL expression as a column
# age_in_10 = column_property(age + 10, deferred=True)
#
# master_id = Column(ForeignKey('u.id', ondelete='SET NULL'), nullable=True)
# master = relationship(lambda: User, remote_side=lambda: User.id, foreign_keys=master_id)
#
# @property
# def user_calculated(self):
# return self.age + 10
#
# def __repr__(self):
# return 'User(id={}, name={!r})'.format(self.id, self.name)
#
# class Article(Base):
# __tablename__ = 'a'
#
# id = Column(Integer, primary_key=True)
# uid = Column(Integer, ForeignKey(User.id))
# title = Column(String)
# theme = Column(String)
# data = Column(pg.JSON) # JSON field
#
# user = relationship(User, backref=backref('articles'))
#
# @property
# def calculated(self):
# return len(self.title) + self.uid
#
# @calculated.setter
# def calculated(self, value):
# self.title += value
#
# @hybrid_property
# def hybrid(self):
# return self.id > 10 and self.user.age > 18
#
# @hybrid.expression
# def hybrid(cls):
# return and_(cls.id > 10, cls.user.has(User.age > 18))
#
# def __repr__(self):
# return 'Article(id={}, uid={!r}, title={!r})'.format(self.id, self.uid, self.title)
, which may include functions, classes, or code. Output only the next line. | selectinload(User.articles).selectinload(Article.comments) |
Given snippet: <|code_start|> return "".join(
[
"\\begin{tikzpicture}[scale=.3,baseline=(current bounding box.center)]",
f"\n{tab}\\foreach \\x in {{1,...,{n}}} {{\n{tab*2}",
f"\\draw[ultra thin] (\\x,0)--(\\x,{n+1}); %vline\n{tab*2}",
f"\\draw[ultra thin] (0,\\x)--({n+1},\\x); %hline\n{tab}}}\n{tab}",
f"\n{tab}".join(
f"\\draw[fill=black] ({idx + 1},{val + 1}) circle (5pt);"
for (idx, val) in enumerate(self)
),
"\n\\end{tikzpicture}",
]
)
def containment_to_tikz(self, pattern: "Perm") -> Iterator[str]:
"""Return the tikz picture of the pattern within self."""
return (self._pattern_to_tikz(occ) for occ in self.occurrences_of(pattern))
def _pattern_to_tikz(self, occurrence: Tuple[int, ...]) -> str:
init = self.to_tikz()
init = init[0 : init.rfind("\\end{tikzpicture}")]
reds = "\n".join(
f" \\draw[red] ({idx + 1},{self[idx] + 1}) circle (10pt);"
for idx in occurrence
)
return f"{init}{reds}\n\\end{{tikzpicture}}"
def show(self, scale: float = 1.0) -> None:
"""Open a browser tab and display permutation graphically. Image can be
enlarged with scale parameter"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import bisect
import collections
import functools
import itertools
import math
import numbers
import operator
import random
from typing import (
TYPE_CHECKING,
Callable,
Deque,
Dict,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
from permuta.misc import HTMLViewer
from permuta.misc.math import is_prime
from .patt import Patt
and context:
# Path: permuta/misc/display.py
# class HTMLViewer:
# """A class for opening html text in browser."""
#
# _THREAD_WAIT_TIME: ClassVar[float] = 5 # seconds
#
# @staticmethod
# def _remove_file_thread(fname: str) -> None:
# time.sleep(HTMLViewer._THREAD_WAIT_TIME)
# if os.path.exists(fname):
# os.remove(fname)
#
# @staticmethod
# def _remove_file(fname: str) -> None:
# threading.Thread(target=HTMLViewer._remove_file_thread, args=(fname,)).start()
#
# @staticmethod
# def open_html(html: str) -> None:
# """Open and render html string in browser."""
# with tempfile.NamedTemporaryFile(
# "r+", suffix=".html", delete=False
# ) as html_file:
# html_file.write(html)
# webbrowser.open_new_tab(f"file://{html_file.name}")
# HTMLViewer._remove_file(html_file.name)
#
# @staticmethod
# def open_svg(svg: str) -> None:
# """Open and render svg image string in browser."""
# HTMLViewer.open_html(f"<html><body>{svg}</body></html>")
#
# Path: permuta/misc/math.py
# def is_prime(n: int) -> bool:
# """Primality test using 6k+-1 optimization."""
# if n <= 3:
# return n > 1
# if n % 2 == 0 or n % 3 == 0:
# return False
# i = 5
# while i ** 2 <= n:
# if n % i == 0 or n % (i + 2) == 0:
# return False
# i += 6
# return True
#
# Path: permuta/patterns/patt.py
# class Patt(abc.ABC):
# """A permutation pattern, e.g. classical, bivincular and mesh patterns."""
#
# def avoided_by(self, *patts: "Patt") -> bool:
# """Check if self is avoided by all the provided patterns."""
# return all(self not in patt for patt in patts)
#
# def contained_in(self, *patts: "Patt") -> bool:
# """Check if self is a pattern of all the provided patterns."""
# return all(self in patt for patt in patts)
#
# def count_occurrences_in(self, patt: "Patt") -> int:
# """Count the number of occurrences of self in the pattern."""
# return sum(1 for _ in self.occurrences_in(patt))
#
# @abc.abstractmethod
# def occurrences_in(
# self, patt: "Patt", *args, **kwargs
# ) -> Iterator[Tuple[int, ...]]:
# """Find all indices of occurrences of self in pattern."""
#
# @abc.abstractmethod
# def __len__(self) -> int:
# """The length of the pattern."""
#
# @abc.abstractmethod
# def get_perm(self) -> "Perm":
# """Get the permutation part of the pattern"""
#
# @abc.abstractmethod
# def __contains__(self, patt: object) -> bool:
# """Does pattern contains another?"""
which might include code, classes, or functions. Output only the next line. | HTMLViewer.open_svg(self.to_svg(image_scale=scale)) |
Given the following code snippet before the placeholder: <|code_start|> def pinnacle_set(self) -> List[int]:
"""Return the pinnacle set of self.
See: https://arxiv.org/abs/2105.10388
https://arxiv.org/abs/1704.05494
https://arxiv.org/abs/2001.07325
Examples:
>>> Perm((5, 3, 4, 0, 2, 1)).pinnacle_set()
[4, 2]
>>> Perm((1, 2, 0)).pinnacle_set()
[2]
>>> Perm((2, 1, 0)).pinnacle_set()
[]
"""
return list(self.pinnacles())
def count_column_sum_primes(self) -> int:
"""Returns the number of primes in the column sums of the two line notation
of a permutation.
See: https://www.findstat.org/StatisticsDatabase/St001285/
https://arxiv.org/abs/1809.01012
Examples:
>>> Perm((0,)).count_column_sum_primes()
1
>>> Perm((0, 1)).count_column_sum_primes()
1
>>> Perm((1, 0)).count_column_sum_primes()
2
"""
<|code_end|>
, predict the next line using imports from the current file:
import bisect
import collections
import functools
import itertools
import math
import numbers
import operator
import random
from typing import (
TYPE_CHECKING,
Callable,
Deque,
Dict,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
from permuta.misc import HTMLViewer
from permuta.misc.math import is_prime
from .patt import Patt
and context including class names, function names, and sometimes code from other files:
# Path: permuta/misc/display.py
# class HTMLViewer:
# """A class for opening html text in browser."""
#
# _THREAD_WAIT_TIME: ClassVar[float] = 5 # seconds
#
# @staticmethod
# def _remove_file_thread(fname: str) -> None:
# time.sleep(HTMLViewer._THREAD_WAIT_TIME)
# if os.path.exists(fname):
# os.remove(fname)
#
# @staticmethod
# def _remove_file(fname: str) -> None:
# threading.Thread(target=HTMLViewer._remove_file_thread, args=(fname,)).start()
#
# @staticmethod
# def open_html(html: str) -> None:
# """Open and render html string in browser."""
# with tempfile.NamedTemporaryFile(
# "r+", suffix=".html", delete=False
# ) as html_file:
# html_file.write(html)
# webbrowser.open_new_tab(f"file://{html_file.name}")
# HTMLViewer._remove_file(html_file.name)
#
# @staticmethod
# def open_svg(svg: str) -> None:
# """Open and render svg image string in browser."""
# HTMLViewer.open_html(f"<html><body>{svg}</body></html>")
#
# Path: permuta/misc/math.py
# def is_prime(n: int) -> bool:
# """Primality test using 6k+-1 optimization."""
# if n <= 3:
# return n > 1
# if n % 2 == 0 or n % 3 == 0:
# return False
# i = 5
# while i ** 2 <= n:
# if n % i == 0 or n % (i + 2) == 0:
# return False
# i += 6
# return True
#
# Path: permuta/patterns/patt.py
# class Patt(abc.ABC):
# """A permutation pattern, e.g. classical, bivincular and mesh patterns."""
#
# def avoided_by(self, *patts: "Patt") -> bool:
# """Check if self is avoided by all the provided patterns."""
# return all(self not in patt for patt in patts)
#
# def contained_in(self, *patts: "Patt") -> bool:
# """Check if self is a pattern of all the provided patterns."""
# return all(self in patt for patt in patts)
#
# def count_occurrences_in(self, patt: "Patt") -> int:
# """Count the number of occurrences of self in the pattern."""
# return sum(1 for _ in self.occurrences_in(patt))
#
# @abc.abstractmethod
# def occurrences_in(
# self, patt: "Patt", *args, **kwargs
# ) -> Iterator[Tuple[int, ...]]:
# """Find all indices of occurrences of self in pattern."""
#
# @abc.abstractmethod
# def __len__(self) -> int:
# """The length of the pattern."""
#
# @abc.abstractmethod
# def get_perm(self) -> "Perm":
# """Get the permutation part of the pattern"""
#
# @abc.abstractmethod
# def __contains__(self, patt: object) -> bool:
# """Does pattern contains another?"""
. Output only the next line. | return sum(1 for idx, val in enumerate(self) if is_prime(val + idx + 2)) |
Given the code snippet: <|code_start|># pylint: disable=super-init-not-called
# pylint: disable=too-many-lines
# pylint: disable=too-many-public-methods
__all__ = ("Perm",)
# Remove when pypy is 3.7 compatible andn replace TupleType with Tuple[int]
if TYPE_CHECKING:
TupleType = Tuple[int]
else:
TupleType = tuple
ApplyType = TypeVar("ApplyType")
<|code_end|>
, generate the next line using the imports in this file:
import bisect
import collections
import functools
import itertools
import math
import numbers
import operator
import random
from typing import (
TYPE_CHECKING,
Callable,
Deque,
Dict,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
from permuta.misc import HTMLViewer
from permuta.misc.math import is_prime
from .patt import Patt
and context (functions, classes, or occasionally code) from other files:
# Path: permuta/misc/display.py
# class HTMLViewer:
# """A class for opening html text in browser."""
#
# _THREAD_WAIT_TIME: ClassVar[float] = 5 # seconds
#
# @staticmethod
# def _remove_file_thread(fname: str) -> None:
# time.sleep(HTMLViewer._THREAD_WAIT_TIME)
# if os.path.exists(fname):
# os.remove(fname)
#
# @staticmethod
# def _remove_file(fname: str) -> None:
# threading.Thread(target=HTMLViewer._remove_file_thread, args=(fname,)).start()
#
# @staticmethod
# def open_html(html: str) -> None:
# """Open and render html string in browser."""
# with tempfile.NamedTemporaryFile(
# "r+", suffix=".html", delete=False
# ) as html_file:
# html_file.write(html)
# webbrowser.open_new_tab(f"file://{html_file.name}")
# HTMLViewer._remove_file(html_file.name)
#
# @staticmethod
# def open_svg(svg: str) -> None:
# """Open and render svg image string in browser."""
# HTMLViewer.open_html(f"<html><body>{svg}</body></html>")
#
# Path: permuta/misc/math.py
# def is_prime(n: int) -> bool:
# """Primality test using 6k+-1 optimization."""
# if n <= 3:
# return n > 1
# if n % 2 == 0 or n % 3 == 0:
# return False
# i = 5
# while i ** 2 <= n:
# if n % i == 0 or n % (i + 2) == 0:
# return False
# i += 6
# return True
#
# Path: permuta/patterns/patt.py
# class Patt(abc.ABC):
# """A permutation pattern, e.g. classical, bivincular and mesh patterns."""
#
# def avoided_by(self, *patts: "Patt") -> bool:
# """Check if self is avoided by all the provided patterns."""
# return all(self not in patt for patt in patts)
#
# def contained_in(self, *patts: "Patt") -> bool:
# """Check if self is a pattern of all the provided patterns."""
# return all(self in patt for patt in patts)
#
# def count_occurrences_in(self, patt: "Patt") -> int:
# """Count the number of occurrences of self in the pattern."""
# return sum(1 for _ in self.occurrences_in(patt))
#
# @abc.abstractmethod
# def occurrences_in(
# self, patt: "Patt", *args, **kwargs
# ) -> Iterator[Tuple[int, ...]]:
# """Find all indices of occurrences of self in pattern."""
#
# @abc.abstractmethod
# def __len__(self) -> int:
# """The length of the pattern."""
#
# @abc.abstractmethod
# def get_perm(self) -> "Perm":
# """Get the permutation part of the pattern"""
#
# @abc.abstractmethod
# def __contains__(self, patt: object) -> bool:
# """Does pattern contains another?"""
. Output only the next line. | class Perm(TupleType, Patt): |
Using the snippet: <|code_start|>
class InsertionEncodingStrategy(EnumerationStrategy):
"""Enumeration strategies related to the insertion encoding."""
def applies(self) -> bool:
return InsertionEncodablePerms.is_insertion_encodable(
self.basis
) or InsertionEncodablePerms.is_insertion_encodable(
<|code_end|>
, determine the next line of code. You have imports:
from permuta.enumeration_strategies.abstract_strategy import EnumerationStrategy
from permuta.permutils.insertion_encodable import InsertionEncodablePerms
from permuta.permutils.symmetry import rotate_90_clockwise_set
and context (class names, function names, or code) available:
# Path: permuta/enumeration_strategies/abstract_strategy.py
# class EnumerationStrategy(ABC):
# """Abstract class for a strategy to enumerate a permutation classes."""
#
# def __init__(self, basis: Iterable[Perm]) -> None:
# self._basis = frozenset(basis)
#
# @property
# def basis(self) -> FrozenSet[Perm]:
# """Getter for basis."""
# return self._basis
#
# @classmethod
# def reference(cls) -> str:
# """A reference for the strategy."""
# raise NotImplementedError
#
# @abstractmethod
# def applies(self) -> bool:
# """Return True if the strategy can be used for the basis."""
#
# Path: permuta/permutils/insertion_encodable.py
# class InsertionEncodablePerms:
# """A static container of functions fortesting
# if a basis has a regular insertion encoding.
# """
#
# _ALL_PROPERTIES: ClassVar[int] = 15
# _CACHE: ClassVar[Dict[Tuple, int]] = {}
#
# @staticmethod
# def _is_incr_next_incr(perm: Perm) -> bool:
# n = len(perm)
# return not any(
# curr < prev and any(perm[j + 1] < perm[j] for j in range(i + 1, n - 1))
# for i, (prev, curr) in enumerate(zip(perm, islice(perm, 1, None)))
# )
#
# @staticmethod
# def _is_incr_next_decr(perm: Perm) -> bool:
# n = len(perm)
# return not any(
# curr < prev and any(perm[j + 1] > perm[j] for j in range(i + 1, n - 1))
# for i, (prev, curr) in enumerate(zip(perm, islice(perm, 1, None)))
# )
#
# @staticmethod
# def _is_decr_next_incr(perm: Perm) -> bool:
# n = len(perm)
# return not any(
# curr > prev and any(perm[j + 1] < perm[j] for j in range(i + 1, n - 1))
# for i, (prev, curr) in enumerate(zip(perm, islice(perm, 1, None)))
# )
#
# @staticmethod
# def _is_decr_next_decr(perm: Perm) -> bool:
# n = len(perm)
# return not any(
# curr > prev and any(perm[j + 1] > perm[j] for j in range(i + 1, n - 1))
# for i, (prev, curr) in enumerate(zip(perm, islice(perm, 1, None)))
# )
#
# @staticmethod
# def _insertion_encodable_properties(perm: Perm) -> int:
# properties = InsertionEncodablePerms._CACHE.get(perm, -1)
# if properties < 0:
# properties = sum(
# val << shift
# for shift, val in enumerate(
# (
# InsertionEncodablePerms._is_incr_next_decr(perm),
# InsertionEncodablePerms._is_incr_next_incr(perm),
# InsertionEncodablePerms._is_decr_next_decr(perm),
# InsertionEncodablePerms._is_decr_next_incr(perm),
# )
# )
# )
# InsertionEncodablePerms._CACHE[perm] = properties
# return properties
#
# @staticmethod
# def is_insertion_encodable_rightmost(basis: Iterable[Perm]) -> bool:
# """Check if basis is insertion encodable by rightmost."""
# curr = 0
# for perm in basis:
# curr = curr | InsertionEncodablePerms._insertion_encodable_properties(perm)
# if curr == InsertionEncodablePerms._ALL_PROPERTIES:
# return True
# return False
#
# @staticmethod
# def is_insertion_encodable_maximum(basis: Iterable[Perm]) -> bool:
# """Check if basis is insertion encodable by maximum."""
# curr = 0
# for perm in basis:
# curr = curr | InsertionEncodablePerms._insertion_encodable_properties(
# perm.rotate()
# )
# if curr == InsertionEncodablePerms._ALL_PROPERTIES:
# return True
# return False
#
# @staticmethod
# def is_insertion_encodable(basis: Iterable[Perm]) -> bool:
# """Check if basis is insertion encodable."""
# return InsertionEncodablePerms.is_insertion_encodable_rightmost(
# basis
# ) or InsertionEncodablePerms.is_insertion_encodable_maximum(basis)
#
# Path: permuta/permutils/symmetry.py
# def rotate_90_clockwise_set(perms: Iterable[Perm]) -> Iterator[Perm]:
# """Rotate all perms by 90 degrees."""
# return (perm.rotate() for perm in perms)
. Output only the next line. | rotate_90_clockwise_set(self.basis) |
Given the following code snippet before the placeholder: <|code_start|>
Returns keywords, literals, strings, numbers, arrays and dictionaries.
Arrays and dictionaries are represented as Python lists and dictionaries.
"""
while not self.results:
(pos, token) = self.nexttoken()
#print((pos,token), (self.curtype, self.curstack))
if isinstance(token, (int, float, bool, bytes, PSLiteral)):
# normal token
self.push((pos, token))
elif token == KEYWORD_ARRAY_BEGIN:
# begin array
self.start_type(pos, 'a')
elif token == KEYWORD_ARRAY_END:
# end array
try:
self.push(self.end_type('a'))
except PSTypeError:
if STRICT:
raise
elif token == KEYWORD_DICT_BEGIN:
# begin dictionary
self.start_type(pos, 'd')
elif token == KEYWORD_DICT_END:
# end dictionary
try:
(pos, objs) = self.end_type('d')
if len(objs) % 2 != 0:
raise PSSyntaxError('Invalid dictionary construct: %r' % (objs,))
# construct a Python dictionary.
<|code_end|>
, predict the next line using imports from the current file:
import re
import logging
import unittest
from .utils import choplist
from io import BytesIO
from io import BytesIO
and context including class names, function names, and sometimes code from other files:
# Path: pdfminer/utils.py
# def choplist(n, seq):
# """Groups every n elements of the list."""
# r = []
# for x in seq:
# r.append(x)
# if len(r) == n:
# yield tuple(r)
# r = []
# return
. Output only the next line. | d = dict((literal_name(k), v) for (k, v) in choplist(2, objs) if v is not None) |
Based on the snippet: <|code_start|> def do_tag(self, tag, props=None):
return
def begin_page(self, page, ctm):
return
def end_page(self, page):
return
def begin_figure(self, name, bbox, matrix):
return
def end_figure(self, name):
return
def paint_path(self, graphicstate, stroke, fill, evenodd, path):
return
def render_image(self, name, stream):
return
def render_string(self, textstate, seq):
return
## PDFTextDevice
##
class PDFTextDevice(PDFDevice):
def render_string(self, textstate, seq):
<|code_end|>
, predict the immediate next line with the help of imports:
from .utils import mult_matrix
from .utils import translate_matrix
from .utils import q
from .utils import bbox2str
from .utils import isnumber
from .pdffont import PDFUnicodeNotDefined
and context (classes, functions, sometimes code) from other files:
# Path: pdfminer/utils.py
# def mult_matrix(m1, m0):
# (a1, b1, c1, d1, e1, f1) = m1
# (a0, b0, c0, d0, e0, f0) = m0
# """Returns the multiplication of two matrices."""
# return (a0*a1+c0*b1, b0*a1+d0*b1,
# a0*c1+c0*d1, b0*c1+d0*d1,
# a0*e1+c0*f1+e0, b0*e1+d0*f1+f0)
#
# Path: pdfminer/utils.py
# def translate_matrix(m, v):
# """Translates a matrix by (x, y)."""
# (a, b, c, d, e, f) = m
# (x, y) = v
# return (a, b, c, d, x*a+y*c+e, x*b+y*d+f)
#
# Path: pdfminer/utils.py
# def q(s):
# """Quotes html string."""
# return (s.replace('&','&')
# .replace('<','<')
# .replace('>','>')
# .replace('"','"'))
#
# Path: pdfminer/utils.py
# def bbox2str(bbox):
# (x0, y0, x1, y1) = bbox
# return '%.3f,%.3f,%.3f,%.3f' % (x0, y0, x1, y1)
#
# Path: pdfminer/utils.py
# def isnumber(x):
# return isinstance(x, (int, float))
#
# Path: pdfminer/pdffont.py
# class PDFUnicodeNotDefined(PDFFontError):
# pass
. Output only the next line. | matrix = mult_matrix(textstate.matrix, self.ctm) |
Next line prediction: <|code_start|> fontsize = textstate.fontsize
scaling = textstate.scaling * .01
charspace = textstate.charspace * scaling
wordspace = textstate.wordspace * scaling
rise = textstate.rise
if font.is_multibyte():
wordspace = 0
dxscale = .001 * fontsize * scaling
if font.is_vertical():
textstate.linematrix = self.render_string_vertical(
seq, matrix, textstate.linematrix, font, fontsize,
scaling, charspace, wordspace, rise, dxscale)
else:
textstate.linematrix = self.render_string_horizontal(
seq, matrix, textstate.linematrix, font, fontsize,
scaling, charspace, wordspace, rise, dxscale)
return
def render_string_horizontal(self, seq, matrix, pos,
font, fontsize, scaling, charspace, wordspace, rise, dxscale):
(x, y) = pos
needcharspace = False
for obj in seq:
if isnumber(obj):
x -= obj*dxscale
needcharspace = True
else:
for cid in font.decode(obj):
if needcharspace:
x += charspace
<|code_end|>
. Use current file imports:
(from .utils import mult_matrix
from .utils import translate_matrix
from .utils import q
from .utils import bbox2str
from .utils import isnumber
from .pdffont import PDFUnicodeNotDefined)
and context including class names, function names, or small code snippets from other files:
# Path: pdfminer/utils.py
# def mult_matrix(m1, m0):
# (a1, b1, c1, d1, e1, f1) = m1
# (a0, b0, c0, d0, e0, f0) = m0
# """Returns the multiplication of two matrices."""
# return (a0*a1+c0*b1, b0*a1+d0*b1,
# a0*c1+c0*d1, b0*c1+d0*d1,
# a0*e1+c0*f1+e0, b0*e1+d0*f1+f0)
#
# Path: pdfminer/utils.py
# def translate_matrix(m, v):
# """Translates a matrix by (x, y)."""
# (a, b, c, d, e, f) = m
# (x, y) = v
# return (a, b, c, d, x*a+y*c+e, x*b+y*d+f)
#
# Path: pdfminer/utils.py
# def q(s):
# """Quotes html string."""
# return (s.replace('&','&')
# .replace('<','<')
# .replace('>','>')
# .replace('"','"'))
#
# Path: pdfminer/utils.py
# def bbox2str(bbox):
# (x0, y0, x1, y1) = bbox
# return '%.3f,%.3f,%.3f,%.3f' % (x0, y0, x1, y1)
#
# Path: pdfminer/utils.py
# def isnumber(x):
# return isinstance(x, (int, float))
#
# Path: pdfminer/pdffont.py
# class PDFUnicodeNotDefined(PDFFontError):
# pass
. Output only the next line. | x += self.render_char(translate_matrix(matrix, (x, y)), |
Next line prediction: <|code_start|> return (x, y)
def render_char(self, matrix, font, fontsize, scaling, rise, cid):
return 0
## TagExtractor
##
class TagExtractor(PDFDevice):
def __init__(self, rsrcmgr, outfp):
PDFDevice.__init__(self, rsrcmgr)
self.outfp = outfp
self.pageno = 0
self._stack = []
return
def render_string(self, textstate, seq):
font = textstate.font
text = ''
for obj in seq:
if not isinstance(obj, bytes):
continue
chars = font.decode(obj)
for cid in chars:
try:
char = font.to_unichr(cid)
text += char
except PDFUnicodeNotDefined:
pass
<|code_end|>
. Use current file imports:
(from .utils import mult_matrix
from .utils import translate_matrix
from .utils import q
from .utils import bbox2str
from .utils import isnumber
from .pdffont import PDFUnicodeNotDefined)
and context including class names, function names, or small code snippets from other files:
# Path: pdfminer/utils.py
# def mult_matrix(m1, m0):
# (a1, b1, c1, d1, e1, f1) = m1
# (a0, b0, c0, d0, e0, f0) = m0
# """Returns the multiplication of two matrices."""
# return (a0*a1+c0*b1, b0*a1+d0*b1,
# a0*c1+c0*d1, b0*c1+d0*d1,
# a0*e1+c0*f1+e0, b0*e1+d0*f1+f0)
#
# Path: pdfminer/utils.py
# def translate_matrix(m, v):
# """Translates a matrix by (x, y)."""
# (a, b, c, d, e, f) = m
# (x, y) = v
# return (a, b, c, d, x*a+y*c+e, x*b+y*d+f)
#
# Path: pdfminer/utils.py
# def q(s):
# """Quotes html string."""
# return (s.replace('&','&amp;')
# .replace('<','&lt;')
# .replace('>','&gt;')
# .replace('"','&quot;'))
#
# Path: pdfminer/utils.py
# def bbox2str(bbox):
# (x0, y0, x1, y1) = bbox
# return '%.3f,%.3f,%.3f,%.3f' % (x0, y0, x1, y1)
#
# Path: pdfminer/utils.py
# def isnumber(x):
# return isinstance(x, (int, float))
#
# Path: pdfminer/pdffont.py
# class PDFUnicodeNotDefined(PDFFontError):
# pass
. Output only the next line. | self.outfp.write(q(text)) |
Given snippet: <|code_start|>
## TagExtractor
##
class TagExtractor(PDFDevice):
def __init__(self, rsrcmgr, outfp):
PDFDevice.__init__(self, rsrcmgr)
self.outfp = outfp
self.pageno = 0
self._stack = []
return
def render_string(self, textstate, seq):
font = textstate.font
text = ''
for obj in seq:
if not isinstance(obj, bytes):
continue
chars = font.decode(obj)
for cid in chars:
try:
char = font.to_unichr(cid)
text += char
except PDFUnicodeNotDefined:
pass
self.outfp.write(q(text))
return
def begin_page(self, page, ctm):
self.outfp.write('<page id="%s" bbox="%s" rotate="%d">' %
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from .utils import mult_matrix
from .utils import translate_matrix
from .utils import q
from .utils import bbox2str
from .utils import isnumber
from .pdffont import PDFUnicodeNotDefined
and context:
# Path: pdfminer/utils.py
# def mult_matrix(m1, m0):
# (a1, b1, c1, d1, e1, f1) = m1
# (a0, b0, c0, d0, e0, f0) = m0
# """Returns the multiplication of two matrices."""
# return (a0*a1+c0*b1, b0*a1+d0*b1,
# a0*c1+c0*d1, b0*c1+d0*d1,
# a0*e1+c0*f1+e0, b0*e1+d0*f1+f0)
#
# Path: pdfminer/utils.py
# def translate_matrix(m, v):
# """Translates a matrix by (x, y)."""
# (a, b, c, d, e, f) = m
# (x, y) = v
# return (a, b, c, d, x*a+y*c+e, x*b+y*d+f)
#
# Path: pdfminer/utils.py
# def q(s):
# """Quotes html string."""
# return (s.replace('&','&amp;')
# .replace('<','&lt;')
# .replace('>','&gt;')
# .replace('"','&quot;'))
#
# Path: pdfminer/utils.py
# def bbox2str(bbox):
# (x0, y0, x1, y1) = bbox
# return '%.3f,%.3f,%.3f,%.3f' % (x0, y0, x1, y1)
#
# Path: pdfminer/utils.py
# def isnumber(x):
# return isinstance(x, (int, float))
#
# Path: pdfminer/pdffont.py
# class PDFUnicodeNotDefined(PDFFontError):
# pass
which might include code, classes, or functions. Output only the next line. | (self.pageno, bbox2str(page.mediabox), page.rotate)) |
Using the snippet: <|code_start|>## PDFTextDevice
##
class PDFTextDevice(PDFDevice):
def render_string(self, textstate, seq):
matrix = mult_matrix(textstate.matrix, self.ctm)
font = textstate.font
fontsize = textstate.fontsize
scaling = textstate.scaling * .01
charspace = textstate.charspace * scaling
wordspace = textstate.wordspace * scaling
rise = textstate.rise
if font.is_multibyte():
wordspace = 0
dxscale = .001 * fontsize * scaling
if font.is_vertical():
textstate.linematrix = self.render_string_vertical(
seq, matrix, textstate.linematrix, font, fontsize,
scaling, charspace, wordspace, rise, dxscale)
else:
textstate.linematrix = self.render_string_horizontal(
seq, matrix, textstate.linematrix, font, fontsize,
scaling, charspace, wordspace, rise, dxscale)
return
def render_string_horizontal(self, seq, matrix, pos,
font, fontsize, scaling, charspace, wordspace, rise, dxscale):
(x, y) = pos
needcharspace = False
for obj in seq:
<|code_end|>
, determine the next line of code. You have imports:
from .utils import mult_matrix
from .utils import translate_matrix
from .utils import q
from .utils import bbox2str
from .utils import isnumber
from .pdffont import PDFUnicodeNotDefined
and context (class names, function names, or code) available:
# Path: pdfminer/utils.py
# def mult_matrix(m1, m0):
# (a1, b1, c1, d1, e1, f1) = m1
# (a0, b0, c0, d0, e0, f0) = m0
# """Returns the multiplication of two matrices."""
# return (a0*a1+c0*b1, b0*a1+d0*b1,
# a0*c1+c0*d1, b0*c1+d0*d1,
# a0*e1+c0*f1+e0, b0*e1+d0*f1+f0)
#
# Path: pdfminer/utils.py
# def translate_matrix(m, v):
# """Translates a matrix by (x, y)."""
# (a, b, c, d, e, f) = m
# (x, y) = v
# return (a, b, c, d, x*a+y*c+e, x*b+y*d+f)
#
# Path: pdfminer/utils.py
# def q(s):
# """Quotes html string."""
# return (s.replace('&','&amp;')
# .replace('<','&lt;')
# .replace('>','&gt;')
# .replace('"','&quot;'))
#
# Path: pdfminer/utils.py
# def bbox2str(bbox):
# (x0, y0, x1, y1) = bbox
# return '%.3f,%.3f,%.3f,%.3f' % (x0, y0, x1, y1)
#
# Path: pdfminer/utils.py
# def isnumber(x):
# return isinstance(x, (int, float))
#
# Path: pdfminer/pdffont.py
# class PDFUnicodeNotDefined(PDFFontError):
# pass
. Output only the next line. | if isnumber(obj): |
Predict the next line after this snippet: <|code_start|> y += wordspace
needcharspace = True
return (x, y)
def render_char(self, matrix, font, fontsize, scaling, rise, cid):
return 0
## TagExtractor
##
class TagExtractor(PDFDevice):
def __init__(self, rsrcmgr, outfp):
PDFDevice.__init__(self, rsrcmgr)
self.outfp = outfp
self.pageno = 0
self._stack = []
return
def render_string(self, textstate, seq):
font = textstate.font
text = ''
for obj in seq:
if not isinstance(obj, bytes):
continue
chars = font.decode(obj)
for cid in chars:
try:
char = font.to_unichr(cid)
text += char
<|code_end|>
using the current file's imports:
from .utils import mult_matrix
from .utils import translate_matrix
from .utils import q
from .utils import bbox2str
from .utils import isnumber
from .pdffont import PDFUnicodeNotDefined
and any relevant context from other files:
# Path: pdfminer/utils.py
# def mult_matrix(m1, m0):
# (a1, b1, c1, d1, e1, f1) = m1
# (a0, b0, c0, d0, e0, f0) = m0
# """Returns the multiplication of two matrices."""
# return (a0*a1+c0*b1, b0*a1+d0*b1,
# a0*c1+c0*d1, b0*c1+d0*d1,
# a0*e1+c0*f1+e0, b0*e1+d0*f1+f0)
#
# Path: pdfminer/utils.py
# def translate_matrix(m, v):
# """Translates a matrix by (x, y)."""
# (a, b, c, d, e, f) = m
# (x, y) = v
# return (a, b, c, d, x*a+y*c+e, x*b+y*d+f)
#
# Path: pdfminer/utils.py
# def q(s):
# """Quotes html string."""
# return (s.replace('&','&amp;')
# .replace('<','&lt;')
# .replace('>','&gt;')
# .replace('"','&quot;'))
#
# Path: pdfminer/utils.py
# def bbox2str(bbox):
# (x0, y0, x1, y1) = bbox
# return '%.3f,%.3f,%.3f,%.3f' % (x0, y0, x1, y1)
#
# Path: pdfminer/utils.py
# def isnumber(x):
# return isinstance(x, (int, float))
#
# Path: pdfminer/pdffont.py
# class PDFUnicodeNotDefined(PDFFontError):
# pass
. Output only the next line. | except PDFUnicodeNotDefined: |
Predict the next line for this snippet: <|code_start|>#!/usr/bin/env python
# main
def main(argv):
def usage():
print('usage: %s [-c enc=codec] output_dir regname [cid2code.txt ...]' % argv[0])
return 100
try:
(opts, args) = getopt.getopt(argv[1:], 'c:')
except getopt.GetoptError:
return usage()
enc2codec = {}
for (k, v) in opts:
if k == '-c':
(enc,_,codec) = v.partition('=')
enc2codec[enc] = codec
if not args: return usage()
outdir = args.pop(0)
if not args: return usage()
regname = args.pop(0)
<|code_end|>
with the help of current file imports:
import sys
import getopt
from pdfminer.cmapdb import convert_cmap
and context from other files:
# Path: pdfminer/cmapdb.py
# def convert_cmap(outdir, regname, enc2codec, paths):
# converter = CMapConverter(enc2codec)
#
# for path in paths:
# print('reading: %r...' % path)
# with open(path) as fp:
# converter.load(fp)
#
# files = []
# for enc in converter.get_encs():
# fname = '%s.marshal.gz' % enc
# path = os.path.join(outdir, fname)
# print('writing: %r...' % path)
# with gzip.open(path, 'wb') as fp:
# converter.dump_cmap(fp, enc)
# files.append(path)
#
# fname = 'to-unicode-%s.marshal.gz' % regname
# path = os.path.join(outdir, fname)
# print('writing: %r...' % path)
# with gzip.open(path, 'wb') as fp:
# converter.dump_unicodemap(fp)
# files.append(path)
# return files
, which may contain function names, class names, or code. Output only the next line. | convert_cmap(outdir, regname, enc2codec, args) |
Given the code snippet: <|code_start|> for i in (0, 255):
self.fp.write(struct.pack('BBBx', i, i, i))
elif ncols == 256:
# grayscale color table
for i in range(256):
self.fp.write(struct.pack('BBBx', i, i, i))
self.pos0 = self.fp.tell()
self.pos1 = self.pos0 + self.datasize
return
def write_line(self, y, data):
self.fp.seek(self.pos1 - (y+1)*self.linesize)
self.fp.write(data)
return
## ImageWriter
##
class ImageWriter:
def __init__(self, outdir):
self.outdir = outdir
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
return
def export_image(self, image):
stream = image.stream
filters = stream.get_filters()
(width, height) = image.srcsize
<|code_end|>
, generate the next line using the imports in this file:
import struct
import os
import os.path
from io import BytesIO
from .pdftypes import LITERALS_DCT_DECODE
from .pdfcolor import LITERAL_DEVICE_GRAY
from .pdfcolor import LITERAL_DEVICE_RGB
from .pdfcolor import LITERAL_DEVICE_CMYK
from PIL import Image
from PIL import ImageChops
and context (functions, classes, or occasionally code) from other files:
# Path: pdfminer/pdftypes.py
# LITERALS_DCT_DECODE = (LIT('DCTDecode'), LIT('DCT'))
#
# Path: pdfminer/pdfcolor.py
# LITERAL_DEVICE_GRAY = LIT('DeviceGray')
#
# Path: pdfminer/pdfcolor.py
# LITERAL_DEVICE_RGB = LIT('DeviceRGB')
#
# Path: pdfminer/pdfcolor.py
# LITERAL_DEVICE_CMYK = LIT('DeviceCMYK')
. Output only the next line. | if len(filters) == 1 and filters[0][0] in LITERALS_DCT_DECODE: |
Given the code snippet: <|code_start|> # grayscale color table
for i in range(256):
self.fp.write(struct.pack('BBBx', i, i, i))
self.pos0 = self.fp.tell()
self.pos1 = self.pos0 + self.datasize
return
def write_line(self, y, data):
self.fp.seek(self.pos1 - (y+1)*self.linesize)
self.fp.write(data)
return
## ImageWriter
##
class ImageWriter:
def __init__(self, outdir):
self.outdir = outdir
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
return
def export_image(self, image):
stream = image.stream
filters = stream.get_filters()
(width, height) = image.srcsize
if len(filters) == 1 and filters[0][0] in LITERALS_DCT_DECODE:
ext = '.jpg'
elif (image.bits == 1 or
<|code_end|>
, generate the next line using the imports in this file:
import struct
import os
import os.path
from io import BytesIO
from .pdftypes import LITERALS_DCT_DECODE
from .pdfcolor import LITERAL_DEVICE_GRAY
from .pdfcolor import LITERAL_DEVICE_RGB
from .pdfcolor import LITERAL_DEVICE_CMYK
from PIL import Image
from PIL import ImageChops
and context (functions, classes, or occasionally code) from other files:
# Path: pdfminer/pdftypes.py
# LITERALS_DCT_DECODE = (LIT('DCTDecode'), LIT('DCT'))
#
# Path: pdfminer/pdfcolor.py
# LITERAL_DEVICE_GRAY = LIT('DeviceGray')
#
# Path: pdfminer/pdfcolor.py
# LITERAL_DEVICE_RGB = LIT('DeviceRGB')
#
# Path: pdfminer/pdfcolor.py
# LITERAL_DEVICE_CMYK = LIT('DeviceCMYK')
. Output only the next line. | image.bits == 8 and image.colorspace in (LITERAL_DEVICE_RGB, LITERAL_DEVICE_GRAY)): |
Next line prediction: <|code_start|> # grayscale color table
for i in range(256):
self.fp.write(struct.pack('BBBx', i, i, i))
self.pos0 = self.fp.tell()
self.pos1 = self.pos0 + self.datasize
return
def write_line(self, y, data):
self.fp.seek(self.pos1 - (y+1)*self.linesize)
self.fp.write(data)
return
## ImageWriter
##
class ImageWriter:
def __init__(self, outdir):
self.outdir = outdir
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
return
def export_image(self, image):
stream = image.stream
filters = stream.get_filters()
(width, height) = image.srcsize
if len(filters) == 1 and filters[0][0] in LITERALS_DCT_DECODE:
ext = '.jpg'
elif (image.bits == 1 or
<|code_end|>
. Use current file imports:
(import struct
import os
import os.path
from io import BytesIO
from .pdftypes import LITERALS_DCT_DECODE
from .pdfcolor import LITERAL_DEVICE_GRAY
from .pdfcolor import LITERAL_DEVICE_RGB
from .pdfcolor import LITERAL_DEVICE_CMYK
from PIL import Image
from PIL import ImageChops)
and context including class names, function names, or small code snippets from other files:
# Path: pdfminer/pdftypes.py
# LITERALS_DCT_DECODE = (LIT('DCTDecode'), LIT('DCT'))
#
# Path: pdfminer/pdfcolor.py
# LITERAL_DEVICE_GRAY = LIT('DeviceGray')
#
# Path: pdfminer/pdfcolor.py
# LITERAL_DEVICE_RGB = LIT('DeviceRGB')
#
# Path: pdfminer/pdfcolor.py
# LITERAL_DEVICE_CMYK = LIT('DeviceCMYK')
. Output only the next line. | image.bits == 8 and image.colorspace in (LITERAL_DEVICE_RGB, LITERAL_DEVICE_GRAY)): |
Based on the snippet: <|code_start|> self.fp.write(data)
return
## ImageWriter
##
class ImageWriter:
def __init__(self, outdir):
self.outdir = outdir
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
return
def export_image(self, image):
stream = image.stream
filters = stream.get_filters()
(width, height) = image.srcsize
if len(filters) == 1 and filters[0][0] in LITERALS_DCT_DECODE:
ext = '.jpg'
elif (image.bits == 1 or
image.bits == 8 and image.colorspace in (LITERAL_DEVICE_RGB, LITERAL_DEVICE_GRAY)):
ext = '.%dx%d.bmp' % (width, height)
else:
ext = '.%d.%dx%d.img' % (image.bits, width, height)
name = image.name+ext
path = os.path.join(self.outdir, name)
with open(path, 'wb') as fp:
if ext == '.jpg':
raw_data = stream.get_rawdata()
<|code_end|>
, predict the immediate next line with the help of imports:
import struct
import os
import os.path
from io import BytesIO
from .pdftypes import LITERALS_DCT_DECODE
from .pdfcolor import LITERAL_DEVICE_GRAY
from .pdfcolor import LITERAL_DEVICE_RGB
from .pdfcolor import LITERAL_DEVICE_CMYK
from PIL import Image
from PIL import ImageChops
and context (classes, functions, sometimes code) from other files:
# Path: pdfminer/pdftypes.py
# LITERALS_DCT_DECODE = (LIT('DCTDecode'), LIT('DCT'))
#
# Path: pdfminer/pdfcolor.py
# LITERAL_DEVICE_GRAY = LIT('DeviceGray')
#
# Path: pdfminer/pdfcolor.py
# LITERAL_DEVICE_RGB = LIT('DeviceRGB')
#
# Path: pdfminer/pdfcolor.py
# LITERAL_DEVICE_CMYK = LIT('DeviceCMYK')
. Output only the next line. | if LITERAL_DEVICE_CMYK in image.colorspace: |
Continue the code snippet: <|code_start|> def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(529, 329)
self.selInfoWidget = QtGui.QWidget(Form)
self.selInfoWidget.setGeometry(QtCore.QRect(260, 10, 264, 222))
self.selInfoWidget.setObjectName("selInfoWidget")
self.gridLayout = QtGui.QGridLayout(self.selInfoWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.selDescLabel = QtGui.QLabel(self.selInfoWidget)
self.selDescLabel.setText("")
self.selDescLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.selDescLabel.setWordWrap(True)
self.selDescLabel.setObjectName("selDescLabel")
self.gridLayout.addWidget(self.selDescLabel, 0, 0, 1, 1)
self.selNameLabel = QtGui.QLabel(self.selInfoWidget)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.selNameLabel.setFont(font)
self.selNameLabel.setText("")
self.selNameLabel.setObjectName("selNameLabel")
self.gridLayout.addWidget(self.selNameLabel, 0, 1, 1, 1)
self.selectedTree = DataTreeWidget(self.selInfoWidget)
self.selectedTree.setObjectName("selectedTree")
self.selectedTree.headerItem().setText(0, "1")
self.gridLayout.addWidget(self.selectedTree, 1, 0, 1, 2)
self.hoverText = QtGui.QTextEdit(Form)
self.hoverText.setGeometry(QtCore.QRect(0, 240, 521, 81))
self.hoverText.setObjectName("hoverText")
<|code_end|>
. Use current file imports:
from PySide import QtCore, QtGui
from ..flowchart.FlowchartGraphicsView import FlowchartGraphicsView
from ..widgets.DataTreeWidget import DataTreeWidget
and context (classes, functions, or code) from other files:
# Path: soapy/pyqtgraph/flowchart/FlowchartGraphicsView.py
# class FlowchartGraphicsView(GraphicsView):
#
# sigHoverOver = QtCore.Signal(object)
# sigClicked = QtCore.Signal(object)
#
# def __init__(self, widget, *args):
# GraphicsView.__init__(self, *args, useOpenGL=False)
# self._vb = FlowchartViewBox(widget, lockAspect=True, invertY=True)
# self.setCentralItem(self._vb)
# self.setRenderHint(QtGui.QPainter.Antialiasing, True)
#
# def viewBox(self):
# return self._vb
#
# Path: soapy/pyqtgraph/widgets/DataTreeWidget.py
# class DataTreeWidget(QtGui.QTreeWidget):
# """
# Widget for displaying hierarchical python data structures
# (eg, nested dicts, lists, and arrays)
# """
#
#
# def __init__(self, parent=None, data=None):
# QtGui.QTreeWidget.__init__(self, parent)
# self.setVerticalScrollMode(self.ScrollPerPixel)
# self.setData(data)
# self.setColumnCount(3)
# self.setHeaderLabels(['key / index', 'type', 'value'])
#
# def setData(self, data, hideRoot=False):
# """data should be a dictionary."""
# self.clear()
# self.buildTree(data, self.invisibleRootItem(), hideRoot=hideRoot)
# #node = self.mkNode('', data)
# #while node.childCount() > 0:
# #c = node.child(0)
# #node.removeChild(c)
# #self.invisibleRootItem().addChild(c)
# self.expandToDepth(3)
# self.resizeColumnToContents(0)
#
# def buildTree(self, data, parent, name='', hideRoot=False):
# if hideRoot:
# node = parent
# else:
# typeStr = type(data).__name__
# if typeStr == 'instance':
# typeStr += ": " + data.__class__.__name__
# node = QtGui.QTreeWidgetItem([name, typeStr, ""])
# parent.addChild(node)
#
# if isinstance(data, types.TracebackType): ## convert traceback to a list of strings
# data = list(map(str.strip, traceback.format_list(traceback.extract_tb(data))))
# elif HAVE_METAARRAY and (hasattr(data, 'implements') and data.implements('MetaArray')):
# data = {
# 'data': data.view(np.ndarray),
# 'meta': data.infoCopy()
# }
#
# if isinstance(data, dict):
# for k in data.keys():
# self.buildTree(data[k], node, str(k))
# elif isinstance(data, list) or isinstance(data, tuple):
# for i in range(len(data)):
# self.buildTree(data[i], node, str(i))
# else:
# node.setText(2, str(data))
#
#
# #def mkNode(self, name, v):
# #if type(v) is list and len(v) > 0 and isinstance(v[0], dict):
# #inds = map(unicode, range(len(v)))
# #v = OrderedDict(zip(inds, v))
# #if isinstance(v, dict):
# ##print "\nadd tree", k, v
# #node = QtGui.QTreeWidgetItem([name])
# #for k in v:
# #newNode = self.mkNode(k, v[k])
# #node.addChild(newNode)
# #else:
# ##print "\nadd value", k, str(v)
# #node = QtGui.QTreeWidgetItem([unicode(name), unicode(v)])
# #return node
. Output only the next line. | self.view = FlowchartGraphicsView(Form) |
Predict the next line after this snippet: <|code_start|># Created: Mon Dec 23 10:10:51 2013
# by: pyside-uic 0.2.14 running on PySide 1.1.2
#
# WARNING! All changes made in this file will be lost!
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(529, 329)
self.selInfoWidget = QtGui.QWidget(Form)
self.selInfoWidget.setGeometry(QtCore.QRect(260, 10, 264, 222))
self.selInfoWidget.setObjectName("selInfoWidget")
self.gridLayout = QtGui.QGridLayout(self.selInfoWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.selDescLabel = QtGui.QLabel(self.selInfoWidget)
self.selDescLabel.setText("")
self.selDescLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.selDescLabel.setWordWrap(True)
self.selDescLabel.setObjectName("selDescLabel")
self.gridLayout.addWidget(self.selDescLabel, 0, 0, 1, 1)
self.selNameLabel = QtGui.QLabel(self.selInfoWidget)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.selNameLabel.setFont(font)
self.selNameLabel.setText("")
self.selNameLabel.setObjectName("selNameLabel")
self.gridLayout.addWidget(self.selNameLabel, 0, 1, 1, 1)
<|code_end|>
using the current file's imports:
from PySide import QtCore, QtGui
from ..flowchart.FlowchartGraphicsView import FlowchartGraphicsView
from ..widgets.DataTreeWidget import DataTreeWidget
and any relevant context from other files:
# Path: soapy/pyqtgraph/flowchart/FlowchartGraphicsView.py
# class FlowchartGraphicsView(GraphicsView):
#
# sigHoverOver = QtCore.Signal(object)
# sigClicked = QtCore.Signal(object)
#
# def __init__(self, widget, *args):
# GraphicsView.__init__(self, *args, useOpenGL=False)
# self._vb = FlowchartViewBox(widget, lockAspect=True, invertY=True)
# self.setCentralItem(self._vb)
# self.setRenderHint(QtGui.QPainter.Antialiasing, True)
#
# def viewBox(self):
# return self._vb
#
# Path: soapy/pyqtgraph/widgets/DataTreeWidget.py
# class DataTreeWidget(QtGui.QTreeWidget):
# """
# Widget for displaying hierarchical python data structures
# (eg, nested dicts, lists, and arrays)
# """
#
#
# def __init__(self, parent=None, data=None):
# QtGui.QTreeWidget.__init__(self, parent)
# self.setVerticalScrollMode(self.ScrollPerPixel)
# self.setData(data)
# self.setColumnCount(3)
# self.setHeaderLabels(['key / index', 'type', 'value'])
#
# def setData(self, data, hideRoot=False):
# """data should be a dictionary."""
# self.clear()
# self.buildTree(data, self.invisibleRootItem(), hideRoot=hideRoot)
# #node = self.mkNode('', data)
# #while node.childCount() > 0:
# #c = node.child(0)
# #node.removeChild(c)
# #self.invisibleRootItem().addChild(c)
# self.expandToDepth(3)
# self.resizeColumnToContents(0)
#
# def buildTree(self, data, parent, name='', hideRoot=False):
# if hideRoot:
# node = parent
# else:
# typeStr = type(data).__name__
# if typeStr == 'instance':
# typeStr += ": " + data.__class__.__name__
# node = QtGui.QTreeWidgetItem([name, typeStr, ""])
# parent.addChild(node)
#
# if isinstance(data, types.TracebackType): ## convert traceback to a list of strings
# data = list(map(str.strip, traceback.format_list(traceback.extract_tb(data))))
# elif HAVE_METAARRAY and (hasattr(data, 'implements') and data.implements('MetaArray')):
# data = {
# 'data': data.view(np.ndarray),
# 'meta': data.infoCopy()
# }
#
# if isinstance(data, dict):
# for k in data.keys():
# self.buildTree(data[k], node, str(k))
# elif isinstance(data, list) or isinstance(data, tuple):
# for i in range(len(data)):
# self.buildTree(data[i], node, str(i))
# else:
# node.setText(2, str(data))
#
#
# #def mkNode(self, name, v):
# #if type(v) is list and len(v) > 0 and isinstance(v[0], dict):
# #inds = map(unicode, range(len(v)))
# #v = OrderedDict(zip(inds, v))
# #if isinstance(v, dict):
# ##print "\nadd tree", k, v
# #node = QtGui.QTreeWidgetItem([name])
# #for k in v:
# #newNode = self.mkNode(k, v[k])
# #node.addChild(newNode)
# #else:
# ##print "\nadd value", k, str(v)
# #node = QtGui.QTreeWidgetItem([unicode(name), unicode(v)])
# #return node
. Output only the next line. | self.selectedTree = DataTreeWidget(self.selInfoWidget) |
Based on the snippet: <|code_start|> """Builds an inverted index on the prefixes of the strings in the input
column in the input table.
Prefix index is used by prefix filter and edit distance join.
"""
def __init__(self, table, index_attr, tokenizer,
sim_measure_type, threshold, token_ordering):
self.table = table
self.index_attr = index_attr
self.tokenizer = tokenizer
self.sim_measure_type = sim_measure_type
self.threshold = threshold
self.token_ordering = token_ordering
self.index = None
super(self.__class__, self).__init__()
def build(self, cache_empty_records=True):
"""Build prefix index."""
self.index = {}
empty_records = []
row_id = 0
for row in self.table:
# tokenize string and order the tokens using the token ordering
index_string = row[self.index_attr]
index_attr_tokens = order_using_token_ordering(
self.tokenizer.tokenize(index_string), self.token_ordering)
# compute prefix length
num_tokens = len(index_attr_tokens)
<|code_end|>
, predict the immediate next line with the help of imports:
from py_stringsimjoin.filter.filter_utils import get_prefix_length
from py_stringsimjoin.index.index import Index
from py_stringsimjoin.utils.token_ordering import order_using_token_ordering
and context (classes, functions, sometimes code) from other files:
# Path: py_stringsimjoin/filter/filter_utils.py
# def get_prefix_length(num_tokens, sim_measure_type, threshold, tokenizer):
# """Computes prefix length.
#
# References:
# * String Similarity Joins: An Experimental Evaluation, VLDB 2014.
# """
#
# if num_tokens == 0:
# return 0
#
# if sim_measure_type == 'COSINE':
# return int(num_tokens -
# ceil(threshold * threshold * num_tokens) + 1)
# elif sim_measure_type == 'DICE':
# return int(num_tokens -
# ceil((threshold / (2 - threshold)) * num_tokens) + 1)
# elif sim_measure_type == 'EDIT_DISTANCE':
# return min(tokenizer.qval * threshold + 1, num_tokens)
# elif sim_measure_type == 'JACCARD':
# return int(num_tokens - ceil(threshold * num_tokens) + 1)
# elif sim_measure_type == 'OVERLAP':
# return max(num_tokens - threshold + 1, 0)
#
# Path: py_stringsimjoin/index/index.py
# class Index(object):
# pass
#
# Path: py_stringsimjoin/utils/token_ordering.py
# def order_using_token_ordering(tokens, token_ordering):
# ordered_tokens = []
#
# for token in tokens:
# order = token_ordering.get(token)
# if order is not None:
# ordered_tokens.append(order)
#
# ordered_tokens.sort()
#
# return ordered_tokens
. Output only the next line. | prefix_length = get_prefix_length( |
Predict the next line after this snippet: <|code_start|>
class PrefixIndex(Index):
"""Builds an inverted index on the prefixes of the strings in the input
column in the input table.
Prefix index is used by prefix filter and edit distance join.
"""
def __init__(self, table, index_attr, tokenizer,
sim_measure_type, threshold, token_ordering):
self.table = table
self.index_attr = index_attr
self.tokenizer = tokenizer
self.sim_measure_type = sim_measure_type
self.threshold = threshold
self.token_ordering = token_ordering
self.index = None
super(self.__class__, self).__init__()
def build(self, cache_empty_records=True):
"""Build prefix index."""
self.index = {}
empty_records = []
row_id = 0
for row in self.table:
# tokenize string and order the tokens using the token ordering
index_string = row[self.index_attr]
<|code_end|>
using the current file's imports:
from py_stringsimjoin.filter.filter_utils import get_prefix_length
from py_stringsimjoin.index.index import Index
from py_stringsimjoin.utils.token_ordering import order_using_token_ordering
and any relevant context from other files:
# Path: py_stringsimjoin/filter/filter_utils.py
# def get_prefix_length(num_tokens, sim_measure_type, threshold, tokenizer):
# """Computes prefix length.
#
# References:
# * String Similarity Joins: An Experimental Evaluation, VLDB 2014.
# """
#
# if num_tokens == 0:
# return 0
#
# if sim_measure_type == 'COSINE':
# return int(num_tokens -
# ceil(threshold * threshold * num_tokens) + 1)
# elif sim_measure_type == 'DICE':
# return int(num_tokens -
# ceil((threshold / (2 - threshold)) * num_tokens) + 1)
# elif sim_measure_type == 'EDIT_DISTANCE':
# return min(tokenizer.qval * threshold + 1, num_tokens)
# elif sim_measure_type == 'JACCARD':
# return int(num_tokens - ceil(threshold * num_tokens) + 1)
# elif sim_measure_type == 'OVERLAP':
# return max(num_tokens - threshold + 1, 0)
#
# Path: py_stringsimjoin/index/index.py
# class Index(object):
# pass
#
# Path: py_stringsimjoin/utils/token_ordering.py
# def order_using_token_ordering(tokens, token_ordering):
# ordered_tokens = []
#
# for token in tokens:
# order = token_ordering.get(token)
# if order is not None:
# ordered_tokens.append(order)
#
# ordered_tokens.sort()
#
# return ordered_tokens
. Output only the next line. | index_attr_tokens = order_using_token_ordering( |
Continue the code snippet: <|code_start|>
class ProfileTableForJoinTestCases(unittest.TestCase):
def setUp(self):
self.table = pd.DataFrame([('1', 'data science'),
('2', None),
('3', 'data integration'),
('4', ''),
('5', 'data science')],
columns = ['id', 'attr'])
def test_profile_table_for_join(self):
<|code_end|>
. Use current file imports:
import unittest
import pandas as pd
from nose.tools import assert_equal, assert_list_equal, raises
from py_stringsimjoin.profiler.profiler import profile_table_for_join
and context (classes, functions, or code) from other files:
# Path: py_stringsimjoin/profiler/profiler.py
# def profile_table_for_join(input_table, profile_attrs=None):
# """Profiles the attributes in the table to suggest implications for join.
#
# Args:
# input_table (DataFrame): input table to profile.
# profile_attrs (list): list of attribute names from the input table to be
# profiled (defaults to None). If not provided, all attributes in the
# input table will be profiled.
#
# Returns:
# A dataframe consisting of profile output. Specifically, the dataframe
# contains three columns,
#
# 1) 'Unique values' column, which shows the number of unique values in
# each attribute,
# 2) 'Missing values' column, which shows the number of missing values in
# each attribute, and
# 3) 'Comments' column, which contains comments about each attribute.
#
# The output dataframe is indexed by attribute name, so that the
# statistics for each attribute can be easily accessed using the attribute name.
# """
#
# # check if the input table is a dataframe
# validate_input_table(input_table, 'input table')
#
# profile_output = []
#
# if profile_attrs is None:
# profile_attrs = list(input_table.columns.values)
# else:
# # check if the profile attributes exist
# for attr in profile_attrs:
# validate_attr(attr, input_table.columns,
# 'profile attribute', 'input table')
#
# num_rows = len(input_table)
#
# for attr in profile_attrs:
# # compute number of unique values in the column
# unique_values = len(input_table[attr].unique())
#
# # compute number of missing values in the column
# missing_values = sum(pd.isnull(input_table[attr]))
#
# # compute percentage of unique values in the column
# unique_percent = round((float(unique_values) / float(num_rows)) * 100,
# 2)
#
# # compute percentage of missing values in the column
# missing_percent = round((float(missing_values) / float(num_rows)) * 100,
# 2)
#
# # format stats for better display
# formatted_unique_stat = _format_statistic(unique_values, unique_percent)
# formatted_missing_stat = _format_statistic(missing_values,
# missing_percent)
#
# comments = ''
# # if there are missing values in the column, add a comment.
# if missing_percent > 0:
# comments = ''.join(['Joining on this attribute will ignore ',
# formatted_missing_stat, ' rows.'])
# # if the column consists of unique values, add a comment.
# if unique_percent == 100.0 and missing_values == 0:
# comments = 'This attribute can be used as a key attribute.'
#
# profile_output.append((attr, formatted_unique_stat,
# formatted_missing_stat, comments))
#
# # compose output dataframe containing the profiling results.
# output_header = ['Attribute', 'Unique values', 'Missing values', 'Comments']
# output_df = pd.DataFrame(profile_output, columns=output_header)
# return output_df.set_index('Attribute')
. Output only the next line. | profile_output = profile_table_for_join(self.table) |
Given the following code snippet before the placeholder: <|code_start|>
def validate_sim_measure_type(sim_measure_type):
"""Check if the input sim_measure_type is one of the supported types."""
sim_measure_types = ['COSINE', 'DICE', 'EDIT_DISTANCE', 'JACCARD',
'OVERLAP']
if sim_measure_type.upper() not in sim_measure_types:
raise TypeError('\'' + sim_measure_type + '\' is not a valid ' + \
'sim_measure_type. Supported types are COSINE, DICE' + \
', EDIT_DISTANCE, JACCARD and OVERLAP.')
return True
def validate_comp_op_for_sim_measure(comp_op, sim_measure_type):
"""Check if the comparison operator is valid for the sim_measure_type."""
if sim_measure_type == 'EDIT_DISTANCE':
if comp_op not in ['<=', '<', '=']:
raise AssertionError('Comparison operator not supported. ' + \
'Supported comparison operators for ' + sim_measure_type + \
' are <=, < and =.')
else:
if comp_op not in ['>=', '>', '=']:
raise AssertionError('Comparison operator not supported. ' + \
'Supported comparison operators for ' + sim_measure_type + \
' are >=, > and =.')
return True
def validate_comp_op(comp_op):
"""Check if the comparison operator is valid."""
<|code_end|>
, predict the next line using imports from the current file:
import os
import numpy as np
import pandas as pd
from py_stringmatching.tokenizer.tokenizer import Tokenizer
from py_stringmatching.tokenizer.qgram_tokenizer import QgramTokenizer
from py_stringsimjoin.utils.generic_helper import COMP_OP_MAP
and context including class names, function names, and sometimes code from other files:
# Path: py_stringsimjoin/utils/generic_helper.py
# COMP_OP_MAP = {'>=': operator.ge,
# '>': operator.gt,
# '<=': operator.le,
# '<': operator.lt,
# '=': operator.eq,
# '!=': operator.ne}
. Output only the next line. | if comp_op not in COMP_OP_MAP.keys(): |
Here is a snippet: <|code_start|>
def __init__(self, table, index_attr, tokenizer,
sim_measure_type, threshold, token_ordering):
self.table = table
self.index_attr = index_attr
self.tokenizer = tokenizer
self.sim_measure_type = sim_measure_type
self.threshold = threshold
self.token_ordering = token_ordering
self.index = None
self.size_cache = None
self.min_length = maxsize
self.max_length = 0
super(self.__class__, self).__init__()
def build(self, cache_empty_records=True, cache_tokens=False):
"""Build position index."""
self.index = {}
self.size_cache = []
cached_tokens = []
empty_records = []
row_id = 0
for row in self.table:
# tokenize string and order the tokens using the token ordering
index_string = row[self.index_attr]
index_attr_tokens = order_using_token_ordering(
self.tokenizer.tokenize(index_string), self.token_ordering)
# compute prefix length
num_tokens = len(index_attr_tokens)
<|code_end|>
. Write the next line using the current file imports:
from sys import maxsize
from py_stringsimjoin.filter.filter_utils import get_prefix_length
from py_stringsimjoin.index.index import Index
from py_stringsimjoin.utils.token_ordering import order_using_token_ordering
and context from other files:
# Path: py_stringsimjoin/filter/filter_utils.py
# def get_prefix_length(num_tokens, sim_measure_type, threshold, tokenizer):
# """Computes prefix length.
#
# References:
# * String Similarity Joins: An Experimental Evaluation, VLDB 2014.
# """
#
# if num_tokens == 0:
# return 0
#
# if sim_measure_type == 'COSINE':
# return int(num_tokens -
# ceil(threshold * threshold * num_tokens) + 1)
# elif sim_measure_type == 'DICE':
# return int(num_tokens -
# ceil((threshold / (2 - threshold)) * num_tokens) + 1)
# elif sim_measure_type == 'EDIT_DISTANCE':
# return min(tokenizer.qval * threshold + 1, num_tokens)
# elif sim_measure_type == 'JACCARD':
# return int(num_tokens - ceil(threshold * num_tokens) + 1)
# elif sim_measure_type == 'OVERLAP':
# return max(num_tokens - threshold + 1, 0)
#
# Path: py_stringsimjoin/index/index.py
# class Index(object):
# pass
#
# Path: py_stringsimjoin/utils/token_ordering.py
# def order_using_token_ordering(tokens, token_ordering):
# ordered_tokens = []
#
# for token in tokens:
# order = token_ordering.get(token)
# if order is not None:
# ordered_tokens.append(order)
#
# ordered_tokens.sort()
#
# return ordered_tokens
, which may include functions, classes, or code. Output only the next line. | prefix_length = get_prefix_length( |
Here is a snippet: <|code_start|> """Builds a position index on the input column in the input table.
Position index is used by position filter, dice join, cosine join and
jaccard join.
"""
def __init__(self, table, index_attr, tokenizer,
sim_measure_type, threshold, token_ordering):
self.table = table
self.index_attr = index_attr
self.tokenizer = tokenizer
self.sim_measure_type = sim_measure_type
self.threshold = threshold
self.token_ordering = token_ordering
self.index = None
self.size_cache = None
self.min_length = maxsize
self.max_length = 0
super(self.__class__, self).__init__()
def build(self, cache_empty_records=True, cache_tokens=False):
"""Build position index."""
self.index = {}
self.size_cache = []
cached_tokens = []
empty_records = []
row_id = 0
for row in self.table:
# tokenize string and order the tokens using the token ordering
index_string = row[self.index_attr]
<|code_end|>
. Write the next line using the current file imports:
from sys import maxsize
from py_stringsimjoin.filter.filter_utils import get_prefix_length
from py_stringsimjoin.index.index import Index
from py_stringsimjoin.utils.token_ordering import order_using_token_ordering
and context from other files:
# Path: py_stringsimjoin/filter/filter_utils.py
# def get_prefix_length(num_tokens, sim_measure_type, threshold, tokenizer):
# """Computes prefix length.
#
# References:
# * String Similarity Joins: An Experimental Evaluation, VLDB 2014.
# """
#
# if num_tokens == 0:
# return 0
#
# if sim_measure_type == 'COSINE':
# return int(num_tokens -
# ceil(threshold * threshold * num_tokens) + 1)
# elif sim_measure_type == 'DICE':
# return int(num_tokens -
# ceil((threshold / (2 - threshold)) * num_tokens) + 1)
# elif sim_measure_type == 'EDIT_DISTANCE':
# return min(tokenizer.qval * threshold + 1, num_tokens)
# elif sim_measure_type == 'JACCARD':
# return int(num_tokens - ceil(threshold * num_tokens) + 1)
# elif sim_measure_type == 'OVERLAP':
# return max(num_tokens - threshold + 1, 0)
#
# Path: py_stringsimjoin/index/index.py
# class Index(object):
# pass
#
# Path: py_stringsimjoin/utils/token_ordering.py
# def order_using_token_ordering(tokens, token_ordering):
# ordered_tokens = []
#
# for token in tokens:
# order = token_ordering.get(token)
# if order is not None:
# ordered_tokens.append(order)
#
# ordered_tokens.sort()
#
# return ordered_tokens
, which may include functions, classes, or code. Output only the next line. | index_attr_tokens = order_using_token_ordering( |
Continue the code snippet: <|code_start|>
class OverlapJoinValidTestCases(unittest.TestCase):
def test_overlap_join_using_tokenizer_with_return_set_false(self):
A = pd.DataFrame([{'id':1, 'attr':'hello'}])
B = pd.DataFrame([{'id':1, 'attr':'he ll'}])
qg2_tok = QgramTokenizer(2)
assert_equal(qg2_tok.get_return_set(), False)
<|code_end|>
. Use current file imports:
import os
import unittest
import pandas as pd
from nose.tools import assert_equal, raises
from py_stringmatching.tokenizer.delimiter_tokenizer import DelimiterTokenizer
from py_stringmatching.tokenizer.qgram_tokenizer import QgramTokenizer
from six import iteritems
from py_stringsimjoin.join.overlap_join import overlap_join
and context (classes, functions, or code) from other files:
# Path: py_stringsimjoin/join/overlap_join.py
# def overlap_join(ltable, rtable,
# l_key_attr, r_key_attr,
# l_join_attr, r_join_attr,
# tokenizer, threshold, comp_op='>=',
# allow_missing=False,
# l_out_attrs=None, r_out_attrs=None,
# l_out_prefix='l_', r_out_prefix='r_',
# out_sim_score=True, n_jobs=1, show_progress=True):
# """Join two tables using overlap measure.
#
# For two sets X and Y, the overlap between them is given by:
#
# :math:`overlap(X, Y) = |X \\cap Y|`
#
# Finds tuple pairs from left table and right table such that the overlap
# between the join attributes satisfies the condition on input threshold. For
# example, if the comparison operator is '>=', finds tuple pairs whose
# overlap between the strings that are the values of the join attributes is
# greater than or equal to the input threshold, as specified in "threshold".
#
# Args:
# ltable (DataFrame): left input table.
#
# rtable (DataFrame): right input table.
#
# l_key_attr (string): key attribute in left table.
#
# r_key_attr (string): key attribute in right table.
#
# l_join_attr (string): join attribute in left table.
#
# r_join_attr (string): join attribute in right table.
#
# tokenizer (Tokenizer): tokenizer to be used to tokenize join
# attributes.
#
# threshold (float): overlap threshold to be satisfied.
#
# comp_op (string): comparison operator. Supported values are '>=', '>'
# and '=' (defaults to '>=').
#
# allow_missing (boolean): flag to indicate whether tuple pairs with
# missing value in at least one of the join attributes should be
# included in the output (defaults to False). If this flag is set to
# True, a tuple in ltable with missing value in the join attribute
# will be matched with every tuple in rtable and vice versa.
#
# l_out_attrs (list): list of attribute names from the left table to be
# included in the output table (defaults to None).
#
# r_out_attrs (list): list of attribute names from the right table to be
# included in the output table (defaults to None).
#
# l_out_prefix (string): prefix to be used for the attribute names coming
# from the left table, in the output table (defaults to 'l\_').
#
# r_out_prefix (string): prefix to be used for the attribute names coming
# from the right table, in the output table (defaults to 'r\_').
#
# out_sim_score (boolean): flag to indicate whether similarity score
# should be included in the output table (defaults to True). Setting
# this flag to True will add a column named '_sim_score' in the
# output table. This column will contain the similarity scores for the
# tuple pairs in the output.
#
# n_jobs (int): number of parallel jobs to use for the computation
# (defaults to 1). If -1 is given, all CPUs are used. If 1 is given,
# no parallel computing code is used at all, which is useful for
# debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used
# (where n_cpus is the total number of CPUs in the machine). Thus for
# n_jobs = -2, all CPUs but one are used. If (n_cpus + 1 + n_jobs)
# becomes less than 1, then no parallel computing code will be used
# (i.e., equivalent to the default).
#
# show_progress (boolean): flag to indicate whether task progress should
# be displayed to the user (defaults to True).
#
# Returns:
# An output table containing tuple pairs that satisfy the join
# condition (DataFrame).
# """
#
# from py_stringsimjoin import __use_cython__
# if __use_cython__:
# from py_stringsimjoin.join.overlap_join_cy import overlap_join_cy
# return overlap_join_cy(ltable, rtable,
# l_key_attr, r_key_attr,
# l_join_attr, r_join_attr,
# tokenizer, threshold, comp_op, allow_missing,
# l_out_attrs, r_out_attrs,
# l_out_prefix, r_out_prefix,
# out_sim_score, n_jobs, show_progress)
# else:
# from py_stringsimjoin.join.overlap_join_py import overlap_join_py
# return overlap_join_py(ltable, rtable,
# l_key_attr, r_key_attr,
# l_join_attr, r_join_attr,
# tokenizer, threshold, comp_op, allow_missing,
# l_out_attrs, r_out_attrs,
# l_out_prefix, r_out_prefix,
# out_sim_score, n_jobs, show_progress)
. Output only the next line. | c = overlap_join(A, B, 'id', 'id', 'attr', 'attr', qg2_tok, 1) |
Given the code snippet: <|code_start|> candset_splits[job_index],
candset_l_key_attr, candset_r_key_attr,
ltable_projected, rtable_projected,
l_key_attr, r_key_attr,
l_filter_attr, r_filter_attr,
self,
(show_progress and (job_index==n_jobs-1)))
for job_index in range(n_jobs))
output_table = pd.concat(results)
return output_table
def _filter_candset_split(candset,
candset_l_key_attr, candset_r_key_attr,
ltable, rtable,
l_key_attr, r_key_attr,
l_filter_attr, r_filter_attr,
filter_object, show_progress):
# Find column indices of key attr and filter attr in ltable
l_columns = list(ltable.columns.values)
l_key_attr_index = l_columns.index(l_key_attr)
l_filter_attr_index = l_columns.index(l_filter_attr)
# Find column indices of key attr and filter attr in rtable
r_columns = list(rtable.columns.values)
r_key_attr_index = r_columns.index(r_key_attr)
r_filter_attr_index = r_columns.index(r_filter_attr)
# Build a dictionary on ltable
<|code_end|>
, generate the next line using the imports in this file:
from joblib import delayed, Parallel
from py_stringsimjoin.utils.generic_helper import build_dict_from_table, \
get_num_processes_to_launch, split_table
from py_stringsimjoin.utils.validation import validate_attr, \
validate_attr_type, validate_key_attr, validate_input_table
import pandas as pd
import pyprind
and context (functions, classes, or occasionally code) from other files:
# Path: py_stringsimjoin/utils/generic_helper.py
# def build_dict_from_table(table, key_attr_index, join_attr_index,
# remove_null=True):
# table_dict = {}
# for row in table.itertuples(index=False):
# if remove_null and pd.isnull(row[join_attr_index]):
# continue
# table_dict[row[key_attr_index]] = tuple(row)
# return table_dict
#
# def get_num_processes_to_launch(n_jobs):
# # determine number of processes to launch parallely
# num_procs = n_jobs
# if n_jobs < 0:
# num_cpus = multiprocessing.cpu_count()
# num_procs = num_cpus + 1 + n_jobs
# return max(num_procs, 1)
#
# def split_table(table, num_splits):
# splits = []
# split_size = 1.0/num_splits*len(table)
# for i in xrange(num_splits):
# splits.append(table[int(round(i*split_size)):
# int(round((i+1)*split_size))])
# return splits
#
# Path: py_stringsimjoin/utils/validation.py
# def validate_attr(attr, table_cols, attr_label, table_label):
# """Check if the attribute exists in the table."""
# if attr not in table_cols:
# raise AssertionError(attr_label + ' \'' + attr + '\' not found in ' + \
# table_label)
# return True
#
# def validate_attr_type(attr, attr_type, attr_label, table_label):
# """Check if the attribute is not of numeric type."""
# if attr_type != np.object:
# raise AssertionError(attr_label + ' \'' + attr + '\' in ' +
# table_label + ' is not of string type.')
# return True
#
# def validate_key_attr(key_attr, table, table_label):
# """Check if the attribute is a valid key attribute."""
# unique_flag = len(table[key_attr].unique()) == len(table)
# nan_flag = sum(table[key_attr].isnull()) == 0
# if not (unique_flag and nan_flag):
# raise AssertionError('\'' + key_attr + '\' is not a key attribute ' + \
# 'in ' + table_label)
# return True
#
# def validate_input_table(table, table_label):
# """Check if the input table is a dataframe."""
# if not isinstance(table, pd.DataFrame):
# raise TypeError(table_label + ' is not a dataframe')
# return True
. Output only the next line. | ltable_dict = build_dict_from_table(ltable, l_key_attr_index, |
Next line prediction: <|code_start|> 'key attribute', 'left table')
validate_attr(r_key_attr, rtable.columns,
'key attribute', 'right table')
validate_attr(l_filter_attr, ltable.columns,
'filter attribute', 'left table')
validate_attr(r_filter_attr, rtable.columns,
'filter attribute', 'right table')
# check if the filter attributes are not of numeric type
validate_attr_type(l_filter_attr, ltable[l_filter_attr].dtype,
'filter attribute', 'left table')
validate_attr_type(r_filter_attr, rtable[r_filter_attr].dtype,
'filter attribute', 'right table')
# check if the key attributes are unique and do not contain
# missing values
validate_key_attr(l_key_attr, ltable, 'left table')
validate_key_attr(r_key_attr, rtable, 'right table')
# check for empty candset
if candset.empty:
return candset
# Do a projection on the input dataframes to keep only required
# attributes. Note that this does not create a copy of the dataframes.
# It only creates a view on original dataframes.
ltable_projected = ltable[[l_key_attr, l_filter_attr]]
rtable_projected = rtable[[r_key_attr, r_filter_attr]]
# computes the actual number of jobs to launch.
<|code_end|>
. Use current file imports:
(from joblib import delayed, Parallel
from py_stringsimjoin.utils.generic_helper import build_dict_from_table, \
get_num_processes_to_launch, split_table
from py_stringsimjoin.utils.validation import validate_attr, \
validate_attr_type, validate_key_attr, validate_input_table
import pandas as pd
import pyprind)
and context including class names, function names, or small code snippets from other files:
# Path: py_stringsimjoin/utils/generic_helper.py
# def build_dict_from_table(table, key_attr_index, join_attr_index,
# remove_null=True):
# table_dict = {}
# for row in table.itertuples(index=False):
# if remove_null and pd.isnull(row[join_attr_index]):
# continue
# table_dict[row[key_attr_index]] = tuple(row)
# return table_dict
#
# def get_num_processes_to_launch(n_jobs):
# # determine number of processes to launch parallely
# num_procs = n_jobs
# if n_jobs < 0:
# num_cpus = multiprocessing.cpu_count()
# num_procs = num_cpus + 1 + n_jobs
# return max(num_procs, 1)
#
# def split_table(table, num_splits):
# splits = []
# split_size = 1.0/num_splits*len(table)
# for i in xrange(num_splits):
# splits.append(table[int(round(i*split_size)):
# int(round((i+1)*split_size))])
# return splits
#
# Path: py_stringsimjoin/utils/validation.py
# def validate_attr(attr, table_cols, attr_label, table_label):
# """Check if the attribute exists in the table."""
# if attr not in table_cols:
# raise AssertionError(attr_label + ' \'' + attr + '\' not found in ' + \
# table_label)
# return True
#
# def validate_attr_type(attr, attr_type, attr_label, table_label):
# """Check if the attribute is not of numeric type."""
# if attr_type != np.object:
# raise AssertionError(attr_label + ' \'' + attr + '\' in ' +
# table_label + ' is not of string type.')
# return True
#
# def validate_key_attr(key_attr, table, table_label):
# """Check if the attribute is a valid key attribute."""
# unique_flag = len(table[key_attr].unique()) == len(table)
# nan_flag = sum(table[key_attr].isnull()) == 0
# if not (unique_flag and nan_flag):
# raise AssertionError('\'' + key_attr + '\' is not a key attribute ' + \
# 'in ' + table_label)
# return True
#
# def validate_input_table(table, table_label):
# """Check if the input table is a dataframe."""
# if not isinstance(table, pd.DataFrame):
# raise TypeError(table_label + ' is not a dataframe')
# return True
. Output only the next line. | n_jobs = min(get_num_processes_to_launch(n_jobs), len(candset)) |
Given snippet: <|code_start|>
# check if the key attributes are unique and do not contain
# missing values
validate_key_attr(l_key_attr, ltable, 'left table')
validate_key_attr(r_key_attr, rtable, 'right table')
# check for empty candset
if candset.empty:
return candset
# Do a projection on the input dataframes to keep only required
# attributes. Note that this does not create a copy of the dataframes.
# It only creates a view on original dataframes.
ltable_projected = ltable[[l_key_attr, l_filter_attr]]
rtable_projected = rtable[[r_key_attr, r_filter_attr]]
# computes the actual number of jobs to launch.
n_jobs = min(get_num_processes_to_launch(n_jobs), len(candset))
if n_jobs <= 1:
# if n_jobs is 1, do not use any parallel code.
output_table = _filter_candset_split(candset,
candset_l_key_attr, candset_r_key_attr,
ltable_projected, rtable_projected,
l_key_attr, r_key_attr,
l_filter_attr, r_filter_attr,
self, show_progress)
else:
# if n_jobs is above 1, split the candset into n_jobs splits and
# filter each candset split in a separate process.
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from joblib import delayed, Parallel
from py_stringsimjoin.utils.generic_helper import build_dict_from_table, \
get_num_processes_to_launch, split_table
from py_stringsimjoin.utils.validation import validate_attr, \
validate_attr_type, validate_key_attr, validate_input_table
import pandas as pd
import pyprind
and context:
# Path: py_stringsimjoin/utils/generic_helper.py
# def build_dict_from_table(table, key_attr_index, join_attr_index,
# remove_null=True):
# table_dict = {}
# for row in table.itertuples(index=False):
# if remove_null and pd.isnull(row[join_attr_index]):
# continue
# table_dict[row[key_attr_index]] = tuple(row)
# return table_dict
#
# def get_num_processes_to_launch(n_jobs):
# # determine number of processes to launch parallely
# num_procs = n_jobs
# if n_jobs < 0:
# num_cpus = multiprocessing.cpu_count()
# num_procs = num_cpus + 1 + n_jobs
# return max(num_procs, 1)
#
# def split_table(table, num_splits):
# splits = []
# split_size = 1.0/num_splits*len(table)
# for i in xrange(num_splits):
# splits.append(table[int(round(i*split_size)):
# int(round((i+1)*split_size))])
# return splits
#
# Path: py_stringsimjoin/utils/validation.py
# def validate_attr(attr, table_cols, attr_label, table_label):
# """Check if the attribute exists in the table."""
# if attr not in table_cols:
# raise AssertionError(attr_label + ' \'' + attr + '\' not found in ' + \
# table_label)
# return True
#
# def validate_attr_type(attr, attr_type, attr_label, table_label):
# """Check if the attribute is not of numeric type."""
# if attr_type != np.object:
# raise AssertionError(attr_label + ' \'' + attr + '\' in ' +
# table_label + ' is not of string type.')
# return True
#
# def validate_key_attr(key_attr, table, table_label):
# """Check if the attribute is a valid key attribute."""
# unique_flag = len(table[key_attr].unique()) == len(table)
# nan_flag = sum(table[key_attr].isnull()) == 0
# if not (unique_flag and nan_flag):
# raise AssertionError('\'' + key_attr + '\' is not a key attribute ' + \
# 'in ' + table_label)
# return True
#
# def validate_input_table(table, table_label):
# """Check if the input table is a dataframe."""
# if not isinstance(table, pd.DataFrame):
# raise TypeError(table_label + ' is not a dataframe')
# return True
which might include code, classes, or functions. Output only the next line. | candset_splits = split_table(candset, n_jobs) |
Based on the snippet: <|code_start|> r_key_attr (string): key attribute in right table.
l_filter_attr (string): attribute in left table on which the filter
should be applied.
r_filter_attr (string): attribute in right table on which the filter
should be applied.
n_jobs (int): number of parallel jobs to use for the computation
(defaults to 1). If -1 is given, all CPUs are used. If 1 is
given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used (where n_cpus is the total
number of CPUs in the machine). Thus for n_jobs = -2, all CPUs
but one are used. If (n_cpus + 1 + n_jobs) becomes less than 1,
then no parallel computing code will be used (i.e., equivalent
to the default).
show_progress (boolean): flag to indicate whether task progress
should be displayed to the user (defaults to True).
Returns:
An output table containing tuple pairs from the candidate set that
survive the filter (DataFrame).
"""
# check if the input candset is a dataframe
validate_input_table(candset, 'candset')
# check if the candset key attributes exist
<|code_end|>
, predict the immediate next line with the help of imports:
from joblib import delayed, Parallel
from py_stringsimjoin.utils.generic_helper import build_dict_from_table, \
get_num_processes_to_launch, split_table
from py_stringsimjoin.utils.validation import validate_attr, \
validate_attr_type, validate_key_attr, validate_input_table
import pandas as pd
import pyprind
and context (classes, functions, sometimes code) from other files:
# Path: py_stringsimjoin/utils/generic_helper.py
# def build_dict_from_table(table, key_attr_index, join_attr_index,
# remove_null=True):
# table_dict = {}
# for row in table.itertuples(index=False):
# if remove_null and pd.isnull(row[join_attr_index]):
# continue
# table_dict[row[key_attr_index]] = tuple(row)
# return table_dict
#
# def get_num_processes_to_launch(n_jobs):
# # determine number of processes to launch parallely
# num_procs = n_jobs
# if n_jobs < 0:
# num_cpus = multiprocessing.cpu_count()
# num_procs = num_cpus + 1 + n_jobs
# return max(num_procs, 1)
#
# def split_table(table, num_splits):
# splits = []
# split_size = 1.0/num_splits*len(table)
# for i in xrange(num_splits):
# splits.append(table[int(round(i*split_size)):
# int(round((i+1)*split_size))])
# return splits
#
# Path: py_stringsimjoin/utils/validation.py
# def validate_attr(attr, table_cols, attr_label, table_label):
# """Check if the attribute exists in the table."""
# if attr not in table_cols:
# raise AssertionError(attr_label + ' \'' + attr + '\' not found in ' + \
# table_label)
# return True
#
# def validate_attr_type(attr, attr_type, attr_label, table_label):
# """Check if the attribute is not of numeric type."""
# if attr_type != np.object:
# raise AssertionError(attr_label + ' \'' + attr + '\' in ' +
# table_label + ' is not of string type.')
# return True
#
# def validate_key_attr(key_attr, table, table_label):
# """Check if the attribute is a valid key attribute."""
# unique_flag = len(table[key_attr].unique()) == len(table)
# nan_flag = sum(table[key_attr].isnull()) == 0
# if not (unique_flag and nan_flag):
# raise AssertionError('\'' + key_attr + '\' is not a key attribute ' + \
# 'in ' + table_label)
# return True
#
# def validate_input_table(table, table_label):
# """Check if the input table is a dataframe."""
# if not isinstance(table, pd.DataFrame):
# raise TypeError(table_label + ' is not a dataframe')
# return True
. Output only the next line. | validate_attr(candset_l_key_attr, candset.columns, |
Given the code snippet: <|code_start|>
Returns:
An output table containing tuple pairs from the candidate set that
survive the filter (DataFrame).
"""
# check if the input candset is a dataframe
validate_input_table(candset, 'candset')
# check if the candset key attributes exist
validate_attr(candset_l_key_attr, candset.columns,
'left key attribute', 'candset')
validate_attr(candset_r_key_attr, candset.columns,
'right key attribute', 'candset')
# check if the input tables are dataframes
validate_input_table(ltable, 'left table')
validate_input_table(rtable, 'right table')
# check if the key attributes filter join attributes exist
validate_attr(l_key_attr, ltable.columns,
'key attribute', 'left table')
validate_attr(r_key_attr, rtable.columns,
'key attribute', 'right table')
validate_attr(l_filter_attr, ltable.columns,
'filter attribute', 'left table')
validate_attr(r_filter_attr, rtable.columns,
'filter attribute', 'right table')
# check if the filter attributes are not of numeric type
<|code_end|>
, generate the next line using the imports in this file:
from joblib import delayed, Parallel
from py_stringsimjoin.utils.generic_helper import build_dict_from_table, \
get_num_processes_to_launch, split_table
from py_stringsimjoin.utils.validation import validate_attr, \
validate_attr_type, validate_key_attr, validate_input_table
import pandas as pd
import pyprind
and context (functions, classes, or occasionally code) from other files:
# Path: py_stringsimjoin/utils/generic_helper.py
# def build_dict_from_table(table, key_attr_index, join_attr_index,
# remove_null=True):
# table_dict = {}
# for row in table.itertuples(index=False):
# if remove_null and pd.isnull(row[join_attr_index]):
# continue
# table_dict[row[key_attr_index]] = tuple(row)
# return table_dict
#
# def get_num_processes_to_launch(n_jobs):
# # determine number of processes to launch parallely
# num_procs = n_jobs
# if n_jobs < 0:
# num_cpus = multiprocessing.cpu_count()
# num_procs = num_cpus + 1 + n_jobs
# return max(num_procs, 1)
#
# def split_table(table, num_splits):
# splits = []
# split_size = 1.0/num_splits*len(table)
# for i in xrange(num_splits):
# splits.append(table[int(round(i*split_size)):
# int(round((i+1)*split_size))])
# return splits
#
# Path: py_stringsimjoin/utils/validation.py
# def validate_attr(attr, table_cols, attr_label, table_label):
# """Check if the attribute exists in the table."""
# if attr not in table_cols:
# raise AssertionError(attr_label + ' \'' + attr + '\' not found in ' + \
# table_label)
# return True
#
# def validate_attr_type(attr, attr_type, attr_label, table_label):
# """Check if the attribute is not of numeric type."""
# if attr_type != np.object:
# raise AssertionError(attr_label + ' \'' + attr + '\' in ' +
# table_label + ' is not of string type.')
# return True
#
# def validate_key_attr(key_attr, table, table_label):
# """Check if the attribute is a valid key attribute."""
# unique_flag = len(table[key_attr].unique()) == len(table)
# nan_flag = sum(table[key_attr].isnull()) == 0
# if not (unique_flag and nan_flag):
# raise AssertionError('\'' + key_attr + '\' is not a key attribute ' + \
# 'in ' + table_label)
# return True
#
# def validate_input_table(table, table_label):
# """Check if the input table is a dataframe."""
# if not isinstance(table, pd.DataFrame):
# raise TypeError(table_label + ' is not a dataframe')
# return True
. Output only the next line. | validate_attr_type(l_filter_attr, ltable[l_filter_attr].dtype, |
Predict the next line after this snippet: <|code_start|> validate_input_table(candset, 'candset')
# check if the candset key attributes exist
validate_attr(candset_l_key_attr, candset.columns,
'left key attribute', 'candset')
validate_attr(candset_r_key_attr, candset.columns,
'right key attribute', 'candset')
# check if the input tables are dataframes
validate_input_table(ltable, 'left table')
validate_input_table(rtable, 'right table')
# check if the key attributes filter join attributes exist
validate_attr(l_key_attr, ltable.columns,
'key attribute', 'left table')
validate_attr(r_key_attr, rtable.columns,
'key attribute', 'right table')
validate_attr(l_filter_attr, ltable.columns,
'filter attribute', 'left table')
validate_attr(r_filter_attr, rtable.columns,
'filter attribute', 'right table')
# check if the filter attributes are not of numeric type
validate_attr_type(l_filter_attr, ltable[l_filter_attr].dtype,
'filter attribute', 'left table')
validate_attr_type(r_filter_attr, rtable[r_filter_attr].dtype,
'filter attribute', 'right table')
# check if the key attributes are unique and do not contain
# missing values
<|code_end|>
using the current file's imports:
from joblib import delayed, Parallel
from py_stringsimjoin.utils.generic_helper import build_dict_from_table, \
get_num_processes_to_launch, split_table
from py_stringsimjoin.utils.validation import validate_attr, \
validate_attr_type, validate_key_attr, validate_input_table
import pandas as pd
import pyprind
and any relevant context from other files:
# Path: py_stringsimjoin/utils/generic_helper.py
# def build_dict_from_table(table, key_attr_index, join_attr_index,
# remove_null=True):
# table_dict = {}
# for row in table.itertuples(index=False):
# if remove_null and pd.isnull(row[join_attr_index]):
# continue
# table_dict[row[key_attr_index]] = tuple(row)
# return table_dict
#
# def get_num_processes_to_launch(n_jobs):
# # determine number of processes to launch parallely
# num_procs = n_jobs
# if n_jobs < 0:
# num_cpus = multiprocessing.cpu_count()
# num_procs = num_cpus + 1 + n_jobs
# return max(num_procs, 1)
#
# def split_table(table, num_splits):
# splits = []
# split_size = 1.0/num_splits*len(table)
# for i in xrange(num_splits):
# splits.append(table[int(round(i*split_size)):
# int(round((i+1)*split_size))])
# return splits
#
# Path: py_stringsimjoin/utils/validation.py
# def validate_attr(attr, table_cols, attr_label, table_label):
# """Check if the attribute exists in the table."""
# if attr not in table_cols:
# raise AssertionError(attr_label + ' \'' + attr + '\' not found in ' + \
# table_label)
# return True
#
# def validate_attr_type(attr, attr_type, attr_label, table_label):
# """Check if the attribute is not of numeric type."""
# if attr_type != np.object:
# raise AssertionError(attr_label + ' \'' + attr + '\' in ' +
# table_label + ' is not of string type.')
# return True
#
# def validate_key_attr(key_attr, table, table_label):
# """Check if the attribute is a valid key attribute."""
# unique_flag = len(table[key_attr].unique()) == len(table)
# nan_flag = sum(table[key_attr].isnull()) == 0
# if not (unique_flag and nan_flag):
# raise AssertionError('\'' + key_attr + '\' is not a key attribute ' + \
# 'in ' + table_label)
# return True
#
# def validate_input_table(table, table_label):
# """Check if the input table is a dataframe."""
# if not isinstance(table, pd.DataFrame):
# raise TypeError(table_label + ' is not a dataframe')
# return True
. Output only the next line. | validate_key_attr(l_key_attr, ltable, 'left table') |
Here is a snippet: <|code_start|>
l_key_attr (string): key attribute in left table.
r_key_attr (string): key attribute in right table.
l_filter_attr (string): attribute in left table on which the filter
should be applied.
r_filter_attr (string): attribute in right table on which the filter
should be applied.
n_jobs (int): number of parallel jobs to use for the computation
(defaults to 1). If -1 is given, all CPUs are used. If 1 is
given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used (where n_cpus is the total
number of CPUs in the machine). Thus for n_jobs = -2, all CPUs
but one are used. If (n_cpus + 1 + n_jobs) becomes less than 1,
then no parallel computing code will be used (i.e., equivalent
to the default).
show_progress (boolean): flag to indicate whether task progress
should be displayed to the user (defaults to True).
Returns:
An output table containing tuple pairs from the candidate set that
survive the filter (DataFrame).
"""
# check if the input candset is a dataframe
<|code_end|>
. Write the next line using the current file imports:
from joblib import delayed, Parallel
from py_stringsimjoin.utils.generic_helper import build_dict_from_table, \
get_num_processes_to_launch, split_table
from py_stringsimjoin.utils.validation import validate_attr, \
validate_attr_type, validate_key_attr, validate_input_table
import pandas as pd
import pyprind
and context from other files:
# Path: py_stringsimjoin/utils/generic_helper.py
# def build_dict_from_table(table, key_attr_index, join_attr_index,
# remove_null=True):
# table_dict = {}
# for row in table.itertuples(index=False):
# if remove_null and pd.isnull(row[join_attr_index]):
# continue
# table_dict[row[key_attr_index]] = tuple(row)
# return table_dict
#
# def get_num_processes_to_launch(n_jobs):
# # determine number of processes to launch parallely
# num_procs = n_jobs
# if n_jobs < 0:
# num_cpus = multiprocessing.cpu_count()
# num_procs = num_cpus + 1 + n_jobs
# return max(num_procs, 1)
#
# def split_table(table, num_splits):
# splits = []
# split_size = 1.0/num_splits*len(table)
# for i in xrange(num_splits):
# splits.append(table[int(round(i*split_size)):
# int(round((i+1)*split_size))])
# return splits
#
# Path: py_stringsimjoin/utils/validation.py
# def validate_attr(attr, table_cols, attr_label, table_label):
# """Check if the attribute exists in the table."""
# if attr not in table_cols:
# raise AssertionError(attr_label + ' \'' + attr + '\' not found in ' + \
# table_label)
# return True
#
# def validate_attr_type(attr, attr_type, attr_label, table_label):
# """Check if the attribute is not of numeric type."""
# if attr_type != np.object:
# raise AssertionError(attr_label + ' \'' + attr + '\' in ' +
# table_label + ' is not of string type.')
# return True
#
# def validate_key_attr(key_attr, table, table_label):
# """Check if the attribute is a valid key attribute."""
# unique_flag = len(table[key_attr].unique()) == len(table)
# nan_flag = sum(table[key_attr].isnull()) == 0
# if not (unique_flag and nan_flag):
# raise AssertionError('\'' + key_attr + '\' is not a key attribute ' + \
# 'in ' + table_label)
# return True
#
# def validate_input_table(table, table_label):
# """Check if the input table is a dataframe."""
# if not isinstance(table, pd.DataFrame):
# raise TypeError(table_label + ' is not a dataframe')
# return True
, which may include functions, classes, or code. Output only the next line. | validate_input_table(candset, 'candset') |
Predict the next line for this snippet: <|code_start|>
class GetNumProcessesToLaunchTestCases(unittest.TestCase):
def setUp(self):
self.cpu_count = multiprocessing.cpu_count()
def test_n_jobs_minus_1(self):
<|code_end|>
with the help of current file imports:
import multiprocessing
import unittest
import pandas as pd
from nose.tools import assert_equal, assert_list_equal, raises
from py_stringsimjoin.utils.generic_helper import get_num_processes_to_launch
and context from other files:
# Path: py_stringsimjoin/utils/generic_helper.py
# def get_num_processes_to_launch(n_jobs):
# # determine number of processes to launch parallely
# num_procs = n_jobs
# if n_jobs < 0:
# num_cpus = multiprocessing.cpu_count()
# num_procs = num_cpus + 1 + n_jobs
# return max(num_procs, 1)
, which may contain function names, class names, or code. Output only the next line. | assert_equal(get_num_processes_to_launch(-1), self.cpu_count) |
Given snippet: <|code_start|> Args:
input_table (DataFrame): input table to profile.
profile_attrs (list): list of attribute names from the input table to be
profiled (defaults to None). If not provided, all attributes in the
input table will be profiled.
Returns:
A dataframe consisting of profile output. Specifically, the dataframe
contains three columns,
1) 'Unique values' column, which shows the number of unique values in
each attribute,
2) 'Missing values' column, which shows the number of missing values in
each attribute, and
3) 'Comments' column, which contains comments about each attribute.
The output dataframe is indexed by attribute name, so that the
statistics for each attribute can be easily accessed using the attribute name.
"""
# check if the input table is a dataframe
validate_input_table(input_table, 'input table')
profile_output = []
if profile_attrs is None:
profile_attrs = list(input_table.columns.values)
else:
# check if the profile attributes exist
for attr in profile_attrs:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import pandas as pd
from py_stringsimjoin.utils.validation import validate_attr, \
validate_input_table
and context:
# Path: py_stringsimjoin/utils/validation.py
# def validate_attr(attr, table_cols, attr_label, table_label):
# """Check if the attribute exists in the table."""
# if attr not in table_cols:
# raise AssertionError(attr_label + ' \'' + attr + '\' not found in ' + \
# table_label)
# return True
#
# def validate_input_table(table, table_label):
# """Check if the input table is a dataframe."""
# if not isinstance(table, pd.DataFrame):
# raise TypeError(table_label + ' is not a dataframe')
# return True
which might include code, classes, or functions. Output only the next line. | validate_attr(attr, input_table.columns, |
Given the code snippet: <|code_start|>"""Profiling tools"""
def profile_table_for_join(input_table, profile_attrs=None):
"""Profiles the attributes in the table to suggest implications for join.
Args:
input_table (DataFrame): input table to profile.
profile_attrs (list): list of attribute names from the input table to be
profiled (defaults to None). If not provided, all attributes in the
input table will be profiled.
Returns:
A dataframe consisting of profile output. Specifically, the dataframe
contains three columns,
1) 'Unique values' column, which shows the number of unique values in
each attribute,
2) 'Missing values' column, which shows the number of missing values in
each attribute, and
3) 'Comments' column, which contains comments about each attribute.
The output dataframe is indexed by attribute name, so that the
statistics for each attribute can be easily accessed using the attribute name.
"""
# check if the input table is a dataframe
<|code_end|>
, generate the next line using the imports in this file:
import pandas as pd
from py_stringsimjoin.utils.validation import validate_attr, \
validate_input_table
and context (functions, classes, or occasionally code) from other files:
# Path: py_stringsimjoin/utils/validation.py
# def validate_attr(attr, table_cols, attr_label, table_label):
# """Check if the attribute exists in the table."""
# if attr not in table_cols:
# raise AssertionError(attr_label + ' \'' + attr + '\' not found in ' + \
# table_label)
# return True
#
# def validate_input_table(table, table_label):
# """Check if the input table is a dataframe."""
# if not isinstance(table, pd.DataFrame):
# raise TypeError(table_label + ' is not a dataframe')
# return True
. Output only the next line. | validate_input_table(input_table, 'input table') |
Given the following code snippet before the placeholder: <|code_start|>
def get_pairs_with_missing_value_disk(ltable, rtable,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
temp_dir, data_limit_per_core,
missing_pairs_file_name, l_out_attrs=None,
r_out_attrs=None, l_out_prefix='l_',
r_out_prefix='r_', out_sim_score=False,
show_progress=True):
# find column indices of key attr, join attr and output attrs in ltable
l_columns = list(ltable.columns.values)
l_key_attr_index = l_columns.index(l_key_attr)
l_join_attr_index = l_columns.index(l_join_attr)
<|code_end|>
, predict the next line using imports from the current file:
import numpy as np
import pandas as pd
import pyprind
import os
from py_stringsimjoin.utils.generic_helper import \
find_output_attribute_indices, get_output_header_from_tables, \
get_output_row_from_tables
and context including class names, function names, and sometimes code from other files:
# Path: py_stringsimjoin/utils/generic_helper.py
# def find_output_attribute_indices(original_columns, output_attributes):
# output_attribute_indices = []
# if output_attributes is not None:
# for attr in output_attributes:
# output_attribute_indices.append(original_columns.index(attr))
# return output_attribute_indices
#
# def get_output_header_from_tables(l_key_attr, r_key_attr,
# l_out_attrs, r_out_attrs,
# l_out_prefix, r_out_prefix):
# output_header = []
#
# output_header.append(l_out_prefix + l_key_attr)
#
# output_header.append(r_out_prefix + r_key_attr)
#
# if l_out_attrs:
# for l_attr in l_out_attrs:
# output_header.append(l_out_prefix + l_attr)
#
# if r_out_attrs:
# for r_attr in r_out_attrs:
# output_header.append(r_out_prefix + r_attr)
#
# return output_header
#
# def get_output_row_from_tables(l_row, r_row,
# l_key_attr_index, r_key_attr_index,
# l_out_attrs_indices=None,
# r_out_attrs_indices=None):
# output_row = []
#
# # add ltable id attr
# output_row.append(l_row[l_key_attr_index])
#
# # add rtable id attr
# output_row.append(r_row[r_key_attr_index])
#
# # add ltable output attributes
# if l_out_attrs_indices:
# for l_attr_index in l_out_attrs_indices:
# output_row.append(l_row[l_attr_index])
#
# # add rtable output attributes
# if r_out_attrs_indices:
# for r_attr_index in r_out_attrs_indices:
# output_row.append(r_row[r_attr_index])
#
# return output_row
. Output only the next line. | l_out_attrs_indices = find_output_attribute_indices(l_columns, l_out_attrs) |
Here is a snippet: <|code_start|> l_out_attrs_indices = find_output_attribute_indices(l_columns, l_out_attrs)
# find column indices of key attr, join attr and output attrs in rtable
r_columns = list(rtable.columns.values)
r_key_attr_index = r_columns.index(r_key_attr)
r_join_attr_index = r_columns.index(r_join_attr)
r_out_attrs_indices = find_output_attribute_indices(r_columns, r_out_attrs)
# find ltable records with missing value in l_join_attr
ltable_missing = ltable[pd.isnull(ltable[l_join_attr])]
# find ltable records which do not contain missing value in l_join_attr
ltable_not_missing = ltable[pd.notnull(ltable[l_join_attr])]
# find rtable records with missing value in r_join_attr
rtable_missing = rtable[pd.isnull(rtable[r_join_attr])]
output_rows = []
has_output_attributes = (l_out_attrs is not None or
r_out_attrs is not None)
if show_progress:
print('Finding pairs with missing value...')
prog_bar = pyprind.ProgBar(len(ltable_missing) + len(rtable_missing))
# For each ltable record with missing value in l_join_attr,
# output a pair corresponding to every record in rtable.
for l_row in ltable_missing.itertuples(index=False):
for r_row in rtable.itertuples(index=False):
if has_output_attributes:
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
import pandas as pd
import pyprind
import os
from py_stringsimjoin.utils.generic_helper import \
find_output_attribute_indices, get_output_header_from_tables, \
get_output_row_from_tables
and context from other files:
# Path: py_stringsimjoin/utils/generic_helper.py
# def find_output_attribute_indices(original_columns, output_attributes):
# output_attribute_indices = []
# if output_attributes is not None:
# for attr in output_attributes:
# output_attribute_indices.append(original_columns.index(attr))
# return output_attribute_indices
#
# def get_output_header_from_tables(l_key_attr, r_key_attr,
# l_out_attrs, r_out_attrs,
# l_out_prefix, r_out_prefix):
# output_header = []
#
# output_header.append(l_out_prefix + l_key_attr)
#
# output_header.append(r_out_prefix + r_key_attr)
#
# if l_out_attrs:
# for l_attr in l_out_attrs:
# output_header.append(l_out_prefix + l_attr)
#
# if r_out_attrs:
# for r_attr in r_out_attrs:
# output_header.append(r_out_prefix + r_attr)
#
# return output_header
#
# def get_output_row_from_tables(l_row, r_row,
# l_key_attr_index, r_key_attr_index,
# l_out_attrs_indices=None,
# r_out_attrs_indices=None):
# output_row = []
#
# # add ltable id attr
# output_row.append(l_row[l_key_attr_index])
#
# # add rtable id attr
# output_row.append(r_row[r_key_attr_index])
#
# # add ltable output attributes
# if l_out_attrs_indices:
# for l_attr_index in l_out_attrs_indices:
# output_row.append(l_row[l_attr_index])
#
# # add rtable output attributes
# if r_out_attrs_indices:
# for r_attr_index in r_out_attrs_indices:
# output_row.append(r_row[r_attr_index])
#
# return output_row
, which may include functions, classes, or code. Output only the next line. | record = get_output_row_from_tables( |
Given the following code snippet before the placeholder: <|code_start|>
def get_pairs_with_missing_value(ltable, rtable,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
l_out_attrs=None, r_out_attrs=None,
l_out_prefix='l_', r_out_prefix='r_',
out_sim_score=False, show_progress=True):
# find column indices of key attr, join attr and output attrs in ltable
l_columns = list(ltable.columns.values)
l_key_attr_index = l_columns.index(l_key_attr)
l_join_attr_index = l_columns.index(l_join_attr)
<|code_end|>
, predict the next line using imports from the current file:
import numpy as np
import pandas as pd
import pyprind
from py_stringsimjoin.utils.generic_helper import \
find_output_attribute_indices, get_output_header_from_tables, \
get_output_row_from_tables
and context including class names, function names, and sometimes code from other files:
# Path: py_stringsimjoin/utils/generic_helper.py
# def find_output_attribute_indices(original_columns, output_attributes):
# output_attribute_indices = []
# if output_attributes is not None:
# for attr in output_attributes:
# output_attribute_indices.append(original_columns.index(attr))
# return output_attribute_indices
#
# def get_output_header_from_tables(l_key_attr, r_key_attr,
# l_out_attrs, r_out_attrs,
# l_out_prefix, r_out_prefix):
# output_header = []
#
# output_header.append(l_out_prefix + l_key_attr)
#
# output_header.append(r_out_prefix + r_key_attr)
#
# if l_out_attrs:
# for l_attr in l_out_attrs:
# output_header.append(l_out_prefix + l_attr)
#
# if r_out_attrs:
# for r_attr in r_out_attrs:
# output_header.append(r_out_prefix + r_attr)
#
# return output_header
#
# def get_output_row_from_tables(l_row, r_row,
# l_key_attr_index, r_key_attr_index,
# l_out_attrs_indices=None,
# r_out_attrs_indices=None):
# output_row = []
#
# # add ltable id attr
# output_row.append(l_row[l_key_attr_index])
#
# # add rtable id attr
# output_row.append(r_row[r_key_attr_index])
#
# # add ltable output attributes
# if l_out_attrs_indices:
# for l_attr_index in l_out_attrs_indices:
# output_row.append(l_row[l_attr_index])
#
# # add rtable output attributes
# if r_out_attrs_indices:
# for r_attr_index in r_out_attrs_indices:
# output_row.append(r_row[r_attr_index])
#
# return output_row
. Output only the next line. | l_out_attrs_indices = find_output_attribute_indices(l_columns, l_out_attrs) |
Next line prediction: <|code_start|> l_key_attr_index, r_key_attr_index,
l_out_attrs_indices, r_out_attrs_indices)
else:
output_row = [l_row[l_key_attr_index], r_row[r_key_attr_index]]
output_rows.append(output_row)
if show_progress:
prog_bar.update()
# For each rtable record with missing value in r_join_attr,
# output a pair corresponding to every record in ltable which
# doesn't have a missing value in l_join_attr.
for r_row in rtable_missing.itertuples(index=False):
for l_row in ltable_not_missing.itertuples(index=False):
if has_output_attributes:
output_row = get_output_row_from_tables(
l_row, r_row,
l_key_attr_index, r_key_attr_index,
l_out_attrs_indices, r_out_attrs_indices)
else:
output_row = [l_row[l_key_attr_index], r_row[r_key_attr_index]]
if out_sim_score:
output_row.append(np.NaN)
output_rows.append(output_row)
if show_progress:
prog_bar.update()
<|code_end|>
. Use current file imports:
(import numpy as np
import pandas as pd
import pyprind
from py_stringsimjoin.utils.generic_helper import \
find_output_attribute_indices, get_output_header_from_tables, \
get_output_row_from_tables)
and context including class names, function names, or small code snippets from other files:
# Path: py_stringsimjoin/utils/generic_helper.py
# def find_output_attribute_indices(original_columns, output_attributes):
# output_attribute_indices = []
# if output_attributes is not None:
# for attr in output_attributes:
# output_attribute_indices.append(original_columns.index(attr))
# return output_attribute_indices
#
# def get_output_header_from_tables(l_key_attr, r_key_attr,
# l_out_attrs, r_out_attrs,
# l_out_prefix, r_out_prefix):
# output_header = []
#
# output_header.append(l_out_prefix + l_key_attr)
#
# output_header.append(r_out_prefix + r_key_attr)
#
# if l_out_attrs:
# for l_attr in l_out_attrs:
# output_header.append(l_out_prefix + l_attr)
#
# if r_out_attrs:
# for r_attr in r_out_attrs:
# output_header.append(r_out_prefix + r_attr)
#
# return output_header
#
# def get_output_row_from_tables(l_row, r_row,
# l_key_attr_index, r_key_attr_index,
# l_out_attrs_indices=None,
# r_out_attrs_indices=None):
# output_row = []
#
# # add ltable id attr
# output_row.append(l_row[l_key_attr_index])
#
# # add rtable id attr
# output_row.append(r_row[r_key_attr_index])
#
# # add ltable output attributes
# if l_out_attrs_indices:
# for l_attr_index in l_out_attrs_indices:
# output_row.append(l_row[l_attr_index])
#
# # add rtable output attributes
# if r_out_attrs_indices:
# for r_attr_index in r_out_attrs_indices:
# output_row.append(r_row[r_attr_index])
#
# return output_row
. Output only the next line. | output_header = get_output_header_from_tables( |
Using the snippet: <|code_start|> l_out_attrs_indices = find_output_attribute_indices(l_columns, l_out_attrs)
# find column indices of key attr, join attr and output attrs in rtable
r_columns = list(rtable.columns.values)
r_key_attr_index = r_columns.index(r_key_attr)
r_join_attr_index = r_columns.index(r_join_attr)
r_out_attrs_indices = find_output_attribute_indices(r_columns, r_out_attrs)
# find ltable records with missing value in l_join_attr
ltable_missing = ltable[pd.isnull(ltable[l_join_attr])]
# find ltable records which do not contain missing value in l_join_attr
ltable_not_missing = ltable[pd.notnull(ltable[l_join_attr])]
# find rtable records with missing value in r_join_attr
rtable_missing = rtable[pd.isnull(rtable[r_join_attr])]
output_rows = []
has_output_attributes = (l_out_attrs is not None or
r_out_attrs is not None)
if show_progress:
print('Finding pairs with missing value...')
prog_bar = pyprind.ProgBar(len(ltable_missing) + len(rtable_missing))
# For each ltable record with missing value in l_join_attr,
# output a pair corresponding to every record in rtable.
for l_row in ltable_missing.itertuples(index=False):
for r_row in rtable.itertuples(index=False):
if has_output_attributes:
<|code_end|>
, determine the next line of code. You have imports:
import numpy as np
import pandas as pd
import pyprind
from py_stringsimjoin.utils.generic_helper import \
find_output_attribute_indices, get_output_header_from_tables, \
get_output_row_from_tables
and context (class names, function names, or code) available:
# Path: py_stringsimjoin/utils/generic_helper.py
# def find_output_attribute_indices(original_columns, output_attributes):
# output_attribute_indices = []
# if output_attributes is not None:
# for attr in output_attributes:
# output_attribute_indices.append(original_columns.index(attr))
# return output_attribute_indices
#
# def get_output_header_from_tables(l_key_attr, r_key_attr,
# l_out_attrs, r_out_attrs,
# l_out_prefix, r_out_prefix):
# output_header = []
#
# output_header.append(l_out_prefix + l_key_attr)
#
# output_header.append(r_out_prefix + r_key_attr)
#
# if l_out_attrs:
# for l_attr in l_out_attrs:
# output_header.append(l_out_prefix + l_attr)
#
# if r_out_attrs:
# for r_attr in r_out_attrs:
# output_header.append(r_out_prefix + r_attr)
#
# return output_header
#
# def get_output_row_from_tables(l_row, r_row,
# l_key_attr_index, r_key_attr_index,
# l_out_attrs_indices=None,
# r_out_attrs_indices=None):
# output_row = []
#
# # add ltable id attr
# output_row.append(l_row[l_key_attr_index])
#
# # add rtable id attr
# output_row.append(r_row[r_key_attr_index])
#
# # add ltable output attributes
# if l_out_attrs_indices:
# for l_attr_index in l_out_attrs_indices:
# output_row.append(l_row[l_attr_index])
#
# # add rtable output attributes
# if r_out_attrs_indices:
# for r_attr_index in r_out_attrs_indices:
# output_row.append(r_row[r_attr_index])
#
# return output_row
. Output only the next line. | output_row = get_output_row_from_tables( |
Given the following code snippet before the placeholder: <|code_start|> """
page = 1
numbers = []
while True:
json_data = perform_request(f'pulls?per_page=100&page={page}')
if not json_data:
break
for pr in json_data:
numbers.append(pr['number'])
if pr['state'] == 'open':
check_pr(pr)
page += 1
prs = tapi.pr.get(closed=False, limit=0)['objects']
for pr in prs:
if pr['number'] not in numbers:
logger.info("PR {} closed".format(pr['number']))
tapi.pr(pr['number']).patch({'closed': True})
def run_tasks():
""" Runs the pending tasks
This function first checks the number of dockerbot containers which are running the
py_test_base image running and stores this count. If the count is lower than the threshold,
it proceeds to grab a pending task from the list and provision an appliance. The appliance
is then configured before being handed off to Dockerbot for the running of the PR.
"""
cont_count = 0
<|code_end|>
, predict the next line using imports from the current file:
import json
import logging
import traceback
import fauxfactory
import pika
import requests
from datetime import datetime
from slumber.exceptions import HttpClientError
from cfme.utils.appliance import Appliance
from cfme.utils.conf import docker as docker_conf
from cfme.utils.dockerbot import dockerbot
from cfme.utils.log import setup_logger
from cfme.utils.trackerbot import api
and context including class names, function names, and sometimes code from other files:
# Path: cfme/utils/dockerbot/dockerbot.py
# def _dgci(d, key):
# def _name(docker_info):
# def docker_client(self):
# def __del__(self):
# def process_bindings(self, bindings):
# def wait(self):
# def stop(self):
# def remove(self):
# def kill(self):
# def __init__(self, bindings, image, dry_run=False):
# def run(self):
# def __init__(self, name, bindings, env, log_path, links, pytest_con, artifactor_dir,
# dry_run=False):
# def run(self):
# def __init__(self, **args):
# def cache_files(self):
# def get_base_branch(self, pr):
# def get_dev_branch(self, pr=None):
# def get_pr_metadata(self, pr=None):
# def find_files_by_pr(self, pr=None):
# def check_arg(self, name, default):
# def validate_args(self):
# def display_banner(self):
# def process_appliance(self):
# def create_pytest_command(self):
# def enc_key(self):
# def create_pytest_envvars(self):
# def handle_pr(self):
# def create_log_path(self):
# def create_pytest_bindings(self):
# def handle_watch(self):
# def handle_output(self):
# class DockerInstance:
# class SeleniumDocker(DockerInstance):
# class PytestDocker(DockerInstance):
# class DockerBot:
. Output only the next line. | for container in dockerbot.dc.containers(): |
Continue the code snippet: <|code_start|> soc = socket.create_connection((addr, int(port)), timeout=2)
except OSError:
return False
# docker-proxy opens the port immediately after container is started.
# Receive data from the socket to check if VNC session is really running.
if not soc.recv(1024):
return False
soc.close()
return True
@click.command(help='Starts selenium container for testing against')
@click.option('--watch', help='Opens VNC session', default=False, is_flag=True)
@click.option('--vnc', help='Chooses VNC port', default=5900)
@click.option('--webdriver', help='Chooses webdriver port', default=4444)
@click.option('--image', help='Chooses selenium container image',
default=docker_conf.get('selff', 'cfmeqe/sel_ff_chrome'))
@click.option('--vncviewer', help='Chooses VNC viewer command',
default=docker_conf.get('vncviewer', 'vncviewer'))
@click.option('--random-ports', is_flag=True, default=False,
help='Choose random ports for VNC, webdriver, (overrides --webdriver and --vnc)')
def main(watch, vnc, webdriver, image, vncviewer, random_ports):
"""Main function for running"""
ip = '127.0.0.1'
print("Starting container...")
vnc = random_port() if random_ports else vnc
webdriver = random_port() if random_ports else webdriver
<|code_end|>
. Use current file imports:
import socket
import subprocess
import sys
import click
from wait_for import TimedOutError
from wait_for import wait_for
from cfme.utils.conf import docker as docker_conf
from cfme.utils.dockerbot.dockerbot import SeleniumDocker
from cfme.utils.net import random_port
and context (classes, functions, or code) from other files:
# Path: cfme/utils/dockerbot/dockerbot.py
# class SeleniumDocker(DockerInstance):
# def __init__(self, bindings, image, dry_run=False):
# self.dry_run = dry_run
# self.container_name = fauxfactory.gen_alphanumeric(8)
# self.process_bindings(bindings)
# if not self.dry_run:
# self.container = self.docker_client.containers.create(image,
# tty=True,
# privileged=True,
# ports=self.port_bindings,
# name=self.container_name,)
# else:
# self.container_name = "DRY_RUN_SELENIUM_CONTAINER"
#
# def run(self):
# print(f"{'DRY RUN' if self.dry_run else self.container.id}: Running Selenium container")
# if not self.dry_run:
# self.container.start()
. Output only the next line. | dkb = SeleniumDocker(bindings={'VNC_PORT': (5999, vnc), |
Predict the next line after this snippet: <|code_start|> gc.collect()
map = map.add_attribute(i)
assert map.get_index(1) == 0
assert map.get_index(2) == 1
assert map.get_index(3) == 2
assert map.get_index(4) == -1
assert map.storage_size() == 3
# Check to ensure we get the same hidden class
# by adding the same attributes.
# GC aggressively to ensure we are not messing
# up due to use of weak hash tables
map2 = empty
for i in range(1, 4):
gc.collect()
map2 = map2.add_attribute(i)
assert map is map2
assert empty.other_maps.get(1) is not None
assert empty.other_maps.get(1).other_maps.get(2) is not None
assert empty.other_maps.get(1).other_maps.get(2).other_maps.get(3) is not None
map_ = empty.add_attribute(2).add_attribute(1).add_attribute(3)
assert map_ is not map
assert map_.get_index(2) == 0
assert map_.get_index(1) == 1
assert map_.get_index(3) == 2
def test_map_descriptors():
<|code_end|>
using the current file's imports:
import pytest
import gc
import gc
import gc
import gc
from pycket.hidden_classes import make_map_type, make_caching_map_type
from pycket.impersonators.hidden_classes.base import W_ImpPropertyDescriptor
and any relevant context from other files:
# Path: pycket/impersonators/hidden_classes/base.py
# class W_ImpPropertyDescriptor(values.W_Object):
# errorname = "chaperone-property"
# _immutable_fields_ = ["name"]
# def __init__(self, name):
# if we_are_translated():
# assert name is not None
# self.name = name
# def tostring(self):
# return "#<chaperone-property>"
. Output only the next line. | empty = make_map_type("__getitem__", W_ImpPropertyDescriptor).EMPTY |
Given snippet: <|code_start|>
DEBUG = False
def make_primitive_table(ls_str):
table = [None] * len(ls_str)
for i, exposed_id in enumerate(ls_str):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from pycket.values import W_Symbol
from pycket.prims.expose import define_nyi, prim_env, expose
from pycket.prims.expose import prim_env
from pycket.values import W_Symbol, W_Prim
and context:
# Path: pycket/values.py
# class W_Symbol(W_Object):
# errorname = "symbol"
# _attrs_ = ["unreadable", "_isascii", "_unicodevalue", "utf8value", "bar_quoted"]
# _immutable_fields_ = ["unreadable", "utf8value", "bar_quoted"]
#
# def __init__(self, val, unreadable=False):
# assert isinstance(val, str)
# self._unicodevalue = None
# self.utf8value = val
# self.unreadable = unreadable
# self._isascii = MAYBE
# self.bar_quoted = False
# if val == "" or val == ".":
# self.bar_quoted = True
# else:
# for q in " ()[]{}|\\,`'":
# if q in val:
# self.bar_quoted = True
# break
#
# def is_bar_quoted(self):
# return self.bar_quoted
#
# def is_unreadable(self):
# return self.unreadable
#
# @staticmethod
# def _cache_is_ascii(self):
# from pycket.values_string import _is_ascii
# if not we_are_translated():
# assert self._isascii == MAYBE
# if _is_ascii(self.utf8value):
# self._isascii = DEFINITELY_YES
# else:
# self._isascii = DEFINITELY_NO
# return self._isascii
#
# def asciivalue(self):
# isascii = jit.conditional_call_elidable(
# self._isascii, W_Symbol._cache_is_ascii, self)
# if isascii == DEFINITELY_NO:
# return None
# return self.utf8value
#
# @jit.elidable
# def unicodevalue(self):
# if self._unicodevalue is None:
# self._unicodevalue = self.utf8value.decode("utf-8")
# return self._unicodevalue
#
# @staticmethod
# @jit.elidable
# def make(string):
# # This assert statement makes the lowering phase of rpython break...
# # Maybe comment back in and check for bug.
# assert isinstance(string, str)
# w_result = W_Symbol.all_symbols.get(string, None)
# if w_result is None:
# w_result = W_Symbol(string)
# W_Symbol.all_symbols[string] = w_result
# return w_result
#
# @staticmethod
# @jit.elidable
# def make_unreadable(string):
# w_result = W_Symbol.unreadable_symbols.get(string, None)
# if w_result is None:
# w_result = W_Symbol(string, unreadable=True)
# W_Symbol.unreadable_symbols[string] = w_result
# return w_result
#
# def __repr__(self):
# return self.utf8value
#
# @jit.elidable
# def is_interned(self):
# if self.unreadable:
# return False
# string = self.utf8value
# symbol = W_Symbol.all_symbols.get(string, None)
# if symbol is self:
# return True
# return False
#
# def tostring(self):
# return "%s" % self.utf8value
#
# def variable_name(self):
# return self.utf8value
which might include code, classes, or functions. Output only the next line. | table[i] = W_Symbol.make(exposed_id) |
Using the snippet: <|code_start|>#! /usr/bin/env python
# -*- coding: utf-8 -*-
class TestConses(object):
def test_basics(self):
_1, _2, _3 = W_Fixnum(1), W_Fixnum(2), W_Fixnum(3)
c = W_Cons.make(_1, _2)
assert c.car().value == _1.value
assert c.cdr().value == _2.value
l = to_list([_1, _2, _3])
assert l.car().value == _1.value
assert l.cdr().car().value == _2.value
assert l.cdr().cdr().car().value == _3.value
assert l.cdr().cdr().cdr() == w_null
def test_basic_hl(self):
<|code_end|>
, determine the next line of code. You have imports:
from pycket.values import *
from pycket.test.testhelper import execute, run_fix, run
and context (class names, function names, or code) available:
# Path: pycket/test/testhelper.py
# def execute(p, stdlib=False, extra=""):
# return run_mod_expr(p, stdlib=stdlib, extra=extra)
#
# def run_fix(p, v=None, stdlib=False, extra="", equal_huh=False):
# new = pytest.config.new_pycket
# if new:
# return run_expr(p, v, equal_huh=equal_huh)
# ov = run_mod_expr(p,stdlib=stdlib, extra=extra)
# assert isinstance(ov, values.W_Fixnum)
# if v is not None:
# assert ov.value == v
# return ov.value
#
# def run(p, v=None, stdlib=False, extra="", expect_to_fail=False):
# if pytest.config.new_pycket:
# return run_expr_result(p, expect_to_fail=expect_to_fail, v=v)
# return run_mod_expr(p,v=v,stdlib=stdlib, extra=extra)
. Output only the next line. | run_fix("(car (cons 1 2))", 1) |
Predict the next line for this snippet: <|code_start|>#! /usr/bin/env python
# -*- coding: utf-8 -*-
class TestConses(object):
def test_basics(self):
_1, _2, _3 = W_Fixnum(1), W_Fixnum(2), W_Fixnum(3)
c = W_Cons.make(_1, _2)
assert c.car().value == _1.value
assert c.cdr().value == _2.value
l = to_list([_1, _2, _3])
assert l.car().value == _1.value
assert l.cdr().car().value == _2.value
assert l.cdr().cdr().car().value == _3.value
assert l.cdr().cdr().cdr() == w_null
def test_basic_hl(self):
run_fix("(car (cons 1 2))", 1)
run_fix("(cdr (cons 1 2))", 2)
run_fix("(car (list 1))", 1)
run_fix("(car (cdr (list 1 2)))", 2)
<|code_end|>
with the help of current file imports:
from pycket.values import *
from pycket.test.testhelper import execute, run_fix, run
and context from other files:
# Path: pycket/test/testhelper.py
# def execute(p, stdlib=False, extra=""):
# return run_mod_expr(p, stdlib=stdlib, extra=extra)
#
# def run_fix(p, v=None, stdlib=False, extra="", equal_huh=False):
# new = pytest.config.new_pycket
# if new:
# return run_expr(p, v, equal_huh=equal_huh)
# ov = run_mod_expr(p,stdlib=stdlib, extra=extra)
# assert isinstance(ov, values.W_Fixnum)
# if v is not None:
# assert ov.value == v
# return ov.value
#
# def run(p, v=None, stdlib=False, extra="", expect_to_fail=False):
# if pytest.config.new_pycket:
# return run_expr_result(p, expect_to_fail=expect_to_fail, v=v)
# return run_mod_expr(p,v=v,stdlib=stdlib, extra=extra)
, which may contain function names, class names, or code. Output only the next line. | run("(equal? (cons 1 2) (cons 1 2))", w_true) |
Predict the next line for this snippet: <|code_start|>
def set_config_val(self, name, val):
self.config[name] = val
def set_pycketconfig(self, c):
self.pycketconfig = c
def get_pycketconfig(self):
return self.pycketconfig
def is_expander_loaded(self):
return self.config['expander_loaded'] == 1
def is_repl_loaded(self):
return self.config['repl_loaded'] == 1
def lookup(self, s):
return self.config.get(s, None)
def load(self, ast):
if self.config is not None:
return
assert isinstance(ast, Module)
self.config = ast.config.copy()
def reset_callgraph(self):
self.callgraph = CallGraph()
w_global_config = GlobalConfig()
<|code_end|>
with the help of current file imports:
import sys
import os
from rpython.rlib import jit, objectmodel
from pycket.small_list import inline_small_list
from pycket.error import SchemeException
from pycket.base import W_Object
from pycket.callgraph import CallGraph
from pycket.config import get_testing_config
from pycket.interpreter import Module
from pycket.interpreter import Module
from rpython.config.config import Config
from pycket.values import W_Cell
from pycket.values import W_Cell
and context from other files:
# Path: pycket/base.py
# class W_Object(W_ProtoObject):
# __metaclass__ = extendabletype
# _attrs_ = []
# errorname = "%%%%unreachable%%%%"
#
# def __init__(self):
# raise NotImplementedError("abstract base class")
#
# def num_values(self):
# return 1
#
# def get_value(self, index):
# assert index == 0
# return self
#
# def get_all_values(self):
# return [self]
#
# def iscallable(self):
# return False
#
# def call(self, args, env, cont):
# raise SchemeException("%s is not callable\n args were: %s" % (self.tostring(),
# [s.tostring() for s in args]))
#
# def call_with_extra_info(self, args, env, cont, calling_app):
# return self.call(args, env, cont)
#
# def call_interpret(self, racket_vals):
# from pycket.interpreter import Done, interpret_one
# from pycket.env import ToplevelEnv, w_global_config
# from pycket.cont import NilCont, Prompt
# from pycket import values, values_parameter
# from pycket.prims.control import default_uncaught_exception_handler
#
# __pycketconfig = w_global_config.get_pycketconfig()
#
# t_env = ToplevelEnv(__pycketconfig)
#
# cont = NilCont()
# cont = Prompt(values.w_default_continuation_prompt_tag, None, t_env, cont)
# cont.update_cm(values.parameterization_key, values_parameter.top_level_config)
# cont.update_cm(values.exn_handler_key, default_uncaught_exception_handler)
#
# try:
# ast, env, cont = self.call_with_extra_info(racket_vals, t_env, cont, None)
# return interpret_one(ast, env, cont)
# except Done, e:
# return e.values
# except SchemeException, e:
# raise e
#
#
# def enable_jitting(self):
# pass # need to override in callables that are based on an AST
#
# # an arity is a pair of a list of numbers and either -1 or a non-negative integer
# def get_arity(self, promote=False):
# if self.iscallable():
# from pycket.arity import Arity
# return Arity.unknown
# else:
# raise SchemeException("%s does not have arity" % self.tostring())
#
# def get_result_arity(self):
# if self.iscallable():
# return None
# else:
# raise SchemeException("%s does not have result arity" % self.tostring())
#
# # Interface for structs
#
# def ref_with_extra_info(self, field, app, env, cont):
# raise SchemeException("%s is not a struct" % self.tostring())
#
# def set_with_extra_info(self, field, val, app, env, cont):
# raise SchemeException("%s is not a struct" % self.tostring())
#
# def struct_type(self):
# return None
#
# def get_prop(self, property, env, cont):
# raise SchemeException("%s is not a struct" % self.tostring())
#
# # Interface for proxies
#
# def is_impersonator(self):
# return self.is_chaperone()
# def is_chaperone(self):
# return False
# def is_proxy(self):
# return self.is_chaperone() or self.is_impersonator()
# def get_proxied(self):
# return self
# def get_base(self):
# return self
# def get_properties(self):
# return {}
# def is_non_interposing_chaperone(self):
# return False
# def replace_proxied(self, other):
# raise ValueError("Not a proxy")
#
# def is_proper_list(self, seen=[]):
# return False
#
# def immutable(self):
# return False
#
# def equal(self, other):
# return self is other # default implementation
#
# def eqv(self, other):
# return self is other # default implementation
#
# def hash_equal(self, info=None):
# return 10
#
# def hash_eqv(self):
# # default to hash_eq
# return objectmodel.compute_hash(self)
#
# def tostring(self):
# return str(self)
#
# def to_sexp(self):
# return self
#
# # for expose
# @classmethod
# def make_unwrapper(cls, unbox=False):
# if cls is W_Object:
# return lambda x: x, ''
# def unwrap(w_object):
# if isinstance(w_object, cls):
# return w_object
# return None
# return unwrap, cls.errorname
, which may contain function names, class names, or code. Output only the next line. | class Env(W_Object): |
Based on the snippet: <|code_start|>
def test_flonum_tostring():
s = '3.141592653589793' # racket -e "pi"
def float_tostring(x):
print W_Flonum(x).tostring()
return s in W_Flonum(x).tostring()
res = interpret(float_tostring, [math.pi])
assert res
def test_mul_zero():
<|code_end|>
, predict the immediate next line with the help of imports:
import math
import pytest
import math
import random
from pycket.interpreter import *
from pycket.values import *
from pycket.prims import *
from pycket.test.testhelper import run_fix, run, run_top, run_std, run_flo
from pycket.error import SchemeException
from rpython.rtyper.test.test_llinterp import interpret
from rpython.rlib.rbigint import rbigint
from pycket.arithmetic import gcd
from rpython.rlib.rbigint import rbigint
from pycket.arithmetic import gcd
from rpython.rlib.rbigint import ONERBIGINT
from pycket.arithmetic import count_trailing_zeros
and context (classes, functions, sometimes code) from other files:
# Path: pycket/test/testhelper.py
# def run_fix(p, v=None, stdlib=False, extra="", equal_huh=False):
# new = pytest.config.new_pycket
# if new:
# return run_expr(p, v, equal_huh=equal_huh)
# ov = run_mod_expr(p,stdlib=stdlib, extra=extra)
# assert isinstance(ov, values.W_Fixnum)
# if v is not None:
# assert ov.value == v
# return ov.value
#
# def run(p, v=None, stdlib=False, extra="", expect_to_fail=False):
# if pytest.config.new_pycket:
# return run_expr_result(p, expect_to_fail=expect_to_fail, v=v)
# return run_mod_expr(p,v=v,stdlib=stdlib, extra=extra)
#
# def run_top(p, v=None, stdlib=False, extra=""):
# return run_mod_expr(p,v=v,stdlib=stdlib, wrap=True, extra=extra)
#
# def run_std(c, v):
# return run_top(c, v, stdlib=True)
#
# def run_flo(p, v=None, stdlib=False, extra=""):
# ov = run_mod_expr(p,stdlib=stdlib, extra=extra)
# assert isinstance(ov, values.W_Flonum)
# if v is not None:
# assert ov.value == v
# return ov.value
. Output only the next line. | run_fix("(* 0 1.2)", 0) |
Given snippet: <|code_start|> res = interpret(float_tostring, [math.pi])
assert res
def test_mul_zero():
run_fix("(* 0 1.2)", 0)
run_fix("(* 1.2 0)", 0)
def test_quotient():
run_fix("(quotient 0 1)", 0)
run_fix("(quotient 0 -1)", 0)
run_fix("(quotient 0 2)", 0)
run_fix("(quotient 0 -2)", 0)
run_fix("(quotient 0 3)", 0)
run_fix("(quotient 1 1)", 1)
run_fix("(quotient -1 1)", -1)
run_fix("(quotient 1 -1)", -1)
run_fix("(quotient -1 -1)", 1)
run_fix("(quotient 1 2)", 0)
run_fix("(quotient -1 2)", 0)
run_fix("(quotient 1 -2)", 0)
run_fix("(quotient -1 -2)", 0)
run_fix("(quotient -1234 -10)", 123)
run_fix("(quotient 1234 1234)", 1)
big = 2 ** 70
run_fix("(quotient %s %s)" % (big, big), 1)
run_fix("(quotient %s %s)" % (-big, big), -1)
run_fix("(quotient %s %s)" % (big, -big), -1)
run_fix("(quotient %s %s)" % (-big, -big), 1)
run_fix("(quotient %s %s)" % (big+1, big), 1)
run_fix("(quotient %s %s)" % (-(big+1), big), -1)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import math
import pytest
import math
import random
from pycket.interpreter import *
from pycket.values import *
from pycket.prims import *
from pycket.test.testhelper import run_fix, run, run_top, run_std, run_flo
from pycket.error import SchemeException
from rpython.rtyper.test.test_llinterp import interpret
from rpython.rlib.rbigint import rbigint
from pycket.arithmetic import gcd
from rpython.rlib.rbigint import rbigint
from pycket.arithmetic import gcd
from rpython.rlib.rbigint import ONERBIGINT
from pycket.arithmetic import count_trailing_zeros
and context:
# Path: pycket/test/testhelper.py
# def run_fix(p, v=None, stdlib=False, extra="", equal_huh=False):
# new = pytest.config.new_pycket
# if new:
# return run_expr(p, v, equal_huh=equal_huh)
# ov = run_mod_expr(p,stdlib=stdlib, extra=extra)
# assert isinstance(ov, values.W_Fixnum)
# if v is not None:
# assert ov.value == v
# return ov.value
#
# def run(p, v=None, stdlib=False, extra="", expect_to_fail=False):
# if pytest.config.new_pycket:
# return run_expr_result(p, expect_to_fail=expect_to_fail, v=v)
# return run_mod_expr(p,v=v,stdlib=stdlib, extra=extra)
#
# def run_top(p, v=None, stdlib=False, extra=""):
# return run_mod_expr(p,v=v,stdlib=stdlib, wrap=True, extra=extra)
#
# def run_std(c, v):
# return run_top(c, v, stdlib=True)
#
# def run_flo(p, v=None, stdlib=False, extra=""):
# ov = run_mod_expr(p,stdlib=stdlib, extra=extra)
# assert isinstance(ov, values.W_Flonum)
# if v is not None:
# assert ov.value == v
# return ov.value
which might include code, classes, or functions. Output only the next line. | res = run(str(big / 2)) |
Predict the next line after this snippet: <|code_start|> newgs.append((og, ng))
return newgs
# ================================================
#
# FIGURES
#
# ================================================
def make_figure(standard_id, scores, collection, args):
print_fig_info()
sns.set_style('white')
fig_file = os.path.join(args.output_dir, '{0}_{1}_{2}.pdf'.format(args.db, collection, standard_id))
x = list(scores['germ_divergence'].values)
y = list(scores['identity'].values)
xy_vals = zip(x, y)
trunc_xy_vals = [v for v in xy_vals if v[0] <= args.x_max and v[1] >= args.y_min]
x = [v[0] for v in trunc_xy_vals]
y = [v[1] for v in trunc_xy_vals]
# To make sure the gridsize is correct (since it's based on the actual values)
# I need to add a single value near the max and min of each axis.
# They're added just outside the visible plot, so there's no effect on the plot.
x.extend([args.x_min - 1, args.x_max + 1])
y.extend([args.y_min - 1, args.y_max + 1])
# plot params
<|code_end|>
using the current file's imports:
import multiprocessing as mp
import platform
import os
import subprocess as sp
import sys
import tempfile
import time
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import argparse
from threading import Thread
from pymongo import MongoClient
from Bio import SeqIO
from abtools import color, log, mongodb
from abtools.utils import progbar
from abtools.queue.tasks import identity
from abtools.queue.tasks import identity
and any relevant context from other files:
# Path: abtools/color.py
# def cmap_from_color(color, dark=False):
# def hex_to_rgb(hex_string):
# def rgb_to_hex(rgb_tuple):
# def hls(n_colors, hue=0.01, lightness=0.6, saturation=0.65):
# def husl(n_colors, hue=0.01, saturation=0.9, lightness=0.65):
# def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=256):
# def stack_colormap(lower, upper, n=256):
# def get_cmap(cmap=None, name=None, from_color=None, dark=False, n=256):
# A = get_cmap(lower)
# B = get_cmap(upper)
# A = np.loadtxt(cmap, delimiter=None) # None: white space
# A = cmap # numpy array or array-like
#
# Path: abtools/log.py
# def setup_logging(logfile, print_log_location=True, debug=False):
# def get_logger(name=None):
# def add_stream_handler(logger):
# def make_dir(d):
#
# Path: abtools/mongodb.py
# def get_connection(ip='localhost', port=27017, user=None, password=None):
# def get_db(db, ip='localhost', port=27017, user=None, password=None):
# def get_collections(db, collection=None, prefix=None, suffix=None):
# def rename_collection(db, collection, new_name):
# def update(field, value, db, collection, match=None):
# def unset(db, collection, field, match=None):
# def mongoimport(json, database,
# ip='localhost', port=27017,
# user=None, password=None,
# delim='_', delim1=None, delim2=None,
# delim_occurance=1, delim1_occurance=1, delim2_occurance=1):
# def index(db, collection, fields, directions=None, desc=False, background=False):
# def remove_padding(db, collection, field='padding'):
# def _get_import_collections(jsons, delim, delim_occurance,
# delim1, delim1_occurance,
# delim2, delim2_occurance):
# def _print_mongoimport_info(logger):
# def _print_remove_padding():
#
# Path: abtools/utils/progbar.py
# def progress_bar(finished, total, start_time=None):
. Output only the next line. | cmap = color.get_cmap(args.colormap) |
Next line prediction: <|code_start|> help="Use nucleotide sequences for alignment. Default is amino acid sequences. \
Ensure standard format matches.")
parser.add_argument('-x', '--xmin', dest='x_min', type=int, default=-1,
help="Minimum X-axis (germline divergence) value for the AbCompare plot. Default is -1.")
parser.add_argument('-X', '--xmax', dest='x_max', type=int, default=35,
help="Maximum X-axis (germline divergence) value for the AbCompare plot. Default is 35.")
parser.add_argument('-y', '--ymin', dest='y_min', type=int, default=65,
help="Minimum Y-axis (mAb identity) value for the AbCompare plot. Default is 65.")
parser.add_argument('-Y', '--ymax', dest='y_max', type=int, default=101,
help="Maximum Y-axis (mAb identity) value for the AbCompare plot. Default is 101.")
parser.add_argument('-g', '--gridsize', dest='gridsize', type=int, default=0,
help="Gridsize for the AbFinder hexbin plot. \
Default is 36 for amino acid sequences and 50 for nucleotide sequences.")
parser.add_argument('--colormap', dest='colormap', default='Blues',
help="Colormap to be used in the AbFinder hexbin plots. \
Can accept a matplotlib cmap or the name of one of matplotlib's builtin cmaps. \
Default is 'Blues'.")
parser.add_argument('--mincount', dest='mincount', default=3, type=int,
help="Minimum number of sequences in a hexbin for that hexbin to be colored. \
Default is 3.")
parser.add_argument('--skip-padding', dest='remove_padding', default=True, action='store_false',
help="If set, will not remove padding field from MongoDB.")
parser.add_argument('-D', '--debug', dest="debug", action='store_true', default=False,
help="If set, will write all failed/exception sequences to file \
and should give more informative errors.")
return parser
class Args(object):
def __init__(self, db=None, collection=None,
<|code_end|>
. Use current file imports:
(import multiprocessing as mp
import platform
import os
import subprocess as sp
import sys
import tempfile
import time
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import argparse
from threading import Thread
from pymongo import MongoClient
from Bio import SeqIO
from abtools import color, log, mongodb
from abtools.utils import progbar
from abtools.queue.tasks import identity
from abtools.queue.tasks import identity)
and context including class names, function names, or small code snippets from other files:
# Path: abtools/color.py
# def cmap_from_color(color, dark=False):
# def hex_to_rgb(hex_string):
# def rgb_to_hex(rgb_tuple):
# def hls(n_colors, hue=0.01, lightness=0.6, saturation=0.65):
# def husl(n_colors, hue=0.01, saturation=0.9, lightness=0.65):
# def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=256):
# def stack_colormap(lower, upper, n=256):
# def get_cmap(cmap=None, name=None, from_color=None, dark=False, n=256):
# A = get_cmap(lower)
# B = get_cmap(upper)
# A = np.loadtxt(cmap, delimiter=None) # None: white space
# A = cmap # numpy array or array-like
#
# Path: abtools/log.py
# def setup_logging(logfile, print_log_location=True, debug=False):
# def get_logger(name=None):
# def add_stream_handler(logger):
# def make_dir(d):
#
# Path: abtools/mongodb.py
# def get_connection(ip='localhost', port=27017, user=None, password=None):
# def get_db(db, ip='localhost', port=27017, user=None, password=None):
# def get_collections(db, collection=None, prefix=None, suffix=None):
# def rename_collection(db, collection, new_name):
# def update(field, value, db, collection, match=None):
# def unset(db, collection, field, match=None):
# def mongoimport(json, database,
# ip='localhost', port=27017,
# user=None, password=None,
# delim='_', delim1=None, delim2=None,
# delim_occurance=1, delim1_occurance=1, delim2_occurance=1):
# def index(db, collection, fields, directions=None, desc=False, background=False):
# def remove_padding(db, collection, field='padding'):
# def _get_import_collections(jsons, delim, delim_occurance,
# delim1, delim1_occurance,
# delim2, delim2_occurance):
# def _print_mongoimport_info(logger):
# def _print_remove_padding():
#
# Path: abtools/utils/progbar.py
# def progress_bar(finished, total, start_time=None):
. Output only the next line. | output=None, temp=None, log=None, cluster=False, |
Given snippet: <|code_start|> seq_counter = 0
if fastas:
files.append(write_to_temp_file(fastas, temp_dir))
return files
def write_to_temp_file(fastas, temp_dir):
tfile = tempfile.NamedTemporaryFile(dir=temp_dir, delete=False)
tfile.write('\n'.join(fastas))
tfile.close()
return tfile.name
def clean_up(files):
for f in files:
os.unlink(f)
# ================================================
#
# MONGO
#
# ================================================
def query(db, collection, args):
coll = db[collection]
chain = get_chain(args)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import multiprocessing as mp
import platform
import os
import subprocess as sp
import sys
import tempfile
import time
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import argparse
from threading import Thread
from pymongo import MongoClient
from Bio import SeqIO
from abtools import color, log, mongodb
from abtools.utils import progbar
from abtools.queue.tasks import identity
from abtools.queue.tasks import identity
and context:
# Path: abtools/color.py
# def cmap_from_color(color, dark=False):
# def hex_to_rgb(hex_string):
# def rgb_to_hex(rgb_tuple):
# def hls(n_colors, hue=0.01, lightness=0.6, saturation=0.65):
# def husl(n_colors, hue=0.01, saturation=0.9, lightness=0.65):
# def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=256):
# def stack_colormap(lower, upper, n=256):
# def get_cmap(cmap=None, name=None, from_color=None, dark=False, n=256):
# A = get_cmap(lower)
# B = get_cmap(upper)
# A = np.loadtxt(cmap, delimiter=None) # None: white space
# A = cmap # numpy array or array-like
#
# Path: abtools/log.py
# def setup_logging(logfile, print_log_location=True, debug=False):
# def get_logger(name=None):
# def add_stream_handler(logger):
# def make_dir(d):
#
# Path: abtools/mongodb.py
# def get_connection(ip='localhost', port=27017, user=None, password=None):
# def get_db(db, ip='localhost', port=27017, user=None, password=None):
# def get_collections(db, collection=None, prefix=None, suffix=None):
# def rename_collection(db, collection, new_name):
# def update(field, value, db, collection, match=None):
# def unset(db, collection, field, match=None):
# def mongoimport(json, database,
# ip='localhost', port=27017,
# user=None, password=None,
# delim='_', delim1=None, delim2=None,
# delim_occurance=1, delim1_occurance=1, delim2_occurance=1):
# def index(db, collection, fields, directions=None, desc=False, background=False):
# def remove_padding(db, collection, field='padding'):
# def _get_import_collections(jsons, delim, delim_occurance,
# delim1, delim1_occurance,
# delim2, delim2_occurance):
# def _print_mongoimport_info(logger):
# def _print_remove_padding():
#
# Path: abtools/utils/progbar.py
# def progress_bar(finished, total, start_time=None):
which might include code, classes, or functions. Output only the next line. | mongodb.index(db, collection, ['chain']) |
Given the following code snippet before the placeholder: <|code_start|>
def chunker(l, n):
'Generator that produces n-length chunks from iterable l.'
for i in xrange(0, len(l), n):
yield l[i:i + n]
def update_db(db, standard, scores, collection, args):
db = mongodb.get_db(args.db, args.ip, args.port, args.user, args.password)
print_index_info()
mongodb.index(db, collection, ['seq_id'])
print_update_info()
start = time.time()
conn = mongodb.get_connection(args.ip, args.port,
args.user, args.password)
mongo_version = conn.server_info()['version']
standard = standard.replace('.', '_')
g = scores.groupby('identity')
groups = regroup(g.groups)
for g in range(0, len(groups), args.update_threads):
tlist = []
for group in groups[g:g + args.update_threads]:
t = Thread(target=update, args=(db, collection, group, standard, mongo_version, args))
t.start()
tlist.append(t)
for t in tlist:
t.join()
<|code_end|>
, predict the next line using imports from the current file:
import multiprocessing as mp
import platform
import os
import subprocess as sp
import sys
import tempfile
import time
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import argparse
from threading import Thread
from pymongo import MongoClient
from Bio import SeqIO
from abtools import color, log, mongodb
from abtools.utils import progbar
from abtools.queue.tasks import identity
from abtools.queue.tasks import identity
and context including class names, function names, and sometimes code from other files:
# Path: abtools/color.py
# def cmap_from_color(color, dark=False):
# def hex_to_rgb(hex_string):
# def rgb_to_hex(rgb_tuple):
# def hls(n_colors, hue=0.01, lightness=0.6, saturation=0.65):
# def husl(n_colors, hue=0.01, saturation=0.9, lightness=0.65):
# def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=256):
# def stack_colormap(lower, upper, n=256):
# def get_cmap(cmap=None, name=None, from_color=None, dark=False, n=256):
# A = get_cmap(lower)
# B = get_cmap(upper)
# A = np.loadtxt(cmap, delimiter=None) # None: white space
# A = cmap # numpy array or array-like
#
# Path: abtools/log.py
# def setup_logging(logfile, print_log_location=True, debug=False):
# def get_logger(name=None):
# def add_stream_handler(logger):
# def make_dir(d):
#
# Path: abtools/mongodb.py
# def get_connection(ip='localhost', port=27017, user=None, password=None):
# def get_db(db, ip='localhost', port=27017, user=None, password=None):
# def get_collections(db, collection=None, prefix=None, suffix=None):
# def rename_collection(db, collection, new_name):
# def update(field, value, db, collection, match=None):
# def unset(db, collection, field, match=None):
# def mongoimport(json, database,
# ip='localhost', port=27017,
# user=None, password=None,
# delim='_', delim1=None, delim2=None,
# delim_occurance=1, delim1_occurance=1, delim2_occurance=1):
# def index(db, collection, fields, directions=None, desc=False, background=False):
# def remove_padding(db, collection, field='padding'):
# def _get_import_collections(jsons, delim, delim_occurance,
# delim1, delim1_occurance,
# delim2, delim2_occurance):
# def _print_mongoimport_info(logger):
# def _print_remove_padding():
#
# Path: abtools/utils/progbar.py
# def progress_bar(finished, total, start_time=None):
. Output only the next line. | progbar.progress_bar(g + args.update_threads, len(groups)) |
Using the snippet: <|code_start|> will be imported
ip (str): IP address of the MongoDB server. Default is ``localhost``.
port (int): Port of the MongoDB database. Default is ``27017``.
user (str): Username for the MongoDB database, if authentication is enabled.
Default is ``None``, which results in attempting connection without
authentication.
password (str): Password for the MongoDB database, if authentication is enabled.
Default is ``None``, which results in attempting connection without
authentication.
delim (str): Delimiter, when generating collection names using a single delimiter.
Default is ``_``
delim_occurance (int): Occurance at which to split filename when using a
single delimiter. Default is ``1``
delim1 (str): Left delimiter when splitting with two delimiters. Default is None.
delim1_occurance (int): Occurance of ``delim1`` at which to split filename.
Default is ``1``
delim2 (str): Right delimiter when splitting with two delimiters. Default is None.
delim2_occurance (int): Occurance of ``delim2`` at which to split filename.
Default is ``1``
'''
<|code_end|>
, determine the next line of code. You have imports:
import logging
import platform
import os
import subprocess as sp
import urllib
import urllib
import pymongo
from pymongo import MongoClient
from abtools import log
from abtools.utils.pipeline import list_files
and context (class names, function names, or code) available:
# Path: abtools/log.py
# def setup_logging(logfile, print_log_location=True, debug=False):
# def get_logger(name=None):
# def add_stream_handler(logger):
# def make_dir(d):
. Output only the next line. | logger = log.get_logger('mongodb') |
Next line prediction: <|code_start|> parser.add_argument('-J', '--join', dest='join_plot', default=None,
help="Plot the distribution of joining families or genes. \
Only option is 'gene'. \
If not provided, no plot will be made.")
parser.add_argument('-3', '--cdr3', dest='cdr3_plot', default=None,
help="Plot the distribution of CDR3 lengths, as nucleotide or amino acid. \
Options are 'nt', 'aa', or 'both'. \
If not provided, no plot will be made.")
parser.add_argument('-H', '--heatmap', dest='heatmap', default=False, action='store_true',
help="If set, generates a heatmap (or quiltplot, if you're a novice) of combined variable and joining gene use. \
If not provided, no plot will be made.")
parser.add_argument('-I', '--isotype', dest='isotype_plot', default=False, action='store_true',
help="Plot the isotype frequency. \
Requires that isotypes have been identified using AbStar, or the 'isotype' field is present in the given MongoDB collection. \
If not provided, no plot will be made.")
parser.add_argument('-C', '--chain', dest='chain', default='heavy', choices=['heavy', 'kappa', 'lambda'],
help="Select the antibody chain to analyze. \
Options are 'heavy', 'kappa', and 'lambda'. \
Default is 'heavy'.")
parser.add_argument('-s', '--species', dest='species', default='human', choices=['human', 'mouse', 'macaque'],
help="Species of the sequence data. Choices are 'human', 'mouse', and 'macaque'. \
Default is 'human'.")
parser.add_argument('--debug', dest='debug', action='store_true', default=False,
help="If set, will run in debug mode.")
return parser
class Args(object):
"""docstring for Args"""
<|code_end|>
. Use current file imports:
(import os
import sys
import uuid
import time
import shelve
import urllib
import sqlite3
import tempfile
import argparse
import subprocess as sp
import multiprocessing as mp
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from StringIO import StringIO
from collections import Counter
from pymongo import MongoClient
from abtools import log, mongodb
from abstar.core.germline import get_germlines)
and context including class names, function names, or small code snippets from other files:
# Path: abtools/log.py
# def setup_logging(logfile, print_log_location=True, debug=False):
# def get_logger(name=None):
# def add_stream_handler(logger):
# def make_dir(d):
#
# Path: abtools/mongodb.py
# def get_connection(ip='localhost', port=27017, user=None, password=None):
# def get_db(db, ip='localhost', port=27017, user=None, password=None):
# def get_collections(db, collection=None, prefix=None, suffix=None):
# def rename_collection(db, collection, new_name):
# def update(field, value, db, collection, match=None):
# def unset(db, collection, field, match=None):
# def mongoimport(json, database,
# ip='localhost', port=27017,
# user=None, password=None,
# delim='_', delim1=None, delim2=None,
# delim_occurance=1, delim1_occurance=1, delim2_occurance=1):
# def index(db, collection, fields, directions=None, desc=False, background=False):
# def remove_padding(db, collection, field='padding'):
# def _get_import_collections(jsons, delim, delim_occurance,
# delim1, delim1_occurance,
# delim2, delim2_occurance):
# def _print_mongoimport_info(logger):
# def _print_remove_padding():
. Output only the next line. | def __init__(self, output=None, temp=None, log=None, |
Based on the snippet: <|code_start|>
def print_collection_info(collection):
print('')
print('')
print('-' * 25)
print(collection)
print('-' * 25)
print('')
def run(**kwargs):
args = Args(**kwargs)
main(args)
def run_standalone(args):
logfile = args.log if args.log else os.path.join(args.output, 'abstats.log')
log.setup_logging(logfile)
global logger
logger = log.get_logger('abstats')
main(args)
def main(args):
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import sys
import uuid
import time
import shelve
import urllib
import sqlite3
import tempfile
import argparse
import subprocess as sp
import multiprocessing as mp
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from StringIO import StringIO
from collections import Counter
from pymongo import MongoClient
from abtools import log, mongodb
from abstar.core.germline import get_germlines
and context (classes, functions, sometimes code) from other files:
# Path: abtools/log.py
# def setup_logging(logfile, print_log_location=True, debug=False):
# def get_logger(name=None):
# def add_stream_handler(logger):
# def make_dir(d):
#
# Path: abtools/mongodb.py
# def get_connection(ip='localhost', port=27017, user=None, password=None):
# def get_db(db, ip='localhost', port=27017, user=None, password=None):
# def get_collections(db, collection=None, prefix=None, suffix=None):
# def rename_collection(db, collection, new_name):
# def update(field, value, db, collection, match=None):
# def unset(db, collection, field, match=None):
# def mongoimport(json, database,
# ip='localhost', port=27017,
# user=None, password=None,
# delim='_', delim1=None, delim2=None,
# delim_occurance=1, delim1_occurance=1, delim2_occurance=1):
# def index(db, collection, fields, directions=None, desc=False, background=False):
# def remove_padding(db, collection, field='padding'):
# def _get_import_collections(jsons, delim, delim_occurance,
# delim1, delim1_occurance,
# delim2, delim2_occurance):
# def _print_mongoimport_info(logger):
# def _print_remove_padding():
. Output only the next line. | db = mongodb.get_db(args.db, ip=args.ip, port=args.port, user=args.user, password=args.password) |
Predict the next line after this snippet: <|code_start|># License: The MIT license (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
def monitor_mp_jobs(results):
finished = 0
jobs = len(results)
while finished < jobs:
time.sleep(1)
ready = [ar for ar in results if ar.ready()]
finished = len(ready)
<|code_end|>
using the current file's imports:
import time
from abtools.utils import progbar
and any relevant context from other files:
# Path: abtools/utils/progbar.py
# def progress_bar(finished, total, start_time=None):
. Output only the next line. | progbar.progress_bar(finished, jobs) |
Next line prediction: <|code_start|> help="Port for the MongoDB server. Defaults to 27017.")
parser.add_argument('-u', '--user', dest='user', default=None,
help="Username for the MongoDB server. Not used if not provided.")
parser.add_argument('-p', '--password', dest='password', default=None,
help="Password for the MongoDB server. Not used if not provided.")
parser.add_argument('-k', '--chunksize', dest='chunksize', type=int, default=100000,
help="Number of sequences to use for each similarity calculation. \
Default is 100,000.")
parser.add_argument('-I', '--iterations', dest='iterations', type=int, default=1000,
help="Number of iterations of the similarity calculation to perform. \
Default is 10000.")
parser.add_argument('-s', '--similarity-method', dest='method',
choices=['morisita-horn', 'kullback-leibler', 'jensen-shannon', 'jaccard', 'bray-curtis', 'renkonen', 'cosine'],
default='morisita-horn',
help="The similarity calculation to use. \
Options are 'morisita-horn', 'kullback-leibler' and 'jensen-shannon'. \
Note that kullback-leibler is actually a divergence measure (lower values indicate greater similarity) \
Default is 'morisita-horn'.")
parser.add_argument('-c', '--control', dest='control_similarity', default=False, action='store_true',
help="Plot the control similarity of the two collections")
parser.add_argument('-C', '--chain', dest='chain', default='heavy', choices=['heavy', 'kappa', 'lambda'],
help="Select the antibody chain to analyze. \
Options are 'heavy', 'kappa', and 'lambda'. \
Default is 'heavy'.")
parser.add_argument('--debug', dest='debug', action='store_true', default=False,
help="If set, will run in debug mode.")
return parser
class Args(object):
<|code_end|>
. Use current file imports:
(from collections import Counter
from StringIO import StringIO
from abtools import log, mongodb
import itertools
import math
import multiprocessing as mp
import os
import random
import sqlite3
import subprocess as sp
import sys
import tempfile
import time
import urllib
import uuid
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import argparse)
and context including class names, function names, or small code snippets from other files:
# Path: abtools/log.py
# def setup_logging(logfile, print_log_location=True, debug=False):
# def get_logger(name=None):
# def add_stream_handler(logger):
# def make_dir(d):
#
# Path: abtools/mongodb.py
# def get_connection(ip='localhost', port=27017, user=None, password=None):
# def get_db(db, ip='localhost', port=27017, user=None, password=None):
# def get_collections(db, collection=None, prefix=None, suffix=None):
# def rename_collection(db, collection, new_name):
# def update(field, value, db, collection, match=None):
# def unset(db, collection, field, match=None):
# def mongoimport(json, database,
# ip='localhost', port=27017,
# user=None, password=None,
# delim='_', delim1=None, delim2=None,
# delim_occurance=1, delim1_occurance=1, delim2_occurance=1):
# def index(db, collection, fields, directions=None, desc=False, background=False):
# def remove_padding(db, collection, field='padding'):
# def _get_import_collections(jsons, delim, delim_occurance,
# delim1, delim1_occurance,
# delim2, delim2_occurance):
# def _print_mongoimport_info(logger):
# def _print_remove_padding():
. Output only the next line. | def __init__(self, output=None, log=None, db=None, |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.