| Instruction (string, length 362–7.83k) | output_code (string, length 1–945) |
|---|---|
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
_VERSION = '2.4'
_OPTIONS = ''
def install(version=_VERSION, options=_OPTIONS):
package.ensure(['git-core', 'build-essential'])
tmpdir = dir.temp()
try:
with ctx.cd(tmpdir):
run('git clone git://github.com/antirez/redis.git ./ --depth 1')
run('git checkout %s' % version)
run('make %s > /dev/null' % options)
sudo('make install')
finally:
dir.remove(tmpdir, recursive=True)
def ensure(version=_VERSION, options=_OPTIONS):
# TODO Check if version if fulfilled
if command.exists('redis-server'):
return
<|code_end|>
with the help of current file imports:
from revolver import command, package
from revolver import contextmanager as ctx
from revolver import directory as dir
from revolver.core import sudo, run
and context from other files:
# Path: revolver/command.py
#
# Path: revolver/package.py
# def is_installed(name):
# def install_ppa(name):
#
# Path: revolver/contextmanager.py
# def sudo(username=None, login=False):
# def unpatched_state():
#
# Path: revolver/directory.py
# def temp_local():
# def temp(mode=None, owner=None, group=None):
# def remove(location, recursive=False, force=True):
# def create(path, recursive=False, mode=None, owner=None, group=None):
#
# Path: revolver/core.py
# def sudo(*args, **kwargs):
# if env.sudo_user:
# kwargs['user'] = env.sudo_user
#
# return _sudo(*args, **kwargs)
#
# def run(*args, **kwargs):
# if not env.sudo_forced:
# return _run(*args, **kwargs)
#
# return sudo(*args, **kwargs)
, which may contain function names, class names, or code. Output only the next line. | install(version, options) |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
def install():
package.ensure(["curl", "git-core"])
<|code_end|>
, generate the next line using the imports in this file:
from revolver.core import sudo
from revolver import command, package
and context (functions, classes, or occasionally code) from other files:
# Path: revolver/core.py
# def sudo(*args, **kwargs):
# if env.sudo_user:
# kwargs['user'] = env.sudo_user
#
# return _sudo(*args, **kwargs)
#
# Path: revolver/command.py
#
# Path: revolver/package.py
# def is_installed(name):
# def install_ppa(name):
. Output only the next line. | url = "https://raw.github.com/michaelcontento/git-chiefs/master/install" |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
def install():
package.install(["memcached", "libmemcached-dev"])
<|code_end|>
with the help of current file imports:
from revolver import command, package
and context from other files:
# Path: revolver/command.py
#
# Path: revolver/package.py
# def is_installed(name):
# def install_ppa(name):
, which may contain function names, class names, or code. Output only the next line. | def ensure(): |
Predict the next line after this snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
def install():
package.ensure(["git-core", "openjdk-7-jre"])
if not dir.exists(".awsenv"):
run("git clone git://github.com/michaelcontento/awsenv.git .awsenv")
<|code_end|>
using the current file's imports:
from revolver.core import run
from revolver import contextmanager as ctx
from revolver import directory as dir
from revolver import package, file
and any relevant context from other files:
# Path: revolver/core.py
# def run(*args, **kwargs):
# if not env.sudo_forced:
# return _run(*args, **kwargs)
#
# return sudo(*args, **kwargs)
#
# Path: revolver/contextmanager.py
# def sudo(username=None, login=False):
# def unpatched_state():
#
# Path: revolver/directory.py
# def temp_local():
# def temp(mode=None, owner=None, group=None):
# def remove(location, recursive=False, force=True):
# def create(path, recursive=False, mode=None, owner=None, group=None):
#
# Path: revolver/package.py
# def is_installed(name):
# def install_ppa(name):
#
# Path: revolver/file.py
# def temp(mode=None, owner=None, group=None):
# def remove(location, recursive=False, force=True):
# def touch(location, mode=None, owner=None, group=None):
# def copy(source, destination, force=True, mode=None, owner=None, group=None):
. Output only the next line. | return |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
def install():
package.ensure(["git-core", "openjdk-7-jre"])
if not dir.exists(".awsenv"):
run("git clone git://github.com/michaelcontento/awsenv.git .awsenv")
return
with ctx.cd(".awsenv"):
run("git pull")
_ensure_autoload(".bashrc")
_ensure_autoload(".zshrc")
<|code_end|>
. Use current file imports:
from revolver.core import run
from revolver import contextmanager as ctx
from revolver import directory as dir
from revolver import package, file
and context (classes, functions, or code) from other files:
# Path: revolver/core.py
# def run(*args, **kwargs):
# if not env.sudo_forced:
# return _run(*args, **kwargs)
#
# return sudo(*args, **kwargs)
#
# Path: revolver/contextmanager.py
# def sudo(username=None, login=False):
# def unpatched_state():
#
# Path: revolver/directory.py
# def temp_local():
# def temp(mode=None, owner=None, group=None):
# def remove(location, recursive=False, force=True):
# def create(path, recursive=False, mode=None, owner=None, group=None):
#
# Path: revolver/package.py
# def is_installed(name):
# def install_ppa(name):
#
# Path: revolver/file.py
# def temp(mode=None, owner=None, group=None):
# def remove(location, recursive=False, force=True):
# def touch(location, mode=None, owner=None, group=None):
# def copy(source, destination, force=True, mode=None, owner=None, group=None):
. Output only the next line. | def ensure(): |
Predict the next line after this snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
def install():
package.ensure(["git-core", "openjdk-7-jre"])
if not dir.exists(".awsenv"):
run("git clone git://github.com/michaelcontento/awsenv.git .awsenv")
<|code_end|>
using the current file's imports:
from revolver.core import run
from revolver import contextmanager as ctx
from revolver import directory as dir
from revolver import package, file
and any relevant context from other files:
# Path: revolver/core.py
# def run(*args, **kwargs):
# if not env.sudo_forced:
# return _run(*args, **kwargs)
#
# return sudo(*args, **kwargs)
#
# Path: revolver/contextmanager.py
# def sudo(username=None, login=False):
# def unpatched_state():
#
# Path: revolver/directory.py
# def temp_local():
# def temp(mode=None, owner=None, group=None):
# def remove(location, recursive=False, force=True):
# def create(path, recursive=False, mode=None, owner=None, group=None):
#
# Path: revolver/package.py
# def is_installed(name):
# def install_ppa(name):
#
# Path: revolver/file.py
# def temp(mode=None, owner=None, group=None):
# def remove(location, recursive=False, force=True):
# def touch(location, mode=None, owner=None, group=None):
# def copy(source, destination, force=True, mode=None, owner=None, group=None):
. Output only the next line. | return |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
ensure = decorator.multiargs(decorator.sudo(ensure))
install = decorator.multiargs(decorator.sudo(install))
update = decorator.multiargs(decorator.sudo(update))
upgrade = decorator.sudo(upgrade)
def is_installed(name):
with ctx.settings(warn_only=True):
res = core.run("dpkg -s %s" % name)
for line in res.splitlines():
<|code_end|>
. Use current file imports:
from cuisine import package_ensure as ensure
from cuisine import package_install as install
from cuisine import package_update as update
from cuisine import package_upgrade as upgrade
from revolver import contextmanager as ctx
from revolver import file, server, core, decorator
and context (classes, functions, or code) from other files:
# Path: revolver/contextmanager.py
# def sudo(username=None, login=False):
# def unpatched_state():
#
# Path: revolver/file.py
# def temp(mode=None, owner=None, group=None):
# def remove(location, recursive=False, force=True):
# def touch(location, mode=None, owner=None, group=None):
# def copy(source, destination, force=True, mode=None, owner=None, group=None):
#
# Path: revolver/server.py
# def timezone(zone='UTC'):
# def version():
# def codename():
#
# Path: revolver/core.py
# VERSION = '0.0.5'
# def put(*args, **kwargs):
# def run(*args, **kwargs):
# def sudo(*args, **kwargs):
#
# Path: revolver/decorator.py
# def sudo(func):
# def wrapper(*args, **kwargs):
# def multiargs(func):
# def wrapper(*args, **kwargs):
# def inject_use_sudo(func):
# def inject_wrapper(*args, **kwargs):
. Output only the next line. | if line.startswith("Status: "): |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
ensure = decorator.multiargs(decorator.sudo(ensure))
install = decorator.multiargs(decorator.sudo(install))
update = decorator.multiargs(decorator.sudo(update))
upgrade = decorator.sudo(upgrade)
def is_installed(name):
with ctx.settings(warn_only=True):
res = core.run("dpkg -s %s" % name)
for line in res.splitlines():
<|code_end|>
, determine the next line of code. You have imports:
from cuisine import package_ensure as ensure
from cuisine import package_install as install
from cuisine import package_update as update
from cuisine import package_upgrade as upgrade
from revolver import contextmanager as ctx
from revolver import file, server, core, decorator
and context (class names, function names, or code) available:
# Path: revolver/contextmanager.py
# def sudo(username=None, login=False):
# def unpatched_state():
#
# Path: revolver/file.py
# def temp(mode=None, owner=None, group=None):
# def remove(location, recursive=False, force=True):
# def touch(location, mode=None, owner=None, group=None):
# def copy(source, destination, force=True, mode=None, owner=None, group=None):
#
# Path: revolver/server.py
# def timezone(zone='UTC'):
# def version():
# def codename():
#
# Path: revolver/core.py
# VERSION = '0.0.5'
# def put(*args, **kwargs):
# def run(*args, **kwargs):
# def sudo(*args, **kwargs):
#
# Path: revolver/decorator.py
# def sudo(func):
# def wrapper(*args, **kwargs):
# def multiargs(func):
# def wrapper(*args, **kwargs):
# def inject_use_sudo(func):
# def inject_wrapper(*args, **kwargs):
. Output only the next line. | if line.startswith("Status: "): |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
ensure = decorator.multiargs(decorator.sudo(ensure))
install = decorator.multiargs(decorator.sudo(install))
update = decorator.multiargs(decorator.sudo(update))
upgrade = decorator.sudo(upgrade)
def is_installed(name):
with ctx.settings(warn_only=True):
res = core.run("dpkg -s %s" % name)
for line in res.splitlines():
if line.startswith("Status: "):
status = line[8:]
if "installed" in status.split(" "):
return True
return False
def install_ppa(name):
ensure("python-software-properties")
with ctx.cd("/etc/apt/sources.list.d"):
<|code_end|>
, predict the next line using imports from the current file:
from cuisine import package_ensure as ensure
from cuisine import package_install as install
from cuisine import package_update as update
from cuisine import package_upgrade as upgrade
from revolver import contextmanager as ctx
from revolver import file, server, core, decorator
and context including class names, function names, and sometimes code from other files:
# Path: revolver/contextmanager.py
# def sudo(username=None, login=False):
# def unpatched_state():
#
# Path: revolver/file.py
# def temp(mode=None, owner=None, group=None):
# def remove(location, recursive=False, force=True):
# def touch(location, mode=None, owner=None, group=None):
# def copy(source, destination, force=True, mode=None, owner=None, group=None):
#
# Path: revolver/server.py
# def timezone(zone='UTC'):
# def version():
# def codename():
#
# Path: revolver/core.py
# VERSION = '0.0.5'
# def put(*args, **kwargs):
# def run(*args, **kwargs):
# def sudo(*args, **kwargs):
#
# Path: revolver/decorator.py
# def sudo(func):
# def wrapper(*args, **kwargs):
# def multiargs(func):
# def wrapper(*args, **kwargs):
# def inject_use_sudo(func):
# def inject_wrapper(*args, **kwargs):
. Output only the next line. | name_normalizes = name.replace("/", "-") |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
ensure = decorator.multiargs(decorator.sudo(ensure))
install = decorator.multiargs(decorator.sudo(install))
update = decorator.multiargs(decorator.sudo(update))
upgrade = decorator.sudo(upgrade)
def is_installed(name):
with ctx.settings(warn_only=True):
res = core.run("dpkg -s %s" % name)
for line in res.splitlines():
if line.startswith("Status: "):
status = line[8:]
if "installed" in status.split(" "):
return True
return False
def install_ppa(name):
ensure("python-software-properties")
with ctx.cd("/etc/apt/sources.list.d"):
<|code_end|>
, predict the immediate next line with the help of imports:
from cuisine import package_ensure as ensure
from cuisine import package_install as install
from cuisine import package_update as update
from cuisine import package_upgrade as upgrade
from revolver import contextmanager as ctx
from revolver import file, server, core, decorator
and context (classes, functions, sometimes code) from other files:
# Path: revolver/contextmanager.py
# def sudo(username=None, login=False):
# def unpatched_state():
#
# Path: revolver/file.py
# def temp(mode=None, owner=None, group=None):
# def remove(location, recursive=False, force=True):
# def touch(location, mode=None, owner=None, group=None):
# def copy(source, destination, force=True, mode=None, owner=None, group=None):
#
# Path: revolver/server.py
# def timezone(zone='UTC'):
# def version():
# def codename():
#
# Path: revolver/core.py
# VERSION = '0.0.5'
# def put(*args, **kwargs):
# def run(*args, **kwargs):
# def sudo(*args, **kwargs):
#
# Path: revolver/decorator.py
# def sudo(func):
# def wrapper(*args, **kwargs):
# def multiargs(func):
# def wrapper(*args, **kwargs):
# def inject_use_sudo(func):
# def inject_wrapper(*args, **kwargs):
. Output only the next line. | name_normalizes = name.replace("/", "-") |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
def install():
package.ensure("git-core")
package.ensure([
"build-essential", "zlib1g-dev", "libssl-dev",
"libxml2-dev", "libsqlite3-dev"
])
ruby_rbenv.ensure()
dir.ensure(".rbenv/plugins")
with ctx.cd(".rbenv/plugins"):
if not dir.exists("ruby-build"):
run("git clone git://github.com/sstephenson/ruby-build.git")
return
with ctx.cd("ruby-build"):
run("git pull")
<|code_end|>
, determine the next line of code. You have imports:
from revolver import contextmanager as ctx
from revolver import directory as dir
from revolver import package
from revolver.core import run
from revolver.tool import ruby_rbenv
and context (class names, function names, or code) available:
# Path: revolver/contextmanager.py
# def sudo(username=None, login=False):
# def unpatched_state():
#
# Path: revolver/directory.py
# def temp_local():
# def temp(mode=None, owner=None, group=None):
# def remove(location, recursive=False, force=True):
# def create(path, recursive=False, mode=None, owner=None, group=None):
#
# Path: revolver/package.py
# def is_installed(name):
# def install_ppa(name):
#
# Path: revolver/core.py
# def run(*args, **kwargs):
# if not env.sudo_forced:
# return _run(*args, **kwargs)
#
# return sudo(*args, **kwargs)
#
# Path: revolver/tool/ruby_rbenv.py
# def install():
# def ensure():
# def _ensure_autoload(filename):
. Output only the next line. | def ensure(): |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
def install():
package.ensure("git-core")
package.ensure([
"build-essential", "zlib1g-dev", "libssl-dev",
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from revolver import contextmanager as ctx
from revolver import directory as dir
from revolver import package
from revolver.core import run
from revolver.tool import ruby_rbenv
and context:
# Path: revolver/contextmanager.py
# def sudo(username=None, login=False):
# def unpatched_state():
#
# Path: revolver/directory.py
# def temp_local():
# def temp(mode=None, owner=None, group=None):
# def remove(location, recursive=False, force=True):
# def create(path, recursive=False, mode=None, owner=None, group=None):
#
# Path: revolver/package.py
# def is_installed(name):
# def install_ppa(name):
#
# Path: revolver/core.py
# def run(*args, **kwargs):
# if not env.sudo_forced:
# return _run(*args, **kwargs)
#
# return sudo(*args, **kwargs)
#
# Path: revolver/tool/ruby_rbenv.py
# def install():
# def ensure():
# def _ensure_autoload(filename):
which might include code, classes, or functions. Output only the next line. | "libxml2-dev", "libsqlite3-dev" |
Predict the next line after this snippet: <|code_start|>
@patch("revolver.directory.exists", "revolver.user.exists")
def test_inside(dir_exists, user_exists):
dir_exists.expects_call().returns(True)
user_exists.expects_call().returns(True)
assert vagrant.inside()
@patch("revolver.directory.exists")
def test_not_inside_if_dir_is_missing(dir_exists):
dir_exists.expects_call().returns(False)
assert not vagrant.inside()
@patch("revolver.directory.exists", "revolver.user.exists")
def test_not_inside_if_dir_is_missing(dir_exists, user_exists):
dir_exists.expects_call().returns(True)
user_exists.expects_call().returns(False)
assert not vagrant.inside()
@patch("revolver.core.local")
def test_is_running_arguments(local):
command = "vagrant status | egrep -o 'running$'"
local.expects_call().with_args(command, capture=True)
vagrant.is_running()
@patch("revolver.core.local")
<|code_end|>
using the current file's imports:
from fudge import patch
from revolver import vagrant, core
and any relevant context from other files:
# Path: revolver/vagrant.py
# def inside():
# def is_running():
# def select():
#
# Path: revolver/core.py
# VERSION = '0.0.5'
# def put(*args, **kwargs):
# def run(*args, **kwargs):
# def sudo(*args, **kwargs):
. Output only the next line. | def test_is_not_running_if_result_is_none(local): |
Continue the code snippet: <|code_start|>
@patch("revolver.core.local")
def test_is_not_running_if_result_is_none(local):
local.expects_call().returns(None)
assert not vagrant.is_running()
@patch("revolver.core.local")
def test_is_not_running_if_result_is_blank(local):
local.expects_call().returns("")
assert not vagrant.is_running()
@patch("revolver.core.local")
def test_is_running(local):
local.expects_call().returns("some non empty string")
assert vagrant.is_running()
@patch("revolver.vagrant.is_running", "revolver.log.abort")
def test_select_aborts_if_not_running(is_running, abort):
is_running.expects_call().returns(False)
abort.expects_call().with_args("Vagrant based VM currently NOT running")
vagrant.select()
@patch("revolver.vagrant.is_running")
@patch("revolver.directory.temp_local")
@patch("revolver.core.local")
def test_select_changes_the_environment(is_running, temp_local, local):
<|code_end|>
. Use current file imports:
from fudge import patch
from revolver import vagrant, core
and context (classes, functions, or code) from other files:
# Path: revolver/vagrant.py
# def inside():
# def is_running():
# def select():
#
# Path: revolver/core.py
# VERSION = '0.0.5'
# def put(*args, **kwargs):
# def run(*args, **kwargs):
# def sudo(*args, **kwargs):
. Output only the next line. | is_running.expects_call().returns(True) |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
def install(version, _update=True):
nodejs_nvm.ensure()
<|code_end|>
. Write the next line using the current file imports:
from revolver.core import run
from revolver.tool import nodejs_nvm
and context from other files:
# Path: revolver/core.py
# def run(*args, **kwargs):
# if not env.sudo_forced:
# return _run(*args, **kwargs)
#
# return sudo(*args, **kwargs)
#
# Path: revolver/tool/nodejs_nvm.py
# def install():
# def ensure():
# def _ensure_autoload(filename):
, which may include functions, classes, or code. Output only the next line. | if not version.startswith("v"): |
Predict the next line for this snippet: <|code_start|>#encoding: utf-8
try:
except ImportError:
pygments = None
class CommandLineInterface(object):
def __init__(self, arguments):
remainder = list(arguments)
kwargs = list((i, remainder.pop(i), v.partition('=')) for i, v in reversed(list(enumerate(arguments))) if v.startswith('--'))
self.kwargs = {k[2:]: v for i, (k, _, v) in kwargs}
self.flags = set(remainder.pop(i)[1:] for i, v in reversed(list(enumerate(remainder))) if v.startswith('-'))
self.args = remainder
def go(self, action, *args, **data):
action = getattr(self, action)
result = action(*args, **data)
if isinstance(result, int):
sys.exit(result)
if result:
print(result)
def render(self, reference, *args, **kw):
<|code_end|>
with the help of current file imports:
import sys
import pygments
import pygments.lexers
import pygments.formatters
from cinje import flatten
and context from other files:
# Path: cinje/util.py
# def flatten(input, file=None, encoding=None, errors='strict'):
# """Return a flattened representation of a cinje chunk stream.
#
# This has several modes of operation. If no `file` argument is given, output will be returned as a string.
# The type of string will be determined by the presence of an `encoding`; if one is given the returned value is a
# binary string, otherwise the native unicode representation. If a `file` is present, chunks will be written
# iteratively through repeated calls to `file.write()`, and the amount of data (characters or bytes) written
# returned. The type of string written will be determined by `encoding`, just as the return value is when not
# writing to a file-like object. The `errors` argument is passed through when encoding.
#
# We can highly recommend using the various stremaing IO containers available in the
# [`io`](https://docs.python.org/3/library/io.html) module, though
# [`tempfile`](https://docs.python.org/3/library/tempfile.html) classes are also quite useful.
# """
#
# input = stream(input, encoding, errors)
#
# if file is None: # Exit early if we're not writing to a file.
# return b''.join(input) if encoding else ''.join(input)
#
# counter = 0
#
# for chunk in input:
# file.write(chunk)
# counter += len(chunk)
#
# return counter
, which may contain function names, class names, or code. Output only the next line. | output = kw.pop('out', None) |
Next line prediction: <|code_start|># encoding: utf-8
from __future__ import unicode_literals
def transform(input):
#__import__('pudb').set_trace()
translator = Context(input)
return '\n'.join(str(i) for i in translator.stream)
def cinje_decode(input, errors='strict', final=True):
if not final: return '', 0
output = transform(bytes(input).decode('utf8', errors))
return output, len(input)
<|code_end|>
. Use current file imports:
(import codecs
from encodings import utf_8 as utf8
from .util import StringIO, bytes, str, Context)
and context including class names, function names, or small code snippets from other files:
# Path: cinje/util.py
# def stream(input, encoding=None, errors='strict'):
# def flatten(input, file=None, encoding=None, errors='strict'):
# def fragment(string, name="anonymous", **context):
# def interruptable(iterable):
# def iterate(obj):
# def xmlargs(_source=None, **values):
# def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
# def ensure_buffer(context, separate=True):
# def __init__(self, number, line, scope=None, kind=None):
# def process(self):
# def stripped(self):
# def partitioned(self):
# def __repr__(self):
# def __bytes__(self):
# def __str__(self):
# def clone(self, **kw):
# def __init__(self, input=None, Line=Line):
# def count(self):
# def __len__(self):
# def __repr__(self):
# def __iter__(self):
# def __next__(self):
# def __str__(self):
# def next(self):
# def peek(self):
# def push(self, *lines):
# def reset(self):
# def append(self, *lines):
# def __init__(self, input):
# def __repr__(self):
# def prepare(self):
# def stream(self):
# def classify(self, line):
# def __init__(self, callable, *args, **kw):
# def __repr__(self):
# def __ror__(self, other):
# def __call__(self, *args, **kw):
# def __init__(self):
# def handle_data(self, d):
# def get_data(self):
# def strip_tags(html):
# class Line(object):
# class Lines(object):
# class Context(object):
# class Pipe(object):
# class MLStripper(HTMLParser):
. Output only the next line. | class CinjeIncrementalDecoder(utf8.IncrementalDecoder): |
Given the code snippet: <|code_start|> #__import__('pudb').set_trace()
translator = Context(input)
return '\n'.join(str(i) for i in translator.stream)
def cinje_decode(input, errors='strict', final=True):
if not final: return '', 0
output = transform(bytes(input).decode('utf8', errors))
return output, len(input)
class CinjeIncrementalDecoder(utf8.IncrementalDecoder):
def _buffer_decode(self, input, errors='strict', final=False):
if not final or len(input) == 0:
return '', 0
output = transform(bytes(input).decode('utf8', errors))
return output, len(input)
class CinjeStreamReader(utf8.StreamReader):
def __init__(self, *args, **kw):
codecs.StreamReader.__init__(self, *args, **kw)
self.stream = StringIO(transform(self.stream))
def cinje_search_function(name):
# I have absolutely no idea how to reliably test this scenario, other than artificially.
if name != 'cinje': # pragma: no cover
<|code_end|>
, generate the next line using the imports in this file:
import codecs
from encodings import utf_8 as utf8
from .util import StringIO, bytes, str, Context
and context (functions, classes, or occasionally code) from other files:
# Path: cinje/util.py
# def stream(input, encoding=None, errors='strict'):
# def flatten(input, file=None, encoding=None, errors='strict'):
# def fragment(string, name="anonymous", **context):
# def interruptable(iterable):
# def iterate(obj):
# def xmlargs(_source=None, **values):
# def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
# def ensure_buffer(context, separate=True):
# def __init__(self, number, line, scope=None, kind=None):
# def process(self):
# def stripped(self):
# def partitioned(self):
# def __repr__(self):
# def __bytes__(self):
# def __str__(self):
# def clone(self, **kw):
# def __init__(self, input=None, Line=Line):
# def count(self):
# def __len__(self):
# def __repr__(self):
# def __iter__(self):
# def __next__(self):
# def __str__(self):
# def next(self):
# def peek(self):
# def push(self, *lines):
# def reset(self):
# def append(self, *lines):
# def __init__(self, input):
# def __repr__(self):
# def prepare(self):
# def stream(self):
# def classify(self, line):
# def __init__(self, callable, *args, **kw):
# def __repr__(self):
# def __ror__(self, other):
# def __call__(self, *args, **kw):
# def __init__(self):
# def handle_data(self, d):
# def get_data(self):
# def strip_tags(html):
# class Line(object):
# class Lines(object):
# class Context(object):
# class Pipe(object):
# class MLStripper(HTMLParser):
. Output only the next line. | return None |
Continue the code snippet: <|code_start|># encoding: utf-8
from __future__ import unicode_literals
def transform(input):
#__import__('pudb').set_trace()
translator = Context(input)
return '\n'.join(str(i) for i in translator.stream)
def cinje_decode(input, errors='strict', final=True):
if not final: return '', 0
output = transform(bytes(input).decode('utf8', errors))
return output, len(input)
<|code_end|>
. Use current file imports:
import codecs
from encodings import utf_8 as utf8
from .util import StringIO, bytes, str, Context
and context (classes, functions, or code) from other files:
# Path: cinje/util.py
# def stream(input, encoding=None, errors='strict'):
# def flatten(input, file=None, encoding=None, errors='strict'):
# def fragment(string, name="anonymous", **context):
# def interruptable(iterable):
# def iterate(obj):
# def xmlargs(_source=None, **values):
# def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
# def ensure_buffer(context, separate=True):
# def __init__(self, number, line, scope=None, kind=None):
# def process(self):
# def stripped(self):
# def partitioned(self):
# def __repr__(self):
# def __bytes__(self):
# def __str__(self):
# def clone(self, **kw):
# def __init__(self, input=None, Line=Line):
# def count(self):
# def __len__(self):
# def __repr__(self):
# def __iter__(self):
# def __next__(self):
# def __str__(self):
# def next(self):
# def peek(self):
# def push(self, *lines):
# def reset(self):
# def append(self, *lines):
# def __init__(self, input):
# def __repr__(self):
# def prepare(self):
# def stream(self):
# def classify(self, line):
# def __init__(self, callable, *args, **kw):
# def __repr__(self):
# def __ror__(self, other):
# def __call__(self, *args, **kw):
# def __init__(self):
# def handle_data(self, d):
# def get_data(self):
# def strip_tags(html):
# class Line(object):
# class Lines(object):
# class Context(object):
# class Pipe(object):
# class MLStripper(HTMLParser):
. Output only the next line. | class CinjeIncrementalDecoder(utf8.IncrementalDecoder): |
Predict the next line after this snippet: <|code_start|># encoding: utf-8
from __future__ import unicode_literals
@pytest.fixture
def tmpl():
return fragment("Epic Template™".encode('utf-8'), name="tmpl")
class TestIssueTwentyFive(object):
<|code_end|>
using the current file's imports:
import pytest
from cinje import fragment
from cinje.util import flatten
and any relevant context from other files:
# Path: cinje/util.py
# def fragment(string, name="anonymous", **context):
# """Translate a template fragment into a callable function.
#
# **Note:** Use of this function is discouraged everywhere except tests, as no caching is implemented at this time.
#
# Only one function may be declared, either manually, or automatically. If automatic defintition is chosen the
# resulting function takes no arguments. Additional keyword arguments are passed through as global variables.
# """
#
# if isinstance(string, bytes):
# string = string.decode('utf-8')
#
# if ": def" in string or ":def" in string:
# code = string.encode('utf8').decode('cinje')
# name = None
# else:
# code = ": def {name}\n\n{string}".format(
# name = name,
# string = string,
# ).encode('utf8').decode('cinje')
#
# environ = dict(context)
#
# exec(code, environ)
#
# if name is None: # We need to dig it out of the `__tmpl__` list.
# if __debug__ and not environ.get('__tmpl__', None):
# raise RuntimeError("Template fragment does not contain a function: " + repr(environ.get('__tmpl__', None)) + \
# "\n\n" + code)
#
# return environ[environ['__tmpl__'][-1]] # Super secret sauce: you _can_ define more than one function...
#
# return environ[name]
#
# Path: cinje/util.py
# def flatten(input, file=None, encoding=None, errors='strict'):
# """Return a flattened representation of a cinje chunk stream.
#
# This has several modes of operation. If no `file` argument is given, output will be returned as a string.
# The type of string will be determined by the presence of an `encoding`; if one is given the returned value is a
# binary string, otherwise the native unicode representation. If a `file` is present, chunks will be written
# iteratively through repeated calls to `file.write()`, and the amount of data (characters or bytes) written
# returned. The type of string written will be determined by `encoding`, just as the return value is when not
# writing to a file-like object. The `errors` argument is passed through when encoding.
#
# We can highly recommend using the various stremaing IO containers available in the
# [`io`](https://docs.python.org/3/library/io.html) module, though
# [`tempfile`](https://docs.python.org/3/library/tempfile.html) classes are also quite useful.
# """
#
# input = stream(input, encoding, errors)
#
# if file is None: # Exit early if we're not writing to a file.
# return b''.join(input) if encoding else ''.join(input)
#
# counter = 0
#
# for chunk in input:
# file.write(chunk)
# counter += len(chunk)
#
# return counter
. Output only the next line. | def test_trademark(self, tmpl): |
Predict the next line for this snippet: <|code_start|># encoding: utf-8
@pytest.fixture
def std():
return std
class TestStandardHTMLFive(object):
def test_div(self, std):
assert flatten(std.div(foo="bar", data_baz="42")) == '<div data-baz="42" foo="bar">\n</div>\n'
def test_span(self, std):
assert flatten(std.span("Hello!")) == '<span>Hello!</span>'
def test_span_iterator(self, std):
assert flatten(std.span(iter(["Yay!"]))) == '<span>Yay!</span>'
<|code_end|>
with the help of current file imports:
import pytest
from cinje.util import flatten
from cinje.std import html as std
and context from other files:
# Path: cinje/util.py
# def flatten(input, file=None, encoding=None, errors='strict'):
# """Return a flattened representation of a cinje chunk stream.
#
# This has several modes of operation. If no `file` argument is given, output will be returned as a string.
# The type of string will be determined by the presence of an `encoding`; if one is given the returned value is a
# binary string, otherwise the native unicode representation. If a `file` is present, chunks will be written
# iteratively through repeated calls to `file.write()`, and the amount of data (characters or bytes) written
# returned. The type of string written will be determined by `encoding`, just as the return value is when not
# writing to a file-like object. The `errors` argument is passed through when encoding.
#
# We can highly recommend using the various streaming IO containers available in the
# [`io`](https://docs.python.org/3/library/io.html) module, though
# [`tempfile`](https://docs.python.org/3/library/tempfile.html) classes are also quite useful.
# """
#
# input = stream(input, encoding, errors)
#
# if file is None: # Exit early if we're not writing to a file.
# return b''.join(input) if encoding else ''.join(input)
#
# counter = 0
#
# for chunk in input:
# file.write(chunk)
# counter += len(chunk)
#
# return counter
, which may contain function names, class names, or code. Output only the next line. | def test_span_protected(self, std): |
Predict the next line after this snippet: <|code_start|># encoding: utf-8
class TestBasicFormatters(object):
def test_formatted_strings(self):
assert flatten(fragment('%{"{foo}" foo=27}')()) == '27\n'
assert flatten(fragment('%{"{0}" 27}')()) == '27\n'
assert flatten(fragment('%{format 27}', format="{0}")()) == '27\n'
assert flatten(fragment('%{format() 27}', format=lambda: "{0}")()) == '27\n'
assert flatten(fragment('%{format["first"] 27}', format=dict(first="{0}"))()) == '27\n'
assert flatten(fragment('%{format[0] 27}', format=["{0}"])()) == '27\n'
def test_basic_text(self):
assert flatten(fragment('text')()) == "text\n"
<|code_end|>
using the current file's imports:
from cinje.util import fragment, flatten
and any relevant context from other files:
# Path: cinje/util.py
# def fragment(string, name="anonymous", **context):
# """Translate a template fragment into a callable function.
#
# **Note:** Use of this function is discouraged everywhere except tests, as no caching is implemented at this time.
#
# Only one function may be declared, either manually, or automatically. If automatic definition is chosen the
# resulting function takes no arguments. Additional keyword arguments are passed through as global variables.
# """
#
# if isinstance(string, bytes):
# string = string.decode('utf-8')
#
# if ": def" in string or ":def" in string:
# code = string.encode('utf8').decode('cinje')
# name = None
# else:
# code = ": def {name}\n\n{string}".format(
# name = name,
# string = string,
# ).encode('utf8').decode('cinje')
#
# environ = dict(context)
#
# exec(code, environ)
#
# if name is None: # We need to dig it out of the `__tmpl__` list.
# if __debug__ and not environ.get('__tmpl__', None):
# raise RuntimeError("Template fragment does not contain a function: " + repr(environ.get('__tmpl__', None)) + \
# "\n\n" + code)
#
# return environ[environ['__tmpl__'][-1]] # Super secret sauce: you _can_ define more than one function...
#
# return environ[name]
#
# def flatten(input, file=None, encoding=None, errors='strict'):
# """Return a flattened representation of a cinje chunk stream.
#
# This has several modes of operation. If no `file` argument is given, output will be returned as a string.
# The type of string will be determined by the presence of an `encoding`; if one is given the returned value is a
# binary string, otherwise the native unicode representation. If a `file` is present, chunks will be written
# iteratively through repeated calls to `file.write()`, and the amount of data (characters or bytes) written
# returned. The type of string written will be determined by `encoding`, just as the return value is when not
# writing to a file-like object. The `errors` argument is passed through when encoding.
#
# We can highly recommend using the various streaming IO containers available in the
# [`io`](https://docs.python.org/3/library/io.html) module, though
# [`tempfile`](https://docs.python.org/3/library/tempfile.html) classes are also quite useful.
# """
#
# input = stream(input, encoding, errors)
#
# if file is None: # Exit early if we're not writing to a file.
# return b''.join(input) if encoding else ''.join(input)
#
# counter = 0
#
# for chunk in input:
# file.write(chunk)
# counter += len(chunk)
#
# return counter
. Output only the next line. | def test_escaped_text(self): |
Given the code snippet: <|code_start|># encoding: utf-8
class TestBasicFormatters(object):
def test_formatted_strings(self):
assert flatten(fragment('%{"{foo}" foo=27}')()) == '27\n'
assert flatten(fragment('%{"{0}" 27}')()) == '27\n'
assert flatten(fragment('%{format 27}', format="{0}")()) == '27\n'
assert flatten(fragment('%{format() 27}', format=lambda: "{0}")()) == '27\n'
assert flatten(fragment('%{format["first"] 27}', format=dict(first="{0}"))()) == '27\n'
assert flatten(fragment('%{format[0] 27}', format=["{0}"])()) == '27\n'
def test_basic_text(self):
assert flatten(fragment('text')()) == "text\n"
def test_escaped_text(self):
assert flatten(fragment('${"27"}')()) == "27\n"
assert flatten(fragment('${"<html>"}')()) == "<html>\n"
def test_blessed_text(self):
assert flatten(fragment('#{"42"}')()) == "42\n"
assert flatten(fragment('#{"<html>"}')()) == "<html>\n"
<|code_end|>
, generate the next line using the imports in this file:
from cinje.util import fragment, flatten
and context (functions, classes, or occasionally code) from other files:
# Path: cinje/util.py
# def fragment(string, name="anonymous", **context):
# """Translate a template fragment into a callable function.
#
# **Note:** Use of this function is discouraged everywhere except tests, as no caching is implemented at this time.
#
# Only one function may be declared, either manually, or automatically. If automatic definition is chosen the
# resulting function takes no arguments. Additional keyword arguments are passed through as global variables.
# """
#
# if isinstance(string, bytes):
# string = string.decode('utf-8')
#
# if ": def" in string or ":def" in string:
# code = string.encode('utf8').decode('cinje')
# name = None
# else:
# code = ": def {name}\n\n{string}".format(
# name = name,
# string = string,
# ).encode('utf8').decode('cinje')
#
# environ = dict(context)
#
# exec(code, environ)
#
# if name is None: # We need to dig it out of the `__tmpl__` list.
# if __debug__ and not environ.get('__tmpl__', None):
# raise RuntimeError("Template fragment does not contain a function: " + repr(environ.get('__tmpl__', None)) + \
# "\n\n" + code)
#
# return environ[environ['__tmpl__'][-1]] # Super secret sauce: you _can_ define more than one function...
#
# return environ[name]
#
# def flatten(input, file=None, encoding=None, errors='strict'):
# """Return a flattened representation of a cinje chunk stream.
#
# This has several modes of operation. If no `file` argument is given, output will be returned as a string.
# The type of string will be determined by the presence of an `encoding`; if one is given the returned value is a
# binary string, otherwise the native unicode representation. If a `file` is present, chunks will be written
# iteratively through repeated calls to `file.write()`, and the amount of data (characters or bytes) written
# returned. The type of string written will be determined by `encoding`, just as the return value is when not
# writing to a file-like object. The `errors` argument is passed through when encoding.
#
# We can highly recommend using the various streaming IO containers available in the
# [`io`](https://docs.python.org/3/library/io.html) module, though
# [`tempfile`](https://docs.python.org/3/library/tempfile.html) classes are also quite useful.
# """
#
# input = stream(input, encoding, errors)
#
# if file is None: # Exit early if we're not writing to a file.
# return b''.join(input) if encoding else ''.join(input)
#
# counter = 0
#
# for chunk in input:
# file.write(chunk)
# counter += len(chunk)
#
# return counter
. Output only the next line. | def test_json_object(self): |
Continue the code snippet: <|code_start|># encoding: utf-8
from __future__ import unicode_literals
class TestPipeBehaviour(object):
def test_basic_usage(self):
@Pipe
def text(value):
return str(value)
assert (27 | text) == "27"
def test_repr_is_reasonable(self):
@Pipe
def text(value):
return str(value)
assert repr(text).startswith('Pipe(<function')
assert 'text' in repr(text)
<|code_end|>
. Use current file imports:
from cinje.util import str, Pipe
and context (classes, functions, or code) from other files:
# Path: cinje/util.py
# def stream(input, encoding=None, errors='strict'):
# def flatten(input, file=None, encoding=None, errors='strict'):
# def fragment(string, name="anonymous", **context):
# def interruptable(iterable):
# def iterate(obj):
# def xmlargs(_source=None, **values):
# def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
# def ensure_buffer(context, separate=True):
# def __init__(self, number, line, scope=None, kind=None):
# def process(self):
# def stripped(self):
# def partitioned(self):
# def __repr__(self):
# def __bytes__(self):
# def __str__(self):
# def clone(self, **kw):
# def __init__(self, input=None, Line=Line):
# def count(self):
# def __len__(self):
# def __repr__(self):
# def __iter__(self):
# def __next__(self):
# def __str__(self):
# def next(self):
# def peek(self):
# def push(self, *lines):
# def reset(self):
# def append(self, *lines):
# def __init__(self, input):
# def __repr__(self):
# def prepare(self):
# def stream(self):
# def classify(self, line):
# def __init__(self, callable, *args, **kw):
# def __repr__(self):
# def __ror__(self, other):
# def __call__(self, *args, **kw):
# def __init__(self):
# def handle_data(self, d):
# def get_data(self):
# def strip_tags(html):
# class Line(object):
# class Lines(object):
# class Context(object):
# class Pipe(object):
# class MLStripper(HTMLParser):
. Output only the next line. | def test_argument_specialization(self): |
Continue the code snippet: <|code_start|># encoding: utf-8
from __future__ import unicode_literals
class TestPipeBehaviour(object):
def test_basic_usage(self):
@Pipe
def text(value):
return str(value)
assert (27 | text) == "27"
def test_repr_is_reasonable(self):
@Pipe
def text(value):
return str(value)
assert repr(text).startswith('Pipe(<function')
assert 'text' in repr(text)
def test_argument_specialization(self):
@Pipe
def encode(value, encoding='utf8'):
return str(value).encode(encoding)
utf8 = encode(encoding='utf8')
latin1 = encode(encoding='latin1')
<|code_end|>
. Use current file imports:
from cinje.util import str, Pipe
and context (classes, functions, or code) from other files:
# Path: cinje/util.py
# def stream(input, encoding=None, errors='strict'):
# def flatten(input, file=None, encoding=None, errors='strict'):
# def fragment(string, name="anonymous", **context):
# def interruptable(iterable):
# def iterate(obj):
# def xmlargs(_source=None, **values):
# def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
# def ensure_buffer(context, separate=True):
# def __init__(self, number, line, scope=None, kind=None):
# def process(self):
# def stripped(self):
# def partitioned(self):
# def __repr__(self):
# def __bytes__(self):
# def __str__(self):
# def clone(self, **kw):
# def __init__(self, input=None, Line=Line):
# def count(self):
# def __len__(self):
# def __repr__(self):
# def __iter__(self):
# def __next__(self):
# def __str__(self):
# def next(self):
# def peek(self):
# def push(self, *lines):
# def reset(self):
# def append(self, *lines):
# def __init__(self, input):
# def __repr__(self):
# def prepare(self):
# def stream(self):
# def classify(self, line):
# def __init__(self, callable, *args, **kw):
# def __repr__(self):
# def __ror__(self, other):
# def __call__(self, *args, **kw):
# def __init__(self):
# def handle_data(self, d):
# def get_data(self):
# def strip_tags(html):
# class Line(object):
# class Lines(object):
# class Context(object):
# class Pipe(object):
# class MLStripper(HTMLParser):
. Output only the next line. | assert ("Zoë" | utf8) == b'Zo\xc3\xab' |
Next line prediction: <|code_start|># encoding: utf-8
from __future__ import unicode_literals
tmpl = fragment("Zoë")
class TestCinjeIO(object):
def test_flatten_string_unicode(self):
assert flatten(tmpl()) == 'Zoë\n'
def test_flatten_string_binary(self):
assert flatten(tmpl(), encoding='utf8') == 'Zoë\n'.encode('utf8')
def test_flatten_filelike_unicode(self):
container = StringIO()
assert flatten(tmpl(), container) == 4
assert container.getvalue() == 'Zoë\n'
<|code_end|>
. Use current file imports:
(from io import StringIO, BytesIO
from cinje import flatten, fragment)
and context including class names, function names, or small code snippets from other files:
# Path: cinje/util.py
# def flatten(input, file=None, encoding=None, errors='strict'):
# """Return a flattened representation of a cinje chunk stream.
#
# This has several modes of operation. If no `file` argument is given, output will be returned as a string.
# The type of string will be determined by the presence of an `encoding`; if one is given the returned value is a
# binary string, otherwise the native unicode representation. If a `file` is present, chunks will be written
# iteratively through repeated calls to `file.write()`, and the amount of data (characters or bytes) written
# returned. The type of string written will be determined by `encoding`, just as the return value is when not
# writing to a file-like object. The `errors` argument is passed through when encoding.
#
# We can highly recommend using the various streaming IO containers available in the
# [`io`](https://docs.python.org/3/library/io.html) module, though
# [`tempfile`](https://docs.python.org/3/library/tempfile.html) classes are also quite useful.
# """
#
# input = stream(input, encoding, errors)
#
# if file is None: # Exit early if we're not writing to a file.
# return b''.join(input) if encoding else ''.join(input)
#
# counter = 0
#
# for chunk in input:
# file.write(chunk)
# counter += len(chunk)
#
# return counter
#
# def fragment(string, name="anonymous", **context):
# """Translate a template fragment into a callable function.
#
# **Note:** Use of this function is discouraged everywhere except tests, as no caching is implemented at this time.
#
# Only one function may be declared, either manually, or automatically. If automatic definition is chosen the
# resulting function takes no arguments. Additional keyword arguments are passed through as global variables.
# """
#
# if isinstance(string, bytes):
# string = string.decode('utf-8')
#
# if ": def" in string or ":def" in string:
# code = string.encode('utf8').decode('cinje')
# name = None
# else:
# code = ": def {name}\n\n{string}".format(
# name = name,
# string = string,
# ).encode('utf8').decode('cinje')
#
# environ = dict(context)
#
# exec(code, environ)
#
# if name is None: # We need to dig it out of the `__tmpl__` list.
# if __debug__ and not environ.get('__tmpl__', None):
# raise RuntimeError("Template fragment does not contain a function: " + repr(environ.get('__tmpl__', None)) + \
# "\n\n" + code)
#
# return environ[environ['__tmpl__'][-1]] # Super secret sauce: you _can_ define more than one function...
#
# return environ[name]
. Output only the next line. | def test_flatten_filelike_binary(self): |
Predict the next line after this snippet: <|code_start|># encoding: utf-8
def producer(*args, **kw):
return [", ".join(str(i) for i in args) + "\n" + json.dumps(kw, sort_keys=True)]
@pytest.fixture
<|code_end|>
using the current file's imports:
import pytest
import json
import os.path
from cinje.inline.use import Use
from cinje.util import flatten, fragment
and any relevant context from other files:
# Path: cinje/inline/use.py
# class Use(object):
# """Consume the result of calling another template function, extending the local buffer.
#
# This is meant to consume non-wrapping template functions. For wrapping functions see ": using" instead.
#
# Syntax:
#
# : use <name-constant> [<arguments>]
#
# The name constant must resolve to a generator function that participates in the cinje "yielded buffer" protocol.
#
# """
#
# priority = 25
#
# def match(self, context, line):
# """Match code lines prefixed with a "use" keyword."""
# return line.kind == 'code' and line.partitioned[0] == "use"
#
# def __call__(self, context):
# """Wrap the expression in a `_buffer.extend()` call."""
#
# input = context.input
#
# try:
# declaration = input.next()
# except StopIteration:
# return
#
# parts = declaration.partitioned[1] # Ignore the "use" part, we care about the name and arguments.
# name, _, args = parts.partition(' ')
#
# for i in ensure_buffer(context):
# yield i
#
# name = name.rstrip()
# args = args.lstrip()
#
# if 'buffer' in context.flag:
# yield declaration.clone(line=PREFIX + name + "(" + args + "))")
# context.flag.add('dirty')
# return
#
# if py == 3: # We can use the more efficient "yield from" syntax. Wewt!
# yield declaration.clone(line="yield from " + name + "(" + args + ")")
# else:
# yield declaration.clone(line="for _chunk in " + name + "(" + args + "):")
# yield declaration.clone(line="yield _chunk", scope=context.scope + 1)
#
# Path: cinje/util.py
# def flatten(input, file=None, encoding=None, errors='strict'):
# """Return a flattened representation of a cinje chunk stream.
#
# This has several modes of operation. If no `file` argument is given, output will be returned as a string.
# The type of string will be determined by the presence of an `encoding`; if one is given the returned value is a
# binary string, otherwise the native unicode representation. If a `file` is present, chunks will be written
# iteratively through repeated calls to `file.write()`, and the amount of data (characters or bytes) written
# returned. The type of string written will be determined by `encoding`, just as the return value is when not
# writing to a file-like object. The `errors` argument is passed through when encoding.
#
# We can highly recommend using the various streaming IO containers available in the
# [`io`](https://docs.python.org/3/library/io.html) module, though
# [`tempfile`](https://docs.python.org/3/library/tempfile.html) classes are also quite useful.
# """
#
# input = stream(input, encoding, errors)
#
# if file is None: # Exit early if we're not writing to a file.
# return b''.join(input) if encoding else ''.join(input)
#
# counter = 0
#
# for chunk in input:
# file.write(chunk)
# counter += len(chunk)
#
# return counter
#
# def fragment(string, name="anonymous", **context):
# """Translate a template fragment into a callable function.
#
# **Note:** Use of this function is discouraged everywhere except tests, as no caching is implemented at this time.
#
# Only one function may be declared, either manually, or automatically. If automatic definition is chosen the
# resulting function takes no arguments. Additional keyword arguments are passed through as global variables.
# """
#
# if isinstance(string, bytes):
# string = string.decode('utf-8')
#
# if ": def" in string or ":def" in string:
# code = string.encode('utf8').decode('cinje')
# name = None
# else:
# code = ": def {name}\n\n{string}".format(
# name = name,
# string = string,
# ).encode('utf8').decode('cinje')
#
# environ = dict(context)
#
# exec(code, environ)
#
# if name is None: # We need to dig it out of the `__tmpl__` list.
# if __debug__ and not environ.get('__tmpl__', None):
# raise RuntimeError("Template fragment does not contain a function: " + repr(environ.get('__tmpl__', None)) + \
# "\n\n" + code)
#
# return environ[environ['__tmpl__'][-1]] # Super secret sauce: you _can_ define more than one function...
#
# return environ[name]
. Output only the next line. | def tmpl(): |
Based on the snippet: <|code_start|># encoding: utf-8
from __future__ import unicode_literals
class TestContextBehaviours(object):
def test_repr_is_reasonable(self):
context = Context('')
if py == 2:
assert repr(context) == 'Context(Lines(1), 0, set([]))'
else:
assert repr(context) == 'Context(Lines(1), 0, set())'
def test_prepare_required_translator_priority(self):
context = Context('')
context.prepare()
assert isinstance(context._handler[0], Module), "Module must be first priority."
assert isinstance(context._handler[-1], Code), "Code must be last priority."
def test_first_handler(self):
context = Context('')
context.prepare()
<|code_end|>
, predict the immediate next line with the help of imports:
from cinje.util import py, Context
from cinje.block.module import Module
from cinje.inline.code import Code
and context (classes, functions, sometimes code) from other files:
# Path: cinje/util.py
# def stream(input, encoding=None, errors='strict'):
# def flatten(input, file=None, encoding=None, errors='strict'):
# def fragment(string, name="anonymous", **context):
# def interruptable(iterable):
# def iterate(obj):
# def xmlargs(_source=None, **values):
# def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
# def ensure_buffer(context, separate=True):
# def __init__(self, number, line, scope=None, kind=None):
# def process(self):
# def stripped(self):
# def partitioned(self):
# def __repr__(self):
# def __bytes__(self):
# def __str__(self):
# def clone(self, **kw):
# def __init__(self, input=None, Line=Line):
# def count(self):
# def __len__(self):
# def __repr__(self):
# def __iter__(self):
# def __next__(self):
# def __str__(self):
# def next(self):
# def peek(self):
# def push(self, *lines):
# def reset(self):
# def append(self, *lines):
# def __init__(self, input):
# def __repr__(self):
# def prepare(self):
# def stream(self):
# def classify(self, line):
# def __init__(self, callable, *args, **kw):
# def __repr__(self):
# def __ror__(self, other):
# def __call__(self, *args, **kw):
# def __init__(self):
# def handle_data(self, d):
# def get_data(self):
# def strip_tags(html):
# class Line(object):
# class Lines(object):
# class Context(object):
# class Pipe(object):
# class MLStripper(HTMLParser):
#
# Path: cinje/block/module.py
# class Module(object):
# """Module handler.
#
# This is the initial scope, and the highest priority to ensure its processing of the preamble happens first.
# """
#
# priority = -100
#
# def match(self, context, line):
# return 'init' not in context.flag
#
# def __call__(self, context):
# input = context.input
#
# context.flag.add('init')
# context.flag.add('buffer')
#
# imported = False
#
# for line in input:
# if not line.stripped or line.stripped[0] == '#':
# if not line.stripped.startswith('##') and 'coding:' not in line.stripped:
# yield line
# continue
#
# input.push(line) # We're out of the preamble, so put that line back and stop.
# break
#
# # After any existing preamble, but before other imports, we inject our own.
#
# if py == 2:
# yield Line(0, 'from __future__ import unicode_literals')
# yield Line(0, '')
#
# yield Line(0, 'import cinje')
# yield Line(0, 'from cinje.helpers import escape as _escape, bless as _bless, iterate, xmlargs as _args, _interrupt, _json')
# yield Line(0, '')
# yield Line(0, '')
# yield Line(0, '__tmpl__ = [] # Exported template functions.')
# yield Line(0, '')
#
# for i in context.stream:
# yield i
#
# if context.templates:
# yield Line(0, '')
# yield Line(0, '__tmpl__.extend(["' + '", "'.join(context.templates) + '"])')
# context.templates = []
#
# # Snapshot the line number mapping.
# mapping = deque(context.mapping)
# mapping.reverse()
#
# yield Line(0, '')
#
# if __debug__:
# yield Line(0, '__mapping__ = [' + ','.join(str(i) for i in mapping) + ']')
#
# yield Line(0, '__gzmapping__ = b"' + red(mapping).replace('"', '\"') + '"')
#
# context.flag.remove('init')
#
# Path: cinje/inline/code.py
# class Code(object):
# """General code handler.
#
# This captures all code segments not otherwise handled. It has a very low priority to ensure other "code" handlers
# get a chance to run first.
#
# Syntax:
#
# : <code>
# """
#
# priority = 100
#
# def match(self, context, line):
# return line.kind == 'code'
#
# def __call__(self, context):
# try:
# yield context.input.next() # Pass through.
# except StopIteration:
# return
. Output only the next line. | assert context.classify(": foo") is context._handler[0] |
Next line prediction: <|code_start|># encoding: utf-8
from __future__ import unicode_literals
class TestContextBehaviours(object):
def test_repr_is_reasonable(self):
context = Context('')
if py == 2:
assert repr(context) == 'Context(Lines(1), 0, set([]))'
else:
assert repr(context) == 'Context(Lines(1), 0, set())'
<|code_end|>
. Use current file imports:
(from cinje.util import py, Context
from cinje.block.module import Module
from cinje.inline.code import Code)
and context including class names, function names, or small code snippets from other files:
# Path: cinje/util.py
# def stream(input, encoding=None, errors='strict'):
# def flatten(input, file=None, encoding=None, errors='strict'):
# def fragment(string, name="anonymous", **context):
# def interruptable(iterable):
# def iterate(obj):
# def xmlargs(_source=None, **values):
# def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
# def ensure_buffer(context, separate=True):
# def __init__(self, number, line, scope=None, kind=None):
# def process(self):
# def stripped(self):
# def partitioned(self):
# def __repr__(self):
# def __bytes__(self):
# def __str__(self):
# def clone(self, **kw):
# def __init__(self, input=None, Line=Line):
# def count(self):
# def __len__(self):
# def __repr__(self):
# def __iter__(self):
# def __next__(self):
# def __str__(self):
# def next(self):
# def peek(self):
# def push(self, *lines):
# def reset(self):
# def append(self, *lines):
# def __init__(self, input):
# def __repr__(self):
# def prepare(self):
# def stream(self):
# def classify(self, line):
# def __init__(self, callable, *args, **kw):
# def __repr__(self):
# def __ror__(self, other):
# def __call__(self, *args, **kw):
# def __init__(self):
# def handle_data(self, d):
# def get_data(self):
# def strip_tags(html):
# class Line(object):
# class Lines(object):
# class Context(object):
# class Pipe(object):
# class MLStripper(HTMLParser):
#
# Path: cinje/block/module.py
# class Module(object):
# """Module handler.
#
# This is the initial scope, and the highest priority to ensure its processing of the preamble happens first.
# """
#
# priority = -100
#
# def match(self, context, line):
# return 'init' not in context.flag
#
# def __call__(self, context):
# input = context.input
#
# context.flag.add('init')
# context.flag.add('buffer')
#
# imported = False
#
# for line in input:
# if not line.stripped or line.stripped[0] == '#':
# if not line.stripped.startswith('##') and 'coding:' not in line.stripped:
# yield line
# continue
#
# input.push(line) # We're out of the preamble, so put that line back and stop.
# break
#
# # After any existing preamble, but before other imports, we inject our own.
#
# if py == 2:
# yield Line(0, 'from __future__ import unicode_literals')
# yield Line(0, '')
#
# yield Line(0, 'import cinje')
# yield Line(0, 'from cinje.helpers import escape as _escape, bless as _bless, iterate, xmlargs as _args, _interrupt, _json')
# yield Line(0, '')
# yield Line(0, '')
# yield Line(0, '__tmpl__ = [] # Exported template functions.')
# yield Line(0, '')
#
# for i in context.stream:
# yield i
#
# if context.templates:
# yield Line(0, '')
# yield Line(0, '__tmpl__.extend(["' + '", "'.join(context.templates) + '"])')
# context.templates = []
#
# # Snapshot the line number mapping.
# mapping = deque(context.mapping)
# mapping.reverse()
#
# yield Line(0, '')
#
# if __debug__:
# yield Line(0, '__mapping__ = [' + ','.join(str(i) for i in mapping) + ']')
#
# yield Line(0, '__gzmapping__ = b"' + red(mapping).replace('"', '\"') + '"')
#
# context.flag.remove('init')
#
# Path: cinje/inline/code.py
# class Code(object):
# """General code handler.
#
# This captures all code segments not otherwise handled. It has a very low priority to ensure other "code" handlers
# get a chance to run first.
#
# Syntax:
#
# : <code>
# """
#
# priority = 100
#
# def match(self, context, line):
# return line.kind == 'code'
#
# def __call__(self, context):
# try:
# yield context.input.next() # Pass through.
# except StopIteration:
# return
. Output only the next line. | def test_prepare_required_translator_priority(self): |
Here is a snippet: <|code_start|># encoding: utf-8
from __future__ import unicode_literals
class TestContextBehaviours(object):
def test_repr_is_reasonable(self):
context = Context('')
if py == 2:
assert repr(context) == 'Context(Lines(1), 0, set([]))'
else:
assert repr(context) == 'Context(Lines(1), 0, set())'
def test_prepare_required_translator_priority(self):
context = Context('')
context.prepare()
assert isinstance(context._handler[0], Module), "Module must be first priority."
assert isinstance(context._handler[-1], Code), "Code must be last priority."
def test_first_handler(self):
context = Context('')
<|code_end|>
. Write the next line using the current file imports:
from cinje.util import py, Context
from cinje.block.module import Module
from cinje.inline.code import Code
and context from other files:
# Path: cinje/util.py
# def stream(input, encoding=None, errors='strict'):
# def flatten(input, file=None, encoding=None, errors='strict'):
# def fragment(string, name="anonymous", **context):
# def interruptable(iterable):
# def iterate(obj):
# def xmlargs(_source=None, **values):
# def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
# def ensure_buffer(context, separate=True):
# def __init__(self, number, line, scope=None, kind=None):
# def process(self):
# def stripped(self):
# def partitioned(self):
# def __repr__(self):
# def __bytes__(self):
# def __str__(self):
# def clone(self, **kw):
# def __init__(self, input=None, Line=Line):
# def count(self):
# def __len__(self):
# def __repr__(self):
# def __iter__(self):
# def __next__(self):
# def __str__(self):
# def next(self):
# def peek(self):
# def push(self, *lines):
# def reset(self):
# def append(self, *lines):
# def __init__(self, input):
# def __repr__(self):
# def prepare(self):
# def stream(self):
# def classify(self, line):
# def __init__(self, callable, *args, **kw):
# def __repr__(self):
# def __ror__(self, other):
# def __call__(self, *args, **kw):
# def __init__(self):
# def handle_data(self, d):
# def get_data(self):
# def strip_tags(html):
# class Line(object):
# class Lines(object):
# class Context(object):
# class Pipe(object):
# class MLStripper(HTMLParser):
#
# Path: cinje/block/module.py
# class Module(object):
# """Module handler.
#
# This is the initial scope, and the highest priority to ensure its processing of the preamble happens first.
# """
#
# priority = -100
#
# def match(self, context, line):
# return 'init' not in context.flag
#
# def __call__(self, context):
# input = context.input
#
# context.flag.add('init')
# context.flag.add('buffer')
#
# imported = False
#
# for line in input:
# if not line.stripped or line.stripped[0] == '#':
# if not line.stripped.startswith('##') and 'coding:' not in line.stripped:
# yield line
# continue
#
# input.push(line) # We're out of the preamble, so put that line back and stop.
# break
#
# # After any existing preamble, but before other imports, we inject our own.
#
# if py == 2:
# yield Line(0, 'from __future__ import unicode_literals')
# yield Line(0, '')
#
# yield Line(0, 'import cinje')
# yield Line(0, 'from cinje.helpers import escape as _escape, bless as _bless, iterate, xmlargs as _args, _interrupt, _json')
# yield Line(0, '')
# yield Line(0, '')
# yield Line(0, '__tmpl__ = [] # Exported template functions.')
# yield Line(0, '')
#
# for i in context.stream:
# yield i
#
# if context.templates:
# yield Line(0, '')
# yield Line(0, '__tmpl__.extend(["' + '", "'.join(context.templates) + '"])')
# context.templates = []
#
# # Snapshot the line number mapping.
# mapping = deque(context.mapping)
# mapping.reverse()
#
# yield Line(0, '')
#
# if __debug__:
# yield Line(0, '__mapping__ = [' + ','.join(str(i) for i in mapping) + ']')
#
# yield Line(0, '__gzmapping__ = b"' + red(mapping).replace('"', '\"') + '"')
#
# context.flag.remove('init')
#
# Path: cinje/inline/code.py
# class Code(object):
# """General code handler.
#
# This captures all code segments not otherwise handled. It has a very low priority to ensure other "code" handlers
# get a chance to run first.
#
# Syntax:
#
# : <code>
# """
#
# priority = 100
#
# def match(self, context, line):
# return line.kind == 'code'
#
# def __call__(self, context):
# try:
# yield context.input.next() # Pass through.
# except StopIteration:
# return
, which may include functions, classes, or code. Output only the next line. | context.prepare() |
Given snippet: <|code_start|># encoding: utf-8
from __future__ import unicode_literals
class TestContextBehaviours(object):
def test_repr_is_reasonable(self):
context = Context('')
if py == 2:
assert repr(context) == 'Context(Lines(1), 0, set([]))'
else:
assert repr(context) == 'Context(Lines(1), 0, set())'
def test_prepare_required_translator_priority(self):
context = Context('')
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from cinje.util import py, Context
from cinje.block.module import Module
from cinje.inline.code import Code
and context:
# Path: cinje/util.py
# def stream(input, encoding=None, errors='strict'):
# def flatten(input, file=None, encoding=None, errors='strict'):
# def fragment(string, name="anonymous", **context):
# def interruptable(iterable):
# def iterate(obj):
# def xmlargs(_source=None, **values):
# def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
# def ensure_buffer(context, separate=True):
# def __init__(self, number, line, scope=None, kind=None):
# def process(self):
# def stripped(self):
# def partitioned(self):
# def __repr__(self):
# def __bytes__(self):
# def __str__(self):
# def clone(self, **kw):
# def __init__(self, input=None, Line=Line):
# def count(self):
# def __len__(self):
# def __repr__(self):
# def __iter__(self):
# def __next__(self):
# def __str__(self):
# def next(self):
# def peek(self):
# def push(self, *lines):
# def reset(self):
# def append(self, *lines):
# def __init__(self, input):
# def __repr__(self):
# def prepare(self):
# def stream(self):
# def classify(self, line):
# def __init__(self, callable, *args, **kw):
# def __repr__(self):
# def __ror__(self, other):
# def __call__(self, *args, **kw):
# def __init__(self):
# def handle_data(self, d):
# def get_data(self):
# def strip_tags(html):
# class Line(object):
# class Lines(object):
# class Context(object):
# class Pipe(object):
# class MLStripper(HTMLParser):
#
# Path: cinje/block/module.py
# class Module(object):
# """Module handler.
#
# This is the initial scope, and the highest priority to ensure its processing of the preamble happens first.
# """
#
# priority = -100
#
# def match(self, context, line):
# return 'init' not in context.flag
#
# def __call__(self, context):
# input = context.input
#
# context.flag.add('init')
# context.flag.add('buffer')
#
# imported = False
#
# for line in input:
# if not line.stripped or line.stripped[0] == '#':
# if not line.stripped.startswith('##') and 'coding:' not in line.stripped:
# yield line
# continue
#
# input.push(line) # We're out of the preamble, so put that line back and stop.
# break
#
# # After any existing preamble, but before other imports, we inject our own.
#
# if py == 2:
# yield Line(0, 'from __future__ import unicode_literals')
# yield Line(0, '')
#
# yield Line(0, 'import cinje')
# yield Line(0, 'from cinje.helpers import escape as _escape, bless as _bless, iterate, xmlargs as _args, _interrupt, _json')
# yield Line(0, '')
# yield Line(0, '')
# yield Line(0, '__tmpl__ = [] # Exported template functions.')
# yield Line(0, '')
#
# for i in context.stream:
# yield i
#
# if context.templates:
# yield Line(0, '')
# yield Line(0, '__tmpl__.extend(["' + '", "'.join(context.templates) + '"])')
# context.templates = []
#
# # Snapshot the line number mapping.
# mapping = deque(context.mapping)
# mapping.reverse()
#
# yield Line(0, '')
#
# if __debug__:
# yield Line(0, '__mapping__ = [' + ','.join(str(i) for i in mapping) + ']')
#
# yield Line(0, '__gzmapping__ = b"' + red(mapping).replace('"', '\"') + '"')
#
# context.flag.remove('init')
#
# Path: cinje/inline/code.py
# class Code(object):
# """General code handler.
#
# This captures all code segments not otherwise handled. It has a very low priority to ensure other "code" handlers
# get a chance to run first.
#
# Syntax:
#
# : <code>
# """
#
# priority = 100
#
# def match(self, context, line):
# return line.kind == 'code'
#
# def __call__(self, context):
# try:
# yield context.input.next() # Pass through.
# except StopIteration:
# return
which might include code, classes, or functions. Output only the next line. | context.prepare() |
Next line prediction: <|code_start|># encoding: utf-8
class Using(object):
priority = 25
def match(self, context, line):
return line.kind == 'code' and line.stripped.startswith("using ")
def __call__(self, context):
input = context.input
try:
declaration = input.next()
except:
return
_, _, declaration = declaration.stripped.partition(' ')
name, _, args = declaration.partition(' ')
<|code_end|>
. Use current file imports:
(from ..util import Line, ensure_buffer)
and context including class names, function names, or small code snippets from other files:
# Path: cinje/util.py
# class Line(object):
# """A rich description for a line of input, allowing for annotation."""
#
# __slots__ = ('number', 'line', 'scope', 'kind', 'continued')
#
# def __init__(self, number, line, scope=None, kind=None):
# if isinstance(line, bytes):
# line = line.decode('utf-8')
#
# self.number = number
# self.line = line
# self.scope = scope
# self.kind = kind
# self.continued = self.stripped.endswith('\\')
#
# if not kind: self.process()
#
# super(Line, self).__init__()
#
# def process(self):
# if self.stripped.startswith('#') and not self.stripped.startswith('#{'):
# self.kind = 'comment'
# elif self.stripped.startswith(':'):
# self.kind = 'code'
# self.line = self.stripped[1:].lstrip()
# else:
# self.kind = 'text'
#
# @property
# def stripped(self):
# return self.line.strip()
#
# @property
# def partitioned(self):
# prefix, _, remainder = self.stripped.partition(' ')
# return prefix.rstrip(), remainder.lstrip()
#
# def __repr__(self):
# return '{0.__class__.__name__}({0.number}, {0.kind}, "{0.stripped}")'.format(self)
#
# def __bytes__(self):
# return str(self).encode('utf8')
#
# def __str__(self):
# if self.scope is None:
# return self.line
#
# return '\t' * self.scope + self.line.lstrip()
#
# if py == 2: # pragma: no cover
# __unicode__ = __str__
# __str__ = __bytes__
# del __bytes__
#
# def clone(self, **kw):
# values = dict(
# number = self.number,
# line = self.line,
# scope = self.scope,
# kind = self.kind,
# )
#
# values.update(kw)
#
# instance = self.__class__(**values)
#
# return instance
#
# def ensure_buffer(context, separate=True):
# if 'text' in context.flag or 'buffer' not in context.flag:
# return
#
# if separate: yield Line(0, "")
# yield Line(0, "_buffer = []")
#
# if not pypy:
# yield Line(0, "__w, __ws = _buffer.extend, _buffer.append")
#
# yield Line(0, "")
#
# context.flag.add('text')
. Output only the next line. | name = name.strip() |
Predict the next line for this snippet: <|code_start|># encoding: utf-8
from __future__ import unicode_literals
def test_line_processing():
assert Line(None, "Some text.").kind == 'text'
assert Line(None, "# Comment?").kind == 'comment'
assert Line(None, ": pass").kind == 'code'
def test_line_formatting():
assert Line(None, "\tHuzzah?").stripped == "Huzzah?"
assert Line(None, "foo bar baz").partitioned == ('foo', 'bar baz')
assert repr(Line(1, "First!")) == 'Line(1, text, "First!")'
assert str(Line(None, "Flat.")) == "Flat."
assert str(Line(None, "Indented.", 1)) == "\tIndented."
assert bytes(Line(None, "Text.")) == b"Text."
def test_line_clone():
line = Line(27, "\t\tMy test line!", 2)
<|code_end|>
with the help of current file imports:
from cinje.util import Line, bytes, str
and context from other files:
# Path: cinje/util.py
# def stream(input, encoding=None, errors='strict'):
# def flatten(input, file=None, encoding=None, errors='strict'):
# def fragment(string, name="anonymous", **context):
# def interruptable(iterable):
# def iterate(obj):
# def xmlargs(_source=None, **values):
# def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
# def ensure_buffer(context, separate=True):
# def __init__(self, number, line, scope=None, kind=None):
# def process(self):
# def stripped(self):
# def partitioned(self):
# def __repr__(self):
# def __bytes__(self):
# def __str__(self):
# def clone(self, **kw):
# def __init__(self, input=None, Line=Line):
# def count(self):
# def __len__(self):
# def __repr__(self):
# def __iter__(self):
# def __next__(self):
# def __str__(self):
# def next(self):
# def peek(self):
# def push(self, *lines):
# def reset(self):
# def append(self, *lines):
# def __init__(self, input):
# def __repr__(self):
# def prepare(self):
# def stream(self):
# def classify(self, line):
# def __init__(self, callable, *args, **kw):
# def __repr__(self):
# def __ror__(self, other):
# def __call__(self, *args, **kw):
# def __init__(self):
# def handle_data(self, d):
# def get_data(self):
# def strip_tags(html):
# class Line(object):
# class Lines(object):
# class Context(object):
# class Pipe(object):
# class MLStripper(HTMLParser):
, which may contain function names, class names, or code. Output only the next line. | clone = line.clone(line="New line.") |
Next line prediction: <|code_start|># encoding: utf-8
CODE = """
: def _wrapper
prefix
: yield
postfix
: end
: def consumer
<|code_end|>
. Use current file imports:
(import pytest
from cinje import fragment, flatten)
and context including class names, function names, or small code snippets from other files:
# Path: cinje/util.py
# def flatten(input, file=None, encoding=None, errors='strict'):
# """Return a flattened representation of a cinje chunk stream.
#
# This has several modes of operation. If no `file` argument is given, output will be returned as a string.
# The type of string will be determined by the presence of an `encoding`; if one is given the returned value is a
# binary string, otherwise the native unicode representation. If a `file` is present, chunks will be written
# iteratively through repeated calls to `file.write()`, and the amount of data (characters or bytes) written
# returned. The type of string written will be determined by `encoding`, just as the return value is when not
# writing to a file-like object. The `errors` argument is passed through when encoding.
#
# We can highly recommend using the various stremaing IO containers available in the
# [`io`](https://docs.python.org/3/library/io.html) module, though
# [`tempfile`](https://docs.python.org/3/library/tempfile.html) classes are also quite useful.
# """
#
# input = stream(input, encoding, errors)
#
# if file is None: # Exit early if we're not writing to a file.
# return b''.join(input) if encoding else ''.join(input)
#
# counter = 0
#
# for chunk in input:
# file.write(chunk)
# counter += len(chunk)
#
# return counter
#
# def fragment(string, name="anonymous", **context):
# """Translate a template fragment into a callable function.
#
# **Note:** Use of this function is discouraged everywhere except tests, as no caching is implemented at this time.
#
# Only one function may be declared, either manually, or automatically. If automatic defintition is chosen the
# resulting function takes no arguments. Additional keyword arguments are passed through as global variables.
# """
#
# if isinstance(string, bytes):
# string = string.decode('utf-8')
#
# if ": def" in string or ":def" in string:
# code = string.encode('utf8').decode('cinje')
# name = None
# else:
# code = ": def {name}\n\n{string}".format(
# name = name,
# string = string,
# ).encode('utf8').decode('cinje')
#
# environ = dict(context)
#
# exec(code, environ)
#
# if name is None: # We need to dig it out of the `__tmpl__` list.
# if __debug__ and not environ.get('__tmpl__', None):
# raise RuntimeError("Template fragment does not contain a function: " + repr(environ.get('__tmpl__', None)) + \
# "\n\n" + code)
#
# return environ[environ['__tmpl__'][-1]] # Super secret sauce: you _can_ define more than one function...
#
# return environ[name]
. Output only the next line. | : using _wrapper |
Given the code snippet: <|code_start|>#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Rocamgo is recogniter of the go games by processing digital images with opencv.
# Copyright (C) 2012 Víctor Ramirez de la Corte <virako.9 at gmail dot com>
# Copyright (C) 2012 David Medina Velasco <cuidadoconeltecho at gmail dot com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class Test_camera(unittest.TestCase):
def setUp(self):
self.cameras = Cameras()
self.camera_1 = Camera()
self.camera_2 = Camera()
def tearDown(self):
<|code_end|>
, generate the next line using the imports in this file:
from src.camera import Camera
from src.cameras import Cameras
from nose.tools import *
from random import choice
import unittest
and context (functions, classes, or occasionally code) from other files:
# Path: src/cameras.py
# class Cameras:
# """Clase para abrir las cámaras disponibles en el ordenador. """
#
# def __init__(self):
# #cam = Camera()
# #cam.index = 100
# #cam.capture = CreateFileCapture("http://192.168.1.2:5143/mjpeg")
# self.cameras = []
# self.camera = None
#
# def on_mouse(self, event, x, y, flags, camera):
# """Capturador de eventos de click de ratón.
#
# :Param event: Evento capturado.
# :Type event: int
# :Param x: posición x del ratón.
# :Type x: int
# :Param y: posición y del ratón.
# :Type y: int
# :Param camera: objeto Capture
# :Type camera: Capture
# """
# if event == CV_EVENT_LBUTTONDBLCLK:
# self.camera = camera
#
# def check_cameras(self, num=MAX_CAMERAS):
# """Comprueba las cámaras disponibles.
#
# :Param num: máximo número de cámaras a comprobar
# :Keyword num: 99 por defecto, ya que en Linux es lo permitido
# :Param num: int
# :Return: lista de cámaras disponibles
# :Rtype: list of Capture
# """
# n = 0
# while len(self.cameras) < num and n <= MAX_CAMERAS:
# camera = CaptureFromCAM(n)
# if QueryFrame(camera):
# self.cameras.append(camera)
# n += 1
# if num != MAX_CAMERAS and len(self.cameras) != num:
# print "Found %d of %d cameras. " %(len(self.cameras), num)
# exit()
# return len(self.cameras)
#
# def show_and_select_camera(self):
# """Muestra las cámaras disponibles en ventanas y da la opción de seleccionar una de ellas pulsando doble click.
#
# :Return: cámara seleccionada
# :Rtype: Camera """
# if not self.cameras:
# return self.camera
# elif len(self.cameras) == 1:
# return self.cameras[0]
# elif len(self.cameras) > 1:
# while not self.camera:
# for camera in self.cameras:
# name_windows = camera.__str__()
# img = QueryFrame(camera)
# ShowImage(name_windows, img)
# key = WaitKey(60)
# # TODO select camera push the key
# SetMouseCallback(name_windows, self.on_mouse, camera)
# DestroyAllWindows()
#
# return self.camera
. Output only the next line. | self.cameras = None |
Here is a snippet: <|code_start|> :Type pt: tuple
:Keyword pt: None si no le pasamos ningún punto parámetro. """
if not img and not pix:
self.pt = pt
elif not pt:
square_w = float(img.width)/GOBAN_SIZE
border_w = square_w/2
x = int(round((pix[0] - border_w)/square_w))
square_h = float(img.width)/GOBAN_SIZE
border_h = square_h/2
y = int(round((pix[1] - border_h)/square_h))
self.pt = [x, y]
self.color = color
self.x, self.y = self.pt
def __str__(self):
color = 'black' if self.color==BLACK else 'white'
return "(%d, %d) --> %s" %(self.x, self.y, color)
def __eq__(self, st):
return self.pt == st.pt and self.color == st.color
def __cmp__(self, st):
x = self.st.x - st.x
y = self.st.y - st.y
if x > 0:
return x
elif x == 0:
<|code_end|>
. Write the next line using the current file imports:
from src.cte import GOBAN_SIZE
from src.cte import WHITE
from src.cte import BLACK
and context from other files:
# Path: src/cte.py
# GOBAN_SIZE = 19
#
# Path: src/cte.py
# WHITE = 2
#
# Path: src/cte.py
# BLACK = 1
, which may include functions, classes, or code. Output only the next line. | if x == y: |
Using the snippet: <|code_start|>class Stone:
"""Clase piedra. """
def __init__(self, color, img=None, pix=None, pt=None):
"""Inicializamos una piedra, si no tenemos la posición, buscamos cual es esa posición dado una imagen ideal y un pixel.
:Param color: color de la piedra, BLACK or WHITE
:Type color: int
:Param img: imagen en formato ideal
:Type img: IplImage
:Keyword img: None si no le pasamos ninguna imagen por parámetro
:Param pix: pixel donde se encuentra la piedra en la imagen
:Type pix: tuple
:Keyword pix: None si no le pasamos ningun pixel por parámetro
:Param pt: punto donde se encuentra la piedra en el tablero
:Type pt: tuple
:Keyword pt: None si no le pasamos ningún punto parámetro. """
if not img and not pix:
self.pt = pt
elif not pt:
square_w = float(img.width)/GOBAN_SIZE
border_w = square_w/2
x = int(round((pix[0] - border_w)/square_w))
square_h = float(img.width)/GOBAN_SIZE
border_h = square_h/2
y = int(round((pix[1] - border_h)/square_h))
self.pt = [x, y]
self.color = color
self.x, self.y = self.pt
<|code_end|>
, determine the next line of code. You have imports:
from src.cte import GOBAN_SIZE
from src.cte import WHITE
from src.cte import BLACK
and context (class names, function names, or code) available:
# Path: src/cte.py
# GOBAN_SIZE = 19
#
# Path: src/cte.py
# WHITE = 2
#
# Path: src/cte.py
# BLACK = 1
. Output only the next line. | def __str__(self): |
Given snippet: <|code_start|>
"""
:var goban: matriz de piedras puestas
:Type goban: list
:var statistical: matriz de estadísticas para comprobar piedras buenas o malas
:Type statistical: list
:var stones: piedras a comprobar para añadir a estadísticas
:Type stones: list
:var kifu: Objeto Kifu
:Type kifu: Kifu
:var igs: Objeto Igs
:Type igs: Igs
"""
class Goban:
"""Clase tablero, contiene la matriz de estadíticas y funciones para rellenar el tablero. """
def __init__(self, size):
"""Crea dos matrices de tamaño pasado por parámetro, una para estadísticas y otra para guardar el estado de las piedras. Creamos un set de piedras para ir guardando las piedras que estemos comprobando. También inicializa un kifu para guardar la partida y un el objetos igs que se encargará de conectarse con el servidor que subirá la partida.
:Param size: tamaño del tablero
:Type size: int """
self.size = size
# El valor 0 es para ir sumando(hay piedra) o restando(no hay)
# El valor 8 es el nº de veces a buscar antes de hacer la estadística
self.goban = [[None] * size for i in range(size)]
self.statistical = [[[0, 8]] * size for i in range(size)]
self.stones = set()
self.kifu = Kifu()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from src.cte import GOBAN_SIZE
from src.cte import WHITE
from src.cte import BLACK
from src.kifu import Kifu
from igs import Igs
and context:
# Path: src/cte.py
# GOBAN_SIZE = 19
#
# Path: src/cte.py
# WHITE = 2
#
# Path: src/cte.py
# BLACK = 1
#
# Path: src/kifu.py
# class Kifu:
# """Clase para crear un fichero .sgf y guardar la partida. """
#
# def __init__(self, player1="j1", player2="j2", handicap=0, path="sgf", \
# rank_player1='20k', rank_player2='20k'):
# """Inicializamos configuración del archivo sgf.
#
# :Param player1: nombre del jugador 1
# :Type player1: str
# :Keyword player1: j1 por defecto
# :Param player2: nombre del jugador 2
# :Type player2: str
# :Keyword player2: j2 por defecto
# :Param handicap: handicap dado en la partida
# :Type handicap: int
# :Keyword handicap: ninguno por defecto (0)
# :Param path: ruta relativa donde guardamos el fichero
# :Type path: str
# :Keyword path: carpeta sgf por defecto
# :Param rank_player1: rango del jugador 1
# :Type rank_player1: str
# :Keyword rank_player1: 20k por defecto, nivel de inicio en el go
# :Param rank_player2: rango del jugador 2
# :Type rank_player2: str
# :Keyword rank_player2: 20k por defecto, nivel de inicio en el go """
# self.num_jug = 0
# self.player_black = player1
# self.player_white = player2
# filename = str(datetime.now()).replace(" ","_") + "_" + player1 + "_vs_" + player2
# self.dir = os.path.join(path, filename + ".sgf")
# header_file = HEADER_SGF
# header_file += [ "\nPB[%s]" %player1, "\nBR[%s]" %rank_player1, \
# "\nPW[%s]" %player2, "\nWR[%s]" %rank_player2]
# with open(self.dir, "w") as f:
# f.writelines(header_file)
#
#
# def add_stone(self, pos, color):
# """Añadir piedra al sgf.
#
# :Param pos: posición de la piedra
# :Type pos: tuple
# :Param color: color de la piedra
# :Type color: int """
# coord = chr(pos[0]+97) + chr(pos[1]+97)
# with open(self.dir, "a") as f:
# if color == BLACK:
# f.write("\n;B[%s]" %coord)
# elif color == WHITE:
# f.write("\n;W[%s]" %coord)
# else:
# print _("el color debe ser BLACK or WHITE")
# self.num_jug += 1
#
#
# def end_file(self):
# """Cerrar el fichero y dejarlo listo para poder abrirlo."""
# with open(self.dir, "a") as f:
# f.write(")")
which might include code, classes, or functions. Output only the next line. | user = raw_input("Insert your user: ") |
Predict the next line after this snippet: <|code_start|> self.statistical[st.x][st.y][0] -= 1
self.statistical[st.x][st.y][1] -= 1
values = self.statistical[st.x][st.y]
if values[1] <= 0 and values[0] > 0:
if self.goban[st.x][st.y] != True:
print "Add", st.x+1, st.y+1
# add kifu e igs
self.kifu.add_stone((st.x, st.y), st.color)
self.igs.add_stone((st.x, st.y))
self.statistical[st.x][st.y] = [0, 8]
self.goban[st.x][st.y] = True
elif values[1] <= 0 and values[0] > 0:
self.statistical[st.x][st.y] = [0, 8]
if self.goban[st.x][st.y] == True:
print "Piedra %d, %d quitada?." %(st.x, st.y)
# TODO comprobar piedras capturadas
# falsa piedra
self.stones.update(stones)
def print_st(self):
string = ""
for x in range(self.size):
for y in range(self.size):
string += '%s' %str(self.statistical[y][x])
string += " " + str(x+1) + "\n"
return string
<|code_end|>
using the current file's imports:
from src.cte import GOBAN_SIZE
from src.cte import WHITE
from src.cte import BLACK
from src.kifu import Kifu
from igs import Igs
and any relevant context from other files:
# Path: src/cte.py
# GOBAN_SIZE = 19
#
# Path: src/cte.py
# WHITE = 2
#
# Path: src/cte.py
# BLACK = 1
#
# Path: src/kifu.py
# class Kifu:
# """Clase para crear un fichero .sgf y guardar la partida. """
#
# def __init__(self, player1="j1", player2="j2", handicap=0, path="sgf", \
# rank_player1='20k', rank_player2='20k'):
# """Inicializamos configuración del archivo sgf.
#
# :Param player1: nombre del jugador 1
# :Type player1: str
# :Keyword player1: j1 por defecto
# :Param player2: nombre del jugador 2
# :Type player2: str
# :Keyword player2: j2 por defecto
# :Param handicap: handicap dado en la partida
# :Type handicap: int
# :Keyword handicap: ninguno por defecto (0)
# :Param path: ruta relativa donde guardamos el fichero
# :Type path: str
# :Keyword path: carpeta sgf por defecto
# :Param rank_player1: rango del jugador 1
# :Type rank_player1: str
# :Keyword rank_player1: 20k por defecto, nivel de inicio en el go
# :Param rank_player2: rango del jugador 2
# :Type rank_player2: str
# :Keyword rank_player2: 20k por defecto, nivel de inicio en el go """
# self.num_jug = 0
# self.player_black = player1
# self.player_white = player2
# filename = str(datetime.now()).replace(" ","_") + "_" + player1 + "_vs_" + player2
# self.dir = os.path.join(path, filename + ".sgf")
# header_file = HEADER_SGF
# header_file += [ "\nPB[%s]" %player1, "\nBR[%s]" %rank_player1, \
# "\nPW[%s]" %player2, "\nWR[%s]" %rank_player2]
# with open(self.dir, "w") as f:
# f.writelines(header_file)
#
#
# def add_stone(self, pos, color):
# """Añadir piedra al sgf.
#
# :Param pos: posición de la piedra
# :Type pos: tuple
# :Param color: color de la piedra
# :Type color: int """
# coord = chr(pos[0]+97) + chr(pos[1]+97)
# with open(self.dir, "a") as f:
# if color == BLACK:
# f.write("\n;B[%s]" %coord)
# elif color == WHITE:
# f.write("\n;W[%s]" %coord)
# else:
# print _("el color debe ser BLACK or WHITE")
# self.num_jug += 1
#
#
# def end_file(self):
# """Cerrar el fichero y dejarlo listo para poder abrirlo."""
# with open(self.dir, "a") as f:
# f.write(")")
. Output only the next line. | def __str__(self): |
Predict the next line for this snippet: <|code_start|># This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
:var goban: matriz de piedras puestas
:Type goban: list
:var statistical: matriz de estadísticas para comprobar piedras buenas o malas
:Type statistical: list
:var stones: piedras a comprobar para añadir a estadísticas
:Type stones: list
:var kifu: Objeto Kifu
:Type kifu: Kifu
:var igs: Objeto Igs
:Type igs: Igs
"""
class Goban:
"""Clase tablero, contiene la matriz de estadíticas y funciones para rellenar el tablero. """
def __init__(self, size):
"""Crea dos matrices de tamaño pasado por parámetro, una para estadísticas y otra para guardar el estado de las piedras. Creamos un set de piedras para ir guardando las piedras que estemos comprobando. También inicializa un kifu para guardar la partida y un el objetos igs que se encargará de conectarse con el servidor que subirá la partida.
:Param size: tamaño del tablero
:Type size: int """
<|code_end|>
with the help of current file imports:
from src.cte import GOBAN_SIZE
from src.cte import WHITE
from src.cte import BLACK
from src.kifu import Kifu
from igs import Igs
and context from other files:
# Path: src/cte.py
# GOBAN_SIZE = 19
#
# Path: src/cte.py
# WHITE = 2
#
# Path: src/cte.py
# BLACK = 1
#
# Path: src/kifu.py
# class Kifu:
# """Clase para crear un fichero .sgf y guardar la partida. """
#
# def __init__(self, player1="j1", player2="j2", handicap=0, path="sgf", \
# rank_player1='20k', rank_player2='20k'):
# """Inicializamos configuración del archivo sgf.
#
# :Param player1: nombre del jugador 1
# :Type player1: str
# :Keyword player1: j1 por defecto
# :Param player2: nombre del jugador 2
# :Type player2: str
# :Keyword player2: j2 por defecto
# :Param handicap: handicap dado en la partida
# :Type handicap: int
# :Keyword handicap: ninguno por defecto (0)
# :Param path: ruta relativa donde guardamos el fichero
# :Type path: str
# :Keyword path: carpeta sgf por defecto
# :Param rank_player1: rango del jugador 1
# :Type rank_player1: str
# :Keyword rank_player1: 20k por defecto, nivel de inicio en el go
# :Param rank_player2: rango del jugador 2
# :Type rank_player2: str
# :Keyword rank_player2: 20k por defecto, nivel de inicio en el go """
# self.num_jug = 0
# self.player_black = player1
# self.player_white = player2
# filename = str(datetime.now()).replace(" ","_") + "_" + player1 + "_vs_" + player2
# self.dir = os.path.join(path, filename + ".sgf")
# header_file = HEADER_SGF
# header_file += [ "\nPB[%s]" %player1, "\nBR[%s]" %rank_player1, \
# "\nPW[%s]" %player2, "\nWR[%s]" %rank_player2]
# with open(self.dir, "w") as f:
# f.writelines(header_file)
#
#
# def add_stone(self, pos, color):
# """Añadir piedra al sgf.
#
# :Param pos: posición de la piedra
# :Type pos: tuple
# :Param color: color de la piedra
# :Type color: int """
# coord = chr(pos[0]+97) + chr(pos[1]+97)
# with open(self.dir, "a") as f:
# if color == BLACK:
# f.write("\n;B[%s]" %coord)
# elif color == WHITE:
# f.write("\n;W[%s]" %coord)
# else:
# print _("el color debe ser BLACK or WHITE")
# self.num_jug += 1
#
#
# def end_file(self):
# """Cerrar el fichero y dejarlo listo para poder abrirlo."""
# with open(self.dir, "a") as f:
# f.write(")")
, which may contain function names, class names, or code. Output only the next line. | self.size = size |
Predict the next line for this snippet: <|code_start|># Rocamgo is recogniter of the go games by processing digital images with opencv.
# Copyright (C) 2012 Víctor Ramirez de la Corte <virako.9 at gmail dot com>
# Copyright (C) 2012 David Medina Velasco <cuidadoconeltecho at gmail dot com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def search_stones(img, corners, dp=1.7):
"""Devuelve las circunferencias encontradas en una imagen.
:Param img: imagen donde buscaremos las circunferencias
:Type img: IplImage
:Param corners: lista de esquinas
:Type corners: list
:Param dp: profundidad de búsqueda de círculos
:Type dp: int
:Keyword dp: 1.7 era el valor que mejor funcionaba. Prueba y error """
gray = CreateMat(img.width, img.height,CV_8UC1)
<|code_end|>
with the help of current file imports:
from cv import CV_8UC1
from cv import CV_BGR2GRAY
from cv import CloneMat
from cv import CvtColor
from cv import Canny
from cv import Smooth
from cv import CV_GAUSSIAN
from cv import CV_32FC3
from cv import CV_HOUGH_GRADIENT
from cv import HoughCircles
from cv import Get2D
from cv import CreateMat
from src.cte import GOBAN_SIZE
from src.cte import BLACK
from src.cte import WHITE
and context from other files:
# Path: src/cte.py
# GOBAN_SIZE = 19
#
# Path: src/cte.py
# BLACK = 1
#
# Path: src/cte.py
# WHITE = 2
, which may contain function names, class names, or code. Output only the next line. | CvtColor(img, gray, CV_BGR2GRAY) |
Given snippet: <|code_start|> HoughCircles(gray_aux, circles, CV_HOUGH_GRADIENT, dp, int(r*0.5), 50, 55,\
int(r*0.7), int(r*1.2))
return circles
def check_color_stone(pt, radious, img, threshold=190):
"""Devuelve el color de la piedra dado el centro y el radio de la piedra y una imagen. También desechamos las piedras que no sean negras o blancas.
:Param pt: centro de la piedra
:Type pt: tuple
:Param radious: radio de la piedra
:Type radious: int
:Param img: imagen donde comprobaremos el color de ciertos pixeles
:Type img: IplImage
:Param threshold: umbral de blanco
:Type threshold: int
:Keyword threshold: 190 cuando hay buena luminosidad """
black_total = 0
white_total = 0
no_color = 0
for x in range(pt[0] - radious/2, pt[0] + radious/2):
try:
pixel = Get2D(img, pt[1], x)[:-1]
except:
continue
if all(p > threshold for p in pixel):
white_total += 1
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from cv import CV_8UC1
from cv import CV_BGR2GRAY
from cv import CloneMat
from cv import CvtColor
from cv import Canny
from cv import Smooth
from cv import CV_GAUSSIAN
from cv import CV_32FC3
from cv import CV_HOUGH_GRADIENT
from cv import HoughCircles
from cv import Get2D
from cv import CreateMat
from src.cte import GOBAN_SIZE
from src.cte import BLACK
from src.cte import WHITE
and context:
# Path: src/cte.py
# GOBAN_SIZE = 19
#
# Path: src/cte.py
# BLACK = 1
#
# Path: src/cte.py
# WHITE = 2
which might include code, classes, or functions. Output only the next line. | elif all(p < 50 for p in pixel): |
Given the following code snippet before the placeholder: <|code_start|>
:Param img: imagen donde buscaremos las circunferencias
:Type img: IplImage
:Param corners: lista de esquinas
:Type corners: list
:Param dp: profundidad de búsqueda de círculos
:Type dp: int
:Keyword dp: 1.7 era el valor que mejor funcionaba. Prueba y error """
gray = CreateMat(img.width, img.height,CV_8UC1)
CvtColor(img, gray, CV_BGR2GRAY)
gray_aux = CloneMat(gray)
gray_aux_2 = CloneMat(gray)
Canny(gray, gray_aux_2, 50,55,3)
Smooth(gray_aux_2, gray_aux, CV_GAUSSIAN, 3, 5)
# creo una matriz de para guardar los circulos encontrados
circles = CreateMat(1, gray_aux.height*gray_aux.width, CV_32FC3)
# r es el la mitad del tamaño de un cuadrado, el radio deseado
r = img.width/(GOBAN_SIZE*2)
# HoughCircles(image, storage, method, dp, min_dist, param1, param2,
# min_radius, max_radius)
HoughCircles(gray_aux, circles, CV_HOUGH_GRADIENT, dp, int(r*0.5), 50, 55,\
int(r*0.7), int(r*1.2))
return circles
<|code_end|>
, predict the next line using imports from the current file:
from cv import CV_8UC1
from cv import CV_BGR2GRAY
from cv import CloneMat
from cv import CvtColor
from cv import Canny
from cv import Smooth
from cv import CV_GAUSSIAN
from cv import CV_32FC3
from cv import CV_HOUGH_GRADIENT
from cv import HoughCircles
from cv import Get2D
from cv import CreateMat
from src.cte import GOBAN_SIZE
from src.cte import BLACK
from src.cte import WHITE
and context including class names, function names, and sometimes code from other files:
# Path: src/cte.py
# GOBAN_SIZE = 19
#
# Path: src/cte.py
# BLACK = 1
#
# Path: src/cte.py
# WHITE = 2
. Output only the next line. | def check_color_stone(pt, radious, img, threshold=190): |
Continue the code snippet: <|code_start|> seq = FindContours(img, storage, CV_RETR_TREE, CV_CHAIN_APPROX_NONE,
offset=(0, 0))
sequence = []
aprox = True
while seq:
if len(seq) >= NUM_EDGES and (img.cols*img.rows) > ContourArea(seq) > \
((img.cols/2)*(img.rows/2)):
perimeter = count_perimeter(seq)
seq_app = ApproxPoly(seq, storage, CV_POLY_APPROX_DP, perimeter*0.02, 1)
if len(seq_app) == NUM_EDGES:
return seq_app
else:
return None
else:
if seq.h_next() == None:
break
else:
seq = seq.h_next()
return None
def search_goban(img):
"""Busca el tablero en una imagen.
:Param img: imagen del tablero
:Type img: IplImage # TODO comprobar tipo imagen
:Return: lista de esquinas si las encuentra, sino None
:Rtype: list or None """
aux_gray = CreateImage((img.width, img.height), IPL_DEPTH_8U, 1)
<|code_end|>
. Use current file imports:
from cv import Canny
from cv import Smooth
from cv import CreateMat
from cv import CreateMemStorage
from cv import CreateImage
from cv import FindContours
from cv import CV_RETR_TREE
from cv import CV_CHAIN_APPROX_NONE
from cv import CV_POLY_APPROX_DP
from cv import CV_RGB2GRAY
from cv import ContourArea
from cv import IPL_DEPTH_8U
from cv import CvtColor
from cv import GetMat
from cv import CV_GAUSSIAN
from cv import ApproxPoly
from math import sqrt
from src.cte import NUM_EDGES
and context (classes, functions, or code) from other files:
# Path: src/cte.py
# NUM_EDGES = 4
. Output only the next line. | CvtColor(img, aux_gray, CV_RGB2GRAY) |
Given snippet: <|code_start|> def images_true(self):
return (('img0013.png', 'img0014.png'), ('img0001.png', 'img0004.png'),
('img0014.png', 'img0013.png'), ('img0004.png', 'img0001.png'))
def images_false(self):
return (('img0001.png'), ('img0004.png'), ('img0007.png'), ('img0010.png'))
def select_corners_false(self, n):
path = 'tests/images/'
image1 = self.images_false()[n]
txt1 = image1.replace('.png', '.txt')
with open(path + txt1) as f:
corners_img1 = pickle.load(f)
return corners_img1, corners_img1
def select_corners_true(self, n):
path = 'tests/images/'
image1, image2 = self.images_true()[n]
txt1 = image1.replace('.png', '.txt')
txt2 = image2.replace('.png', '.txt')
with open(path + txt1) as f:
corners_img1 = pickle.load(f)
with open(path + txt2) as f:
corners_img2 = pickle.load(f)
return corners_img1, corners_img2
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from src.check_goban_moved import check_goban_moved
from cv import LoadImageM
from cv import CV_LOAD_IMAGE_COLOR
from nose.tools import *
import unittest
import os
import cPickle as pickle
import pickle
and context:
# Path: src/check_goban_moved.py
# def check_goban_moved(prev_corners, current_corners):
# """Comprobamos si es posible el movimiento de tablero detectado.
#
# :Param prev_corners: corners detectados anteriormente
# :Type prev_corners: list
# :Param current_corners: corners detectados actualmente
# :Type current_corners: list
# :Return: True si el tablero se ha movido
# :Rtype: bool """
#
# if not prev_corners or not current_corners:
# return True
# dist_min_of_movement = get_max_edge(prev_corners)/(2*GOBAN_SIZE)
# " Comprobamos primero si existe mucho movimiento. "
# dist = []
# directions = []
# for i in xrange(NUM_EDGES):
# dist.append(abs(distance(prev_corners[i], current_corners[i])))
# directions.append(direction(prev_corners[i], current_corners[i]))
# f = lambda x: x>1
# dist_list = filter(f, dist)
# if len(dist_list) > 2:
# # min_mov=1/3 square TODO check impossible movement (Direcction)
# min_mov = get_max_edge(prev_corners)/((GOBAN_SIZE-1)*3.0)
# dist_list.sort()
# if (dist_list[-1] - dist_list[0]) < min_mov:
# return check_directions(directions)
# elif (dist_list[-1] - dist_list[-3]) < min_mov:
# return check_directions(directions)
# else:
# return False
# else:
# return False
which might include code, classes, or functions. Output only the next line. | def test_check_goban_moved_true1(self): |
Predict the next line for this snippet: <|code_start|>
class HtmlProcessorTests(unittest.TestCase):
def testHtmlProcessor(self):
# get html
link = Link(
"https://www.deccanherald.com/city/bengaluru-crime/bitcoins-worth-rs-9-cr-seized-from-hacker-arrested-in-drugs-case-939549.html")
html = link.getHtmlStatic()
# process it
result = hp.processHtml(
"testJobId",
html,
".content .field-name-body",
[".content img"])
# validate result
self.assertEqual(len(result), 2)
#expect processor to have extracted some text
self.assertTrue(isinstance(result[0], str))
self.assertGreater(len(result[0]), 100)
#expect processor to find 1 image
<|code_end|>
with the help of current file imports:
import unittest
import newsApp.htmlProcessor as hp
from newsApp.link import Link
and context from other files:
# Path: newsApp/link.py
# class Link(DbItem):
# """
# Represents a link to a webPage.
#
# Each wlink consists of a unique identifier(the url)
# and a set of tags(key-value pairs).
# """
#
# def __init__(self, id, tags=None):
# """
# Instantiates a link object representing a link to a web page.
# """
#
# DbItem.__init__(self, id, tags)
#
# def checkExistence(self):
# try:
# _openUrlWithRetries(self.id)
# except:
# return False
#
# return True
#
# def getFinalRedirect(self):
# return getIdentifierUrl(self.id)
#
# def getHtmlDynamic(self):
# return loadPageAndGetHtml(self.id)
#
# def getHtmlStatic(self):
# return getHtmlStatic(self.id)
, which may contain function names, class names, or code. Output only the next line. | self.assertEqual(len(result[1]), 1) |
Based on the snippet: <|code_start|>
class DbHelperTests(unittest.TestCase):
def testEncryptDecrypt(self):
encKey = 'kjvorn#4gjha52sg'
<|code_end|>
, predict the immediate next line with the help of imports:
import unittest
import time
from newsApp.dbhelper import decryptSecret, encryptSecret
and context (classes, functions, sometimes code) from other files:
# Path: newsApp/dbhelper.py
# def decryptSecret(value, encryptionKey):
# """
# Helper function to decrypt a secret stored in database.
# """
#
# cipherText = base64.b64decode(value)
# aesCipher = AES.new(encryptionKey.encode('ascii'), AES.MODE_ECB)
# decryptedBytes = aesCipher.decrypt(cipherText)
# return decryptedBytes.decode('ascii').rstrip('{')
#
# def encryptSecret(value, encryptionKey):
# """
# Helper function to encrypt a secret before storing in database.
# """
#
# value = value + (16 - len(value) % 16) * '{'
# aesCipher = AES.new(encryptionKey.encode('ascii'), AES.MODE_ECB)
# cipherText = aesCipher.encrypt(value.encode('ascii'))
# encryptedBytes = base64.b64encode(cipherText)
# return encryptedBytes.decode('ascii')
. Output only the next line. | value = 'testSecretValue' |
Given the following code snippet before the placeholder: <|code_start|>
class FeedProcessorTests(unittest.TestCase):
def testProcessWebFeed(self):
dcHydFeedTags = {}
<|code_end|>
, predict the next line using imports from the current file:
import newsApp.constants as ct
import unittest
from newsApp.feed import Feed
from newsApp.feedProcessor import getLinksFromWebFeed
and context including class names, function names, and sometimes code from other files:
# Path: newsApp/feed.py
# class Feed(DbItem):
# """
# Represents a web feed.
#
# Each feed consists of a unique identifier and a set of tags(key-value pairs).
# """
#
# Path: newsApp/feedProcessor.py
# def getLinksFromWebFeed(jobId, feed):
# feedAndJobId = "Feed id: " + feed.id + ". Job id: " + jobId
#
# # get page html
# pageHtml = ""
# if FEEDTAG_IS_FEEDPAGE_STATIC in feed.tags:
# try:
# pageHtml = getHtmlStatic(feed.tags[FEEDTAG_URL])
# except (requests.ReadTimeout, requests.ConnectTimeout, requests.ConnectionError):
# logger.warning(
# "Timed out or could not connect while getting web feed page %s. Returning zero links. %s",
# feed.tags[FEEDTAG_URL],
# feedAndJobId)
# return []
# else:
# pageHtml = loadPageAndGetHtml(feed.tags[FEEDTAG_URL])
# logger.info("Got html for web page. %s.", feedAndJobId)
#
# # load entry selectors
# entrySelectors = json.loads(feed.tags[FEEDTAG_ENTRY_SELECTORS])
# logger.info(
# "Will use %i entry selectors. %s",
# len(entrySelectors),
# feedAndJobId)
#
# # Use entry selector to get entries
# links = []
# for entrySelector in entrySelectors:
# entries = hp.getSubHtmlEntries(jobId, pageHtml, entrySelector['overall'])
# logger.info(
# "Got %i entries for entry selector %s. %s",
# len(entries),
# entrySelector['overall'],
# feedAndJobId)
#
# # considering only the top 30 entries to reduce load
# for entry in entries[:30]:
# link = _linkFromWebPageEntry(jobId, entry, feed, entrySelector)
# if link:
# links.append(link)
#
# if len(links) == 0:
# logger.warning("No links found while processing webPage. %s", feedAndJobId)
# else:
# logger.info("Number of links found: %i. %s", len(links), feedAndJobId)
#
# return links
. Output only the next line. | dcHydFeedTags[ct.FEEDTAG_URL] = 'http://www.deccanchronicle.com/location/india/telangana/hyderabad' |
Predict the next line after this snippet: <|code_start|>
#nltk.download('punkt')
#nltk.download('stopwords')
#nltk.download('maxent_treebank_pos_tagger')
#nltk.download('maxent_ne_chunker')
#nltk.download('averaged_perceptron_tagger')
#nltk.download('words')
def _removePuntuation(text):
return text.translate(str.maketrans('','',string.punctuation))
def hasNumbers(inputString):
return any(char.isdigit() for char in inputString)
def getSentences(text):
return nltk.sent_tokenize(text)
def getDigitForToken(token):
digitsDict = {
'one': '1',
'two': '2',
'three': '3',
'four': '4',
'five': '5',
'six': '6',
'seven': '7',
'eight': '8',
'nine': '9',
<|code_end|>
using the current file's imports:
import itertools
import logging
import string
import math
import nltk
from retrying import retry
from .encodedEntity import EncodedEntity
from .textHelper import removeNonAsciiChars
and any relevant context from other files:
# Path: newsApp/encodedEntity.py
# class EncodedEntity:
# """
# Represents a phonetically encoded entity
# """
#
# def __init__(self, plainEntity):
# """
# Instantiates a new encoded entity object.
# Requires 'plainEntity': plain text entity to encode
# """
#
# self.plain = plainEntity
# self.encoded = jellyfish.metaphone(plainEntity)
#
# Path: newsApp/textHelper.py
# def removeNonAsciiChars(text):
# return unicodedata.normalize('NFKD', text).encode('ascii','ignore').decode()
. Output only the next line. | 'ten': '10', |
Predict the next line after this snippet: <|code_start|> return comparisionScore
@retry(stop_max_attempt_number=3)
def getEntitiesInternal(text):
if not text:
return []
text = removeNonAsciiChars(text)
sentences = nltk.sent_tokenize(text)
sentences = [nltk.word_tokenize(sent) for sent in sentences]
sentences = [nltk.pos_tag(sent) for sent in sentences]
entities = []
for sentence in sentences:
extractedEntities = nltk.ne_chunk(sentence, binary=True).subtrees(
filter = lambda x: x.label() == 'NE')
for entity in extractedEntities:
newEntity = ' '.join([leaf[0] for leaf in entity.leaves()])
entities.append(newEntity)
return list(set(entities))
def getEntities(text):
try:
return getEntitiesInternal(text)
except Exception as e:
logging.info("Could not extract entities for text: '%s'", text)
return []
<|code_end|>
using the current file's imports:
import itertools
import logging
import string
import math
import nltk
from retrying import retry
from .encodedEntity import EncodedEntity
from .textHelper import removeNonAsciiChars
and any relevant context from other files:
# Path: newsApp/encodedEntity.py
# class EncodedEntity:
# """
# Represents a phonetically encoded entity
# """
#
# def __init__(self, plainEntity):
# """
# Instantiates a new encoded entity object.
# Requires 'plainEntity': plain text entity to encode
# """
#
# self.plain = plainEntity
# self.encoded = jellyfish.metaphone(plainEntity)
#
# Path: newsApp/textHelper.py
# def removeNonAsciiChars(text):
# return unicodedata.normalize('NFKD', text).encode('ascii','ignore').decode()
. Output only the next line. | def compareEntities(entity1, entity2, doc1EntityWeights, doc2EntityWeights): |
Based on the snippet: <|code_start|>
class ShingleTableManagerTests(unittest.TestCase):
def testCreateTableAndUse(self):
testShingleTableManager = ShingleTableManager()
# wait for table to get created and add entries
<|code_end|>
, predict the immediate next line with the help of imports:
import time
import unittest
from newsApp.shingleTableManager import ShingleTableManager
and context (classes, functions, sometimes code) from other files:
# Path: newsApp/shingleTableManager.py
# class ShingleTableManager:
# """
# Manage shingle-docid pairs stored on AWS dynamo db database.
#
# Contains functions for CRUD on table, adding and querying entries etc.
# """
#
# def __init__(self):
# """
# Instantiates a new instance of ShingleTableManager class
# """
#
# self.tableConnString = os.environ['SHINGLETABLE_CONNECTIONSTRING']
# self.__table = None
#
# def __getTable(self):
# """
# Get the shingle table.
# """
#
# if not self.__table:
# shingleTableConnectionParams = parseConnectionString(
# self.tableConnString);
#
# self.__table = Table(
# shingleTableConnectionParams['name'],
# connection = getDbConnection(shingleTableConnectionParams));
#
# return self.__table;
#
# def addEntries(self, docId, shingles):
# """
# Add a entries in shingles table for shingles and docId passed.
# """
#
# shingles = list(set(shingles)) # remove duplicate shingles
# shingles.sort()
# shingles = shingles[:MAX_SHINGLES_PER_DOC]
#
# shingleTable = self.__getTable()
# with shingleTable.batch_write() as batch:
# for shingle in shingles:
# batch.put_item(data={
# 'docId': docId,
# 'shingle': shingle})
#
# def queryByShingle(self, shingle):
# """
# Retrieve list of docId's for the passed shingle.
# """
#
# shingleTable = self.__getTable()
# return (row['docId'] for row in shingleTable.query_2(
# shingle__eq = shingle))
#
# def queryByDocId(self, docId):
# """
# Retrieve list of shingles for the passed docId.
# """
#
# shingleTable = self.__getTable()
# return (row['shingle'] for row in shingleTable.query_2(
# docId__eq = docId,
# index = 'docIdIndex'))
#
# def cleanUpDocShingles(self, docId):
# """
# Cleanup all shingles of doc.
# """
#
# shingles = self.queryByDocId(docId)
# shingleTable = self.__getTable()
#
# with shingleTable.batch_write() as batch:
# for shingle in shingles:
# batch.delete_item(docId=docId, shingle=shingle)
#
# def scan(self):
# """
# List all the shingle entries currently present.
# """
#
# shingleTable = self.__getTable()
# scanResults = ((row['docId'], row['shingle']) for row in shingleTable.scan())
#
# return scanResults
. Output only the next line. | testShingleTableManager.addEntries('testDoc1', ['ab', 'bcd', 'c']) |
Here is a snippet: <|code_start|> if job.jobName == JOB_PROCESSFEED:
processFeed(job.jobId, job.jobParams[JOBARG_PROCESSFEED_FEEDID])
elif job.jobName == JOB_PROCESSLINK:
processLink(job.jobId, job.jobParams[JOBARG_PROCESSLINK_LINKID])
elif job.jobName == JOB_PARSEDOC:
cj.parseDoc(job.jobId, job.jobParams[JOBARG_PARSEDOC_DOCID])
elif job.jobName == JOB_GETCANDIDATEDOCS:
cj.getCandidateDocs(
job.jobId,
job.jobParams[JOBARG_GETCANDIDATEDOCS_DOCID])
elif job.jobName == JOB_COMPAREDOCS:
cj.compareDocs(
job.jobId,
job.jobParams[JOBARG_COMPAREDOCS_DOC1ID],
job.jobParams[JOBARG_COMPAREDOCS_DOC2ID])
elif job.jobName == JOB_COMPAREDOCSBATCH:
cj.compareDocsBatch(
job.jobId,
job.jobParams[JOBARG_COMPAREDOCSBATCH_DOCID],
job.jobParams[JOBARG_COMPAREDOCSBATCH_OTHERDOCS])
elif job.jobName == JOB_CLUSTERDOCS:
cj.clusterDocs(job.jobId)
elif job.jobName == JOB_CLEANUPDOC:
cj.cleanUpDoc(job.jobId, job.jobParams[JOBARG_CLEANUPDOC_DOCID])
elif job.jobName == JOB_CLEANUPDOCSHINGLES:
cj.cleanUpDocShingles(job.jobId, job.jobParams[JOBARG_CLEANUPDOCSHINGLES_DOCID])
elif job.jobName == JOB_CLEANUPDOCENTITIES:
cj.cleanUpDocEntities(job.jobId, job.jobParams[JOBARG_CLEANUPDOCENTITIES_DOCID])
elif job.jobName == JOB_CLEANUPDOCDISTANCES:
cj.cleanUpDocDistances(job.jobId, job.jobParams[JOBARG_CLEANUPDOCDISTANCES_DOCID])
<|code_end|>
. Write the next line using the current file imports:
import getopt
import os
import sys
import time
import threading
from multiprocessing import Process
from .constants import *
from .loggingHelper import *
from .jobManager import JobManager
from .workerJob import WorkerJob
from .feedProcessor import *
from .linkProcessor import *
from . import clusteringJobs as cj
from . import dbJobs as dj
and context from other files:
# Path: newsApp/jobManager.py
# class JobManager:
# """
# Helper class to enqueue and dequeue jobs to the job queue.
# """
#
# def __init__(self, connectionStringKey):
# """
# Instantiates the job manager.
#
# 'connectionStringKey' : name of environment variable containing the
# connection string to use.
# """
#
# self.queue = getQueue(os.environ[connectionStringKey])
#
# def enqueueJob(self, job):
# """
# Enqueue the job into the jobs queue.
# """
#
# enqueueMessage(self.queue, job.serializeToString())
#
# def convertDequeuedMessageToJob(self, dequeuedMessage):
# if dequeuedMessage is None:
# return None
# dequeuedJob = WorkerJob(None, None)
# dequeuedJob.deserializeFromString(dequeuedMessage)
# return dequeuedJob
#
# def dequeueJob(self):
# """
# Dequeue a job from the job queue.
# """
#
# dequeuedMessage = dequeueMessage(self.queue)
# return self.convertDequeuedMessageToJob(dequeuedMessage)
#
# def count(self):
# """
# Return the count of messages in queue.
# """
#
# return self.queue.count()
#
# Path: newsApp/workerJob.py
# class WorkerJob:
# """
# Represents a job which can be processed by one of the worker roles.
# """
#
# def __init__(self, jobName, jobParams, jobId = None):
# """
# Instantiates a new worker job object.
# Requires 'jobName': a string representing name of the job
# Requires 'jobParams': a dictionary where keys represnt job parameter
# names, and corresponding values the job parameter values.
# Optional 'jobId': a identier for this job. If not provided a
# randomly generated alphanumeric string is used.
# """
#
# if (jobId is None):
# self.jobId = _generateRandomJobId()
# else :
# self.jobId = jobId
#
# self.jobName = jobName
# self.jobParams = jobParams
#
# def deserializeFromString(self, serializedJob):
# """
# Sets this worker job object to the specified serialized string
# representation.
# """
#
# tempDict = json.loads(serializedJob)
# jobName = tempDict.pop('jobName', None)
# jobId = tempDict.pop('jobId', None)
# self.__init__(jobName, tempDict, jobId)
#
# def serializeToString(self):
# """
# Serialize to a human-readable string representation of this object
# """
#
# tempDict = dict(self.jobParams)
# tempDict['jobName'] = self.jobName
# tempDict['jobId'] = self.jobId
# return json.dumps(tempDict)
, which may include functions, classes, or code. Output only the next line. | elif job.jobName == JOB_PROCESSNEWCLUSTER: |
Given the following code snippet before the placeholder: <|code_start|>
def populateFeeds():
tree = ET.parse(sys.argv[1])
root = tree.getroot()
for child in root:
<|code_end|>
, predict the next line using imports from the current file:
import xml.etree.ElementTree as ET
import sys
from newsApp.feed import Feed
from newsApp.feedManager import FeedManager
and context including class names, function names, and sometimes code from other files:
# Path: newsApp/feed.py
# class Feed(DbItem):
# """
# Represents a web feed.
#
# Each feed consists of a unique identifier and a set of tags(key-value pairs).
# """
#
# Path: newsApp/feedManager.py
# class FeedManager(DbItemManagerV2):
# """
# Manage feeds stored on AWS dynamo db database.
#
# Contains functions for CRUD operations on the feeds stored
#
# Following environment variables need to be set -
# 'FEEDTAGSTABLE_CONNECTIONSTRING' : connection string of feed tags table.
# """
#
# def __init__(self):
# """
# Instantiates the feedManager.
# """
#
# DbItemManagerV2.__init__(self,
# os.environ['FEEDTAGSTABLE_CONNECTIONSTRING'])
#
# def put(self, feed):
# """
# Put a new feed.
# """
#
# # add polling info tags and put into database
# feed.tags[FEEDTAG_NEXTPOLLTIME] = int(time.time())
# if FEEDTAG_POLLFREQUENCY not in feed.tags:
# feed.tags[FEEDTAG_POLLFREQUENCY] = DEFAULT_FEED_POLLING_FREQUENCY
# DbItemManagerV2.put(self, feed)
#
# def getStaleFeeds(self):
# """
# Returns a list of feedIds of stale feeds (i.e whose next poll time
# is less than current time.
# """
#
# currentTime = int(time.time())
# scanResults = DbItemManagerV2.scan(self, nextPollTime__lte = currentTime)
# return (result.id for result in scanResults)
#
# def updateFeedOnSuccessfullPoll(self, feed):
# """
# Updates the polling related tags of specified feed and puts in db.
# """
#
# feed.tags[FEEDTAG_LASTPOLLTIME] = int(time.time())
# feed.tags[FEEDTAG_NEXTPOLLTIME] = (feed.tags[FEEDTAG_LASTPOLLTIME] +
# feed.tags[FEEDTAG_POLLFREQUENCY]*60)
# DbItemManagerV2.put(self, feed)
. Output only the next line. | feedTags = child.attrib |
Given the code snippet: <|code_start|>
class ClusterTableManagerTests(unittest.TestCase):
def testTranslateHindi(self):
with open('tests/testData/hindiLong.txt', 'r', encoding='utf8') as sourceFile:
text = sourceFile.read()
result = translateMicrosoft('job', text, 'hi')
self.assertTrue(isinstance(result, str))
<|code_end|>
, generate the next line using the imports in this file:
import unittest
from newsApp.translation import translate, translateMicrosoft, translateGoogle
and context (functions, classes, or occasionally code) from other files:
# Path: newsApp/translation.py
# def translate(jobId, text, fromLang, toLang = 'en'):
# jobInfo = "fromLang: " + fromLang + " toLang: " + toLang \
# + " Job id: " + jobId
#
# # clip text if too long to save costs
# if len(text) > 800:
# text = text[:800]
#
# if fromLang in MSTRANSLATE_LANGS and fromLang in GOOGLE_LANGS:
# msResult = translateMicrosoft(jobInfo, text, fromLang, toLang)
# if len(msResult) > 0:
# return msResult
# else:
# return translateGoogle(jobInfo, text, fromLang, toLang)
# elif fromLang in GOOGLE_LANGS:
# return translateGoogle(jobInfo, text, fromLang, toLang)
#
# def translateMicrosoft(jobInfo, text, fromLang, toLang = 'en'):
# try:
# logger.info("Started microsoft translation. %s", jobInfo)
#
# # get the access token
# auth_token = _getMicrosoftAccessToken(jobInfo)
#
# translation_args = {
# 'text': text.encode(),
# 'to': toLang,
# 'from': fromLang
# }
#
# headers={'Authorization': 'Bearer ' + auth_token.decode()}
# translate_url = 'https://api.microsofttranslator.com/V2/Ajax.svc/Translate?'
# translation_result = requests.get(
# translate_url + urllib.parse.urlencode(translation_args),
# headers=headers)
# response = translation_result.content.decode()
#
# if translation_result.status_code == 200 and \
# 'Exception:' not in response:
# logger.info("Completed microsoft translation. %s", jobInfo)
# return response
# else:
# logger.info(
# "Microsoft translation call failed. Status code %i. Response: %s",
# translation_result.status_code,
# response)
# return ""
# except Exception:
# logging.exception("Microsoft translation failed. %s", jobInfo)
# return ""
#
# def translateGoogle(jobInfo, text, fromLang, toLang = 'en'):
# try:
# logger.info("Started google translation. %s", jobInfo)
#
# service = build('translate', 'v2',
# developerKey = os.environ['GOOGLE_DEV_KEY'])
# result = service.translations().list(
# source=fromLang,
# target=toLang,
# q=text).execute()['translations'][0]['translatedText']
#
# logger.info("Completed google translation. %s", jobInfo)
# return result
# except:
# logger.exception("Google translation failed. %s", jobInfo)
# return ""
. Output only the next line. | self.assertIsNot(result, '') |
Next line prediction: <|code_start|>
logger = logging.getLogger('notifierTwitter')
class NotifierTwitter(NotifierBase):
def __init__(self):
NotifierBase.__init__(self)
self.tableConnString = os.environ['TWITTERHANDLESTABLE_CONNECTIONSTRING']
self.encryptionKey = os.environ['TWITTERHANDLESTABLE_KEY']
self.__table = None
def __getTable(self):
if not self.__table:
self.__table = getDbTable(self.tableConnString)
return self.__table
def __getKeys(self, handle):
<|code_end|>
. Use current file imports:
(import os
import logging
import time
import tweepy
from datetime import datetime
from pytz import timezone
from boto.dynamodb2.table import Table
from .dbhelper import getDbTable, decryptSecret, encryptSecret
from .notifierBase import NotifierBase)
and context including class names, function names, or small code snippets from other files:
# Path: newsApp/dbhelper.py
# def getDbTable(connectionString):
# """
# Get a dynamo db table object using connection string
# """
#
# connectionParams = parseConnectionString(connectionString)
#
# return Table(
# connectionParams['name'],
# connection = getDbConnection(connectionParams))
#
# def decryptSecret(value, encryptionKey):
# """
# Helper function to decrypt a secret stored in database.
# """
#
# cipherText = base64.b64decode(value)
# aesCipher = AES.new(encryptionKey.encode('ascii'), AES.MODE_ECB)
# decryptedBytes = aesCipher.decrypt(cipherText)
# return decryptedBytes.decode('ascii').rstrip('{')
#
# def encryptSecret(value, encryptionKey):
# """
# Helper function to encrypt a secret before storing in database.
# """
#
# value = value + (16 - len(value) % 16) * '{'
# aesCipher = AES.new(encryptionKey.encode('ascii'), AES.MODE_ECB)
# cipherText = aesCipher.encrypt(value.encode('ascii'))
# encryptedBytes = base64.b64encode(cipherText)
# return encryptedBytes.decode('ascii')
#
# Path: newsApp/notifierBase.py
# class NotifierBase:
# def __init__(self):
# self.domainName = os.environ['DOMAIN']
#
# def isNightTime(self, locale):
# # for now we only have cities in india
# india_tz = timezone('Asia/Kolkata')
# hour = datetime.now(india_tz).hour
# return hour >= 2 and hour < 9
. Output only the next line. | table = self.__getTable() |
Given snippet: <|code_start|>class NotifierTwitter(NotifierBase):
def __init__(self):
NotifierBase.__init__(self)
self.tableConnString = os.environ['TWITTERHANDLESTABLE_CONNECTIONSTRING']
self.encryptionKey = os.environ['TWITTERHANDLESTABLE_KEY']
self.__table = None
def __getTable(self):
if not self.__table:
self.__table = getDbTable(self.tableConnString)
return self.__table
def __getKeys(self, handle):
table = self.__getTable()
dbRows = list(table.query_2(handle__eq = handle))
if not dbRows:
return None
dbRow = dbRows[0]
return {
'handle': dbRow['handle'],
'consumerKey': decryptSecret(dbRow['consumerKey'], self.encryptionKey),
'consumerSecret': decryptSecret(dbRow['consumerSecret'], self.encryptionKey),
'token': decryptSecret(dbRow['token'], self.encryptionKey),
'tokenSecret': decryptSecret(dbRow['tokenSecret'], self.encryptionKey)
}
def __getTwitterApi(self, jobId, handle):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import logging
import time
import tweepy
from datetime import datetime
from pytz import timezone
from boto.dynamodb2.table import Table
from .dbhelper import getDbTable, decryptSecret, encryptSecret
from .notifierBase import NotifierBase
and context:
# Path: newsApp/dbhelper.py
# def getDbTable(connectionString):
# """
# Get a dynamo db table object using connection string
# """
#
# connectionParams = parseConnectionString(connectionString)
#
# return Table(
# connectionParams['name'],
# connection = getDbConnection(connectionParams))
#
# def decryptSecret(value, encryptionKey):
# """
# Helper function to decrypt a secret stored in database.
# """
#
# cipherText = base64.b64decode(value)
# aesCipher = AES.new(encryptionKey.encode('ascii'), AES.MODE_ECB)
# decryptedBytes = aesCipher.decrypt(cipherText)
# return decryptedBytes.decode('ascii').rstrip('{')
#
# def encryptSecret(value, encryptionKey):
# """
# Helper function to encrypt a secret before storing in database.
# """
#
# value = value + (16 - len(value) % 16) * '{'
# aesCipher = AES.new(encryptionKey.encode('ascii'), AES.MODE_ECB)
# cipherText = aesCipher.encrypt(value.encode('ascii'))
# encryptedBytes = base64.b64encode(cipherText)
# return encryptedBytes.decode('ascii')
#
# Path: newsApp/notifierBase.py
# class NotifierBase:
# def __init__(self):
# self.domainName = os.environ['DOMAIN']
#
# def isNightTime(self, locale):
# # for now we only have cities in india
# india_tz = timezone('Asia/Kolkata')
# hour = datetime.now(india_tz).hour
# return hour >= 2 and hour < 9
which might include code, classes, or functions. Output only the next line. | keys = self.__getKeys(handle) |
Next line prediction: <|code_start|> keys = self.__getKeys(handle)
logging.info("Got secrets for twitter handle %s. Job id: %s", handle, jobId)
auth = tweepy.OAuthHandler(keys['consumerKey'], keys['consumerSecret'])
auth.set_access_token(keys['token'], keys['tokenSecret'])
return tweepy.API(auth)
def addHandle(self, handle, consumerKey, consumerSecret, token, tokenSecret):
table = self.__getTable()
table.put_item(data={
'handle': handle,
'consumerKey': encryptSecret(consumerKey, self.encryptionKey),
'consumerSecret': encryptSecret(consumerSecret, self.encryptionKey),
'token': encryptSecret(token, self.encryptionKey),
'tokenSecret': encryptSecret(tokenSecret, self.encryptionKey)
})
def getNotificationText(self, cluster):
storyUrl = "https://" + self.domainName + "/story/" + cluster.articles[0]['id']
tweetText = ""
linkLength = 23 #t.co length
tweetLength = linkLength
for article in cluster.articles:
# don't tweet old articles in cluster
if article['publishedOn'] > (int(time.time()) - 18 * 60 * 60):
articleTitle = article['title']
articleLink = article['link']
articleText = articleTitle + " (via: " + articleLink + ")\n\n"
# "{} (via: {})\n".format(articleTitle, articleLink)
<|code_end|>
. Use current file imports:
(import os
import logging
import time
import tweepy
from datetime import datetime
from pytz import timezone
from boto.dynamodb2.table import Table
from .dbhelper import getDbTable, decryptSecret, encryptSecret
from .notifierBase import NotifierBase)
and context including class names, function names, or small code snippets from other files:
# Path: newsApp/dbhelper.py
# def getDbTable(connectionString):
# """
# Get a dynamo db table object using connection string
# """
#
# connectionParams = parseConnectionString(connectionString)
#
# return Table(
# connectionParams['name'],
# connection = getDbConnection(connectionParams))
#
# def decryptSecret(value, encryptionKey):
# """
# Helper function to decrypt a secret stored in database.
# """
#
# cipherText = base64.b64decode(value)
# aesCipher = AES.new(encryptionKey.encode('ascii'), AES.MODE_ECB)
# decryptedBytes = aesCipher.decrypt(cipherText)
# return decryptedBytes.decode('ascii').rstrip('{')
#
# def encryptSecret(value, encryptionKey):
# """
# Helper function to encrypt a secret before storing in database.
# """
#
# value = value + (16 - len(value) % 16) * '{'
# aesCipher = AES.new(encryptionKey.encode('ascii'), AES.MODE_ECB)
# cipherText = aesCipher.encrypt(value.encode('ascii'))
# encryptedBytes = base64.b64encode(cipherText)
# return encryptedBytes.decode('ascii')
#
# Path: newsApp/notifierBase.py
# class NotifierBase:
# def __init__(self):
# self.domainName = os.environ['DOMAIN']
#
# def isNightTime(self, locale):
# # for now we only have cities in india
# india_tz = timezone('Asia/Kolkata')
# hour = datetime.now(india_tz).hour
# return hour >= 2 and hour < 9
. Output only the next line. | articleTextLength = len(articleText) - (len(articleLink) - linkLength) |
Based on the snippet: <|code_start|>
for article in cluster.articles:
# don't tweet old articles in cluster
if article['publishedOn'] > (int(time.time()) - 18 * 60 * 60):
articleTitle = article['title']
articleLink = article['link']
articleText = articleTitle + " (via: " + articleLink + ")\n\n"
# "{} (via: {})\n".format(articleTitle, articleLink)
articleTextLength = len(articleText) - (len(articleLink) - linkLength)
if (tweetLength + articleTextLength) < 240:
tweetText = tweetText + articleText
tweetLength = tweetLength + articleTextLength
else:
break
tweetText = tweetText + storyUrl
return tweetText
def doesLocaleExist(self, locale):
if not self.__getKeys(locale):
return False
else:
return True
def notifyForLocales(self, jobId, cluster):
jobLog = "Job id: " + jobId
tweetText = self.getNotificationText(cluster)
logging.info("Going to tweet'%s'. %s", tweetText, jobLog)
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import logging
import time
import tweepy
from datetime import datetime
from pytz import timezone
from boto.dynamodb2.table import Table
from .dbhelper import getDbTable, decryptSecret, encryptSecret
from .notifierBase import NotifierBase
and context (classes, functions, sometimes code) from other files:
# Path: newsApp/dbhelper.py
# def getDbTable(connectionString):
# """
# Get a dynamo db table object using connection string
# """
#
# connectionParams = parseConnectionString(connectionString)
#
# return Table(
# connectionParams['name'],
# connection = getDbConnection(connectionParams))
#
# def decryptSecret(value, encryptionKey):
# """
# Helper function to decrypt a secret stored in database.
# """
#
# cipherText = base64.b64decode(value)
# aesCipher = AES.new(encryptionKey.encode('ascii'), AES.MODE_ECB)
# decryptedBytes = aesCipher.decrypt(cipherText)
# return decryptedBytes.decode('ascii').rstrip('{')
#
# def encryptSecret(value, encryptionKey):
# """
# Helper function to encrypt a secret before storing in database.
# """
#
# value = value + (16 - len(value) % 16) * '{'
# aesCipher = AES.new(encryptionKey.encode('ascii'), AES.MODE_ECB)
# cipherText = aesCipher.encrypt(value.encode('ascii'))
# encryptedBytes = base64.b64encode(cipherText)
# return encryptedBytes.decode('ascii')
#
# Path: newsApp/notifierBase.py
# class NotifierBase:
# def __init__(self):
# self.domainName = os.environ['DOMAIN']
#
# def isNightTime(self, locale):
# # for now we only have cities in india
# india_tz = timezone('Asia/Kolkata')
# hour = datetime.now(india_tz).hour
# return hour >= 2 and hour < 9
. Output only the next line. | for locale in cluster.locales: |
Here is a snippet: <|code_start|>
class FeedTests(unittest.TestCase):
def testCreateFeed(self):
testFeed = Feed("feedId")
<|code_end|>
. Write the next line using the current file imports:
from newsApp.feed import Feed
import unittest
and context from other files:
# Path: newsApp/feed.py
# class Feed(DbItem):
# """
# Represents a web feed.
#
# Each feed consists of a unique identifier and a set of tags(key-value pairs).
# """
, which may include functions, classes, or code. Output only the next line. | self.assertTrue(testFeed.id == "feedId") |
Based on the snippet: <|code_start|> if doc.tags[FEEDTAG_LANG] == addedDoc.tags[FEEDTAG_LANG]:
if doc.tags[TAG_PUBLISHER] != addedDoc.tags[TAG_PUBLISHER]:
return False
distance = getDocComparisionScore("processCluster", doc, addedDoc)
if distance >= DOC_DUPLICATION_SCORE_THRESHOLD:
return True
englishContentSim = getDocEnglishContentSimilarity(doc, addedDoc)
if englishContentSim >= DOC_DUPLICATION_CONTENT_THRESHOLD:
return True
return False
def _getDocsInParallel(docKeys):
que = queue.Queue()
threads_list = list()
docManager = DocManager()
for docKey in docKeys:
t = Thread(
target=lambda q, arg1: q.put(docManager.get(arg1)),
args=(que, docKey))
t.start()
threads_list.append(t)
for t in threads_list:
t.join()
docs = list()
while not que.empty():
<|code_end|>
, predict the immediate next line with the help of imports:
import hashlib
import json
import queue
from operator import itemgetter
from threading import Thread
from .constants import *
from .docHelper import getDocComparisionScore,getDocEnglishContentSimilarity
from .docManager import DocManager
from . import textHelper as th
and context (classes, functions, sometimes code) from other files:
# Path: newsApp/docHelper.py
# def getDocComparisionScore(jobInfo, doc1, doc2):
# score = 0
# if (doc1.tags[FEEDTAG_LANG] == LANG_ENGLISH) and \
# (doc2.tags[FEEDTAG_LANG] == LANG_ENGLISH):
# score = computeEnglishDocsSimScore(jobInfo, doc1, doc2)
# logger.info("Compared using shingles. %s", jobInfo)
# else:
# score = computeDocSimScoreUsingEntities(jobInfo, doc1, doc2)
# # make it slightly easier for non-english docs to get clustered.
# score = score * 1.15
# logger.info("Compared using entities. %s", jobInfo)
#
# if FEEDTAG_LOCALE in doc1.tags and FEEDTAG_LOCALE in doc2.tags and \
# doc1.tags[FEEDTAG_LOCALE] != doc2.tags[FEEDTAG_LOCALE]:
#
# logger.info(
# "The two docs are from different locations. Adding penalty. %s",
# jobInfo)
# score = score - 0.4
# if score < 0:
# score = 0
#
# if doc1.tags[TAG_PUBLISHER] != doc2.tags[TAG_PUBLISHER]:
# # make it slightly easier for different-publisher-docs to get clustered
# score = score * 1.1
#
# doc1Publisher = doc1.tags[TAG_PUBLISHER]
# doc2Publisher = doc2.tags[TAG_PUBLISHER]
# if doc1Publisher == doc2Publisher:
# penaltyPublishers = ['TOI', 'TelanganaToday', 'Hindu']
# if doc1Publisher in penaltyPublishers :
# logger.info(
# "Adding penalty for same publisher %s. %s",
# doc1Publisher,
# jobInfo)
# score = score*0.8
#
# if score > 1:
# score = 1
#
# logger.info("Comparision score: %s. %s", str(score), jobInfo)
# return score
#
# def getDocEnglishContentSimilarity(doc1, doc2):
# return th.compareUsingShingles(
# getDocEnglishContent(doc1),
# getDocEnglishContent(doc2))
#
# Path: newsApp/docManager.py
# class DocManager:
# """
# Manage documents stored in cloud.
#
# Contains functions for CRUD operations on documents
# """
#
# def __init__(self):
# """
# Instantiates a new instance of DocManager class
#
# 'bucketConnString' : connection string of s3 bucket in which docs
# are stored.
# """
#
# self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING']
# self.cache = getCache()
# self.__cacheExpiry= 900
#
# def __getBucket(self):
# bucketConnParams = parseConnectionString(self.bucketConnString)
# conn = getS3Connection(self.bucketConnString)
#
# return conn.get_bucket(bucketConnParams['bucketName'], validate=False)
#
# def __isDocNew(self, key, timeLimit):
# if _getEpochSecs(key.last_modified) < timeLimit:
# return False
#
# doc = self.get(key.name)
# return (doc.tags[LINKTAG_PUBTIME] > timeLimit) and \
# (FEEDTAG_DO_NOT_CLUSTER not in doc.tags)
#
# def put(self, doc):
# k = Key(self.__getBucket())
# k.key = doc.key
#
# # not storing tags directly in blob's metadata as the maximum size
# # allowed there is only 2kb.
# tags = dict(doc.tags)
# tags['content'] = doc.content
# keyContents = json.dumps(tags)
# k.set_contents_from_string(keyContents)
# self.cache.set(k.key, keyContents, self.__cacheExpiry)
#
# def get(self, docKey):
# keyContents = self.cache.get(docKey)
# if not keyContents:
# k = Key(self.__getBucket())
# k.key = docKey
# keyContents = k.get_contents_as_string()
# self.cache.set(docKey, keyContents, self.__cacheExpiry)
#
# storedTags = json.loads(keyContents)
# content = storedTags.pop('content', None)
# tags = storedTags
#
# return Doc(docKey, content, tags)
#
# def delete(self, docKey):
# k = Key(self.__getBucket())
# k.key = docKey
# k.delete()
# self.cache.delete(docKey)
. Output only the next line. | docs.append(que.get()) |
Using the snippet: <|code_start|> for docKey in docKeys:
t = Thread(
target=lambda q, arg1: q.put(docManager.get(arg1)),
args=(que, docKey))
t.start()
threads_list.append(t)
for t in threads_list:
t.join()
docs = list()
while not que.empty():
docs.append(que.get())
return docs
def _getDocTitle(doc):
title = doc.tags.get(LINKTAG_TITLE, "").strip()
if doc.tags[FEEDTAG_LANG] == LANG_ENGLISH:
return th.removeNonAsciiChars(title)
else:
return title
def _getDocSummary(doc):
summary = doc.tags.get(LINKTAG_SUMMARYTEXT, "").strip()
if doc.tags[FEEDTAG_LANG] == LANG_ENGLISH:
return th.removeNonAsciiChars(summary)
else:
return summary
<|code_end|>
, determine the next line of code. You have imports:
import hashlib
import json
import queue
from operator import itemgetter
from threading import Thread
from .constants import *
from .docHelper import getDocComparisionScore,getDocEnglishContentSimilarity
from .docManager import DocManager
from . import textHelper as th
and context (class names, function names, or code) available:
# Path: newsApp/docHelper.py
# def getDocComparisionScore(jobInfo, doc1, doc2):
# score = 0
# if (doc1.tags[FEEDTAG_LANG] == LANG_ENGLISH) and \
# (doc2.tags[FEEDTAG_LANG] == LANG_ENGLISH):
# score = computeEnglishDocsSimScore(jobInfo, doc1, doc2)
# logger.info("Compared using shingles. %s", jobInfo)
# else:
# score = computeDocSimScoreUsingEntities(jobInfo, doc1, doc2)
# # make it slightly easier for non-english docs to get clustered.
# score = score * 1.15
# logger.info("Compared using entities. %s", jobInfo)
#
# if FEEDTAG_LOCALE in doc1.tags and FEEDTAG_LOCALE in doc2.tags and \
# doc1.tags[FEEDTAG_LOCALE] != doc2.tags[FEEDTAG_LOCALE]:
#
# logger.info(
# "The two docs are from different locations. Adding penalty. %s",
# jobInfo)
# score = score - 0.4
# if score < 0:
# score = 0
#
# if doc1.tags[TAG_PUBLISHER] != doc2.tags[TAG_PUBLISHER]:
# # make it slightly easier for different-publisher-docs to get clustered
# score = score * 1.1
#
# doc1Publisher = doc1.tags[TAG_PUBLISHER]
# doc2Publisher = doc2.tags[TAG_PUBLISHER]
# if doc1Publisher == doc2Publisher:
# penaltyPublishers = ['TOI', 'TelanganaToday', 'Hindu']
# if doc1Publisher in penaltyPublishers :
# logger.info(
# "Adding penalty for same publisher %s. %s",
# doc1Publisher,
# jobInfo)
# score = score*0.8
#
# if score > 1:
# score = 1
#
# logger.info("Comparision score: %s. %s", str(score), jobInfo)
# return score
#
# def getDocEnglishContentSimilarity(doc1, doc2):
# return th.compareUsingShingles(
# getDocEnglishContent(doc1),
# getDocEnglishContent(doc2))
#
# Path: newsApp/docManager.py
# class DocManager:
# """
# Manage documents stored in cloud.
#
# Contains functions for CRUD operations on documents
# """
#
# def __init__(self):
# """
# Instantiates a new instance of DocManager class
#
# 'bucketConnString' : connection string of s3 bucket in which docs
# are stored.
# """
#
# self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING']
# self.cache = getCache()
# self.__cacheExpiry= 900
#
# def __getBucket(self):
# bucketConnParams = parseConnectionString(self.bucketConnString)
# conn = getS3Connection(self.bucketConnString)
#
# return conn.get_bucket(bucketConnParams['bucketName'], validate=False)
#
# def __isDocNew(self, key, timeLimit):
# if _getEpochSecs(key.last_modified) < timeLimit:
# return False
#
# doc = self.get(key.name)
# return (doc.tags[LINKTAG_PUBTIME] > timeLimit) and \
# (FEEDTAG_DO_NOT_CLUSTER not in doc.tags)
#
# def put(self, doc):
# k = Key(self.__getBucket())
# k.key = doc.key
#
# # not storing tags directly in blob's metadata as the maximum size
# # allowed there is only 2kb.
# tags = dict(doc.tags)
# tags['content'] = doc.content
# keyContents = json.dumps(tags)
# k.set_contents_from_string(keyContents)
# self.cache.set(k.key, keyContents, self.__cacheExpiry)
#
# def get(self, docKey):
# keyContents = self.cache.get(docKey)
# if not keyContents:
# k = Key(self.__getBucket())
# k.key = docKey
# keyContents = k.get_contents_as_string()
# self.cache.set(docKey, keyContents, self.__cacheExpiry)
#
# storedTags = json.loads(keyContents)
# content = storedTags.pop('content', None)
# tags = storedTags
#
# return Doc(docKey, content, tags)
#
# def delete(self, docKey):
# k = Key(self.__getBucket())
# k.key = docKey
# k.delete()
# self.cache.delete(docKey)
. Output only the next line. | def _getImagesForDoc(doc): |
Here is a snippet: <|code_start|>
def _getDocsInParallel(docKeys):
que = queue.Queue()
threads_list = list()
docManager = DocManager()
for docKey in docKeys:
t = Thread(
target=lambda q, arg1: q.put(docManager.get(arg1)),
args=(que, docKey))
t.start()
threads_list.append(t)
for t in threads_list:
t.join()
docs = list()
while not que.empty():
docs.append(que.get())
return docs
def _getDocTitle(doc):
title = doc.tags.get(LINKTAG_TITLE, "").strip()
if doc.tags[FEEDTAG_LANG] == LANG_ENGLISH:
return th.removeNonAsciiChars(title)
else:
return title
def _getDocSummary(doc):
summary = doc.tags.get(LINKTAG_SUMMARYTEXT, "").strip()
<|code_end|>
. Write the next line using the current file imports:
import hashlib
import json
import queue
from operator import itemgetter
from threading import Thread
from .constants import *
from .docHelper import getDocComparisionScore,getDocEnglishContentSimilarity
from .docManager import DocManager
from . import textHelper as th
and context from other files:
# Path: newsApp/docHelper.py
# def getDocComparisionScore(jobInfo, doc1, doc2):
# score = 0
# if (doc1.tags[FEEDTAG_LANG] == LANG_ENGLISH) and \
# (doc2.tags[FEEDTAG_LANG] == LANG_ENGLISH):
# score = computeEnglishDocsSimScore(jobInfo, doc1, doc2)
# logger.info("Compared using shingles. %s", jobInfo)
# else:
# score = computeDocSimScoreUsingEntities(jobInfo, doc1, doc2)
# # make it slightly easier for non-english docs to get clustered.
# score = score * 1.15
# logger.info("Compared using entities. %s", jobInfo)
#
# if FEEDTAG_LOCALE in doc1.tags and FEEDTAG_LOCALE in doc2.tags and \
# doc1.tags[FEEDTAG_LOCALE] != doc2.tags[FEEDTAG_LOCALE]:
#
# logger.info(
# "The two docs are from different locations. Adding penalty. %s",
# jobInfo)
# score = score - 0.4
# if score < 0:
# score = 0
#
# if doc1.tags[TAG_PUBLISHER] != doc2.tags[TAG_PUBLISHER]:
# # make it slightly easier for different-publisher-docs to get clustered
# score = score * 1.1
#
# doc1Publisher = doc1.tags[TAG_PUBLISHER]
# doc2Publisher = doc2.tags[TAG_PUBLISHER]
# if doc1Publisher == doc2Publisher:
# penaltyPublishers = ['TOI', 'TelanganaToday', 'Hindu']
# if doc1Publisher in penaltyPublishers :
# logger.info(
# "Adding penalty for same publisher %s. %s",
# doc1Publisher,
# jobInfo)
# score = score*0.8
#
# if score > 1:
# score = 1
#
# logger.info("Comparision score: %s. %s", str(score), jobInfo)
# return score
#
# def getDocEnglishContentSimilarity(doc1, doc2):
# return th.compareUsingShingles(
# getDocEnglishContent(doc1),
# getDocEnglishContent(doc2))
#
# Path: newsApp/docManager.py
# class DocManager:
# """
# Manage documents stored in cloud.
#
# Contains functions for CRUD operations on documents
# """
#
# def __init__(self):
# """
# Instantiates a new instance of DocManager class
#
# 'bucketConnString' : connection string of s3 bucket in which docs
# are stored.
# """
#
# self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING']
# self.cache = getCache()
# self.__cacheExpiry= 900
#
# def __getBucket(self):
# bucketConnParams = parseConnectionString(self.bucketConnString)
# conn = getS3Connection(self.bucketConnString)
#
# return conn.get_bucket(bucketConnParams['bucketName'], validate=False)
#
# def __isDocNew(self, key, timeLimit):
# if _getEpochSecs(key.last_modified) < timeLimit:
# return False
#
# doc = self.get(key.name)
# return (doc.tags[LINKTAG_PUBTIME] > timeLimit) and \
# (FEEDTAG_DO_NOT_CLUSTER not in doc.tags)
#
# def put(self, doc):
# k = Key(self.__getBucket())
# k.key = doc.key
#
# # not storing tags directly in blob's metadata as the maximum size
# # allowed there is only 2kb.
# tags = dict(doc.tags)
# tags['content'] = doc.content
# keyContents = json.dumps(tags)
# k.set_contents_from_string(keyContents)
# self.cache.set(k.key, keyContents, self.__cacheExpiry)
#
# def get(self, docKey):
# keyContents = self.cache.get(docKey)
# if not keyContents:
# k = Key(self.__getBucket())
# k.key = docKey
# keyContents = k.get_contents_as_string()
# self.cache.set(docKey, keyContents, self.__cacheExpiry)
#
# storedTags = json.loads(keyContents)
# content = storedTags.pop('content', None)
# tags = storedTags
#
# return Doc(docKey, content, tags)
#
# def delete(self, docKey):
# k = Key(self.__getBucket())
# k.key = docKey
# k.delete()
# self.cache.delete(docKey)
, which may include functions, classes, or code. Output only the next line. | if doc.tags[FEEDTAG_LANG] == LANG_ENGLISH: |
Given the following code snippet before the placeholder: <|code_start|>
UNECESSARY_FEED_TAGS = [
FEEDTAG_TYPE,
FEEDTAG_NEXTPOLLTIME,
FEEDTAG_POLLFREQUENCY,
<|code_end|>
, predict the next line using imports from the current file:
import calendar
import json
import time
import logging
import feedparser
import requests
from .constants import *
from .feed import Feed
from .feedManager import FeedManager
from .webPageLoader import *
from . import htmlProcessor as hp
from .minerJobManager import MinerJobManager
from .link import Link
from .linkManager import LinkManager
from .workerJob import WorkerJob
and context including class names, function names, and sometimes code from other files:
# Path: newsApp/feed.py
# class Feed(DbItem):
# """
# Represents a web feed.
#
# Each feed consists of a unique identifier and a set of tags(key-value pairs).
# """
#
# Path: newsApp/feedManager.py
# class FeedManager(DbItemManagerV2):
# """
# Manage feeds stored on AWS dynamo db database.
#
# Contains functions for CRUD operations on the feeds stored
#
# Following environment variables need to be set -
# 'FEEDTAGSTABLE_CONNECTIONSTRING' : connection string of feed tags table.
# """
#
# def __init__(self):
# """
# Instantiates the feedManager.
# """
#
# DbItemManagerV2.__init__(self,
# os.environ['FEEDTAGSTABLE_CONNECTIONSTRING'])
#
# def put(self, feed):
# """
# Put a new feed.
# """
#
# # add polling info tags and put into database
# feed.tags[FEEDTAG_NEXTPOLLTIME] = int(time.time())
# if FEEDTAG_POLLFREQUENCY not in feed.tags:
# feed.tags[FEEDTAG_POLLFREQUENCY] = DEFAULT_FEED_POLLING_FREQUENCY
# DbItemManagerV2.put(self, feed)
#
# def getStaleFeeds(self):
# """
# Returns a list of feedIds of stale feeds (i.e whose next poll time
# is less than current time.
# """
#
# currentTime = int(time.time())
# scanResults = DbItemManagerV2.scan(self, nextPollTime__lte = currentTime)
# return (result.id for result in scanResults)
#
# def updateFeedOnSuccessfullPoll(self, feed):
# """
# Updates the polling related tags of specified feed and puts in db.
# """
#
# feed.tags[FEEDTAG_LASTPOLLTIME] = int(time.time())
# feed.tags[FEEDTAG_NEXTPOLLTIME] = (feed.tags[FEEDTAG_LASTPOLLTIME] +
# feed.tags[FEEDTAG_POLLFREQUENCY]*60)
# DbItemManagerV2.put(self, feed)
#
# Path: newsApp/minerJobManager.py
# class MinerJobManager(JobManager):
# """
# Helper class to enqueue and dequeue jobs to the miner job queue.
# """
#
# def __init__(self):
# JobManager.__init__(self, 'MINER_JOBSQUEUE_CONNECTIONSTRING')
#
# Path: newsApp/link.py
# class Link(DbItem):
# """
# Represents a link to a webPage.
#
# Each wlink consists of a unique identifier(the url)
# and a set of tags(key-value pairs).
# """
#
# def __init__(self, id, tags=None):
# """
# Instantiates a link object representing a link to a web page.
# """
#
# DbItem.__init__(self, id, tags)
#
# def checkExistence(self):
# try:
# _openUrlWithRetries(self.id)
# except:
# return False
#
# return True
#
# def getFinalRedirect(self):
# return getIdentifierUrl(self.id)
#
# def getHtmlDynamic(self):
# return loadPageAndGetHtml(self.id)
#
# def getHtmlStatic(self):
# return getHtmlStatic(self.id)
#
# Path: newsApp/linkManager.py
# class LinkManager(DbItemManagerV2):
# """
# Manage links stored on AWS dynamo db database.
#
# Contains functions for CRUD operations on the links stored
#
# Following environment variables need to be set -
# 'LINKTAGSTABLE_CONNECTIONSTRING' : connection string of link tags table.
# """
#
# def __init__(self):
# """
# Instantiates the linkManager.
# """
#
# DbItemManagerV2.__init__(self,
# os.environ['LINKTAGSTABLE_CONNECTIONSTRING'])
#
# def get(self, linkId):
# """
# Put a new link.
# """
#
# dbItem = DbItemManagerV2.get(self, linkId)
# link = Link(linkId, dbItem.tags)
#
# return link
#
# def getUnprocessedLinks(self):
# return DbItemManagerV2.query_2(
# self,
# isProcessed__eq = 'false',
# index = 'isProcessed-itemId-index')
#
# Path: newsApp/workerJob.py
# class WorkerJob:
# """
# Represents a job which can be processed by one of the worker roles.
# """
#
# def __init__(self, jobName, jobParams, jobId = None):
# """
# Instantiates a new worker job object.
# Requires 'jobName': a string representing name of the job
# Requires 'jobParams': a dictionary where keys represnt job parameter
# names, and corresponding values the job parameter values.
# Optional 'jobId': a identier for this job. If not provided a
# randomly generated alphanumeric string is used.
# """
#
# if (jobId is None):
# self.jobId = _generateRandomJobId()
# else :
# self.jobId = jobId
#
# self.jobName = jobName
# self.jobParams = jobParams
#
# def deserializeFromString(self, serializedJob):
# """
# Sets this worker job object to the specified serialized string
# representation.
# """
#
# tempDict = json.loads(serializedJob)
# jobName = tempDict.pop('jobName', None)
# jobId = tempDict.pop('jobId', None)
# self.__init__(jobName, tempDict, jobId)
#
# def serializeToString(self):
# """
# Serialize to a human-readable string representation of this object
# """
#
# tempDict = dict(self.jobParams)
# tempDict['jobName'] = self.jobName
# tempDict['jobId'] = self.jobId
# return json.dumps(tempDict)
. Output only the next line. | FEEDTAG_LASTPOLLTIME, |
Given the following code snippet before the placeholder: <|code_start|>
def _openUrlWithRetries(url, max_retries = 1):
nRetries = 0
while (True):
try:
response = requests.get(url, timeout = 20)
return response
<|code_end|>
, predict the next line using imports from the current file:
from .dbItem import DbItem
from .webPageLoader import loadPageAndGetHtml, getHtmlStatic
import time
import requests
and context including class names, function names, and sometimes code from other files:
# Path: newsApp/dbItem.py
# class DbItem:
# """
# Represents a generic item which can be added/updated/deleted/retrieved to a
# database.
#
# Each dbItem consists of a unique identifier and a set of tags(which are simple key-value pairs).
# """
#
# def __init__(self, id, tags=None):
# """
# Instantiates a new dbItem object.
#
# Requires a 'id' parameter which should be a simple representing a unique
# identifier for the dbItem.
#
# Optionally accepts a 'tags' parameter which should be a dictionary of
# key-value pairs. e.g. you can have a tag for the language of dbItem.
# """
#
# self.id = id
# self.tags = tags
#
# Path: newsApp/webPageLoader.py
# def loadPageAndGetHtml(url):
# pageHtml = ""
#
# try:
# with warnings.catch_warnings():
# #ignore PhantomJS deprecated warning.
# warnings.simplefilter("ignore")
#
# driver = webdriver.PhantomJS(executable_path='node_modules/phantomjs-prebuilt/bin/phantomjs')
# driver.implicitly_wait(30)
# driver.set_page_load_timeout(30)
#
# try:
# driver.get(url)
# pageHtml = driver.page_source
# finally:
# try:
# driver.service.process.send_signal(signal.SIGTERM)
# driver.quit()
# except:
# pass
# except:
# logger.info("Could not load page with url %s through selenium", url)
# pass
#
# if not pageHtml:
# pageHtml = getHtmlStatic(url)
#
# return pageHtml
#
# def getHtmlStatic(url, max_retries = 1):
# nRetries = 0
# headers = {'User-Agent': 'newsbot'}
# while (True):
# try:
# return requests.get(url, headers = headers, timeout = 20).text
# except Exception as e:
# if (nRetries >= max_retries):
# raise e
# else:
# time.sleep(5)
# nRetries = nRetries + 1
. Output only the next line. | except Exception as e: |
Predict the next line after this snippet: <|code_start|>
def _openUrlWithRetries(url, max_retries = 1):
nRetries = 0
while (True):
<|code_end|>
using the current file's imports:
from .dbItem import DbItem
from .webPageLoader import loadPageAndGetHtml, getHtmlStatic
import time
import requests
and any relevant context from other files:
# Path: newsApp/dbItem.py
# class DbItem:
# """
# Represents a generic item which can be added/updated/deleted/retrieved to a
# database.
#
# Each dbItem consists of a unique identifier and a set of tags(which are simple key-value pairs).
# """
#
# def __init__(self, id, tags=None):
# """
# Instantiates a new dbItem object.
#
# Requires a 'id' parameter which should be a simple representing a unique
# identifier for the dbItem.
#
# Optionally accepts a 'tags' parameter which should be a dictionary of
# key-value pairs. e.g. you can have a tag for the language of dbItem.
# """
#
# self.id = id
# self.tags = tags
#
# Path: newsApp/webPageLoader.py
# def loadPageAndGetHtml(url):
# pageHtml = ""
#
# try:
# with warnings.catch_warnings():
# #ignore PhantomJS deprecated warning.
# warnings.simplefilter("ignore")
#
# driver = webdriver.PhantomJS(executable_path='node_modules/phantomjs-prebuilt/bin/phantomjs')
# driver.implicitly_wait(30)
# driver.set_page_load_timeout(30)
#
# try:
# driver.get(url)
# pageHtml = driver.page_source
# finally:
# try:
# driver.service.process.send_signal(signal.SIGTERM)
# driver.quit()
# except:
# pass
# except:
# logger.info("Could not load page with url %s through selenium", url)
# pass
#
# if not pageHtml:
# pageHtml = getHtmlStatic(url)
#
# return pageHtml
#
# def getHtmlStatic(url, max_retries = 1):
# nRetries = 0
# headers = {'User-Agent': 'newsbot'}
# while (True):
# try:
# return requests.get(url, headers = headers, timeout = 20).text
# except Exception as e:
# if (nRetries >= max_retries):
# raise e
# else:
# time.sleep(5)
# nRetries = nRetries + 1
. Output only the next line. | try: |
Predict the next line for this snippet: <|code_start|>
def _openUrlWithRetries(url, max_retries = 1):
nRetries = 0
while (True):
try:
<|code_end|>
with the help of current file imports:
from .dbItem import DbItem
from .webPageLoader import loadPageAndGetHtml, getHtmlStatic
import time
import requests
and context from other files:
# Path: newsApp/dbItem.py
# class DbItem:
# """
# Represents a generic item which can be added/updated/deleted/retrieved to a
# database.
#
# Each dbItem consists of a unique identifier and a set of tags(which are simple key-value pairs).
# """
#
# def __init__(self, id, tags=None):
# """
# Instantiates a new dbItem object.
#
# Requires a 'id' parameter which should be a simple representing a unique
# identifier for the dbItem.
#
# Optionally accepts a 'tags' parameter which should be a dictionary of
# key-value pairs. e.g. you can have a tag for the language of dbItem.
# """
#
# self.id = id
# self.tags = tags
#
# Path: newsApp/webPageLoader.py
# def loadPageAndGetHtml(url):
# pageHtml = ""
#
# try:
# with warnings.catch_warnings():
# #ignore PhantomJS deprecated warning.
# warnings.simplefilter("ignore")
#
# driver = webdriver.PhantomJS(executable_path='node_modules/phantomjs-prebuilt/bin/phantomjs')
# driver.implicitly_wait(30)
# driver.set_page_load_timeout(30)
#
# try:
# driver.get(url)
# pageHtml = driver.page_source
# finally:
# try:
# driver.service.process.send_signal(signal.SIGTERM)
# driver.quit()
# except:
# pass
# except:
# logger.info("Could not load page with url %s through selenium", url)
# pass
#
# if not pageHtml:
# pageHtml = getHtmlStatic(url)
#
# return pageHtml
#
# def getHtmlStatic(url, max_retries = 1):
# nRetries = 0
# headers = {'User-Agent': 'newsbot'}
# while (True):
# try:
# return requests.get(url, headers = headers, timeout = 20).text
# except Exception as e:
# if (nRetries >= max_retries):
# raise e
# else:
# time.sleep(5)
# nRetries = nRetries + 1
, which may contain function names, class names, or code. Output only the next line. | response = requests.get(url, timeout = 20) |
Given the code snippet: <|code_start|>
InitLogging()
def clusterDocsJob():
clusterDocs("clusterDocs")
if __name__ == '__main__':
<|code_end|>
, generate the next line using the imports in this file:
from .constants import *
from .loggingHelper import *
from .clusteringJobs import clusterDocs
and context (functions, classes, or occasionally code) from other files:
# Path: newsApp/clusteringJobs.py
# def clusterDocs(jobId):
# jobInfo = "Job id: " + jobId
# logger.info("Started clustering docs. %s.", jobInfo)
#
# distanceTableManager = DistanceTableManager()
# clusterManager = ClusterManager()
#
# distances = distanceTableManager.getDistanceMatrix()
# logger.info("Got the distance matrix. %s.", jobInfo)
#
# clusters = list(clusterManager.getCurrentClusters())
# logger.info("Got the clusters. %s.", jobInfo)
#
# logger.info("Started clustering. %s.", jobInfo)
# clusterHierarchical(jobInfo, clusters, distances)
# logger.info("Finished clustering. %s.", jobInfo)
#
# clusterManager.putCurrentClusters(clusters)
# logger.info("Put the computed clusters. %s.", jobInfo)
. Output only the next line. | clusterDocsJob() |
Predict the next line for this snippet: <|code_start|>
class DocManagerTests(unittest.TestCase):
def testPutGetDeleteDoc(self):
testDocManager = DocManager()
testDoc = Doc(
<|code_end|>
with the help of current file imports:
import unittest
from newsApp.doc import Doc
from newsApp.docManager import DocManager
and context from other files:
# Path: newsApp/doc.py
# class Doc:
# """
# Represents a document.
#
# Each doc consists of an auto-generated id, original, processed content
# and some tags(which are key-value pairs to help in clustering).
# """
#
# def __init__(self, key, content, tags):
# """
# Instantiates a new document object.
#
# """
#
# self.key = key.upper();
# self.content = content;
# self.tags = tags;
#
# Path: newsApp/docManager.py
# class DocManager:
# """
# Manage documents stored in cloud.
#
# Contains functions for CRUD operations on documents
# """
#
# def __init__(self):
# """
# Instantiates a new instance of DocManager class
#
# 'bucketConnString' : connection string of s3 bucket in which docs
# are stored.
# """
#
# self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING']
# self.cache = getCache()
# self.__cacheExpiry= 900
#
# def __getBucket(self):
# bucketConnParams = parseConnectionString(self.bucketConnString)
# conn = getS3Connection(self.bucketConnString)
#
# return conn.get_bucket(bucketConnParams['bucketName'], validate=False)
#
# def __isDocNew(self, key, timeLimit):
# if _getEpochSecs(key.last_modified) < timeLimit:
# return False
#
# doc = self.get(key.name)
# return (doc.tags[LINKTAG_PUBTIME] > timeLimit) and \
# (FEEDTAG_DO_NOT_CLUSTER not in doc.tags)
#
# def put(self, doc):
# k = Key(self.__getBucket())
# k.key = doc.key
#
# # not storing tags directly in blob's metadata as the maximum size
# # allowed there is only 2kb.
# tags = dict(doc.tags)
# tags['content'] = doc.content
# keyContents = json.dumps(tags)
# k.set_contents_from_string(keyContents)
# self.cache.set(k.key, keyContents, self.__cacheExpiry)
#
# def get(self, docKey):
# keyContents = self.cache.get(docKey)
# if not keyContents:
# k = Key(self.__getBucket())
# k.key = docKey
# keyContents = k.get_contents_as_string()
# self.cache.set(docKey, keyContents, self.__cacheExpiry)
#
# storedTags = json.loads(keyContents)
# content = storedTags.pop('content', None)
# tags = storedTags
#
# return Doc(docKey, content, tags)
#
# def delete(self, docKey):
# k = Key(self.__getBucket())
# k.key = docKey
# k.delete()
# self.cache.delete(docKey)
, which may contain function names, class names, or code. Output only the next line. | 'unittest', |
Continue the code snippet: <|code_start|>
class DocManagerTests(unittest.TestCase):
def testPutGetDeleteDoc(self):
testDocManager = DocManager()
testDoc = Doc(
'unittest',
'docContent',
{ 'tag1' : 'value1', 'tag2' : 'value2' })
# put the doc
testDocManager.put(testDoc)
# get the doc and validate it's same as one you put
retrievedDoc = testDocManager.get(testDoc.key)
self.assertTrue(retrievedDoc.key == testDoc.key)
<|code_end|>
. Use current file imports:
import unittest
from newsApp.doc import Doc
from newsApp.docManager import DocManager
and context (classes, functions, or code) from other files:
# Path: newsApp/doc.py
# class Doc:
# """
# Represents a document.
#
# Each doc consists of an auto-generated id, original, processed content
# and some tags(which are key-value pairs to help in clustering).
# """
#
# def __init__(self, key, content, tags):
# """
# Instantiates a new document object.
#
# """
#
# self.key = key.upper();
# self.content = content;
# self.tags = tags;
#
# Path: newsApp/docManager.py
# class DocManager:
# """
# Manage documents stored in cloud.
#
# Contains functions for CRUD operations on documents
# """
#
# def __init__(self):
# """
# Instantiates a new instance of DocManager class
#
# 'bucketConnString' : connection string of s3 bucket in which docs
# are stored.
# """
#
# self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING']
# self.cache = getCache()
# self.__cacheExpiry= 900
#
# def __getBucket(self):
# bucketConnParams = parseConnectionString(self.bucketConnString)
# conn = getS3Connection(self.bucketConnString)
#
# return conn.get_bucket(bucketConnParams['bucketName'], validate=False)
#
# def __isDocNew(self, key, timeLimit):
# if _getEpochSecs(key.last_modified) < timeLimit:
# return False
#
# doc = self.get(key.name)
# return (doc.tags[LINKTAG_PUBTIME] > timeLimit) and \
# (FEEDTAG_DO_NOT_CLUSTER not in doc.tags)
#
# def put(self, doc):
# k = Key(self.__getBucket())
# k.key = doc.key
#
# # not storing tags directly in blob's metadata as the maximum size
# # allowed there is only 2kb.
# tags = dict(doc.tags)
# tags['content'] = doc.content
# keyContents = json.dumps(tags)
# k.set_contents_from_string(keyContents)
# self.cache.set(k.key, keyContents, self.__cacheExpiry)
#
# def get(self, docKey):
# keyContents = self.cache.get(docKey)
# if not keyContents:
# k = Key(self.__getBucket())
# k.key = docKey
# keyContents = k.get_contents_as_string()
# self.cache.set(docKey, keyContents, self.__cacheExpiry)
#
# storedTags = json.loads(keyContents)
# content = storedTags.pop('content', None)
# tags = storedTags
#
# return Doc(docKey, content, tags)
#
# def delete(self, docKey):
# k = Key(self.__getBucket())
# k.key = docKey
# k.delete()
# self.cache.delete(docKey)
. Output only the next line. | self.assertTrue(retrievedDoc.content == testDoc.content) |
Given the following code snippet before the placeholder: <|code_start|>
class FeedManagerTests(unittest.TestCase):
def testPutGetDelete(self):
testFeedManager = FeedManager()
testFeed = Feed(
'testFeedName',
{ 'tag1' : 'value1', 'tag2' : 'value2' })
# put the feed
<|code_end|>
, predict the next line using imports from the current file:
from newsApp.constants import *
from newsApp.feedManager import FeedManager
from newsApp.feed import Feed
import time
import unittest
and context including class names, function names, and sometimes code from other files:
# Path: newsApp/feedManager.py
# class FeedManager(DbItemManagerV2):
# """
# Manage feeds stored on AWS dynamo db database.
#
# Contains functions for CRUD operations on the feeds stored
#
# Following environment variables need to be set -
# 'FEEDTAGSTABLE_CONNECTIONSTRING' : connection string of feed tags table.
# """
#
# def __init__(self):
# """
# Instantiates the feedManager.
# """
#
# DbItemManagerV2.__init__(self,
# os.environ['FEEDTAGSTABLE_CONNECTIONSTRING'])
#
# def put(self, feed):
# """
# Put a new feed.
# """
#
# # add polling info tags and put into database
# feed.tags[FEEDTAG_NEXTPOLLTIME] = int(time.time())
# if FEEDTAG_POLLFREQUENCY not in feed.tags:
# feed.tags[FEEDTAG_POLLFREQUENCY] = DEFAULT_FEED_POLLING_FREQUENCY
# DbItemManagerV2.put(self, feed)
#
# def getStaleFeeds(self):
# """
# Returns a list of feedIds of stale feeds (i.e whose next poll time
# is less than current time.
# """
#
# currentTime = int(time.time())
# scanResults = DbItemManagerV2.scan(self, nextPollTime__lte = currentTime)
# return (result.id for result in scanResults)
#
# def updateFeedOnSuccessfullPoll(self, feed):
# """
# Updates the polling related tags of specified feed and puts in db.
# """
#
# feed.tags[FEEDTAG_LASTPOLLTIME] = int(time.time())
# feed.tags[FEEDTAG_NEXTPOLLTIME] = (feed.tags[FEEDTAG_LASTPOLLTIME] +
# feed.tags[FEEDTAG_POLLFREQUENCY]*60)
# DbItemManagerV2.put(self, feed)
#
# Path: newsApp/feed.py
# class Feed(DbItem):
# """
# Represents a web feed.
#
# Each feed consists of a unique identifier and a set of tags(key-value pairs).
# """
. Output only the next line. | testFeedManager.put(testFeed) |
Based on the snippet: <|code_start|>### See PIN-based authorization for details at
### https://dev.twitter.com/docs/auth/pin-based-authorization
consumer_key= input('consumerKey: ').strip()
consumer_secret= input('consumerSecret: ').strip()
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
# get access token from the user and redirect to auth URL
auth_url = auth.get_authorization_url()
<|code_end|>
, predict the immediate next line with the help of imports:
import tweepy
from .notifierTwitter import NotifierTwitter
and context (classes, functions, sometimes code) from other files:
# Path: newsApp/notifierTwitter.py
# class NotifierTwitter(NotifierBase):
# def __init__(self):
# NotifierBase.__init__(self)
# self.tableConnString = os.environ['TWITTERHANDLESTABLE_CONNECTIONSTRING']
# self.encryptionKey = os.environ['TWITTERHANDLESTABLE_KEY']
# self.__table = None
#
# def __getTable(self):
# if not self.__table:
# self.__table = getDbTable(self.tableConnString)
#
# return self.__table
#
# def __getKeys(self, handle):
# table = self.__getTable()
# dbRows = list(table.query_2(handle__eq = handle))
#
# if not dbRows:
# return None
#
# dbRow = dbRows[0]
# return {
# 'handle': dbRow['handle'],
# 'consumerKey': decryptSecret(dbRow['consumerKey'], self.encryptionKey),
# 'consumerSecret': decryptSecret(dbRow['consumerSecret'], self.encryptionKey),
# 'token': decryptSecret(dbRow['token'], self.encryptionKey),
# 'tokenSecret': decryptSecret(dbRow['tokenSecret'], self.encryptionKey)
# }
#
# def __getTwitterApi(self, jobId, handle):
# keys = self.__getKeys(handle)
# logging.info("Got secrets for twitter handle %s. Job id: %s", handle, jobId)
#
# auth = tweepy.OAuthHandler(keys['consumerKey'], keys['consumerSecret'])
# auth.set_access_token(keys['token'], keys['tokenSecret'])
# return tweepy.API(auth)
#
# def addHandle(self, handle, consumerKey, consumerSecret, token, tokenSecret):
# table = self.__getTable()
# table.put_item(data={
# 'handle': handle,
# 'consumerKey': encryptSecret(consumerKey, self.encryptionKey),
# 'consumerSecret': encryptSecret(consumerSecret, self.encryptionKey),
# 'token': encryptSecret(token, self.encryptionKey),
# 'tokenSecret': encryptSecret(tokenSecret, self.encryptionKey)
# })
#
# def getNotificationText(self, cluster):
# storyUrl = "https://" + self.domainName + "/story/" + cluster.articles[0]['id']
# tweetText = ""
# linkLength = 23 #t.co length
# tweetLength = linkLength
#
# for article in cluster.articles:
# # don't tweet old articles in cluster
# if article['publishedOn'] > (int(time.time()) - 18 * 60 * 60):
# articleTitle = article['title']
# articleLink = article['link']
# articleText = articleTitle + " (via: " + articleLink + ")\n\n"
# # "{} (via: {})\n".format(articleTitle, articleLink)
# articleTextLength = len(articleText) - (len(articleLink) - linkLength)
# if (tweetLength + articleTextLength) < 240:
# tweetText = tweetText + articleText
# tweetLength = tweetLength + articleTextLength
# else:
# break
#
# tweetText = tweetText + storyUrl
# return tweetText
#
# def doesLocaleExist(self, locale):
# if not self.__getKeys(locale):
# return False
# else:
# return True
#
# def notifyForLocales(self, jobId, cluster):
# jobLog = "Job id: " + jobId
#
# tweetText = self.getNotificationText(cluster)
# logging.info("Going to tweet'%s'. %s", tweetText, jobLog)
#
# for locale in cluster.locales:
# if self.doesLocaleExist(locale):
# api = self.__getTwitterApi(jobId, locale)
# logging.info("Got the twitter api interface for %s. %s", locale, jobLog)
#
# api.update_status(tweetText)
# logging.info("Posted the tweet successfully. Job id: %s", jobLog)
. Output only the next line. | print('Authorization URL: ' + auth_url) |
Based on the snippet: <|code_start|>
class JobManagerTests(unittest.TestCase):
def __compareJobs(self, job1, job2):
self.assertTrue(job1.jobId, job2.jobId)
self.assertTrue(job1.jobName, job2.jobName)
self.assertTrue(job1.jobParams, job2.jobParams)
def testEnqueueDequeueJobs(self):
testJob1 = WorkerJob(
'jobType1',
{ 'param1Name' : 'dummy1', 'param2Name' : 'dummy2' })
testJob2 = WorkerJob(
'jobType2',
{ 'param1Name' : 'dummy3', 'param2Name' : 'dummy4', 'param3Name' : 'dummy5' })
testJob3 = WorkerJob(
'jobType3',
{ 'param1Name' : 'dummy6' })
# enqueue job1 and retrieve using dequeueJob
testJobManager = JobManager('TEST_JOBSQUEUE_CONNECTIONSTRING')
testJobManager.enqueueJob(testJob1)
time.sleep(1)
retrievedJob1 = testJobManager.dequeueJob()
self.__compareJobs(testJob1, retrievedJob1)
# enqueue rest of jobs
testJobManager.enqueueJob(testJob2)
<|code_end|>
, predict the immediate next line with the help of imports:
import unittest
import time
from newsApp.workerJob import WorkerJob
from newsApp.jobManager import JobManager
and context (classes, functions, sometimes code) from other files:
# Path: newsApp/workerJob.py
# class WorkerJob:
# """
# Represents a job which can be processed by one of the worker roles.
# """
#
# def __init__(self, jobName, jobParams, jobId = None):
# """
# Instantiates a new worker job object.
# Requires 'jobName': a string representing name of the job
# Requires 'jobParams': a dictionary where keys represnt job parameter
# names, and corresponding values the job parameter values.
# Optional 'jobId': a identier for this job. If not provided a
# randomly generated alphanumeric string is used.
# """
#
# if (jobId is None):
# self.jobId = _generateRandomJobId()
# else :
# self.jobId = jobId
#
# self.jobName = jobName
# self.jobParams = jobParams
#
# def deserializeFromString(self, serializedJob):
# """
# Sets this worker job object to the specified serialized string
# representation.
# """
#
# tempDict = json.loads(serializedJob)
# jobName = tempDict.pop('jobName', None)
# jobId = tempDict.pop('jobId', None)
# self.__init__(jobName, tempDict, jobId)
#
# def serializeToString(self):
# """
# Serialize to a human-readable string representation of this object
# """
#
# tempDict = dict(self.jobParams)
# tempDict['jobName'] = self.jobName
# tempDict['jobId'] = self.jobId
# return json.dumps(tempDict)
#
# Path: newsApp/jobManager.py
# class JobManager:
# """
# Helper class to enqueue and dequeue jobs to the job queue.
# """
#
# def __init__(self, connectionStringKey):
# """
# Instantiates the job manager.
#
# 'connectionStringKey' : name of environment variable containing the
# connection string to use.
# """
#
# self.queue = getQueue(os.environ[connectionStringKey])
#
# def enqueueJob(self, job):
# """
# Enqueue the job into the jobs queue.
# """
#
# enqueueMessage(self.queue, job.serializeToString())
#
# def convertDequeuedMessageToJob(self, dequeuedMessage):
# if dequeuedMessage is None:
# return None
# dequeuedJob = WorkerJob(None, None)
# dequeuedJob.deserializeFromString(dequeuedMessage)
# return dequeuedJob
#
# def dequeueJob(self):
# """
# Dequeue a job from the job queue.
# """
#
# dequeuedMessage = dequeueMessage(self.queue)
# return self.convertDequeuedMessageToJob(dequeuedMessage)
#
# def count(self):
# """
# Return the count of messages in queue.
# """
#
# return self.queue.count()
. Output only the next line. | testJobManager.enqueueJob(testJob3) |
Predict the next line after this snippet: <|code_start|>
class JobManagerTests(unittest.TestCase):
def __compareJobs(self, job1, job2):
self.assertTrue(job1.jobId, job2.jobId)
self.assertTrue(job1.jobName, job2.jobName)
self.assertTrue(job1.jobParams, job2.jobParams)
<|code_end|>
using the current file's imports:
import unittest
import time
from newsApp.workerJob import WorkerJob
from newsApp.jobManager import JobManager
and any relevant context from other files:
# Path: newsApp/workerJob.py
# class WorkerJob:
# """
# Represents a job which can be processed by one of the worker roles.
# """
#
# def __init__(self, jobName, jobParams, jobId = None):
# """
# Instantiates a new worker job object.
# Requires 'jobName': a string representing name of the job
# Requires 'jobParams': a dictionary where keys represnt job parameter
# names, and corresponding values the job parameter values.
# Optional 'jobId': a identier for this job. If not provided a
# randomly generated alphanumeric string is used.
# """
#
# if (jobId is None):
# self.jobId = _generateRandomJobId()
# else :
# self.jobId = jobId
#
# self.jobName = jobName
# self.jobParams = jobParams
#
# def deserializeFromString(self, serializedJob):
# """
# Sets this worker job object to the specified serialized string
# representation.
# """
#
# tempDict = json.loads(serializedJob)
# jobName = tempDict.pop('jobName', None)
# jobId = tempDict.pop('jobId', None)
# self.__init__(jobName, tempDict, jobId)
#
# def serializeToString(self):
# """
# Serialize to a human-readable string representation of this object
# """
#
# tempDict = dict(self.jobParams)
# tempDict['jobName'] = self.jobName
# tempDict['jobId'] = self.jobId
# return json.dumps(tempDict)
#
# Path: newsApp/jobManager.py
# class JobManager:
# """
# Helper class to enqueue and dequeue jobs to the job queue.
# """
#
# def __init__(self, connectionStringKey):
# """
# Instantiates the job manager.
#
# 'connectionStringKey' : name of environment variable containing the
# connection string to use.
# """
#
# self.queue = getQueue(os.environ[connectionStringKey])
#
# def enqueueJob(self, job):
# """
# Enqueue the job into the jobs queue.
# """
#
# enqueueMessage(self.queue, job.serializeToString())
#
# def convertDequeuedMessageToJob(self, dequeuedMessage):
# if dequeuedMessage is None:
# return None
# dequeuedJob = WorkerJob(None, None)
# dequeuedJob.deserializeFromString(dequeuedMessage)
# return dequeuedJob
#
# def dequeueJob(self):
# """
# Dequeue a job from the job queue.
# """
#
# dequeuedMessage = dequeueMessage(self.queue)
# return self.convertDequeuedMessageToJob(dequeuedMessage)
#
# def count(self):
# """
# Return the count of messages in queue.
# """
#
# return self.queue.count()
. Output only the next line. | def testEnqueueDequeueJobs(self): |
Predict the next line after this snippet: <|code_start|>
f = path.join(d, "airports.dat")
if (not path.isfile(f)):
print("Please download airports.dat from "
"http://openflights.org/data.html into examples folder")
sys.exit(1)
with open(f, "r") as f:
A = [l.replace("\"", "").split(",") for l in f]
I = [i for s in sys.argv[1:] for i,e in enumerate(A)
if e[4].lower() == s.lower()]
if (len(I) < 2):
print("Please specify 2 or more 3-letter IATA airport codes "
"on the command-line")
sys.exit(2)
for i in range(0, len(I)-1):
dis,inb,fib = E.earth_gcdist(
float(A[I[i]][7])*C.Conversion.DEG_TO_RAD,
float(A[I[i]][6])*C.Conversion.DEG_TO_RAD,
float(A[I[i+1]][7])*C.Conversion.DEG_TO_RAD,
float(A[I[i+1]][6])*C.Conversion.DEG_TO_RAD)
print("%3s (%-30s) %11s, %10s ->" % (
A[I[i]][4], A[I[i]][1], A[I[i]][7], A[I[i]][6]))
print("%3s (%-30s) %11s, %10s\n" % (
A[I[i+1]][4], A[I[i+1]][1], A[I[i+1]][7], A[I[i+1]][6]))
print("%14s, %9s, %s" % (
"GC distance", "Initial", "Final bearing"))
<|code_end|>
using the current file's imports:
import sys
from os import path
from pykepler import constants as C
from pykepler import earth_figure as E
and any relevant context from other files:
# Path: pykepler/constants.py
# class SolarSystemPlanets:
# class ReturnValues:
# class Constants:
# class Conversion:
# class DegMinSec:
# MERCURY, \
# VENUS, \
# EARTH, \
# MARS, \
# JUPITER, \
# SATURN, \
# URANUS, \
# NEPTUNE = range(8)
# SUCCESS = 0
# ERR_INVALID_PLANET = -1
# ERR_INVALID_DATE = -2
# ERR_INVALID_ECCENTRICITY = -3
# ERR_CONVERGENCE = -4
# ERR_INVALID_DATA = -5
# PI = 3.141592653589793238462643
# TWO_PI = 2.0*PI
# J2000_EPOCH = 2451545.0
# MJD_EPOCH = 2400000.5
# GAUSS_GRAV_CONSTANT = 0.01720209895
# AU = 149597870.691
# DEG_TO_RAD = Constants.PI/180.0
# ACS_TO_RAD = DEG_TO_RAD/3600.0
# MAS_TO_RAD = ACS_TO_RAD/1000.0
# UAS_TO_RAD = MAS_TO_RAD/1000.0
# RAD_TO_DEG = 180.0/Constants.PI
# RAD_TO_ACS = 1.0/ACS_TO_RAD
# HRS_TO_DEG = 15.0
# DEG_TO_HRS = 1.0/HRS_TO_DEG
# RAD_TO_HRS = RAD_TO_DEG*DEG_TO_HRS
# HRS_TO_RAD = 1.0/RAD_TO_HRS
# def __init__(self, d = 0.0):
# def __repr__(self):
# def degrees(d, m, s):
#
# Path: pykepler/earth_figure.py
# def earth_figure_values(geog_latitude, height_msl):
# def earth_gcdist(lon1, lat1, lon2, lat2):
# def earth_gcend(lon1, lat1, inb, dist):
. Output only the next line. | print(u"%11.5f km, %8.4f\xB0, %8.4f\xB0\n" % ( |
Given the following code snippet before the placeholder: <|code_start|># flightseg.py - Display great circle flight segment details
# Copyright (C) 2017 Shiva Iyer <shiva.iyer AT g m a i l DOT c o m>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
d = path.dirname(path.abspath(__file__))
sys.path.append(path.dirname(d))
f = path.join(d, "airports.dat")
if (not path.isfile(f)):
print("Please download airports.dat from "
"http://openflights.org/data.html into examples folder")
<|code_end|>
, predict the next line using imports from the current file:
import sys
from os import path
from pykepler import constants as C
from pykepler import earth_figure as E
and context including class names, function names, and sometimes code from other files:
# Path: pykepler/constants.py
# class SolarSystemPlanets:
# class ReturnValues:
# class Constants:
# class Conversion:
# class DegMinSec:
# MERCURY, \
# VENUS, \
# EARTH, \
# MARS, \
# JUPITER, \
# SATURN, \
# URANUS, \
# NEPTUNE = range(8)
# SUCCESS = 0
# ERR_INVALID_PLANET = -1
# ERR_INVALID_DATE = -2
# ERR_INVALID_ECCENTRICITY = -3
# ERR_CONVERGENCE = -4
# ERR_INVALID_DATA = -5
# PI = 3.141592653589793238462643
# TWO_PI = 2.0*PI
# J2000_EPOCH = 2451545.0
# MJD_EPOCH = 2400000.5
# GAUSS_GRAV_CONSTANT = 0.01720209895
# AU = 149597870.691
# DEG_TO_RAD = Constants.PI/180.0
# ACS_TO_RAD = DEG_TO_RAD/3600.0
# MAS_TO_RAD = ACS_TO_RAD/1000.0
# UAS_TO_RAD = MAS_TO_RAD/1000.0
# RAD_TO_DEG = 180.0/Constants.PI
# RAD_TO_ACS = 1.0/ACS_TO_RAD
# HRS_TO_DEG = 15.0
# DEG_TO_HRS = 1.0/HRS_TO_DEG
# RAD_TO_HRS = RAD_TO_DEG*DEG_TO_HRS
# HRS_TO_RAD = 1.0/RAD_TO_HRS
# def __init__(self, d = 0.0):
# def __repr__(self):
# def degrees(d, m, s):
#
# Path: pykepler/earth_figure.py
# def earth_figure_values(geog_latitude, height_msl):
# def earth_gcdist(lon1, lat1, lon2, lat2):
# def earth_gcend(lon1, lat1, inb, dist):
. Output only the next line. | sys.exit(1) |
Here is a snippet: <|code_start|>
class AbstractUploadSaveHandler(with_metaclass(ABCMeta, object)):
def __init__(self, upload):
self.upload = upload
@abstractmethod
def handle_save(self):
pass
def run(self):
# Trigger state change
self.upload.start_saving()
self.upload.save()
# Initialize saving
self.handle_save()
def finish(self):
# Trigger signal
signals.saved.send(sender=self.__class__, instance=self)
# Finish
self.upload.finish()
self.upload.save()
class DefaultSaveHandler(AbstractUploadSaveHandler):
<|code_end|>
. Write the next line using the current file imports:
from abc import ABCMeta, abstractmethod
from django.core.files import File
from six import with_metaclass
from django.utils.module_loading import import_string
from rest_framework_tus import signals
from .settings import TUS_SAVE_HANDLER_CLASS
and context from other files:
# Path: rest_framework_tus/signals.py
#
# Path: rest_framework_tus/settings.py
# TUS_SAVE_HANDLER_CLASS = \
# REST_FRAMEWORK_TUS.get('SAVE_HANDLER_CLASS', 'rest_framework_tus.storage.DefaultSaveHandler')
, which may include functions, classes, or code. Output only the next line. | destination_file_field = 'uploaded_file' |
Continue the code snippet: <|code_start|>class AbstractUploadSaveHandler(with_metaclass(ABCMeta, object)):
def __init__(self, upload):
self.upload = upload
@abstractmethod
def handle_save(self):
pass
def run(self):
# Trigger state change
self.upload.start_saving()
self.upload.save()
# Initialize saving
self.handle_save()
def finish(self):
# Trigger signal
signals.saved.send(sender=self.__class__, instance=self)
# Finish
self.upload.finish()
self.upload.save()
class DefaultSaveHandler(AbstractUploadSaveHandler):
destination_file_field = 'uploaded_file'
def handle_save(self):
# Save temporary field to file field
<|code_end|>
. Use current file imports:
from abc import ABCMeta, abstractmethod
from django.core.files import File
from six import with_metaclass
from django.utils.module_loading import import_string
from rest_framework_tus import signals
from .settings import TUS_SAVE_HANDLER_CLASS
and context (classes, functions, or code) from other files:
# Path: rest_framework_tus/signals.py
#
# Path: rest_framework_tus/settings.py
# TUS_SAVE_HANDLER_CLASS = \
# REST_FRAMEWORK_TUS.get('SAVE_HANDLER_CLASS', 'rest_framework_tus.storage.DefaultSaveHandler')
. Output only the next line. | file_field = getattr(self.upload, self.destination_file_field) |
Predict the next line after this snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
class UtilsTest(TestCase):
def test_encode_64(self):
data = b'filename123.jpg'
# Encode
result = encode_base64_to_string(data)
# Decode
initial = decode_base64(result.encode('utf-8'))
<|code_end|>
using the current file's imports:
from unittest.case import TestCase
from rest_framework_tus.utils import encode_upload_metadata, encode_base64_to_string
from rest_framework_tus.compat import decode_base64
and any relevant context from other files:
# Path: rest_framework_tus/utils.py
# def encode_upload_metadata(upload_metadata):
# """
# Encodes upload metadata according to the TUS 1.0.0 spec (http://tus.io/protocols/resumable-upload.html#creation)
#
# :param dict upload_metadata:
# :return str:
# """
# # Prepare encoded data
# encoded_data = [(key, encode_base64_to_string(value))
# for (key, value) in sorted(upload_metadata.items(), key=lambda item: item[0])]
#
# # Encode into string
# return ','.join([' '.join([key, encoded_value]) for key, encoded_value in encoded_data])
#
# def encode_base64_to_string(data):
# """
# Helper to encode a string or bytes value to a base64 string as bytes
#
# :param six.text_types data:
# :return six.binary_type:
# """
#
# if not isinstance(data, six.binary_type):
# if isinstance(data, six.text_type):
# data = data.encode('utf-8')
# else:
# data = six.text_type(data).encode('utf-8')
#
# return encode_base64(data).decode('ascii').rstrip('\n')
#
# Path: rest_framework_tus/compat.py
. Output only the next line. | assert initial == data |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
class UtilsTest(TestCase):
def test_encode_64(self):
<|code_end|>
. Write the next line using the current file imports:
from unittest.case import TestCase
from rest_framework_tus.utils import encode_upload_metadata, encode_base64_to_string
from rest_framework_tus.compat import decode_base64
and context from other files:
# Path: rest_framework_tus/utils.py
# def encode_upload_metadata(upload_metadata):
# """
# Encodes upload metadata according to the TUS 1.0.0 spec (http://tus.io/protocols/resumable-upload.html#creation)
#
# :param dict upload_metadata:
# :return str:
# """
# # Prepare encoded data
# encoded_data = [(key, encode_base64_to_string(value))
# for (key, value) in sorted(upload_metadata.items(), key=lambda item: item[0])]
#
# # Encode into string
# return ','.join([' '.join([key, encoded_value]) for key, encoded_value in encoded_data])
#
# def encode_base64_to_string(data):
# """
# Helper to encode a string or bytes value to a base64 string as bytes
#
# :param six.text_types data:
# :return six.binary_type:
# """
#
# if not isinstance(data, six.binary_type):
# if isinstance(data, six.text_type):
# data = data.encode('utf-8')
# else:
# data = six.text_type(data).encode('utf-8')
#
# return encode_base64(data).decode('ascii').rstrip('\n')
#
# Path: rest_framework_tus/compat.py
, which may include functions, classes, or code. Output only the next line. | data = b'filename123.jpg' |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
logger = logging.getLogger(__name__)
@receiver(received, sender=get_upload_model())
def on_receiving_done(sender, instance, **kwargs):
logger.debug('on_receiving_done: {}'.format(instance))
save_handler = get_save_handler()
save_handler(upload=instance).run()
@receiver(saved, sender=get_upload_model())
def on_saving_done(sender, instance, **kwargs):
logger.debug('on_saving_done: {}'.format(instance))
@receiver(finished, sender=get_upload_model())
def on_finished(sender, instance, **kwargs):
<|code_end|>
, determine the next line of code. You have imports:
import logging
from django.dispatch import receiver
from rest_framework_tus.models import get_upload_model
from rest_framework_tus.signals import received, saved, finished
from rest_framework_tus.storage import get_save_handler
and context (class names, function names, or code) available:
# Path: rest_framework_tus/models.py
# def get_upload_model():
# """
# Returns the User model that is active in this project.
# """
# from django.apps import apps as django_apps
# from .settings import TUS_UPLOAD_MODEL
# try:
# return django_apps.get_model(TUS_UPLOAD_MODEL)
# except ValueError:
# raise ImproperlyConfigured('UPLOAD_MODEL must be of the form \'app_label.model_name\'')
# except LookupError:
# raise ImproperlyConfigured('UPLOAD_MODEL refers to model \'%s\' that has not been installed' % TUS_UPLOAD_MODEL)
#
# Path: rest_framework_tus/signals.py
#
# Path: rest_framework_tus/storage.py
# def get_save_handler(import_path=None):
# return import_string(import_path or TUS_SAVE_HANDLER_CLASS)
. Output only the next line. | logger.debug('on_finished: {}'.format(instance)) |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
logger = logging.getLogger(__name__)
@receiver(received, sender=get_upload_model())
def on_receiving_done(sender, instance, **kwargs):
logger.debug('on_receiving_done: {}'.format(instance))
save_handler = get_save_handler()
save_handler(upload=instance).run()
@receiver(saved, sender=get_upload_model())
<|code_end|>
, generate the next line using the imports in this file:
import logging
from django.dispatch import receiver
from rest_framework_tus.models import get_upload_model
from rest_framework_tus.signals import received, saved, finished
from rest_framework_tus.storage import get_save_handler
and context (functions, classes, or occasionally code) from other files:
# Path: rest_framework_tus/models.py
# def get_upload_model():
# """
# Returns the User model that is active in this project.
# """
# from django.apps import apps as django_apps
# from .settings import TUS_UPLOAD_MODEL
# try:
# return django_apps.get_model(TUS_UPLOAD_MODEL)
# except ValueError:
# raise ImproperlyConfigured('UPLOAD_MODEL must be of the form \'app_label.model_name\'')
# except LookupError:
# raise ImproperlyConfigured('UPLOAD_MODEL refers to model \'%s\' that has not been installed' % TUS_UPLOAD_MODEL)
#
# Path: rest_framework_tus/signals.py
#
# Path: rest_framework_tus/storage.py
# def get_save_handler(import_path=None):
# return import_string(import_path or TUS_SAVE_HANDLER_CLASS)
. Output only the next line. | def on_saving_done(sender, instance, **kwargs): |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
logger = logging.getLogger(__name__)
@receiver(received, sender=get_upload_model())
def on_receiving_done(sender, instance, **kwargs):
logger.debug('on_receiving_done: {}'.format(instance))
save_handler = get_save_handler()
<|code_end|>
, generate the next line using the imports in this file:
import logging
from django.dispatch import receiver
from rest_framework_tus.models import get_upload_model
from rest_framework_tus.signals import received, saved, finished
from rest_framework_tus.storage import get_save_handler
and context (functions, classes, or occasionally code) from other files:
# Path: rest_framework_tus/models.py
# def get_upload_model():
# """
# Returns the User model that is active in this project.
# """
# from django.apps import apps as django_apps
# from .settings import TUS_UPLOAD_MODEL
# try:
# return django_apps.get_model(TUS_UPLOAD_MODEL)
# except ValueError:
# raise ImproperlyConfigured('UPLOAD_MODEL must be of the form \'app_label.model_name\'')
# except LookupError:
# raise ImproperlyConfigured('UPLOAD_MODEL refers to model \'%s\' that has not been installed' % TUS_UPLOAD_MODEL)
#
# Path: rest_framework_tus/signals.py
#
# Path: rest_framework_tus/storage.py
# def get_save_handler(import_path=None):
# return import_string(import_path or TUS_SAVE_HANDLER_CLASS)
. Output only the next line. | save_handler(upload=instance).run() |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
logger = logging.getLogger(__name__)
@receiver(received, sender=get_upload_model())
def on_receiving_done(sender, instance, **kwargs):
logger.debug('on_receiving_done: {}'.format(instance))
save_handler = get_save_handler()
save_handler(upload=instance).run()
@receiver(saved, sender=get_upload_model())
<|code_end|>
with the help of current file imports:
import logging
from django.dispatch import receiver
from rest_framework_tus.models import get_upload_model
from rest_framework_tus.signals import received, saved, finished
from rest_framework_tus.storage import get_save_handler
and context from other files:
# Path: rest_framework_tus/models.py
# def get_upload_model():
# """
# Returns the User model that is active in this project.
# """
# from django.apps import apps as django_apps
# from .settings import TUS_UPLOAD_MODEL
# try:
# return django_apps.get_model(TUS_UPLOAD_MODEL)
# except ValueError:
# raise ImproperlyConfigured('UPLOAD_MODEL must be of the form \'app_label.model_name\'')
# except LookupError:
# raise ImproperlyConfigured('UPLOAD_MODEL refers to model \'%s\' that has not been installed' % TUS_UPLOAD_MODEL)
#
# Path: rest_framework_tus/signals.py
#
# Path: rest_framework_tus/storage.py
# def get_save_handler(import_path=None):
# return import_string(import_path or TUS_SAVE_HANDLER_CLASS)
, which may contain function names, class names, or code. Output only the next line. | def on_saving_done(sender, instance, **kwargs): |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
router = TusAPIRouter()
router.register(r'files', UploadViewSet, base_name='upload')
urlpatterns = [
<|code_end|>
, determine the next line of code. You have imports:
from django.conf.urls import url, include
from rest_framework_tus.views import UploadViewSet
from .routers import TusAPIRouter
and context (class names, function names, or code) available:
# Path: rest_framework_tus/views.py
# class UploadViewSet(TusCreateMixin,
# TusPatchMixin,
# TusHeadMixin,
# TusTerminateMixin,
# GenericViewSet):
# serializer_class = UploadSerializer
# metadata_class = UploadMetadata
# lookup_field = 'guid'
# lookup_value_regex = '[a-zA-Z0-9]{8}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{12}'
# parser_classes = [TusUploadStreamParser]
#
# def get_queryset(self):
# return get_upload_model().objects.all()
#
# Path: rest_framework_tus/routers.py
# class TusAPIRouter(SimpleRouter):
# routes = [
# # List route.
# get_list_route(),
# # Dynamically generated list routes.
# # Generated using @list_route decorator
# # on methods of the viewset.
# DynamicListRoute(
# url=r'^{prefix}/{methodname}{trailing_slash}$',
# name='{basename}-{methodnamehyphen}',
# initkwargs={}
# ),
# # Detail route.
# get_detail_route(),
# # Dynamically generated detail routes.
# # Generated using @detail_route decorator on methods of the viewset.
# DynamicDetailRoute(
# url=r'^{prefix}/{lookup}/{methodname}{trailing_slash}$',
# name='{basename}-{methodnamehyphen}',
# initkwargs={}
# ),
# ]
. Output only the next line. | url(r'', include(router.urls, namespace='api')) |
Predict the next line after this snippet: <|code_start|># -*- coding: utf-8 -*-
router = TusAPIRouter()
router.register(r'files', UploadViewSet, base_name='upload')
urlpatterns = [
url(r'', include(router.urls, namespace='api'))
<|code_end|>
using the current file's imports:
from django.conf.urls import url, include
from rest_framework_tus.views import UploadViewSet
from .routers import TusAPIRouter
and any relevant context from other files:
# Path: rest_framework_tus/views.py
# class UploadViewSet(TusCreateMixin,
# TusPatchMixin,
# TusHeadMixin,
# TusTerminateMixin,
# GenericViewSet):
# serializer_class = UploadSerializer
# metadata_class = UploadMetadata
# lookup_field = 'guid'
# lookup_value_regex = '[a-zA-Z0-9]{8}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{12}'
# parser_classes = [TusUploadStreamParser]
#
# def get_queryset(self):
# return get_upload_model().objects.all()
#
# Path: rest_framework_tus/routers.py
# class TusAPIRouter(SimpleRouter):
# routes = [
# # List route.
# get_list_route(),
# # Dynamically generated list routes.
# # Generated using @list_route decorator
# # on methods of the viewset.
# DynamicListRoute(
# url=r'^{prefix}/{methodname}{trailing_slash}$',
# name='{basename}-{methodnamehyphen}',
# initkwargs={}
# ),
# # Detail route.
# get_detail_route(),
# # Dynamically generated detail routes.
# # Generated using @detail_route decorator on methods of the viewset.
# DynamicDetailRoute(
# url=r'^{prefix}/{lookup}/{methodname}{trailing_slash}$',
# name='{basename}-{methodnamehyphen}',
# initkwargs={}
# ),
# ]
. Output only the next line. | ] |
Given the code snippet: <|code_start|>#!/usr/bin/env python3
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)
os.environ['PPP_QUESTIONPARSING_GRAMMATICAL_CONFIG'] = '../example_config.json'
default_language = 'en'
default_lookup_limit = 500 # number of uri to lookup
<|code_end|>
, generate the next line using the imports in this file:
import requests
import sys
import os
import time
import pickle
import difflib # string similarity
from conceptnet5.nodes import normalized_concept_name, uri_to_lemmas
from nltk import word_tokenize, pos_tag
from nltk.corpus import wordnet as wn
from ppp_questionparsing_grammatical import nounDB
and context (functions, classes, or occasionally code) from other files:
# Path: ppp_questionparsing_grammatical/nounDB.py
# class TextStream:
# class Nounificator:
# def __init__(self, directSymbol = ' -> ', inverseSymbol = ' <- ', separator = ', '):
# def load(self, f):
# def dump(self, data, f):
# def __init__(self):
# def select(self, x):
# def __str__(self):
# def __eq__(self, other):
# def load(self, fileName):
# def save(self, fileName):
# def _add(self, verb, noun, target):
# def addDirect(self, verb, noun):
# def addInverse(self, verb, noun):
# def addListDirect(self, verb, nounList):
# def addListInverse(self, verb, nounList):
# def _remove(self, verb, noun, target):
# def removeDirect(self, verb, noun):
# def removeInverse(self, verb, noun):
# def _removeVerb(self, verb, target):
# def removeVerbDirect(self, verb):
# def removeVerbInverse(self, verb):
# def _toNouns(self, verb, target):
# def directNouns(self, verb):
# def inverseNouns(self, verb):
# def exists(self, verb):
# def merge(self, other):
. Output only the next line. | default_number_results = 50 # number of results to return at the end |
Predict the next line for this snippet: <|code_start|>
def test_exists_table():
with db_connection.env(login_active=False):
req = app.request('/table', 'GET')
assert req.status == "200 OK"
req = app.request('/table', 'POST')
assert req.status == "405 Method Not Allowed"
def test_exists_settings():
with db_connection.env(login_active=False):
req = app.request('/settings', 'GET')
assert req.status == "200 OK"
req = app.request('/settings', 'POST')
assert req.status == "200 OK"
def test_exists_settings_page():
with db_connection.env(login_active=False):
req = app.request('/settings_page', 'GET')
assert req.status == "200 OK"
req = app.request('/settings_page', 'POST')
assert req.status == "405 Method Not Allowed"
def test_exists_login():
with db_connection.env(login_active=True):
req = app.request('/login', 'GET')
assert req.status == "200 OK"
req = app.request('/login', 'POST')
<|code_end|>
with the help of current file imports:
from spec.python import db_connection
import sam.common
import sam.constants
import web
and context from other files:
# Path: spec/python/db_connection.py
# TEST_DATABASE_MYSQL = 'samapper_test'
# TEST_DATABASE_SQLITE = '/tmp/sam_test.db'
# class Mocker(object):
# class Session(dict):
# class env(object):
# def __init__(self, *args, **kwargs):
# def __getitem__(self, k):
# def __setitem__(self, k, v):
# def __getattr__(self, name):
# def receiver(*args, **kwargs):
# def __call__(self, *args, **kwargs):
# def clear(self):
# def was_called(self, name, *args, **kwargs):
# def kill(self):
# def __init__(self, mock_input=False, login_active=None, mock_session=False, mock_render=False, lang='en'):
# def __enter__(self):
# def __exit__(self, type, value, traceback):
# def make_timestamp(timestring):
# def unix_timestamp_to_datetime(stamp):
# def get_test_db_connection():
# def setup_datasources(db, sub_id):
# def template_sql(path, *args):
# def setup_network(db, sub_id, ds_id):
# def setup_node_extras(sub_id):
, which may contain function names, class names, or code. Output only the next line. | assert req.status == "200 OK" |
Given the code snippet: <|code_start|>
logger = logging.getLogger(__name__)
class Subscriptions:
CREATE_MYSQL = os.path.join(constants.base_path, 'sql/setup_subscription_tables_mysql.sql')
CREATE_SQLITE = os.path.join(constants.base_path, 'sql/setup_subscription_tables_sqlite.sql')
DROP_SQL = os.path.join(constants.base_path, 'sql/drop_subscription.sql')
table = "Subscriptions"
<|code_end|>
, generate the next line using the imports in this file:
import os
import cPickle
import logging
import web
import sam.models.ports
from sam import constants
from sam import common
and context (functions, classes, or occasionally code) from other files:
# Path: sam/constants.py
# LDAP = {
# 'connection_string': config.get('LDAP', 'connection_string', default='')
# }
# def enable_local_mode():
# def init_urls():
# def find_url(target):
# def get_navbar(lang):
. Output only the next line. | def __init__(self, db): |
Continue the code snippet: <|code_start|> assert u.viewing == sub_id
assert u.groups.issuperset({'login', 'subscribed'})
assert 'logout' not in u.groups
assert 'unsubscribed' not in u.groups
assert 'debug' not in u.groups
with db_connection.env(login_active=True):
u = User({})
assert u.email is None
assert u.name is None
assert u.logged_in is False
assert u.plan_active is False
assert u.plan is None
assert u.subscription is None
assert u.viewing is None
assert u.groups.issuperset({'logout', 'unsubscribed'})
assert 'login' not in u.groups
assert 'subscribed' not in u.groups
assert 'debug' not in u.groups
def test_logged_in_user():
with db_connection.env(login_active=True):
login()
u = User(session)
assert u.email == 'test@email.com'
assert u.name == 'Test User'
assert u.logged_in is True
<|code_end|>
. Use current file imports:
from spec.python import db_connection
from sam.models.user import User
and context (classes, functions, or code) from other files:
# Path: spec/python/db_connection.py
# TEST_DATABASE_MYSQL = 'samapper_test'
# TEST_DATABASE_SQLITE = '/tmp/sam_test.db'
# class Mocker(object):
# class Session(dict):
# class env(object):
# def __init__(self, *args, **kwargs):
# def __getitem__(self, k):
# def __setitem__(self, k, v):
# def __getattr__(self, name):
# def receiver(*args, **kwargs):
# def __call__(self, *args, **kwargs):
# def clear(self):
# def was_called(self, name, *args, **kwargs):
# def kill(self):
# def __init__(self, mock_input=False, login_active=None, mock_session=False, mock_render=False, lang='en'):
# def __enter__(self):
# def __exit__(self, type, value, traceback):
# def make_timestamp(timestring):
# def unix_timestamp_to_datetime(stamp):
# def get_test_db_connection():
# def setup_datasources(db, sub_id):
# def template_sql(path, *args):
# def setup_network(db, sub_id, ds_id):
# def setup_node_extras(sub_id):
. Output only the next line. | assert u.plan_active is True |
Continue the code snippet: <|code_start|>
def test_add_rule():
path = "custom: test_rule.yml"
name = "test_rule_612"
description = "lorem ipsum dolor sit amet"
params = {'1': True, '2': 3.141, '3': 'else'}
r = rules.Rules(db, sub_id)
# no rules present:
r.clear()
assert r.count() == 0
# add some bad rules first:
with pytest.raises(ValueError):
r.add_rule(path, '', description, params)
with pytest.raises(ValueError):
r.add_rule(path, 42, description, params)
with pytest.raises(ValueError):
r.add_rule(path, name, False, params)
# add a real rule
r.add_rule(path, name, description, params)
assert r.count() == 1
all_rules = r.get_all_rules()
new_rule = all_rules[0]
assert new_rule.get_name() == name
assert new_rule.get_desc() == description
assert new_rule.path == path
<|code_end|>
. Use current file imports:
import cPickle
import pytest
import web
from spec.python import db_connection
from sam.models.security import rules
and context (classes, functions, or code) from other files:
# Path: spec/python/db_connection.py
# TEST_DATABASE_MYSQL = 'samapper_test'
# TEST_DATABASE_SQLITE = '/tmp/sam_test.db'
# class Mocker(object):
# class Session(dict):
# class env(object):
# def __init__(self, *args, **kwargs):
# def __getitem__(self, k):
# def __setitem__(self, k, v):
# def __getattr__(self, name):
# def receiver(*args, **kwargs):
# def __call__(self, *args, **kwargs):
# def clear(self):
# def was_called(self, name, *args, **kwargs):
# def kill(self):
# def __init__(self, mock_input=False, login_active=None, mock_session=False, mock_render=False, lang='en'):
# def __enter__(self):
# def __exit__(self, type, value, traceback):
# def make_timestamp(timestring):
# def unix_timestamp_to_datetime(stamp):
# def get_test_db_connection():
# def setup_datasources(db, sub_id):
# def template_sql(path, *args):
# def setup_network(db, sub_id, ds_id):
# def setup_node_extras(sub_id):
. Output only the next line. | def test_row_to_rule(): |
Based on the snippet: <|code_start|>
# make some a valid edits:
r = get_dummy_rule()
r.set_exposed_params({'source_ip': '44.33.22.11', 'color': 'red', 'bidirectional': 'true', 'port': '123'})
params = r.get_exposed_params()
regex_compiled = params['source_ip'].pop('regex_compiled')
assert type(rc) == type(regex_compiled)
regex_compiled = params['dest_ip'].pop('regex_compiled')
assert type(rc) == type(regex_compiled)
expected = {
'source_ip': {
'label': 'temp label',
'format': 'text',
'value': '44.33.22.11',
'regex': "^([0-9]|[01]?[0-9][0-9]|2[0-4][0-9]|25[0-5])(\\.([0-9]|[01]?[0-9][0-9]|2[0-4][0-9]|25[0-5])){3}$"
},
'dest_ip': {
'label': 'temp label 2',
'format': 'text',
'value': '5.6.7.8',
'regex': "^([0-9]|[01]?[0-9][0-9]|2[0-4][0-9]|25[0-5])(\\.([0-9]|[01]?[0-9][0-9]|2[0-4][0-9]|25[0-5])){3}$"
},
'port': {
'label': 'temp label 3',
'format': 'text',
'value': '123',
},
'bidirectional': {
'label': 'Check both ways',
'format': 'checkbox',
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import re
import yaml
import pprint
from spec.python import db_connection
from sam.models.security import rule, rule_template
from sam import constants
and context (classes, functions, sometimes code) from other files:
# Path: spec/python/db_connection.py
# TEST_DATABASE_MYSQL = 'samapper_test'
# TEST_DATABASE_SQLITE = '/tmp/sam_test.db'
# class Mocker(object):
# class Session(dict):
# class env(object):
# def __init__(self, *args, **kwargs):
# def __getitem__(self, k):
# def __setitem__(self, k, v):
# def __getattr__(self, name):
# def receiver(*args, **kwargs):
# def __call__(self, *args, **kwargs):
# def clear(self):
# def was_called(self, name, *args, **kwargs):
# def kill(self):
# def __init__(self, mock_input=False, login_active=None, mock_session=False, mock_render=False, lang='en'):
# def __enter__(self):
# def __exit__(self, type, value, traceback):
# def make_timestamp(timestring):
# def unix_timestamp_to_datetime(stamp):
# def get_test_db_connection():
# def setup_datasources(db, sub_id):
# def template_sql(path, *args):
# def setup_network(db, sub_id, ds_id):
# def setup_node_extras(sub_id):
#
# Path: sam/models/security/rule_template.py
# def get_all():
# def abs_rule_path(path):
# def get_definition(path, cache={}):
# def __init__(self, path, yml):
# def import_yml(self, data):
# def load_exposed(yml):
# def load_inclusions(self, yml):
# def load_action_defaults(yml):
# def get_exposed(self):
# def get_action_defaults(self):
# def get_inclusions(self):
# class RuleTemplate(object):
#
# Path: sam/constants.py
# LDAP = {
# 'connection_string': config.get('LDAP', 'connection_string', default='')
# }
# def enable_local_mode():
# def init_urls():
# def find_url(target):
# def get_navbar(lang):
. Output only the next line. | 'value': 'true', |
Given snippet: <|code_start|> }
assert params == expected
# try out the pretranslate
r.set_action_params({'alert_label': 'Special $rule_name Label', 'email_subject': '[SAM] $color'})
params = r.get_translation_table()
assert params['alert_label'] == 'Special My Test Rule Label'
assert params['email_subject'] == '[SAM] blue'
def test_get_conditions():
r = get_dummy_rule()
assert r.get_conditions() == "src host $source_ip and dst host $dest_ip and dst port $port"
r = rule.Rule(123, True, "my rule", "description of my rule", 'compromised.yml')
assert r.get_conditions() == "dst host in $bad_hosts"
r = rule.Rule(123, True, "my rule", "description of my rule", 'dos.yml')
assert r.get_conditions() == "having conn[links] > $threshold"
r = rule.Rule(123, True, "my rule", "description of my rule", 'plugin: missing_plugin.yml')
assert r.get_conditions() == "Error."
def test_export_params():
r = get_dummy_rule()
params = r.export_params()
assert set(params.keys()) == {'actions', 'exposed'}
expected_actions = {
'alert_active': constants.security['alert_active'],
'alert_severity': '8',
'alert_label': 'Special Label',
'email_active': constants.security['email_active'],
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import re
import yaml
import pprint
from spec.python import db_connection
from sam.models.security import rule, rule_template
from sam import constants
and context:
# Path: spec/python/db_connection.py
# TEST_DATABASE_MYSQL = 'samapper_test'
# TEST_DATABASE_SQLITE = '/tmp/sam_test.db'
# class Mocker(object):
# class Session(dict):
# class env(object):
# def __init__(self, *args, **kwargs):
# def __getitem__(self, k):
# def __setitem__(self, k, v):
# def __getattr__(self, name):
# def receiver(*args, **kwargs):
# def __call__(self, *args, **kwargs):
# def clear(self):
# def was_called(self, name, *args, **kwargs):
# def kill(self):
# def __init__(self, mock_input=False, login_active=None, mock_session=False, mock_render=False, lang='en'):
# def __enter__(self):
# def __exit__(self, type, value, traceback):
# def make_timestamp(timestring):
# def unix_timestamp_to_datetime(stamp):
# def get_test_db_connection():
# def setup_datasources(db, sub_id):
# def template_sql(path, *args):
# def setup_network(db, sub_id, ds_id):
# def setup_node_extras(sub_id):
#
# Path: sam/models/security/rule_template.py
# def get_all():
# def abs_rule_path(path):
# def get_definition(path, cache={}):
# def __init__(self, path, yml):
# def import_yml(self, data):
# def load_exposed(yml):
# def load_inclusions(self, yml):
# def load_action_defaults(yml):
# def get_exposed(self):
# def get_action_defaults(self):
# def get_inclusions(self):
# class RuleTemplate(object):
#
# Path: sam/constants.py
# LDAP = {
# 'connection_string': config.get('LDAP', 'connection_string', default='')
# }
# def enable_local_mode():
# def init_urls():
# def find_url(target):
# def get_navbar(lang):
which might include code, classes, or functions. Output only the next line. | 'email_address': 'abc@zyx.com', |
Given snippet: <|code_start|> assert actions['alert_severity'] == '8'
assert actions['alert_label'] == 'Special Label'
assert actions['email_active'] == constants.security['email_active']
assert actions['email_address'] == 'abc@zyx.com'
assert actions['email_subject'] == '[SAM] Special Email Subject'
assert actions['sms_active'] == constants.security['sms_active']
assert actions['sms_number'] == '1 123 456 7890'
assert actions['sms_message'] == '[SAM] Special SMS Message'
def test_get_set_action_params():
# verify initial conditions
r = get_dummy_rule()
actions = r.get_action_params()
expected = {
'alert_active': constants.security['alert_active'],
'alert_severity': '8',
'alert_label': 'Special Label',
'email_active': constants.security['email_active'],
'email_address': 'abc@zyx.com',
'email_subject': '[SAM] Special Email Subject',
'sms_active': constants.security['sms_active'],
'sms_number': '1 123 456 7890',
'sms_message': '[SAM] Special SMS Message',
}
pprint.pprint(actions)
assert actions == expected
# edit a few params
r.set_action_params({'alert_severity': '1', 'email_address': 'example@example.com'})
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import re
import yaml
import pprint
from spec.python import db_connection
from sam.models.security import rule, rule_template
from sam import constants
and context:
# Path: spec/python/db_connection.py
# TEST_DATABASE_MYSQL = 'samapper_test'
# TEST_DATABASE_SQLITE = '/tmp/sam_test.db'
# class Mocker(object):
# class Session(dict):
# class env(object):
# def __init__(self, *args, **kwargs):
# def __getitem__(self, k):
# def __setitem__(self, k, v):
# def __getattr__(self, name):
# def receiver(*args, **kwargs):
# def __call__(self, *args, **kwargs):
# def clear(self):
# def was_called(self, name, *args, **kwargs):
# def kill(self):
# def __init__(self, mock_input=False, login_active=None, mock_session=False, mock_render=False, lang='en'):
# def __enter__(self):
# def __exit__(self, type, value, traceback):
# def make_timestamp(timestring):
# def unix_timestamp_to_datetime(stamp):
# def get_test_db_connection():
# def setup_datasources(db, sub_id):
# def template_sql(path, *args):
# def setup_network(db, sub_id, ds_id):
# def setup_node_extras(sub_id):
#
# Path: sam/models/security/rule_template.py
# def get_all():
# def abs_rule_path(path):
# def get_definition(path, cache={}):
# def __init__(self, path, yml):
# def import_yml(self, data):
# def load_exposed(yml):
# def load_inclusions(self, yml):
# def load_action_defaults(yml):
# def get_exposed(self):
# def get_action_defaults(self):
# def get_inclusions(self):
# class RuleTemplate(object):
#
# Path: sam/constants.py
# LDAP = {
# 'connection_string': config.get('LDAP', 'connection_string', default='')
# }
# def enable_local_mode():
# def init_urls():
# def find_url(target):
# def get_navbar(lang):
which might include code, classes, or functions. Output only the next line. | r.set_action_params({'sms_message': 'bogus'}) |
Predict the next line for this snippet: <|code_start|>sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))) # could be executed from any directory
def check_database():
# Validate the database format
if not integrity.check_and_fix_integrity():
exit(1)
def create_session(app):
# Create the session object
if web.config.get('_session') is None:
common.session = web.session.Session(app, common.session_store)
web.config._session = common.session
<|code_end|>
with the help of current file imports:
import sys
import os
import operator
import web
from sam import constants
from sam import common
from sam import integrity
from sam import httpserver
and context from other files:
# Path: sam/constants.py
# LDAP = {
# 'connection_string': config.get('LDAP', 'connection_string', default='')
# }
# def enable_local_mode():
# def init_urls():
# def find_url(target):
# def get_navbar(lang):
#
# Path: sam/integrity.py
# def get_table_names(db):
# def get_all_subs(db):
# def get_all_dss(db):
# def check_db_access_MySQL(params):
# def check_db_access_SQLite(params):
# def check_and_fix_db_access(params):
# def check_and_fix_db_access_MySQL(params):
# def check_and_fix_db_access_SQLite(params):
# def check_shared_tables(db):
# def fix_shared_tables(db, missing_tables):
# def fix_UDF_MySQL(db):
# def fix_UDF_SQLite(db):
# def fill_port_table(db):
# def check_default_subscription(db):
# def fix_default_subscription(db, errors):
# def check_subscriptions(db):
# def fix_subscriptions(db, errors):
# def fix_default_rules(db):
# def check_settings(db):
# def fix_settings(db, errors):
# def check_data_sources(db):
# def fix_data_sources(db, errors):
# def check_sessions_table(db):
# def fix_sessions_table(db, is_missing):
# def check_plugins(db):
# def fix_plugins(db, errors):
# def check_integrity(db=None):
# def check_and_fix_integrity(db=None, params=None):
, which may contain function names, class names, or code. Output only the next line. | else: |
Here is a snippet: <|code_start|>sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))) # could be executed from any directory
def check_database():
# Validate the database format
if not integrity.check_and_fix_integrity():
<|code_end|>
. Write the next line using the current file imports:
import sys
import os
import operator
import web
from sam import constants
from sam import common
from sam import integrity
from sam import httpserver
and context from other files:
# Path: sam/constants.py
# LDAP = {
# 'connection_string': config.get('LDAP', 'connection_string', default='')
# }
# def enable_local_mode():
# def init_urls():
# def find_url(target):
# def get_navbar(lang):
#
# Path: sam/integrity.py
# def get_table_names(db):
# def get_all_subs(db):
# def get_all_dss(db):
# def check_db_access_MySQL(params):
# def check_db_access_SQLite(params):
# def check_and_fix_db_access(params):
# def check_and_fix_db_access_MySQL(params):
# def check_and_fix_db_access_SQLite(params):
# def check_shared_tables(db):
# def fix_shared_tables(db, missing_tables):
# def fix_UDF_MySQL(db):
# def fix_UDF_SQLite(db):
# def fill_port_table(db):
# def check_default_subscription(db):
# def fix_default_subscription(db, errors):
# def check_subscriptions(db):
# def fix_subscriptions(db, errors):
# def fix_default_rules(db):
# def check_settings(db):
# def fix_settings(db, errors):
# def check_data_sources(db):
# def fix_data_sources(db, errors):
# def check_sessions_table(db):
# def fix_sessions_table(db, is_missing):
# def check_plugins(db):
# def fix_plugins(db, errors):
# def check_integrity(db=None):
# def check_and_fix_integrity(db=None, params=None):
, which may include functions, classes, or code. Output only the next line. | exit(1) |
Given the code snippet: <|code_start|>
db = db_connection.db
sub_id = db_connection.default_sub
ds_empty = db_connection.dsid_short
importer_names = ['paloalto', 'netflow', 'aws', 'asasyslog', 'tcpdump']
try:
importer_names.append('tshark')
except:
pass
short_log = """
{"message":"1,2011/06/21 18:06:18,0009C100218,TRAFFIC,end,1,2011/06/21 18:06:27,8.131.66.13,7.66.10.231,0.0.0.0,0.0.0.0,Allow export to Syslog,,,incomplete,vsys1,TAP-T0000R021,TAP-T0000R021,ethernet1/3,ethernet1/3,Copy Traffic Logs to Syslog,2011/06/21 18:06:27,309703,1,61590,443,0,0,0x19,tcp,allow,66,66,0,1,2011/06/21 18:06:19,5,any,0,945780,0x0,8.0.0.0-8.255.255.255,US,0,1,0,aged-out,0,0,0,0,,Palo-Alto-Networks,from-policy","@version":"1","@timestamp":"2011-06-22T01:06:27.000Z","host":"9.8.7.6","priority":14,"timestamp":"Jun 21 18:06:27","logsource":"Palo-Alto-Networks","severity":6,"facility":1,"facility_label":"user-level","severity_label":"Informational"}
{"message":"1,2011/06/21 18:06:19,0009C100218,TRAFFIC,end,1,2011/06/21 18:06:27,7.66.133.39,6.146.175.209,0.0.0.0,0.0.0.0,Allow export to Syslog,,,netbios-ns,vsys1,TAP-T0000R021,TAP-T0000R021,ethernet1/3,ethernet1/3,Copy Traffic Logs to Syslog,2011/06/21 18:06:27,158099,1,137,137,0,0,0x19,udp,allow,184,184,0,3,2011/06/21 18:05:51,33,any,0,945779,0x0,US,6.16.0.0-6.119.255.255,0,3,0,aged-out,0,0,0,0,,Palo-Alto-Networks,from-policy","@version":"1","@timestamp":"2011-06-22T01:06:27.000Z","host":"9.8.7.6","priority":14,"timestamp":"Jun 21 18:06:27","logsource":"Palo-Alto-Networks","severity":6,"facility":1,"facility_label":"user-level","severity_label":"Informational"}
<|code_end|>
, generate the next line using the imports in this file:
import sam.common as common
import sam.models.links
import sam.models.nodes
import sam.models.upload
import sam.importers.import_base
import dateutil.parser
from spec.python import db_connection
and context (functions, classes, or occasionally code) from other files:
# Path: spec/python/db_connection.py
# TEST_DATABASE_MYSQL = 'samapper_test'
# TEST_DATABASE_SQLITE = '/tmp/sam_test.db'
# class Mocker(object):
# class Session(dict):
# class env(object):
# def __init__(self, *args, **kwargs):
# def __getitem__(self, k):
# def __setitem__(self, k, v):
# def __getattr__(self, name):
# def receiver(*args, **kwargs):
# def __call__(self, *args, **kwargs):
# def clear(self):
# def was_called(self, name, *args, **kwargs):
# def kill(self):
# def __init__(self, mock_input=False, login_active=None, mock_session=False, mock_render=False, lang='en'):
# def __enter__(self):
# def __exit__(self, type, value, traceback):
# def make_timestamp(timestring):
# def unix_timestamp_to_datetime(stamp):
# def get_test_db_connection():
# def setup_datasources(db, sub_id):
# def template_sql(path, *args):
# def setup_network(db, sub_id, ds_id):
# def setup_node_extras(sub_id):
. Output only the next line. | {"message":"1,2011/06/21 18:06:20,0009C100218,TRAFFIC,end,1,2011/06/21 18:06:20,6.229.180.169,7.66.81.57,0.0.0.0,0.0.0.0,Allow export to Syslog,,,netbios-ns,vsys1,TAP-T0000R021,TAP-T0000R021,ethernet1/3,ethernet1/3,Copy Traffic Logs to Syslog,2011/06/21 18:06:20,80266,1,137,137,0,0,0x19,udp,allow,253,253,0,1,2011/06/21 18:05:54,30,any,0,945781,0x0,6.16.0.0-6.119.255.255,US,0,1,0,aged-out,0,0,0,0,,Palo-Alto-Networks,from-policy","@version":"1","@timestamp":"2011-06-22T01:06:20.000Z","host":"9.8.7.6","priority":14,"timestamp":"Jun 21 18:06:20","logsource":"Palo-Alto-Networks","severity":6,"facility":1,"facility_label":"user-level","severity_label":"Informational"} |
Next line prediction: <|code_start|>
sub = db_connection.default_sub
def test_logout():
with db_connection.env(mock_input=True, login_active=True, mock_session=True):
p = sam.pages.logout.Logout()
assert p.page.user.logged_in is False
p.page.user.login_simple('phony', sub)
<|code_end|>
. Use current file imports:
(from spec.python import db_connection
import sam.pages.logout)
and context including class names, function names, or small code snippets from other files:
# Path: spec/python/db_connection.py
# TEST_DATABASE_MYSQL = 'samapper_test'
# TEST_DATABASE_SQLITE = '/tmp/sam_test.db'
# class Mocker(object):
# class Session(dict):
# class env(object):
# def __init__(self, *args, **kwargs):
# def __getitem__(self, k):
# def __setitem__(self, k, v):
# def __getattr__(self, name):
# def receiver(*args, **kwargs):
# def __call__(self, *args, **kwargs):
# def clear(self):
# def was_called(self, name, *args, **kwargs):
# def kill(self):
# def __init__(self, mock_input=False, login_active=None, mock_session=False, mock_render=False, lang='en'):
# def __enter__(self):
# def __exit__(self, type, value, traceback):
# def make_timestamp(timestring):
# def unix_timestamp_to_datetime(stamp):
# def get_test_db_connection():
# def setup_datasources(db, sub_id):
# def template_sql(path, *args):
# def setup_network(db, sub_id, ds_id):
# def setup_node_extras(sub_id):
. Output only the next line. | assert p.page.user.logged_in is True |
Based on the snippet: <|code_start|>def test_SocketBuffer():
assert isinstance(server_collector.SOCKET_BUFFER, server_collector.SocketBuffer)
buffer = server_collector.SocketBuffer()
assert len(buffer) == 0
bs = "".join(map(chr, range(256))) + '\x00'
buffer.store_data('abc')
buffer.store_data('def\nghi\rjkl\r\nmno\n\rp')
buffer.store_data(bs)
assert len(buffer) == 3
contents = buffer.pop_all()
assert len(buffer) == 0
assert contents[0] == 'abc'
assert contents[2] == bs
def test_SocketListener():
buffer = server_collector.SOCKET_BUFFER
buffer.pop_all()
assert len(buffer) == 0
server_collector.SocketListener(('data1', None), ('foreign', 'address'), None)
server_collector.SocketListener(('data2\n', None), ('foreign', 'address'), None)
server_collector.SocketListener((' \t data3', None), ('foreign', 'address'), None)
assert len(buffer) == 3
datas = buffer.pop_all()
assert datas == ['data1', 'data2\n', ' \t data3']
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import requests
import cPickle
import threading
import multiprocessing
import time
import signal
from spec.python import db_connection
from sam import server_collector
from sam.importers.import_tcpdump import TCPDumpImporter
from sam.importers.import_paloalto import PaloAltoImporter
and context (classes, functions, sometimes code) from other files:
# Path: spec/python/db_connection.py
# TEST_DATABASE_MYSQL = 'samapper_test'
# TEST_DATABASE_SQLITE = '/tmp/sam_test.db'
# class Mocker(object):
# class Session(dict):
# class env(object):
# def __init__(self, *args, **kwargs):
# def __getitem__(self, k):
# def __setitem__(self, k, v):
# def __getattr__(self, name):
# def receiver(*args, **kwargs):
# def __call__(self, *args, **kwargs):
# def clear(self):
# def was_called(self, name, *args, **kwargs):
# def kill(self):
# def __init__(self, mock_input=False, login_active=None, mock_session=False, mock_render=False, lang='en'):
# def __enter__(self):
# def __exit__(self, type, value, traceback):
# def make_timestamp(timestring):
# def unix_timestamp_to_datetime(stamp):
# def get_test_db_connection():
# def setup_datasources(db, sub_id):
# def template_sql(path, *args):
# def setup_network(db, sub_id, ds_id):
# def setup_node_extras(sub_id):
. Output only the next line. | def test_FileListener(): |
Next line prediction: <|code_start|>sub_id = db_connection.default_sub
ds_full = db_connection.dsid_default
ds_empty = db_connection.dsid_short
ds_other = db_connection.dsid_live
session = {}
def test_create():
lk_model = LiveKeys(db, sub_id)
ds_ids = (ds_full, ds_empty, ds_other)
lk_model.delete_all()
for id in ds_ids:
lk_model.create(id)
keys = lk_model.read()
for key in keys:
assert key['subscription'] == sub_id
assert key['ds_id'] in ds_ids
# TODO: assert key['access_key'] looks "random"?
def test_validate():
lk_model = LiveKeys(db, sub_id)
lk_model.delete_all()
lk_model.create(ds_full)
key = lk_model.read()[0]
bad_key = LiveKeys.generate_salt(24)
blank_key = ""
no_key = None
<|code_end|>
. Use current file imports:
(from spec.python import db_connection
from sam.models.livekeys import LiveKeys)
and context including class names, function names, or small code snippets from other files:
# Path: spec/python/db_connection.py
# TEST_DATABASE_MYSQL = 'samapper_test'
# TEST_DATABASE_SQLITE = '/tmp/sam_test.db'
# class Mocker(object):
# class Session(dict):
# class env(object):
# def __init__(self, *args, **kwargs):
# def __getitem__(self, k):
# def __setitem__(self, k, v):
# def __getattr__(self, name):
# def receiver(*args, **kwargs):
# def __call__(self, *args, **kwargs):
# def clear(self):
# def was_called(self, name, *args, **kwargs):
# def kill(self):
# def __init__(self, mock_input=False, login_active=None, mock_session=False, mock_render=False, lang='en'):
# def __enter__(self):
# def __exit__(self, type, value, traceback):
# def make_timestamp(timestring):
# def unix_timestamp_to_datetime(stamp):
# def get_test_db_connection():
# def setup_datasources(db, sub_id):
# def template_sql(path, *args):
# def setup_network(db, sub_id, ds_id):
# def setup_node_extras(sub_id):
. Output only the next line. | correct_key = key['access_key'] |
Based on the snippet: <|code_start|> assert whois.ip_itos(int(1e0)) == "0.0.0.1"
assert whois.ip_itos(int(1e1)) == "0.0.0.10"
assert whois.ip_itos(int(1e2)) == "0.0.0.100"
assert whois.ip_itos(int(1e3)) == "0.0.3.232"
assert whois.ip_itos(int(1e4)) == "0.0.39.16"
assert whois.ip_itos(int(1e5)) == "0.1.134.160"
assert whois.ip_itos(int(1e6)) == "0.15.66.64"
assert whois.ip_itos(int(1e7)) == "0.152.150.128"
assert whois.ip_itos(int(1e8)) == "5.245.225.0"
assert whois.ip_itos(int(1e9)) == "59.154.202.0"
def test_ip_stoi():
assert whois.ip_stoi("0.0.0.1") == 1e0
assert whois.ip_stoi("0.0.0.10") == 1e1
assert whois.ip_stoi("0.0.0.100") == 1e2
assert whois.ip_stoi("0.0.3.232") == 1e3
assert whois.ip_stoi("0.0.39.16") == 1e4
assert whois.ip_stoi("0.1.134.160") == 1e5
assert whois.ip_stoi("0.15.66.64") == 1e6
assert whois.ip_stoi("0.152.150.128") == 1e7
assert whois.ip_stoi("5.245.225.0") == 1e8
assert whois.ip_stoi("59.154.202.0") == 1e9
assert whois.ip_stoi("1.2.3.4") == 16909060
assert whois.ip_stoi("1.2.3.4/32") == 16909060
assert whois.ip_stoi("1.2.3.4/24") == 16909056
assert whois.ip_stoi("1.2.3.4/16") == 16908288
assert whois.ip_stoi("1.2.3.4/8") == 16777216
assert whois.ip_stoi("10/8") == 167772160
assert whois.ip_stoi("10/16") == 167772160
<|code_end|>
, predict the immediate next line with the help of imports:
from spec.python import db_connection
from sam.models import whois
import pytest
and context (classes, functions, sometimes code) from other files:
# Path: spec/python/db_connection.py
# TEST_DATABASE_MYSQL = 'samapper_test'
# TEST_DATABASE_SQLITE = '/tmp/sam_test.db'
# class Mocker(object):
# class Session(dict):
# class env(object):
# def __init__(self, *args, **kwargs):
# def __getitem__(self, k):
# def __setitem__(self, k, v):
# def __getattr__(self, name):
# def receiver(*args, **kwargs):
# def __call__(self, *args, **kwargs):
# def clear(self):
# def was_called(self, name, *args, **kwargs):
# def kill(self):
# def __init__(self, mock_input=False, login_active=None, mock_session=False, mock_render=False, lang='en'):
# def __enter__(self):
# def __exit__(self, type, value, traceback):
# def make_timestamp(timestring):
# def unix_timestamp_to_datetime(stamp):
# def get_test_db_connection():
# def setup_datasources(db, sub_id):
# def template_sql(path, *args):
# def setup_network(db, sub_id, ds_id):
# def setup_node_extras(sub_id):
. Output only the next line. | assert whois.ip_stoi("10/24") == 167772160 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.