Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Given snippet: <|code_start|> return fetch_url(url, parser='json')
def full_img_url(img, size='w500'):
return (IMG_BASE + '/' + size + img) if img else None
def movie_item(movie_data):
title = movie_data['title']
poster = full_img_url(movie_data['poster_path'])
url = '/movie/%d?append_to_response=credits,videos' % movie_data['id']
item_add(ST_MOVIE_INFO, title, url, poster=poster)
def next_page_item(url, data, next_state):
if data['page'] < data['total_pages']:
page = data['page']
if '&page=' in url:
next_url = url.replace('&page=%d' % (page), '&page=%d' % (page + 1))
else:
if not '?' in url: url += '?'
next_url = url + '&page=%d' % (page + 1)
text = _('Load more results (page {0} of {1})').format(page, data['total_pages'])
item_add(next_state, text, next_url, action=ACT_MORE)
################################################################################
# home page
################################################################################
if STATE == ST_HOME:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from epymc.extapi.onlinevideo import api_version, state_get, \
fetch_url, play_url, item_add, call_ydl, url_encode, language_get, \
ACT_NONE, ACT_FOLDER, ACT_MORE, ACT_PLAY, ACT_SEARCH
and context:
# Path: epymc/extapi/onlinevideo.py
# ACT_DEFAULT = 0
# ACT_NONE = 1
# ACT_FOLDER = 2
# ACT_MORE = 3
# ACT_PLAY = 4
# ACT_SEARCH = 5
# def state_get():
# def language_get():
# def item_add(next_state, label, url, info=None, icon=None, poster=None, action=ACT_DEFAULT):
# def play_url(url):
# def report_error(msg):
# def local_resource(_file_, res):
# def fetch_url(url, headers=None, parser=None):
# def ydl_executable():
# def call_ydl(url):
# def url_encode(params):
# def seconds_to_duration(seconds):
# def relative_date(date):
# def format_date(date):
which might include code, classes, or functions. Output only the next line. | item_add(ST_SEARCH, _('Search movies'), 'search', action=ACT_SEARCH) |
Given the code snippet: <|code_start|>
SUPPORT: To regenerate thumbnail, just delete the image file under thumbnails folder inside the post directory.
SUPPORT: To remove any link from the blog post, delete the entry after the post is created **in the blog directory**
Note down all the links somewhere then run the following command from blog directory to delete them
E.g. Image links will be like


$ pbpaste | awk -F\/ '{print $6}' | tr -d ')' | while read img; do find . -name $img -delete; done # noqa: W605
Usage:
$ python hn-links.py -l https://news.ycombinator.com/item?id=25381191 -b <blog_directory> --open-in-editor
"""
UTF_ENCODING = "utf-8"
# Common functions
def fetch_html(url, post_html_page_file):
logging.info(f"Fetching HTML title for {url}")
if post_html_page_file.exists():
logging.info(f"🌕 Loading page from cache {post_html_page_file}")
return post_html_page_file.read_text(encoding=UTF_ENCODING)
<|code_end|>
, generate the next line using the imports in this file:
import logging
import os
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from datetime import datetime
from pathlib import Path
from urllib.parse import parse_qs, urlparse
from bs4 import BeautifulSoup
from py_executable_checklist.workflow import WorkflowBase, run_command, run_workflow
from common_utils import fetch_html_page, html_parser_from
and context (functions, classes, or occasionally code) from other files:
# Path: common_utils.py
# def fetch_html_page(page_url):
# user_agent = (
# "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) "
# "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.89 Safari/537.36"
# )
# headers = {"User-Agent": user_agent}
# page = requests.get(page_url, headers=headers, timeout=10)
# return page.text
#
# def html_parser_from(page_html):
# return BeautifulSoup(page_html, "html.parser")
. Output only the next line. | page_html = fetch_html_page(url) |
Next line prediction: <|code_start|> f.mkdir(parents=True, exist_ok=True)
# output
context["hn_post_id"] = hn_post_id
context["target_folder"] = target_folder
context["child_links_folder"] = child_links_folder
context["thumbnails_folder"] = thumbnails_folder
class GrabPostHtml(WorkflowBase):
"""Use requests to download HTML using a browser user agent"""
hn_link: str
target_folder: str
def run(self, context):
post_html_page_file = Path(self.target_folder) / "hn_post.html"
page_html = fetch_html(self.hn_link, post_html_page_file)
# output
context["page_html"] = page_html
class ParsePostHtml(WorkflowBase):
"""Create BeautifulSoap parser from html"""
page_html: str
def run(self, context):
# output
<|code_end|>
. Use current file imports:
(import logging
import os
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from datetime import datetime
from pathlib import Path
from urllib.parse import parse_qs, urlparse
from bs4 import BeautifulSoup
from py_executable_checklist.workflow import WorkflowBase, run_command, run_workflow
from common_utils import fetch_html_page, html_parser_from)
and context including class names, function names, or small code snippets from other files:
# Path: common_utils.py
# def fetch_html_page(page_url):
# user_agent = (
# "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) "
# "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.89 Safari/537.36"
# )
# headers = {"User-Agent": user_agent}
# page = requests.get(page_url, headers=headers, timeout=10)
# return page.text
#
# def html_parser_from(page_html):
# return BeautifulSoup(page_html, "html.parser")
. Output only the next line. | context["bs"] = html_parser_from(self.page_html) |
Based on the snippet: <|code_start|>#!/usr/bin/env python3
"""
Count number of files by type in a directory
input:
- directory
- file extensions
output:
- total number of files
"""
def main(source_directory: str, extensions: List[str]):
print(f"Source directory: {source_directory}, extensions: {extensions}")
source_directory = Path(source_directory).expanduser()
if not source_directory.is_dir():
raise ValueError(f"{source_directory} is not a directory")
files_in_directory = [
p.resolve()
for p in source_directory.glob("**/*")
<|code_end|>
, predict the immediate next line with the help of imports:
from pathlib import Path
from typing import List
from media_manager import valid_file
import fire
and context (classes, functions, sometimes code) from other files:
# Path: media_manager.py
# def valid_file(file_path):
# invalid_file_extensions = [".json", ".ini", ".zip"]
# invalid_names = [".ds_store"]
# return (
# file_path.exists()
# and not file_path.is_dir()
# and file_path.suffix.lower() not in invalid_file_extensions
# and file_path.name.lower() not in invalid_names
# )
. Output only the next line. | if p.is_file() and p.suffix.lower() in extensions and valid_file(p) |
Continue the code snippet: <|code_start|>$ cat <filename>.html | pup 'a attr{href}' >> links.txt
3. Run this script
$ EDITOR=/usr/local/bin/idea ./links_to_hugo.py --links-file .temp/links.txt --post-title "Post title" \
--blog-directory "<full-path-to-blog-directory" --open-in-editor
4. Review blog post in the editor and remove any links if necessary
5. Run this script to clean up any images that are left behind due to deleted links
$ ./unused_files.py -s <blog-root>/static/images -t <blog-root>/content -d
6. make deploy from blog directory
7. make commit-all from blog directory
"""
UTF_ENCODING = "utf-8"
# Common functions
def fetch_html(url, post_html_page_file):
logging.info(f"Fetching HTML title for {url}")
if post_html_page_file.exists():
logging.info(f"🌕 Loading page from cache {post_html_page_file}")
return post_html_page_file.read_text(encoding=UTF_ENCODING)
<|code_end|>
. Use current file imports:
import logging
import os
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from datetime import datetime
from pathlib import Path
from subprocess import CalledProcessError
from jinja2 import Environment, FileSystemLoader
from py_executable_checklist.workflow import (
WorkflowBase,
notify_me,
run_command,
run_workflow,
)
from slug import slug
from common_utils import fetch_html_page, html_parser_from
and context (classes, functions, or code) from other files:
# Path: common_utils.py
# def fetch_html_page(page_url):
# user_agent = (
# "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) "
# "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.89 Safari/537.36"
# )
# headers = {"User-Agent": user_agent}
# page = requests.get(page_url, headers=headers, timeout=10)
# return page.text
#
# def html_parser_from(page_html):
# return BeautifulSoup(page_html, "html.parser")
. Output only the next line. | page_html = fetch_html_page(url) |
Given the following code snippet before the placeholder: <|code_start|> known_domains = ["ycombinator", "algolia", "hackernews", "youtube", "twitter", "chrome.google.com", "youtu.be"]
def has_known_domain(post_link):
return any(map(lambda l: l in post_link.lower(), known_domains))
return link.startswith("http") and not has_known_domain(link)
def run(self, context):
valid_links = [
link
for link in self.all_links
if self.is_valid_link(link) and self.accessible(link, self.child_links_folder)
]
# output
context["valid_links"] = valid_links
class GrabChildLinkTitle(WorkflowBase):
"""Get page title for each valid link"""
valid_links: list
child_links_folder: Path
def page_title_from(self, child_links_folder, link_in_comment):
page_slug = slug(link_in_comment)
page_path = f"{page_slug}.html"
post_html_page_file = child_links_folder / page_path
page_html = fetch_html(link_in_comment, post_html_page_file)
<|code_end|>
, predict the next line using imports from the current file:
import logging
import os
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from datetime import datetime
from pathlib import Path
from subprocess import CalledProcessError
from jinja2 import Environment, FileSystemLoader
from py_executable_checklist.workflow import (
WorkflowBase,
notify_me,
run_command,
run_workflow,
)
from slug import slug
from common_utils import fetch_html_page, html_parser_from
and context including class names, function names, and sometimes code from other files:
# Path: common_utils.py
# def fetch_html_page(page_url):
# user_agent = (
# "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) "
# "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.89 Safari/537.36"
# )
# headers = {"User-Agent": user_agent}
# page = requests.get(page_url, headers=headers, timeout=10)
# return page.text
#
# def html_parser_from(page_html):
# return BeautifulSoup(page_html, "html.parser")
. Output only the next line. | bs = html_parser_from(page_html) |
Using the snippet: <|code_start|>
def parse_args():
parser = ArgumentParser(description=__doc__)
parser.add_argument("-p", "--page-url", type=str, required=True, help="PhpBB Forum Page Url")
return parser.parse_args()
class InitScript(WorkflowBase):
"""Initialise environment"""
def _init_script(self):
handlers = [
logging.StreamHandler(),
]
logging.basicConfig(
handlers=handlers,
format="%(asctime)s - %(filename)s:%(lineno)d - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
)
logging.captureWarnings(capture=True)
def run(self, context):
self._init_script()
template_folder = "sample"
template_dir = os.path.dirname(os.path.abspath(__file__)) + "/" + template_folder
jinja_env = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True, autoescape=True)
context["jinja_env"] = jinja_env
context["output_folder"] = output_folder = "generated"
<|code_end|>
, determine the next line of code. You have imports:
import logging
import os
import random
import subprocess
import time
from argparse import ArgumentParser
from jinja2 import Environment, FileSystemLoader
from selenium import webdriver
from slug import slug
from common.workflow import WorkflowBase, run_workflow
from common_utils import create_dir
and context (class names, function names, or code) available:
# Path: common_utils.py
# def create_dir(output_dir, delete_existing=False):
# path = Path(output_dir)
# if path.exists() and delete_existing:
# shutil.rmtree(output_dir)
# elif not path.exists():
# path.mkdir()
. Output only the next line. | create_dir(output_folder, delete_existing=False) |
Using the snippet: <|code_start|>#!/usr/bin/env python3
"""
Telegram bot to convert web page links to PDF
"""
load_dotenv()
OUTPUT_DIR = Path.home().joinpath("OutputDir", "web-to-pdf")
def welcome(update: Update, _):
if update.message:
update.message.reply_text(
"👋 Hi there. ⬇️ I'm a bot that converts web pages to PDFs. ⬆️. " "Try sending me a link to a web page"
)
def help_command(update: Update, _):
if update.message:
update.message.reply_text("Help!")
def _handle_web_page(web_page_url: str) -> str:
<|code_end|>
, determine the next line of code. You have imports:
import argparse
import logging
import os
import py_executable_checklist.workflow
import telegram
from pathlib import Path
from dotenv import load_dotenv
from slug import slug
from telegram import Update
from telegram.ext import CommandHandler, Filters, MessageHandler, Updater
from common_utils import fetch_html_page, html_parser_from, retry
and context (class names, function names, or code) available:
# Path: common_utils.py
# def fetch_html_page(page_url):
# user_agent = (
# "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) "
# "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.89 Safari/537.36"
# )
# headers = {"User-Agent": user_agent}
# page = requests.get(page_url, headers=headers, timeout=10)
# return page.text
#
# def html_parser_from(page_html):
# return BeautifulSoup(page_html, "html.parser")
#
# def retry(exceptions, tries=4, delay=3, back_off=2):
# def deco_retry(f):
# @wraps(f)
# def f_retry(*args, **kwargs):
# m_retries, m_delay = tries, delay
# while m_retries > 1:
# try:
# return f(*args, **kwargs)
# except exceptions as e:
# msg = f"{e}, Retrying in {m_delay} seconds..."
# logging.warning(msg)
# time.sleep(m_delay)
# m_retries -= 1
# m_delay *= back_off
# return f(*args, **kwargs)
#
# return f_retry # true decorator
#
# return deco_retry
. Output only the next line. | page_html = fetch_html_page(web_page_url) |
Here is a snippet: <|code_start|>#!/usr/bin/env python3
"""
Telegram bot to convert web page links to PDF
"""
load_dotenv()
OUTPUT_DIR = Path.home().joinpath("OutputDir", "web-to-pdf")
def welcome(update: Update, _):
if update.message:
update.message.reply_text(
"👋 Hi there. ⬇️ I'm a bot that converts web pages to PDFs. ⬆️. " "Try sending me a link to a web page"
)
def help_command(update: Update, _):
if update.message:
update.message.reply_text("Help!")
def _handle_web_page(web_page_url: str) -> str:
page_html = fetch_html_page(web_page_url)
<|code_end|>
. Write the next line using the current file imports:
import argparse
import logging
import os
import py_executable_checklist.workflow
import telegram
from pathlib import Path
from dotenv import load_dotenv
from slug import slug
from telegram import Update
from telegram.ext import CommandHandler, Filters, MessageHandler, Updater
from common_utils import fetch_html_page, html_parser_from, retry
and context from other files:
# Path: common_utils.py
# def fetch_html_page(page_url):
# user_agent = (
# "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) "
# "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.89 Safari/537.36"
# )
# headers = {"User-Agent": user_agent}
# page = requests.get(page_url, headers=headers, timeout=10)
# return page.text
#
# def html_parser_from(page_html):
# return BeautifulSoup(page_html, "html.parser")
#
# def retry(exceptions, tries=4, delay=3, back_off=2):
# def deco_retry(f):
# @wraps(f)
# def f_retry(*args, **kwargs):
# m_retries, m_delay = tries, delay
# while m_retries > 1:
# try:
# return f(*args, **kwargs)
# except exceptions as e:
# msg = f"{e}, Retrying in {m_delay} seconds..."
# logging.warning(msg)
# time.sleep(m_delay)
# m_retries -= 1
# m_delay *= back_off
# return f(*args, **kwargs)
#
# return f_retry # true decorator
#
# return deco_retry
, which may include functions, classes, or code. Output only the next line. | bs = html_parser_from(page_html) |
Based on the snippet: <|code_start|>
load_dotenv()
OUTPUT_DIR = Path.home().joinpath("OutputDir", "web-to-pdf")
def welcome(update: Update, _):
if update.message:
update.message.reply_text(
"👋 Hi there. ⬇️ I'm a bot that converts web pages to PDFs. ⬆️. " "Try sending me a link to a web page"
)
def help_command(update: Update, _):
if update.message:
update.message.reply_text("Help!")
def _handle_web_page(web_page_url: str) -> str:
page_html = fetch_html_page(web_page_url)
bs = html_parser_from(page_html)
web_page_title = slug(bs.title.string if bs.title and bs.title.string else web_page_url)
target_file = OUTPUT_DIR / f"{web_page_title}.pdf"
cmd = f'./webpage_to_pdf.py -i "{web_page_url}" -o "{target_file}" --headless'
py_executable_checklist.workflow.run_command(cmd)
return target_file.as_posix()
<|code_end|>
, predict the immediate next line with the help of imports:
import argparse
import logging
import os
import py_executable_checklist.workflow
import telegram
from pathlib import Path
from dotenv import load_dotenv
from slug import slug
from telegram import Update
from telegram.ext import CommandHandler, Filters, MessageHandler, Updater
from common_utils import fetch_html_page, html_parser_from, retry
and context (classes, functions, sometimes code) from other files:
# Path: common_utils.py
# def fetch_html_page(page_url):
# user_agent = (
# "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) "
# "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.89 Safari/537.36"
# )
# headers = {"User-Agent": user_agent}
# page = requests.get(page_url, headers=headers, timeout=10)
# return page.text
#
# def html_parser_from(page_html):
# return BeautifulSoup(page_html, "html.parser")
#
# def retry(exceptions, tries=4, delay=3, back_off=2):
# def deco_retry(f):
# @wraps(f)
# def f_retry(*args, **kwargs):
# m_retries, m_delay = tries, delay
# while m_retries > 1:
# try:
# return f(*args, **kwargs)
# except exceptions as e:
# msg = f"{e}, Retrying in {m_delay} seconds..."
# logging.warning(msg)
# time.sleep(m_delay)
# m_retries -= 1
# m_delay *= back_off
# return f(*args, **kwargs)
#
# return f_retry # true decorator
#
# return deco_retry
. Output only the next line. | @retry(telegram.error.TimedOut, tries=3) |
Next line prediction: <|code_start|> def fix(v):
return v[1:] if v.startswith(os.sep) else v
if len(segments) > 1:
segments = [segments[0]] + [fix(v) for v in segments[1:]]
return os.path.join(*segments)
def join_path_later(*segments):
"""
Like :func:`join_path`, but deferred.
:param segments: path segments; calls :func:`ronin.utils.strings.stringify` on each
:type segments: [str|FunctionType|None]
:returns: function that calls :func:`join_path`
:rtype: FunctionType
"""
return lambda _: join_path(*segments)
def base_path(path):
"""
Returns the real base path string of a file.
:param path: path; calls :func:`ronin.utils.strings.stringify` on it
:type path: str|FunctionType
:returns: base path of ``path``
:rtype: str
"""
<|code_end|>
. Use current file imports:
(from .strings import stringify, stringify_list
from ..contexts import current_context
from glob2 import glob as glob2
import os)
and context including class names, function names, or small code snippets from other files:
# Path: ronin/utils/strings.py
# def stringify(value):
# """
# Casts the value to a Unicode string. If the value is a function, calls it using
# :func:`ronin.contexts.current_context` as its only argument, and recurses until a
# non-FunctionType value is returned.
#
# None values are preserved, whether None is directly sent to this function or is the return
# value of function argument.
#
# This function is the heart of Rōnin's deferred value capability, as it allows lambdas to be
# passed around instead of strings.
#
# :param value: value or None
# :type value: str|FunctionType
# :returns: stringified value or None
# :rtype: str
# """
#
# if value is None:
# return None
# elif hasattr(value, '__call__'):
# with current_context() as ctx:
# value = value(ctx)
# return stringify(value)
# else:
# try:
# return to_str(value)
# except UnicodeDecodeError:
# return str(value).decode(_ENCODING)
#
# def stringify_list(values):
# """
# Calls :func:`stringify` on all elements. Return values of None are preserved.
#
# :param values: values
# :type values: []
# :returns: values
# :rtype: [str]
# """
#
# return [stringify(v) for v in values]
#
# Path: ronin/contexts.py
# def current_context(immutable=True):
# """
# Returns the current context if there is one, otherwise raises :class:`NoContextException`.
#
# By default, the context will be treated as immutable.
#
# :param immutable: set to False in order to allow changes to the context
# :type immutable: bool
# :returns: current context
# :rtype: :class:`Context`
# """
#
# ctx = Context._peek_thread_local()
# if ctx is None:
# raise NoContextException()
# return Context(ctx, True) if immutable else ctx
. Output only the next line. | path = stringify(path) |
Predict the next line for this snippet: <|code_start|>#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
def join_path(*segments):
"""
Joins the path segments into a single path tring. No attempt is made to make it an absolute
path, nor to check that it exists on the filesystem.
Null segments are skipped. Also note that unlike ``os.path.join``, segments beginning with a
path separator character will not cause the path to reset.
:param segments: path segments; calls :func:`ronin.utils.strings.stringify` on each
:type segments: [str|FunctionType|None]
:returns: joined path
:rtype: str
"""
<|code_end|>
with the help of current file imports:
from .strings import stringify, stringify_list
from ..contexts import current_context
from glob2 import glob as glob2
import os
and context from other files:
# Path: ronin/utils/strings.py
# def stringify(value):
# """
# Casts the value to a Unicode string. If the value is a function, calls it using
# :func:`ronin.contexts.current_context` as its only argument, and recurses until a
# non-FunctionType value is returned.
#
# None values are preserved, whether None is directly sent to this function or is the return
# value of function argument.
#
# This function is the heart of Rōnin's deferred value capability, as it allows lambdas to be
# passed around instead of strings.
#
# :param value: value or None
# :type value: str|FunctionType
# :returns: stringified value or None
# :rtype: str
# """
#
# if value is None:
# return None
# elif hasattr(value, '__call__'):
# with current_context() as ctx:
# value = value(ctx)
# return stringify(value)
# else:
# try:
# return to_str(value)
# except UnicodeDecodeError:
# return str(value).decode(_ENCODING)
#
# def stringify_list(values):
# """
# Calls :func:`stringify` on all elements. Return values of None are preserved.
#
# :param values: values
# :type values: []
# :returns: values
# :rtype: [str]
# """
#
# return [stringify(v) for v in values]
#
# Path: ronin/contexts.py
# def current_context(immutable=True):
# """
# Returns the current context if there is one, otherwise raises :class:`NoContextException`.
#
# By default, the context will be treated as immutable.
#
# :param immutable: set to False in order to allow changes to the context
# :type immutable: bool
# :returns: current context
# :rtype: :class:`Context`
# """
#
# ctx = Context._peek_thread_local()
# if ctx is None:
# raise NoContextException()
# return Context(ctx, True) if immutable else ctx
, which may contain function names, class names, or code. Output only the next line. | segments = stringify_list(segments) |
Next line prediction: <|code_start|>
return lambda _: join_path(*segments)
def base_path(path):
"""
Returns the real base path string of a file.
:param path: path; calls :func:`ronin.utils.strings.stringify` on it
:type path: str|FunctionType
:returns: base path of ``path``
:rtype: str
"""
path = stringify(path)
return os.path.dirname(os.path.realpath(path))
def input_path(*segments):
"""
Joins the path segments to the context's ``paths.input``.
See :func:`join_path`.
:param segments: path segments; calls :func:`ronin.utils.strings.stringify` on each
:type segments: [str|FunctionType|None]
:returns: path joined to ``paths.input``
:rtype: str
"""
<|code_end|>
. Use current file imports:
(from .strings import stringify, stringify_list
from ..contexts import current_context
from glob2 import glob as glob2
import os)
and context including class names, function names, or small code snippets from other files:
# Path: ronin/utils/strings.py
# def stringify(value):
# """
# Casts the value to a Unicode string. If the value is a function, calls it using
# :func:`ronin.contexts.current_context` as its only argument, and recurses until a
# non-FunctionType value is returned.
#
# None values are preserved, whether None is directly sent to this function or is the return
# value of function argument.
#
# This function is the heart of Rōnin's deferred value capability, as it allows lambdas to be
# passed around instead of strings.
#
# :param value: value or None
# :type value: str|FunctionType
# :returns: stringified value or None
# :rtype: str
# """
#
# if value is None:
# return None
# elif hasattr(value, '__call__'):
# with current_context() as ctx:
# value = value(ctx)
# return stringify(value)
# else:
# try:
# return to_str(value)
# except UnicodeDecodeError:
# return str(value).decode(_ENCODING)
#
# def stringify_list(values):
# """
# Calls :func:`stringify` on all elements. Return values of None are preserved.
#
# :param values: values
# :type values: []
# :returns: values
# :rtype: [str]
# """
#
# return [stringify(v) for v in values]
#
# Path: ronin/contexts.py
# def current_context(immutable=True):
# """
# Returns the current context if there is one, otherwise raises :class:`NoContextException`.
#
# By default, the context will be treated as immutable.
#
# :param immutable: set to False in order to allow changes to the context
# :type immutable: bool
# :returns: current context
# :rtype: :class:`Context`
# """
#
# ctx = Context._peek_thread_local()
# if ctx is None:
# raise NoContextException()
# return Context(ctx, True) if immutable else ctx
. Output only the next line. | with current_context() as ctx: |
Predict the next line after this snippet: <|code_start|> """
A list that raises :class:`~exceptions.TypeError` exceptions when objects of the wrong type are
inserted.
:param items: initial list
:type items: list
:param value_type: type(s) required for list values
:type value_type: :obj:`type` or :obj:`str` or (:obj:`type` or :obj:`str`)
:param wrapper_function: calls this optional function on all values before added to the list
:type wrapper_function: ~types.FunctionType
:param unwrapper_function: calls this optional function on all values when retrieved from the
list
:type unwrapper_function: ~types.FunctionType
"""
def __init__(self, items=None, value_type=None, wrapper_function=None, unwrapper_function=None):
super(StrictList, self).__init__()
if isinstance(items, StrictList):
self.value_type = items.value_type
self.wrapper_function = items.wrapper_function
self.unwrapper_function = items.unwrapper_function
self.value_type = _convert_type(value_type)
self.wrapper_function = wrapper_function
self.unwrapper_function = unwrapper_function
if items:
for item in items:
self.append(item)
def _wrap(self, value):
if (self.value_type is not None) and (not isinstance(value, self.value_type)):
<|code_end|>
using the current file's imports:
from .types import type_name, import_symbol
from .unicode import string
from collections import OrderedDict
from inspect import isclass
and any relevant context from other files:
# Path: ronin/utils/types.py
# def type_name(the_type):
# """
# Human-readable name of type(s). Built-in types will avoid the "__builtin__" prefix.
#
# Tuples are always handled as a join of "|".
#
# :param the_type: type(s)
# :type the_type: type|(type)
# :returns: name of type(s)
# :rtype: str
# """
#
# if isinstance(the_type, tuple):
# return '|'.join([type_name(v) for v in the_type])
# module = to_str(the_type.__module__)
# name = to_str(the_type.__name__)
# return name if module == '__builtin__' else '{}.{}'.format(module, name)
#
# def import_symbol(name):
# """
# Imports a symbol based on its fully qualified name.
#
# :param name: symbol name
# :type name: str
# :returns: symbol
# :raises ImportError: if could not import the module
# :raises AttributeError: if could not find the symbol in the module
# """
#
# if name and ('.' in name):
# module_name, name = name.rsplit('.', 1)
# return getattr(__import__(module_name, fromlist=[name], level=0), name)
# raise ImportError('import not found: {}'.format(name))
#
# Path: ronin/utils/unicode.py
. Output only the next line. | raise TypeError('value must be a "{}": {!r}'.format(type_name(self.value_type), value)) |
Next line prediction: <|code_start|> self.key_type = _convert_type(key_type)
self.value_type = _convert_type(value_type)
self.wrapper_function = wrapper_function
self.unwrapper_function = unwrapper_function
if items:
for k, v in items:
self[k] = v
def __getitem__(self, key):
if (self.key_type is not None) and (not isinstance(key, self.key_type)):
raise TypeError('key must be a "{}": {!r}'.format(type_name(self.key_type), key))
value = super(StrictDict, self).__getitem__(key)
if self.unwrapper_function is not None:
value = self.unwrapper_function(value)
return value
def __setitem__(self, key, value, **_):
if (self.key_type is not None) and (not isinstance(key, self.key_type)):
raise TypeError('key must be a "{}": {!r}'.format(type_name(self.key_type), key))
if (self.value_type is not None) and (not isinstance(value, self.value_type)):
raise TypeError('value must be a "{}": {!r}'.format(type_name(self.value_type), value))
if self.wrapper_function is not None:
value = self.wrapper_function(value)
return super(StrictDict, self).__setitem__(key, value)
def _convert_type(the_type):
if isinstance(the_type, tuple):
return tuple(_convert_type(v) for v in the_type)
elif isinstance(the_type, string):
<|code_end|>
. Use current file imports:
(from .types import type_name, import_symbol
from .unicode import string
from collections import OrderedDict
from inspect import isclass)
and context including class names, function names, or small code snippets from other files:
# Path: ronin/utils/types.py
# def type_name(the_type):
# """
# Human-readable name of type(s). Built-in types will avoid the "__builtin__" prefix.
#
# Tuples are always handled as a join of "|".
#
# :param the_type: type(s)
# :type the_type: type|(type)
# :returns: name of type(s)
# :rtype: str
# """
#
# if isinstance(the_type, tuple):
# return '|'.join([type_name(v) for v in the_type])
# module = to_str(the_type.__module__)
# name = to_str(the_type.__name__)
# return name if module == '__builtin__' else '{}.{}'.format(module, name)
#
# def import_symbol(name):
# """
# Imports a symbol based on its fully qualified name.
#
# :param name: symbol name
# :type name: str
# :returns: symbol
# :raises ImportError: if could not import the module
# :raises AttributeError: if could not find the symbol in the module
# """
#
# if name and ('.' in name):
# module_name, name = name.rsplit('.', 1)
# return getattr(__import__(module_name, fromlist=[name], level=0), name)
# raise ImportError('import not found: {}'.format(name))
#
# Path: ronin/utils/unicode.py
. Output only the next line. | the_type = import_symbol(the_type) |
Next line prediction: <|code_start|> self.unwrapper_function = items.unwrapper_function
self.key_type = _convert_type(key_type)
self.value_type = _convert_type(value_type)
self.wrapper_function = wrapper_function
self.unwrapper_function = unwrapper_function
if items:
for k, v in items:
self[k] = v
def __getitem__(self, key):
if (self.key_type is not None) and (not isinstance(key, self.key_type)):
raise TypeError('key must be a "{}": {!r}'.format(type_name(self.key_type), key))
value = super(StrictDict, self).__getitem__(key)
if self.unwrapper_function is not None:
value = self.unwrapper_function(value)
return value
def __setitem__(self, key, value, **_):
if (self.key_type is not None) and (not isinstance(key, self.key_type)):
raise TypeError('key must be a "{}": {!r}'.format(type_name(self.key_type), key))
if (self.value_type is not None) and (not isinstance(value, self.value_type)):
raise TypeError('value must be a "{}": {!r}'.format(type_name(self.value_type), value))
if self.wrapper_function is not None:
value = self.wrapper_function(value)
return super(StrictDict, self).__setitem__(key, value)
def _convert_type(the_type):
if isinstance(the_type, tuple):
return tuple(_convert_type(v) for v in the_type)
<|code_end|>
. Use current file imports:
(from .types import type_name, import_symbol
from .unicode import string
from collections import OrderedDict
from inspect import isclass)
and context including class names, function names, or small code snippets from other files:
# Path: ronin/utils/types.py
# def type_name(the_type):
# """
# Human-readable name of type(s). Built-in types will avoid the "__builtin__" prefix.
#
# Tuples are always handled as a join of "|".
#
# :param the_type: type(s)
# :type the_type: type|(type)
# :returns: name of type(s)
# :rtype: str
# """
#
# if isinstance(the_type, tuple):
# return '|'.join([type_name(v) for v in the_type])
# module = to_str(the_type.__module__)
# name = to_str(the_type.__name__)
# return name if module == '__builtin__' else '{}.{}'.format(module, name)
#
# def import_symbol(name):
# """
# Imports a symbol based on its fully qualified name.
#
# :param name: symbol name
# :type name: str
# :returns: symbol
# :raises ImportError: if could not import the module
# :raises AttributeError: if could not find the symbol in the module
# """
#
# if name and ('.' in name):
# module_name, name = name.rsplit('.', 1)
# return getattr(__import__(module_name, fromlist=[name], level=0), name)
# raise ImportError('import not found: {}'.format(name))
#
# Path: ronin/utils/unicode.py
. Output only the next line. | elif isinstance(the_type, string): |
Given the following code snippet before the placeholder: <|code_start|>atexit.register(_restore_terminal)
terminal = Terminal()
def announce(message, prefix='rōnin', color='green'):
"""
Writes a message to the terminal with a colored prefix.
:param message: message
:type message: str
:param color: color name
:type color: str
"""
if color:
prefix = getattr(terminal, color)(prefix)
print('{}: {}'.format(prefix, message))
def error(message):
"""
Writes an error message to the terminal with a red prefix.
:param message: message or exception
:type message: str or BaseException subclass instance
"""
if isinstance(message, BaseException):
the_type = type(message).__name__
<|code_end|>
, predict the next line using imports from the current file:
from .unicode import to_str
from blessings import Terminal
import colorama, atexit
and context including class names, function names, and sometimes code from other files:
# Path: ronin/utils/unicode.py
. Output only the next line. | message = to_str(message) |
Next line prediction: <|code_start|>
UNESCAPED_STRING_RE = re.compile(r'(?<!\\) ')
def stringify(value):
"""
Casts the value to a Unicode string. If the value is a function, calls it using
:func:`ronin.contexts.current_context` as its only argument, and recurses until a
non-FunctionType value is returned.
None values are preserved, whether None is directly sent to this function or is the return
value of function argument.
This function is the heart of Rōnin's deferred value capability, as it allows lambdas to be
passed around instead of strings.
:param value: value or None
:type value: str|FunctionType
:returns: stringified value or None
:rtype: str
"""
if value is None:
return None
elif hasattr(value, '__call__'):
with current_context() as ctx:
value = value(ctx)
return stringify(value)
else:
try:
<|code_end|>
. Use current file imports:
(from .unicode import to_str
from ..contexts import current_context
import re)
and context including class names, function names, or small code snippets from other files:
# Path: ronin/utils/unicode.py
#
# Path: ronin/contexts.py
# def current_context(immutable=True):
# """
# Returns the current context if there is one, otherwise raises :class:`NoContextException`.
#
# By default, the context will be treated as immutable.
#
# :param immutable: set to False in order to allow changes to the context
# :type immutable: bool
# :returns: current context
# :rtype: :class:`Context`
# """
#
# ctx = Context._peek_thread_local()
# if ctx is None:
# raise NoContextException()
# return Context(ctx, True) if immutable else ctx
. Output only the next line. | return to_str(value) |
Here is a snippet: <|code_start|>
from __future__ import unicode_literals
_ENCODING = 'utf-8'
UNESCAPED_STRING_RE = re.compile(r'(?<!\\) ')
def stringify(value):
"""
Casts the value to a Unicode string. If the value is a function, calls it using
:func:`ronin.contexts.current_context` as its only argument, and recurses until a
non-FunctionType value is returned.
None values are preserved, whether None is directly sent to this function or is the return
value of function argument.
This function is the heart of Rōnin's deferred value capability, as it allows lambdas to be
passed around instead of strings.
:param value: value or None
:type value: str|FunctionType
:returns: stringified value or None
:rtype: str
"""
if value is None:
return None
elif hasattr(value, '__call__'):
<|code_end|>
. Write the next line using the current file imports:
from .unicode import to_str
from ..contexts import current_context
import re
and context from other files:
# Path: ronin/utils/unicode.py
#
# Path: ronin/contexts.py
# def current_context(immutable=True):
# """
# Returns the current context if there is one, otherwise raises :class:`NoContextException`.
#
# By default, the context will be treated as immutable.
#
# :param immutable: set to False in order to allow changes to the context
# :type immutable: bool
# :returns: current context
# :rtype: :class:`Context`
# """
#
# ctx = Context._peek_thread_local()
# if ctx is None:
# raise NoContextException()
# return Context(ctx, True) if immutable else ctx
, which may include functions, classes, or code. Output only the next line. | with current_context() as ctx: |
Given snippet: <|code_start|>extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Rōnin'
copyright = u'2016-2018, Tal Liron' # @ReservedAssignment
author = u'Tal Liron'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os, sys
from ronin.version import VERSION
from sphinx.domains.python import PythonDomain
and context:
# Path: ronin/version.py
# VERSION = '1.1.2'
which might include code, classes, or functions. Output only the next line. | version = VERSION |
Here is a snippet: <|code_start|># Copyright 2016-2018 Tal Liron
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
class Extension(object):
"""
Base class for extensions.
Extensions can nest child extensions (and so can they).
:ivar extensions: child extensions
:vartype command: [:class:`Extension`]
"""
def __init__(self):
<|code_end|>
. Write the next line using the current file imports:
from .contexts import current_context
from .utils.types import verify_type
from .utils.collections import StrictList
from .utils.strings import stringify
from types import FunctionType
and context from other files:
# Path: ronin/contexts.py
# def current_context(immutable=True):
# """
# Returns the current context if there is one, otherwise raises :class:`NoContextException`.
#
# By default, the context will be treated as immutable.
#
# :param immutable: set to False in order to allow changes to the context
# :type immutable: bool
# :returns: current context
# :rtype: :class:`Context`
# """
#
# ctx = Context._peek_thread_local()
# if ctx is None:
# raise NoContextException()
# return Context(ctx, True) if immutable else ctx
#
# Path: ronin/utils/types.py
# def verify_type(value, the_type):
# """
# Raises :class:`TypeError` if the value is not an instance of the type.
#
# :param value: value
# :param the_type: type or type name
# :type the_type: type|str
# :raises TypeError: if ``value`` is not an instance of ``the_type``
# :raises ~exceptions.ValueError: if ``the_type`` is invalid
# :raises ImportError: if could not import the module
# :raises AttributeError: if could not find the symbol in the module
# """
#
# if isinstance(the_type, string):
# the_type = import_symbol(the_type)
# if not isclass(the_type):
# raise ValueError('{} is not a type'.format(the_type))
#
# if not isinstance(value, the_type):
# raise TypeError('not an instance of {}: {}'.format(type_name(the_type),
# type_name(type(value))))
#
# Path: ronin/utils/collections.py
# class StrictList(list):
# """
# A list that raises :class:`~exceptions.TypeError` exceptions when objects of the wrong type are
# inserted.
#
# :param items: initial list
# :type items: list
# :param value_type: type(s) required for list values
# :type value_type: :obj:`type` or :obj:`str` or (:obj:`type` or :obj:`str`)
# :param wrapper_function: calls this optional function on all values before added to the list
# :type wrapper_function: ~types.FunctionType
# :param unwrapper_function: calls this optional function on all values when retrieved from the
# list
# :type unwrapper_function: ~types.FunctionType
# """
#
# def __init__(self, items=None, value_type=None, wrapper_function=None, unwrapper_function=None):
# super(StrictList, self).__init__()
# if isinstance(items, StrictList):
# self.value_type = items.value_type
# self.wrapper_function = items.wrapper_function
# self.unwrapper_function = items.unwrapper_function
# self.value_type = _convert_type(value_type)
# self.wrapper_function = wrapper_function
# self.unwrapper_function = unwrapper_function
# if items:
# for item in items:
# self.append(item)
#
# def _wrap(self, value):
# if (self.value_type is not None) and (not isinstance(value, self.value_type)):
# raise TypeError('value must be a "{}": {!r}'.format(type_name(self.value_type), value))
# if self.wrapper_function is not None:
# value = self.wrapper_function(value)
# return value
#
# def _unwrap(self, value):
# if self.unwrapper_function is not None:
# value = self.unwrapper_function(value)
# return value
#
# def __getitem__(self, index):
# value = super(StrictList, self).__getitem__(index)
# value = self._unwrap(value)
# return value
#
# def __setitem__(self, index, value):
# value = self._wrap(value)
# return super(StrictList, self).__setitem__(index, value)
#
# def __iadd__(self, values):
# values = [self._wrap(v) for v in values]
# return super(StrictList, self).__iadd__(values)
#
# def append(self, value):
# value = self._wrap(value)
# return super(StrictList, self).append(value)
#
# def extend(self, values):
# values = [self._wrap(v) for v in values]
# return super(StrictList, self).extend(values)
#
# def insert(self, index, value):
# value = self._wrap(value)
# return super(StrictList, self).insert(index, value)
#
# Path: ronin/utils/strings.py
# def stringify(value):
# """
# Casts the value to a Unicode string. If the value is a function, calls it using
# :func:`ronin.contexts.current_context` as its only argument, and recurses until a
# non-FunctionType value is returned.
#
# None values are preserved, whether None is directly sent to this function or is the return
# value of function argument.
#
# This function is the heart of Rōnin's deferred value capability, as it allows lambdas to be
# passed around instead of strings.
#
# :param value: value or None
# :type value: str|FunctionType
# :returns: stringified value or None
# :rtype: str
# """
#
# if value is None:
# return None
# elif hasattr(value, '__call__'):
# with current_context() as ctx:
# value = value(ctx)
# return stringify(value)
# else:
# try:
# return to_str(value)
# except UnicodeDecodeError:
# return str(value).decode(_ENCODING)
, which may include functions, classes, or code. Output only the next line. | self.extensions = StrictList(value_type=Extension) |
Given the code snippet: <|code_start|>
class Executor(object):
"""
Base class for executors.
:ivar command: command
:vartype command: str or ~types.FunctionType
:ivar command_types: command types supported (used by extensions)
:vartype command_types: [:obj:`str`]
:ivar output_extension: when calculating outputs, change extension to this
:vartype output_extension: str or ~types.FunctionType
:ivar output_prefix: when calculating outputs, prefix this to filename
:vartype output_prefix: str or ~types.FunctionType
:ivar hooks: called when generating the Ninja file
:vartype hooks: [:obj:`~types.FunctionType`]
"""
def __init__(self):
self.command = None
self.command_types = StrictList(value_type=str)
self.output_extension = None
self.output_prefix = None
self.output_type = 'binary'
self.hooks = StrictList(value_type='types.FunctionType')
self._deps_file = None
self._deps_type = None
def write_command(self, f, argument_filter=None):
for hook in self.hooks:
hook(self)
<|code_end|>
, generate the next line using the imports in this file:
from .utils.strings import stringify, join_later
from .utils.collections import StrictList
from io import StringIO
and context (functions, classes, or occasionally code) from other files:
# Path: ronin/utils/strings.py
# def stringify(value):
# """
# Casts the value to a Unicode string. If the value is a function, calls it using
# :func:`ronin.contexts.current_context` as its only argument, and recurses until a
# non-FunctionType value is returned.
#
# None values are preserved, whether None is directly sent to this function or is the return
# value of function argument.
#
# This function is the heart of Rōnin's deferred value capability, as it allows lambdas to be
# passed around instead of strings.
#
# :param value: value or None
# :type value: str|FunctionType
# :returns: stringified value or None
# :rtype: str
# """
#
# if value is None:
# return None
# elif hasattr(value, '__call__'):
# with current_context() as ctx:
# value = value(ctx)
# return stringify(value)
# else:
# try:
# return to_str(value)
# except UnicodeDecodeError:
# return str(value).decode(_ENCODING)
#
# def join_later(values, separator=' '):
# """
# Creates a lambda that calls :func:`stringify_list` and joins the results on ``separator``.
#
# :param values: values
# :type values: []
# :param separator: separator
# :type separator: str|FunctionType
# :returns: lambda returning the joined string
# :rtype: FunctionType
# """
#
# return lambda _: stringify(separator).join(stringify_list(values))
#
# Path: ronin/utils/collections.py
# class StrictList(list):
# """
# A list that raises :class:`~exceptions.TypeError` exceptions when objects of the wrong type are
# inserted.
#
# :param items: initial list
# :type items: list
# :param value_type: type(s) required for list values
# :type value_type: :obj:`type` or :obj:`str` or (:obj:`type` or :obj:`str`)
# :param wrapper_function: calls this optional function on all values before added to the list
# :type wrapper_function: ~types.FunctionType
# :param unwrapper_function: calls this optional function on all values when retrieved from the
# list
# :type unwrapper_function: ~types.FunctionType
# """
#
# def __init__(self, items=None, value_type=None, wrapper_function=None, unwrapper_function=None):
# super(StrictList, self).__init__()
# if isinstance(items, StrictList):
# self.value_type = items.value_type
# self.wrapper_function = items.wrapper_function
# self.unwrapper_function = items.unwrapper_function
# self.value_type = _convert_type(value_type)
# self.wrapper_function = wrapper_function
# self.unwrapper_function = unwrapper_function
# if items:
# for item in items:
# self.append(item)
#
# def _wrap(self, value):
# if (self.value_type is not None) and (not isinstance(value, self.value_type)):
# raise TypeError('value must be a "{}": {!r}'.format(type_name(self.value_type), value))
# if self.wrapper_function is not None:
# value = self.wrapper_function(value)
# return value
#
# def _unwrap(self, value):
# if self.unwrapper_function is not None:
# value = self.unwrapper_function(value)
# return value
#
# def __getitem__(self, index):
# value = super(StrictList, self).__getitem__(index)
# value = self._unwrap(value)
# return value
#
# def __setitem__(self, index, value):
# value = self._wrap(value)
# return super(StrictList, self).__setitem__(index, value)
#
# def __iadd__(self, values):
# values = [self._wrap(v) for v in values]
# return super(StrictList, self).__iadd__(values)
#
# def append(self, value):
# value = self._wrap(value)
# return super(StrictList, self).append(value)
#
# def extend(self, values):
# values = [self._wrap(v) for v in values]
# return super(StrictList, self).extend(values)
#
# def insert(self, index, value):
# value = self._wrap(value)
# return super(StrictList, self).insert(index, value)
. Output only the next line. | f.write(stringify(self.command)) |
Predict the next line for this snippet: <|code_start|> if to_filter and argument_filter:
argument = argument_filter(argument)
if append:
if argument not in arguments:
arguments.append(argument)
else:
arguments.remove(argument)
if arguments:
f.write(' ')
f.write(' '.join(arguments))
def add_argument(self, *value):
self._argument(True, True, *value)
def add_argument_unfiltered(self, *value):
self._argument(True, False, *value)
def remove_argument(self, *value):
self._argument(False, True, *value)
def remove_argument_unfiltered(self, *value):
self._argument(False, False, *value)
def _argument(self, append, to_filter, *value):
l = len(value)
if l == 0:
return
elif l == 1:
value = value[0]
else:
<|code_end|>
with the help of current file imports:
from .utils.strings import stringify, join_later
from .utils.collections import StrictList
from io import StringIO
and context from other files:
# Path: ronin/utils/strings.py
# def stringify(value):
# """
# Casts the value to a Unicode string. If the value is a function, calls it using
# :func:`ronin.contexts.current_context` as its only argument, and recurses until a
# non-FunctionType value is returned.
#
# None values are preserved, whether None is directly sent to this function or is the return
# value of function argument.
#
# This function is the heart of Rōnin's deferred value capability, as it allows lambdas to be
# passed around instead of strings.
#
# :param value: value or None
# :type value: str|FunctionType
# :returns: stringified value or None
# :rtype: str
# """
#
# if value is None:
# return None
# elif hasattr(value, '__call__'):
# with current_context() as ctx:
# value = value(ctx)
# return stringify(value)
# else:
# try:
# return to_str(value)
# except UnicodeDecodeError:
# return str(value).decode(_ENCODING)
#
# def join_later(values, separator=' '):
# """
# Creates a lambda that calls :func:`stringify_list` and joins the results on ``separator``.
#
# :param values: values
# :type values: []
# :param separator: separator
# :type separator: str|FunctionType
# :returns: lambda returning the joined string
# :rtype: FunctionType
# """
#
# return lambda _: stringify(separator).join(stringify_list(values))
#
# Path: ronin/utils/collections.py
# class StrictList(list):
# """
# A list that raises :class:`~exceptions.TypeError` exceptions when objects of the wrong type are
# inserted.
#
# :param items: initial list
# :type items: list
# :param value_type: type(s) required for list values
# :type value_type: :obj:`type` or :obj:`str` or (:obj:`type` or :obj:`str`)
# :param wrapper_function: calls this optional function on all values before added to the list
# :type wrapper_function: ~types.FunctionType
# :param unwrapper_function: calls this optional function on all values when retrieved from the
# list
# :type unwrapper_function: ~types.FunctionType
# """
#
# def __init__(self, items=None, value_type=None, wrapper_function=None, unwrapper_function=None):
# super(StrictList, self).__init__()
# if isinstance(items, StrictList):
# self.value_type = items.value_type
# self.wrapper_function = items.wrapper_function
# self.unwrapper_function = items.unwrapper_function
# self.value_type = _convert_type(value_type)
# self.wrapper_function = wrapper_function
# self.unwrapper_function = unwrapper_function
# if items:
# for item in items:
# self.append(item)
#
# def _wrap(self, value):
# if (self.value_type is not None) and (not isinstance(value, self.value_type)):
# raise TypeError('value must be a "{}": {!r}'.format(type_name(self.value_type), value))
# if self.wrapper_function is not None:
# value = self.wrapper_function(value)
# return value
#
# def _unwrap(self, value):
# if self.unwrapper_function is not None:
# value = self.unwrapper_function(value)
# return value
#
# def __getitem__(self, index):
# value = super(StrictList, self).__getitem__(index)
# value = self._unwrap(value)
# return value
#
# def __setitem__(self, index, value):
# value = self._wrap(value)
# return super(StrictList, self).__setitem__(index, value)
#
# def __iadd__(self, values):
# values = [self._wrap(v) for v in values]
# return super(StrictList, self).__iadd__(values)
#
# def append(self, value):
# value = self._wrap(value)
# return super(StrictList, self).append(value)
#
# def extend(self, values):
# values = [self._wrap(v) for v in values]
# return super(StrictList, self).extend(values)
#
# def insert(self, index, value):
# value = self._wrap(value)
# return super(StrictList, self).insert(index, value)
, which may contain function names, class names, or code. Output only the next line. | value = join_later(value) |
Given the following code snippet before the placeholder: <|code_start|>#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
class Executor(object):
"""
Base class for executors.
:ivar command: command
:vartype command: str or ~types.FunctionType
:ivar command_types: command types supported (used by extensions)
:vartype command_types: [:obj:`str`]
:ivar output_extension: when calculating outputs, change extension to this
:vartype output_extension: str or ~types.FunctionType
:ivar output_prefix: when calculating outputs, prefix this to filename
:vartype output_prefix: str or ~types.FunctionType
:ivar hooks: called when generating the Ninja file
:vartype hooks: [:obj:`~types.FunctionType`]
"""
def __init__(self):
self.command = None
<|code_end|>
, predict the next line using imports from the current file:
from .utils.strings import stringify, join_later
from .utils.collections import StrictList
from io import StringIO
and context including class names, function names, and sometimes code from other files:
# Path: ronin/utils/strings.py
# def stringify(value):
# """
# Casts the value to a Unicode string. If the value is a function, calls it using
# :func:`ronin.contexts.current_context` as its only argument, and recurses until a
# non-FunctionType value is returned.
#
# None values are preserved, whether None is directly sent to this function or is the return
# value of function argument.
#
# This function is the heart of Rōnin's deferred value capability, as it allows lambdas to be
# passed around instead of strings.
#
# :param value: value or None
# :type value: str|FunctionType
# :returns: stringified value or None
# :rtype: str
# """
#
# if value is None:
# return None
# elif hasattr(value, '__call__'):
# with current_context() as ctx:
# value = value(ctx)
# return stringify(value)
# else:
# try:
# return to_str(value)
# except UnicodeDecodeError:
# return str(value).decode(_ENCODING)
#
# def join_later(values, separator=' '):
# """
# Creates a lambda that calls :func:`stringify_list` and joins the results on ``separator``.
#
# :param values: values
# :type values: []
# :param separator: separator
# :type separator: str|FunctionType
# :returns: lambda returning the joined string
# :rtype: FunctionType
# """
#
# return lambda _: stringify(separator).join(stringify_list(values))
#
# Path: ronin/utils/collections.py
# class StrictList(list):
# """
# A list that raises :class:`~exceptions.TypeError` exceptions when objects of the wrong type are
# inserted.
#
# :param items: initial list
# :type items: list
# :param value_type: type(s) required for list values
# :type value_type: :obj:`type` or :obj:`str` or (:obj:`type` or :obj:`str`)
# :param wrapper_function: calls this optional function on all values before added to the list
# :type wrapper_function: ~types.FunctionType
# :param unwrapper_function: calls this optional function on all values when retrieved from the
# list
# :type unwrapper_function: ~types.FunctionType
# """
#
# def __init__(self, items=None, value_type=None, wrapper_function=None, unwrapper_function=None):
# super(StrictList, self).__init__()
# if isinstance(items, StrictList):
# self.value_type = items.value_type
# self.wrapper_function = items.wrapper_function
# self.unwrapper_function = items.unwrapper_function
# self.value_type = _convert_type(value_type)
# self.wrapper_function = wrapper_function
# self.unwrapper_function = unwrapper_function
# if items:
# for item in items:
# self.append(item)
#
# def _wrap(self, value):
# if (self.value_type is not None) and (not isinstance(value, self.value_type)):
# raise TypeError('value must be a "{}": {!r}'.format(type_name(self.value_type), value))
# if self.wrapper_function is not None:
# value = self.wrapper_function(value)
# return value
#
# def _unwrap(self, value):
# if self.unwrapper_function is not None:
# value = self.unwrapper_function(value)
# return value
#
# def __getitem__(self, index):
# value = super(StrictList, self).__getitem__(index)
# value = self._unwrap(value)
# return value
#
# def __setitem__(self, index, value):
# value = self._wrap(value)
# return super(StrictList, self).__setitem__(index, value)
#
# def __iadd__(self, values):
# values = [self._wrap(v) for v in values]
# return super(StrictList, self).__iadd__(values)
#
# def append(self, value):
# value = self._wrap(value)
# return super(StrictList, self).append(value)
#
# def extend(self, values):
# values = [self._wrap(v) for v in values]
# return super(StrictList, self).extend(values)
#
# def insert(self, index, value):
# value = self._wrap(value)
# return super(StrictList, self).insert(index, value)
. Output only the next line. | self.command_types = StrictList(value_type=str) |
Using the snippet: <|code_start|> Human-readable name of type(s). Built-in types will avoid the "__builtin__" prefix.
Tuples are always handled as a join of "|".
:param the_type: type(s)
:type the_type: type|(type)
:returns: name of type(s)
:rtype: str
"""
if isinstance(the_type, tuple):
return '|'.join([type_name(v) for v in the_type])
module = to_str(the_type.__module__)
name = to_str(the_type.__name__)
return name if module == '__builtin__' else '{}.{}'.format(module, name)
def verify_type(value, the_type):
"""
Raises :class:`TypeError` if the value is not an instance of the type.
:param value: value
:param the_type: type or type name
:type the_type: type|str
:raises TypeError: if ``value`` is not an instance of ``the_type``
:raises ~exceptions.ValueError: if ``the_type`` is invalid
:raises ImportError: if could not import the module
:raises AttributeError: if could not find the symbol in the module
"""
<|code_end|>
, determine the next line of code. You have imports:
from .unicode import string, to_str
from inspect import isclass
and context (class names, function names, or code) available:
# Path: ronin/utils/unicode.py
. Output only the next line. | if isinstance(the_type, string): |
Using the snippet: <|code_start|> """
Imports a symbol based on its fully qualified name.
:param name: symbol name
:type name: str
:returns: symbol
:raises ImportError: if could not import the module
:raises AttributeError: if could not find the symbol in the module
"""
if name and ('.' in name):
module_name, name = name.rsplit('.', 1)
return getattr(__import__(module_name, fromlist=[name], level=0), name)
raise ImportError('import not found: {}'.format(name))
def type_name(the_type):
"""
Human-readable name of type(s). Built-in types will avoid the "__builtin__" prefix.
Tuples are always handled as a join of "|".
:param the_type: type(s)
:type the_type: type|(type)
:returns: name of type(s)
:rtype: str
"""
if isinstance(the_type, tuple):
return '|'.join([type_name(v) for v in the_type])
<|code_end|>
, determine the next line of code. You have imports:
from .unicode import string, to_str
from inspect import isclass
and context (class names, function names, or code) available:
# Path: ronin/utils/unicode.py
. Output only the next line. | module = to_str(the_type.__module__) |
Using the snippet: <|code_start|>def configure_platform(prefixes=None, which_command=None):
"""
Configures the current context's platform support.
:param prefixes: overrides for the default platform prefixes; unspecified keys will remain
unchanged from their defaults
:type prefixes: {str: str|FunctionType}
:param which_command: absolute path to :func:`which` command; defaults to "/usr/bin/which"
:type which_command: str|FunctionType
"""
with current_context(False) as ctx:
ctx.platform.prefixes = DEFAULT_PLATFORM_PREFIXES.copy()
if platform_prefixes:
ctx.platform.prefixes.update(prefixes)
ctx.platform.which_command = which_command or DEFAULT_WHICH_COMMAND
def platform_command(command, platform):
"""
The command prefixed for the platform, from :func:`platform_prefixes`.
:param command: command
:type command: str|FunctionType
:param platform: platform
:type platform: str|FunctionType
:returns: prefixed command
:rtype: str
"""
<|code_end|>
, determine the next line of code. You have imports:
from .strings import stringify
from ..contexts import current_context
from subprocess import check_output, CalledProcessError
import sys, platform
and context (class names, function names, or code) available:
# Path: ronin/utils/strings.py
# def stringify(value):
# """
# Casts the value to a Unicode string. If the value is a function, calls it using
# :func:`ronin.contexts.current_context` as its only argument, and recurses until a
# non-FunctionType value is returned.
#
# None values are preserved, whether None is directly sent to this function or is the return
# value of function argument.
#
# This function is the heart of Rōnin's deferred value capability, as it allows lambdas to be
# passed around instead of strings.
#
# :param value: value or None
# :type value: str|FunctionType
# :returns: stringified value or None
# :rtype: str
# """
#
# if value is None:
# return None
# elif hasattr(value, '__call__'):
# with current_context() as ctx:
# value = value(ctx)
# return stringify(value)
# else:
# try:
# return to_str(value)
# except UnicodeDecodeError:
# return str(value).decode(_ENCODING)
#
# Path: ronin/contexts.py
# def current_context(immutable=True):
# """
# Returns the current context if there is one, otherwise raises :class:`NoContextException`.
#
# By default, the context will be treated as immutable.
#
# :param immutable: set to False in order to allow changes to the context
# :type immutable: bool
# :returns: current context
# :rtype: :class:`Context`
# """
#
# ctx = Context._peek_thread_local()
# if ctx is None:
# raise NoContextException()
# return Context(ctx, True) if immutable else ctx
. Output only the next line. | command = stringify(command) |
Next line prediction: <|code_start|># Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from __future__ import absolute_import # so we can import 'platform'
DEFAULT_WHICH_COMMAND = '/usr/bin/which'
DEFAULT_PLATFORM_PREFIXES = {
'linux64': 'x86_64-linux-gnu-',
'linux32': 'x86_64-linux-gnu-', #'i686-linux-gnu-',
'win64': 'x86_64-w64-mingw32-',
'win32': 'i686-w64-mingw32-'}
def configure_platform(prefixes=None, which_command=None):
"""
Configures the current context's platform support.
:param prefixes: overrides for the default platform prefixes; unspecified keys will remain
unchanged from their defaults
:type prefixes: {str: str|FunctionType}
:param which_command: absolute path to :func:`which` command; defaults to "/usr/bin/which"
:type which_command: str|FunctionType
"""
<|code_end|>
. Use current file imports:
(from .strings import stringify
from ..contexts import current_context
from subprocess import check_output, CalledProcessError
import sys, platform)
and context including class names, function names, or small code snippets from other files:
# Path: ronin/utils/strings.py
# def stringify(value):
# """
# Casts the value to a Unicode string. If the value is a function, calls it using
# :func:`ronin.contexts.current_context` as its only argument, and recurses until a
# non-FunctionType value is returned.
#
# None values are preserved, whether None is directly sent to this function or is the return
# value of function argument.
#
# This function is the heart of Rōnin's deferred value capability, as it allows lambdas to be
# passed around instead of strings.
#
# :param value: value or None
# :type value: str|FunctionType
# :returns: stringified value or None
# :rtype: str
# """
#
# if value is None:
# return None
# elif hasattr(value, '__call__'):
# with current_context() as ctx:
# value = value(ctx)
# return stringify(value)
# else:
# try:
# return to_str(value)
# except UnicodeDecodeError:
# return str(value).decode(_ENCODING)
#
# Path: ronin/contexts.py
# def current_context(immutable=True):
# """
# Returns the current context if there is one, otherwise raises :class:`NoContextException`.
#
# By default, the context will be treated as immutable.
#
# :param immutable: set to False in order to allow changes to the context
# :type immutable: bool
# :returns: current context
# :rtype: :class:`Context`
# """
#
# ctx = Context._peek_thread_local()
# if ctx is None:
# raise NoContextException()
# return Context(ctx, True) if immutable else ctx
. Output only the next line. | with current_context(False) as ctx: |
Given the following code snippet before the placeholder: <|code_start|>)
RUNTIMES = {
"threading": pytest.param(
Runtime(
name="threading",
actor_class=ThreadingActor,
event_class=threading.Event,
future_class=ThreadingFuture,
sleep_func=time.sleep,
),
id="threading",
)
}
@pytest.fixture(scope="session", params=RUNTIMES.values())
def runtime(request):
return request.param
@pytest.fixture
def stop_all():
yield
ActorRegistry.stop_all()
@pytest.fixture
def log_handler():
<|code_end|>
, predict the next line using imports from the current file:
import logging
import threading
import time
import pytest
from collections import namedtuple
from pykka import ActorRegistry, ThreadingActor, ThreadingFuture
from tests.log_handler import PykkaTestLogHandler
and context including class names, function names, and sometimes code from other files:
# Path: tests/log_handler.py
# class PykkaTestLogHandler(logging.Handler):
# def __init__(self, *args, **kwargs):
# self.lock = threading.RLock()
# with self.lock:
# self.events = collections.defaultdict(threading.Event)
# self.messages = {}
# self.reset()
# logging.Handler.__init__(self, *args, **kwargs)
#
# def emit(self, record):
# with self.lock:
# level = record.levelname.lower()
# self.messages[level].append(record)
# self.events[level].set()
#
# def reset(self):
# with self.lock:
# for level in ("debug", "info", "warning", "error", "critical"):
# self.events[level].clear()
# self.messages[level] = []
#
# def wait_for_message(self, level, num_messages=1, timeout=5):
# """Wait until at least ``num_messages`` log messages have been emitted
# to the given log level."""
# deadline = time.time() + timeout
# while time.time() < deadline:
# with self.lock:
# if len(self.messages[level]) >= num_messages:
# return
# self.events[level].clear()
# self.events[level].wait(1)
# raise Exception(f"Timeout: Waited {timeout:d}s for log message")
. Output only the next line. | log_handler = PykkaTestLogHandler() |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
"""
An x11 bridge provides a secure/firewalled link between a desktop application and the host x11 server. In this case, we use XPRA to do the bridging.
::.
------------- -------------
|desktop app| <--/tmp/.X11-unix--> |xpra server| Untrusted
------------- -------------
^
| ~/.xpra
v
------------- -------------
| host | <--/tmp/.X11-unix--> |xpra client| Trusted
------------- -------------
This configuration involves 3 containers.
1) contains the untrusted desktop application
2) contains an untrusted xpra server
3) contains a trusted xpra client
I up-to-date version of xpra can be used, xpra need not be installed on the host.
"""
#external imports
#internal imports
<|code_end|>
with the help of current file imports:
import os
import time
import shutil
import errno
import sys
import hashlib
import subuserlib.verify
import subuserlib.subuser
from subuserlib.classes.service import Service
from collections import OrderedDict
from subuserlib.classes.permissionsAccepters.acceptPermissionsAtCLI import AcceptPermissionsAtCLI
and context from other files:
# Path: subuserlib/classes/service.py
# class Service(UserOwnedObject):
# __metaclass__ = abc.ABCMeta
#
# def __init__(self,user,subuser):
# self.__subuser = subuser
# UserOwnedObject.__init__(self,user)
#
# @abc.abstractmethod
# def start(self,serviceStatus):
# """
# Start the service. Block untill the service has started. Returns a modified service status dictionary with any service specific properties set.
# """
# pass
#
# @abc.abstractmethod
# def stop(self,serviceStatus):
# """
# Stop the service. Block untill the service has stopped.
# """
# pass
#
# @abc.abstractmethod
# def cleanUp(self):
# pass
#
# @abc.abstractmethod
# def isRunning(self,serviceStatus):
# """
# Returns True if the services is running.
# """
# pass
#
# def getLockfileDir(self):
# return os.path.join(self.user.config["lock-dir"],"services",self.__subuser.name)
#
# def getLockfilePath(self):
# return os.path.join(self.getLockfileDir(),self.name+".json")
#
# def removeLockFile(self):
# os.remove(self.getLockfilePath())
# try:
# os.rmdir(self.getLockfileDir())
# except OSError:
# pass
#
# def getLock(self):
# try:
# self.user.endUser.makedirs(self.getLockfileDir())
# except OSError as exception:
# if exception.errno != errno.EEXIST:
# raise
# while True:
# try:
# lockFd = open(self.getLockfilePath(),mode="r+")
# break
# except IOError:
# self.user.endUser.create_file(self.getLockfilePath())
# fcntl.flock(lockFd,fcntl.LOCK_EX)
# return lockFd
#
# def addClient(self):
# """
# Increase the services client counter, starting the service if necessary. Blocks untill the service is ready to accept the new client.
# """
# with self.getLock() as lockFile:
# try:
# serviceStatus = json.load(lockFile, object_pairs_hook=OrderedDict)
# except ValueError:
# serviceStatus = {}
# serviceStatus["client-counter"] = 0
# if serviceStatus["client-counter"] == 0 or not self.isRunning(serviceStatus):
# serviceStatus["client-counter"] = 0
# serviceStatus = self.start(serviceStatus)
# serviceStatus["client-counter"] = serviceStatus["client-counter"] + 1
# lockFile.seek(0)
# lockFile.truncate()
# json.dump(serviceStatus,lockFile)
# fcntl.flock(lockFile,fcntl.LOCK_UN)
#
# def removeClient(self):
# """
# Decrease the services client counter, stopping the service if no longer necessary.
# """
# noMoreClients = False
# with self.getLock() as lockFile:
# try:
# lock_info = lockFile.read()
# serviceStatus = json.loads(lock_info, object_pairs_hook=OrderedDict)
# except ValueError as e:
# sys.exit("Error in lock file. Failed to release lock:\n"+lock_info+"\n"+str(e))
# serviceStatus["client-counter"] = serviceStatus["client-counter"] - 1
# if serviceStatus["client-counter"] < 0:
# raise RemoveClientException("The client-counter is already zero. Client cannot be removed! \n" + str(serviceStatus))
# if serviceStatus["client-counter"] == 0:
# self.stop(serviceStatus)
# noMoreClients = True
# lockFile.seek(0)
# lockFile.truncate()
# json.dump(serviceStatus,lockFile)
# if noMoreClients:
# self.removeLockFile()
# fcntl.flock(lockFile,fcntl.LOCK_UN)
#
# def cleanUpIfNotRunning(self):
# """
# Check if running.
# If not running, run cleanUp method.
# """
# with self.getLock() as lockFile:
# try:
# serviceStatus = json.load(lockFile, object_pairs_hook=OrderedDict)
# except ValueError:
# serviceStatus = {}
# serviceStatus["client-counter"] = 0
# if serviceStatus["client-counter"] == 0 or not self.isRunning(serviceStatus):
# self.cleanUp()
# self.removeLockFile()
# fcntl.flock(lockFile,fcntl.LOCK_UN)
, which may contain function names, class names, or code. Output only the next line. | class XpraX11Bridge(Service): |
Predict the next line for this snippet: <|code_start|> self.preArgs = []
self.subuserName = None
self.subuserArgs = []
self.consumedSubuserName = False
def readArg(self,arg):
if not self.consumedSubuserName:
if arg.startswith("-"):
self.preArgs.append(arg)
else:
self.subuserName = arg
self.consumedSubuserName = True
else:
self.subuserArgs.append(arg)
#################################################################################################
@subuserlib.profile.do_cprofile
def runCommand(args):
preArgs = []
argParser = ArgParser()
for arg in args:
argParser.readArg(arg)
if not argParser.consumedSubuserName:
print("Subuser name not listed.")
parseCliArgs(["--help"])
options,_ = parseCliArgs(argParser.preArgs)
<|code_end|>
with the help of current file imports:
import sys
import optparse
import os
import subuserlib.commandLineArguments
import subuserlib.profile
from subuserlib.classes.user import User
and context from other files:
# Path: subuserlib/classes/user.py
# class User(object):
# """
# This class provides a "base" User object used by subuser. This is the stem of a tree like data structure which holds all of the various objects owned by a given user.
#
# You create a new User object by passing the username and home dir of the user.
#
# >>> import subuserlib.classes.user
# >>> u = subuserlib.classes.user.User(name="root",homeDir="/root/")
# >>> u.homeDir
# '/root/'
# """
# def __init__(self,name=None,homeDir=None,_locked=False):
# self.__config = None
# self.__registry = None
# self.__installedImages = None
# self.__dockerDaemon = None
# self.__runtimeCache = None
# self.__operation = None
# self._has_lock = _locked
# self.name = name
# if homeDir:
# self.homeDir = homeDir
# elif test.testing:
# self.homeDir = os.getcwd()
# else:
# self.homeDir = os.path.expanduser("~")
# self.endUser = EndUser(self)
#
# @property
# def config(self):
# """
# Get the user's :doc:`Config <config>` object.
#
# Note: the user's config will be loaded the first time this is called.
# """
# if self.__config == None:
# self.__config = config.Config(self)
# return self.__config
#
# @property
# def registry(self):
# """
# Get the user's subuser :doc:`Registry <registry>`.
#
# Note: the registry will be loaded the first time this is called.
# """
# if self.__registry == None:
# self.__registry = registry.Registry(self)
# self.__registry.ensureGitRepoInitialized()
# return self.__registry
#
# @registry.setter
# def registry(self, registry):
# self.__registry = registry
#
# def reloadRegistry(self):
# """
# Reload registry from disk.
# """
# self.__registry = None
#
# @property
# def installedImages(self):
# """
# Get the user's :doc:`InstalledImages <installed-images>` list.
#
# Note: the installed images list will be loaded the first time this is called.
# """
# if self.__installedImages == None:
# self.__installedImages = installedImages.InstalledImages(self)
# return self.__installedImages
#
# @property
# def dockerDaemon(self):
# """
# Get the :doc:`DockerDaemon <docker>` object. You will use this to communicate with the Docker daemon.
# """
# if self.__dockerDaemon == None:
# self.__dockerDaemon = dockerDaemon.DockerDaemon(self)
# return self.__dockerDaemon
#
# @property
# def operation(self):
# """
# Get the :doc:`Operation <operation>` object. This object contains runtime data relating to the current "operation". This includes image building configuration data as well as UX options.
# """
# if self.__operation == None:
# self.__operation = Operation(self)
# return self.__operation
, which may contain function names, class names, or code. Output only the next line. | user = User() |
Continue the code snippet: <|code_start|>#!/usr/bin/python3
# -*- coding: utf-8 -*-
#external imports
#internal imports
def parseCliArgs(realArgs):
usage = "usage: subuser version"
description = """Prints subuser's version and other usefull debugging info.
"""
parser = optparse.OptionParser(usage=usage,description=description,formatter=subuserlib.commandLineArguments.HelpFormatterThatDoesntReformatDescription())
parser.add_option("--json",dest="json",action="store_true",default=False,help="Display results in JSON format.")
return parser.parse_args(args=realArgs)
@subuserlib.profile.do_cprofile
def runCommand(realArgs):
"""
>>> import version #import self
>>> version.runCommand([])
Subuser version: 0.5
Docker info:
Foo: bar
"""
<|code_end|>
. Use current file imports:
import json
import sys
import optparse
import subuserlib.version
import subuserlib.commandLineArguments
import subuserlib.profile
from subuserlib.classes.user import User
and context (classes, functions, or code) from other files:
# Path: subuserlib/classes/user.py
# class User(object):
# """
# This class provides a "base" User object used by subuser. This is the stem of a tree like data structure which holds all of the various objects owned by a given user.
#
# You create a new User object by passing the username and home dir of the user.
#
# >>> import subuserlib.classes.user
# >>> u = subuserlib.classes.user.User(name="root",homeDir="/root/")
# >>> u.homeDir
# '/root/'
# """
# def __init__(self,name=None,homeDir=None,_locked=False):
# self.__config = None
# self.__registry = None
# self.__installedImages = None
# self.__dockerDaemon = None
# self.__runtimeCache = None
# self.__operation = None
# self._has_lock = _locked
# self.name = name
# if homeDir:
# self.homeDir = homeDir
# elif test.testing:
# self.homeDir = os.getcwd()
# else:
# self.homeDir = os.path.expanduser("~")
# self.endUser = EndUser(self)
#
# @property
# def config(self):
# """
# Get the user's :doc:`Config <config>` object.
#
# Note: the user's config will be loaded the first time this is called.
# """
# if self.__config == None:
# self.__config = config.Config(self)
# return self.__config
#
# @property
# def registry(self):
# """
# Get the user's subuser :doc:`Registry <registry>`.
#
# Note: the registry will be loaded the first time this is called.
# """
# if self.__registry == None:
# self.__registry = registry.Registry(self)
# self.__registry.ensureGitRepoInitialized()
# return self.__registry
#
# @registry.setter
# def registry(self, registry):
# self.__registry = registry
#
# def reloadRegistry(self):
# """
# Reload registry from disk.
# """
# self.__registry = None
#
# @property
# def installedImages(self):
# """
# Get the user's :doc:`InstalledImages <installed-images>` list.
#
# Note: the installed images list will be loaded the first time this is called.
# """
# if self.__installedImages == None:
# self.__installedImages = installedImages.InstalledImages(self)
# return self.__installedImages
#
# @property
# def dockerDaemon(self):
# """
# Get the :doc:`DockerDaemon <docker>` object. You will use this to communicate with the Docker daemon.
# """
# if self.__dockerDaemon == None:
# self.__dockerDaemon = dockerDaemon.DockerDaemon(self)
# return self.__dockerDaemon
#
# @property
# def operation(self):
# """
# Get the :doc:`Operation <operation>` object. This object contains runtime data relating to the current "operation". This includes image building configuration data as well as UX options.
# """
# if self.__operation == None:
# self.__operation = Operation(self)
# return self.__operation
. Output only the next line. | user = User() |
Given snippet: <|code_start|>#!/usr/bin/python3
# -*- coding: utf-8 -*-
#external imports
#internal imports
def parseCliArgs(realArgs):
usage = "usage: subuser ps"
description = """ List running subusers.
"""
parser=optparse.OptionParser(usage=usage,description=description,formatter=subuserlib.commandLineArguments.HelpFormatterThatDoesntReformatDescription())
parser.add_option("--internal",dest="internal",action="store_true",default=False,help="Include internal subusers in list.")
return parser.parse_args(args=realArgs)
@subuserlib.profile.do_cprofile
def runCommand(realArgs):
options,args = parseCliArgs(realArgs)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sys
import optparse
import json
import os
import uuid
import subuserlib.commandLineArguments
import subuserlib.profile
from subuserlib.classes.user import User
and context:
# Path: subuserlib/classes/user.py
# class User(object):
# """
# This class provides a "base" User object used by subuser. This is the stem of a tree like data structure which holds all of the various objects owned by a given user.
#
# You create a new User object by passing the username and home dir of the user.
#
# >>> import subuserlib.classes.user
# >>> u = subuserlib.classes.user.User(name="root",homeDir="/root/")
# >>> u.homeDir
# '/root/'
# """
# def __init__(self,name=None,homeDir=None,_locked=False):
# self.__config = None
# self.__registry = None
# self.__installedImages = None
# self.__dockerDaemon = None
# self.__runtimeCache = None
# self.__operation = None
# self._has_lock = _locked
# self.name = name
# if homeDir:
# self.homeDir = homeDir
# elif test.testing:
# self.homeDir = os.getcwd()
# else:
# self.homeDir = os.path.expanduser("~")
# self.endUser = EndUser(self)
#
# @property
# def config(self):
# """
# Get the user's :doc:`Config <config>` object.
#
# Note: the user's config will be loaded the first time this is called.
# """
# if self.__config == None:
# self.__config = config.Config(self)
# return self.__config
#
# @property
# def registry(self):
# """
# Get the user's subuser :doc:`Registry <registry>`.
#
# Note: the registry will be loaded the first time this is called.
# """
# if self.__registry == None:
# self.__registry = registry.Registry(self)
# self.__registry.ensureGitRepoInitialized()
# return self.__registry
#
# @registry.setter
# def registry(self, registry):
# self.__registry = registry
#
# def reloadRegistry(self):
# """
# Reload registry from disk.
# """
# self.__registry = None
#
# @property
# def installedImages(self):
# """
# Get the user's :doc:`InstalledImages <installed-images>` list.
#
# Note: the installed images list will be loaded the first time this is called.
# """
# if self.__installedImages == None:
# self.__installedImages = installedImages.InstalledImages(self)
# return self.__installedImages
#
# @property
# def dockerDaemon(self):
# """
# Get the :doc:`DockerDaemon <docker>` object. You will use this to communicate with the Docker daemon.
# """
# if self.__dockerDaemon == None:
# self.__dockerDaemon = dockerDaemon.DockerDaemon(self)
# return self.__dockerDaemon
#
# @property
# def operation(self):
# """
# Get the :doc:`Operation <operation>` object. This object contains runtime data relating to the current "operation". This includes image building configuration data as well as UX options.
# """
# if self.__operation == None:
# self.__operation = Operation(self)
# return self.__operation
which might include code, classes, or functions. Output only the next line. | user = User() |
Given snippet: <|code_start|> archive.seek(0)
def readAndPrintStreamingBuildStatus(user,response):
jsonSegmentBytes = b''
output = b''
byte = response.read(1)
while byte:
jsonSegmentBytes += byte
output += byte
byte = response.read(1)
try:
lineDict = json.loads(jsonSegmentBytes.decode("utf-8"))
if lineDict == {}:
pass
elif "stream" in lineDict:
user.registry.log(lineDict["stream"])
elif "status" in lineDict:
user.registry.log(lineDict["status"])
elif "errorDetail" in lineDict:
raise exceptions.ImageBuildException("Build error:"+lineDict["errorDetail"]["message"]+"\n"+response.read().decode())
else:
raise exceptions.ImageBuildException("Build error:"+jsonSegmentBytes.decode("utf-8")+"\n"+response.read().decode("utf-8"))
jsonSegmentBytes = b''
except ValueError:
pass
output = output.decode("utf-8")
if not output.strip().startswith("{"):
user.registry.log(output)
return output
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import urllib
import tarfile
import os
import tempfile
import fnmatch
import re
import json
import sys
import httplib
import http.client
import StringIO
import io
import subuserlib.docker
import subuserlib.test
import subuserlib.classes.exceptions as exceptions
from subuserlib.classes.userOwnedObject import UserOwnedObject
from subuserlib.classes.uhttpConnection import UHTTPConnection
from subuserlib.classes.docker.container import Container
from subuserlib.classes.docker.mockDockerDaemon import MockDockerDaemon
and context:
# Path: subuserlib/classes/userOwnedObject.py
# class UserOwnedObject(object):
# def __init__(self,user):
# self.user = user
#
# Path: subuserlib/classes/uhttpConnection.py
# class UHTTPConnection(httplib.HTTPConnection):
# """
# Subclass of Python library HTTPConnection that uses a unix-domain socket.
# """
# def __init__(self, path):
# httplib.HTTPConnection.__init__(self, 'localhost')
# self.path = path
#
# def connect(self):
# sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# sock.connect(self.path)
# self.sock = sock
#
# Path: subuserlib/classes/docker/container.py
# class Container(UserOwnedObject):
# def __init__(self,user,containerId):
# self.id = containerId
# UserOwnedObject.__init__(self,user)
#
# def inspect(self):
# """
# Returns a dictionary of container properties.
# If the container no longer exists, return None.
# """
# self.user.dockerDaemon.getConnection().request("GET","/v1.13/containers/"+self.id+"/json")
# response = self.user.dockerDaemon.getConnection().getresponse()
# if not response.status == 200:
# response.read() # Read the response and discard it to prevent the server from getting locked up: https://stackoverflow.com/questions/3231543/python-httplib-responsenotready
# return None
# else:
# return json.loads(response.read().decode("utf-8"))
#
# def stop(self):
# self.user.dockerDaemon.getConnection().request("POST","/v1.13/containers/"+self.id+"/stop")
# response = self.user.dockerDaemon.getConnection().getresponse()
# response.read()
#
# def remove(self,force=False):
# queryParameters = {
# 'force': force
# }
# try:
# queryParametersString = urllib.urlencode(queryParameters)
# except AttributeError:
# queryParametersString = urllib.parse.urlencode(queryParameters) # Python 3
# self.user.dockerDaemon.getConnection().request("DELETE","/v1.13/containers/"+self.id+"?"+queryParametersString)
# response = self.user.dockerDaemon.getConnection().getresponse()
# response.read()
which might include code, classes, or functions. Output only the next line. | class DockerDaemon(UserOwnedObject): |
Continue the code snippet: <|code_start|> user.registry.log(lineDict["stream"])
elif "status" in lineDict:
user.registry.log(lineDict["status"])
elif "errorDetail" in lineDict:
raise exceptions.ImageBuildException("Build error:"+lineDict["errorDetail"]["message"]+"\n"+response.read().decode())
else:
raise exceptions.ImageBuildException("Build error:"+jsonSegmentBytes.decode("utf-8")+"\n"+response.read().decode("utf-8"))
jsonSegmentBytes = b''
except ValueError:
pass
output = output.decode("utf-8")
if not output.strip().startswith("{"):
user.registry.log(output)
return output
class DockerDaemon(UserOwnedObject):
def __init__(self,user):
self.__connection = None
self.__imagePropertiesCache = {}
UserOwnedObject.__init__(self,user)
def getConnection(self):
"""
Get an `HTTPConnection <https://docs.python.org/2/library/httplib.html#httplib.HTTPConnection>`_ to the Docker daemon.
Note: You can find more info in the `Docker API docs <https://docs.docker.com/reference/api/docker_remote_api_v1.13/>`_
"""
if not self.__connection:
subuserlib.docker.getAndVerifyExecutable()
try:
<|code_end|>
. Use current file imports:
import urllib
import tarfile
import os
import tempfile
import fnmatch
import re
import json
import sys
import httplib
import http.client
import StringIO
import io
import subuserlib.docker
import subuserlib.test
import subuserlib.classes.exceptions as exceptions
from subuserlib.classes.userOwnedObject import UserOwnedObject
from subuserlib.classes.uhttpConnection import UHTTPConnection
from subuserlib.classes.docker.container import Container
from subuserlib.classes.docker.mockDockerDaemon import MockDockerDaemon
and context (classes, functions, or code) from other files:
# Path: subuserlib/classes/userOwnedObject.py
# class UserOwnedObject(object):
# def __init__(self,user):
# self.user = user
#
# Path: subuserlib/classes/uhttpConnection.py
# class UHTTPConnection(httplib.HTTPConnection):
# """
# Subclass of Python library HTTPConnection that uses a unix-domain socket.
# """
# def __init__(self, path):
# httplib.HTTPConnection.__init__(self, 'localhost')
# self.path = path
#
# def connect(self):
# sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# sock.connect(self.path)
# self.sock = sock
#
# Path: subuserlib/classes/docker/container.py
# class Container(UserOwnedObject):
# def __init__(self,user,containerId):
# self.id = containerId
# UserOwnedObject.__init__(self,user)
#
# def inspect(self):
# """
# Returns a dictionary of container properties.
# If the container no longer exists, return None.
# """
# self.user.dockerDaemon.getConnection().request("GET","/v1.13/containers/"+self.id+"/json")
# response = self.user.dockerDaemon.getConnection().getresponse()
# if not response.status == 200:
# response.read() # Read the response and discard it to prevent the server from getting locked up: https://stackoverflow.com/questions/3231543/python-httplib-responsenotready
# return None
# else:
# return json.loads(response.read().decode("utf-8"))
#
# def stop(self):
# self.user.dockerDaemon.getConnection().request("POST","/v1.13/containers/"+self.id+"/stop")
# response = self.user.dockerDaemon.getConnection().getresponse()
# response.read()
#
# def remove(self,force=False):
# queryParameters = {
# 'force': force
# }
# try:
# queryParametersString = urllib.urlencode(queryParameters)
# except AttributeError:
# queryParametersString = urllib.parse.urlencode(queryParameters) # Python 3
# self.user.dockerDaemon.getConnection().request("DELETE","/v1.13/containers/"+self.id+"?"+queryParametersString)
# response = self.user.dockerDaemon.getConnection().getresponse()
# response.read()
. Output only the next line. | self.__connection = UHTTPConnection("/var/run/docker.sock") |
Continue the code snippet: <|code_start|> def __init__(self,user):
self.__connection = None
self.__imagePropertiesCache = {}
UserOwnedObject.__init__(self,user)
def getConnection(self):
"""
Get an `HTTPConnection <https://docs.python.org/2/library/httplib.html#httplib.HTTPConnection>`_ to the Docker daemon.
Note: You can find more info in the `Docker API docs <https://docs.docker.com/reference/api/docker_remote_api_v1.13/>`_
"""
if not self.__connection:
subuserlib.docker.getAndVerifyExecutable()
try:
self.__connection = UHTTPConnection("/var/run/docker.sock")
except PermissionError as e:
sys.exit("Permission error (%s) connecting to the docker socket. This usually happens when you've added yourself as a member of the docker group but haven't logged out/in again before starting subuser."% str(e))
return self.__connection
def getContainers(self,onlyRunning=False):
queryParameters = {'all': not onlyRunning}
queryParametersString = urllib.parse.urlencode(queryParameters)
self.getConnection().request("GET","/v1.13/containers/json?"+queryParametersString)
response = self.getConnection().getresponse()
if response.status == 200:
return json.loads(response.read().decode("utf-8"))
else:
return []
def getContainer(self,containerId):
<|code_end|>
. Use current file imports:
import urllib
import tarfile
import os
import tempfile
import fnmatch
import re
import json
import sys
import httplib
import http.client
import StringIO
import io
import subuserlib.docker
import subuserlib.test
import subuserlib.classes.exceptions as exceptions
from subuserlib.classes.userOwnedObject import UserOwnedObject
from subuserlib.classes.uhttpConnection import UHTTPConnection
from subuserlib.classes.docker.container import Container
from subuserlib.classes.docker.mockDockerDaemon import MockDockerDaemon
and context (classes, functions, or code) from other files:
# Path: subuserlib/classes/userOwnedObject.py
# class UserOwnedObject(object):
# def __init__(self,user):
# self.user = user
#
# Path: subuserlib/classes/uhttpConnection.py
# class UHTTPConnection(httplib.HTTPConnection):
# """
# Subclass of Python library HTTPConnection that uses a unix-domain socket.
# """
# def __init__(self, path):
# httplib.HTTPConnection.__init__(self, 'localhost')
# self.path = path
#
# def connect(self):
# sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# sock.connect(self.path)
# self.sock = sock
#
# Path: subuserlib/classes/docker/container.py
# class Container(UserOwnedObject):
# def __init__(self,user,containerId):
# self.id = containerId
# UserOwnedObject.__init__(self,user)
#
# def inspect(self):
# """
# Returns a dictionary of container properties.
# If the container no longer exists, return None.
# """
# self.user.dockerDaemon.getConnection().request("GET","/v1.13/containers/"+self.id+"/json")
# response = self.user.dockerDaemon.getConnection().getresponse()
# if not response.status == 200:
# response.read() # Read the response and discard it to prevent the server from getting locked up: https://stackoverflow.com/questions/3231543/python-httplib-responsenotready
# return None
# else:
# return json.loads(response.read().decode("utf-8"))
#
# def stop(self):
# self.user.dockerDaemon.getConnection().request("POST","/v1.13/containers/"+self.id+"/stop")
# response = self.user.dockerDaemon.getConnection().getresponse()
# response.read()
#
# def remove(self,force=False):
# queryParameters = {
# 'force': force
# }
# try:
# queryParametersString = urllib.urlencode(queryParameters)
# except AttributeError:
# queryParametersString = urllib.parse.urlencode(queryParameters) # Python 3
# self.user.dockerDaemon.getConnection().request("DELETE","/v1.13/containers/"+self.id+"?"+queryParametersString)
# response = self.user.dockerDaemon.getConnection().getresponse()
# response.read()
. Output only the next line. | return Container(self.user,containerId) |
Continue the code snippet: <|code_start|>#internal imports
class Config(userOwnedObject.UserOwnedObject, dict):
def __init__(self,user):
self.__delitem__ = None
self.__setitem__ = None
userOwnedObject.UserOwnedObject.__init__(self,user)
self._loadConfig()
def _getSubuserConfigPaths(self):
""" Returns a list of paths to config.json files in order that they should be looked in. """
configFileInHomeDir = os.path.join(self.user.homeDir,".subuser","config.json")
configFileInEtc = "/etc/subuser/config.json"
configFileInSubuserDir = paths.getSubuserDataFile("config.json")
return [configFileInHomeDir,configFileInEtc,configFileInSubuserDir]
def _expandPathsInConfig(self,config):
"""
Go through a freshly loaded config file and expand any environment variables in the paths.
"""
pathsToExpand = [
"bin-dir"
,"registry-dir"
,"installed-images-list"
,"locked-subusers-path"
,"subuser-home-dirs-dir"
,"repositories-dir"
,"runtime-cache"
,"lock-dir"
,"volumes-dir"]
<|code_end|>
. Use current file imports:
import os
from subuserlib.classes import userOwnedObject
from subuserlib import loadMultiFallbackJsonConfigFile
from subuserlib import paths
and context (classes, functions, or code) from other files:
# Path: subuserlib/classes/userOwnedObject.py
# class UserOwnedObject(object):
# def __init__(self,user):
#
# Path: subuserlib/loadMultiFallbackJsonConfigFile.py
# def filterOutNonExistantPaths(paths):
# def expandPathInDict(homeDir,pathAttribute,dictionary):
# def expandPathsInDict(homeDir,pathAttributes,dictionary):
# def getConfig(configFileHierarchy):
#
# Path: subuserlib/paths.py
# def upNDirsInPath(path,n):
# def getSubuserDir():
# def getSubuserExecutable():
# def getSubuserDataFile(filename):
. Output only the next line. | loadMultiFallbackJsonConfigFile.expandPathsInDict(self.user.homeDir,pathsToExpand,config) |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
"""
The Config class is used to hold user wide settings.
"""
#external imports
#internal imports
class Config(userOwnedObject.UserOwnedObject, dict):
def __init__(self,user):
self.__delitem__ = None
self.__setitem__ = None
userOwnedObject.UserOwnedObject.__init__(self,user)
self._loadConfig()
def _getSubuserConfigPaths(self):
""" Returns a list of paths to config.json files in order that they should be looked in. """
configFileInHomeDir = os.path.join(self.user.homeDir,".subuser","config.json")
configFileInEtc = "/etc/subuser/config.json"
<|code_end|>
with the help of current file imports:
import os
from subuserlib.classes import userOwnedObject
from subuserlib import loadMultiFallbackJsonConfigFile
from subuserlib import paths
and context from other files:
# Path: subuserlib/classes/userOwnedObject.py
# class UserOwnedObject(object):
# def __init__(self,user):
#
# Path: subuserlib/loadMultiFallbackJsonConfigFile.py
# def filterOutNonExistantPaths(paths):
# def expandPathInDict(homeDir,pathAttribute,dictionary):
# def expandPathsInDict(homeDir,pathAttributes,dictionary):
# def getConfig(configFileHierarchy):
#
# Path: subuserlib/paths.py
# def upNDirsInPath(path,n):
# def getSubuserDir():
# def getSubuserExecutable():
# def getSubuserDataFile(filename):
, which may contain function names, class names, or code. Output only the next line. | configFileInSubuserDir = paths.getSubuserDataFile("config.json") |
Predict the next line after this snippet: <|code_start|>#!/usr/bin/python3
# -*- coding: utf-8 -*-
#external imports
#internal imports
####################################################
def parseCliArgs(realArgs):
usage = "usage: subuser repair [options]"
description = """
Repair your subuser installation.
This is usefull when migrating from one machine to another. You can copy your ~/.subuser folder to the new machine and run repair, and things should just work.
"""
parser = optparse.OptionParser(usage=usage,description=description,formatter=subuserlib.commandLineArguments.HelpFormatterThatDoesntReformatDescription())
parser.add_option("--accept",dest="accept",action="store_true",default=False,help="Acceppt permissions without asking.")
parser.add_option("--prompt",dest="prompt",action="store_true",default=False,help="Prompt before installing new images.")
parser.add_option("--no-build",dest="build",action="store_false",default=True,help="Don't build missing images.")
return parser.parse_args(args=realArgs)
@subuserlib.profile.do_cprofile
def runCommand(realArgs):
options,arguments=parseCliArgs(realArgs)
<|code_end|>
using the current file's imports:
import sys
import optparse
import subuserlib.verify
import subuserlib.commandLineArguments
import subuserlib.profile
from subuserlib.classes.user import LockedUser
from subuserlib.classes.permissionsAccepters.acceptPermissionsAtCLI import AcceptPermissionsAtCLI
and any relevant context from other files:
# Path: subuserlib/classes/user.py
# class LockedUser():
# def __init__(self,name=None,homeDir=None):
# self.lock = None
# self.__user = User(name=name,homeDir=homeDir,_locked=True)
#
# def __enter__(self):
# try:
# self.__user.endUser.makedirs(self.__user.config["lock-dir"])
# except OSError as exception:
# if exception.errno != errno.EEXIST:
# raise
# try:
# self.lock = subuserlib.lock.getLock(self.__user.endUser.get_file(os.path.join(self.__user.config["lock-dir"],"registry.lock"),'w'),timeout=1)
# self.lock.__enter__()
# except IOError as e:
# if e.errno != errno.EINTR:
# raise e
# sys.exit("Another subuser process is currently running and has a lock on the registry. Please try again later.")
# return self.__user
#
# def __exit__(self, type, value, traceback):
# self.lock.__exit__()
#
# Path: subuserlib/classes/permissionsAccepters/acceptPermissionsAtCLI.py
# class AcceptPermissionsAtCLI(PermissionsAccepter,UserOwnedObject):
# def __init__(self,user,alwaysAccept = False):
# self.alwaysAccept = alwaysAccept
# UserOwnedObject.__init__(self,user)
#
# def accept(self,subuser,newDefaults,oldDefaults,userApproved):
# if userApproved is None:
# subuserlib.print.printWithoutCrashing(subuser.name+u": would like to have the following permissions:")
# newDefaults.describe()
# createNewPermissions = True
# else:
# createNewPermissions = False
# (removedPermissions,additionsAndChanges) = subuserlib.permissions.compare(newDefaults = newDefaults, oldDefaults=oldDefaults, userApproved=userApproved)
# if additionsAndChanges == {} and removedPermissions == [] and not subuser.wereEntryPointsExposedThisRun():
# return
# if not additionsAndChanges == {}:
# subuserlib.print.printWithoutCrashing(subuser.name+" would like to add/change the following permissions:")
# subuserlib.print.printWithoutCrashing(subuserlib.permissions.getDescription(additionsAndChanges))
# if not removedPermissions == []:
# subuserlib.print.printWithoutCrashing("")
# subuserlib.print.printWithoutCrashing(subuser.name+" no longer needs the following permissions:")
# removedPermisisonsDict = {}
# for removedPermission in removedPermissions:
# removedPermisisonsDict[removedPermission] = oldDefaults[removedPermission]
# subuserlib.print.printWithoutCrashing(subuserlib.permissions.getDescription(removedPermisisonsDict))
# if "entrypoints" in additionsAndChanges and subuser.entryPointsExposed:
# subuserlib.print.printWithoutCrashing(subuser.name+" would like to expose the following entrypoints to the system PATH:")
# for entrypoint in additionsAndChanges["entrypoints"].keys():
# subuserlib.print.printWithoutCrashing(entrypoint)
# if subuser.wereEntryPointsExposedThisRun():
# if subuser.permissions["entrypoints"]:
# subuserlib.print.printWithoutCrashing(subuser.name+" would like to expose the following entrypoints to the system PATH:")
# for entrypoint in subuser.permissions["entrypoints"].keys():
# subuserlib.print.printWithoutCrashing(entrypoint)
# else:
# subuserlib.print.printWithoutCrashing("Entrypoints marked to be exposed, but nothing to expose.")
# if additionsAndChanges == {} and removedPermissions == []:
# return
# options = OrderedDict([("A","Accept and apply changes")
# ,("E","Apply changes and edit result")
# ,("e","Ignore request and edit permissions by hand")
# ,("r","Reject permissions.")])
# if createNewPermissions:
# del options["e"]
# for option,description in options.items():
# subuserlib.print.printWithoutCrashing(option+" - "+description)
# if self.alwaysAccept:
# subuserlib.print.printWithoutCrashing("A")
# choice = "A"
# else:
# choice = None
# while not choice in options:
# try:
# choice = input("Please select an option:")
# except EOFError:
# choice = "r"
# if (choice == "A") or (choice == "E"):
# if createNewPermissions:
# subuser.createPermissions(newDefaults)
# else:
# subuser.permissions.applyChanges(removedPermissions,additionsAndChanges)
# subuser.permissions.save()
# if (choice == "E") or (choice == "e"):
# subuser.editPermissionsCLI()
# if choice == "r":
# if createNewPermissions:
# subuser.createPermissions(subuserlib.permissions.load(permissionsString="{}",logger=self.user.registry))
# subuser.permissions.save()
. Output only the next line. | lockedUser = LockedUser() |
Given the following code snippet before the placeholder: <|code_start|> subuserNamesWithNoImageSource = list(subusersWithNoImageSource.keys())
subuserNamesWithNoImageSource.sort()
registry.log(" ".join(subuserNamesWithNoImageSource))
registry.log("Unregistering any non-existant installed images.",2)
user.installedImages.unregisterNonExistantImages()
registry.cleanOutOldPermissions()
if op.subusers or subusersWithNoImageSource:
registry.setChanged(True)
registry.log("Loading and approving permissions...",verbosityLevel=2)
(failedSubusers,permissionParsingExceptions) = approvePermissions(op)
for exception in permissionParsingExceptions:
registry.log(str(exception))
registry.log("Permissions set...",verbosityLevel=2)
for failedSubuser in failedSubusers:
registry.log("New permissions for subuser "+failedSubuser.name+" were not accepted.",5)
try:
failedSubuser.permissions
except subuserlib.classes.subuser.SubuserHasNoPermissionsException:
registry.log("Deleting subuser "+failedSubuser.name+" as it has no permissions.",verbosityLevel=3)
del registry.subusers[failedSubuser.name]
op.subusers.remove(failedSubuser)
registry.log("Setting up service subusers...",verbosityLevel=3)
op.subusers += ensureServiceSubusersAreSetup(op.user,op.subusers)
op.subusers += ensureServiceSubusersAreSetup(op.user,subusersWithNoImageSource.values())
registry.log("Service subusers set up...",verbosityLevel=3)
registry.log("Building images...",verbosityLevel=3)
if op.build:
<|code_end|>
, predict the next line using imports from the current file:
import shutil
import os
import subuserlib.classes.exceptions as exceptions
import subuserlib.classes.subuser
from subuserlib.classes.installationTask import InstallationTask
and context including class names, function names, and sometimes code from other files:
# Path: subuserlib/classes/installationTask.py
# class InstallationTask(UserOwnedObject):
# def __init__(self,op):
# UserOwnedObject.__init__(self,op.user)
# self.op = op
# self.__upToDateImageSources = set()
# self.__outOfDateImageSources = set()
# self.__outOfDateSubusers = None
# self.__subusersWhosImagesFailedToBuild = set()
#
# def getOutOfDateSubusers(self):
# """
# Returns a list of subusers which are out of date or have no InstalledImage associated with them.
# """
# if self.__outOfDateSubusers is None:
# self.user.registry.log("Checking if images need to be updated or installed...")
# self.__outOfDateSubusers = set()
# for subuser in self.op.subusers:
# try:
# if (not subuser.locked) and (not (subuser.imageSource.getLatestInstalledImage() is None)):
# self.user.registry.log("Checking if subuser "+subuser.name+" is up to date.")
# for imageSource in getTargetLineage(subuser.imageSource):
# if imageSource in self.__upToDateImageSources:
# continue
# if imageSource in self.__outOfDateImageSources:
# upToDate = False
# else:
# upToDate = self.isUpToDate(imageSource)
# if upToDate:
# self.__upToDateImageSources.add(imageSource)
# else:
# self.__outOfDateImageSources.add(imageSource)
# self.__outOfDateSubusers.add(subuser)
# break
# if subuser.imageSource.getLatestInstalledImage() is None or subuser.imageId is None or not subuser.isImageInstalled():
# if subuser.locked:
# self.user.registry.log("Subuser "+subuser.name+" has no image. But is locked. Marking for installation anyways.")
# self.__outOfDateSubusers.add(subuser)
# except (exceptions.ImageBuildException, subuserlib.classes.subuser.NoImageSourceException) as e :
# self.user.registry.log(str(e))
# self.__subusersWhosImagesFailedToBuild.add(subuser)
# outOfDateSubusers = list(self.__outOfDateSubusers)
# outOfDateSubusers.sort(key=lambda s:s.name)
# return outOfDateSubusers
#
# def isUpToDate(self,imageSource):
# installedImage = imageSource.getLatestInstalledImage()
# if installedImage is None:
# return False
# if not installedImage.isDockerImageThere():
# return False
# targetLineage = getTargetLineage(imageSource)
# installedLineage = installedImage.getImageLineage()
# if not (len(targetLineage) == len(installedLineage)):
# return False
# sideBySideLineages = zip(installedLineage,targetLineage)
# for installed,target in sideBySideLineages:
# if target in self.__outOfDateImageSources:
# return False
# if not installed.imageId == target.getLatestInstalledImage().imageId:
# return False
# if not installedImage.imageSourceHash == imageSource.getHash():
# return False
# if self.op.checkForUpdatesExternally and installedImage.checkForUpdates():
# return False
# return True
#
# def updateOutOfDateSubusers(self):
# """
# Install new images for those subusers which are out of date.
# """
# parent = None
# for subuser in self.getOutOfDateSubusers():
# try:
# for imageSource in getTargetLineage(subuser.imageSource):
# if imageSource in self.__upToDateImageSources:
# parent = imageSource.getLatestInstalledImage().imageId
# elif imageSource in self.__outOfDateImageSources:
# parent = installImage(imageSource,parent=parent)
# self.__outOfDateImageSources.remove(imageSource)
# self.__upToDateImageSources.add(imageSource)
# else:
# if not self.isUpToDate(imageSource):
# parent = installImage(imageSource,parent=parent)
# else:
# parent = imageSource.getLatestInstalledImage().imageId
# self.__upToDateImageSources.add(imageSource)
# if not subuser.imageId == parent:
# subuser.imageId = parent
# subuser.user.registry.logChange("Installed new image <"+subuser.imageId+"> for subuser "+subuser.name)
# except exceptions.ImageBuildException as e:
# self.user.registry.log(str(e))
# self.__subusersWhosImagesFailedToBuild.add(subuser)
#
# def getSubusersWhosImagesFailedToBuild(self):
# return self.__subusersWhosImagesFailedToBuild
. Output only the next line. | installationTask = InstallationTask(op) |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
#external imports
#internal imports
def parseCliArgs(sysargs):
usage = "usage: subuser repository [options] [add|remove] NAME <URL>"
description = """Add or remove a new named repository.
- EXAMPLE
Add a new repository named foo with the URI https://www.example.com/repo.git.
$ subuser repository add foo https://www.example.com/repo.git
$ #You can also add a local repository:
$ subuser repository add local-foo file:///home/timothy/my-local-repo/
- EXAMPLE
Remove the repository named foo.
$subuser repository remove foo
"""
parser=optparse.OptionParser(usage=usage,description=description,formatter=subuserlib.commandLineArguments.HelpFormatterThatDoesntReformatDescription())
return parser.parse_args(args=sysargs)
@subuserlib.profile.do_cprofile
def runCommand(sysargs):
"""
Manage named subuser repositories.
"""
options,args = parseCliArgs(sysargs)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sys
import optparse
import subuserlib.resolve
import subuserlib.repository
import subuserlib.commandLineArguments
import subuserlib.profile
from subuserlib.classes.user import LockedUser
and context:
# Path: subuserlib/classes/user.py
# class LockedUser():
# def __init__(self,name=None,homeDir=None):
# self.lock = None
# self.__user = User(name=name,homeDir=homeDir,_locked=True)
#
# def __enter__(self):
# try:
# self.__user.endUser.makedirs(self.__user.config["lock-dir"])
# except OSError as exception:
# if exception.errno != errno.EEXIST:
# raise
# try:
# self.lock = subuserlib.lock.getLock(self.__user.endUser.get_file(os.path.join(self.__user.config["lock-dir"],"registry.lock"),'w'),timeout=1)
# self.lock.__enter__()
# except IOError as e:
# if e.errno != errno.EINTR:
# raise e
# sys.exit("Another subuser process is currently running and has a lock on the registry. Please try again later.")
# return self.__user
#
# def __exit__(self, type, value, traceback):
# self.lock.__exit__()
which might include code, classes, or functions. Output only the next line. | lockedUser = LockedUser() |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
"""
Runtime environments which are prepared for subusers to run in.
"""
#external imports
#internal imports
def getRecursiveDirectoryContents(directory):
files = []
for (directory,_,fileList) in os.walk(directory):
for fileName in fileList:
files.append(os.path.join(directory,fileName))
return files
<|code_end|>
. Use current file imports:
(import sys
import collections
import os
import time
import binascii
import struct
import shutil
import subuserlib.test
from subuserlib.classes.userOwnedObject import UserOwnedObject)
and context including class names, function names, or small code snippets from other files:
# Path: subuserlib/classes/userOwnedObject.py
# class UserOwnedObject(object):
# def __init__(self,user):
# self.user = user
. Output only the next line. | class Runtime(UserOwnedObject): |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
"""
Each time you run a command such as `subuser subuser add foo xterm` you are preforming an operation which modifies the subuser registry and which builds images. Many of the steps are repeated for each operation and many of the configuration options are global to all operations. This class defines an operation "super-object" which attempts to collate all of these parameters in one place.
"""
#external imports
#internal imports
class Operation(UserOwnedObject):
def __init__(self,user):
UserOwnedObject.__init__(self,user)
<|code_end|>
, predict the immediate next line with the help of imports:
import json
from subuserlib.classes.userOwnedObject import UserOwnedObject
from subuserlib.classes.permissionsAccepters.acceptPermissionsAtCLI import AcceptPermissionsAtCLI
and context (classes, functions, sometimes code) from other files:
# Path: subuserlib/classes/userOwnedObject.py
# class UserOwnedObject(object):
# def __init__(self,user):
# self.user = user
#
# Path: subuserlib/classes/permissionsAccepters/acceptPermissionsAtCLI.py
# class AcceptPermissionsAtCLI(PermissionsAccepter,UserOwnedObject):
# def __init__(self,user,alwaysAccept = False):
# self.alwaysAccept = alwaysAccept
# UserOwnedObject.__init__(self,user)
#
# def accept(self,subuser,newDefaults,oldDefaults,userApproved):
# if userApproved is None:
# subuserlib.print.printWithoutCrashing(subuser.name+u": would like to have the following permissions:")
# newDefaults.describe()
# createNewPermissions = True
# else:
# createNewPermissions = False
# (removedPermissions,additionsAndChanges) = subuserlib.permissions.compare(newDefaults = newDefaults, oldDefaults=oldDefaults, userApproved=userApproved)
# if additionsAndChanges == {} and removedPermissions == [] and not subuser.wereEntryPointsExposedThisRun():
# return
# if not additionsAndChanges == {}:
# subuserlib.print.printWithoutCrashing(subuser.name+" would like to add/change the following permissions:")
# subuserlib.print.printWithoutCrashing(subuserlib.permissions.getDescription(additionsAndChanges))
# if not removedPermissions == []:
# subuserlib.print.printWithoutCrashing("")
# subuserlib.print.printWithoutCrashing(subuser.name+" no longer needs the following permissions:")
# removedPermisisonsDict = {}
# for removedPermission in removedPermissions:
# removedPermisisonsDict[removedPermission] = oldDefaults[removedPermission]
# subuserlib.print.printWithoutCrashing(subuserlib.permissions.getDescription(removedPermisisonsDict))
# if "entrypoints" in additionsAndChanges and subuser.entryPointsExposed:
# subuserlib.print.printWithoutCrashing(subuser.name+" would like to expose the following entrypoints to the system PATH:")
# for entrypoint in additionsAndChanges["entrypoints"].keys():
# subuserlib.print.printWithoutCrashing(entrypoint)
# if subuser.wereEntryPointsExposedThisRun():
# if subuser.permissions["entrypoints"]:
# subuserlib.print.printWithoutCrashing(subuser.name+" would like to expose the following entrypoints to the system PATH:")
# for entrypoint in subuser.permissions["entrypoints"].keys():
# subuserlib.print.printWithoutCrashing(entrypoint)
# else:
# subuserlib.print.printWithoutCrashing("Entrypoints marked to be exposed, but nothing to expose.")
# if additionsAndChanges == {} and removedPermissions == []:
# return
# options = OrderedDict([("A","Accept and apply changes")
# ,("E","Apply changes and edit result")
# ,("e","Ignore request and edit permissions by hand")
# ,("r","Reject permissions.")])
# if createNewPermissions:
# del options["e"]
# for option,description in options.items():
# subuserlib.print.printWithoutCrashing(option+" - "+description)
# if self.alwaysAccept:
# subuserlib.print.printWithoutCrashing("A")
# choice = "A"
# else:
# choice = None
# while not choice in options:
# try:
# choice = input("Please select an option:")
# except EOFError:
# choice = "r"
# if (choice == "A") or (choice == "E"):
# if createNewPermissions:
# subuser.createPermissions(newDefaults)
# else:
# subuser.permissions.applyChanges(removedPermissions,additionsAndChanges)
# subuser.permissions.save()
# if (choice == "E") or (choice == "e"):
# subuser.editPermissionsCLI()
# if choice == "r":
# if createNewPermissions:
# subuser.createPermissions(subuserlib.permissions.load(permissionsString="{}",logger=self.user.registry))
# subuser.permissions.save()
. Output only the next line. | self.permissionsAccepter = AcceptPermissionsAtCLI(user) |
Based on the snippet: <|code_start|>
subusers
Updates the specified subusers
EXAMPLE:
$ subuser update subusers iceweasel git
lock-subuser-to SUBUSER GIT-COMMIT
Don't want a subuser to be updated? No problem, lock it to a given version with this update sub-command. Use subuser update log to see a list of possible hashes.
unlock-subuser SUBUSER
Unlock the subuser and ensure that it is up-to-date.
"""
parser=optparse.OptionParser(usage=usage,description=description,formatter=subuserlib.commandLineArguments.HelpFormatterThatDoesntReformatDescription())
parser.add_option("--accept",dest="accept",action="store_true",default=False,help="Accept permissions without asking.")
parser.add_option("--prompt",dest="prompt",action="store_true",default=False,help="Prompt before installing new images.")
parser.add_option("--use-cache",dest="useCache",action="store_true",default=False,help="Use the layer cache when building images.")
return parser.parse_args(args=realArgs)
#################################################################################################
@subuserlib.profile.do_cprofile
def runCommand(realArgs):
"""
Update your subuser installation.
"""
options,args = parseCliArgs(realArgs)
if len(args) < 1:
sys.exit("No arguments given. Please use subuser update -h for help.")
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import optparse
import subuserlib.commandLineArguments
import subuserlib.update
import subuserlib.profile
from subuserlib.classes.user import LockedUser
and context (classes, functions, sometimes code) from other files:
# Path: subuserlib/classes/user.py
# class LockedUser():
# def __init__(self,name=None,homeDir=None):
# self.lock = None
# self.__user = User(name=name,homeDir=homeDir,_locked=True)
#
# def __enter__(self):
# try:
# self.__user.endUser.makedirs(self.__user.config["lock-dir"])
# except OSError as exception:
# if exception.errno != errno.EEXIST:
# raise
# try:
# self.lock = subuserlib.lock.getLock(self.__user.endUser.get_file(os.path.join(self.__user.config["lock-dir"],"registry.lock"),'w'),timeout=1)
# self.lock.__enter__()
# except IOError as e:
# if e.errno != errno.EINTR:
# raise e
# sys.exit("Another subuser process is currently running and has a lock on the registry. Please try again later.")
# return self.__user
#
# def __exit__(self, type, value, traceback):
# self.lock.__exit__()
. Output only the next line. | lockedUser = LockedUser() |
Using the snippet: <|code_start|>#!/usr/bin/python3
# -*- coding: utf-8 -*-
#external imports
#internal imports
def parseCliArgs(realArgs):
usage = "usage: subuser remove-old-images"
description = """ Remove old, no longer used, installed images. Note, once you do this, you will no longer be able to return to previous configuration states with subuser registry rollback or subuser update lock-subuser-to."""
parser=optparse.OptionParser(usage=usage,description=description,formatter=subuserlib.commandLineArguments.HelpFormatterThatDoesntReformatDescription())
parser.add_option("--dry-run", dest="dryrun",action="store_true",default=False,help="Don't actually delete the images. Print which images would be deleted.")
parser.add_option("--repo", dest="repoId",default=None,help="Only remove images from the given repository.")
parser.add_option("--image-source", dest="imageSourceName",default=None,help="Remove old images from a specific image source. (Must be used in conjunction with --repo)")
parser.add_option("-y", dest="yes",default=False,action="store_true",help="Don't ask, just delete unneeded images.")
return parser.parse_args(args=realArgs)
@subuserlib.profile.do_cprofile
def runCommand(realArgs):
"""
Remove images that are installed, but are not associated with any subusers.
"""
options,args = parseCliArgs(realArgs)
<|code_end|>
, determine the next line of code. You have imports:
import sys
import optparse
import subuserlib.commandLineArguments
import subuserlib.removeOldImages
import subuserlib.profile
from subuserlib.classes.user import LockedUser
and context (class names, function names, or code) available:
# Path: subuserlib/classes/user.py
# class LockedUser():
# def __init__(self,name=None,homeDir=None):
# self.lock = None
# self.__user = User(name=name,homeDir=homeDir,_locked=True)
#
# def __enter__(self):
# try:
# self.__user.endUser.makedirs(self.__user.config["lock-dir"])
# except OSError as exception:
# if exception.errno != errno.EEXIST:
# raise
# try:
# self.lock = subuserlib.lock.getLock(self.__user.endUser.get_file(os.path.join(self.__user.config["lock-dir"],"registry.lock"),'w'),timeout=1)
# self.lock.__enter__()
# except IOError as e:
# if e.errno != errno.EINTR:
# raise e
# sys.exit("Another subuser process is currently running and has a lock on the registry. Please try again later.")
# return self.__user
#
# def __exit__(self, type, value, traceback):
# self.lock.__exit__()
. Output only the next line. | lockedUser = LockedUser() |
Here is a snippet: <|code_start|>#external imports
#internal imports
#####################################################################################
def parseCliArgs(realArgs):
usage = "usage: subuser registry [options]"
description = """Interact with the subuser registry.
log
Prints a log of recent .
rollback HASH
Subuser's undo function. Roll back to an old version of your subuser configuration. Find the commit hash using subuser update log. Note: This command is less usefull than lock-subuser-to.
livelog
Prints the hash of each new commit to the registry to standard output as the hashes appear. Type q <newline> to exit.
"""
parser=optparse.OptionParser(usage=usage,description=description,formatter=subuserlib.commandLineArguments.HelpFormatterThatDoesntReformatDescription())
parser.add_option("--json",dest="json",action="store_true",default=False,help="Print output in machine readable json format.")
return parser.parse_args(args=realArgs)
#################################################################################################
@subuserlib.profile.do_cprofile
def runCommand(realArgs):
"""
Interact with the subuser registry.
"""
options,args = parseCliArgs(realArgs)
<|code_end|>
. Write the next line using the current file imports:
import sys
import optparse
import os
import select
import json
import subuserlib.commandLineArguments
import subuserlib.profile
import subuserlib.registry
from subuserlib.classes.user import User, LockedUser
and context from other files:
# Path: subuserlib/classes/user.py
# class User(object):
# """
# This class provides a "base" User object used by subuser. This is the stem of a tree like data structure which holds all of the various objects owned by a given user.
#
# You create a new User object by passing the username and home dir of the user.
#
# >>> import subuserlib.classes.user
# >>> u = subuserlib.classes.user.User(name="root",homeDir="/root/")
# >>> u.homeDir
# '/root/'
# """
# def __init__(self,name=None,homeDir=None,_locked=False):
# self.__config = None
# self.__registry = None
# self.__installedImages = None
# self.__dockerDaemon = None
# self.__runtimeCache = None
# self.__operation = None
# self._has_lock = _locked
# self.name = name
# if homeDir:
# self.homeDir = homeDir
# elif test.testing:
# self.homeDir = os.getcwd()
# else:
# self.homeDir = os.path.expanduser("~")
# self.endUser = EndUser(self)
#
# @property
# def config(self):
# """
# Get the user's :doc:`Config <config>` object.
#
# Note: the user's config will be loaded the first time this is called.
# """
# if self.__config == None:
# self.__config = config.Config(self)
# return self.__config
#
# @property
# def registry(self):
# """
# Get the user's subuser :doc:`Registry <registry>`.
#
# Note: the registry will be loaded the first time this is called.
# """
# if self.__registry == None:
# self.__registry = registry.Registry(self)
# self.__registry.ensureGitRepoInitialized()
# return self.__registry
#
# @registry.setter
# def registry(self, registry):
# self.__registry = registry
#
# def reloadRegistry(self):
# """
# Reload registry from disk.
# """
# self.__registry = None
#
# @property
# def installedImages(self):
# """
# Get the user's :doc:`InstalledImages <installed-images>` list.
#
# Note: the installed images list will be loaded the first time this is called.
# """
# if self.__installedImages == None:
# self.__installedImages = installedImages.InstalledImages(self)
# return self.__installedImages
#
# @property
# def dockerDaemon(self):
# """
# Get the :doc:`DockerDaemon <docker>` object. You will use this to communicate with the Docker daemon.
# """
# if self.__dockerDaemon == None:
# self.__dockerDaemon = dockerDaemon.DockerDaemon(self)
# return self.__dockerDaemon
#
# @property
# def operation(self):
# """
# Get the :doc:`Operation <operation>` object. This object contains runtime data relating to the current "operation". This includes image building configuration data as well as UX options.
# """
# if self.__operation == None:
# self.__operation = Operation(self)
# return self.__operation
#
# class LockedUser():
# def __init__(self,name=None,homeDir=None):
# self.lock = None
# self.__user = User(name=name,homeDir=homeDir,_locked=True)
#
# def __enter__(self):
# try:
# self.__user.endUser.makedirs(self.__user.config["lock-dir"])
# except OSError as exception:
# if exception.errno != errno.EEXIST:
# raise
# try:
# self.lock = subuserlib.lock.getLock(self.__user.endUser.get_file(os.path.join(self.__user.config["lock-dir"],"registry.lock"),'w'),timeout=1)
# self.lock.__enter__()
# except IOError as e:
# if e.errno != errno.EINTR:
# raise e
# sys.exit("Another subuser process is currently running and has a lock on the registry. Please try again later.")
# return self.__user
#
# def __exit__(self, type, value, traceback):
# self.lock.__exit__()
, which may include functions, classes, or code. Output only the next line. | user = User() |
Given the following code snippet before the placeholder: <|code_start|>#internal imports
#####################################################################################
def parseCliArgs(realArgs):
usage = "usage: subuser registry [options]"
description = """Interact with the subuser registry.
log
    Prints a log of recent changes.
rollback HASH
    Subuser's undo function. Roll back to an old version of your subuser configuration. Find the commit hash using subuser update log. Note: This command is less useful than lock-subuser-to.
livelog
Prints the hash of each new commit to the registry to standard output as the hashes appear. Type q <newline> to exit.
"""
parser=optparse.OptionParser(usage=usage,description=description,formatter=subuserlib.commandLineArguments.HelpFormatterThatDoesntReformatDescription())
parser.add_option("--json",dest="json",action="store_true",default=False,help="Print output in machine readable json format.")
return parser.parse_args(args=realArgs)
#################################################################################################
@subuserlib.profile.do_cprofile
def runCommand(realArgs):
"""
Interact with the subuser registry.
"""
options,args = parseCliArgs(realArgs)
user = User()
<|code_end|>
, predict the next line using imports from the current file:
import sys
import optparse
import os
import select
import json
import subuserlib.commandLineArguments
import subuserlib.profile
import subuserlib.registry
from subuserlib.classes.user import User, LockedUser
and context including class names, function names, and sometimes code from other files:
# Path: subuserlib/classes/user.py
# class User(object):
# """
# This class provides a "base" User object used by subuser. This is the stem of a tree like data structure which holds all of the various objects owned by a given user.
#
# You create a new User object by passing the username and home dir of the user.
#
# >>> import subuserlib.classes.user
# >>> u = subuserlib.classes.user.User(name="root",homeDir="/root/")
# >>> u.homeDir
# '/root/'
# """
# def __init__(self,name=None,homeDir=None,_locked=False):
# self.__config = None
# self.__registry = None
# self.__installedImages = None
# self.__dockerDaemon = None
# self.__runtimeCache = None
# self.__operation = None
# self._has_lock = _locked
# self.name = name
# if homeDir:
# self.homeDir = homeDir
# elif test.testing:
# self.homeDir = os.getcwd()
# else:
# self.homeDir = os.path.expanduser("~")
# self.endUser = EndUser(self)
#
# @property
# def config(self):
# """
# Get the user's :doc:`Config <config>` object.
#
# Note: the user's config will be loaded the first time this is called.
# """
# if self.__config == None:
# self.__config = config.Config(self)
# return self.__config
#
# @property
# def registry(self):
# """
# Get the user's subuser :doc:`Registry <registry>`.
#
# Note: the registry will be loaded the first time this is called.
# """
# if self.__registry == None:
# self.__registry = registry.Registry(self)
# self.__registry.ensureGitRepoInitialized()
# return self.__registry
#
# @registry.setter
# def registry(self, registry):
# self.__registry = registry
#
# def reloadRegistry(self):
# """
# Reload registry from disk.
# """
# self.__registry = None
#
# @property
# def installedImages(self):
# """
# Get the user's :doc:`InstalledImages <installed-images>` list.
#
# Note: the installed images list will be loaded the first time this is called.
# """
# if self.__installedImages == None:
# self.__installedImages = installedImages.InstalledImages(self)
# return self.__installedImages
#
# @property
# def dockerDaemon(self):
# """
# Get the :doc:`DockerDaemon <docker>` object. You will use this to communicate with the Docker daemon.
# """
# if self.__dockerDaemon == None:
# self.__dockerDaemon = dockerDaemon.DockerDaemon(self)
# return self.__dockerDaemon
#
# @property
# def operation(self):
# """
# Get the :doc:`Operation <operation>` object. This object contains runtime data relating to the current "operation". This includes image building configuration data as well as UX options.
# """
# if self.__operation == None:
# self.__operation = Operation(self)
# return self.__operation
#
# class LockedUser():
# def __init__(self,name=None,homeDir=None):
# self.lock = None
# self.__user = User(name=name,homeDir=homeDir,_locked=True)
#
# def __enter__(self):
# try:
# self.__user.endUser.makedirs(self.__user.config["lock-dir"])
# except OSError as exception:
# if exception.errno != errno.EEXIST:
# raise
# try:
# self.lock = subuserlib.lock.getLock(self.__user.endUser.get_file(os.path.join(self.__user.config["lock-dir"],"registry.lock"),'w'),timeout=1)
# self.lock.__enter__()
# except IOError as e:
# if e.errno != errno.EINTR:
# raise e
# sys.exit("Another subuser process is currently running and has a lock on the registry. Please try again later.")
# return self.__user
#
# def __exit__(self, type, value, traceback):
# self.lock.__exit__()
. Output only the next line. | lockedUser = LockedUser() |
Predict the next line for this snippet: <|code_start|> ret = func(*args, **kwargs)
elapsedTime = time.time() - startTime
print('function [{}] finished in {} ms'.format(
func.__name__, int(elapsedTime * 1000)))
return ret
return newfunc
class EndUser(UserOwnedObject,object):
def __init__(self,user,name=None):
UserOwnedObject.__init__(self,user)
self.proxiedByOtherUser = False
self.sudo = False
self.name = name
try:
self.name = self.user.config["user"]
self.proxiedByOtherUser = True
except KeyError:
try:
self.name = os.environ["SUDO_USER"]
self.sudo = True
self.proxiedByOtherUser = True
except KeyError:
try:
self.name = getpass.getuser()
except KeyError:
# We use a broken setup when generating documentation...
self.name = "I have no name!"
self.uid = 1000
self.gid = 1000
<|code_end|>
with the help of current file imports:
import getpass
import os
import sys
import pwd
import subprocess
import functools,time
from subuserlib import test
from subuserlib import paths
from subuserlib.classes.userOwnedObject import UserOwnedObject
and context from other files:
# Path: subuserlib/test.py
# def getUser():
#
# Path: subuserlib/paths.py
# def upNDirsInPath(path,n):
# def getSubuserDir():
# def getSubuserExecutable():
# def getSubuserDataFile(filename):
#
# Path: subuserlib/classes/userOwnedObject.py
# class UserOwnedObject(object):
# def __init__(self,user):
# self.user = user
, which may contain function names, class names, or code. Output only the next line. | if not test.testing: |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
"""
The ``EndUser`` object is the object that represents the user account owned by the human user of the system. It is possible to run subuser using a different user account, in order to isolate root from the end user's user account.
"""
#external imports
#internal imports
def timeit(func):
@functools.wraps(func)
def newfunc(*args, **kwargs):
startTime = time.time()
ret = func(*args, **kwargs)
elapsedTime = time.time() - startTime
print('function [{}] finished in {} ms'.format(
func.__name__, int(elapsedTime * 1000)))
return ret
return newfunc
<|code_end|>
with the help of current file imports:
import getpass
import os
import sys
import pwd
import subprocess
import functools,time
from subuserlib import test
from subuserlib import paths
from subuserlib.classes.userOwnedObject import UserOwnedObject
and context from other files:
# Path: subuserlib/test.py
# def getUser():
#
# Path: subuserlib/paths.py
# def upNDirsInPath(path,n):
# def getSubuserDir():
# def getSubuserExecutable():
# def getSubuserDataFile(filename):
#
# Path: subuserlib/classes/userOwnedObject.py
# class UserOwnedObject(object):
# def __init__(self,user):
# self.user = user
, which may contain function names, class names, or code. Output only the next line. | class EndUser(UserOwnedObject,object): |
Using the snippet: <|code_start|> feature = self._build_obj(feature)
self.data_store[key] = feature
self.idx.insert(key, feature['geometry'].bounds)
class CachedLookup(SpatialLookup):
""" Cache results of spatial lookups """
geohash_cache = {}
def __init__(self, precision=7, *args, **kwargs):
super(CachedLookup, self).__init__(*args, **kwargs)
self.precision = precision
self.hit = 0
self.miss = 0
def get(self, point, buffer_size=0, multiple=False):
""" lookup state and county based on geohash of coordinates from tweet """
lon, lat = point
geohash = Geohash.encode(lat, lon, precision=self.precision)
key = (geohash, buffer_size, multiple)
if key in self.geohash_cache:
# cache hit on geohash
self.hit += 1
#print self.hit, self.miss
return self.geohash_cache[key]
self.miss += 1
# cache miss on geohash
# project point to ESRI:102005
lat, lon = Geohash.decode(geohash)
<|code_end|>
, determine the next line of code. You have imports:
import sys
import os
import hashlib
import json
import Geohash
import shapely
from shapely.geometry import shape
from shapely.geometry.point import Point
from rtree import index
from reader import FileReader
from .proj import project, rproject
and context (class names, function names, or code) available:
# Path: geotweet/mapreduce/utils/proj.py
# def project(lonlat):
# return transform(proj4326, proj102005, *lonlat)
#
# def rproject(lonlat):
# return transform(proj102005, proj4326, *lonlat)
. Output only the next line. | proj_point = project([float(lon), float(lat)]) |
Continue the code snippet: <|code_start|>
root = os.path.dirname(inspect.getfile(geotweet))
TEST_STREAM = os.path.join(root, 'data/twitter-api-stream-raw.log')
class GeoFilterStepTests(unittest.TestCase):
def setUp(self):
<|code_end|>
. Use current file imports:
import unittest
import json
import os
import sys
import inspect
import geotweet
from os.path import dirname
from geotweet.twitter.stream_steps import GeoFilterStep, ExtractStep
and context (classes, functions, or code) from other files:
# Path: geotweet/twitter/stream_steps.py
# class GeoFilterStep(ProcessStep):
# """
# Process output from Twitter Streaming API
#
# For each record output from the API will be called as argument to process.
# That function will validate and convert tweet to desired format.
#
# """
# def _validate(self, key, record):
# if key in record and record[key]:
# return True
# return False
#
# def validate_geotweet(self, record):
# """ check that stream record is actual tweet with coordinates """
# if record and self._validate('user', record) \
# and self._validate('coordinates', record):
# return True
# return False
#
# def process(self, tweet):
# """ Passes on tweet if missing 'geo' or 'user' property """
# if self.validate_geotweet(tweet):
# return self.next(tweet)
# return None
#
# class ExtractStep(ProcessStep):
# """ Extract interesting fields from Tweet """
#
# def process(self, tweet):
# if not tweet:
# return None
# user = tweet['user']
# data = dict(
# user_id=user['id'],
# name=user['name'],
# screen_name=user['screen_name'],
# description=user['description'],
# location=user['location'],
# friends_count=user['friends_count'],
# followers_count=user['followers_count'],
# text=tweet['text'],
# tweet_id=tweet['id_str'],
# source=tweet['source'],
# created_at=tweet['created_at'],
# timestamp=tweet['timestamp_ms'],
# lonlat=tweet['coordinates']['coordinates']
# )
# return self.next(data)
. Output only the next line. | self.step = GeoFilterStep() |
Predict the next line for this snippet: <|code_start|>
class GeoFilterStepTests(unittest.TestCase):
def setUp(self):
self.step = GeoFilterStep()
def test_invalid_empty(self):
data = dict()
error = "Expected validate_geotweet to return False"
self.assertFalse(self.step.validate_geotweet(data), error)
def test_invalid_none(self):
data = dict(coordinates=None, user=None)
error = "Expected validate_geotweet to return False"
self.assertFalse(self.step.validate_geotweet(data), error)
def test_invalid_geo(self):
data = dict(coordinates=None, user={'username':'jeff'})
error = "Expected validate_geotweet to return False"
self.assertFalse(self.step.validate_geotweet(data), error)
def test_valid(self):
data = dict(coordinates=[-122.5, 45.5], user={'username':'jeff'})
error = "Expected validate_geotweet to return True"
self.assertTrue(self.step.validate_geotweet(data), error)
class ExtractStepTests(unittest.TestCase):
def setUp(self):
<|code_end|>
with the help of current file imports:
import unittest
import json
import os
import sys
import inspect
import geotweet
from os.path import dirname
from geotweet.twitter.stream_steps import GeoFilterStep, ExtractStep
and context from other files:
# Path: geotweet/twitter/stream_steps.py
# class GeoFilterStep(ProcessStep):
# """
# Process output from Twitter Streaming API
#
# For each record output from the API will be called as argument to process.
# That function will validate and convert tweet to desired format.
#
# """
# def _validate(self, key, record):
# if key in record and record[key]:
# return True
# return False
#
# def validate_geotweet(self, record):
# """ check that stream record is actual tweet with coordinates """
# if record and self._validate('user', record) \
# and self._validate('coordinates', record):
# return True
# return False
#
# def process(self, tweet):
# """ Passes on tweet if missing 'geo' or 'user' property """
# if self.validate_geotweet(tweet):
# return self.next(tweet)
# return None
#
# class ExtractStep(ProcessStep):
# """ Extract interesting fields from Tweet """
#
# def process(self, tweet):
# if not tweet:
# return None
# user = tweet['user']
# data = dict(
# user_id=user['id'],
# name=user['name'],
# screen_name=user['screen_name'],
# description=user['description'],
# location=user['location'],
# friends_count=user['friends_count'],
# followers_count=user['followers_count'],
# text=tweet['text'],
# tweet_id=tweet['id_str'],
# source=tweet['source'],
# created_at=tweet['created_at'],
# timestamp=tweet['timestamp_ms'],
# lonlat=tweet['coordinates']['coordinates']
# )
# return self.next(data)
, which may contain function names, class names, or code. Output only the next line. | self.step = ExtractStep() |
Given snippet: <|code_start|>
root = dirname(dirname(dirname(dirname(os.path.abspath(__file__)))))
sys.path.append(root)
GEOTWEET_DIR = root
DATA_DIR = os.path.join(root, 'data', 'geo')
COUNTIES_GEOJSON_LOCAL = os.path.join(DATA_DIR,'us_counties102005.geojson')
os.environ['COUNTIES_GEOJSON_LOCAL'] = COUNTIES_GEOJSON_LOCAL
# COUNTIES_GEOJSON_LOCAL environment variable must be set before import
def build_input(text, desc="My Account", lonlat=[-122.5, 45.4]):
return dict(
description=desc,
text=text,
lonlat=lonlat
)
class MapperTweetTests(unittest.TestCase):
def setUp(self):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
import os
import sys
import json
import Geohash
from os.path import dirname
from mrjob.job import MRJob
from geotweet.mapreduce.state_county_wordcount import StateCountyWordCountJob
from geotweet.mapreduce.state_county_wordcount import GEOHASH_PRECISION
and context:
# Path: geotweet/mapreduce/state_county_wordcount.py
# class StateCountyWordCountJob(MRJob):
# """
#     Count word occurrences for US tweets by entire county, by State and County
#
# A geojson file of US counties is downloaded from an S3 bucket. A RTree index
# is built using the bounding box of each county, and is used for determining
# State and County for each tweet.
#
# """
#
# INPUT_PROTOCOL = JSONValueProtocol
# INTERNAL_PROTOCOL = JSONProtocol
# OUTPUT_PROTOCOL = RawValueProtocol
#
# def steps(self):
# return [
# MRStep(
# mapper_init=self.mapper_init,
# mapper=self.mapper,
# combiner=self.combiner,
# reducer=self.reducer
# )
# ]
#
# def mapper_init(self):
# """ Download counties geojson from S3 and build spatial index and cache """
# self.counties = CachedCountyLookup(precision=GEOHASH_PRECISION)
# self.extractor = WordExtractor()
#
# def mapper(self, _, data):
# # ignore HR geo-tweets for job postings
# if data['description'] and self.hr_filter(data['description']):
# return
# lonlat = data['lonlat']
# # spatial lookup for state and county
# state, county = self.counties.get(lonlat)
# if not state or not county:
# return
# # count words
# for word in self.extractor.run(data['text']):
# yield (word, ), 1
# yield (word, state), 1
# yield (word, state, county), 1
#
# def hr_filter(self, text):
# """ check if description of twitter using contains job related key words """
# expr = "|".join(["(job)", "(hiring)", "(career)"])
# return re.findall(expr, text)
#
# def combiner(self, key, values):
# yield key, sum(values)
#
# def reducer(self, key, values):
# total = int(sum(values))
# if total < MIN_WORD_COUNT:
# return
# word = state = county = None
# word = key[0]
# if len(key) >= 2:
# state = key[1]
# if len(key) >= 3:
# county = key[2]
# output = "{0}\t{1}\t{2}\t{3}"
# word = word.encode('utf-8')
# state = state.encode('utf-8') if state else None
# county = county.encode('utf-8') if county else None
# yield None, output.format(word, state, county, total)
#
# Path: geotweet/mapreduce/state_county_wordcount.py
# GEOHASH_PRECISION = 7
which might include code, classes, or functions. Output only the next line. | self.mr = StateCountyWordCountJob() |
Given snippet: <|code_start|>
SCRIPTDIR = dirname(os.path.abspath(__file__))
DEFAULT_STATES = os.path.join(SCRIPTDIR, 'data/states.txt')
US_GEOFABRIK = 'http://download.geofabrik.de/north-america/us/{0}-latest.osm.pbf'
POI_TAGS = ["amenity", "builing", "shop", "office", "tourism"]
class OSMRunner(object):
"""
Downloads OSM extracts from GeoFabrik in pbf format
"""
def __init__(self, args):
self.states = args.states
if not args.states:
self.states = DEFAULT_STATES
self.output = args.output
self.overwrite = False
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import sys
import json
import logging
import requests
from os.path import dirname
from imposm.parser import OSMParser
from .twitter.load import S3Loader
and context:
# Path: geotweet/twitter/load.py
# class S3Loader(object):
#
# envvars = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_DEFAULT_REGION']
#
# def __init__(self, bucket=None, region=None):
# self.bucket = bucket
# if region:
# os.environ['AWS_DEFAULT_REGION'] = region
#
# def valid(self):
# if not self.bucket:
# error = "Error: AWS Bucket not set"
# raise ValueError(error)
# for envvar in self.envvars:
# if not os.getenv(envvar):
# error = "Error: Environment variable {0} not set".format(envvar)
# raise EnvironmentError(error)
#
# def store(self, filepath):
# if not self.bucket:
# return False
# filename = filepath.rsplit('/', 1)[-1]
# s3 = boto3.resource('s3')
# s3.Object(self.bucket, filename).put(Body=open(filepath, 'rb'))
which might include code, classes, or functions. Output only the next line. | self.s3 = S3Loader(bucket=args.bucket, region=args.region) |
Given the code snippet: <|code_start|> # bb = r2.get(elf, "pdbj")
# r2.gets(elf, "pdb")
# for i in bb:
# if i["type"] == "cjmp":
# jump = i["jump"]
# offset = i["offset"]
# self.branch_ins = i
# self.valid = True
# if jump not in [ii["offset"] for ii in self.bbs]:
# # if does not jump back into loop
# self.finish_ins_addr = jump
# else:
# # where it goes if it fails is the next instruction
# self.finish_ins_addr = offset + i["size"]
# break
# elif i["type"] == "jmp":
# break
def __repr__(self):
if not self.valid:
return "<invalid longwrite @ 0x%x>" % self.start_ins_addr
else:
return "<longwrite [start=0x%x,write=0x%x,done=0x%x]>" % (self.start_ins_addr, self.write_ins_addr, self.finish_ins_addr)
# def __repr__(self):
# return str(self)
def longwrite_info(elf, addr, thumb):
<|code_end|>
, generate the next line using the imports in this file:
import fiddle.staticanalysis
import fiddle.r2_keeper as r2
import sys
from fiddle.staticanalysis import LongWriteInfo
and context (functions, classes, or occasionally code) from other files:
# Path: fiddle/staticanalysis.py
# class LongWriteInfo():
# def __init__(self, elf, start, end, thumb):
# r2.run_aab(elf) # run basic block analysis
# self.valid = True
# self.start_ins = None
# self.start_ins_addr = None
# self.write_ins = None
# self.write_ins_addr = None
# self.finish_ins = None
# self.finish_ins_addr = None
# self.start = start
# self.end = end
# self.elf = elf
# self.thumb = thumb
# self.valid = False
# self.branch_ins_addr = None
# self.branch_ins = None
# # grab basic blocks surrounding this region
# r2.gets(elf, "s 0x%x" % self.start)
# if self.thumb: # force r2 to use the correct instruction size. sigh.
# r2.gets(elf, "ahb 16")
# r2.gets(elf, "e asm.bits=16")
# else:
# r2.gets(elf, "ahb 32")
# r2.gets(elf, "e asm.bits=32")
# self.bbs = r2.get(elf, "pdbj")
# next = self.bbs[-1]["offset"] + self.bbs[-1]["size"]
# while next < end:
# r2.get(elf, "s 0x%x" % next)
# self.bbs.extend(r2.get(elf, "pdbj"))
# next = self.bbs[-1]["offset"] + self.bbs[-1]["size"]
# # grab one more basic block
# r2.get(elf, "s 0x%x" % next)
# self.bbs.extend(r2.get(elf, "pdbj"))
#
# def calculate_info(self):
# # lookup write instruction
# nwrites = 0
# elf = self.elf
# for i in self.bbs:
# mne = i["opcode"].split()[0]
# if InstructionAnalyzer._is_mne_memstore(mne):
# nwrites += 1
# if (self.start <= i["offset"]) and (i["offset"] <= self.end):
# if self.write_ins is not None:
# # if there are two write instruction in basic block, don't know what to do
# self.valid = False
# break
# else:
# self.write_ins = i
# self.valid = True
# self.write_ins_addr = self.write_ins["offset"]
# if nwrites > 1:
# print "Warning: %s write ins in these blocks" % nwrites
#
# if not self.valid:
# return
#
# # look for branch after write to find loop
# branch = None
# unconditional = False
# for b in self.bbs:
# if b["offset"] < self.write_ins_addr:
# continue
# if b["type"] == u"cjmp" or b["type"] == u"jmp":
# if b["type"] == "jmp":
# dst = b["jump"]
# r2.gets(elf, "s 0x%x" % dst)
# for next in r2.get(elf, "pdbj"):
# if next["type"] == u"cjmp" or next["type"] == "jmp":
# if next["type"] == "cjmp":
# jump = next["jump"]
# if jump not in [ii["offset"] for ii in self.bbs]:
# self.finish_ins_addr = jump
# else:
# self.finish_ins_addr = next["offset"] + next["size"]
# else:
# # don't handle this case yet
# self.valid = False
# break
# break
# #branch = r2.get(elf, "pdj 1")[0]
# #self.finish_ins_addr = branch["offset"] + branch["size"]
# else:
# branch = b
# jump = branch["jump"]
# if jump not in [ii["offset"] for ii in self.bbs]:
# self.finish_ins_addr = jump
# else:
# self.finish_ins_addr = branch["offset"] + branch["size"]
#
# r2.gets(elf, "s 0x%x" % self.finish_ins_addr)
# self.finish_ins = r2.get(elf, "pdj 1")[0]
# self.start_ins_addr = self.write_ins_addr
# self.start_ins = self.write_ins
#
# def __repr__(self):
# if not self.valid:
# return "<invalid longwrite @ 0x%x>" % self.start_ins_addr
# else:
# return "<longwrite [start=0x%x,write=0x%x,done=0x%x]>" % (self.start_ins_addr, self.write_ins_addr, self.finish_ins_addr)
. Output only the next line. | l = LongWriteInfo(elf, start, end, thumb) |
Given the following code snippet before the placeholder: <|code_start|> outf = open(path, "w")
outf.write("(setq substages '(%s))\n" % " ".join([str(i) for i in substage_linenos]))
outf.close()
@classmethod
def get_function_lineno(cls, fn, path, last=False):
if last:
out = run_cmd.Cmd().run_cmd("wc -l %s | awk '{print ($1);}'" % (path))
return long(out)
out = run_cmd.Cmd().run_cmd("egrep -no ' > %s( |$)' %s" % (fn, path))
if len(out) == 0:
return None
else:
return long(out.split(":")[0])
def get_raw_files(self, noprepare):
stage = self.stage
substages = self._substage_numbers()
name = self._substage_names()
substageresultsdir = getattr(Main.raw.postprocess.consolidate_writes.files.fn_lists, stage.stagename)
tracename = self.process_trace
#calltrace_path = getattr(Main.raw.runtime.trace.calltrace.files.org, stage.stagename)
calltrace_path = getattr(Main.raw.TraceMethod.calltrace.Files.org, stage.stagename)
if calltrace_path and os.path.exists(calltrace_path) and substageresultsdir:
pp = Main.raw.postprocess.consolidate_writes.files
if not noprepare:
el_path = getattr(pp.el_file, stage.stagename)
if os.path.exists(substageresultsdir):
return {}
try:
<|code_end|>
, predict the next line using imports from the current file:
import os
import csv
import sys
import re
import glob
import numpy
import tables
import pytable_utils
import run_cmd
import StringIO
import pickle
import hashlib
import subprocess
import substages_parser
import db_info
import testsuite_utils as utils
import addr_space
import logging
from memory_tree import intervaltree
from collections import Iterable
from config import Main
from fiddle_extra import pymacs_request
and context including class names, function names, and sometimes code from other files:
# Path: fiddle_extra/pymacs_request.py
# class Emacs():
# def __init__(self):
# def cleanup(self):
# def receive(self):
# def send(self, text):
# def ask_emacs(text, printer="prin1", execbuffer=""):
# def ask_emacs_in_buffer(cmd, bufname):
. Output only the next line. | pymacs_request.ask_emacs('(create-substage-calltraces "%s" "%s" "%s")' % |
Next line prediction: <|code_start|> # Load pretrained model
if pretrained_model is not None and pretrained_model.find("model") > -1:
logger.info("load pretrained model : "
+ os.path.join(db_model.trained_model_path, pretrained_model))
serializers.load_hdf5(os.path.join(db_model.trained_model_path, pretrained_model), model)
_backup_pretrained_model(db_model, pretrained_model)
_delete_old_models(db_model, pretrained_model)
# delete layer visualization cache
for f in os.listdir(db_model.trained_model_path):
if os.path.isdir(os.path.join(db_model.trained_model_path, f)):
try:
shutil.rmtree(os.path.join(db_model.trained_model_path, f))
except Exception as e:
logger.exception('Could not remove visualization cache. {0}'.format(e))
raise e
if db_model.gpu >= 0:
cuda.get_device(db_model.gpu).use()
model.to_gpu()
optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
optimizer.setup(model)
data_q = queue.Queue(maxsize=1)
res_q = queue.Queue()
db_model.is_trained = 1
db_model.update_and_commit()
<|code_end|>
. Use current file imports:
(import datetime
import json
import random
import multiprocessing
import threading
import time
import imp
import re
import os
import shutil
import math
import numpy as np
import six
import cPickle as pickle
import chainer
import tensorflow as tf
from logging import getLogger
from PIL import Image
from six.moves import queue
from chainer import computational_graph
from chainer import cuda
from chainer import optimizers
from chainer import serializers
from .utils import remove_resume_file)
and context including class names, function names, or small code snippets from other files:
# Path: src/deeplearning/train/utils.py
# def remove_resume_file(base_path):
# try:
# shutil.rmtree(os.path.join(base_path, 'resume'))
# except OSError:
# pass
. Output only the next line. | remove_resume_file(db_model.trained_model_path) |
Based on the snippet: <|code_start|>
datadir = os.path.join(os.path.dirname(dividebatur.__file__),
os.pardir, "dividebatur-aec")
class CandidateListTests(unittest.TestCase):
def test_fed2013(self):
all_candidates_csv = os.path.join(
datadir, "fed2013", "common",
"2013federalelection-all-candidates-nat-22-08.csv")
senate_candidates_csv = os.path.join(
datadir, "fed2013", "common",
"SenateCandidatesDownload-17496.csv")
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import unittest
import dividebatur
from dividebatur.aecdata import CandidateList
and context (classes, functions, sometimes code) from other files:
# Path: dividebatur/aecdata/candidatelist.py
# class CandidateList:
# """CandidateList provides a list of senate candidates and groups.
#
# The following members are publicly accessible:
#
# - candidates: a list of all candidates, in the order they appear
# on the ballot.
# - candidate_by_id: a dictionary mapping candidate IDs to the candidate.
# - groups: a list of groups, in the order they appear on the
# ballot. Does not include the ungrouped candidates.
# - group_by_id: a dictionary mapping group IDs (e.g. A, B, ...)
# to the group.
# """
#
# def __init__(self, state, all_candidates_csv, senate_candidates_csv):
# self.state = state
# self._load(all_candidates_csv, senate_candidates_csv)
#
# def _load(self, all_candidates_csv, senate_candidates_csv):
# self.candidates = []
# self.candidate_by_id = {}
# self.groups = []
# self.group_by_id = {}
#
# candidate_by_name_party, party_ab = self._load_senate_candidates(
# senate_candidates_csv)
# current_group = None
# current_party = None
# current_candidates = []
# for c in self._load_all_candidates(all_candidates_csv):
# if c.ticket != current_group:
# if current_group is not None and current_group != "UG":
# group = Group(current_group,
# current_party,
# party_ab[current_party],
# tuple(current_candidates))
# self.groups.append(group)
# self.group_by_id[group.group_id] = group
# current_group = c.ticket
# current_party = c.party_ballot_nm
# current_candidates = []
#
# candidate_id = candidate_by_name_party[
# c.surname, c.ballot_given_nm, c.party_ballot_nm]
# candidate = Candidate(candidate_id,
# c.surname,
# c.ballot_given_nm,
# len(self.candidates),
# current_group,
# c.ballot_position,
# c.party_ballot_nm,
# party_ab[c.party_ballot_nm])
# self.candidates.append(candidate)
# self.candidate_by_id[candidate.candidate_id] = candidate
# current_candidates.append(candidate)
#
# # Add the final group, assuming it isn't the ungrouped candidates
# if current_group is not None and current_group != "UG":
# group = Group(current_group,
# current_party,
# party_ab[current_party],
# tuple(current_candidates))
# self.groups.append(group)
# self.group_by_id[group.group_id] = group
#
# def _load_all_candidates(self, all_candidates_csv):
# candidates = []
# with open(all_candidates_csv, 'rt') as fd:
# reader = csv.reader(fd)
# header = next(reader)
# for candidate in sorted(named_tuple_iter('AllCandidate', reader, header, ballot_position=int), key=lambda row: (ticket_sort_key(row.ticket), row.ballot_position)):
# if candidate.state_ab != self.state:
# continue
# if candidate.nom_ty != 'S':
# continue
# candidates.append(candidate)
# return candidates
#
# def _load_senate_candidates(self, senate_candidates_csv):
# by_name_party = {}
# party_ab = {}
# seen_ids = set()
# with open(senate_candidates_csv, 'rt') as fd:
# reader = csv.reader(fd)
# next(reader) # skip the version
# header = next(reader)
# for candidate in named_tuple_iter(
# 'Candidate', reader, header, CandidateID=int):
# if candidate.StateAb != self.state:
# continue
# k = (candidate.Surname, candidate.GivenNm, candidate.PartyNm)
# assert candidate.CandidateID not in seen_ids
# assert k not in by_name_party
# by_name_party[k] = candidate.CandidateID
# seen_ids.add(candidate.CandidateID)
# party_ab[candidate.PartyNm] = candidate.PartyAb
# return by_name_party, party_ab
#
# def get_candidate_id(self, surname, given_name):
# for candidate in self.candidates:
# if candidate.surname == surname and candidate.given_name == given_name:
# return candidate.candidate_id
# raise KeyError((surname, given_name))
. Output only the next line. | cl = CandidateList("NSW", all_candidates_csv, senate_candidates_csv) |
Using the snippet: <|code_start|> current_candidates = []
candidate_id = candidate_by_name_party[
c.surname, c.ballot_given_nm, c.party_ballot_nm]
candidate = Candidate(candidate_id,
c.surname,
c.ballot_given_nm,
len(self.candidates),
current_group,
c.ballot_position,
c.party_ballot_nm,
party_ab[c.party_ballot_nm])
self.candidates.append(candidate)
self.candidate_by_id[candidate.candidate_id] = candidate
current_candidates.append(candidate)
# Add the final group, assuming it isn't the ungrouped candidates
if current_group is not None and current_group != "UG":
group = Group(current_group,
current_party,
party_ab[current_party],
tuple(current_candidates))
self.groups.append(group)
self.group_by_id[group.group_id] = group
def _load_all_candidates(self, all_candidates_csv):
candidates = []
with open(all_candidates_csv, 'rt') as fd:
reader = csv.reader(fd)
header = next(reader)
<|code_end|>
, determine the next line of code. You have imports:
import csv
import collections
from .utils import ticket_sort_key, named_tuple_iter
and context (class names, function names, or code) available:
# Path: dividebatur/aecdata/utils.py
# def ticket_sort_key(ticket):
# "sort key for an ATL ticket, eg. A..Z, AA..ZZ"
# return (len(ticket), ticket)
#
# def named_tuple_iter(name, reader, header, **kwargs):
# field_names = [t for t in [t.strip().replace('-', '_')
# for t in header] if t]
# typ = namedtuple(name, field_names)
# mappings = []
# for field_name in kwargs:
# idx = field_names.index(field_name)
# mappings.append((idx, kwargs[field_name]))
# for row in reader:
# for idx, map_fn in mappings:
# row[idx] = map_fn(row[idx])
# yield typ(*row)
. Output only the next line. | for candidate in sorted(named_tuple_iter('AllCandidate', reader, header, ballot_position=int), key=lambda row: (ticket_sort_key(row.ticket), row.ballot_position)): |
Predict the next line after this snippet: <|code_start|> current_candidates = []
candidate_id = candidate_by_name_party[
c.surname, c.ballot_given_nm, c.party_ballot_nm]
candidate = Candidate(candidate_id,
c.surname,
c.ballot_given_nm,
len(self.candidates),
current_group,
c.ballot_position,
c.party_ballot_nm,
party_ab[c.party_ballot_nm])
self.candidates.append(candidate)
self.candidate_by_id[candidate.candidate_id] = candidate
current_candidates.append(candidate)
# Add the final group, assuming it isn't the ungrouped candidates
if current_group is not None and current_group != "UG":
group = Group(current_group,
current_party,
party_ab[current_party],
tuple(current_candidates))
self.groups.append(group)
self.group_by_id[group.group_id] = group
def _load_all_candidates(self, all_candidates_csv):
candidates = []
with open(all_candidates_csv, 'rt') as fd:
reader = csv.reader(fd)
header = next(reader)
<|code_end|>
using the current file's imports:
import csv
import collections
from .utils import ticket_sort_key, named_tuple_iter
and any relevant context from other files:
# Path: dividebatur/aecdata/utils.py
# def ticket_sort_key(ticket):
# "sort key for an ATL ticket, eg. A..Z, AA..ZZ"
# return (len(ticket), ticket)
#
# def named_tuple_iter(name, reader, header, **kwargs):
# field_names = [t for t in [t.strip().replace('-', '_')
# for t in header] if t]
# typ = namedtuple(name, field_names)
# mappings = []
# for field_name in kwargs:
# idx = field_names.index(field_name)
# mappings.append((idx, kwargs[field_name]))
# for row in reader:
# for idx, map_fn in mappings:
# row[idx] = map_fn(row[idx])
# yield typ(*row)
. Output only the next line. | for candidate in sorted(named_tuple_iter('AllCandidate', reader, header, ballot_position=int), key=lambda row: (ticket_sort_key(row.ticket), row.ballot_position)): |
Here is a snippet: <|code_start|>
# put some paranoia around exclusion: we want to make sure that
# `candidates` is unique, and that none of these candidates have
# been previously excluded
for candidate_id in candidates:
assert(candidate_id not in self.candidates_excluded)
assert(len(set(candidates)) == len(candidates))
# determine the paper transfers to be run, and the candidates
# holding papers which are distributed in each transfer
transfers_applicable = defaultdict(set)
for candidate_id in candidates:
self.candidates_excluded[candidate_id] = True
for bundle_transaction in self.candidate_bundle_transactions.get(candidate_id):
value = bundle_transaction.transfer_value
transfers_applicable[value].add(candidate_id)
transfer_values = list(reversed(sorted(transfers_applicable)))
self.results.candidates_excluded(
CandidatesExcluded(
candidates=candidates,
transfer_values=transfer_values,
reason=reason))
for transfer_value in transfer_values:
self.exclusion_distributions_pending.append((list(transfers_applicable[transfer_value]), transfer_value))
def process_election(self, distribution, last_candidate_aggregates):
distributed_candidate_id, transfer_value, excess_votes = distribution
self.results.election_distribution_performed(
<|code_end|>
. Write the next line using the current file imports:
from collections import defaultdict, namedtuple
from .results import ElectionDistributionPerformed, \
ExclusionDistributionPerformed, ActProvision, \
CandidateElected, CandidatesExcluded, \
ExclusionReason
import itertools
import fractions
and context from other files:
# Path: dividebatur/results.py
# class ElectionDistributionPerformed:
# """
# Information on any election distribution which is performed during a
# counting round.
# """
#
# def __init__(self, candidate_id, transfer_value):
# """
# transfer_value is a Fraction instance
# """
# self.candidate_id = candidate_id
# self.transfer_value = transfer_value
#
# class ExclusionDistributionPerformed:
# """
# Information on any exclusion distribution which is performed during a
# counting round.
# """
#
# def __init__(self, candidates, transfer_value):
# """
# candidates: a List of candidate_ids
# transfer_value: transfer value, as a Fraction
# """
# self.candidates = candidates
# self.transfer_value = transfer_value
#
# class ActProvision:
# """
# Note that a provision of the Act has been used.
# """
#
# def __init__(self, text):
# """
# text: textual description of the provision used.
# """
#
# self.text = text
#
# class CandidateElected:
# """
# Information on the election of a candidate.
# """
#
# def __init__(self, candidate_id, order, excess_votes, paper_count, transfer_value):
# """
# candidate_id: the candidate elected
# order: the number of the spot the candidate was elected to [1..N_vacancies]
# excess_votes: the number of excess votes the candidate received
# paper_count: the number of papers the candidate held at time of elect
# transfer_value: the transfer value for the excess papers (0 if no excess papers)
# """
#
# self.candidate_id = candidate_id
# self.order = order
# self.excess_votes = excess_votes
# self.paper_count = paper_count
# self.transfer_value = transfer_value
#
# class CandidatesExcluded:
# """
# Information on the exclusion of one or more candidates.
# """
#
# def __init__(self, candidates, transfer_values, reason):
# """
# candidates: list of candidate_ids of those candidates elected
# transfer_values: the transfer values of papers to be distributed (a list of Fraction instances)
# reason: an instance of ExclusionReason
# """
# self.candidates = candidates
# self.transfer_values = transfer_values
# self.reason = reason
#
# class ExclusionReason:
# """
# information of interest about an exclusion of one or more
# candidates.
#
# simply to make this information available to users of this
# counter, this data is not used to make any decisions which
# affect count results.
# """
# def __init__(self, reason, info):
# """
# reason: a string identifying the reason
# info: additional information
# """
# self.reason, self.info = reason, info
, which may include functions, classes, or code. Output only the next line. | ElectionDistributionPerformed( |
Next line prediction: <|code_start|> transfer_values = list(reversed(sorted(transfers_applicable)))
self.results.candidates_excluded(
CandidatesExcluded(
candidates=candidates,
transfer_values=transfer_values,
reason=reason))
for transfer_value in transfer_values:
self.exclusion_distributions_pending.append((list(transfers_applicable[transfer_value]), transfer_value))
def process_election(self, distribution, last_candidate_aggregates):
distributed_candidate_id, transfer_value, excess_votes = distribution
self.results.election_distribution_performed(
ElectionDistributionPerformed(
candidate_id=distributed_candidate_id,
transfer_value=transfer_value))
candidate_votes = last_candidate_aggregates.get_candidate_votes()
bundle_transactions_to_distribute = [(distributed_candidate_id, self.candidate_bundle_transactions.get(distributed_candidate_id))]
exhausted_votes, exhausted_papers = self.distribute_bundle_transactions(
candidate_votes,
bundle_transactions_to_distribute,
transfer_value)
candidate_votes[distributed_candidate_id] = self.quota
return distributed_candidate_id, candidate_votes, exhausted_votes, exhausted_papers
def process_exclusion(self, distribution, last_candidate_aggregates):
distributed_candidates, transfer_value = distribution
self.results.exclusion_distribution_performed(
<|code_end|>
. Use current file imports:
(from collections import defaultdict, namedtuple
from .results import ElectionDistributionPerformed, \
ExclusionDistributionPerformed, ActProvision, \
CandidateElected, CandidatesExcluded, \
ExclusionReason
import itertools
import fractions)
and context including class names, function names, or small code snippets from other files:
# Path: dividebatur/results.py
# class ElectionDistributionPerformed:
# """
# Information on any election distribution which is performed during a
# counting round.
# """
#
# def __init__(self, candidate_id, transfer_value):
# """
# transfer_value is a Fraction instance
# """
# self.candidate_id = candidate_id
# self.transfer_value = transfer_value
#
# class ExclusionDistributionPerformed:
# """
# Information on any exclusion distribution which is performed during a
# counting round.
# """
#
# def __init__(self, candidates, transfer_value):
# """
# candidates: a List of candidate_ids
# transfer_value: transfer value, as a Fraction
# """
# self.candidates = candidates
# self.transfer_value = transfer_value
#
# class ActProvision:
# """
# Note that a provision of the Act has been used.
# """
#
# def __init__(self, text):
# """
# text: textual description of the provision used.
# """
#
# self.text = text
#
# class CandidateElected:
# """
# Information on the election of a candidate.
# """
#
# def __init__(self, candidate_id, order, excess_votes, paper_count, transfer_value):
# """
# candidate_id: the candidate elected
# order: the number of the spot the candidate was elected to [1..N_vacancies]
# excess_votes: the number of excess votes the candidate received
# paper_count: the number of papers the candidate held at time of elect
# transfer_value: the transfer value for the excess papers (0 if no excess papers)
# """
#
# self.candidate_id = candidate_id
# self.order = order
# self.excess_votes = excess_votes
# self.paper_count = paper_count
# self.transfer_value = transfer_value
#
# class CandidatesExcluded:
# """
# Information on the exclusion of one or more candidates.
# """
#
# def __init__(self, candidates, transfer_values, reason):
# """
# candidates: list of candidate_ids of those candidates elected
# transfer_values: the transfer values of papers to be distributed (a list of Fraction instances)
# reason: an instance of ExclusionReason
# """
# self.candidates = candidates
# self.transfer_values = transfer_values
# self.reason = reason
#
# class ExclusionReason:
# """
# information of interest about an exclusion of one or more
# candidates.
#
# simply to make this information available to users of this
# counter, this data is not used to make any decisions which
# affect count results.
# """
# def __init__(self, reason, info):
# """
# reason: a string identifying the reason
# info: additional information
# """
# self.reason, self.info = reason, info
. Output only the next line. | ExclusionDistributionPerformed( |
Here is a snippet: <|code_start|> return sorted_candidate_ids[self.election_tie_cb(candidates)]
def determine_quota(self):
self.total_papers = sum(count for _, count in self.papers_for_count)
self.quota = int(self.total_papers / (self.vacancies + 1)) + 1
def determine_elected_candidates_in_order(self, candidate_votes):
"""
determine all candidates with at least a quota of votes in `candidate_votes'. returns results in
order of decreasing vote count. Any ties are resolved within this method.
"""
eligible_by_vote = defaultdict(list)
for candidate_id, votes in candidate_votes.candidate_votes_iter():
if candidate_id in self.candidates_elected:
continue
if votes < self.quota:
continue
eligible_by_vote[votes].append(candidate_id)
elected = []
for votes in reversed(sorted(eligible_by_vote)):
candidate_ids = eligible_by_vote[votes]
# we sort here to ensure stability, so external callers can hard-coded their response
candidate_ids.sort(key=self.candidate_order_fn)
if len(candidate_ids) == 1:
elected.append(candidate_ids[0])
else:
tie_breaker_round = self.find_tie_breaker(candidate_ids)
if tie_breaker_round is not None:
self.results.provision_used(
<|code_end|>
. Write the next line using the current file imports:
from collections import defaultdict, namedtuple
from .results import ElectionDistributionPerformed, \
ExclusionDistributionPerformed, ActProvision, \
CandidateElected, CandidatesExcluded, \
ExclusionReason
import itertools
import fractions
and context from other files:
# Path: dividebatur/results.py
# class ElectionDistributionPerformed:
# """
# Information on any election distribution which is performed during a
# counting round.
# """
#
# def __init__(self, candidate_id, transfer_value):
# """
# transfer_value is a Fraction instance
# """
# self.candidate_id = candidate_id
# self.transfer_value = transfer_value
#
# class ExclusionDistributionPerformed:
# """
# Information on any exclusion distribution which is performed during a
# counting round.
# """
#
# def __init__(self, candidates, transfer_value):
# """
# candidates: a List of candidate_ids
# transfer_value: transfer value, as a Fraction
# """
# self.candidates = candidates
# self.transfer_value = transfer_value
#
# class ActProvision:
# """
# Note that a provision of the Act has been used.
# """
#
# def __init__(self, text):
# """
# text: textual description of the provision used.
# """
#
# self.text = text
#
# class CandidateElected:
# """
# Information on the election of a candidate.
# """
#
# def __init__(self, candidate_id, order, excess_votes, paper_count, transfer_value):
# """
# candidate_id: the candidate elected
# order: the number of the spot the candidate was elected to [1..N_vacancies]
# excess_votes: the number of excess votes the candidate received
# paper_count: the number of papers the candidate held at time of elect
# transfer_value: the transfer value for the excess papers (0 if no excess papers)
# """
#
# self.candidate_id = candidate_id
# self.order = order
# self.excess_votes = excess_votes
# self.paper_count = paper_count
# self.transfer_value = transfer_value
#
# class CandidatesExcluded:
# """
# Information on the exclusion of one or more candidates.
# """
#
# def __init__(self, candidates, transfer_values, reason):
# """
# candidates: list of candidate_ids of those candidates elected
# transfer_values: the transfer values of papers to be distributed (a list of Fraction instances)
# reason: an instance of ExclusionReason
# """
# self.candidates = candidates
# self.transfer_values = transfer_values
# self.reason = reason
#
# class ExclusionReason:
# """
# information of interest about an exclusion of one or more
# candidates.
#
# simply to make this information available to users of this
# counter, this data is not used to make any decisions which
# affect count results.
# """
# def __init__(self, reason, info):
# """
# reason: a string identifying the reason
# info: additional information
# """
# self.reason, self.info = reason, info
, which may include functions, classes, or code. Output only the next line. | ActProvision("Multiple candidates elected with %d votes. Tie broken from previous totals." % (votes))) |
Predict the next line for this snippet: <|code_start|> self.candidate_bundle_transactions.transfer_to(candidate_id, bundle_transaction)
candidate_votes[candidate_id] += bundle_transaction.votes
exhausted_votes = int(exhausted_papers * transfer_value)
return exhausted_votes, exhausted_papers
def elect(self, candidate_aggregates, candidate_id):
"""
Elect a candidate, updating internal state to track this.
Calculate the paper count to be transferred on to other candidates,
and if required schedule a distribution fo papers.
"""
# somewhat paranoid cross-check, but we've had this bug before..
assert(candidate_id not in self.candidates_elected)
elected_no = len(self.candidates_elected) + 1
self.candidates_elected[candidate_id] = True
transfer_value = 0
excess_votes = paper_count = None
if len(self.candidates_elected) != self.vacancies:
excess_votes = max(candidate_aggregates.get_vote_count(candidate_id) - self.quota, 0)
assert(excess_votes >= 0)
paper_count = self.candidate_bundle_transactions.get_paper_count(candidate_id)
if paper_count > 0:
transfer_value = fractions.Fraction(excess_votes, paper_count)
assert(transfer_value >= 0)
self.election_distributions_pending.append((candidate_id, transfer_value, excess_votes))
self.results.candidate_elected(
<|code_end|>
with the help of current file imports:
from collections import defaultdict, namedtuple
from .results import ElectionDistributionPerformed, \
ExclusionDistributionPerformed, ActProvision, \
CandidateElected, CandidatesExcluded, \
ExclusionReason
import itertools
import fractions
and context from other files:
# Path: dividebatur/results.py
# class ElectionDistributionPerformed:
# """
# Information on any election distribution which is performed during a
# counting round.
# """
#
# def __init__(self, candidate_id, transfer_value):
# """
# transfer_value is a Fraction instance
# """
# self.candidate_id = candidate_id
# self.transfer_value = transfer_value
#
# class ExclusionDistributionPerformed:
# """
# Information on any exclusion distribution which is performed during a
# counting round.
# """
#
# def __init__(self, candidates, transfer_value):
# """
# candidates: a List of candidate_ids
# transfer_value: transfer value, as a Fraction
# """
# self.candidates = candidates
# self.transfer_value = transfer_value
#
# class ActProvision:
# """
# Note that a provision of the Act has been used.
# """
#
# def __init__(self, text):
# """
# text: textual description of the provision used.
# """
#
# self.text = text
#
# class CandidateElected:
# """
# Information on the election of a candidate.
# """
#
# def __init__(self, candidate_id, order, excess_votes, paper_count, transfer_value):
# """
# candidate_id: the candidate elected
# order: the number of the spot the candidate was elected to [1..N_vacancies]
# excess_votes: the number of excess votes the candidate received
# paper_count: the number of papers the candidate held at time of elect
# transfer_value: the transfer value for the excess papers (0 if no excess papers)
# """
#
# self.candidate_id = candidate_id
# self.order = order
# self.excess_votes = excess_votes
# self.paper_count = paper_count
# self.transfer_value = transfer_value
#
# class CandidatesExcluded:
# """
# Information on the exclusion of one or more candidates.
# """
#
# def __init__(self, candidates, transfer_values, reason):
# """
# candidates: list of candidate_ids of those candidates elected
# transfer_values: the transfer values of papers to be distributed (a list of Fraction instances)
# reason: an instance of ExclusionReason
# """
# self.candidates = candidates
# self.transfer_values = transfer_values
# self.reason = reason
#
# class ExclusionReason:
# """
# information of interest about an exclusion of one or more
# candidates.
#
# simply to make this information available to users of this
# counter, this data is not used to make any decisions which
# affect count results.
# """
# def __init__(self, reason, info):
# """
# reason: a string identifying the reason
# info: additional information
# """
# self.reason, self.info = reason, info
, which may contain function names, class names, or code. Output only the next line. | CandidateElected( |
Given the following code snippet before the placeholder: <|code_start|> order=elected_no,
excess_votes=excess_votes,
paper_count=paper_count,
transfer_value=transfer_value))
def exclude_candidates(self, candidates, reason):
"""
mark one or more candidates as excluded from the count
candidates: list of candidate_ids to exclude
reason: the reason for the exclusion
"""
# put some paranoia around exclusion: we want to make sure that
# `candidates` is unique, and that none of these candidates have
# been previously excluded
for candidate_id in candidates:
assert(candidate_id not in self.candidates_excluded)
assert(len(set(candidates)) == len(candidates))
# determine the paper transfers to be run, and the candidates
# holding papers which are distributed in each transfer
transfers_applicable = defaultdict(set)
for candidate_id in candidates:
self.candidates_excluded[candidate_id] = True
for bundle_transaction in self.candidate_bundle_transactions.get(candidate_id):
value = bundle_transaction.transfer_value
transfers_applicable[value].add(candidate_id)
transfer_values = list(reversed(sorted(transfers_applicable)))
self.results.candidates_excluded(
<|code_end|>
, predict the next line using imports from the current file:
from collections import defaultdict, namedtuple
from .results import ElectionDistributionPerformed, \
ExclusionDistributionPerformed, ActProvision, \
CandidateElected, CandidatesExcluded, \
ExclusionReason
import itertools
import fractions
and context including class names, function names, and sometimes code from other files:
# Path: dividebatur/results.py
# class ElectionDistributionPerformed:
# """
# Information on any election distribution which is performed during a
# counting round.
# """
#
# def __init__(self, candidate_id, transfer_value):
# """
# transfer_value is a Fraction instance
# """
# self.candidate_id = candidate_id
# self.transfer_value = transfer_value
#
# class ExclusionDistributionPerformed:
# """
# Information on any exclusion distribution which is performed during a
# counting round.
# """
#
# def __init__(self, candidates, transfer_value):
# """
# candidates: a List of candidate_ids
# transfer_value: transfer value, as a Fraction
# """
# self.candidates = candidates
# self.transfer_value = transfer_value
#
# class ActProvision:
# """
# Note that a provision of the Act has been used.
# """
#
# def __init__(self, text):
# """
# text: textual description of the provision used.
# """
#
# self.text = text
#
# class CandidateElected:
# """
# Information on the election of a candidate.
# """
#
# def __init__(self, candidate_id, order, excess_votes, paper_count, transfer_value):
# """
# candidate_id: the candidate elected
# order: the number of the spot the candidate was elected to [1..N_vacancies]
# excess_votes: the number of excess votes the candidate received
# paper_count: the number of papers the candidate held at time of elect
# transfer_value: the transfer value for the excess papers (0 if no excess papers)
# """
#
# self.candidate_id = candidate_id
# self.order = order
# self.excess_votes = excess_votes
# self.paper_count = paper_count
# self.transfer_value = transfer_value
#
# class CandidatesExcluded:
# """
# Information on the exclusion of one or more candidates.
# """
#
# def __init__(self, candidates, transfer_values, reason):
# """
# candidates: list of candidate_ids of those candidates elected
# transfer_values: the transfer values of papers to be distributed (a list of Fraction instances)
# reason: an instance of ExclusionReason
# """
# self.candidates = candidates
# self.transfer_values = transfer_values
# self.reason = reason
#
# class ExclusionReason:
# """
# information of interest about an exclusion of one or more
# candidates.
#
# simply to make this information available to users of this
# counter, this data is not used to make any decisions which
# affect count results.
# """
# def __init__(self, reason, info):
# """
# reason: a string identifying the reason
# info: additional information
# """
# self.reason, self.info = reason, info
. Output only the next line. | CandidatesExcluded( |
Predict the next line for this snippet: <|code_start|>
candidates_for_exclusion = []
for candidate_id in candidate_ids:
if candidate_aggregates.get_vote_count(candidate_id) == min_votes:
candidates_for_exclusion.append(candidate_id)
next_to_min_votes = None
candidates_next_excluded = None
if len(candidates_for_exclusion) > 1:
tie_breaker_round = self.find_tie_breaker(candidates_for_exclusion)
if tie_breaker_round is not None:
excluded_votes = dict((tie_breaker_round.get_vote_count(candidate_id), candidate_id) for candidate_id in candidates_for_exclusion)
self.results.provision_used(
ActProvision("Multiple candidates for exclusion holding %d votes. Tie broken from previous totals." % (min_votes)))
lowest_vote = min(excluded_votes)
excluded_candidate_id = excluded_votes[lowest_vote]
else:
self.results.provision_used(
ActProvision("Multiple candidates for exclusion holding %d votes. Input required from Australian Electoral Officer." % (min_votes)))
excluded_candidate_id = self.resolve_exclusion_tie(candidates_for_exclusion)
else:
excluded_candidate_id = candidates_for_exclusion[0]
candidates_with_more_than_min = [candidate_aggregates.get_vote_count(t) for t in candidate_ids if candidate_aggregates.get_vote_count(t) > min_votes]
if len(candidates_with_more_than_min) > 0:
next_to_min_votes = min(candidate_aggregates.get_vote_count(t) for t in candidate_ids if candidate_aggregates.get_vote_count(t) > min_votes)
candidates_next_excluded = [t for t in candidate_ids if candidate_aggregates.get_vote_count(t) == next_to_min_votes]
margin = None
if next_to_min_votes is not None:
margin = next_to_min_votes - min_votes
<|code_end|>
with the help of current file imports:
from collections import defaultdict, namedtuple
from .results import ElectionDistributionPerformed, \
ExclusionDistributionPerformed, ActProvision, \
CandidateElected, CandidatesExcluded, \
ExclusionReason
import itertools
import fractions
and context from other files:
# Path: dividebatur/results.py
# class ElectionDistributionPerformed:
# """
# Information on any election distribution which is performed during a
# counting round.
# """
#
# def __init__(self, candidate_id, transfer_value):
# """
# transfer_value is a Fraction instance
# """
# self.candidate_id = candidate_id
# self.transfer_value = transfer_value
#
# class ExclusionDistributionPerformed:
# """
# Information on any exclusion distribution which is performed during a
# counting round.
# """
#
# def __init__(self, candidates, transfer_value):
# """
# candidates: a List of candidate_ids
# transfer_value: transfer value, as a Fraction
# """
# self.candidates = candidates
# self.transfer_value = transfer_value
#
# class ActProvision:
# """
# Note that a provision of the Act has been used.
# """
#
# def __init__(self, text):
# """
# text: textual description of the provision used.
# """
#
# self.text = text
#
# class CandidateElected:
# """
# Information on the election of a candidate.
# """
#
# def __init__(self, candidate_id, order, excess_votes, paper_count, transfer_value):
# """
# candidate_id: the candidate elected
# order: the number of the spot the candidate was elected to [1..N_vacancies]
# excess_votes: the number of excess votes the candidate received
# paper_count: the number of papers the candidate held at time of elect
# transfer_value: the transfer value for the excess papers (0 if no excess papers)
# """
#
# self.candidate_id = candidate_id
# self.order = order
# self.excess_votes = excess_votes
# self.paper_count = paper_count
# self.transfer_value = transfer_value
#
# class CandidatesExcluded:
# """
# Information on the exclusion of one or more candidates.
# """
#
# def __init__(self, candidates, transfer_values, reason):
# """
# candidates: list of candidate_ids of those candidates elected
# transfer_values: the transfer values of papers to be distributed (a list of Fraction instances)
# reason: an instance of ExclusionReason
# """
# self.candidates = candidates
# self.transfer_values = transfer_values
# self.reason = reason
#
# class ExclusionReason:
# """
# information of interest about an exclusion of one or more
# candidates.
#
# simply to make this information available to users of this
# counter, this data is not used to make any decisions which
# affect count results.
# """
# def __init__(self, reason, info):
# """
# reason: a string identifying the reason
# info: additional information
# """
# self.reason, self.info = reason, info
, which may contain function names, class names, or code. Output only the next line. | return excluded_candidate_id, ExclusionReason("exclusion", { |
Given snippet: <|code_start|>
def json_log(self, candidate_aggregates):
if self.test_log_dir is None:
return
log = []
for candidate_id in self.candidate_ids_display(candidate_aggregates):
log.append((self.get_candidate_title(candidate_id), candidate_aggregates.get_vote_count(candidate_id)))
with open(os.path.join(self.test_log_dir, 'round_%d.json' % (self.current_round)), 'w') as fd:
json.dump(log, fd)
def write_json(self):
params = {
'total_papers': self.total_papers,
'quota': self.quota,
'vacancies': self.vacancies,
'started': self._start_time.strftime("%Y-%m-%d %H:%M"),
'finished': self._end_time.strftime("%Y-%m-%d %H:%M")
}
params.update(self.template_variables)
obj = {
'candidates': self.candidate_json(),
'parties': self.party_json(),
'parameters': params,
'rounds': self.rounds,
'summary': self.summary(),
}
with open(self.filename, 'w') as fd:
try:
json.dump(obj, fd)
except TypeError:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import datetime
import json
import abc
import os
from .common import logger
and context:
# Path: dividebatur/common.py
# def make_logger(name: str) -> logging.Logger:
which might include code, classes, or functions. Output only the next line. | logger.error("failed to serialise data") |
Continue the code snippet: <|code_start|> prefs = []
for ticket_entry in g:
prefs.append(ticket_entry.CandidateID)
self.gvt[ticket].append(tuple(prefs))
def load_first_preferences(self, state_name, firstprefs_csv):
with open(firstprefs_csv, 'rt') as fd:
reader = csv.reader(fd)
next(reader) # skip the version
header = next(reader)
for idx, row in enumerate(
named_tuple_iter(
'StateFirstPrefs', reader, header, TotalVotes=int, CandidateID=int)):
if row.StateAb == state_name:
if row.CandidateDetails.endswith('Ticket Votes'):
self.ticket_votes.append((row.Ticket, row.TotalVotes))
elif row.CandidateDetails != 'Unapportioned':
assert(row.CandidateID not in self.btl_firstprefs)
self.btl_firstprefs[row.CandidateID] = row.TotalVotes
def get_tickets(self):
# GVT handling: see s272 of the electoral act (as collated in 2013)
for group, n in self.ticket_votes:
size = len(self.gvt[group])
assert(size <= 3)
remainder = n % size
# TODO: implement some randomization here
remainder_pattern = [1] * remainder + [0] * (size - remainder)
remainder_pattern = [0] * (size - remainder) + [1] * (remainder)
if remainder:
<|code_end|>
. Use current file imports:
import itertools
import gzip
import csv
from collections import defaultdict
from ..common import logger
from .utils import int_or_none, named_tuple_iter, ticket_sort_key
and context (classes, functions, or code) from other files:
# Path: dividebatur/common.py
# def make_logger(name: str) -> logging.Logger:
#
# Path: dividebatur/aecdata/utils.py
# def int_or_none(s):
# if s == '':
# return None
# try:
# return int(s)
# except ValueError:
# return None
#
# def named_tuple_iter(name, reader, header, **kwargs):
# field_names = [t for t in [t.strip().replace('-', '_')
# for t in header] if t]
# typ = namedtuple(name, field_names)
# mappings = []
# for field_name in kwargs:
# idx = field_names.index(field_name)
# mappings.append((idx, kwargs[field_name]))
# for row in reader:
# for idx, map_fn in mappings:
# row[idx] = map_fn(row[idx])
# yield typ(*row)
#
# def ticket_sort_key(ticket):
# "sort key for an ATL ticket, eg. A..Z, AA..ZZ"
# return (len(ticket), ticket)
. Output only the next line. | logger.info("GVT split ticket remainder, AEO input needed: %s" % (remainder_pattern)) |
Here is a snippet: <|code_start|> remainder_pattern = [0] * (size - remainder) + [1] * (remainder)
if remainder:
logger.info("GVT split ticket remainder, AEO input needed: %s" % (remainder_pattern))
# remainder_pattern = [0] * (size-remainder) + [1] * remainder
for ticket, extra in zip(self.gvt[group], remainder_pattern):
yield ticket, int(n / size) + extra
class SenateBTL:
"""
build up Ticket instances for each below-the-line vote in the AEC dataset
aggregate BTL votes with the same preference flow together
(pre-2015)
"""
def __init__(self, btl_csv):
self.ticket_votes = defaultdict(int)
self.load_btl(btl_csv)
def load_btl(self, btl_csv):
with gzip.open(btl_csv, 'rt') as fd:
reader = csv.reader(fd)
next(reader) # skip the version
header = next(reader)
it = named_tuple_iter(
'BtlRow',
reader,
header,
Batch=int,
Paper=int,
<|code_end|>
. Write the next line using the current file imports:
import itertools
import gzip
import csv
from collections import defaultdict
from ..common import logger
from .utils import int_or_none, named_tuple_iter, ticket_sort_key
and context from other files:
# Path: dividebatur/common.py
# def make_logger(name: str) -> logging.Logger:
#
# Path: dividebatur/aecdata/utils.py
# def int_or_none(s):
# if s == '':
# return None
# try:
# return int(s)
# except ValueError:
# return None
#
# def named_tuple_iter(name, reader, header, **kwargs):
# field_names = [t for t in [t.strip().replace('-', '_')
# for t in header] if t]
# typ = namedtuple(name, field_names)
# mappings = []
# for field_name in kwargs:
# idx = field_names.index(field_name)
# mappings.append((idx, kwargs[field_name]))
# for row in reader:
# for idx, map_fn in mappings:
# row[idx] = map_fn(row[idx])
# yield typ(*row)
#
# def ticket_sort_key(ticket):
# "sort key for an ATL ticket, eg. A..Z, AA..ZZ"
# return (len(ticket), ticket)
, which may include functions, classes, or code. Output only the next line. | Preference=int_or_none, |
Using the snippet: <|code_start|>
class SenateATL:
"parses AEC candidates Senate ATL data file (pre-2015)"
def __init__(self, state_name, gvt_csv, firstprefs_csv):
self.state_name = state_name
self.gvt = defaultdict(list)
self.ticket_votes = []
self.btl_firstprefs = {}
self.load_tickets(gvt_csv)
self.load_first_preferences(state_name, firstprefs_csv)
def load_tickets(self, gvt_csv):
with open(gvt_csv, 'rt') as fd:
reader = csv.reader(fd)
# skip introduction line
next(reader)
header = next(reader)
# note - this assume the GVT data is formal. FIXME: add a check for this.
it = sorted(
<|code_end|>
, determine the next line of code. You have imports:
import itertools
import gzip
import csv
from collections import defaultdict
from ..common import logger
from .utils import int_or_none, named_tuple_iter, ticket_sort_key
and context (class names, function names, or code) available:
# Path: dividebatur/common.py
# def make_logger(name: str) -> logging.Logger:
#
# Path: dividebatur/aecdata/utils.py
# def int_or_none(s):
# if s == '':
# return None
# try:
# return int(s)
# except ValueError:
# return None
#
# def named_tuple_iter(name, reader, header, **kwargs):
# field_names = [t for t in [t.strip().replace('-', '_')
# for t in header] if t]
# typ = namedtuple(name, field_names)
# mappings = []
# for field_name in kwargs:
# idx = field_names.index(field_name)
# mappings.append((idx, kwargs[field_name]))
# for row in reader:
# for idx, map_fn in mappings:
# row[idx] = map_fn(row[idx])
# yield typ(*row)
#
# def ticket_sort_key(ticket):
# "sort key for an ATL ticket, eg. A..Z, AA..ZZ"
# return (len(ticket), ticket)
. Output only the next line. | named_tuple_iter('GvtRow', reader, header, PreferenceNo=int, TicketNo=int, CandidateID=int, OwnerTicket=lambda t: t.strip()), |
Based on the snippet: <|code_start|>
class SenateATL:
"parses AEC candidates Senate ATL data file (pre-2015)"
def __init__(self, state_name, gvt_csv, firstprefs_csv):
self.state_name = state_name
self.gvt = defaultdict(list)
self.ticket_votes = []
self.btl_firstprefs = {}
self.load_tickets(gvt_csv)
self.load_first_preferences(state_name, firstprefs_csv)
def load_tickets(self, gvt_csv):
with open(gvt_csv, 'rt') as fd:
reader = csv.reader(fd)
# skip introduction line
next(reader)
header = next(reader)
# note - this assume the GVT data is formal. FIXME: add a check for this.
it = sorted(
named_tuple_iter('GvtRow', reader, header, PreferenceNo=int, TicketNo=int, CandidateID=int, OwnerTicket=lambda t: t.strip()),
<|code_end|>
, predict the immediate next line with the help of imports:
import itertools
import gzip
import csv
from collections import defaultdict
from ..common import logger
from .utils import int_or_none, named_tuple_iter, ticket_sort_key
and context (classes, functions, sometimes code) from other files:
# Path: dividebatur/common.py
# def make_logger(name: str) -> logging.Logger:
#
# Path: dividebatur/aecdata/utils.py
# def int_or_none(s):
# if s == '':
# return None
# try:
# return int(s)
# except ValueError:
# return None
#
# def named_tuple_iter(name, reader, header, **kwargs):
# field_names = [t for t in [t.strip().replace('-', '_')
# for t in header] if t]
# typ = namedtuple(name, field_names)
# mappings = []
# for field_name in kwargs:
# idx = field_names.index(field_name)
# mappings.append((idx, kwargs[field_name]))
# for row in reader:
# for idx, map_fn in mappings:
# row[idx] = map_fn(row[idx])
# yield typ(*row)
#
# def ticket_sort_key(ticket):
# "sort key for an ATL ticket, eg. A..Z, AA..ZZ"
# return (len(ticket), ticket)
. Output only the next line. | key=lambda gvt: (gvt.State, ticket_sort_key(gvt.OwnerTicket), gvt.TicketNo, gvt.PreferenceNo)) |
Here is a snippet: <|code_start|>
_logger = logging.getLogger('weixin_pay_notificaiton')
def process_notify(request):
_logger.info('received weixin pay notification.body:{}'.format(request.body))
<|code_end|>
. Write the next line using the current file imports:
import logging
from django.http import HttpResponse
from openunipay.models import PAY_WAY_WEIXIN
from openunipay.paygateway import unipay
and context from other files:
# Path: openunipay/models.py
# PAY_WAY_WEIXIN = 'WEIXIN'
#
# Path: openunipay/paygateway/unipay.py
# _PAY_GATEWAY = {PAY_WAY_WEIXIN: weixin.WeiXinPayGateway(),
# PAY_WAY_ALI: alipay.AliPayGateway(), }
# def create_order(orderno, payway, clientIp, product_desc, product_detail, fee, user=None, attach=None, expire=1440, **kwargs):
# def query_order(orderno):
# def process_notify(payway, requestContent):
# def is_supportted_payway(payway):
# def _update_order_pay_result(payResult):
# def generate_qr_pay_url(payway, productid):
# def process_qr_pay_notify(payway, requestContent):
, which may include functions, classes, or code. Output only the next line. | unipay.process_notify(PAY_WAY_WEIXIN, request.body)
|
Here is a snippet: <|code_start|>
_logger = logging.getLogger('weixin_pay_notificaiton')
def process_notify(request):
_logger.info('received weixin pay notification.body:{}'.format(request.body))
<|code_end|>
. Write the next line using the current file imports:
import logging
from django.http import HttpResponse
from openunipay.models import PAY_WAY_WEIXIN
from openunipay.paygateway import unipay
and context from other files:
# Path: openunipay/models.py
# PAY_WAY_WEIXIN = 'WEIXIN'
#
# Path: openunipay/paygateway/unipay.py
# _PAY_GATEWAY = {PAY_WAY_WEIXIN: weixin.WeiXinPayGateway(),
# PAY_WAY_ALI: alipay.AliPayGateway(), }
# def create_order(orderno, payway, clientIp, product_desc, product_detail, fee, user=None, attach=None, expire=1440, **kwargs):
# def query_order(orderno):
# def process_notify(payway, requestContent):
# def is_supportted_payway(payway):
# def _update_order_pay_result(payResult):
# def generate_qr_pay_url(payway, productid):
# def process_qr_pay_notify(payway, requestContent):
, which may include functions, classes, or code. Output only the next line. | unipay.process_notify(PAY_WAY_WEIXIN, request.body)
|
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
_SERVICE = 'mobile.securitypay.pay'
_CHARSET = 'utf-8'
_SIGN_TYPE = 'RSA'
_PAYMENT_TYPE = '1'
_ALIPAY_ORDER_FIELD = ('out_trade_no', 'subject', 'body', 'total_fee', 'it_b_pay',)
class AliPayOrder(models.Model):
out_trade_no = models.CharField(verbose_name='商户订单号', max_length=32, db_index=True, editable=False)
subject = models.CharField(verbose_name='商品名称', max_length=128, editable=False)
body = models.CharField(verbose_name='商品详情', max_length=512, editable=False)
total_fee = models.DecimalField(verbose_name='总金额(单位:元)', max_digits=6, decimal_places=2, editable=False)
it_b_pay = models.CharField(verbose_name='交易有效期', max_length=19, editable=False)
date_create = models.DateTimeField(verbose_name=u'创建时间', auto_now_add=True)
class Meta:
verbose_name = u'支付宝订单'
verbose_name_plural = u'支付宝订单'
def __str__(self):
return self.out_trade_no
def compose_interface_data(self):
# sign data
data = self._compose_data()
<|code_end|>
. Write the next line using the current file imports:
from django.db import models
from django.conf import settings
from urllib.parse import quote_plus
from openunipay.ali_pay import security
and context from other files:
# Path: openunipay/ali_pay/security.py
# def sign(data):
# def verify(data, sign, pemKeyfile):
# def verify_ali_data(valueDict):
# def _load_private_key(pemKeyfile):
# def _load_public_key(pemKeyfile):
, which may include functions, classes, or code. Output only the next line. | return '{}&sign_type="RSA"&sign="{}"'.format(data, quote_plus(security.sign(data)))
|
Predict the next line for this snippet: <|code_start|> obj.initial_orlder()
admin.ModelAdmin.save_model(self, request, obj, form, change)
########################### product #########################
class ProductResource(resources.ModelResource):
class Meta:
model = OrderItem
import_id_fields = ('productid', )
class ProductAdmin(ImportExportModelAdmin):
resource_class = ProductResource
# list page
list_display = ('productid',
'product_desc',
'fee',
'weinxin_qrurl',
'date_create',
'date_update', )
ordering = ('-date_create', )
search_fields = ['=productid', ]
def save_model(self, request, obj, form, change):
if not change:
obj.weinxin_qrurl = unipay.generate_qr_pay_url(PAY_WAY_WEIXIN, obj.productid)
admin.ModelAdmin.save_model(self, request, obj, form, change)
admin.site.register(OrderItem, OrderItemAdmin)
<|code_end|>
with the help of current file imports:
from django.contrib import admin
from .ali_pay.admin import *
from .weixin_pay.admin import *
from .models import OrderItem, Product, PAY_WAY_WEIXIN
from openunipay.paygateway import unipay
and context from other files:
# Path: openunipay/models.py
# class OrderItem(models.Model):
# orderno = models.CharField(verbose_name=u'订单号', max_length=50, primary_key=True, editable=False)
# user = models.CharField(verbose_name=u'用户标识', max_length=50, null=True, blank=True)
# product_desc = models.CharField(verbose_name=u'商品描述', max_length=128, null=False, blank=False)
# product_detail = models.TextField(verbose_name=u'商品详情', max_length=1000, null=False, blank=False)
# fee = models.DecimalField(verbose_name=u'金额(单位:分)', max_digits=6, decimal_places=0, null=False, blank=False)
# attach = models.CharField(verbose_name=u'附加数据', max_length=127, null=True, blank=True)
# dt_start = models.DateTimeField(verbose_name=u'交易开始时间', null=False, blank=False, editable=False)
# dt_end = models.DateTimeField(verbose_name=u'交易失效时间', null=False, blank=False, editable=False)
# dt_pay = models.DateTimeField(verbose_name=u'付款时间', null=True, blank=True, editable=False)
# paied = models.BooleanField(verbose_name=u'已收款', null=False, blank=False, default=False, editable=False)
# lapsed = models.BooleanField(verbose_name=u'已失效', null=False, blank=False, default=False, editable=False)
# payway = models.CharField(verbose_name=u'支付方式', max_length=10, null=False, blank=False, choices=PAY_WAY, default=PAY_WAY[0][0])
# date_create = models.DateTimeField(verbose_name=u'创建时间', auto_now_add=True)
# date_update = models.DateTimeField(verbose_name=u'修改时间', auto_now=True)
#
# class Meta:
# verbose_name = '付款单'
# verbose_name_plural = '付款单'
#
# def __str__(self):
# return self.orderno
#
# def _set_expire_time(self, expire):
# self.dt_start = datetime.local_now()
# self.dt_end = self.dt_start + timedelta(minutes=expire)
#
# def initial_orlder(self, expire):
# self._set_expire_time(expire)
#
# class Product(models.Model):
# productid = models.CharField(verbose_name=u'商品ID', max_length=50, primary_key=True)
# product_desc = models.CharField(verbose_name=u'商品描述', max_length=128, null=False, blank=False)
# product_detail = models.TextField(verbose_name=u'商品详情', max_length=1000, null=False, blank=False)
# fee = models.DecimalField(verbose_name=u'金额(单位:分)', max_digits=6, decimal_places=0, null=False, blank=False)
# weinxin_qrurl = models.CharField(verbose_name=u'微信扫码支付URL', max_length=500, null=True, blank=True)
# date_create = models.DateTimeField(verbose_name=u'创建时间', auto_now_add=True)
# date_update = models.DateTimeField(verbose_name=u'修改时间', auto_now=True)
#
# class Meta:
# verbose_name = '商品'
# verbose_name_plural = '商品'
#
# PAY_WAY_WEIXIN = 'WEIXIN'
#
# Path: openunipay/paygateway/unipay.py
# _PAY_GATEWAY = {PAY_WAY_WEIXIN: weixin.WeiXinPayGateway(),
# PAY_WAY_ALI: alipay.AliPayGateway(), }
# def create_order(orderno, payway, clientIp, product_desc, product_detail, fee, user=None, attach=None, expire=1440, **kwargs):
# def query_order(orderno):
# def process_notify(payway, requestContent):
# def is_supportted_payway(payway):
# def _update_order_pay_result(payResult):
# def generate_qr_pay_url(payway, productid):
# def process_qr_pay_notify(payway, requestContent):
, which may contain function names, class names, or code. Output only the next line. | admin.site.register(Product, ProductAdmin)
|
Given the following code snippet before the placeholder: <|code_start|> 'paied',
'product_desc', )
def save_model(self, request, obj, form, change):
if not obj.orderno:
obj.initial_orlder()
admin.ModelAdmin.save_model(self, request, obj, form, change)
########################### product #########################
class ProductResource(resources.ModelResource):
class Meta:
model = OrderItem
import_id_fields = ('productid', )
class ProductAdmin(ImportExportModelAdmin):
resource_class = ProductResource
# list page
list_display = ('productid',
'product_desc',
'fee',
'weinxin_qrurl',
'date_create',
'date_update', )
ordering = ('-date_create', )
search_fields = ['=productid', ]
def save_model(self, request, obj, form, change):
if not change:
<|code_end|>
, predict the next line using imports from the current file:
from django.contrib import admin
from .ali_pay.admin import *
from .weixin_pay.admin import *
from .models import OrderItem, Product, PAY_WAY_WEIXIN
from openunipay.paygateway import unipay
and context including class names, function names, and sometimes code from other files:
# Path: openunipay/models.py
# class OrderItem(models.Model):
# orderno = models.CharField(verbose_name=u'订单号', max_length=50, primary_key=True, editable=False)
# user = models.CharField(verbose_name=u'用户标识', max_length=50, null=True, blank=True)
# product_desc = models.CharField(verbose_name=u'商品描述', max_length=128, null=False, blank=False)
# product_detail = models.TextField(verbose_name=u'商品详情', max_length=1000, null=False, blank=False)
# fee = models.DecimalField(verbose_name=u'金额(单位:分)', max_digits=6, decimal_places=0, null=False, blank=False)
# attach = models.CharField(verbose_name=u'附加数据', max_length=127, null=True, blank=True)
# dt_start = models.DateTimeField(verbose_name=u'交易开始时间', null=False, blank=False, editable=False)
# dt_end = models.DateTimeField(verbose_name=u'交易失效时间', null=False, blank=False, editable=False)
# dt_pay = models.DateTimeField(verbose_name=u'付款时间', null=True, blank=True, editable=False)
# paied = models.BooleanField(verbose_name=u'已收款', null=False, blank=False, default=False, editable=False)
# lapsed = models.BooleanField(verbose_name=u'已失效', null=False, blank=False, default=False, editable=False)
# payway = models.CharField(verbose_name=u'支付方式', max_length=10, null=False, blank=False, choices=PAY_WAY, default=PAY_WAY[0][0])
# date_create = models.DateTimeField(verbose_name=u'创建时间', auto_now_add=True)
# date_update = models.DateTimeField(verbose_name=u'修改时间', auto_now=True)
#
# class Meta:
# verbose_name = '付款单'
# verbose_name_plural = '付款单'
#
# def __str__(self):
# return self.orderno
#
# def _set_expire_time(self, expire):
# self.dt_start = datetime.local_now()
# self.dt_end = self.dt_start + timedelta(minutes=expire)
#
# def initial_orlder(self, expire):
# self._set_expire_time(expire)
#
# class Product(models.Model):
# productid = models.CharField(verbose_name=u'商品ID', max_length=50, primary_key=True)
# product_desc = models.CharField(verbose_name=u'商品描述', max_length=128, null=False, blank=False)
# product_detail = models.TextField(verbose_name=u'商品详情', max_length=1000, null=False, blank=False)
# fee = models.DecimalField(verbose_name=u'金额(单位:分)', max_digits=6, decimal_places=0, null=False, blank=False)
# weinxin_qrurl = models.CharField(verbose_name=u'微信扫码支付URL', max_length=500, null=True, blank=True)
# date_create = models.DateTimeField(verbose_name=u'创建时间', auto_now_add=True)
# date_update = models.DateTimeField(verbose_name=u'修改时间', auto_now=True)
#
# class Meta:
# verbose_name = '商品'
# verbose_name_plural = '商品'
#
# PAY_WAY_WEIXIN = 'WEIXIN'
#
# Path: openunipay/paygateway/unipay.py
# _PAY_GATEWAY = {PAY_WAY_WEIXIN: weixin.WeiXinPayGateway(),
# PAY_WAY_ALI: alipay.AliPayGateway(), }
# def create_order(orderno, payway, clientIp, product_desc, product_detail, fee, user=None, attach=None, expire=1440, **kwargs):
# def query_order(orderno):
# def process_notify(payway, requestContent):
# def is_supportted_payway(payway):
# def _update_order_pay_result(payResult):
# def generate_qr_pay_url(payway, productid):
# def process_qr_pay_notify(payway, requestContent):
. Output only the next line. | obj.weinxin_qrurl = unipay.generate_qr_pay_url(PAY_WAY_WEIXIN, obj.productid)
|
Given the code snippet: <|code_start|> 'paied',
'product_desc', )
def save_model(self, request, obj, form, change):
if not obj.orderno:
obj.initial_orlder()
admin.ModelAdmin.save_model(self, request, obj, form, change)
########################### product #########################
class ProductResource(resources.ModelResource):
class Meta:
model = OrderItem
import_id_fields = ('productid', )
class ProductAdmin(ImportExportModelAdmin):
resource_class = ProductResource
# list page
list_display = ('productid',
'product_desc',
'fee',
'weinxin_qrurl',
'date_create',
'date_update', )
ordering = ('-date_create', )
search_fields = ['=productid', ]
def save_model(self, request, obj, form, change):
if not change:
<|code_end|>
, generate the next line using the imports in this file:
from django.contrib import admin
from .ali_pay.admin import *
from .weixin_pay.admin import *
from .models import OrderItem, Product, PAY_WAY_WEIXIN
from openunipay.paygateway import unipay
and context (functions, classes, or occasionally code) from other files:
# Path: openunipay/models.py
# class OrderItem(models.Model):
# orderno = models.CharField(verbose_name=u'订单号', max_length=50, primary_key=True, editable=False)
# user = models.CharField(verbose_name=u'用户标识', max_length=50, null=True, blank=True)
# product_desc = models.CharField(verbose_name=u'商品描述', max_length=128, null=False, blank=False)
# product_detail = models.TextField(verbose_name=u'商品详情', max_length=1000, null=False, blank=False)
# fee = models.DecimalField(verbose_name=u'金额(单位:分)', max_digits=6, decimal_places=0, null=False, blank=False)
# attach = models.CharField(verbose_name=u'附加数据', max_length=127, null=True, blank=True)
# dt_start = models.DateTimeField(verbose_name=u'交易开始时间', null=False, blank=False, editable=False)
# dt_end = models.DateTimeField(verbose_name=u'交易失效时间', null=False, blank=False, editable=False)
# dt_pay = models.DateTimeField(verbose_name=u'付款时间', null=True, blank=True, editable=False)
# paied = models.BooleanField(verbose_name=u'已收款', null=False, blank=False, default=False, editable=False)
# lapsed = models.BooleanField(verbose_name=u'已失效', null=False, blank=False, default=False, editable=False)
# payway = models.CharField(verbose_name=u'支付方式', max_length=10, null=False, blank=False, choices=PAY_WAY, default=PAY_WAY[0][0])
# date_create = models.DateTimeField(verbose_name=u'创建时间', auto_now_add=True)
# date_update = models.DateTimeField(verbose_name=u'修改时间', auto_now=True)
#
# class Meta:
# verbose_name = '付款单'
# verbose_name_plural = '付款单'
#
# def __str__(self):
# return self.orderno
#
# def _set_expire_time(self, expire):
# self.dt_start = datetime.local_now()
# self.dt_end = self.dt_start + timedelta(minutes=expire)
#
# def initial_orlder(self, expire):
# self._set_expire_time(expire)
#
# class Product(models.Model):
# productid = models.CharField(verbose_name=u'商品ID', max_length=50, primary_key=True)
# product_desc = models.CharField(verbose_name=u'商品描述', max_length=128, null=False, blank=False)
# product_detail = models.TextField(verbose_name=u'商品详情', max_length=1000, null=False, blank=False)
# fee = models.DecimalField(verbose_name=u'金额(单位:分)', max_digits=6, decimal_places=0, null=False, blank=False)
# weinxin_qrurl = models.CharField(verbose_name=u'微信扫码支付URL', max_length=500, null=True, blank=True)
# date_create = models.DateTimeField(verbose_name=u'创建时间', auto_now_add=True)
# date_update = models.DateTimeField(verbose_name=u'修改时间', auto_now=True)
#
# class Meta:
# verbose_name = '商品'
# verbose_name_plural = '商品'
#
# PAY_WAY_WEIXIN = 'WEIXIN'
#
# Path: openunipay/paygateway/unipay.py
# _PAY_GATEWAY = {PAY_WAY_WEIXIN: weixin.WeiXinPayGateway(),
# PAY_WAY_ALI: alipay.AliPayGateway(), }
# def create_order(orderno, payway, clientIp, product_desc, product_detail, fee, user=None, attach=None, expire=1440, **kwargs):
# def query_order(orderno):
# def process_notify(payway, requestContent):
# def is_supportted_payway(payway):
# def _update_order_pay_result(payResult):
# def generate_qr_pay_url(payway, productid):
# def process_qr_pay_notify(payway, requestContent):
. Output only the next line. | obj.weinxin_qrurl = unipay.generate_qr_pay_url(PAY_WAY_WEIXIN, obj.productid)
|
Based on the snippet: <|code_start|>
_logger = logging.getLogger('openunipay_ali_pay_notificaiton')
def process_notify(request):
_logger.info('received ali pay notification.body:{}'.format(request.body))
<|code_end|>
, predict the immediate next line with the help of imports:
import logging
from django.http import HttpResponse
from openunipay.models import PAY_WAY_ALI
from openunipay.paygateway import unipay
and context (classes, functions, sometimes code) from other files:
# Path: openunipay/models.py
# PAY_WAY_ALI = 'ALI'
#
# Path: openunipay/paygateway/unipay.py
# _PAY_GATEWAY = {PAY_WAY_WEIXIN: weixin.WeiXinPayGateway(),
# PAY_WAY_ALI: alipay.AliPayGateway(), }
# def create_order(orderno, payway, clientIp, product_desc, product_detail, fee, user=None, attach=None, expire=1440, **kwargs):
# def query_order(orderno):
# def process_notify(payway, requestContent):
# def is_supportted_payway(payway):
# def _update_order_pay_result(payResult):
# def generate_qr_pay_url(payway, productid):
# def process_qr_pay_notify(payway, requestContent):
. Output only the next line. | unipay.process_notify(PAY_WAY_ALI, request)
|
Given the following code snippet before the placeholder: <|code_start|>
_logger = logging.getLogger('openunipay_ali_pay_notificaiton')
def process_notify(request):
_logger.info('received ali pay notification.body:{}'.format(request.body))
<|code_end|>
, predict the next line using imports from the current file:
import logging
from django.http import HttpResponse
from openunipay.models import PAY_WAY_ALI
from openunipay.paygateway import unipay
and context including class names, function names, and sometimes code from other files:
# Path: openunipay/models.py
# PAY_WAY_ALI = 'ALI'
#
# Path: openunipay/paygateway/unipay.py
# _PAY_GATEWAY = {PAY_WAY_WEIXIN: weixin.WeiXinPayGateway(),
# PAY_WAY_ALI: alipay.AliPayGateway(), }
# def create_order(orderno, payway, clientIp, product_desc, product_detail, fee, user=None, attach=None, expire=1440, **kwargs):
# def query_order(orderno):
# def process_notify(payway, requestContent):
# def is_supportted_payway(payway):
# def _update_order_pay_result(payResult):
# def generate_qr_pay_url(payway, productid):
# def process_qr_pay_notify(payway, requestContent):
. Output only the next line. | unipay.process_notify(PAY_WAY_ALI, request)
|
Given the code snippet: <|code_start|>PAY_WAY_WEIXIN = 'WEIXIN'
PAY_WAY_ALI = 'ALI'
PAY_WAY = ((PAY_WAY_WEIXIN, u'微信支付'),
(PAY_WAY_ALI, u'支付宝支付'), )
class OrderItem(models.Model):
orderno = models.CharField(verbose_name=u'订单号', max_length=50, primary_key=True, editable=False)
user = models.CharField(verbose_name=u'用户标识', max_length=50, null=True, blank=True)
product_desc = models.CharField(verbose_name=u'商品描述', max_length=128, null=False, blank=False)
product_detail = models.TextField(verbose_name=u'商品详情', max_length=1000, null=False, blank=False)
fee = models.DecimalField(verbose_name=u'金额(单位:分)', max_digits=6, decimal_places=0, null=False, blank=False)
attach = models.CharField(verbose_name=u'附加数据', max_length=127, null=True, blank=True)
dt_start = models.DateTimeField(verbose_name=u'交易开始时间', null=False, blank=False, editable=False)
dt_end = models.DateTimeField(verbose_name=u'交易失效时间', null=False, blank=False, editable=False)
dt_pay = models.DateTimeField(verbose_name=u'付款时间', null=True, blank=True, editable=False)
paied = models.BooleanField(verbose_name=u'已收款', null=False, blank=False, default=False, editable=False)
lapsed = models.BooleanField(verbose_name=u'已失效', null=False, blank=False, default=False, editable=False)
payway = models.CharField(verbose_name=u'支付方式', max_length=10, null=False, blank=False, choices=PAY_WAY, default=PAY_WAY[0][0])
date_create = models.DateTimeField(verbose_name=u'创建时间', auto_now_add=True)
date_update = models.DateTimeField(verbose_name=u'修改时间', auto_now=True)
class Meta:
verbose_name = '付款单'
verbose_name_plural = '付款单'
def __str__(self):
return self.orderno
def _set_expire_time(self, expire):
<|code_end|>
, generate the next line using the imports in this file:
from django.db import models
from openunipay.ali_pay.models import *
from openunipay.weixin_pay.models import *
from openunipay.util import datetime
from datetime import timedelta
and context (functions, classes, or occasionally code) from other files:
# Path: openunipay/util/datetime.py
# def utc_now():
# def local_now():
# def now_str():
# def get_timestamp():
# def get_unix_timestamp():
. Output only the next line. | self.dt_start = datetime.local_now()
|
Using the snippet: <|code_start|>"""uimbank_server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
urlpatterns = [
url(r'^notify/weixin/$', views_weixin.process_notify),
<|code_end|>
, determine the next line of code. You have imports:
from django.conf.urls import url
from openunipay.api import views_alipay, views_weixin
and context (class names, function names, or code) available:
# Path: openunipay/api/views_alipay.py
# def process_notify(request):
#
# Path: openunipay/api/views_weixin.py
# def process_notify(request):
# def process_qr_notify(request):
. Output only the next line. | url(r'^notify/alipay/$', views_alipay.process_notify), |
Predict the next line for this snippet: <|code_start|>"""uimbank_server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
urlpatterns = [
<|code_end|>
with the help of current file imports:
from django.conf.urls import url
from openunipay.api import views_alipay, views_weixin
and context from other files:
# Path: openunipay/api/views_alipay.py
# def process_notify(request):
#
# Path: openunipay/api/views_weixin.py
# def process_notify(request):
# def process_qr_notify(request):
, which may contain function names, class names, or code. Output only the next line. | url(r'^notify/weixin/$', views_weixin.process_notify), |
Predict the next line for this snippet: <|code_start|>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class TestEFSiteConfig(unittest.TestCase):
"""
TestEFSiteConfig class for ef_site_config testing.
"""
def setUp(self):
test_ef_site_config_file = os.path.join(os.path.dirname(__file__), '../test_data/ef_site_config.yml')
self.test_config = open(test_ef_site_config_file).read()
def test_site_load_local_file(self):
"""Test parsing a site config"""
with patch('__builtin__.open', mock_open(read_data=self.test_config)) as mock_file:
<|code_end|>
with the help of current file imports:
import os
import unittest
from mock import MagicMock, Mock, patch, mock_open
from efopen import ef_site_config
and context from other files:
# Path: efopen/ef_site_config.py
# class EFSiteConfig(object):
# def __init__(self):
# def load(self):
# def load_from_ssm(self):
# def load_from_local_file(self):
, which may contain function names, class names, or code. Output only the next line. | test_config = ef_site_config.EFSiteConfig().load_from_local_file() |
Continue the code snippet: <|code_start|>
class TestMarkdown(TestCase):
def test_render_md_to_html(self):
scenarios = (
{'md': 'title\n===', 'expect': '<div class="highlight"><h1>title</h1>\n</div>'},
{'md': '#1\n##2', 'expect': '<div class="highlight"><h1>1</h1>\n\n<h2 id="2">2</h2>\n</div>'},
)
for scena in scenarios:
self.assertEqual(render_md_to_html(scena['md']), scena['expect'])
class TestEncrypt(TestCase):
def generate_random(self):
return ''.join([random.choice(ascii_lowercase) for _ in range(15)])
def test_bcrypt(self):
passwd = self.generate_random()
wrong = passwd[:-1] + '0'
<|code_end|>
. Use current file imports:
import random
import jwt
from string import ascii_lowercase
from unittest import TestCase
from gargantua.utils import render_md_to_html, \
generate_passwd, validate_passwd, generate_token, validate_token
and context (classes, functions, or code) from other files:
# Path: gargantua/utils/encryt.py
# def generate_passwd(passwd):
# return bcrypt.hashpw(passwd.encode('utf8'), bcrypt.gensalt(len(passwd))).decode('utf8')
#
# def validate_passwd(passwd, hashed):
# return bcrypt.hashpw(passwd.encode('utf8'), hashed.encode('utf8')) == hashed.encode('utf8')
#
# def generate_token(json_, secret=SECRET_KEY):
# return jwt.encode(json_, secret, algorithm='HS512').decode()
#
# def validate_token(token, secret=SECRET_KEY):
# return decode_token(token, secret=secret) is not None
#
# Path: gargantua/utils/markdown.py
# def render_md_to_html(content, is_extract_menu=False):
# html = markdown2.markdown(content, extras=['fenced-code-blocks', 'footnotes', 'tables'])
# html = '<div>{}</div>'.format(html)
# html = TITLE_REG.sub(r'<\2 id="\3">\3</\2>', html)
# html = html.replace('class="codehilite"', 'class="codehilite highlight"')
# h2_count = h3_count = 1
# if is_extract_menu:
# title_menu = TitleMenu()
# for cont, level, title in TITLE_REG.findall(html):
# title_menu.add_title(level, title)
# if level == 'h2': # 给 h2 添加中文序号
# serial = convert2chn_serial(h2_count)
# h2_count += 1
# h3_count = 1
# elif level == 'h3': # 给 h3 添加序号
# serial = '{}'.format(h3_count)
# h3_count += 1
#
# new_title = '<{level} id="{title}">{serial}、{title}</{level}>'\
# .format(level=level, title=title, serial=serial)
# html = html.replace(cont, new_title)
#
# return html, title_menu.render()
# else:
# return html
. Output only the next line. | hashed = generate_passwd(passwd) |
Predict the next line after this snippet: <|code_start|>
class TestMarkdown(TestCase):
def test_render_md_to_html(self):
scenarios = (
{'md': 'title\n===', 'expect': '<div class="highlight"><h1>title</h1>\n</div>'},
{'md': '#1\n##2', 'expect': '<div class="highlight"><h1>1</h1>\n\n<h2 id="2">2</h2>\n</div>'},
)
for scena in scenarios:
self.assertEqual(render_md_to_html(scena['md']), scena['expect'])
class TestEncrypt(TestCase):
def generate_random(self):
return ''.join([random.choice(ascii_lowercase) for _ in range(15)])
def test_bcrypt(self):
passwd = self.generate_random()
wrong = passwd[:-1] + '0'
hashed = generate_passwd(passwd)
<|code_end|>
using the current file's imports:
import random
import jwt
from string import ascii_lowercase
from unittest import TestCase
from gargantua.utils import render_md_to_html, \
generate_passwd, validate_passwd, generate_token, validate_token
and any relevant context from other files:
# Path: gargantua/utils/encryt.py
# def generate_passwd(passwd):
# return bcrypt.hashpw(passwd.encode('utf8'), bcrypt.gensalt(len(passwd))).decode('utf8')
#
# def validate_passwd(passwd, hashed):
# return bcrypt.hashpw(passwd.encode('utf8'), hashed.encode('utf8')) == hashed.encode('utf8')
#
# def generate_token(json_, secret=SECRET_KEY):
# return jwt.encode(json_, secret, algorithm='HS512').decode()
#
# def validate_token(token, secret=SECRET_KEY):
# return decode_token(token, secret=secret) is not None
#
# Path: gargantua/utils/markdown.py
# def render_md_to_html(content, is_extract_menu=False):
# html = markdown2.markdown(content, extras=['fenced-code-blocks', 'footnotes', 'tables'])
# html = '<div>{}</div>'.format(html)
# html = TITLE_REG.sub(r'<\2 id="\3">\3</\2>', html)
# html = html.replace('class="codehilite"', 'class="codehilite highlight"')
# h2_count = h3_count = 1
# if is_extract_menu:
# title_menu = TitleMenu()
# for cont, level, title in TITLE_REG.findall(html):
# title_menu.add_title(level, title)
# if level == 'h2': # 给 h2 添加中文序号
# serial = convert2chn_serial(h2_count)
# h2_count += 1
# h3_count = 1
# elif level == 'h3': # 给 h3 添加序号
# serial = '{}'.format(h3_count)
# h3_count += 1
#
# new_title = '<{level} id="{title}">{serial}、{title}</{level}>'\
# .format(level=level, title=title, serial=serial)
# html = html.replace(cont, new_title)
#
# return html, title_menu.render()
# else:
# return html
. Output only the next line. | self.assertTrue(validate_passwd(passwd, hashed)) |
Here is a snippet: <|code_start|>
class TestMarkdown(TestCase):
def test_render_md_to_html(self):
scenarios = (
{'md': 'title\n===', 'expect': '<div class="highlight"><h1>title</h1>\n</div>'},
{'md': '#1\n##2', 'expect': '<div class="highlight"><h1>1</h1>\n\n<h2 id="2">2</h2>\n</div>'},
)
for scena in scenarios:
self.assertEqual(render_md_to_html(scena['md']), scena['expect'])
class TestEncrypt(TestCase):
def generate_random(self):
return ''.join([random.choice(ascii_lowercase) for _ in range(15)])
def test_bcrypt(self):
passwd = self.generate_random()
wrong = passwd[:-1] + '0'
hashed = generate_passwd(passwd)
self.assertTrue(validate_passwd(passwd, hashed))
self.assertFalse(validate_passwd(wrong, hashed))
def test_jwt(self):
passwd = self.generate_random()
wrong = passwd[:-1] + '0'
j = {'username': 'laisky'}
<|code_end|>
. Write the next line using the current file imports:
import random
import jwt
from string import ascii_lowercase
from unittest import TestCase
from gargantua.utils import render_md_to_html, \
generate_passwd, validate_passwd, generate_token, validate_token
and context from other files:
# Path: gargantua/utils/encryt.py
# def generate_passwd(passwd):
# return bcrypt.hashpw(passwd.encode('utf8'), bcrypt.gensalt(len(passwd))).decode('utf8')
#
# def validate_passwd(passwd, hashed):
# return bcrypt.hashpw(passwd.encode('utf8'), hashed.encode('utf8')) == hashed.encode('utf8')
#
# def generate_token(json_, secret=SECRET_KEY):
# return jwt.encode(json_, secret, algorithm='HS512').decode()
#
# def validate_token(token, secret=SECRET_KEY):
# return decode_token(token, secret=secret) is not None
#
# Path: gargantua/utils/markdown.py
# def render_md_to_html(content, is_extract_menu=False):
# html = markdown2.markdown(content, extras=['fenced-code-blocks', 'footnotes', 'tables'])
# html = '<div>{}</div>'.format(html)
# html = TITLE_REG.sub(r'<\2 id="\3">\3</\2>', html)
# html = html.replace('class="codehilite"', 'class="codehilite highlight"')
# h2_count = h3_count = 1
# if is_extract_menu:
# title_menu = TitleMenu()
# for cont, level, title in TITLE_REG.findall(html):
# title_menu.add_title(level, title)
# if level == 'h2': # 给 h2 添加中文序号
# serial = convert2chn_serial(h2_count)
# h2_count += 1
# h3_count = 1
# elif level == 'h3': # 给 h3 添加序号
# serial = '{}'.format(h3_count)
# h3_count += 1
#
# new_title = '<{level} id="{title}">{serial}、{title}</{level}>'\
# .format(level=level, title=title, serial=serial)
# html = html.replace(cont, new_title)
#
# return html, title_menu.render()
# else:
# return html
, which may include functions, classes, or code. Output only the next line. | token = generate_token(j, passwd) |
Predict the next line for this snippet: <|code_start|>
class TestMarkdown(TestCase):
def test_render_md_to_html(self):
scenarios = (
{'md': 'title\n===', 'expect': '<div class="highlight"><h1>title</h1>\n</div>'},
{'md': '#1\n##2', 'expect': '<div class="highlight"><h1>1</h1>\n\n<h2 id="2">2</h2>\n</div>'},
)
for scena in scenarios:
self.assertEqual(render_md_to_html(scena['md']), scena['expect'])
class TestEncrypt(TestCase):
def generate_random(self):
return ''.join([random.choice(ascii_lowercase) for _ in range(15)])
def test_bcrypt(self):
passwd = self.generate_random()
wrong = passwd[:-1] + '0'
hashed = generate_passwd(passwd)
self.assertTrue(validate_passwd(passwd, hashed))
self.assertFalse(validate_passwd(wrong, hashed))
def test_jwt(self):
passwd = self.generate_random()
wrong = passwd[:-1] + '0'
j = {'username': 'laisky'}
token = generate_token(j, passwd)
<|code_end|>
with the help of current file imports:
import random
import jwt
from string import ascii_lowercase
from unittest import TestCase
from gargantua.utils import render_md_to_html, \
generate_passwd, validate_passwd, generate_token, validate_token
and context from other files:
# Path: gargantua/utils/encryt.py
# def generate_passwd(passwd):
# return bcrypt.hashpw(passwd.encode('utf8'), bcrypt.gensalt(len(passwd))).decode('utf8')
#
# def validate_passwd(passwd, hashed):
# return bcrypt.hashpw(passwd.encode('utf8'), hashed.encode('utf8')) == hashed.encode('utf8')
#
# def generate_token(json_, secret=SECRET_KEY):
# return jwt.encode(json_, secret, algorithm='HS512').decode()
#
# def validate_token(token, secret=SECRET_KEY):
# return decode_token(token, secret=secret) is not None
#
# Path: gargantua/utils/markdown.py
# def render_md_to_html(content, is_extract_menu=False):
# html = markdown2.markdown(content, extras=['fenced-code-blocks', 'footnotes', 'tables'])
# html = '<div>{}</div>'.format(html)
# html = TITLE_REG.sub(r'<\2 id="\3">\3</\2>', html)
# html = html.replace('class="codehilite"', 'class="codehilite highlight"')
# h2_count = h3_count = 1
# if is_extract_menu:
# title_menu = TitleMenu()
# for cont, level, title in TITLE_REG.findall(html):
# title_menu.add_title(level, title)
# if level == 'h2': # 给 h2 添加中文序号
# serial = convert2chn_serial(h2_count)
# h2_count += 1
# h3_count = 1
# elif level == 'h3': # 给 h3 添加序号
# serial = '{}'.format(h3_count)
# h3_count += 1
#
# new_title = '<{level} id="{title}">{serial}、{title}</{level}>'\
# .format(level=level, title=title, serial=serial)
# html = html.replace(cont, new_title)
#
# return html, title_menu.render()
# else:
# return html
, which may contain function names, class names, or code. Output only the next line. | self.assertTrue(validate_token(token, passwd)) |
Continue the code snippet: <|code_start|>
class TestMarkdown(TestCase):
def test_render_md_to_html(self):
scenarios = (
{'md': 'title\n===', 'expect': '<div class="highlight"><h1>title</h1>\n</div>'},
{'md': '#1\n##2', 'expect': '<div class="highlight"><h1>1</h1>\n\n<h2 id="2">2</h2>\n</div>'},
)
for scena in scenarios:
<|code_end|>
. Use current file imports:
import random
import jwt
from string import ascii_lowercase
from unittest import TestCase
from gargantua.utils import render_md_to_html, \
generate_passwd, validate_passwd, generate_token, validate_token
and context (classes, functions, or code) from other files:
# Path: gargantua/utils/encryt.py
# def generate_passwd(passwd):
# return bcrypt.hashpw(passwd.encode('utf8'), bcrypt.gensalt(len(passwd))).decode('utf8')
#
# def validate_passwd(passwd, hashed):
# return bcrypt.hashpw(passwd.encode('utf8'), hashed.encode('utf8')) == hashed.encode('utf8')
#
# def generate_token(json_, secret=SECRET_KEY):
# return jwt.encode(json_, secret, algorithm='HS512').decode()
#
# def validate_token(token, secret=SECRET_KEY):
# return decode_token(token, secret=secret) is not None
#
# Path: gargantua/utils/markdown.py
# def render_md_to_html(content, is_extract_menu=False):
# html = markdown2.markdown(content, extras=['fenced-code-blocks', 'footnotes', 'tables'])
# html = '<div>{}</div>'.format(html)
# html = TITLE_REG.sub(r'<\2 id="\3">\3</\2>', html)
# html = html.replace('class="codehilite"', 'class="codehilite highlight"')
# h2_count = h3_count = 1
# if is_extract_menu:
# title_menu = TitleMenu()
# for cont, level, title in TITLE_REG.findall(html):
# title_menu.add_title(level, title)
# if level == 'h2': # 给 h2 添加中文序号
# serial = convert2chn_serial(h2_count)
# h2_count += 1
# h3_count = 1
# elif level == 'h3': # 给 h3 添加序号
# serial = '{}'.format(h3_count)
# h3_count += 1
#
# new_title = '<{level} id="{title}">{serial}、{title}</{level}>'\
# .format(level=level, title=title, serial=serial)
# html = html.replace(cont, new_title)
#
# return html, title_menu.render()
# else:
# return html
. Output only the next line. | self.assertEqual(render_md_to_html(scena['md']), scena['expect']) |
Continue the code snippet: <|code_start|>
GA_ID = 'UA-65521906-1'
class GargantuaTestCase(AsyncHTTPTestCase):
def get_app(self):
options.debug = True
options.dbhost = 'localhost'
options.dbport = 27017
<|code_end|>
. Use current file imports:
from urllib.parse import urlparse
from tornado.testing import AsyncHTTPTestCase
from tornado.ioloop import IOLoop
from tornado.options import options
from gargantua.app import Application
and context (classes, functions, or code) from other files:
# Path: gargantua/app.py
# class Application(tornado.web.Application):
#
# def get_static_url_prefix(self):
# return '/static/dist/'
#
# def __init__(self):
# settings = {
# 'static_path': str(Path(CWD, 'static', 'dist')),
# 'static_url_prefix': self.get_static_url_prefix(),
# 'template_path': str(Path(CWD, 'html')),
# 'cookie_secret': get_default_config('SECRET_KEY'),
# 'login_url': '/login/',
# 'xsrf_cookies': True,
# 'autoescape': None,
# 'debug': options.debug
# }
# handlers = [
# # ---------------- rss ----------------
# url(r'^/(rss)/$', PostsHandler, name='post:rss'),
# # ---------------- old api ----------------
# url(r'^/(api/posts/.*)/$', PostsHandler, name='api:post'),
# url(r'^/(api/user/.*)/$', UserHandler, name='api:user:login'),
# # ---------------- rest api ----------------
# url(r'^/api/v2/post/category/([a-zA-Z0-9\-_%]+)?/?$', PostCategoriesAPIHandler, name='rest:post_category'),
# url(r'^/api/v2/post/([a-zA-Z0-9\-_%]+)?/?$', PostAPIHandler, name='rest:post'),
# url(r'^/api/v2/tweets/([a-zA-Z0-9\-_]+)?/?$', TweetsAPIHandler, name='rest:tweets'),
# # ---------------- react-router ----------------
# url(r'/.*', ReactRender, name='root'),
# ]
# # handlers.append(('/(.*)', PageNotFound))
# self.setup_db()
# if not options.debug:
# self.setup_mail_handler()
#
# super(Application, self).__init__(handlers, **settings)
#
# def setup_mail_handler(self):
# # set mail handler
# mh = LogMailHandler(mailhost=(options.mail_host, options.mail_port),
# fromaddr=options.mail_from_addr,
# toaddrs=options.mail_to_addrs,
# credentials=(options.mail_username, options.mail_passwd),
# subject=options.mail_subject)
# mh.setFormatter(LogMailFormatter())
# mh.setLevel(logging.ERROR)
# logging.getLogger().addHandler(mh)
#
# def setup_db(self):
# logger.debug('connect database at {}:{}'
# .format(options.dbhost, options.dbport))
#
# model = BaseBlogModel.make_connection(
# host=options.dbhost,
# port=options.dbport,
# db=options.dbname,
# username=options.dbuser,
# passwd=options.dbpasswd,
# )
# self.conn = model.conn
# self.db = model.db
# self.mongo_conn = model.mongo_conn
# self.mongo_db = model.mongo_db
#
# # ensure index
# # posts
# # posts_idx = pymongo.IndexModel([('post_name',)], unique=True)
# # self.mongo_db.posts.create_indexes([posts_idx])
# self.mongo_db.posts.ensure_index([('post_name', pymongo.ASCENDING)], unique=True) # PyMongo2.8
# # users
# # account_idx = pymongo.IndexModel([('account',)], unique=True)
# # username_idx = pymongo.IndexModel([('username',)], unique=True)
# # self.mongo_db.users.create_indexes([account_idx, username_idx])
# self.mongo_db.users.ensure_index([('account', pymongo.ASCENDING)], unique=True) # PyMongo2.8
# self.mongo_db.users.ensure_index([('username', pymongo.ASCENDING)], unique=True) # PyMongo2.8
. Output only the next line. | self.app = Application() |
Using the snippet: <|code_start|>
class MultiAdapter(BankAdapter):
name = 'Multiple adapters'
@property
def adapters(self):
for name in self.config.get('bank_adapters', []):
yield get_bank_adapter(name)(self.config, self.filename)
@property
def fetch_type(self):
for adapter in self.adapters:
if adapter.fetch_type == 'file':
return 'file'
return 'web'
def fetch_accounts(self):
accounts = []
for adapter in self.adapters:
<|code_end|>
, determine the next line of code. You have imports:
from base import *
from ..data import update_accounts, update_transactions
and context (class names, function names, or code) available:
# Path: budgettracker/data.py
# def update_accounts(old_accounts, new_accounts):
# old = {acc.id: acc for acc in old_accounts}
# new_ids = [acc.id for acc in new_accounts]
# final = []
# for acc in new_accounts:
# if acc.id in old:
# final.append(old[acc.id].update(amount=acc.amount))
# else:
# final.append(acc)
# for acc in old_accounts:
# if acc.id not in new_ids:
# final.append(acc)
# return final
#
# def update_transactions(old_transactions, new_transactions):
# old = {tx.id: tx for tx in old_transactions}
# final = []
# for tx in new_transactions:
# if tx.id in old:
# final.append(tx.update(
# categories=list(set(old[tx.id].categories or []) | set(tx.categories or [])),
# goal=old[tx.id].goal
# ))
# old.pop(tx.id)
# else:
# final.append(tx)
# return old.values() + final
. Output only the next line. | accounts = update_accounts(accounts, list(adapter.fetch_accounts())) |
Based on the snippet: <|code_start|>
class MultiAdapter(BankAdapter):
name = 'Multiple adapters'
@property
def adapters(self):
for name in self.config.get('bank_adapters', []):
yield get_bank_adapter(name)(self.config, self.filename)
@property
def fetch_type(self):
for adapter in self.adapters:
if adapter.fetch_type == 'file':
return 'file'
return 'web'
def fetch_accounts(self):
accounts = []
for adapter in self.adapters:
accounts = update_accounts(accounts, list(adapter.fetch_accounts()))
return accounts
def fetch_transactions(self, account, start_date=None, end_date=None):
transactions = []
for adapter in self.adapters:
<|code_end|>
, predict the immediate next line with the help of imports:
from base import *
from ..data import update_accounts, update_transactions
and context (classes, functions, sometimes code) from other files:
# Path: budgettracker/data.py
# def update_accounts(old_accounts, new_accounts):
# old = {acc.id: acc for acc in old_accounts}
# new_ids = [acc.id for acc in new_accounts]
# final = []
# for acc in new_accounts:
# if acc.id in old:
# final.append(old[acc.id].update(amount=acc.amount))
# else:
# final.append(acc)
# for acc in old_accounts:
# if acc.id not in new_ids:
# final.append(acc)
# return final
#
# def update_transactions(old_transactions, new_transactions):
# old = {tx.id: tx for tx in old_transactions}
# final = []
# for tx in new_transactions:
# if tx.id in old:
# final.append(tx.update(
# categories=list(set(old[tx.id].categories or []) | set(tx.categories or [])),
# goal=old[tx.id].goal
# ))
# old.pop(tx.id)
# else:
# final.append(tx)
# return old.values() + final
. Output only the next line. | transactions = update_transactions(transactions, adapter.fetch_transactions(account, start_date, end_date)) |
Continue the code snippet: <|code_start|> new_completed - completed, goal.label, new_save, used[goal.label], target - new_completed))
if total_savings < 0:
_debug(' ! Not enough savings to cover this month (remaining=%s)' % total_savings)
if not remaining_goals:
break
savings_after_goals = max(total_savings - sum(saved.values()), 0)
_debug('END COMPUTING OF GOALS (savings=%s, after goals=%s)' % (total_savings, savings_after_goals))
computed = []
for goal in budget_goals.values():
computed.append(ComputedBudgetGoal.from_savings_goal(goal,
saved=saved[goal.label], used=used[goal.label]))
return computed, savings_after_goals
def budgetize(transactions, start_date, end_date, *args, **kwargs):
budgets = BudgetList()
for date in period_to_months(start_date, end_date):
budgets.append(budgetize_month(transactions, date, *args, **kwargs))
return budgets
def budgetize_month(transactions, date, income_sources=None, planned_expenses=None, budget_goals=None, income_delay=0):
start_date = date.replace(day=1)
end_date = start_date + monthdelta(1)
if income_delay:
<|code_end|>
. Use current file imports:
from collections import namedtuple, OrderedDict
from .data import (split_income_expenses, extract_transactions_by_label, filter_transactions_period,
sort_transactions, period_to_months)
from monthdelta import monthdelta
import datetime
and context (classes, functions, or code) from other files:
# Path: budgettracker/data.py
# def split_income_expenses(transactions):
# income = filter(lambda tx: tx.amount > 0.0, transactions)
# expenses = filter(lambda tx: tx.amount <= 0.0, transactions)
# return income, expenses
#
# def extract_transactions_by_label(transactions, labels):
# def match_label(tx):
# for exp in labels:
# if re.match(exp, tx.label):
# return True
# return False
# matching = filter(match_label, transactions)
# transactions = filter_out_transactions(transactions, matching)
# return matching, transactions
#
# def filter_transactions_period(transactions, start_date=None, end_date=None):
# if not start_date and not end_date:
# return transactions
# return filter(
# lambda tx: (not start_date or tx.date >= start_date) and (not end_date or tx.date < end_date),
# transactions)
#
# def sort_transactions(transactions):
# return sorted(transactions, key=lambda tx: tx.date, reverse=True)
#
# def period_to_months(start_date, end_date):
# dates = [start_date.replace(day=1)]
# end_date = end_date.replace(day=1) - monthdelta(1)
# while dates[-1] < end_date:
# dates.append(dates[-1] + monthdelta(1))
# return dates
. Output only the next line. | income_transactions, _ = split_income_expenses(filter_transactions_period( |
Predict the next line after this snippet: <|code_start|> budgets = BudgetList()
for date in period_to_months(start_date, end_date):
budgets.append(budgetize_month(transactions, date, *args, **kwargs))
return budgets
def budgetize_month(transactions, date, income_sources=None, planned_expenses=None, budget_goals=None, income_delay=0):
start_date = date.replace(day=1)
end_date = start_date + monthdelta(1)
if income_delay:
income_transactions, _ = split_income_expenses(filter_transactions_period(
transactions, start_date + datetime.timedelta(days=income_delay), end_date + datetime.timedelta(days=income_delay)))
_, expenses_transactions = split_income_expenses(filter_transactions_period(transactions, start_date, end_date))
transactions = sort_transactions(income_transactions + expenses_transactions)
else:
transactions = sort_transactions(filter_transactions_period(transactions, start_date, end_date))
income_transactions, expenses_transactions = split_income_expenses(transactions)
expected_income = 0
if income_sources:
income_sources = filter_period(income_sources, start_date, end_date)
expected_income = sum([src.amount for src in income_sources])
planned_expenses_transactions = []
expected_planned_expenses = 0
if planned_expenses:
planned_expenses = filter_period(planned_expenses, start_date, end_date)
planned_expenses_labels = [exp.match for exp in planned_expenses if exp.match]
expected_planned_expenses = sum([exp.amount_per_month for exp in planned_expenses])
<|code_end|>
using the current file's imports:
from collections import namedtuple, OrderedDict
from .data import (split_income_expenses, extract_transactions_by_label, filter_transactions_period,
sort_transactions, period_to_months)
from monthdelta import monthdelta
import datetime
and any relevant context from other files:
# Path: budgettracker/data.py
# def split_income_expenses(transactions):
# income = filter(lambda tx: tx.amount > 0.0, transactions)
# expenses = filter(lambda tx: tx.amount <= 0.0, transactions)
# return income, expenses
#
# def extract_transactions_by_label(transactions, labels):
# def match_label(tx):
# for exp in labels:
# if re.match(exp, tx.label):
# return True
# return False
# matching = filter(match_label, transactions)
# transactions = filter_out_transactions(transactions, matching)
# return matching, transactions
#
# def filter_transactions_period(transactions, start_date=None, end_date=None):
# if not start_date and not end_date:
# return transactions
# return filter(
# lambda tx: (not start_date or tx.date >= start_date) and (not end_date or tx.date < end_date),
# transactions)
#
# def sort_transactions(transactions):
# return sorted(transactions, key=lambda tx: tx.date, reverse=True)
#
# def period_to_months(start_date, end_date):
# dates = [start_date.replace(day=1)]
# end_date = end_date.replace(day=1) - monthdelta(1)
# while dates[-1] < end_date:
# dates.append(dates[-1] + monthdelta(1))
# return dates
. Output only the next line. | planned_expenses_transactions, expenses_transactions = extract_transactions_by_label( |
Predict the next line for this snippet: <|code_start|> new_completed - completed, goal.label, new_save, used[goal.label], target - new_completed))
if total_savings < 0:
_debug(' ! Not enough savings to cover this month (remaining=%s)' % total_savings)
if not remaining_goals:
break
savings_after_goals = max(total_savings - sum(saved.values()), 0)
_debug('END COMPUTING OF GOALS (savings=%s, after goals=%s)' % (total_savings, savings_after_goals))
computed = []
for goal in budget_goals.values():
computed.append(ComputedBudgetGoal.from_savings_goal(goal,
saved=saved[goal.label], used=used[goal.label]))
return computed, savings_after_goals
def budgetize(transactions, start_date, end_date, *args, **kwargs):
budgets = BudgetList()
for date in period_to_months(start_date, end_date):
budgets.append(budgetize_month(transactions, date, *args, **kwargs))
return budgets
def budgetize_month(transactions, date, income_sources=None, planned_expenses=None, budget_goals=None, income_delay=0):
start_date = date.replace(day=1)
end_date = start_date + monthdelta(1)
if income_delay:
<|code_end|>
with the help of current file imports:
from collections import namedtuple, OrderedDict
from .data import (split_income_expenses, extract_transactions_by_label, filter_transactions_period,
sort_transactions, period_to_months)
from monthdelta import monthdelta
import datetime
and context from other files:
# Path: budgettracker/data.py
# def split_income_expenses(transactions):
# income = filter(lambda tx: tx.amount > 0.0, transactions)
# expenses = filter(lambda tx: tx.amount <= 0.0, transactions)
# return income, expenses
#
# def extract_transactions_by_label(transactions, labels):
# def match_label(tx):
# for exp in labels:
# if re.match(exp, tx.label):
# return True
# return False
# matching = filter(match_label, transactions)
# transactions = filter_out_transactions(transactions, matching)
# return matching, transactions
#
# def filter_transactions_period(transactions, start_date=None, end_date=None):
# if not start_date and not end_date:
# return transactions
# return filter(
# lambda tx: (not start_date or tx.date >= start_date) and (not end_date or tx.date < end_date),
# transactions)
#
# def sort_transactions(transactions):
# return sorted(transactions, key=lambda tx: tx.date, reverse=True)
#
# def period_to_months(start_date, end_date):
# dates = [start_date.replace(day=1)]
# end_date = end_date.replace(day=1) - monthdelta(1)
# while dates[-1] < end_date:
# dates.append(dates[-1] + monthdelta(1))
# return dates
, which may contain function names, class names, or code. Output only the next line. | income_transactions, _ = split_income_expenses(filter_transactions_period( |
Next line prediction: <|code_start|> _debug(' ! Not enough savings to cover this month (remaining=%s)' % total_savings)
if not remaining_goals:
break
savings_after_goals = max(total_savings - sum(saved.values()), 0)
_debug('END COMPUTING OF GOALS (savings=%s, after goals=%s)' % (total_savings, savings_after_goals))
computed = []
for goal in budget_goals.values():
computed.append(ComputedBudgetGoal.from_savings_goal(goal,
saved=saved[goal.label], used=used[goal.label]))
return computed, savings_after_goals
def budgetize(transactions, start_date, end_date, *args, **kwargs):
budgets = BudgetList()
for date in period_to_months(start_date, end_date):
budgets.append(budgetize_month(transactions, date, *args, **kwargs))
return budgets
def budgetize_month(transactions, date, income_sources=None, planned_expenses=None, budget_goals=None, income_delay=0):
start_date = date.replace(day=1)
end_date = start_date + monthdelta(1)
if income_delay:
income_transactions, _ = split_income_expenses(filter_transactions_period(
transactions, start_date + datetime.timedelta(days=income_delay), end_date + datetime.timedelta(days=income_delay)))
_, expenses_transactions = split_income_expenses(filter_transactions_period(transactions, start_date, end_date))
<|code_end|>
. Use current file imports:
(from collections import namedtuple, OrderedDict
from .data import (split_income_expenses, extract_transactions_by_label, filter_transactions_period,
sort_transactions, period_to_months)
from monthdelta import monthdelta
import datetime)
and context including class names, function names, or small code snippets from other files:
# Path: budgettracker/data.py
# def split_income_expenses(transactions):
# income = filter(lambda tx: tx.amount > 0.0, transactions)
# expenses = filter(lambda tx: tx.amount <= 0.0, transactions)
# return income, expenses
#
# def extract_transactions_by_label(transactions, labels):
# def match_label(tx):
# for exp in labels:
# if re.match(exp, tx.label):
# return True
# return False
# matching = filter(match_label, transactions)
# transactions = filter_out_transactions(transactions, matching)
# return matching, transactions
#
# def filter_transactions_period(transactions, start_date=None, end_date=None):
# if not start_date and not end_date:
# return transactions
# return filter(
# lambda tx: (not start_date or tx.date >= start_date) and (not end_date or tx.date < end_date),
# transactions)
#
# def sort_transactions(transactions):
# return sorted(transactions, key=lambda tx: tx.date, reverse=True)
#
# def period_to_months(start_date, end_date):
# dates = [start_date.replace(day=1)]
# end_date = end_date.replace(day=1) - monthdelta(1)
# while dates[-1] < end_date:
# dates.append(dates[-1] + monthdelta(1))
# return dates
. Output only the next line. | transactions = sort_transactions(income_transactions + expenses_transactions) |
Continue the code snippet: <|code_start|> saved[goal.label] = new_save
remaining_goals.remove(goal.label)
elif new_completed < completed:
take_back = saved[goal.label] - new_save
saved[goal.label] = new_save
_debug(' - Taking %s from %s (saved=%s, used=%s, remaining=%s)' % (
take_back, goal.label, new_save, used[goal.label], target - new_completed))
else:
saved[goal.label] = new_save
_debug(' + Giving %s to %s (saved=%s, used=%s, remaining=%s)' % (
new_completed - completed, goal.label, new_save, used[goal.label], target - new_completed))
if total_savings < 0:
_debug(' ! Not enough savings to cover this month (remaining=%s)' % total_savings)
if not remaining_goals:
break
savings_after_goals = max(total_savings - sum(saved.values()), 0)
_debug('END COMPUTING OF GOALS (savings=%s, after goals=%s)' % (total_savings, savings_after_goals))
computed = []
for goal in budget_goals.values():
computed.append(ComputedBudgetGoal.from_savings_goal(goal,
saved=saved[goal.label], used=used[goal.label]))
return computed, savings_after_goals
def budgetize(transactions, start_date, end_date, *args, **kwargs):
budgets = BudgetList()
<|code_end|>
. Use current file imports:
from collections import namedtuple, OrderedDict
from .data import (split_income_expenses, extract_transactions_by_label, filter_transactions_period,
sort_transactions, period_to_months)
from monthdelta import monthdelta
import datetime
and context (classes, functions, or code) from other files:
# Path: budgettracker/data.py
# def split_income_expenses(transactions):
# income = filter(lambda tx: tx.amount > 0.0, transactions)
# expenses = filter(lambda tx: tx.amount <= 0.0, transactions)
# return income, expenses
#
# def extract_transactions_by_label(transactions, labels):
# def match_label(tx):
# for exp in labels:
# if re.match(exp, tx.label):
# return True
# return False
# matching = filter(match_label, transactions)
# transactions = filter_out_transactions(transactions, matching)
# return matching, transactions
#
# def filter_transactions_period(transactions, start_date=None, end_date=None):
# if not start_date and not end_date:
# return transactions
# return filter(
# lambda tx: (not start_date or tx.date >= start_date) and (not end_date or tx.date < end_date),
# transactions)
#
# def sort_transactions(transactions):
# return sorted(transactions, key=lambda tx: tx.date, reverse=True)
#
# def period_to_months(start_date, end_date):
# dates = [start_date.replace(day=1)]
# end_date = end_date.replace(day=1) - monthdelta(1)
# while dates[-1] < end_date:
# dates.append(dates[-1] + monthdelta(1))
# return dates
. Output only the next line. | for date in period_to_months(start_date, end_date): |
Here is a snippet: <|code_start|> def to_dict(self):
return {
'name': self.name,
'color': self.color,
'keywords': self.keywords,
'warning_threshold': self.warning_threshold
}
class ComputedCategory(namedtuple('ComputedCategory', ['name', 'color', 'keywords', 'warning_threshold', 'amount', 'pct'])):
@classmethod
def from_category(cls, category, **kwargs):
warning_threshold_multiplier = kwargs.pop('warning_threshold_multiplier', 1)
warning_threshold = category.warning_threshold * warning_threshold_multiplier if category.warning_threshold else None
return cls(name=category.name, color=category.color, keywords=category.keywords,
warning_threshold=warning_threshold, **kwargs)
@property
def has_warning(self):
return self.warning_threshold and self.amount > self.warning_threshold
def to_str(self, famount):
return "%s = %s (%s%%)%s" % (self.name or 'Uncategorized', famount(self.amount), self.pct,
' /!\ %s' % (famount(self.warning_threshold)) if self.has_warning else '')
def compute_categories(transactions, categories=None, start_date=None, end_date=None, warning_threshold_multiplier=1):
categories = {c.name: c for c in categories or []}
amounts = {}
total = 0
<|code_end|>
. Write the next line using the current file imports:
from collections import namedtuple
from .data import filter_transactions_period
import re
and context from other files:
# Path: budgettracker/data.py
# def filter_transactions_period(transactions, start_date=None, end_date=None):
# if not start_date and not end_date:
# return transactions
# return filter(
# lambda tx: (not start_date or tx.date >= start_date) and (not end_date or tx.date < end_date),
# transactions)
, which may include functions, classes, or code. Output only the next line. | for tx in filter_transactions_period(transactions, start_date, end_date): |
Here is a snippet: <|code_start|> def fetch_transactions_from_all_accounts(self, start_date=None, end_date=None):
transactions = []
for account in self.fetch_accounts():
transactions.extend(self.fetch_transactions(account, start_date, end_date))
return sorted(transactions, key=lambda i: i.date, reverse=True)
def create_request_session(self, reuse=True, filename='session.json'):
if reuse and getattr(self, 'request_session_cache', None):
return self.request_session_cache
session = requests.Session()
exp = time.time() - 1800 # cookie jar expires after 30min
if reuse and os.path.exists(filename) and os.path.getmtime(filename) > exp:
with open(filename) as f:
cookies = json.load(f)
session.cookies.update(cookies)
else:
self.login(session)
with open(filename, 'w') as f:
json.dump(session.cookies.get_dict(), f)
self.request_session_cache = session
return session
def make_transaction(self, **kwargs):
if not kwargs.get('id'):
kwargs['id'] = str(uuid.uuid4())
kwargs['label'] = re.sub("\s+", " ", kwargs['label'].replace("\n", " ").strip())
kwargs.setdefault('categories', match_categories(self.categories, kwargs['label']))
kwargs.setdefault('goal', None)
<|code_end|>
. Write the next line using the current file imports:
import json, requests, os, time, uuid, re, inspect
from importlib import import_module
from ..data import Account, Transaction
from ..categories import Category, match_categories
and context from other files:
# Path: budgettracker/data.py
# class Account(namedtuple('Account', ['id', 'title', 'amount'])):
# @classmethod
# def from_dict(cls, dct):
# return cls(**dct)
#
# def update(self, **kwargs):
# return Account(**dict(self._asdict(), **kwargs))
#
# def to_dict(self):
# return {
# 'id': self.id,
# 'title': self.title,
# 'amount': self.amount
# }
#
# class Transaction(namedtuple('Transaction', ['id', 'label', 'date', 'amount', 'account', 'categories', 'goal'])):
# @classmethod
# def from_dict(cls, dct):
# return cls(
# id=dct['id'],
# label=dct['label'],
# date=datetime.datetime.strptime(dct['date'], "%Y-%m-%d").date(),
# amount=float(dct['amount']),
# account=dct['account'],
# categories=dct.get('categories') or [],
# goal=dct.get('goal')
# )
#
# def update(self, **kwargs):
# return Transaction(**dict(self._asdict(), **kwargs))
#
# def to_dict(self):
# return {
# 'id': self.id,
# 'label': self.label,
# 'date': self.date.isoformat(),
# 'amount': self.amount,
# 'account': self.account,
# 'categories': self.categories,
# 'goal': self.goal
# }
#
# def to_str(self, famount):
# return u"%s - %s = %s%s%s" % (self.date.isoformat(), self.label, famount(self.amount),
# ' #%s' % ', #'.join(self.categories) if self.categories else '',
# ' [%s%s]' % (famount(self.goal)) if self.goal else '')
#
# def __unicode__(self):
# return self.to_str()
#
# Path: budgettracker/categories.py
# class Category(namedtuple('Category', ['name', 'color', 'keywords', 'warning_threshold'])):
# @classmethod
# def from_dict(cls, dct):
# return cls(name=dct['name'], color=dct.get('color'), keywords=dct.get('keywords', []),
# warning_threshold=dct.get('warning_threshold'))
#
# def to_dict(self):
# return {
# 'name': self.name,
# 'color': self.color,
# 'keywords': self.keywords,
# 'warning_threshold': self.warning_threshold
# }
#
# def match_categories(categories, label):
# matches = []
# for category in categories:
# for keyword in (category.keywords or []):
# if re.search(r"\b%s\b" % keyword, label, re.I):
# matches.append(category.name)
# continue
# return matches
, which may include functions, classes, or code. Output only the next line. | return Transaction(**kwargs) |
Given snippet: <|code_start|>
def get_bank_adapter(name):
module = import_module('budgettracker.bank_adapters.' + name)
for obj in module.__dict__.values():
if inspect.isclass(obj) and issubclass(obj, BankAdapter) and obj is not BankAdapter:
return obj
class BankAdapter(object):
fetch_type = 'file'
def __init__(self, config, filename=None):
self.config = config
self.filename = filename
@property
def categories(self):
if not self.__dict__.get('categories'):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import json, requests, os, time, uuid, re, inspect
from importlib import import_module
from ..data import Account, Transaction
from ..categories import Category, match_categories
and context:
# Path: budgettracker/data.py
# class Account(namedtuple('Account', ['id', 'title', 'amount'])):
# @classmethod
# def from_dict(cls, dct):
# return cls(**dct)
#
# def update(self, **kwargs):
# return Account(**dict(self._asdict(), **kwargs))
#
# def to_dict(self):
# return {
# 'id': self.id,
# 'title': self.title,
# 'amount': self.amount
# }
#
# class Transaction(namedtuple('Transaction', ['id', 'label', 'date', 'amount', 'account', 'categories', 'goal'])):
# @classmethod
# def from_dict(cls, dct):
# return cls(
# id=dct['id'],
# label=dct['label'],
# date=datetime.datetime.strptime(dct['date'], "%Y-%m-%d").date(),
# amount=float(dct['amount']),
# account=dct['account'],
# categories=dct.get('categories') or [],
# goal=dct.get('goal')
# )
#
# def update(self, **kwargs):
# return Transaction(**dict(self._asdict(), **kwargs))
#
# def to_dict(self):
# return {
# 'id': self.id,
# 'label': self.label,
# 'date': self.date.isoformat(),
# 'amount': self.amount,
# 'account': self.account,
# 'categories': self.categories,
# 'goal': self.goal
# }
#
# def to_str(self, famount):
# return u"%s - %s = %s%s%s" % (self.date.isoformat(), self.label, famount(self.amount),
# ' #%s' % ', #'.join(self.categories) if self.categories else '',
# ' [%s%s]' % (famount(self.goal)) if self.goal else '')
#
# def __unicode__(self):
# return self.to_str()
#
# Path: budgettracker/categories.py
# class Category(namedtuple('Category', ['name', 'color', 'keywords', 'warning_threshold'])):
# @classmethod
# def from_dict(cls, dct):
# return cls(name=dct['name'], color=dct.get('color'), keywords=dct.get('keywords', []),
# warning_threshold=dct.get('warning_threshold'))
#
# def to_dict(self):
# return {
# 'name': self.name,
# 'color': self.color,
# 'keywords': self.keywords,
# 'warning_threshold': self.warning_threshold
# }
#
# def match_categories(categories, label):
# matches = []
# for category in categories:
# for keyword in (category.keywords or []):
# if re.search(r"\b%s\b" % keyword, label, re.I):
# matches.append(category.name)
# continue
# return matches
which might include code, classes, or functions. Output only the next line. | self.__dict__['categories'] = map(Category.from_dict, self.config.get('categories', [])) |
Next line prediction: <|code_start|> return self.__dict__['categories']
def fetch_transactions_from_all_accounts(self, start_date=None, end_date=None):
transactions = []
for account in self.fetch_accounts():
transactions.extend(self.fetch_transactions(account, start_date, end_date))
return sorted(transactions, key=lambda i: i.date, reverse=True)
def create_request_session(self, reuse=True, filename='session.json'):
if reuse and getattr(self, 'request_session_cache', None):
return self.request_session_cache
session = requests.Session()
exp = time.time() - 1800 # cookie jar expires after 30min
if reuse and os.path.exists(filename) and os.path.getmtime(filename) > exp:
with open(filename) as f:
cookies = json.load(f)
session.cookies.update(cookies)
else:
self.login(session)
with open(filename, 'w') as f:
json.dump(session.cookies.get_dict(), f)
self.request_session_cache = session
return session
def make_transaction(self, **kwargs):
if not kwargs.get('id'):
kwargs['id'] = str(uuid.uuid4())
kwargs['label'] = re.sub("\s+", " ", kwargs['label'].replace("\n", " ").strip())
<|code_end|>
. Use current file imports:
(import json, requests, os, time, uuid, re, inspect
from importlib import import_module
from ..data import Account, Transaction
from ..categories import Category, match_categories)
and context including class names, function names, or small code snippets from other files:
# Path: budgettracker/data.py
# class Account(namedtuple('Account', ['id', 'title', 'amount'])):
# @classmethod
# def from_dict(cls, dct):
# return cls(**dct)
#
# def update(self, **kwargs):
# return Account(**dict(self._asdict(), **kwargs))
#
# def to_dict(self):
# return {
# 'id': self.id,
# 'title': self.title,
# 'amount': self.amount
# }
#
# class Transaction(namedtuple('Transaction', ['id', 'label', 'date', 'amount', 'account', 'categories', 'goal'])):
# @classmethod
# def from_dict(cls, dct):
# return cls(
# id=dct['id'],
# label=dct['label'],
# date=datetime.datetime.strptime(dct['date'], "%Y-%m-%d").date(),
# amount=float(dct['amount']),
# account=dct['account'],
# categories=dct.get('categories') or [],
# goal=dct.get('goal')
# )
#
# def update(self, **kwargs):
# return Transaction(**dict(self._asdict(), **kwargs))
#
# def to_dict(self):
# return {
# 'id': self.id,
# 'label': self.label,
# 'date': self.date.isoformat(),
# 'amount': self.amount,
# 'account': self.account,
# 'categories': self.categories,
# 'goal': self.goal
# }
#
# def to_str(self, famount):
# return u"%s - %s = %s%s%s" % (self.date.isoformat(), self.label, famount(self.amount),
# ' #%s' % ', #'.join(self.categories) if self.categories else '',
# ' [%s%s]' % (famount(self.goal)) if self.goal else '')
#
# def __unicode__(self):
# return self.to_str()
#
# Path: budgettracker/categories.py
# class Category(namedtuple('Category', ['name', 'color', 'keywords', 'warning_threshold'])):
# @classmethod
# def from_dict(cls, dct):
# return cls(name=dct['name'], color=dct.get('color'), keywords=dct.get('keywords', []),
# warning_threshold=dct.get('warning_threshold'))
#
# def to_dict(self):
# return {
# 'name': self.name,
# 'color': self.color,
# 'keywords': self.keywords,
# 'warning_threshold': self.warning_threshold
# }
#
# def match_categories(categories, label):
# matches = []
# for category in categories:
# for keyword in (category.keywords or []):
# if re.search(r"\b%s\b" % keyword, label, re.I):
# matches.append(category.name)
# continue
# return matches
. Output only the next line. | kwargs.setdefault('categories', match_categories(self.categories, kwargs['label'])) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.