Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Predict the next line for this snippet: <|code_start|> if not twitter_options:
return 'Module is not configured. You must set `twitter_keys` in settings'
api = getattr(bot, 'twitter_api', None)
if api is None:
api = bot.twitter_api = twitter.Api(
consumer_key=twitter_options['consumer_key'],
consumer_secret=twitter_options['consumer_secret'],
access_token_key=twitter_options['access_token_key'],
access_token_secret=twitter_options['access_token_secret']
)
message = kwargs['update']['message']
reply = message.get('reply_to_message', None)
media = None
if reply is None:
text = ' '.join(args)
elif reply.get('photo'):
file = reply.get('photo')[-1]
file_id = file['file_id']
data = {'file_id': file_id}
file_info = bot.call(
'getFile',
'GET',
data=data
)
file_path = file_info.get('file_path')
file_url = "https://api.telegram.org/file/bot{}/{}".format(bot.settings.token, file_path)
<|code_end|>
with the help of current file imports:
import twitter
from modules.utils.data import prepare_binary_from_url
and context from other files:
# Path: modules/utils/data.py
# def prepare_binary_from_url(url, verify=True):
# try:
# content = requests.get(url, timeout=(1, 3), verify=verify).content
# except RequestException:
# pass
# else:
# return BytesIO(content)
, which may contain function names, class names, or code. Output only the next line. | media = prepare_binary_from_url(file_url) |
Using the snippet: <|code_start|> if current_color in palette:
pos = palette.index(current_color)
if pos == len(palette) - 1:
return palette[pos]
else:
return palette[pos + 1]
return 0xFFFFFF
class CPU(object):
def __init__(self):
self.registers = {}
self.st = {'ST0':0, 'ST1':0, 'ST2':0, 'ST3':0, 'ST4':0, 'ST5':0, 'ST6':0, 'ST7':0}
self.ctrl = {'CTRL':0}
self.segments = {'CS', 'DS', 'ES', 'FS', 'GS', 'SS'}
self.registers = {'RAX', 'RBX', 'RCX', 'RDX', 'RSI', 'RDI', 'RBP', 'RSP', 'R8', 'R9', 'R10', 'R11', 'R12', 'R13',
'R14', 'R15'}
self.flags = {}
self.multimedia = {'XMM0':0, 'XMM1':0, 'XMM2':0, 'XMM3':0, 'XMM4':0, 'XMM5':0, 'XMM6':0, 'XMM7':0, 'XMM8':0, 'XMM9':0, 'XMM10':0, 'XMM11':0,
'XMM12':0, 'XMM13':0, 'XMM14':0, 'XMM15':0, 'MXCSR':0, 'MM0':0, 'MM1':0, 'MM2':0, 'MM3':0, 'MM4':0, 'MM5':0, 'MM6':0, 'MM7':0}
def get_reg(reg_string, reg_size):
"""
returns the register name to be used as key with a Traceline.ctx object
:param reg_string: any string representing a reg, e.g. rax, RAX, eax, ah, al, etc.
:param reg_size: size in bit of the registers in Traceline.ctx, e.g. 64, 32, 16
:return: reg_string of the ctx keys, e.g. rax
"""
<|code_end|>
, determine the next line of code. You have imports:
import idaapi
from idc import *
from idautils import *
from lib.Register import get_reg_class, get_reg_by_size
and context (class names, function names, or code) available:
# Path: lib/Register.py
# def get_reg_class(reg):
# """
# @brief Determines the register class of a given reg.
# All different register names that address the same register
# belong to the same register class e.g.: 'ax' and 'eax'
# @param reg name of register
# @return register class
# """
# lreg = reg.lower()
# ret_value = None
# for pos, reg_list in enumerate(_registerClasses):
# for reg in reg_list:
# found = False
# if reg == lreg:
# found = True
# ret_value = pos
# break
# if found:
# break
# return ret_value
#
# def get_reg_by_size(reg_class, reg_size):
# """
# @brief Determines the register by its size and class
# @param reg_class The register class of the register
# @param reg_size The size of the register
# @return Name of the register
# """
# if reg_class >= len(_registerClasses):
# return None
# num_regs = len(_registerClasses[reg_class])
# if num_regs < 4:
# return None
# reg_index = -1
# if reg_size > 32: # 64-bit regs
# reg_index = num_regs - 1
# elif reg_size > 16: # 32-bit regs
# reg_index = num_regs - 2
# elif reg_size > 8: # 16-bit regs
# reg_index = num_regs - 3
# elif reg_size > 0: # 8-bit regs
# reg_index = 0
# else:
# return None
# return _registerClasses[reg_class][reg_index]
. Output only the next line. | return get_reg_by_size(get_reg_class(reg_string), reg_size) |
Given snippet: <|code_start|> if current_color in palette:
pos = palette.index(current_color)
if pos == len(palette) - 1:
return palette[pos]
else:
return palette[pos + 1]
return 0xFFFFFF
class CPU(object):
def __init__(self):
self.registers = {}
self.st = {'ST0':0, 'ST1':0, 'ST2':0, 'ST3':0, 'ST4':0, 'ST5':0, 'ST6':0, 'ST7':0}
self.ctrl = {'CTRL':0}
self.segments = {'CS', 'DS', 'ES', 'FS', 'GS', 'SS'}
self.registers = {'RAX', 'RBX', 'RCX', 'RDX', 'RSI', 'RDI', 'RBP', 'RSP', 'R8', 'R9', 'R10', 'R11', 'R12', 'R13',
'R14', 'R15'}
self.flags = {}
self.multimedia = {'XMM0':0, 'XMM1':0, 'XMM2':0, 'XMM3':0, 'XMM4':0, 'XMM5':0, 'XMM6':0, 'XMM7':0, 'XMM8':0, 'XMM9':0, 'XMM10':0, 'XMM11':0,
'XMM12':0, 'XMM13':0, 'XMM14':0, 'XMM15':0, 'MXCSR':0, 'MM0':0, 'MM1':0, 'MM2':0, 'MM3':0, 'MM4':0, 'MM5':0, 'MM6':0, 'MM7':0}
def get_reg(reg_string, reg_size):
"""
returns the register name to be used as key with a Traceline.ctx object
:param reg_string: any string representing a reg, e.g. rax, RAX, eax, ah, al, etc.
:param reg_size: size in bit of the registers in Traceline.ctx, e.g. 64, 32, 16
:return: reg_string of the ctx keys, e.g. rax
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import idaapi
from idc import *
from idautils import *
from lib.Register import get_reg_class, get_reg_by_size
and context:
# Path: lib/Register.py
# def get_reg_class(reg):
# """
# @brief Determines the register class of a given reg.
# All different register names that address the same register
# belong to the same register class e.g.: 'ax' and 'eax'
# @param reg name of register
# @return register class
# """
# lreg = reg.lower()
# ret_value = None
# for pos, reg_list in enumerate(_registerClasses):
# for reg in reg_list:
# found = False
# if reg == lreg:
# found = True
# ret_value = pos
# break
# if found:
# break
# return ret_value
#
# def get_reg_by_size(reg_class, reg_size):
# """
# @brief Determines the register by its size and class
# @param reg_class The register class of the register
# @param reg_size The size of the register
# @return Name of the register
# """
# if reg_class >= len(_registerClasses):
# return None
# num_regs = len(_registerClasses[reg_class])
# if num_regs < 4:
# return None
# reg_index = -1
# if reg_size > 32: # 64-bit regs
# reg_index = num_regs - 1
# elif reg_size > 16: # 32-bit regs
# reg_index = num_regs - 2
# elif reg_size > 8: # 16-bit regs
# reg_index = num_regs - 3
# elif reg_size > 0: # 8-bit regs
# reg_index = 0
# else:
# return None
# return _registerClasses[reg_class][reg_index]
which might include code, classes, or functions. Output only the next line. | return get_reg_by_size(get_reg_class(reg_string), reg_size) |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
"""
@author: Tobias
"""
class Instruction(object):
"""
@brief Implements the interface to distorm3 Instructions
"""
def __init__(self, offset, code, type = distorm3.Decode32Bits, feature = 0):
"""
@param offset Address of the instruction
@param code Opcode bytes of the instruction
@param type Dissassemble 32 or 64 bit code
@param feature Possible settings for distrom3
not used at the moment
"""
self.valid = False
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import distorm3
from lib import StartVal as SV
and context:
# Path: lib/StartVal.py
# ASSEMBLER_64 = 64
# ASSEMBLER_32 = 32
which might include code, classes, or functions. Output only the next line. | if SV.dissassm_type == 64: |
Using the snippet: <|code_start|> is_mov_ebp(ps_lst, 0, len(ps_lst)-1)) or
not has_loc):
val_arr.append(PI.PseudoOperand(PI.EXP_T, 'RET_ADDR', 0))
val_arr.append(PI.PseudoOperand(PI.EXP_T, 'ARGS', 0))
new_op = PI.ArrayOperand(
PI.ARRAY_T, ps_lst[push_poss[0]].size,
len(val_arr), val_arr)
new_inst = PI.PseudoInstruction(
item.mnem, item.addr,
[new_op], item.size,
item.inst_type, item.inst_class)
#new_inst.comment = comment
ret.append(new_inst)
else:
ret.append(item)
return ret
#just do this after reduction
def return_push_ebp(ps_lst):
"""
@brief Replace all array operands, which are not part of an assignement,
with 'push ebp' or 'push rbp'
@param ps_lst List of PseudoInstructions
@remark Just do this after 'replace_push_ebp' and 'reduce_assignements'
"""
for inst in ps_lst:
if (inst.inst_type == PI.PUSH_T and
inst.op_lst[0].type == PI.ARRAY_T):
reg_class = get_reg_class('ebp')
<|code_end|>
, determine the next line of code. You have imports:
import lib.PseudoInstruction as PI
from lib import StartVal as SV
from lib.Register import (get_reg_class, get_reg_by_size)
and context (class names, function names, or code) available:
# Path: lib/StartVal.py
# ASSEMBLER_64 = 64
# ASSEMBLER_32 = 32
#
# Path: lib/Register.py
# def get_reg_class(reg):
# """
# @brief Determines the register class of a given reg.
# All different register names that address the same register
# belong to the same register class e.g.: 'ax' and 'eax'
# @param reg name of register
# @return register class
# """
# lreg = reg.lower()
# ret_value = None
# for pos, reg_list in enumerate(_registerClasses):
# for reg in reg_list:
# found = False
# if reg == lreg:
# found = True
# ret_value = pos
# break
# if found:
# break
# return ret_value
#
# def get_reg_by_size(reg_class, reg_size):
# """
# @brief Determines the register by its size and class
# @param reg_class The register class of the register
# @param reg_size The size of the register
# @return Name of the register
# """
# if reg_class >= len(_registerClasses):
# return None
# num_regs = len(_registerClasses[reg_class])
# if num_regs < 4:
# return None
# reg_index = -1
# if reg_size > 32: # 64-bit regs
# reg_index = num_regs - 1
# elif reg_size > 16: # 32-bit regs
# reg_index = num_regs - 2
# elif reg_size > 8: # 16-bit regs
# reg_index = num_regs - 3
# elif reg_size > 0: # 8-bit regs
# reg_index = 0
# else:
# return None
# return _registerClasses[reg_class][reg_index]
. Output only the next line. | register = get_reg_by_size(reg_class, SV.dissassm_type) |
Given the code snippet: <|code_start|> @brief Starts recursiv search for jmp addresses
@param pp_lst List of PseudoInstructions push/pop represtentation
@param jmp_pos Index of jump instruction
@return List of Tuple: (position of jump addr, address of jump instruction)
"""
jmp_inst = pp_lst[jmp_pos]
if jmp_inst.list_len == 0:
print 'could not find jmp address'
return []
jmp_op = jmp_inst.op_lst[0]
pos_lst = rec_find_addr(pp_lst, jmp_pos, jmp_op, 20)
ret_lst = []
for x in pos_lst:
ret_lst.append((x, pp_lst[jmp_pos].addr))
return ret_lst
def rec_find_addr(pp_lst, pos, op, max_rec_depth):
"""
@brief Recursiv search for finding jmp addresses
@param pp_lst List of PseudoInstructions push/pop represtentation
@param pos Index of jump instruction
@param op Operand of jump instruction
@param max_rec_depth Maximal recursion depth
@return List positions which are used to calc jump address
"""
if max_rec_depth == 0:
return []
if(op.type == PI.IMMEDIATE_T or
(op.type == PI.REGISTER_T and
<|code_end|>
, generate the next line using the imports in this file:
import lib.PseudoInstruction as PI
from lib import StartVal as SV
from lib.Register import (get_reg_class, get_reg_by_size)
and context (functions, classes, or occasionally code) from other files:
# Path: lib/StartVal.py
# ASSEMBLER_64 = 64
# ASSEMBLER_32 = 32
#
# Path: lib/Register.py
# def get_reg_class(reg):
# """
# @brief Determines the register class of a given reg.
# All different register names that address the same register
# belong to the same register class e.g.: 'ax' and 'eax'
# @param reg name of register
# @return register class
# """
# lreg = reg.lower()
# ret_value = None
# for pos, reg_list in enumerate(_registerClasses):
# for reg in reg_list:
# found = False
# if reg == lreg:
# found = True
# ret_value = pos
# break
# if found:
# break
# return ret_value
#
# def get_reg_by_size(reg_class, reg_size):
# """
# @brief Determines the register by its size and class
# @param reg_class The register class of the register
# @param reg_size The size of the register
# @return Name of the register
# """
# if reg_class >= len(_registerClasses):
# return None
# num_regs = len(_registerClasses[reg_class])
# if num_regs < 4:
# return None
# reg_index = -1
# if reg_size > 32: # 64-bit regs
# reg_index = num_regs - 1
# elif reg_size > 16: # 32-bit regs
# reg_index = num_regs - 2
# elif reg_size > 8: # 16-bit regs
# reg_index = num_regs - 3
# elif reg_size > 0: # 8-bit regs
# reg_index = 0
# else:
# return None
# return _registerClasses[reg_class][reg_index]
. Output only the next line. | get_reg_class(op.register) == get_reg_class('ebp'))): |
Given snippet: <|code_start|> is_mov_ebp(ps_lst, 0, len(ps_lst)-1)) or
not has_loc):
val_arr.append(PI.PseudoOperand(PI.EXP_T, 'RET_ADDR', 0))
val_arr.append(PI.PseudoOperand(PI.EXP_T, 'ARGS', 0))
new_op = PI.ArrayOperand(
PI.ARRAY_T, ps_lst[push_poss[0]].size,
len(val_arr), val_arr)
new_inst = PI.PseudoInstruction(
item.mnem, item.addr,
[new_op], item.size,
item.inst_type, item.inst_class)
#new_inst.comment = comment
ret.append(new_inst)
else:
ret.append(item)
return ret
#just do this after reduction
def return_push_ebp(ps_lst):
"""
@brief Replace all array operands, which are not part of an assignement,
with 'push ebp' or 'push rbp'
@param ps_lst List of PseudoInstructions
@remark Just do this after 'replace_push_ebp' and 'reduce_assignements'
"""
for inst in ps_lst:
if (inst.inst_type == PI.PUSH_T and
inst.op_lst[0].type == PI.ARRAY_T):
reg_class = get_reg_class('ebp')
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import lib.PseudoInstruction as PI
from lib import StartVal as SV
from lib.Register import (get_reg_class, get_reg_by_size)
and context:
# Path: lib/StartVal.py
# ASSEMBLER_64 = 64
# ASSEMBLER_32 = 32
#
# Path: lib/Register.py
# def get_reg_class(reg):
# """
# @brief Determines the register class of a given reg.
# All different register names that address the same register
# belong to the same register class e.g.: 'ax' and 'eax'
# @param reg name of register
# @return register class
# """
# lreg = reg.lower()
# ret_value = None
# for pos, reg_list in enumerate(_registerClasses):
# for reg in reg_list:
# found = False
# if reg == lreg:
# found = True
# ret_value = pos
# break
# if found:
# break
# return ret_value
#
# def get_reg_by_size(reg_class, reg_size):
# """
# @brief Determines the register by its size and class
# @param reg_class The register class of the register
# @param reg_size The size of the register
# @return Name of the register
# """
# if reg_class >= len(_registerClasses):
# return None
# num_regs = len(_registerClasses[reg_class])
# if num_regs < 4:
# return None
# reg_index = -1
# if reg_size > 32: # 64-bit regs
# reg_index = num_regs - 1
# elif reg_size > 16: # 32-bit regs
# reg_index = num_regs - 2
# elif reg_size > 8: # 16-bit regs
# reg_index = num_regs - 3
# elif reg_size > 0: # 8-bit regs
# reg_index = 0
# else:
# return None
# return _registerClasses[reg_class][reg_index]
which might include code, classes, or functions. Output only the next line. | register = get_reg_by_size(reg_class, SV.dissassm_type) |
Here is a snippet: <|code_start|># coding=utf-8
__author__ = 'Anatoli Kalysch'
# from PyQt5 import QtGui, QtCore, QtWidgets
####################
### STACK CHANGE ###
####################
class StackChangeViewer(PluginViewer):
def __init__(self, vr, sorted, stack_changes, title='Stack Changes Analysis'):
# context should be a dictionary containing the backward traced result of each relevant register
super(StackChangeViewer, self).__init__(title)
self.vr = vr
self.sorted = sorted
self.stack_changes = stack_changes
def PopulateModel(self):
for key in self.sorted:
<|code_end|>
. Write the next line using the current file imports:
from ui.PluginViewer import PluginViewer
from ui.UIManager import QtGui, QtCore, QtWidgets
and context from other files:
# Path: ui/PluginViewer.py
# class PluginViewer(PluginForm):
# def __init__(self, title):
# super(PluginViewer, self).__init__()
# self.title = title
#
# def Show(self, **kwargs):
# return PluginForm.Show(self, self.title, options=PluginForm.FORM_PERSIST)
#
# def OnCreate(self, form):
# # Get parent widget
# self.parent = form_to_widget(form)
# self.PopulateForm()
#
# def PopulateForm(self):
# ### do stuff
# pass
#
# def OnClose(self, form):
# msg("Closed %s.\n" % self.title)
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
, which may include functions, classes, or code. Output only the next line. | sa = QtGui.QStandardItem('%s' % key) |
Next line prediction: <|code_start|> super(StackChangeViewer, self).__init__(title)
self.vr = vr
self.sorted = sorted
self.stack_changes = stack_changes
def PopulateModel(self):
for key in self.sorted:
sa = QtGui.QStandardItem('%s' % key)
chg = QtGui.QStandardItem('%s' % self.stack_changes[key])
if key in self.vr.values():
reg = QtGui.QStandardItem('%s' % [k for k in self.vr.keys() if self.vr[k] == key][0])
else:
reg = QtGui.QStandardItem(' ')
self.sim.appendRow([sa, reg, chg])
self.treeView.resizeColumnToContents(0)
self.treeView.resizeColumnToContents(1)
self.treeView.resizeColumnToContents(2)
def PopulateForm(self):
### init widgets
# model
self.sim = QtGui.QStandardItemModel()
self.sim.setHorizontalHeaderLabels(['Stack Address', 'Address Mapped to CPU Reg', 'Value Changes during Execution'])
# tree view
<|code_end|>
. Use current file imports:
(from ui.PluginViewer import PluginViewer
from ui.UIManager import QtGui, QtCore, QtWidgets)
and context including class names, function names, or small code snippets from other files:
# Path: ui/PluginViewer.py
# class PluginViewer(PluginForm):
# def __init__(self, title):
# super(PluginViewer, self).__init__()
# self.title = title
#
# def Show(self, **kwargs):
# return PluginForm.Show(self, self.title, options=PluginForm.FORM_PERSIST)
#
# def OnCreate(self, form):
# # Get parent widget
# self.parent = form_to_widget(form)
# self.PopulateForm()
#
# def PopulateForm(self):
# ### do stuff
# pass
#
# def OnClose(self, form):
# msg("Closed %s.\n" % self.title)
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
. Output only the next line. | self.treeView = QtWidgets.QTreeView() |
Next line prediction: <|code_start|># coding=utf-8
__author__ = 'Anatoli Kalysch'
####################
### STACK CHANGE ###
####################
class StackChangeViewer(PluginViewer):
def __init__(self, vr, sorted, stack_changes, title='Stack Changes Analysis (legacy)'):
# context should be a dictionary containing the backward traced result of each relevant register
super(StackChangeViewer, self).__init__(title)
self.vr = vr
self.sorted = sorted
self.stack_changes = stack_changes
def PopulateModel(self):
for key in self.sorted:
<|code_end|>
. Use current file imports:
(from ui.PluginViewer import PluginViewer
from ui.UIManager import QtGui)
and context including class names, function names, or small code snippets from other files:
# Path: ui/PluginViewer.py
# class PluginViewer(PluginForm):
# def __init__(self, title):
# super(PluginViewer, self).__init__()
# self.title = title
#
# def Show(self, **kwargs):
# return PluginForm.Show(self, self.title, options=PluginForm.FORM_PERSIST)
#
# def OnCreate(self, form):
# # Get parent widget
# self.parent = form_to_widget(form)
# self.PopulateForm()
#
# def PopulateForm(self):
# ### do stuff
# pass
#
# def OnClose(self, form):
# msg("Closed %s.\n" % self.title)
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
. Output only the next line. | sa = QtGui.QStandardItem('%s' % key) |
Given the code snippet: <|code_start|> clustering_analysis()
except Exception, e:
print '[*] Exception occured while running Clustering analysis!\n %s' % e.message
# optimizations
try:
optimization_analysis()
except Exception, e:
print '[*] Exception occured while running optimization analysis!\n %s' % e.message
# grade the trace line
try:
grading_automaton()
except Exception, e:
print '[*] Exception occured while running grading analysis!\n %s' % e.message
# Virtualization obfuscated interpretation
class VMAttack(plugin_t):
flags = PLUGIN_PROC
comment = "This Framework is supposed to help with the analysis of virtualization obfuscated binaries."
help = "HELP!"
wanted_name = "VMAttack"
wanted_hotkey = ""
def init(self):
self.vma_mgr = None
try:
self.vma_mgr = get_mgr()
self.vma_mgr.extend_menu()
#self.vma_mgr.welcome()
msg('[*] Starting VMAttack plugin...\n')
<|code_end|>
, generate the next line using the imports in this file:
from lib.Logging import get_log
from dynamic.dynamic_deobfuscate import *
from lib.VMRepresentation import *
from static.static_deobfuscate import *
from ui.AboutWindow import AboutWindow
from ui.UIManager import UIManager
import ui.SettingsWindow as SettingsWindow
and context (functions, classes, or occasionally code) from other files:
# Path: lib/Logging.py
# def get_log():
# global logEng
# if not logEng:
# logEng = LoggingEngine()
#
# return logEng
#
# Path: ui/AboutWindow.py
# class AboutWindow(QtWidgets.QDialog):
# def __init__(self, *args, **kwargs):
# super(AboutWindow, self).__init__(*args, **kwargs)
# self.setFixedSize(600, 250)
# self.setWindowTitle("About ...")
# self.title = "VMAttack IDA PRO Plugin"
# self.subtitle = "IDA Pro Plugin for static and dynamic virtualization-obfuscation analysis and deobfuscation"
# self.author = u"Anatoli Kalysch and Tobias Krauß"
# self.thanks = u"Special thanks to Johannes Götzfried for conceptual help along the way!"
# self.version = "Version 0.2"
# self.address = "Friedrich-Alexander University Erlangen-Nuremberg\n i1 Software Security Research Group \n"
#
# try:
# title = self.config_label(self.title, 16, True)
# subtitle = self.config_label(self.subtitle, 14)
# subtitle.move(0, title.height() + title.y() + 10)
# version = self.config_label(self.version, 12)
# version.move(0, subtitle.height() + subtitle.y() + 30)
# author = self.config_label(self.author, 12)
# author.move(0, version.height() + version.y())
# thanks = self.config_label(self.thanks, 12)
# thanks.move(0, author.height() + author.y())
# except Exception, e:
# print e.message
#
# self.show()
#
# def config_label(self, name, size, bold=False, alignment="center"):
# label = QtWidgets.QLabel(name, self)
# label.setWordWrap(True)
# font = label.font()
# font.setPointSize(size)
# font.setBold(bold)
# label.setFont(font)
# if alignment == "center":
# label.setAlignment(QtCore.Qt.AlignCenter)
# elif alignment == "right":
# label.setAlignment(QtCore.Qt.AlignRight)
# elif alignment == "left":
# label.setAlignment(QtCore.Qt.AlignLeft)
# label.setFixedWidth(600)
#
# return label
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
#
# self.window = None
# self.widget = None
# self.menu = None
# self.menu_dict = {}
# self.get_init_menu()
#
# # initial menu grab
# def get_init_menu(self):
# try:
# self.widget = form_to_widget(idaapi.get_current_tform())
# if self.widget is None:
# raise Exception()
# except:
# self.widget = form_to_widget(idaapi.find_tform('Output window'))
# self.window = self.widget.window()
# self.menu = self.window.findChild(QtWidgets.QMenuBar)
#
# # add top level menu
# def add_menu(self, name):
# if name in self.menu_dict:
# raise Exception("Menu name %s already exists." % name)
# menu = self.menu.addMenu(name)
# self.menu_dict[name] = menu
# # remove top level menu
# def remove_menu(self, name):
# if name not in self.menu_dict:
# raise Exception("Menu %s was not found. It might be deleted, or belong to another menu manager." % name)
#
# self.menu.removeAction(self.menu_dict[name].menuAction())
# del self.menu_dict[name]
#
# # remove all menus currently in dict
# def clear(self):
# for menu in self.menu_dict.itervalues():
# self.menu.removeAction(menu.menuAction())
# self.menu_dict = {}
#
# def add_view(self, view):
# pass
. Output only the next line. | get_log().log('[VMA] Starting VMAttack and initiating variables ...\n') |
Here is a snippet: <|code_start|>
@vm_returns.setter
def vm_returns(self, value):
self.vmr._vm_returns = value
@property
def vm_ctx(self):
return self.vmr._vm_ctx
@vm_ctx.setter
def vm_ctx(self, value):
self.vmr._vm_ctx = value
def select_debugger(self):
c = Choose([], "Choose your preferred debugger:", 1)
c.list = ["Currently selected IDA Debugger", "Bochs Dbg", "Win32 Dbg"] # TODO , "OllyDbg", "Immunity Dbg"]
c.width = 33
# choose() starts counting at 1, not 0
self.choice = c.choose() - 1
if self.choice == 1:
LoadDebugger('Bochs', 0)
elif self.choice == 2:
LoadDebugger('Win32', 0)
def update_vmr(self):
self._vmr = get_vmr()
### UI MANAGEMENT ###
@staticmethod
def show_about():
<|code_end|>
. Write the next line using the current file imports:
from lib.Logging import get_log
from dynamic.dynamic_deobfuscate import *
from lib.VMRepresentation import *
from static.static_deobfuscate import *
from ui.AboutWindow import AboutWindow
from ui.UIManager import UIManager
import ui.SettingsWindow as SettingsWindow
and context from other files:
# Path: lib/Logging.py
# def get_log():
# global logEng
# if not logEng:
# logEng = LoggingEngine()
#
# return logEng
#
# Path: ui/AboutWindow.py
# class AboutWindow(QtWidgets.QDialog):
# def __init__(self, *args, **kwargs):
# super(AboutWindow, self).__init__(*args, **kwargs)
# self.setFixedSize(600, 250)
# self.setWindowTitle("About ...")
# self.title = "VMAttack IDA PRO Plugin"
# self.subtitle = "IDA Pro Plugin for static and dynamic virtualization-obfuscation analysis and deobfuscation"
# self.author = u"Anatoli Kalysch and Tobias Krauß"
# self.thanks = u"Special thanks to Johannes Götzfried for conceptual help along the way!"
# self.version = "Version 0.2"
# self.address = "Friedrich-Alexander University Erlangen-Nuremberg\n i1 Software Security Research Group \n"
#
# try:
# title = self.config_label(self.title, 16, True)
# subtitle = self.config_label(self.subtitle, 14)
# subtitle.move(0, title.height() + title.y() + 10)
# version = self.config_label(self.version, 12)
# version.move(0, subtitle.height() + subtitle.y() + 30)
# author = self.config_label(self.author, 12)
# author.move(0, version.height() + version.y())
# thanks = self.config_label(self.thanks, 12)
# thanks.move(0, author.height() + author.y())
# except Exception, e:
# print e.message
#
# self.show()
#
# def config_label(self, name, size, bold=False, alignment="center"):
# label = QtWidgets.QLabel(name, self)
# label.setWordWrap(True)
# font = label.font()
# font.setPointSize(size)
# font.setBold(bold)
# label.setFont(font)
# if alignment == "center":
# label.setAlignment(QtCore.Qt.AlignCenter)
# elif alignment == "right":
# label.setAlignment(QtCore.Qt.AlignRight)
# elif alignment == "left":
# label.setAlignment(QtCore.Qt.AlignLeft)
# label.setFixedWidth(600)
#
# return label
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
#
# self.window = None
# self.widget = None
# self.menu = None
# self.menu_dict = {}
# self.get_init_menu()
#
# # initial menu grab
# def get_init_menu(self):
# try:
# self.widget = form_to_widget(idaapi.get_current_tform())
# if self.widget is None:
# raise Exception()
# except:
# self.widget = form_to_widget(idaapi.find_tform('Output window'))
# self.window = self.widget.window()
# self.menu = self.window.findChild(QtWidgets.QMenuBar)
#
# # add top level menu
# def add_menu(self, name):
# if name in self.menu_dict:
# raise Exception("Menu name %s already exists." % name)
# menu = self.menu.addMenu(name)
# self.menu_dict[name] = menu
# # remove top level menu
# def remove_menu(self, name):
# if name not in self.menu_dict:
# raise Exception("Menu %s was not found. It might be deleted, or belong to another menu manager." % name)
#
# self.menu.removeAction(self.menu_dict[name].menuAction())
# del self.menu_dict[name]
#
# # remove all menus currently in dict
# def clear(self):
# for menu in self.menu_dict.itervalues():
# self.menu.removeAction(menu.menuAction())
# self.menu_dict = {}
#
# def add_view(self, view):
# pass
, which may include functions, classes, or code. Output only the next line. | AboutWindow().exec_() |
Here is a snippet: <|code_start|># coding=utf-8
__author__ = 'Anatoli Kalysch'
class VMAttack_Manager(object):
def __init__(self):
self.choice = None
self._vmr = get_vmr()
# UI Management
<|code_end|>
. Write the next line using the current file imports:
from lib.Logging import get_log
from dynamic.dynamic_deobfuscate import *
from lib.VMRepresentation import *
from static.static_deobfuscate import *
from ui.AboutWindow import AboutWindow
from ui.UIManager import UIManager
import ui.SettingsWindow as SettingsWindow
and context from other files:
# Path: lib/Logging.py
# def get_log():
# global logEng
# if not logEng:
# logEng = LoggingEngine()
#
# return logEng
#
# Path: ui/AboutWindow.py
# class AboutWindow(QtWidgets.QDialog):
# def __init__(self, *args, **kwargs):
# super(AboutWindow, self).__init__(*args, **kwargs)
# self.setFixedSize(600, 250)
# self.setWindowTitle("About ...")
# self.title = "VMAttack IDA PRO Plugin"
# self.subtitle = "IDA Pro Plugin for static and dynamic virtualization-obfuscation analysis and deobfuscation"
# self.author = u"Anatoli Kalysch and Tobias Krauß"
# self.thanks = u"Special thanks to Johannes Götzfried for conceptual help along the way!"
# self.version = "Version 0.2"
# self.address = "Friedrich-Alexander University Erlangen-Nuremberg\n i1 Software Security Research Group \n"
#
# try:
# title = self.config_label(self.title, 16, True)
# subtitle = self.config_label(self.subtitle, 14)
# subtitle.move(0, title.height() + title.y() + 10)
# version = self.config_label(self.version, 12)
# version.move(0, subtitle.height() + subtitle.y() + 30)
# author = self.config_label(self.author, 12)
# author.move(0, version.height() + version.y())
# thanks = self.config_label(self.thanks, 12)
# thanks.move(0, author.height() + author.y())
# except Exception, e:
# print e.message
#
# self.show()
#
# def config_label(self, name, size, bold=False, alignment="center"):
# label = QtWidgets.QLabel(name, self)
# label.setWordWrap(True)
# font = label.font()
# font.setPointSize(size)
# font.setBold(bold)
# label.setFont(font)
# if alignment == "center":
# label.setAlignment(QtCore.Qt.AlignCenter)
# elif alignment == "right":
# label.setAlignment(QtCore.Qt.AlignRight)
# elif alignment == "left":
# label.setAlignment(QtCore.Qt.AlignLeft)
# label.setFixedWidth(600)
#
# return label
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
#
# self.window = None
# self.widget = None
# self.menu = None
# self.menu_dict = {}
# self.get_init_menu()
#
# # initial menu grab
# def get_init_menu(self):
# try:
# self.widget = form_to_widget(idaapi.get_current_tform())
# if self.widget is None:
# raise Exception()
# except:
# self.widget = form_to_widget(idaapi.find_tform('Output window'))
# self.window = self.widget.window()
# self.menu = self.window.findChild(QtWidgets.QMenuBar)
#
# # add top level menu
# def add_menu(self, name):
# if name in self.menu_dict:
# raise Exception("Menu name %s already exists." % name)
# menu = self.menu.addMenu(name)
# self.menu_dict[name] = menu
# # remove top level menu
# def remove_menu(self, name):
# if name not in self.menu_dict:
# raise Exception("Menu %s was not found. It might be deleted, or belong to another menu manager." % name)
#
# self.menu.removeAction(self.menu_dict[name].menuAction())
# del self.menu_dict[name]
#
# # remove all menus currently in dict
# def clear(self):
# for menu in self.menu_dict.itervalues():
# self.menu.removeAction(menu.menuAction())
# self.menu_dict = {}
#
# def add_view(self, view):
# pass
, which may include functions, classes, or code. Output only the next line. | self.ui_mgr = UIManager() |
Given the code snippet: <|code_start|> return '%s\t%s' % (self.disasm[0], self.disasm[1])
else:
return self.disasm[0]
def to_str_line(self):
return "%x %x %s\t\t%s\t\t%s" % (self.thread_id,
self.addr,
self.disasm_str(),
''.join(c for c in self.comment if self.comment is not None),
''.join('%s:%s ' % (c, self.ctx[c]) for c in self.ctx.keys() if isinstance(self.ctx, dict)))
@property
def is_mov(self):
return self._line[2][0].__contains__('mov')
@property
def is_pop(self):
return self._line[2][0].startswith('pop')
@property
def is_push(self):
return self._line[2][0].startswith('push')
@property
def is_jmp(self):
# returns true for conditional AND non-cond jumps
return self._line[2][0].startswith('j')
@property
def is_op1_reg(self):
try:
<|code_end|>
, generate the next line using the imports in this file:
from lib.Register import get_reg_class
and context (functions, classes, or occasionally code) from other files:
# Path: lib/Register.py
# def get_reg_class(reg):
# """
# @brief Determines the register class of a given reg.
# All different register names that address the same register
# belong to the same register class e.g.: 'ax' and 'eax'
# @param reg name of register
# @return register class
# """
# lreg = reg.lower()
# ret_value = None
# for pos, reg_list in enumerate(_registerClasses):
# for reg in reg_list:
# found = False
# if reg == lreg:
# found = True
# ret_value = pos
# break
# if found:
# break
# return ret_value
. Output only the next line. | return get_reg_class(self._line[2][1]) is not None |
Here is a snippet: <|code_start|> except:
pass
w.pbar_update(5)
### RECURSION ###
try:
recursion = 0
vm_func = find_vm_addr(orig_trace)
for line in orig_trace:
if line.disasm[0].startswith('call') and line.disasm[1].__contains__(vm_func):
recursion = recursion + 1
except:
pass
w.close()
grades = set([line.grade for line in trace])
max_grade = max(grades)
# raise the trace lines grade containing calls to maximum grade
try:
# such nach call und vm_addr
for line in trace:
if line.disasm[0].startswith('call') and line.disasm[1].__contains__(vm_func):
line.grade = max_grade
elif line.disasm[1].__contains__('ss:') or line.disasm[2].__contains('ss:'):
line.grade = max_grade
except:
pass
if visualization == 0:
<|code_end|>
. Write the next line using the current file imports:
from threading import Thread
from ui.UIManager import GradingViewer
from ui.UIManager import OptimizationViewer
from ui.UIManager import StackChangeViewer
from ui.UIManager import VMInputOuputViewer
from DebuggerHandler import load, save, get_dh
from lib.TraceAnalysis import *
from lib.VMRepresentation import get_vmr
from ui.NotifyProgress import NotifyProgress
from ui.UIManager import ClusterViewer
from IDADebugger import IDADebugger
from OllyDebugger import OllyDebugger
from IDADebugger import IDADebugger
from IDADebugger import IDADebugger
from IDADebugger import IDADebugger
and context from other files:
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: lib/VMRepresentation.py
# def get_vmr():
# """
# Get the VMR instance.
# :return: vmr
# """
# global vmr
# if vmr is None:
# vmr = VMRepresentation()
# return vmr
#
# Path: ui/NotifyProgress.py
# class NotifyProgress(QtWidgets.QWidget):
# def __init__(self, name='current', *args, **kwargs):
# super(NotifyProgress, self).__init__(*args, **kwargs)
# self.analysis = name
# self.pbar = QtWidgets.QProgressBar(self)
# self.pbar.setGeometry(30, 40, 370, 25)
# self.value = 0
# self.setFixedSize(400, 100)
# self.setWindowTitle('Running %s Analysis...' % self.analysis)
#
# def pbar_update(self, value):
# self.value += value
# if self.value > 100:
# self.value = 100
# self.close()
# self.pbar.setValue(self.value)
#
# def pbar_set(self, value):
# self.pbar.setValue(value)
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
, which may include functions, classes, or code. Output only the next line. | v = GradingViewer(trace, save=save) |
Continue the code snippet: <|code_start|>
v0 = ClusterViewer(cluster, create_bb_diff, trace.ctx_reg_size, save_func=save)
w.pbar_update(24)
v0.Show()
prev_ctx = defaultdict(lambda: 0)
stack_changes = defaultdict(lambda: 0)
for line in cluster:
if isinstance(line, Traceline):
prev_ctx = line.ctx
else:
stack_changes = create_cluster_gist(line, trace.ctx_reg_size, prev_ctx, stack_changes)
prev_ctx = line[-1].ctx
# sort the stack_changes by address
sorted_result = sorted(stack_changes.keys())
sorted_result.reverse()
w.close()
v1 = StackChangeViewer(vr, sorted_result, stack_changes)
v1.Show()
else:
w.close()
visualize_cli(cluster)
except:
w.close()
def optimization_analysis():
"""
Opens the Optimization Viewer to let the user dynamically interact with the trace.
"""
trace = prepare_trace()
<|code_end|>
. Use current file imports:
from threading import Thread
from ui.UIManager import GradingViewer
from ui.UIManager import OptimizationViewer
from ui.UIManager import StackChangeViewer
from ui.UIManager import VMInputOuputViewer
from DebuggerHandler import load, save, get_dh
from lib.TraceAnalysis import *
from lib.VMRepresentation import get_vmr
from ui.NotifyProgress import NotifyProgress
from ui.UIManager import ClusterViewer
from IDADebugger import IDADebugger
from OllyDebugger import OllyDebugger
from IDADebugger import IDADebugger
from IDADebugger import IDADebugger
from IDADebugger import IDADebugger
and context (classes, functions, or code) from other files:
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: lib/VMRepresentation.py
# def get_vmr():
# """
# Get the VMR instance.
# :return: vmr
# """
# global vmr
# if vmr is None:
# vmr = VMRepresentation()
# return vmr
#
# Path: ui/NotifyProgress.py
# class NotifyProgress(QtWidgets.QWidget):
# def __init__(self, name='current', *args, **kwargs):
# super(NotifyProgress, self).__init__(*args, **kwargs)
# self.analysis = name
# self.pbar = QtWidgets.QProgressBar(self)
# self.pbar.setGeometry(30, 40, 370, 25)
# self.value = 0
# self.setFixedSize(400, 100)
# self.setWindowTitle('Running %s Analysis...' % self.analysis)
#
# def pbar_update(self, value):
# self.value += value
# if self.value > 100:
# self.value = 100
# self.close()
# self.pbar.setValue(self.value)
#
# def pbar_set(self, value):
# self.pbar.setValue(value)
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
. Output only the next line. | v = OptimizationViewer(trace, save=save) |
Given the code snippet: <|code_start|> if not trace.constant_propagation:
trace = optimization_const_propagation(trace)
if not trace.stack_addr_propagation:
trace = optimization_stack_addr_propagation(trace)
except:
pass
w.pbar_update(30)
# cluster
vr = find_virtual_regs(deepcopy(trace))
w.pbar_update(20)
cluster = repetition_clustering(deepcopy(trace))
w.pbar_update(25)
if visualization == 0:
v0 = ClusterViewer(cluster, create_bb_diff, trace.ctx_reg_size, save_func=save)
w.pbar_update(24)
v0.Show()
prev_ctx = defaultdict(lambda: 0)
stack_changes = defaultdict(lambda: 0)
for line in cluster:
if isinstance(line, Traceline):
prev_ctx = line.ctx
else:
stack_changes = create_cluster_gist(line, trace.ctx_reg_size, prev_ctx, stack_changes)
prev_ctx = line[-1].ctx
# sort the stack_changes by address
sorted_result = sorted(stack_changes.keys())
sorted_result.reverse()
w.close()
<|code_end|>
, generate the next line using the imports in this file:
from threading import Thread
from ui.UIManager import GradingViewer
from ui.UIManager import OptimizationViewer
from ui.UIManager import StackChangeViewer
from ui.UIManager import VMInputOuputViewer
from DebuggerHandler import load, save, get_dh
from lib.TraceAnalysis import *
from lib.VMRepresentation import get_vmr
from ui.NotifyProgress import NotifyProgress
from ui.UIManager import ClusterViewer
from IDADebugger import IDADebugger
from OllyDebugger import OllyDebugger
from IDADebugger import IDADebugger
from IDADebugger import IDADebugger
from IDADebugger import IDADebugger
and context (functions, classes, or occasionally code) from other files:
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: lib/VMRepresentation.py
# def get_vmr():
# """
# Get the VMR instance.
# :return: vmr
# """
# global vmr
# if vmr is None:
# vmr = VMRepresentation()
# return vmr
#
# Path: ui/NotifyProgress.py
# class NotifyProgress(QtWidgets.QWidget):
# def __init__(self, name='current', *args, **kwargs):
# super(NotifyProgress, self).__init__(*args, **kwargs)
# self.analysis = name
# self.pbar = QtWidgets.QProgressBar(self)
# self.pbar.setGeometry(30, 40, 370, 25)
# self.value = 0
# self.setFixedSize(400, 100)
# self.setWindowTitle('Running %s Analysis...' % self.analysis)
#
# def pbar_update(self, value):
# self.value += value
# if self.value > 100:
# self.value = 100
# self.close()
# self.pbar.setValue(self.value)
#
# def pbar_set(self, value):
# self.pbar.setValue(value)
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
. Output only the next line. | v1 = StackChangeViewer(vr, sorted_result, stack_changes) |
Given snippet: <|code_start|> try:
if func_addr is not None: # TODO enable input / output analysis of all functions
input = find_input(deepcopy(trace))
output = find_output(deepcopy(trace))
w.close()
else:
vr = DynamicAnalyzer(find_virtual_regs, trace)
w.pbar_update(10)
vr.start()
input = DynamicAnalyzer(find_input, trace)
w.pbar_update(10)
input.start()
output = DynamicAnalyzer(find_output, trace)
w.pbar_update(10)
output.start()
vr.join()
w.pbar_update(20)
vr = vr.get_result()
# create the trace excerpt for every relevant reg
for key in vr.keys():
if get_reg_class(key) is not None:
ctx[key] = follow_virt_reg(deepcopy(trace), virt_reg_addr=vr[key], real_reg_name=key)
vmr.vm_stack_reg_mapping = ctx
w.pbar_update(20)
input.join()
w.pbar_update(10)
output.join()
w.pbar_update(10)
w.close()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from threading import Thread
from ui.UIManager import GradingViewer
from ui.UIManager import OptimizationViewer
from ui.UIManager import StackChangeViewer
from ui.UIManager import VMInputOuputViewer
from DebuggerHandler import load, save, get_dh
from lib.TraceAnalysis import *
from lib.VMRepresentation import get_vmr
from ui.NotifyProgress import NotifyProgress
from ui.UIManager import ClusterViewer
from IDADebugger import IDADebugger
from OllyDebugger import OllyDebugger
from IDADebugger import IDADebugger
from IDADebugger import IDADebugger
from IDADebugger import IDADebugger
and context:
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: lib/VMRepresentation.py
# def get_vmr():
# """
# Get the VMR instance.
# :return: vmr
# """
# global vmr
# if vmr is None:
# vmr = VMRepresentation()
# return vmr
#
# Path: ui/NotifyProgress.py
# class NotifyProgress(QtWidgets.QWidget):
# def __init__(self, name='current', *args, **kwargs):
# super(NotifyProgress, self).__init__(*args, **kwargs)
# self.analysis = name
# self.pbar = QtWidgets.QProgressBar(self)
# self.pbar.setGeometry(30, 40, 370, 25)
# self.value = 0
# self.setFixedSize(400, 100)
# self.setWindowTitle('Running %s Analysis...' % self.analysis)
#
# def pbar_update(self, value):
# self.value += value
# if self.value > 100:
# self.value = 100
# self.close()
# self.pbar.setValue(self.value)
#
# def pbar_set(self, value):
# self.pbar.setValue(value)
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
which might include code, classes, or functions. Output only the next line. | v = VMInputOuputViewer(input.get_result(), output.get_result(), ctx) |
Based on the snippet: <|code_start|># IDA Debugger
def load_idadbg(self):
return IDADebugger()
# OllyDbg
def load_olly(self):
return OllyDebugger()
# Bochs Dbg
def load_bochsdbg(self):
LoadDebugger('Bochs', 0)
return IDADebugger()
# Win32 Dbg
def load_win32dbg(self):
LoadDebugger('win32', 0)
return IDADebugger()
# Immunity Dbg
def load_immunitydbg(self):
return IDADebugger()
# Working with Win32Dbg, BochsDbg, OllyDbg
available_debuggers = [load_idadbg, load_olly, load_bochsdbg, load_win32dbg, load_immunitydbg]
### INIT AND LOAD CONTEXT ###
def prepare_trace():
<|code_end|>
, predict the immediate next line with the help of imports:
from threading import Thread
from ui.UIManager import GradingViewer
from ui.UIManager import OptimizationViewer
from ui.UIManager import StackChangeViewer
from ui.UIManager import VMInputOuputViewer
from DebuggerHandler import load, save, get_dh
from lib.TraceAnalysis import *
from lib.VMRepresentation import get_vmr
from ui.NotifyProgress import NotifyProgress
from ui.UIManager import ClusterViewer
from IDADebugger import IDADebugger
from OllyDebugger import OllyDebugger
from IDADebugger import IDADebugger
from IDADebugger import IDADebugger
from IDADebugger import IDADebugger
and context (classes, functions, sometimes code) from other files:
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: lib/VMRepresentation.py
# def get_vmr():
# """
# Get the VMR instance.
# :return: vmr
# """
# global vmr
# if vmr is None:
# vmr = VMRepresentation()
# return vmr
#
# Path: ui/NotifyProgress.py
# class NotifyProgress(QtWidgets.QWidget):
# def __init__(self, name='current', *args, **kwargs):
# super(NotifyProgress, self).__init__(*args, **kwargs)
# self.analysis = name
# self.pbar = QtWidgets.QProgressBar(self)
# self.pbar.setGeometry(30, 40, 370, 25)
# self.value = 0
# self.setFixedSize(400, 100)
# self.setWindowTitle('Running %s Analysis...' % self.analysis)
#
# def pbar_update(self, value):
# self.value += value
# if self.value > 100:
# self.value = 100
# self.close()
# self.pbar.setValue(self.value)
#
# def pbar_set(self, value):
# self.pbar.setValue(value)
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
. Output only the next line. | vmr = get_vmr() |
Given the code snippet: <|code_start|> """
dbg_handl = get_dh(choice)
vmr = get_vmr()
trace = dbg_handl.gen_instruction_trace()
if trace is not None:
vmr.trace = trace
else:
raise Exception('[*] Trace seems to be None, so it was disregarded!')
### ANALYSIS FUNCTIONALITY###
# TODO multithreading !!!
class DynamicAnalyzer(Thread):
def __init__(self, func, trace, **kwargs):
super(DynamicAnalyzer, self).__init__()
self.analysis = func
self.trace = deepcopy(trace)
self.kwargs = kwargs
self.result = None
def run(self):
self.result = self.analysis(self.trace, self.kwargs)
def get_result(self):
return self.result
def address_heuristic():
"""
Compute the occurrence of every address in the instruction trace.
"""
<|code_end|>
, generate the next line using the imports in this file:
from threading import Thread
from ui.UIManager import GradingViewer
from ui.UIManager import OptimizationViewer
from ui.UIManager import StackChangeViewer
from ui.UIManager import VMInputOuputViewer
from DebuggerHandler import load, save, get_dh
from lib.TraceAnalysis import *
from lib.VMRepresentation import get_vmr
from ui.NotifyProgress import NotifyProgress
from ui.UIManager import ClusterViewer
from IDADebugger import IDADebugger
from OllyDebugger import OllyDebugger
from IDADebugger import IDADebugger
from IDADebugger import IDADebugger
from IDADebugger import IDADebugger
and context (functions, classes, or occasionally code) from other files:
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: lib/VMRepresentation.py
# def get_vmr():
# """
# Get the VMR instance.
# :return: vmr
# """
# global vmr
# if vmr is None:
# vmr = VMRepresentation()
# return vmr
#
# Path: ui/NotifyProgress.py
# class NotifyProgress(QtWidgets.QWidget):
# def __init__(self, name='current', *args, **kwargs):
# super(NotifyProgress, self).__init__(*args, **kwargs)
# self.analysis = name
# self.pbar = QtWidgets.QProgressBar(self)
# self.pbar.setGeometry(30, 40, 370, 25)
# self.value = 0
# self.setFixedSize(400, 100)
# self.setWindowTitle('Running %s Analysis...' % self.analysis)
#
# def pbar_update(self, value):
# self.value += value
# if self.value > 100:
# self.value = 100
# self.close()
# self.pbar.setValue(self.value)
#
# def pbar_set(self, value):
# self.pbar.setValue(value)
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
. Output only the next line. | w = NotifyProgress('Address count') |
Using the snippet: <|code_start|>
def clustering_analysis(visualization=0, grade=False, trace=None):
"""
Clustering analysis wrapper which clusters the trace into repeating instructions and presents the results in the Clustering Viewer.
:param visualization: output via Clustering Viewer or output window
:param grade: grading
:param trace: instruction trace
"""
if trace is None:
trace = prepare_trace()
w = NotifyProgress('Clustering')
w.show()
try:
try:
if not trace.constant_propagation:
trace = optimization_const_propagation(trace)
if not trace.stack_addr_propagation:
trace = optimization_stack_addr_propagation(trace)
except:
pass
w.pbar_update(30)
# cluster
vr = find_virtual_regs(deepcopy(trace))
w.pbar_update(20)
cluster = repetition_clustering(deepcopy(trace))
w.pbar_update(25)
if visualization == 0:
<|code_end|>
, determine the next line of code. You have imports:
from threading import Thread
from ui.UIManager import GradingViewer
from ui.UIManager import OptimizationViewer
from ui.UIManager import StackChangeViewer
from ui.UIManager import VMInputOuputViewer
from DebuggerHandler import load, save, get_dh
from lib.TraceAnalysis import *
from lib.VMRepresentation import get_vmr
from ui.NotifyProgress import NotifyProgress
from ui.UIManager import ClusterViewer
from IDADebugger import IDADebugger
from OllyDebugger import OllyDebugger
from IDADebugger import IDADebugger
from IDADebugger import IDADebugger
from IDADebugger import IDADebugger
and context (class names, function names, or code) available:
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
#
# Path: lib/VMRepresentation.py
# def get_vmr():
# """
# Get the VMR instance.
# :return: vmr
# """
# global vmr
# if vmr is None:
# vmr = VMRepresentation()
# return vmr
#
# Path: ui/NotifyProgress.py
# class NotifyProgress(QtWidgets.QWidget):
# def __init__(self, name='current', *args, **kwargs):
# super(NotifyProgress, self).__init__(*args, **kwargs)
# self.analysis = name
# self.pbar = QtWidgets.QProgressBar(self)
# self.pbar.setGeometry(30, 40, 370, 25)
# self.value = 0
# self.setFixedSize(400, 100)
# self.setWindowTitle('Running %s Analysis...' % self.analysis)
#
# def pbar_update(self, value):
# self.value += value
# if self.value > 100:
# self.value = 100
# self.close()
# self.pbar.setValue(self.value)
#
# def pbar_set(self, value):
# self.pbar.setValue(value)
#
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
. Output only the next line. | v0 = ClusterViewer(cluster, create_bb_diff, trace.ctx_reg_size, save_func=save) |
Predict the next line after this snippet: <|code_start|> "<Clustering Importance :{iClu}>\n"
"<Pattern Importance :{iPaMa}>\n"
"<Memory Usage Importance :{iMeUs}>\n"
"<Static Analysis Importance:{iSta}>\n"
"\n"
"\n"
"Dynamic Analysis:\n"
"<Step Into System Libraries :{rStepInSysLibs}>\n"
'<Extract function parameters:{rFuncParams}>{cDynamicValues}>\n'
), {
'cClusterValues': Form.ChkGroupControl(("rShowBB", "rGreedyCluster")),
'cDynamicValues': Form.ChkGroupControl(('rStepInSysLibs', 'rFuncParams')),
'iClusterHeu': Form.NumericInput(tp=Form.FT_DEC),
'iInOut': Form.NumericInput(tp=Form.FT_DEC),
'iClu': Form.NumericInput(tp=Form.FT_DEC),
'iPaMa': Form.NumericInput(tp=Form.FT_DEC),
'iMeUs': Form.NumericInput(tp=Form.FT_DEC),
'iSta': Form.NumericInput(tp=Form.FT_DEC),
'iVMAddr': Form.NumericInput(tp=Form.FT_DEC),
'iBaseAddr': Form.NumericInput(tp=Form.FT_DEC),
'iCodeEnd': Form.NumericInput(tp=Form.FT_DEC),
'iCodeStart': Form.NumericInput(tp=Form.FT_DEC),
})
def OnButtonNop(self, code=0):
pass
def Show():
settings = SettingsView()
settings.Compile()
<|code_end|>
using the current file's imports:
from idaapi import Form, BADADDR
from lib.VMRepresentation import get_vmr, VMContext
and any relevant context from other files:
# Path: lib/VMRepresentation.py
# def get_vmr():
# """
# Get the VMR instance.
# :return: vmr
# """
# global vmr
# if vmr is None:
# vmr = VMRepresentation()
# return vmr
#
# class VMContext(object):
# def __init__(self):
# self.code_start = BADADDR
# self.code_end = BADADDR
# self.base_addr = BADADDR
# self.vm_addr = BADADDR
. Output only the next line. | vmr = get_vmr() |
Predict the next line after this snippet: <|code_start|>
def Show():
settings = SettingsView()
settings.Compile()
vmr = get_vmr()
vm_ctx = vmr.vm_ctx
settings.iCodeStart.value = vm_ctx.code_start
settings.iCodeEnd.value = vm_ctx.code_end
settings.iBaseAddr.value = vm_ctx.base_addr
settings.iVMAddr.value = vm_ctx.vm_addr
settings.rGreedyCluster.checked = vmr.greedy
settings.rShowBB.checked = vmr.bb
settings.iClusterHeu.value = vmr.cluster_magic
settings.iInOut.value = vmr.in_out
settings.iClu.value = vmr.clu
settings.iPaMa.value = vmr.pa_ma
settings.iMeUs.value = vmr.mem_use
settings.iSta.value = vmr.static
settings.rStepInSysLibs.checked = vmr.sys_libs
settings.rFuncParams.checked = vmr.extract_param
if settings.Execute() == 0: # Cancel
settings.Free()
else: # Confirm
vmr = get_vmr()
# VM values
<|code_end|>
using the current file's imports:
from idaapi import Form, BADADDR
from lib.VMRepresentation import get_vmr, VMContext
and any relevant context from other files:
# Path: lib/VMRepresentation.py
# def get_vmr():
# """
# Get the VMR instance.
# :return: vmr
# """
# global vmr
# if vmr is None:
# vmr = VMRepresentation()
# return vmr
#
# class VMContext(object):
# def __init__(self):
# self.code_start = BADADDR
# self.code_end = BADADDR
# self.base_addr = BADADDR
# self.vm_addr = BADADDR
. Output only the next line. | vm_ctx = VMContext() |
Here is a snippet: <|code_start|> elif self.size == 4:
end_str += '_d '
elif self.size == 8:
end_str += '_q '
elif self.inst_class != ASSIGNEMENT_T:
end_str += ' '
for pos, op in enumerate(self.op_lst):
if (self.inst_class == ASSIGNEMENT_T and pos == 0):
continue
if self.inst_type == READ_T and self.inst_class == ASSIGNEMENT_T:
end_str = end_str + '[' + str(op) + ']' + ', '
else:
end_str = end_str + str(op) + ', '
if (self.list_len != 0):
end_str = end_str[0:len(end_str) - 2] + '\n'
else:
end_str += '\n'
end_str = end_str.replace('+0x0', '')
return end_str
def get_scratch_variable(self):
"""
@brief Replace memory operands from 'vpush' and 'vpop' with
ScratchOperands
"""
if (self.inst_type != POP_T and
self.inst_type != PUSH_T and self.list_len != 1):
return
op0 = self.op_lst[0]
if (op0.type == MEMORY_T and
<|code_end|>
. Write the next line using the current file imports:
from lib.Register import (get_reg_class,
get_reg_class_lst)
from lib import StartVal as SV
and context from other files:
# Path: lib/Register.py
# def get_reg_class(reg):
# """
# @brief Determines the register class of a given reg.
# All different register names that address the same register
# belong to the same register class e.g.: 'ax' and 'eax'
# @param reg name of register
# @return register class
# """
# lreg = reg.lower()
# ret_value = None
# for pos, reg_list in enumerate(_registerClasses):
# for reg in reg_list:
# found = False
# if reg == lreg:
# found = True
# ret_value = pos
# break
# if found:
# break
# return ret_value
#
# def get_reg_class_lst(reg_class):
# """
# @return Returns the whole list of a given register class
# """
# return _registerClasses[reg_class]
#
# Path: lib/StartVal.py
# ASSEMBLER_64 = 64
# ASSEMBLER_32 = 32
, which may include functions, classes, or code. Output only the next line. | get_reg_class(op0.register) == get_reg_class('edi')): |
Here is a snippet: <|code_start|> end_str = end_str + str(op) + ', '
if (self.list_len != 0):
end_str = end_str[0:len(end_str) - 2] + '\n'
else:
end_str += '\n'
end_str = end_str.replace('+0x0', '')
return end_str
def get_scratch_variable(self):
"""
@brief Replace memory operands from 'vpush' and 'vpop' with
ScratchOperands
"""
if (self.inst_type != POP_T and
self.inst_type != PUSH_T and self.list_len != 1):
return
op0 = self.op_lst[0]
if (op0.type == MEMORY_T and
get_reg_class(op0.register) == get_reg_class('edi')):
self.op_lst[0] = ScratchOperand(SVARIABLE_T,
op0.displacement, op0.size)
def replace_reg_class(self, rreg, catch_value):
"""
@brief Replace register of evrey op with catch_value
if it is in the same register class as rreg
@param rreg Register to replace
@param catch_value Value that replaces the register
"""
reg_class = get_reg_class(rreg)
<|code_end|>
. Write the next line using the current file imports:
from lib.Register import (get_reg_class,
get_reg_class_lst)
from lib import StartVal as SV
and context from other files:
# Path: lib/Register.py
# def get_reg_class(reg):
# """
# @brief Determines the register class of a given reg.
# All different register names that address the same register
# belong to the same register class e.g.: 'ax' and 'eax'
# @param reg name of register
# @return register class
# """
# lreg = reg.lower()
# ret_value = None
# for pos, reg_list in enumerate(_registerClasses):
# for reg in reg_list:
# found = False
# if reg == lreg:
# found = True
# ret_value = pos
# break
# if found:
# break
# return ret_value
#
# def get_reg_class_lst(reg_class):
# """
# @return Returns the whole list of a given register class
# """
# return _registerClasses[reg_class]
#
# Path: lib/StartVal.py
# ASSEMBLER_64 = 64
# ASSEMBLER_32 = 32
, which may include functions, classes, or code. Output only the next line. | for reg in reversed(get_reg_class_lst(reg_class)): |
Next line prediction: <|code_start|> @brief Replace plain VmInstruction representation with a
push/pop representation. This representation needs temporal
variables and each of these temporal variables is unique
"""
ret = []
if self.inst_class == IN2_OUT2:
op0 = VariableOperand(VARIABLE_T, self.op_lst[0].size)
op1 = VariableOperand(VARIABLE_T, self.op_lst[1].size)
op0_size = self.op_lst[0].size / 8
# vm does not support byte pop/push maybe worng place for this
if op0_size < 2:
op0_size = 2
op1_size = self.op_lst[1].size / 8
if op1_size < 2:
op1_size = 2
ret.append(PseudoInstruction('vpop', self.addr,
[op0], op0_size, POP_T))
ret.append(PseudoInstruction('vpop', self.addr, [op1],
op1_size, POP_T))
assign_op = VariableOperand(VARIABLE_T, self.size)
assign_instruction = PseudoInstruction(
self.mnem, self.addr,
[assign_op, op0, op1], self.size,
self.inst_type, ASSIGNEMENT_T
)
ret.append(assign_instruction)
flagsop = VariableOperand(VARIABLE_T, self.size, True)
ret.append(PseudoInstruction('vpush', self.addr, [assign_op],
self.op_lst[0].size / 8, PUSH_T))
ret.append(PseudoInstruction('vpush', self.addr, [flagsop],
<|code_end|>
. Use current file imports:
(from lib.Register import (get_reg_class,
get_reg_class_lst)
from lib import StartVal as SV)
and context including class names, function names, or small code snippets from other files:
# Path: lib/Register.py
# def get_reg_class(reg):
# """
# @brief Determines the register class of a given reg.
# All different register names that address the same register
# belong to the same register class e.g.: 'ax' and 'eax'
# @param reg name of register
# @return register class
# """
# lreg = reg.lower()
# ret_value = None
# for pos, reg_list in enumerate(_registerClasses):
# for reg in reg_list:
# found = False
# if reg == lreg:
# found = True
# ret_value = pos
# break
# if found:
# break
# return ret_value
#
# def get_reg_class_lst(reg_class):
# """
# @return Returns the whole list of a given register class
# """
# return _registerClasses[reg_class]
#
# Path: lib/StartVal.py
# ASSEMBLER_64 = 64
# ASSEMBLER_32 = 32
. Output only the next line. | SV.dissassm_type / 8, PUSH_T)) |
Based on the snippet: <|code_start|># coding=utf-8
__author__ = 'Anatoli Kalysch'
class PluginViewer(PluginForm):
def __init__(self, title):
super(PluginViewer, self).__init__()
self.title = title
def Show(self, **kwargs):
return PluginForm.Show(self, self.title, options=PluginForm.FORM_PERSIST)
def OnCreate(self, form):
# Get parent widget
<|code_end|>
, predict the immediate next line with the help of imports:
from idaapi import PluginForm, msg
from ui.UIManager import form_to_widget
and context (classes, functions, sometimes code) from other files:
# Path: ui/UIManager.py
# class UIManager(object):
# def __init__(self):
# def get_init_menu(self):
# def add_menu(self, name):
# def remove_menu(self, name):
# def clear(self):
# def add_view(self, view):
. Output only the next line. | self.parent = form_to_widget(form) |
Using the snippet: <|code_start|>
if __name__ == '__main__':
DATA_PATH = sys.argv[1]
HDF5 = False
if HDF5:
print('Using hdf5')
<|code_end|>
, determine the next line of code. You have imports:
from crayimage.runutils import hdf5_disk_stream, np_disk_stream
import sys
import os.path as osp
import time
and context (class names, function names, or code) available:
# Path: crayimage/runutils/stream.py
# def hdf5_disk_stream(path, batch_sizes=8, cache_size=16):
# queue = Queue(maxsize=cache_size)
#
# worker = threading.Thread(
# target=hdf5_batch_worker,
# kwargs=dict(path=path, out_queue=queue, batch_sizes=batch_sizes)
# )
#
# worker.daemon = True
# worker.start()
#
# return queue_stream(queue)
#
# def np_disk_stream(data_root, batch_sizes=8, cache_size=16, mmap_mode='r'):
# bin_patches = [
# osp.join(data_root, 'bin_%d.npy' % i)
# for i in range(len(os.listdir(data_root)))
# ]
#
# if type(batch_sizes) in [long, int]:
# batch_sizes = [batch_sizes] * len(bin_patches)
#
# queues = [ Queue(maxsize=cache_size) for _ in bin_patches ]
#
# workers = [
# threading.Thread(
# target=np_batch_worker,
# kwargs=dict(path=path, out_queue=queue, batch_size=batch_size, mmap_mode=mmap_mode)
# )
#
# for path, queue, batch_size in zip(bin_patches, queues, batch_sizes)
# ]
#
# for worker in workers:
# worker.daemon = True
# worker.start()
#
# return queues_stream(queues)
. Output only the next line. | stream = hdf5_disk_stream(osp.join(DATA_PATH, 'Co60.hdf5'), batch_sizes=8, cache_size=16) |
Predict the next line for this snippet: <|code_start|>
if __name__ == '__main__':
DATA_PATH = sys.argv[1]
HDF5 = False
if HDF5:
print('Using hdf5')
stream = hdf5_disk_stream(osp.join(DATA_PATH, 'Co60.hdf5'), batch_sizes=8, cache_size=16)
else:
print('Using numpy memmaping')
<|code_end|>
with the help of current file imports:
from crayimage.runutils import hdf5_disk_stream, np_disk_stream
import sys
import os.path as osp
import time
and context from other files:
# Path: crayimage/runutils/stream.py
# def hdf5_disk_stream(path, batch_sizes=8, cache_size=16):
# queue = Queue(maxsize=cache_size)
#
# worker = threading.Thread(
# target=hdf5_batch_worker,
# kwargs=dict(path=path, out_queue=queue, batch_sizes=batch_sizes)
# )
#
# worker.daemon = True
# worker.start()
#
# return queue_stream(queue)
#
# def np_disk_stream(data_root, batch_sizes=8, cache_size=16, mmap_mode='r'):
# bin_patches = [
# osp.join(data_root, 'bin_%d.npy' % i)
# for i in range(len(os.listdir(data_root)))
# ]
#
# if type(batch_sizes) in [long, int]:
# batch_sizes = [batch_sizes] * len(bin_patches)
#
# queues = [ Queue(maxsize=cache_size) for _ in bin_patches ]
#
# workers = [
# threading.Thread(
# target=np_batch_worker,
# kwargs=dict(path=path, out_queue=queue, batch_size=batch_size, mmap_mode=mmap_mode)
# )
#
# for path, queue, batch_size in zip(bin_patches, queues, batch_sizes)
# ]
#
# for worker in workers:
# worker.daemon = True
# worker.start()
#
# return queues_stream(queues)
, which may contain function names, class names, or code. Output only the next line. | stream = np_disk_stream(osp.join(DATA_PATH, 'Co60'), batch_sizes=8, cache_size=16, mmap_mode='r') |
Using the snippet: <|code_start|>
def noise(patches):
return np.max(patches, axis=(2, 3))[:, 1] < 5
def hit(patches):
return np.max(patches, axis=(2, 3))[:, 1] > 25
class TestRunUtils(unittest.TestCase):
def test_load_and_filter(self):
print(os.getcwd())
<|code_end|>
, determine the next line of code. You have imports:
import unittest
import numpy as np
import os
from crayimage.runutils import load_index
from crayimage.runutils import slice_filter_run
and context (class names, function names, or code) available:
# Path: crayimage/runutils/io.py
# def load_index(index_file, root):
# """
#     Loads runs defined by the provided index file.
# Index file is a definition of runs.
#
# :param index_file: path to the index file.
# It can be:
# 1. a path on your local filesystem,
# 2. a path relative to the data root,
# 3. a name of predefined index file.
#
# The file is searched in the described above order.
# :param root: path to the data root
# :return: list of Run objects defined by the index file.
# """
# spec = get_index_file(index_file, root)
#
# runs = dict()
# for run in spec:
# if type(spec[run]['path']) is list:
# paths = np.array([
# osp.normpath(item)
# for item in spec[run]['path']
# ])
# else:
# paths = get_run_paths(root, spec[run]['path'])
#
# timestamps = extract_from_paths(paths, spec[run]['timestamp'], long)
#
# sorting_index = np.argsort(timestamps)
#
# info = dict()
# for k in spec[run]['info']:
# info[k] = extract_from_paths(paths, spec[run]['info'][k])[sorting_index]
#
# runs[run] = Run(
# paths=paths[sorting_index],
# timestamps=timestamps[sorting_index],
# source=spec[run]['source'],
# image_type=spec[run]['type'],
# meta_info=info,
# run_info=spec[run].get('run_info', None),
# index_info=spec[run],
# name=run,
# data_root=root
# )
#
# return runs
#
# Path: crayimage/runutils/run_utils.py
# def slice_filter_run(run, predicates, fractions, window = 40, step = 20, n_jobs=-1):
# n_images = run.abs_paths.shape[0]
#
# scaled_fractions = [
# (int(np.ceil(float(f) / n_images)) if type(f) is long else f)
# for f in fractions
# ]
#
# return slice_map_run(
# run, filter_patches,
# function_args={
# 'predicates' : predicates,
# 'fractions' : scaled_fractions,
# },
# n_jobs=n_jobs,
# window=window, step=step, flat=True
# )
. Output only the next line. | runs = load_index('clean.json', '../../../../data') |
Using the snippet: <|code_start|>
def noise(patches):
return np.max(patches, axis=(2, 3))[:, 1] < 5
def hit(patches):
return np.max(patches, axis=(2, 3))[:, 1] > 25
class TestRunUtils(unittest.TestCase):
def test_load_and_filter(self):
print(os.getcwd())
runs = load_index('clean.json', '../../../../data')
co_run = runs['Co'].random_subset(10)
<|code_end|>
, determine the next line of code. You have imports:
import unittest
import numpy as np
import os
from crayimage.runutils import load_index
from crayimage.runutils import slice_filter_run
and context (class names, function names, or code) available:
# Path: crayimage/runutils/io.py
# def load_index(index_file, root):
# """
#     Loads runs defined by the provided index file.
# Index file is a definition of runs.
#
# :param index_file: path to the index file.
# It can be:
# 1. a path on your local filesystem,
# 2. a path relative to the data root,
# 3. a name of predefined index file.
#
# The file is searched in the described above order.
# :param root: path to the data root
# :return: list of Run objects defined by the index file.
# """
# spec = get_index_file(index_file, root)
#
# runs = dict()
# for run in spec:
# if type(spec[run]['path']) is list:
# paths = np.array([
# osp.normpath(item)
# for item in spec[run]['path']
# ])
# else:
# paths = get_run_paths(root, spec[run]['path'])
#
# timestamps = extract_from_paths(paths, spec[run]['timestamp'], long)
#
# sorting_index = np.argsort(timestamps)
#
# info = dict()
# for k in spec[run]['info']:
# info[k] = extract_from_paths(paths, spec[run]['info'][k])[sorting_index]
#
# runs[run] = Run(
# paths=paths[sorting_index],
# timestamps=timestamps[sorting_index],
# source=spec[run]['source'],
# image_type=spec[run]['type'],
# meta_info=info,
# run_info=spec[run].get('run_info', None),
# index_info=spec[run],
# name=run,
# data_root=root
# )
#
# return runs
#
# Path: crayimage/runutils/run_utils.py
# def slice_filter_run(run, predicates, fractions, window = 40, step = 20, n_jobs=-1):
# n_images = run.abs_paths.shape[0]
#
# scaled_fractions = [
# (int(np.ceil(float(f) / n_images)) if type(f) is long else f)
# for f in fractions
# ]
#
# return slice_map_run(
# run, filter_patches,
# function_args={
# 'predicates' : predicates,
# 'fractions' : scaled_fractions,
# },
# n_jobs=n_jobs,
# window=window, step=step, flat=True
# )
. Output only the next line. | results = slice_filter_run( |
Given the following code snippet before the placeholder: <|code_start|>
if __name__ == '__main__':
np.random.seed(1234)
xs = np.stack([
np.arange(-10, 10, dtype='int16') for _ in range(128)
])
ys = np.stack([
np.arange(-10, 10, dtype='int16') for _ in range(128)
])
vals = np.random.uniform(1.0e-2, 1, size=(128, xs.shape[1])).astype('float32')
sizes = (8, 64, 32, 16, 8, 8)
main_job_n = np.sum(sizes)
n_iters = int(10 ** 1)
start = time.time()
for i in range(n_iters):
np.random.poisson(size=(main_job_n, 128, 128))
end = time.time()
delta_main = end - start
print('Main job alone, %.3f millisec / batch' % (
delta_main * 1000.0 / n_iters
))
<|code_end|>
, predict the next line using imports from the current file:
import numpy as np
import time
import matplotlib.pyplot as plt
from crayimage.simulation.geant import SimulationStream
from builtins import range
and context including class names, function names, and sometimes code from other files:
# Path: crayimage/simulation/geant.py
# class SimulationStream(object):
# @classmethod
# def from_run(cls, run, spectrum=(1170.0, 1330.0), *args, **kwargs):
# runs = [
# run[run.meta_info['energy'] == energy_line]
# for energy_line in spectrum
# ]
#
# track_len = np.max([max_track_len(r) for r in runs])
#
# tracks_xs, tracks_ys, tracks_vals = map(np.vstack,
# zip(*[ read_sparse_run(r, track_len=track_len) for r in runs ])
# )
#
# return cls(tracks_xs, tracks_xs, tracks_vals, *args, **kwargs)
#
# def __init__(self, tracks_xs, tracks_ys, tracks_vals,
# sizes = (8, 32, 16, 8, 4, 2, 1), img_shape=(128, 128),
# cache = 1):
# if cache is None or cache <= 0:
# def stream():
# while True:
# yield simulation(tracks_xs, tracks_ys, tracks_vals, batch_sizes=sizes, img_shape=img_shape)
#
# self.stream = stream()
# else:
# queue = Queue(maxsize=cache)
#
# worker = threading.Thread(
# target=simulation_worker,
# kwargs=dict(
# output_queue=queue,
# xs=tracks_xs, ys=tracks_ys, vals=tracks_vals,
# batch_sizes=sizes,
# img_shape=img_shape
# )
# )
#
# worker.daemon = True
# worker.start()
#
# self.stream = queue_stream(queue)
#
# def __iter__(self):
# return self
#
# def next(self):
# return self.stream.next()
. Output only the next line. | stream = SimulationStream(xs, ys, vals, sizes=sizes, cache=-1) |
Given snippet: <|code_start|> zip(*[ read_sparse_run(r, track_len=track_len) for r in runs ])
)
return cls(tracks_xs, tracks_xs, tracks_vals, *args, **kwargs)
def __init__(self, tracks_xs, tracks_ys, tracks_vals,
sizes = (8, 32, 16, 8, 4, 2, 1), img_shape=(128, 128),
cache = 1):
if cache is None or cache <= 0:
def stream():
while True:
yield simulation(tracks_xs, tracks_ys, tracks_vals, batch_sizes=sizes, img_shape=img_shape)
self.stream = stream()
else:
queue = Queue(maxsize=cache)
worker = threading.Thread(
target=simulation_worker,
kwargs=dict(
output_queue=queue,
xs=tracks_xs, ys=tracks_ys, vals=tracks_vals,
batch_sizes=sizes,
img_shape=img_shape
)
)
worker.daemon = True
worker.start()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import numpy as np
import threading
from queue import Queue
from Queue import Queue
from crayimage.runutils import queue_stream
from .generation import simulation_samples, center_tracks_mean, center_tracks_mass, center_tracks_box, center_tracks_source
from .io import IndexedSparseImages, root_to_sparse, border_crossing, filter_border_crossing
from .ordering import order_tracks, order_sparse_images
and context:
# Path: crayimage/runutils/stream.py
# def queue_stream(queue):
# while True:
# yield queue.get(block=True)
which might include code, classes, or functions. Output only the next line. | self.stream = queue_stream(queue) |
Predict the next line after this snippet: <|code_start|>__all__ = [
'binnig_update',
'binning',
'uniform_mapping',
'greedy_max_entropy_mapping',
'almost_uniform_mapping'
]
def binnig_update(img, out, mapping=None):
bins = out.shape[-1]
n_channels = img.shape[0]
if img.dtype == RGB_T:
binning_rgb(img.reshape(n_channels, -1), mapping, out.reshape(n_channels, -1, bins))
return out
elif img.dtype == RAW_T:
binning_raw(img.reshape(1, -1), mapping, out.reshape(1, -1, bins))
return out
else:
<|code_end|>
using the current file's imports:
import numpy as np
from .special import COUNT_T, RGB_T, RAW_T, BIN_T
from .special import binning_rgb
from .special import binning_raw
from .utils import wrong_dtype_exception
and any relevant context from other files:
# Path: crayimage/imgutils/utils.py
# def wrong_dtype_exception(dtype):
# return Exception(
# 'Image type (%s) does not understood. '
# 'For RAW images use ndarrays of %s type, for RGB model use ndarrays of %s type' % \
# (dtype, RAW_T, RGB_T)
# )
. Output only the next line. | raise wrong_dtype_exception(img.dtype) |
Given the code snippet: <|code_start|>copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def largest_triangle_three_buckets(data, threshold):
"""
Return a downsampled version of data.
Parameters
----------
data: list of lists
        data must be formatted this way: [[x,y], [x,y], [x,y], ...]
threshold: int
        threshold must be >= 2 and <= the len of data
Returns
-------
data, but downsampled using threshold
"""
# Check if data and threshold are valid
<|code_end|>
, generate the next line using the imports in this file:
import math
from lib.utils import DTYPE_COORD
and context (functions, classes, or occasionally code) from other files:
# Path: lib/utils.py
# DTYPE_COORD = np.dtype([('x', np.float), ('y', np.float)])
. Output only the next line. | if not data.dtype == DTYPE_COORD: |
Based on the snippet: <|code_start|> module_filepath = os.path.join(self.location + "/modules", moduleClassName+'.py')
py_mod = imp.load_source(moduleClassName, module_filepath)
# instantiate the imported module
moduleInstance = getattr(py_mod, moduleClassName)(device_name=device_name, device_id=device_id,
rabbitmq_address=rabbitmq_address, module_conf=module_conf,
global_conf=self.conf, module_index=module_index)
# all modules should implement start() and stop()
thread = threading.Thread(target=moduleInstance.start)
thread.daemon = True
# assign the thread to internal registry and start it up
self.modules[module_id] = thread
self.modules[module_id].start()
#moduleInstance.stop()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--device_id', required=True,
help="A unique ID to identify the device you are sending data from. "
"For example: 'octopicorn2015'")
parser.add_argument('-n', '--device_name', required=True,
help="The name of the device your are sending data from. "
"Supported devices are: %s" % _SUPPORTED_DEVICES)
<|code_end|>
, predict the immediate next line with the help of imports:
from lib.devices import get_supported_metrics, get_supported_devices, RABBITMQ_ADDRESS, MOCK_DEVICE_ID
from lib.constants import colors
import argparse
import imp
import os
import yaml
import time
import threading
and context (classes, functions, sometimes code) from other files:
# Path: lib/devices.py
# def get_supported_metrics():
# metrics = []
# for device_name in get_supported_devices():
# metrics.extend(get_metrics_names(device_name))
# return metrics
#
# def get_supported_devices():
# return [device['device_name'] for device in DEVICE_METADATA]
#
# RABBITMQ_ADDRESS = 'localhost'
#
# MOCK_DEVICE_ID = "mock"
#
# Path: lib/constants.py
# class colors:
# ENDC = '\033[0m'
# UNDERLINE = '\033[4m'
# BLINK = '\033[5m'
#
# HEADER = '\033[95m'
# OKBLUE = '\033[94m'
# OKGREEN = '\033[92m'
# FAIL = '\033[91m'
#
# BG_RASPBERRY = '\033[46m'
#
# DARK_RED = '\033[0;31m'
# RED = '\033[91m'
# ORANGE = '\033[1m'
# YELLOW = '\033[93m'
# GREEN = '\033[92m'
# DARK_GREEN = '\033[0;32m'
# CYAN = '\033[96m'
# MAGENTA = '\033[0;35m'
# RASPBERRY = '\033[0;36m'
# DARK_PURPLE = '\033[0;34m'
#
# GOLD = '\033[0;33m'
# SILVER = '\033[0;37m'
# GRAY = '\033[90m'
#
# BOLD_RED = '\033[1;31m'
# BOLD_ORANGE = '\033[1m'
# BOLD_YELLOW = '\033[1;33m'
# BOLD_GREEN = '\033[1;32m'
# BOLD_CYAN = '\033[1;36m'
# BOLD_BLUE = '\033[1;34m'
# BOLD_PURPLE = '\033[1;35m'
# BOLD_GRAY = '\033[1;30m'
#
# CYCLE = [YELLOW, GREEN, ORANGE, RASPBERRY, DARK_RED, CYAN, RED, DARK_GREEN, MAGENTA, DARK_PURPLE]
. Output only the next line. | parser.add_argument('-c', '--cloudbrain', default=RABBITMQ_ADDRESS, |
Given snippet: <|code_start|>
parser.add_argument('-i', '--device_id', required=True,
help="A unique ID to identify the device you are sending data from. "
"For example: 'octopicorn2015'")
parser.add_argument('-n', '--device_name', required=True,
help="The name of the device your are sending data from. "
"Supported devices are: %s" % _SUPPORTED_DEVICES)
parser.add_argument('-c', '--cloudbrain', default=RABBITMQ_ADDRESS,
help="The address of the CloudBrain instance you are sending data to.\n"
"Use " + RABBITMQ_ADDRESS + " to send data to our hosted service. \n"
"Otherwise use 'localhost' if running CloudBrain locally")
opts = parser.parse_args()
return opts
def main():
opts = parse_args()
device_name = opts.device_name
device_id = opts.device_id
cloudbrain_address = opts.cloudbrain
run(device_name,
device_id,
cloudbrain_address
)
def run(device_name='muse',
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from lib.devices import get_supported_metrics, get_supported_devices, RABBITMQ_ADDRESS, MOCK_DEVICE_ID
from lib.constants import colors
import argparse
import imp
import os
import yaml
import time
import threading
and context:
# Path: lib/devices.py
# def get_supported_metrics():
# metrics = []
# for device_name in get_supported_devices():
# metrics.extend(get_metrics_names(device_name))
# return metrics
#
# def get_supported_devices():
# return [device['device_name'] for device in DEVICE_METADATA]
#
# RABBITMQ_ADDRESS = 'localhost'
#
# MOCK_DEVICE_ID = "mock"
#
# Path: lib/constants.py
# class colors:
# ENDC = '\033[0m'
# UNDERLINE = '\033[4m'
# BLINK = '\033[5m'
#
# HEADER = '\033[95m'
# OKBLUE = '\033[94m'
# OKGREEN = '\033[92m'
# FAIL = '\033[91m'
#
# BG_RASPBERRY = '\033[46m'
#
# DARK_RED = '\033[0;31m'
# RED = '\033[91m'
# ORANGE = '\033[1m'
# YELLOW = '\033[93m'
# GREEN = '\033[92m'
# DARK_GREEN = '\033[0;32m'
# CYAN = '\033[96m'
# MAGENTA = '\033[0;35m'
# RASPBERRY = '\033[0;36m'
# DARK_PURPLE = '\033[0;34m'
#
# GOLD = '\033[0;33m'
# SILVER = '\033[0;37m'
# GRAY = '\033[90m'
#
# BOLD_RED = '\033[1;31m'
# BOLD_ORANGE = '\033[1m'
# BOLD_YELLOW = '\033[1;33m'
# BOLD_GREEN = '\033[1;32m'
# BOLD_CYAN = '\033[1;36m'
# BOLD_BLUE = '\033[1;34m'
# BOLD_PURPLE = '\033[1;35m'
# BOLD_GRAY = '\033[1;30m'
#
# CYCLE = [YELLOW, GREEN, ORANGE, RASPBERRY, DARK_RED, CYAN, RED, DARK_GREEN, MAGENTA, DARK_PURPLE]
which might include code, classes, or functions. Output only the next line. | device_id=MOCK_DEVICE_ID, |
Next line prediction: <|code_start|>__author__ = 'odrulea'
_SUPPORTED_DEVICES = get_supported_devices()
_SUPPORTED_METRICS = get_supported_metrics()
class AnalysisService(object):
"""
Subscribes and writes data to a file
Only supports Pika communication method for now, not pipes
"""
LOGNAME = "[Analysis Service] "
def __init__(self, device_name, device_id, rabbitmq_address=None, conf_path=None):
if rabbitmq_address is None:
<|code_end|>
. Use current file imports:
(from lib.devices import get_supported_metrics, get_supported_devices, RABBITMQ_ADDRESS, MOCK_DEVICE_ID
from lib.constants import colors
import argparse
import imp
import os
import yaml
import time
import threading)
and context including class names, function names, or small code snippets from other files:
# Path: lib/devices.py
# def get_supported_metrics():
# metrics = []
# for device_name in get_supported_devices():
# metrics.extend(get_metrics_names(device_name))
# return metrics
#
# def get_supported_devices():
# return [device['device_name'] for device in DEVICE_METADATA]
#
# RABBITMQ_ADDRESS = 'localhost'
#
# MOCK_DEVICE_ID = "mock"
#
# Path: lib/constants.py
# class colors:
# ENDC = '\033[0m'
# UNDERLINE = '\033[4m'
# BLINK = '\033[5m'
#
# HEADER = '\033[95m'
# OKBLUE = '\033[94m'
# OKGREEN = '\033[92m'
# FAIL = '\033[91m'
#
# BG_RASPBERRY = '\033[46m'
#
# DARK_RED = '\033[0;31m'
# RED = '\033[91m'
# ORANGE = '\033[1m'
# YELLOW = '\033[93m'
# GREEN = '\033[92m'
# DARK_GREEN = '\033[0;32m'
# CYAN = '\033[96m'
# MAGENTA = '\033[0;35m'
# RASPBERRY = '\033[0;36m'
# DARK_PURPLE = '\033[0;34m'
#
# GOLD = '\033[0;33m'
# SILVER = '\033[0;37m'
# GRAY = '\033[90m'
#
# BOLD_RED = '\033[1;31m'
# BOLD_ORANGE = '\033[1m'
# BOLD_YELLOW = '\033[1;33m'
# BOLD_GREEN = '\033[1;32m'
# BOLD_CYAN = '\033[1;36m'
# BOLD_BLUE = '\033[1;34m'
# BOLD_PURPLE = '\033[1;35m'
# BOLD_GRAY = '\033[1;30m'
#
# CYCLE = [YELLOW, GREEN, ORANGE, RASPBERRY, DARK_RED, CYAN, RED, DARK_GREEN, MAGENTA, DARK_PURPLE]
. Output only the next line. | raise ValueError(colors.FAIL + self.LOGNAME + "Pika subscriber needs to have a rabbitmq address!" + colors.ENDC) |
Predict the next line after this snippet: <|code_start|> if 'num_channels' in global_conf:
menu['num_channels'] = global_conf['num_channels']
# send the handshake to the clients
self.send(json.dumps(menu))
def on_message(self, message):
"""
This will receive instructions from the client to change the
stream. After the connection is established we expect to receive a JSON
with deviceName, deviceId, metric; then we subscribe to RabbitMQ and
start streaming the data.
"""
msg_dict = json.loads(message)
if msg_dict['type'] == 'subscription':
self.handle_channel_subscription(msg_dict)
elif msg_dict['type'] == 'unsubscription':
self.handle_channel_unsubscription(msg_dict)
elif msg_dict['type'] == 'command':
self.handle_channel_command(msg_dict)
def handle_channel_subscription(self, stream_configuration):
# parameters that can be passed in JSON from client
device_name = (stream_configuration['deviceName'] if "deviceName" in stream_configuration else None)
device_id = (stream_configuration['deviceId'] if "deviceId" in stream_configuration else None)
metric = (stream_configuration['metric'] if "metric" in stream_configuration else None)
data_type = (stream_configuration['dataType'] if "dataType" in stream_configuration else None)
<|code_end|>
using the current file's imports:
import json
import logging
from sockjs.tornado.conn import SockJSConnection
from lib.devices import RABBITMQ_ADDRESS
from TornadoSubscriber import TornadoSubscriber
from lib.utils import BufferToMatrix, ListConfOutputMetrics
from lib.constants import *
and any relevant context from other files:
# Path: lib/devices.py
# RABBITMQ_ADDRESS = 'localhost'
#
# Path: lib/utils.py
# def BufferToMatrix(jsonDump, output_type=None):
# """
# After retrieving the encoded json from the message queue buffer, we need to translate the 3 element json
# back into its original form.
# The 3 elements are:
# 0: use the 0th element to cast correct type
# 1: the base64 encoded data just needs to be base64 decoded
# 2: use the 2nd element to set correct dimensions with reshape()
#
# borrowed from: http://stackoverflow.com/questions/13461945/dumping-2d-python-array-with-json
# :param jsonDump:
# :return:
# """
# # if incoming json has not yet been json decoded, do it
# if isinstance(jsonDump, str):
# jsonDump = json.loads(jsonDump)
#
# # handle type
# dtype_string = jsonDump[0]
# if "DTYPE_" in dtype_string:
# # if the type
# dtype = np.dtype(eval(dtype_string))
# else:
# dtype = np.dtype(dtype_string)
#
# # reconstitute the data, using cast to decoded type from above
# matrix_array = np.frombuffer(base64.decodestring(jsonDump[1]),dtype)
# if len(jsonDump) > 2:
# matrix_array = matrix_array.reshape(jsonDump[2])
#
# if output_type is 'list':
# matrix_list = []
# vector = []
# [rows,cols] = matrix_array.shape
# if dtype_string == "DTYPE_COORD":
# for row in np.arange(rows):
# for column in np.arange(cols):
# point = [matrix_array[row][column]["x"],matrix_array[row][column]["y"]]
# vector.append(point)
# matrix_list.append(vector)
# vector = []
# else:
# for row in np.arange(rows):
# for column in np.arange(cols):
# vector.append(matrix_array[row][column])
# matrix_list.append(vector)
# vector = []
#
# return matrix_list
#
# return matrix_array
#
# def ListConfOutputMetrics(conf, prefix=None):
# """
# Loop through all of the available output metrics, among all of the modules defined in the conf .yml file.
#     If an optional prefix is specified, this will limit the response sent to the UI to only include those metrics w/ the prefix
# """
# metrics = []
# print conf['modules']
# for module in conf['modules']:
# if 'outputs' in module and 'data' in module['outputs']:
# if 'message_queues' in module['outputs']['data']:
# if type(module['outputs']['data']['message_queues']) == str:
# if prefix is None or module['outputs']['data']['message_queues'].startswith(prefix):
# metrics.append(module['outputs']['data']['message_queues'])
# elif type(module['outputs']['data']['message_queues']) == list:
# for metric in module['outputs']['data']['message_queues']:
# if prefix is None or metric.startswith(prefix):
# metrics.append(metric)
#
# return metrics
. Output only the next line. | rabbitmq_address = (stream_configuration['rabbitmq_address'] if 'rabbitmq_address' in stream_configuration else RABBITMQ_ADDRESS) |
Given the following code snippet before the placeholder: <|code_start|>OD
"""
#logging.getLogger().setLevel(logging.ERROR)
class ConnectionPlot(SockJSConnection):
"""RtStreamConnection connection implementation"""
# Class level variable
clients = set()
conf = None
def __init__(self, session):
super(self.__class__, self).__init__(session)
self.subscribers = {}
def send_probe_factory(self, metric_name, data_type=None):
def send_probe(body):
#logging.debug("GOT [" + metric_name + "]: " + body)
#print "GOT [" + metric_name + "]: [" + data_type + "]"
if data_type == MESSAGE_TYPE_MATRIX:
"""
MESSAGE_TYPE_MATRIX output is a base64 encoded blob which needs to be decoded
"""
buffer_content = json.loads(body)
for record in buffer_content:
<|code_end|>
, predict the next line using imports from the current file:
import json
import logging
from sockjs.tornado.conn import SockJSConnection
from lib.devices import RABBITMQ_ADDRESS
from TornadoSubscriber import TornadoSubscriber
from lib.utils import BufferToMatrix, ListConfOutputMetrics
from lib.constants import *
and context including class names, function names, and sometimes code from other files:
# Path: lib/devices.py
# RABBITMQ_ADDRESS = 'localhost'
#
# Path: lib/utils.py
# def BufferToMatrix(jsonDump, output_type=None):
# """
# After retrieving the encoded json from the message queue buffer, we need to translate the 3 element json
# back into its original form.
# The 3 elements are:
# 0: use the 0th element to cast correct type
# 1: the base64 encoded data just needs to be base64 decoded
# 2: use the 2nd element to set correct dimensions with reshape()
#
# borrowed from: http://stackoverflow.com/questions/13461945/dumping-2d-python-array-with-json
# :param jsonDump:
# :return:
# """
# # if incoming json has not yet been json decoded, do it
# if isinstance(jsonDump, str):
# jsonDump = json.loads(jsonDump)
#
# # handle type
# dtype_string = jsonDump[0]
# if "DTYPE_" in dtype_string:
# # if the type
# dtype = np.dtype(eval(dtype_string))
# else:
# dtype = np.dtype(dtype_string)
#
# # reconstitute the data, using cast to decoded type from above
# matrix_array = np.frombuffer(base64.decodestring(jsonDump[1]),dtype)
# if len(jsonDump) > 2:
# matrix_array = matrix_array.reshape(jsonDump[2])
#
# if output_type is 'list':
# matrix_list = []
# vector = []
# [rows,cols] = matrix_array.shape
# if dtype_string == "DTYPE_COORD":
# for row in np.arange(rows):
# for column in np.arange(cols):
# point = [matrix_array[row][column]["x"],matrix_array[row][column]["y"]]
# vector.append(point)
# matrix_list.append(vector)
# vector = []
# else:
# for row in np.arange(rows):
# for column in np.arange(cols):
# vector.append(matrix_array[row][column])
# matrix_list.append(vector)
# vector = []
#
# return matrix_list
#
# return matrix_array
#
# def ListConfOutputMetrics(conf, prefix=None):
# """
# Loop through all of the available output metrics, among all of the modules defined in the conf .yml file.
#     If an optional prefix is specified, this will limit the response sent to the UI to only include those metrics w/ the prefix
# """
# metrics = []
# print conf['modules']
# for module in conf['modules']:
# if 'outputs' in module and 'data' in module['outputs']:
# if 'message_queues' in module['outputs']['data']:
# if type(module['outputs']['data']['message_queues']) == str:
# if prefix is None or module['outputs']['data']['message_queues'].startswith(prefix):
# metrics.append(module['outputs']['data']['message_queues'])
# elif type(module['outputs']['data']['message_queues']) == list:
# for metric in module['outputs']['data']['message_queues']:
# if prefix is None or metric.startswith(prefix):
# metrics.append(metric)
#
# return metrics
. Output only the next line. | message = BufferToMatrix(record, output_type="list") |
Next line prediction: <|code_start|> def send_probe(body):
#logging.debug("GOT [" + metric_name + "]: " + body)
#print "GOT [" + metric_name + "]: [" + data_type + "]"
if data_type == MESSAGE_TYPE_MATRIX:
"""
MESSAGE_TYPE_MATRIX output is a base64 encoded blob which needs to be decoded
"""
buffer_content = json.loads(body)
for record in buffer_content:
message = BufferToMatrix(record, output_type="list")
self.send(message,True)
else:
"""
MESSAGE_TYPE_TIME_SAMPLE output is a dict object, with one element for each channel + one for timestamp
gotcha: Pika tends to make all keys in the dict utf8
"""
buffer_content = json.loads(body)
for record in buffer_content:
record["metric"] = metric_name
self.send(json.dumps(record))
return send_probe
def on_open(self, info):
logging.info("Got a new connection...")
# debug
print "[Tornado Server: ConnectionPlot] opened websocket connection"
self.clients.add(self)
<|code_end|>
. Use current file imports:
(import json
import logging
from sockjs.tornado.conn import SockJSConnection
from lib.devices import RABBITMQ_ADDRESS
from TornadoSubscriber import TornadoSubscriber
from lib.utils import BufferToMatrix, ListConfOutputMetrics
from lib.constants import *)
and context including class names, function names, or small code snippets from other files:
# Path: lib/devices.py
# RABBITMQ_ADDRESS = 'localhost'
#
# Path: lib/utils.py
# def BufferToMatrix(jsonDump, output_type=None):
# """
# After retrieving the encoded json from the message queue buffer, we need to translate the 3 element json
# back into its original form.
# The 3 elements are:
# 0: use the 0th element to cast correct type
# 1: the base64 encoded data just needs to be base64 decoded
# 2: use the 2nd element to set correct dimensions with reshape()
#
# borrowed from: http://stackoverflow.com/questions/13461945/dumping-2d-python-array-with-json
# :param jsonDump:
# :return:
# """
# # if incoming json has not yet been json decoded, do it
# if isinstance(jsonDump, str):
# jsonDump = json.loads(jsonDump)
#
# # handle type
# dtype_string = jsonDump[0]
# if "DTYPE_" in dtype_string:
# # if the type
# dtype = np.dtype(eval(dtype_string))
# else:
# dtype = np.dtype(dtype_string)
#
# # reconstitute the data, using cast to decoded type from above
# matrix_array = np.frombuffer(base64.decodestring(jsonDump[1]),dtype)
# if len(jsonDump) > 2:
# matrix_array = matrix_array.reshape(jsonDump[2])
#
# if output_type is 'list':
# matrix_list = []
# vector = []
# [rows,cols] = matrix_array.shape
# if dtype_string == "DTYPE_COORD":
# for row in np.arange(rows):
# for column in np.arange(cols):
# point = [matrix_array[row][column]["x"],matrix_array[row][column]["y"]]
# vector.append(point)
# matrix_list.append(vector)
# vector = []
# else:
# for row in np.arange(rows):
# for column in np.arange(cols):
# vector.append(matrix_array[row][column])
# matrix_list.append(vector)
# vector = []
#
# return matrix_list
#
# return matrix_array
#
# def ListConfOutputMetrics(conf, prefix=None):
# """
# Loop through all of the available output metrics, among all of the modules defined in the conf .yml file.
# If an option prefix is specified, this will limit response sent to the UI to only include those metrics w/ the prefix
# """
# metrics = []
# print conf['modules']
# for module in conf['modules']:
# if 'outputs' in module and 'data' in module['outputs']:
# if 'message_queues' in module['outputs']['data']:
# if type(module['outputs']['data']['message_queues']) == str:
# if prefix is None or module['outputs']['data']['message_queues'].startswith(prefix):
# metrics.append(module['outputs']['data']['message_queues'])
# elif type(module['outputs']['data']['message_queues']) == list:
# for metric in module['outputs']['data']['message_queues']:
# if prefix is None or metric.startswith(prefix):
# metrics.append(metric)
#
# return metrics
. Output only the next line. | metrics = ListConfOutputMetrics(self.conf, prefix="viz_") |
Predict the next line after this snippet: <|code_start|>
def connect(self):
credentials = pika.PlainCredentials('cloudbrain', 'cloudbrain')
self.connection = pika.BlockingConnection(pika.ConnectionParameters(
host=self.host, credentials=credentials))
self.channel = self.connection.channel()
for metric_id,metric_info in self.metrics.iteritems():
# each metric has an internal id, and a name
#
# the internal id is used as a consumer_tag, or think of it as a hardcoded local variable
# for example, if i have 2 inputs [class_label, data], the 2 inputs are not interchangeable,
# and we must know which is which, so we use their ids to identify them
#
# the name is used to find the correct exchange in rabbitmq
key = "%s:%s:%s" %(self.device_id,self.device_name, metric_info['name'])
# declare the exchange serving this metric
self.channel.exchange_declare(exchange=key, exchange_type='direct')
# declare queue and bind
# exclusive=True is important to make a queue that will be destroyed when client hangs up
# otherwise the queue would persist to the next session, with some old data still stuck in it
#
# Another thing, someone might look at this queue declaration and wonder: why didn't we pass in
# the queue name as a parameter like everywhere else? The reason for this is that, by not passing in
# a name, we allow pika to use a randomly generated name for the queue
self.queues[metric_id] = self.channel.queue_declare(exclusive=True).method.queue
self.channel.queue_bind(exchange=key, queue=self.queues[metric_id], routing_key=key)
<|code_end|>
using the current file's imports:
import pika
from lib.SubscriberInterface import Subscriber
from lib.constants import colors
and any relevant context from other files:
# Path: lib/SubscriberInterface.py
# class Subscriber(object):
# __metaclass__ = ABCMeta
#
# def __init__(self, device_name, device_id, host):
# """
#
# :return:
# """
# self.device_name = device_name
# self.device_id = device_id
# self.host = host
#
# @abstractmethod
# def consume_messages(self, callback):
# #TODO: write doc
# """
#
# :return:
# """
# @abstractmethod
# def connect(self):
# #TODO: write doc
# """
#
# :return:
# """
#
# @abstractmethod
# def disconnect(self):
# #TODO: write doc
# """
#
# :return:
# """
#
# Path: lib/constants.py
# class colors:
# ENDC = '\033[0m'
# UNDERLINE = '\033[4m'
# BLINK = '\033[5m'
#
# HEADER = '\033[95m'
# OKBLUE = '\033[94m'
# OKGREEN = '\033[92m'
# FAIL = '\033[91m'
#
# BG_RASPBERRY = '\033[46m'
#
# DARK_RED = '\033[0;31m'
# RED = '\033[91m'
# ORANGE = '\033[1m'
# YELLOW = '\033[93m'
# GREEN = '\033[92m'
# DARK_GREEN = '\033[0;32m'
# CYAN = '\033[96m'
# MAGENTA = '\033[0;35m'
# RASPBERRY = '\033[0;36m'
# DARK_PURPLE = '\033[0;34m'
#
# GOLD = '\033[0;33m'
# SILVER = '\033[0;37m'
# GRAY = '\033[90m'
#
# BOLD_RED = '\033[1;31m'
# BOLD_ORANGE = '\033[1m'
# BOLD_YELLOW = '\033[1;33m'
# BOLD_GREEN = '\033[1;32m'
# BOLD_CYAN = '\033[1;36m'
# BOLD_BLUE = '\033[1;34m'
# BOLD_PURPLE = '\033[1;35m'
# BOLD_GRAY = '\033[1;30m'
#
# CYCLE = [YELLOW, GREEN, ORANGE, RASPBERRY, DARK_RED, CYAN, RED, DARK_GREEN, MAGENTA, DARK_PURPLE]
. Output only the next line. | print colors.GOLD + "[Subscriber Started] Queue --> [" + key + "] for input [" + metric_id + "]" + colors.ENDC |
Next line prediction: <|code_start|>
class Connector(object):
__metaclass__ = ABCMeta
def __init__(self, publishers, buffer_size, step_size, device_name, device_port, device_mac=None):
<|code_end|>
. Use current file imports:
(from abc import ABCMeta, abstractmethod
from lib.devices import get_metrics_names)
and context including class names, function names, or small code snippets from other files:
# Path: lib/devices.py
# def get_metrics_names(device_type):
# """
# Get metric names for a specific device type.
# :return: list of metric names
# """
# metadata = [metadata for metadata in DEVICE_METADATA if metadata['device_name'] == device_type]
#
# if len(metadata) > 0:
# metrics = metadata[0]['metrics']
# else:
# raise _DeviceNameNotFound("Could not find device name '%s' in metadata" % device_type)
#
# metric_names = []
# for metric in metrics:
# metric_names.append(metric['metric_name'])
#
# return metric_names
. Output only the next line. | self.metrics = get_metrics_names(device_name) |
Given the following code snippet before the placeholder: <|code_start|> def disconnect(self):
self.connection.close_file()
def consume_messages(self, callback):
while True:
line = self.pipe.readline()
if line == '':
return # EOF
## TODO: figure out what ch, method, and properties are
data = json.loads(line)
body = data['body']
callback(None, None, None, json.dumps(body))
def get_one_message(self):
line = self.pipe.readline()
return json.loads(line)
def _print_message(ch, method, properties, body):
print body
if __name__ == "__main__":
device_id = "test"
device_name = "muse"
host = RABBITMQ_ADDRESS
buffer_size = 100
<|code_end|>
, predict the next line using imports from the current file:
import json
import sys
from lib.SubscriberInterface import Subscriber
from lib.devices import get_metrics_names, RABBITMQ_ADDRESS
and context including class names, function names, and sometimes code from other files:
# Path: lib/SubscriberInterface.py
# class Subscriber(object):
# __metaclass__ = ABCMeta
#
# def __init__(self, device_name, device_id, host):
# """
#
# :return:
# """
# self.device_name = device_name
# self.device_id = device_id
# self.host = host
#
# @abstractmethod
# def consume_messages(self, callback):
# #TODO: write doc
# """
#
# :return:
# """
# @abstractmethod
# def connect(self):
# #TODO: write doc
# """
#
# :return:
# """
#
# @abstractmethod
# def disconnect(self):
# #TODO: write doc
# """
#
# :return:
# """
#
# Path: lib/devices.py
# def get_metrics_names(device_type):
# """
# Get metric names for a specific device type.
# :return: list of metric names
# """
# metadata = [metadata for metadata in DEVICE_METADATA if metadata['device_name'] == device_type]
#
# if len(metadata) > 0:
# metrics = metadata[0]['metrics']
# else:
# raise _DeviceNameNotFound("Could not find device name '%s' in metadata" % device_type)
#
# metric_names = []
# for metric in metrics:
# metric_names.append(metric['metric_name'])
#
# return metric_names
#
# RABBITMQ_ADDRESS = 'localhost'
. Output only the next line. | metric_names = get_metrics_names(device_name) |
Based on the snippet: <|code_start|> else:
self.pipe = open(self.pipe_name, 'r')
def disconnect(self):
self.connection.close_file()
def consume_messages(self, callback):
while True:
line = self.pipe.readline()
if line == '':
return # EOF
## TODO: figure out what ch, method, and properties are
data = json.loads(line)
body = data['body']
callback(None, None, None, json.dumps(body))
def get_one_message(self):
line = self.pipe.readline()
return json.loads(line)
def _print_message(ch, method, properties, body):
print body
if __name__ == "__main__":
device_id = "test"
device_name = "muse"
<|code_end|>
, predict the immediate next line with the help of imports:
import json
import sys
from lib.SubscriberInterface import Subscriber
from lib.devices import get_metrics_names, RABBITMQ_ADDRESS
and context (classes, functions, sometimes code) from other files:
# Path: lib/SubscriberInterface.py
# class Subscriber(object):
# __metaclass__ = ABCMeta
#
# def __init__(self, device_name, device_id, host):
# """
#
# :return:
# """
# self.device_name = device_name
# self.device_id = device_id
# self.host = host
#
# @abstractmethod
# def consume_messages(self, callback):
# #TODO: write doc
# """
#
# :return:
# """
# @abstractmethod
# def connect(self):
# #TODO: write doc
# """
#
# :return:
# """
#
# @abstractmethod
# def disconnect(self):
# #TODO: write doc
# """
#
# :return:
# """
#
# Path: lib/devices.py
# def get_metrics_names(device_type):
# """
# Get metric names for a specific device type.
# :return: list of metric names
# """
# metadata = [metadata for metadata in DEVICE_METADATA if metadata['device_name'] == device_type]
#
# if len(metadata) > 0:
# metrics = metadata[0]['metrics']
# else:
# raise _DeviceNameNotFound("Could not find device name '%s' in metadata" % device_type)
#
# metric_names = []
# for metric in metrics:
# metric_names.append(metric['metric_name'])
#
# return metric_names
#
# RABBITMQ_ADDRESS = 'localhost'
. Output only the next line. | host = RABBITMQ_ADDRESS |
Predict the next line after this snippet: <|code_start|> device_id,
cloudbrain_address,
buffer_size, step_size,
device_port,
pipe_name,
publisher,
device_mac)
def run(device_name="muse",
mock_data_enabled=True,
device_id=MOCK_DEVICE_ID,
cloudbrain_address=RABBITMQ_ADDRESS,
buffer_size=10, step_size=10,
device_port=None,
pipe_name=None,
publisher_type="pika",
device_mac=None):
if device_name == "muse" and not mock_data_enabled:
if not device_port:
device_port = 9090
elif device_name in ["openbci","openbci16"] and not mock_data_enabled:
else:
raise ValueError("Device type '%s' not supported. "
"Supported devices are:%s" % (device_name, _SUPPORTED_DEVICES))
metrics = get_metrics_names(device_name)
if publisher_type == "pika":
<|code_end|>
using the current file's imports:
import argparse
from lib.PikaPublisher import PikaPublisher
from lib.PipePublisher import PipePublisher
from lib.devices import get_metrics_names, get_supported_devices, RABBITMQ_ADDRESS, MOCK_DEVICE_ID
from cloudbrain.connectors.MuseConnector import MuseConnector as Connector
from lib.connectors.OpenBCIConnector import OpenBCIConnector as Connector
and any relevant context from other files:
# Path: lib/PikaPublisher.py
# class PikaPublisher(Publisher):
# """
# Publisher implementation for RabbitMQ via the Pika client
# """
#
# def __init__(self, device_name, device_id, rabbitmq_address, metric_name):
# super(PikaPublisher, self).__init__(device_name, device_id, rabbitmq_address)
# self.connection = None
# self.channel = None
# self.metric_name = metric_name
#
#
# def publish(self, buffer_content):
# key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
# self.channel.basic_publish(exchange=key,
# routing_key=key,
# body=json.dumps(buffer_content),
# properties=pika.BasicProperties(
# delivery_mode=2, # this makes the message persistent
# ))
#
# def connect(self):
# credentials = pika.PlainCredentials('cloudbrain', 'cloudbrain')
#
# self.connection = pika.BlockingConnection(pika.ConnectionParameters(
# host=self.host, credentials=credentials))
# self.channel = self.connection.channel()
#
# key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
# self.channel.exchange_declare(exchange=key,
# type='direct')
#
# def disconnect(self):
# self.connection.close_file()
#
# Path: lib/PipePublisher.py
# class PipePublisher(Publisher):
# """
# Publisher implementation for writing data to pipe
# """
# PIPE_WRITING_LOCKS = dict()
#
# def __init__(self, device_name, device_id, metric_name, pipe_name=None):
# super(PipePublisher, self).__init__(device_name, device_id, None)
# self.metric_name = metric_name
# self.pipe_name = pipe_name
#
#
# def get_lock(self):
# lock = PipePublisher.PIPE_WRITING_LOCKS.get(self.pipe_name, None)
# if lock is None:
# lock = Lock()
# PipePublisher.PIPE_WRITING_LOCKS[self.pipe_name] = lock
# return lock
#
# def lock(self):
# lock = self.get_lock()
# lock.acquire(True)
#
# def unlock(self):
# lock = self.get_lock()
# lock.release()
#
# def publish(self, buffer_content):
# key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
# out = {"key": key, 'body': buffer_content}
# to_write = json.dumps(out)
#
# self.lock()
#
# self.pipe.write(to_write)
# self.pipe.write("\n")
# self.pipe.flush()
#
# self.unlock()
#
# def connect(self):
# self.lock()
#
# if self.pipe_name is None:
# self.pipe = sys.stdout
# else:
# if os.path.exists(self.pipe_name) and not stat.S_ISFIFO(os.stat(self.pipe_name).st_mode):
# raise Exception("File '%s' exists and is not a named pipe." % self.pipe_name)
# elif not os.path.exists(self.pipe_name):
# os.mkfifo(self.pipe_name)
# self.pipe = open(self.pipe_name, 'a')
#
# self.unlock()
#
# def disconnect(self):
# if self.pipe_name is not None:
# self.pipe.close()
# os.remove(self.pipe_name)
#
# Path: lib/devices.py
# def get_metrics_names(device_type):
# """
# Get metric names for a specific device type.
# :return: list of metric names
# """
# metadata = [metadata for metadata in DEVICE_METADATA if metadata['device_name'] == device_type]
#
# if len(metadata) > 0:
# metrics = metadata[0]['metrics']
# else:
# raise _DeviceNameNotFound("Could not find device name '%s' in metadata" % device_type)
#
# metric_names = []
# for metric in metrics:
# metric_names.append(metric['metric_name'])
#
# return metric_names
#
# def get_supported_devices():
# return [device['device_name'] for device in DEVICE_METADATA]
#
# RABBITMQ_ADDRESS = 'localhost'
#
# MOCK_DEVICE_ID = "mock"
. Output only the next line. | publishers = {metric: PikaPublisher(device_name, |
Predict the next line after this snippet: <|code_start|> publisher,
device_mac)
def run(device_name="muse",
mock_data_enabled=True,
device_id=MOCK_DEVICE_ID,
cloudbrain_address=RABBITMQ_ADDRESS,
buffer_size=10, step_size=10,
device_port=None,
pipe_name=None,
publisher_type="pika",
device_mac=None):
if device_name == "muse" and not mock_data_enabled:
if not device_port:
device_port = 9090
elif device_name in ["openbci","openbci16"] and not mock_data_enabled:
else:
raise ValueError("Device type '%s' not supported. "
"Supported devices are:%s" % (device_name, _SUPPORTED_DEVICES))
metrics = get_metrics_names(device_name)
if publisher_type == "pika":
publishers = {metric: PikaPublisher(device_name,
device_id,
cloudbrain_address,
metric) for metric in metrics}
elif publisher_type == "pipe":
<|code_end|>
using the current file's imports:
import argparse
from lib.PikaPublisher import PikaPublisher
from lib.PipePublisher import PipePublisher
from lib.devices import get_metrics_names, get_supported_devices, RABBITMQ_ADDRESS, MOCK_DEVICE_ID
from cloudbrain.connectors.MuseConnector import MuseConnector as Connector
from lib.connectors.OpenBCIConnector import OpenBCIConnector as Connector
and any relevant context from other files:
# Path: lib/PikaPublisher.py
# class PikaPublisher(Publisher):
# """
# Publisher implementation for RabbitMQ via the Pika client
# """
#
# def __init__(self, device_name, device_id, rabbitmq_address, metric_name):
# super(PikaPublisher, self).__init__(device_name, device_id, rabbitmq_address)
# self.connection = None
# self.channel = None
# self.metric_name = metric_name
#
#
# def publish(self, buffer_content):
# key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
# self.channel.basic_publish(exchange=key,
# routing_key=key,
# body=json.dumps(buffer_content),
# properties=pika.BasicProperties(
# delivery_mode=2, # this makes the message persistent
# ))
#
# def connect(self):
# credentials = pika.PlainCredentials('cloudbrain', 'cloudbrain')
#
# self.connection = pika.BlockingConnection(pika.ConnectionParameters(
# host=self.host, credentials=credentials))
# self.channel = self.connection.channel()
#
# key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
# self.channel.exchange_declare(exchange=key,
# type='direct')
#
# def disconnect(self):
# self.connection.close_file()
#
# Path: lib/PipePublisher.py
# class PipePublisher(Publisher):
# """
# Publisher implementation for writing data to pipe
# """
# PIPE_WRITING_LOCKS = dict()
#
# def __init__(self, device_name, device_id, metric_name, pipe_name=None):
# super(PipePublisher, self).__init__(device_name, device_id, None)
# self.metric_name = metric_name
# self.pipe_name = pipe_name
#
#
# def get_lock(self):
# lock = PipePublisher.PIPE_WRITING_LOCKS.get(self.pipe_name, None)
# if lock is None:
# lock = Lock()
# PipePublisher.PIPE_WRITING_LOCKS[self.pipe_name] = lock
# return lock
#
# def lock(self):
# lock = self.get_lock()
# lock.acquire(True)
#
# def unlock(self):
# lock = self.get_lock()
# lock.release()
#
# def publish(self, buffer_content):
# key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
# out = {"key": key, 'body': buffer_content}
# to_write = json.dumps(out)
#
# self.lock()
#
# self.pipe.write(to_write)
# self.pipe.write("\n")
# self.pipe.flush()
#
# self.unlock()
#
# def connect(self):
# self.lock()
#
# if self.pipe_name is None:
# self.pipe = sys.stdout
# else:
# if os.path.exists(self.pipe_name) and not stat.S_ISFIFO(os.stat(self.pipe_name).st_mode):
# raise Exception("File '%s' exists and is not a named pipe." % self.pipe_name)
# elif not os.path.exists(self.pipe_name):
# os.mkfifo(self.pipe_name)
# self.pipe = open(self.pipe_name, 'a')
#
# self.unlock()
#
# def disconnect(self):
# if self.pipe_name is not None:
# self.pipe.close()
# os.remove(self.pipe_name)
#
# Path: lib/devices.py
# def get_metrics_names(device_type):
# """
# Get metric names for a specific device type.
# :return: list of metric names
# """
# metadata = [metadata for metadata in DEVICE_METADATA if metadata['device_name'] == device_type]
#
# if len(metadata) > 0:
# metrics = metadata[0]['metrics']
# else:
# raise _DeviceNameNotFound("Could not find device name '%s' in metadata" % device_type)
#
# metric_names = []
# for metric in metrics:
# metric_names.append(metric['metric_name'])
#
# return metric_names
#
# def get_supported_devices():
# return [device['device_name'] for device in DEVICE_METADATA]
#
# RABBITMQ_ADDRESS = 'localhost'
#
# MOCK_DEVICE_ID = "mock"
. Output only the next line. | publishers = {metric: PipePublisher(device_name, |
Predict the next line for this snippet: <|code_start|>
run(device_name,
mock_data_enabled,
device_id,
cloudbrain_address,
buffer_size, step_size,
device_port,
pipe_name,
publisher,
device_mac)
def run(device_name="muse",
mock_data_enabled=True,
device_id=MOCK_DEVICE_ID,
cloudbrain_address=RABBITMQ_ADDRESS,
buffer_size=10, step_size=10,
device_port=None,
pipe_name=None,
publisher_type="pika",
device_mac=None):
if device_name == "muse" and not mock_data_enabled:
if not device_port:
device_port = 9090
elif device_name in ["openbci","openbci16"] and not mock_data_enabled:
else:
raise ValueError("Device type '%s' not supported. "
"Supported devices are:%s" % (device_name, _SUPPORTED_DEVICES))
<|code_end|>
with the help of current file imports:
import argparse
from lib.PikaPublisher import PikaPublisher
from lib.PipePublisher import PipePublisher
from lib.devices import get_metrics_names, get_supported_devices, RABBITMQ_ADDRESS, MOCK_DEVICE_ID
from cloudbrain.connectors.MuseConnector import MuseConnector as Connector
from lib.connectors.OpenBCIConnector import OpenBCIConnector as Connector
and context from other files:
# Path: lib/PikaPublisher.py
# class PikaPublisher(Publisher):
# """
# Publisher implementation for RabbitMQ via the Pika client
# """
#
# def __init__(self, device_name, device_id, rabbitmq_address, metric_name):
# super(PikaPublisher, self).__init__(device_name, device_id, rabbitmq_address)
# self.connection = None
# self.channel = None
# self.metric_name = metric_name
#
#
# def publish(self, buffer_content):
# key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
# self.channel.basic_publish(exchange=key,
# routing_key=key,
# body=json.dumps(buffer_content),
# properties=pika.BasicProperties(
# delivery_mode=2, # this makes the message persistent
# ))
#
# def connect(self):
# credentials = pika.PlainCredentials('cloudbrain', 'cloudbrain')
#
# self.connection = pika.BlockingConnection(pika.ConnectionParameters(
# host=self.host, credentials=credentials))
# self.channel = self.connection.channel()
#
# key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
# self.channel.exchange_declare(exchange=key,
# type='direct')
#
# def disconnect(self):
# self.connection.close_file()
#
# Path: lib/PipePublisher.py
# class PipePublisher(Publisher):
# """
# Publisher implementation for writing data to pipe
# """
# PIPE_WRITING_LOCKS = dict()
#
# def __init__(self, device_name, device_id, metric_name, pipe_name=None):
# super(PipePublisher, self).__init__(device_name, device_id, None)
# self.metric_name = metric_name
# self.pipe_name = pipe_name
#
#
# def get_lock(self):
# lock = PipePublisher.PIPE_WRITING_LOCKS.get(self.pipe_name, None)
# if lock is None:
# lock = Lock()
# PipePublisher.PIPE_WRITING_LOCKS[self.pipe_name] = lock
# return lock
#
# def lock(self):
# lock = self.get_lock()
# lock.acquire(True)
#
# def unlock(self):
# lock = self.get_lock()
# lock.release()
#
# def publish(self, buffer_content):
# key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
# out = {"key": key, 'body': buffer_content}
# to_write = json.dumps(out)
#
# self.lock()
#
# self.pipe.write(to_write)
# self.pipe.write("\n")
# self.pipe.flush()
#
# self.unlock()
#
# def connect(self):
# self.lock()
#
# if self.pipe_name is None:
# self.pipe = sys.stdout
# else:
# if os.path.exists(self.pipe_name) and not stat.S_ISFIFO(os.stat(self.pipe_name).st_mode):
# raise Exception("File '%s' exists and is not a named pipe." % self.pipe_name)
# elif not os.path.exists(self.pipe_name):
# os.mkfifo(self.pipe_name)
# self.pipe = open(self.pipe_name, 'a')
#
# self.unlock()
#
# def disconnect(self):
# if self.pipe_name is not None:
# self.pipe.close()
# os.remove(self.pipe_name)
#
# Path: lib/devices.py
# def get_metrics_names(device_type):
# """
# Get metric names for a specific device type.
# :return: list of metric names
# """
# metadata = [metadata for metadata in DEVICE_METADATA if metadata['device_name'] == device_type]
#
# if len(metadata) > 0:
# metrics = metadata[0]['metrics']
# else:
# raise _DeviceNameNotFound("Could not find device name '%s' in metadata" % device_type)
#
# metric_names = []
# for metric in metrics:
# metric_names.append(metric['metric_name'])
#
# return metric_names
#
# def get_supported_devices():
# return [device['device_name'] for device in DEVICE_METADATA]
#
# RABBITMQ_ADDRESS = 'localhost'
#
# MOCK_DEVICE_ID = "mock"
, which may contain function names, class names, or code. Output only the next line. | metrics = get_metrics_names(device_name) |
Next line prediction: <|code_start|>
def validate_opts(opts):
"""
validate that we've got the right options
@param opts: (list) options to validate
@retun opts_valid: (bool) 1 if opts are valid. 0 otherwise.
"""
opts_valid = True
if (opts.device_name in ["openbci","openbci_daisy"]) and (opts.device_port is None):
opts_valid = False
return opts_valid
def get_args_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--device_id', required=True,
help="A unique ID to identify the device you are sending data from. "
"For example: 'octopicorn2016'")
parser.add_argument('-m', '--mock', action='store_true', required=False,
help="Use this flag to generate mock data for a "
"supported device name %s" % _SUPPORTED_DEVICES)
parser.add_argument('-n', '--device_name', required=True,
help="The name of the device your are sending data from. "
"Supported devices are: %s" % _SUPPORTED_DEVICES)
<|code_end|>
. Use current file imports:
(import argparse
from lib.PikaPublisher import PikaPublisher
from lib.PipePublisher import PipePublisher
from lib.devices import get_metrics_names, get_supported_devices, RABBITMQ_ADDRESS, MOCK_DEVICE_ID
from cloudbrain.connectors.MuseConnector import MuseConnector as Connector
from lib.connectors.OpenBCIConnector import OpenBCIConnector as Connector)
and context including class names, function names, or small code snippets from other files:
# Path: lib/PikaPublisher.py
# class PikaPublisher(Publisher):
# """
# Publisher implementation for RabbitMQ via the Pika client
# """
#
# def __init__(self, device_name, device_id, rabbitmq_address, metric_name):
# super(PikaPublisher, self).__init__(device_name, device_id, rabbitmq_address)
# self.connection = None
# self.channel = None
# self.metric_name = metric_name
#
#
# def publish(self, buffer_content):
# key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
# self.channel.basic_publish(exchange=key,
# routing_key=key,
# body=json.dumps(buffer_content),
# properties=pika.BasicProperties(
# delivery_mode=2, # this makes the message persistent
# ))
#
# def connect(self):
# credentials = pika.PlainCredentials('cloudbrain', 'cloudbrain')
#
# self.connection = pika.BlockingConnection(pika.ConnectionParameters(
# host=self.host, credentials=credentials))
# self.channel = self.connection.channel()
#
# key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
# self.channel.exchange_declare(exchange=key,
# type='direct')
#
# def disconnect(self):
# self.connection.close_file()
#
# Path: lib/PipePublisher.py
# class PipePublisher(Publisher):
# """
# Publisher implementation for writing data to pipe
# """
# PIPE_WRITING_LOCKS = dict()
#
# def __init__(self, device_name, device_id, metric_name, pipe_name=None):
# super(PipePublisher, self).__init__(device_name, device_id, None)
# self.metric_name = metric_name
# self.pipe_name = pipe_name
#
#
# def get_lock(self):
# lock = PipePublisher.PIPE_WRITING_LOCKS.get(self.pipe_name, None)
# if lock is None:
# lock = Lock()
# PipePublisher.PIPE_WRITING_LOCKS[self.pipe_name] = lock
# return lock
#
# def lock(self):
# lock = self.get_lock()
# lock.acquire(True)
#
# def unlock(self):
# lock = self.get_lock()
# lock.release()
#
# def publish(self, buffer_content):
# key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
# out = {"key": key, 'body': buffer_content}
# to_write = json.dumps(out)
#
# self.lock()
#
# self.pipe.write(to_write)
# self.pipe.write("\n")
# self.pipe.flush()
#
# self.unlock()
#
# def connect(self):
# self.lock()
#
# if self.pipe_name is None:
# self.pipe = sys.stdout
# else:
# if os.path.exists(self.pipe_name) and not stat.S_ISFIFO(os.stat(self.pipe_name).st_mode):
# raise Exception("File '%s' exists and is not a named pipe." % self.pipe_name)
# elif not os.path.exists(self.pipe_name):
# os.mkfifo(self.pipe_name)
# self.pipe = open(self.pipe_name, 'a')
#
# self.unlock()
#
# def disconnect(self):
# if self.pipe_name is not None:
# self.pipe.close()
# os.remove(self.pipe_name)
#
# Path: lib/devices.py
# def get_metrics_names(device_type):
# """
# Get metric names for a specific device type.
# :return: list of metric names
# """
# metadata = [metadata for metadata in DEVICE_METADATA if metadata['device_name'] == device_type]
#
# if len(metadata) > 0:
# metrics = metadata[0]['metrics']
# else:
# raise _DeviceNameNotFound("Could not find device name '%s' in metadata" % device_type)
#
# metric_names = []
# for metric in metrics:
# metric_names.append(metric['metric_name'])
#
# return metric_names
#
# def get_supported_devices():
# return [device['device_name'] for device in DEVICE_METADATA]
#
# RABBITMQ_ADDRESS = 'localhost'
#
# MOCK_DEVICE_ID = "mock"
. Output only the next line. | parser.add_argument('-c', '--cloudbrain', default=RABBITMQ_ADDRESS, |
Here is a snippet: <|code_start|> return opts
def main():
opts = get_opts()
mock_data_enabled = opts.mock
device_name = opts.device_name
device_id = opts.device_id
cloudbrain_address = opts.cloudbrain
buffer_size = int(opts.buffer_size)
device_port = opts.device_port
pipe_name = opts.output
publisher = opts.publisher
device_mac = opts.device_mac
step_size = buffer_size
if opts.step_size is not None:
step_size = int(opts.step_size)
run(device_name,
mock_data_enabled,
device_id,
cloudbrain_address,
buffer_size, step_size,
device_port,
pipe_name,
publisher,
device_mac)
def run(device_name="muse",
mock_data_enabled=True,
<|code_end|>
. Write the next line using the current file imports:
import argparse
from lib.PikaPublisher import PikaPublisher
from lib.PipePublisher import PipePublisher
from lib.devices import get_metrics_names, get_supported_devices, RABBITMQ_ADDRESS, MOCK_DEVICE_ID
from cloudbrain.connectors.MuseConnector import MuseConnector as Connector
from lib.connectors.OpenBCIConnector import OpenBCIConnector as Connector
and context from other files:
# Path: lib/PikaPublisher.py
# class PikaPublisher(Publisher):
# """
# Publisher implementation for RabbitMQ via the Pika client
# """
#
# def __init__(self, device_name, device_id, rabbitmq_address, metric_name):
# super(PikaPublisher, self).__init__(device_name, device_id, rabbitmq_address)
# self.connection = None
# self.channel = None
# self.metric_name = metric_name
#
#
# def publish(self, buffer_content):
# key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
# self.channel.basic_publish(exchange=key,
# routing_key=key,
# body=json.dumps(buffer_content),
# properties=pika.BasicProperties(
# delivery_mode=2, # this makes the message persistent
# ))
#
# def connect(self):
# credentials = pika.PlainCredentials('cloudbrain', 'cloudbrain')
#
# self.connection = pika.BlockingConnection(pika.ConnectionParameters(
# host=self.host, credentials=credentials))
# self.channel = self.connection.channel()
#
# key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
# self.channel.exchange_declare(exchange=key,
# type='direct')
#
# def disconnect(self):
# self.connection.close_file()
#
# Path: lib/PipePublisher.py
# class PipePublisher(Publisher):
# """
# Publisher implementation for writing data to pipe
# """
# PIPE_WRITING_LOCKS = dict()
#
# def __init__(self, device_name, device_id, metric_name, pipe_name=None):
# super(PipePublisher, self).__init__(device_name, device_id, None)
# self.metric_name = metric_name
# self.pipe_name = pipe_name
#
#
# def get_lock(self):
# lock = PipePublisher.PIPE_WRITING_LOCKS.get(self.pipe_name, None)
# if lock is None:
# lock = Lock()
# PipePublisher.PIPE_WRITING_LOCKS[self.pipe_name] = lock
# return lock
#
# def lock(self):
# lock = self.get_lock()
# lock.acquire(True)
#
# def unlock(self):
# lock = self.get_lock()
# lock.release()
#
# def publish(self, buffer_content):
# key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
# out = {"key": key, 'body': buffer_content}
# to_write = json.dumps(out)
#
# self.lock()
#
# self.pipe.write(to_write)
# self.pipe.write("\n")
# self.pipe.flush()
#
# self.unlock()
#
# def connect(self):
# self.lock()
#
# if self.pipe_name is None:
# self.pipe = sys.stdout
# else:
# if os.path.exists(self.pipe_name) and not stat.S_ISFIFO(os.stat(self.pipe_name).st_mode):
# raise Exception("File '%s' exists and is not a named pipe." % self.pipe_name)
# elif not os.path.exists(self.pipe_name):
# os.mkfifo(self.pipe_name)
# self.pipe = open(self.pipe_name, 'a')
#
# self.unlock()
#
# def disconnect(self):
# if self.pipe_name is not None:
# self.pipe.close()
# os.remove(self.pipe_name)
#
# Path: lib/devices.py
# def get_metrics_names(device_type):
# """
# Get metric names for a specific device type.
# :return: list of metric names
# """
# metadata = [metadata for metadata in DEVICE_METADATA if metadata['device_name'] == device_type]
#
# if len(metadata) > 0:
# metrics = metadata[0]['metrics']
# else:
# raise _DeviceNameNotFound("Could not find device name '%s' in metadata" % device_type)
#
# metric_names = []
# for metric in metrics:
# metric_names.append(metric['metric_name'])
#
# return metric_names
#
# def get_supported_devices():
# return [device['device_name'] for device in DEVICE_METADATA]
#
# RABBITMQ_ADDRESS = 'localhost'
#
# MOCK_DEVICE_ID = "mock"
, which may include functions, classes, or code. Output only the next line. | device_id=MOCK_DEVICE_ID, |
Next line prediction: <|code_start|>
.. code-block:: jinja
{{ my_dict|filter_items }}
{{ my_dict|filter_items("MY_PREFIX_") }}
{{ my_dict|filter_items("MY_PREFIX_", True) }}
This is most useful in combination with the special
:ref:`_all_env <all_env>` variable that shpkpr injects into every template.
For example, to iterate over only the template variables that start with
``LABEL_`` you could do:
.. code-block:: jinja
{% for k, v in _all_env|filter_items("LABEL_", strip_prefix=True) %}
"{{k}}": "{{v}}",
{% endfor %}
"""
if startswith is not None:
value = [x for x in value.items() if x[0].startswith(startswith)]
else:
value = value.items()
if startswith is not None and strip_prefix:
value = [(x[0].replace(startswith, "", 1), x[1]) for x in value]
return value
<|code_end|>
. Use current file imports:
(from shpkpr import exceptions
import slugify as libslugify)
and context including class names, function names, or small code snippets from other files:
# Path: shpkpr/exceptions.py
# class ShpkprException(ClickException):
# def _args_from_exception(exception):
# def rewrap(exceptions_to_catch, exception_to_rewrap_with=ShpkprException):
# def real_decorator(function):
# def wrapper(*args, **kwargs):
. Output only the next line. | class IntegerRequired(exceptions.ShpkprException): |
Given the following code snippet before the placeholder: <|code_start|># stdlib imports
# third-party imports
# local imports
@pytest.fixture
def runner():
runner = CliRunner()
<|code_end|>
, predict the next line using imports from the current file:
import functools
import pytest
from click.testing import CliRunner
from shpkpr.cli.entrypoint import cli
and context including class names, function names, and sometimes code from other files:
# Path: shpkpr/cli/entrypoint.py
# @click.command(cls=ShpkprCLI, context_settings=CONTEXT_SETTINGS)
# def cli():
# """A tool to manage applications running on Marathon."""
# logger.configure()
. Output only the next line. | return functools.partial(runner.invoke, cli) |
Next line prediction: <|code_start|>def render_json_template(template_path, template_name, **values):
"""Initialise a jinja2 template and render it with the passed-in values.
The template, once rendered is treated as JSON and converted into a python
dictionary. If the template is not valid JSON after rendering then an
exception will be raised.
If a template defines a placeholder for a variable that is not included in
`values` an `UndefinedError` will be raised.
``template_path`` should be the base directory in which your templates are
stored.
``template_name`` is the name (path) of the template being used to render.
``values`` should be regular keyword arguments to the function which will
be passed to the template at render time.
"""
# shpkpr ships with a number of built-in templates for each deployment type,
# so we need to tell jinja where to look for them
here = os.path.dirname(os.path.abspath(__file__))
built_in_template_path = os.path.join(here, "resources", "templates")
# build a new Jinja2 environment so we can inject some custom filters into
# the template we're rendering.
template_env = jinja2.Environment(
undefined=jinja2.StrictUndefined,
loader=jinja2.FileSystemLoader([
built_in_template_path,
template_path,
]),
)
<|code_end|>
. Use current file imports:
(import json
import os
import jinja2
from shpkpr import exceptions
from shpkpr import template_filters)
and context including class names, function names, or small code snippets from other files:
# Path: shpkpr/exceptions.py
# class ShpkprException(ClickException):
# def _args_from_exception(exception):
# def rewrap(exceptions_to_catch, exception_to_rewrap_with=ShpkprException):
# def real_decorator(function):
# def wrapper(*args, **kwargs):
#
# Path: shpkpr/template_filters.py
# def filter_items(value, startswith=None, strip_prefix=False):
# def require_int(value, min=None, max=None):
# def require_float(value, min=None, max=None):
# def slugify(value, *args, **kwargs):
# class IntegerRequired(exceptions.ShpkprException):
# class IntegerTooSmall(exceptions.ShpkprException):
# class IntegerTooLarge(exceptions.ShpkprException):
# class FloatRequired(exceptions.ShpkprException):
# class FloatTooSmall(exceptions.ShpkprException):
# class FloatTooLarge(exceptions.ShpkprException):
. Output only the next line. | template_env.filters['filter_items'] = template_filters.filter_items |
Next line prediction: <|code_start|># third-party imports
# local imports
def _write_template_to_disk(tmpdir, template_name, template_data):
"""shpkpr loads template files from disk normally. This convenience
function writes a template file to disk and returns a (directory, name)
tuple.
"""
with tmpdir.join(template_name).open("w") as f:
f.write(template_data)
return (tmpdir.strpath, template_name)
def test_load_environment_vars_without_prefix(monkeypatch):
monkeypatch.setenv('BANANA', 'bread')
monkeypatch.setenv('STRAWBERRY', 'cheesecake')
monkeypatch.setenv('APPLE_AND_BLACKCURRANT', 'crumble')
<|code_end|>
. Use current file imports:
(import pytest
from shpkpr.template import InvalidJSONError
from shpkpr.template import MissingTemplateError
from shpkpr.template import UndefinedError
from shpkpr.template import load_values_from_environment
from shpkpr.template import render_json_template
from shpkpr.template_filters import IntegerRequired
from shpkpr.template_filters import IntegerTooLarge
from shpkpr.template_filters import IntegerTooSmall
from shpkpr.template_filters import FloatRequired
from shpkpr.template_filters import FloatTooLarge
from shpkpr.template_filters import FloatTooSmall)
and context including class names, function names, or small code snippets from other files:
# Path: shpkpr/template.py
# class InvalidJSONError(exceptions.ShpkprException):
# """Raised when a template can be rendered successfully but does not parse
# as valid JSON afterwards.
# """
# exit_code = 2
#
# def format_message(self):
# return 'Unable to parse rendered template as JSON, check variables'
#
# Path: shpkpr/template.py
# class MissingTemplateError(exceptions.ShpkprException):
# """Raised when a template cannot be loaded for any reason.
# """
# exit_code = 2
#
# def format_message(self):
# return 'Unable to load template from disk: %s' % self.message
#
# Path: shpkpr/template.py
# class UndefinedError(exceptions.ShpkprException):
# """Raised when a template contains a placeholder for a variable that
# wasn't included in the context dictionary passed in at render time.
# """
# exit_code = 2
#
# def format_message(self):
# return 'Unable to render template: %s' % self.message
#
# Path: shpkpr/template.py
# def load_values_from_environment(prefix="", overrides=None):
# """Reads values from the environment.
#
# If ``prefix`` is a non-empty string, only environment variables with the
# given prefix will be returned. The prefix, if given, will be stripped from
# any returned keys.
#
# If ``overrides`` is a dict-like object, the key/value pairs it contains
# will be added to the returned dictionary. Any values specified by
# overrides will take precedence over values pulled from the environment
# where the key names clash.
# """
# values = {}
#
# # add a trailing underscore to the prefix if there isn't one
# prefix = prefix + "_" if prefix and not prefix.endswith("_") else prefix
#
# # load values from the environment
# for k, v in os.environ.items():
# if k.startswith(prefix):
# values[k.replace(prefix, "", 1)] = v
#
# # add override values if any passed in
# try:
# for k, v in overrides.items():
# values[k] = v
# except AttributeError:
# pass
#
# return values
#
# Path: shpkpr/template.py
# @exceptions.rewrap(ValueError, InvalidJSONError)
# @exceptions.rewrap(jinja2.UndefinedError, UndefinedError)
# @exceptions.rewrap(jinja2.TemplateNotFound, MissingTemplateError)
# def render_json_template(template_path, template_name, **values):
# """Initialise a jinja2 template and render it with the passed-in values.
#
# The template, once rendered is treated as JSON and converted into a python
# dictionary. If the template is not valid JSON after rendering then an
# exception will be raised.
#
# If a template defines a placeholder for a variable that is not included in
# `values` an `UndefinedError` will be raised.
#
# ``template_path`` should be the base directory in which your templates are
# stored.
# ``template_name`` is the name (path) of the template being used to render.
# ``values`` should be regular keyword arguments to the function which will
# be passed to the template at render time.
# """
# # shpkpr ships with a number of built-in templates for each deployment type,
# # so we need to tell jinja where to look for them
# here = os.path.dirname(os.path.abspath(__file__))
# built_in_template_path = os.path.join(here, "resources", "templates")
#
# # build a new Jinja2 environment so we can inject some custom filters into
# # the template we're rendering.
# template_env = jinja2.Environment(
# undefined=jinja2.StrictUndefined,
# loader=jinja2.FileSystemLoader([
# built_in_template_path,
# template_path,
# ]),
# )
# template_env.filters['filter_items'] = template_filters.filter_items
# template_env.filters['require_int'] = template_filters.require_int
# template_env.filters['require_float'] = template_filters.require_float
# template_env.filters['slugify'] = template_filters.slugify
#
# template = template_env.get_template(template_name)
# rendered_template = template.render(_all_env=values, **values)
# return json.loads(rendered_template)
#
# Path: shpkpr/template_filters.py
# class IntegerRequired(exceptions.ShpkprException):
# exit_code = 2
#
# Path: shpkpr/template_filters.py
# class IntegerTooLarge(exceptions.ShpkprException):
# exit_code = 2
#
# Path: shpkpr/template_filters.py
# class IntegerTooSmall(exceptions.ShpkprException):
# exit_code = 2
#
# Path: shpkpr/template_filters.py
# class FloatRequired(exceptions.ShpkprException):
# exit_code = 2
#
# Path: shpkpr/template_filters.py
# class FloatTooLarge(exceptions.ShpkprException):
# exit_code = 2
#
# Path: shpkpr/template_filters.py
# class FloatTooSmall(exceptions.ShpkprException):
# exit_code = 2
. Output only the next line. | values = load_values_from_environment() |
Based on the snippet: <|code_start|># third-party imports
# local imports
def _mock_gethostbyname_ex(aliaslist=None, ipaddrlist=None):
"""Returns a mocked socket.gethostbyname_ex function for testing use
"""
if aliaslist is None:
aliaslist = []
if ipaddrlist is None:
ipaddrlist = ['127.0.0.1']
return lambda hostname: (hostname, aliaslist, ipaddrlist)
@mock.patch('socket.gethostbyname_ex')
def test_resolve(gethostbyname_ex):
ipaddrlist = ['127.0.0.1', '127.0.0.2']
gethostbyname_ex.side_effect = _mock_gethostbyname_ex(ipaddrlist=ipaddrlist)
<|code_end|>
, predict the immediate next line with the help of imports:
import mock
from shpkpr.marathon_lb import resolver
and context (classes, functions, sometimes code) from other files:
# Path: shpkpr/marathon_lb/resolver.py
# def resolve(marathon_lb_url):
# def _get_alias_records(hostname):
# def _reassemble_urls(url, hosts):
# def _reassemble_url(url, host):
. Output only the next line. | actual_urls = resolver.resolve('http://foobar.com:1234') |
Given the code snippet: <|code_start|>@options.chronos_client
@options.job_name
@options.output_formatter
def show(chronos_client, job_name, output_formatter, **kw):
"""List application configuration.
"""
jobs = chronos_client.list()
if job_name is None:
payload = jobs
else:
payload = _find_job(jobs, job_name)
logger.info(output_formatter.format(payload))
def _inject_secrets(template, secrets):
"""Given an object containing secrets, inject them into a Chronos job prior
to deployment.
"""
if not template.get("environmentVariables"):
template["environmentVariables"] = []
for key, secret in secrets.items():
template["environmentVariables"].append({
"name": key,
"value": secret,
})
return template
@cli.command('set', short_help='Add or Update a Chronos Job', context_settings=CONTEXT_SETTINGS)
<|code_end|>
, generate the next line using the imports in this file:
import logging
import click
from shpkpr.cli import arguments, options
from shpkpr.cli.entrypoint import CONTEXT_SETTINGS
from shpkpr.template import load_values_from_environment
from shpkpr.template import render_json_template
from shpkpr.vault import resolve_secrets
and context (functions, classes, or occasionally code) from other files:
# Path: shpkpr/cli/arguments.py
# def _env_pairs_to_dict(ctx, param, value):
#
# Path: shpkpr/cli/options.py
# def _validate_chronos_version(ctx, param, value):
# def _validate_authentication(ctx, url, service_name):
# def _validate_marathon_client(ctx, _, __):
# def _validate_chronos_client(ctx, _, __):
# def _validate_vault_client(ctx, _, __):
#
# Path: shpkpr/cli/entrypoint.py
# CONTEXT_SETTINGS = dict(auto_envvar_prefix='SHPKPR')
#
# Path: shpkpr/template.py
# def load_values_from_environment(prefix="", overrides=None):
# """Reads values from the environment.
#
# If ``prefix`` is a non-empty string, only environment variables with the
# given prefix will be returned. The prefix, if given, will be stripped from
# any returned keys.
#
# If ``overrides`` is a dict-like object, the key/value pairs it contains
# will be added to the returned dictionary. Any values specified by
# overrides will take precedence over values pulled from the environment
# where the key names clash.
# """
# values = {}
#
# # add a trailing underscore to the prefix if there isn't one
# prefix = prefix + "_" if prefix and not prefix.endswith("_") else prefix
#
# # load values from the environment
# for k, v in os.environ.items():
# if k.startswith(prefix):
# values[k.replace(prefix, "", 1)] = v
#
# # add override values if any passed in
# try:
# for k, v in overrides.items():
# values[k] = v
# except AttributeError:
# pass
#
# return values
#
# Path: shpkpr/template.py
# @exceptions.rewrap(ValueError, InvalidJSONError)
# @exceptions.rewrap(jinja2.UndefinedError, UndefinedError)
# @exceptions.rewrap(jinja2.TemplateNotFound, MissingTemplateError)
# def render_json_template(template_path, template_name, **values):
# """Initialise a jinja2 template and render it with the passed-in values.
#
# The template, once rendered is treated as JSON and converted into a python
# dictionary. If the template is not valid JSON after rendering then an
# exception will be raised.
#
# If a template defines a placeholder for a variable that is not included in
# `values` an `UndefinedError` will be raised.
#
# ``template_path`` should be the base directory in which your templates are
# stored.
# ``template_name`` is the name (path) of the template being used to render.
# ``values`` should be regular keyword arguments to the function which will
# be passed to the template at render time.
# """
# # shpkpr ships with a number of built-in templates for each deployment type,
# # so we need to tell jinja where to look for them
# here = os.path.dirname(os.path.abspath(__file__))
# built_in_template_path = os.path.join(here, "resources", "templates")
#
# # build a new Jinja2 environment so we can inject some custom filters into
# # the template we're rendering.
# template_env = jinja2.Environment(
# undefined=jinja2.StrictUndefined,
# loader=jinja2.FileSystemLoader([
# built_in_template_path,
# template_path,
# ]),
# )
# template_env.filters['filter_items'] = template_filters.filter_items
# template_env.filters['require_int'] = template_filters.require_int
# template_env.filters['require_float'] = template_filters.require_float
# template_env.filters['slugify'] = template_filters.slugify
#
# template = template_env.get_template(template_name)
# rendered_template = template.render(_all_env=values, **values)
# return json.loads(rendered_template)
#
# Path: shpkpr/vault.py
# def resolve_secrets(vault_client, rendered_template):
# """Parse a rendered template, extract any secret definitions, retrieve them
# from vault and return them to the caller.
#
# This is used in situations where direct Vault support is not available e.g.
# Chronos.
# """
# resolved_secrets = {}
#
# secrets = rendered_template.get("secrets", {})
# for name, definition in secrets.items():
# # parse the secret source and retrieve from vault
# path, key = definition["source"].split(":")
# secret = vault_client.read(path)
# if secret:
# resolved_secrets[name] = secret["data"][key]
# else:
# logger.info("Couldn't locate secret in Vault: {0}".format(path))
#
# return resolved_secrets
. Output only the next line. | @arguments.env_pairs |
Continue the code snippet: <|code_start|># stdlib imports
# third-party imports
# local imports
logger = logging.getLogger(__name__)
@click.group('cron', context_settings=CONTEXT_SETTINGS)
def cli():
"""Manage Chronos jobs.
"""
@cli.command('show', short_help='List Chronos Jobs as json', context_settings=CONTEXT_SETTINGS)
<|code_end|>
. Use current file imports:
import logging
import click
from shpkpr.cli import arguments, options
from shpkpr.cli.entrypoint import CONTEXT_SETTINGS
from shpkpr.template import load_values_from_environment
from shpkpr.template import render_json_template
from shpkpr.vault import resolve_secrets
and context (classes, functions, or code) from other files:
# Path: shpkpr/cli/arguments.py
# def _env_pairs_to_dict(ctx, param, value):
#
# Path: shpkpr/cli/options.py
# def _validate_chronos_version(ctx, param, value):
# def _validate_authentication(ctx, url, service_name):
# def _validate_marathon_client(ctx, _, __):
# def _validate_chronos_client(ctx, _, __):
# def _validate_vault_client(ctx, _, __):
#
# Path: shpkpr/cli/entrypoint.py
# CONTEXT_SETTINGS = dict(auto_envvar_prefix='SHPKPR')
#
# Path: shpkpr/template.py
# def load_values_from_environment(prefix="", overrides=None):
# """Reads values from the environment.
#
# If ``prefix`` is a non-empty string, only environment variables with the
# given prefix will be returned. The prefix, if given, will be stripped from
# any returned keys.
#
# If ``overrides`` is a dict-like object, the key/value pairs it contains
# will be added to the returned dictionary. Any values specified by
# overrides will take precedence over values pulled from the environment
# where the key names clash.
# """
# values = {}
#
# # add a trailing underscore to the prefix if there isn't one
# prefix = prefix + "_" if prefix and not prefix.endswith("_") else prefix
#
# # load values from the environment
# for k, v in os.environ.items():
# if k.startswith(prefix):
# values[k.replace(prefix, "", 1)] = v
#
# # add override values if any passed in
# try:
# for k, v in overrides.items():
# values[k] = v
# except AttributeError:
# pass
#
# return values
#
# Path: shpkpr/template.py
# @exceptions.rewrap(ValueError, InvalidJSONError)
# @exceptions.rewrap(jinja2.UndefinedError, UndefinedError)
# @exceptions.rewrap(jinja2.TemplateNotFound, MissingTemplateError)
# def render_json_template(template_path, template_name, **values):
# """Initialise a jinja2 template and render it with the passed-in values.
#
# The template, once rendered is treated as JSON and converted into a python
# dictionary. If the template is not valid JSON after rendering then an
# exception will be raised.
#
# If a template defines a placeholder for a variable that is not included in
# `values` an `UndefinedError` will be raised.
#
# ``template_path`` should be the base directory in which your templates are
# stored.
# ``template_name`` is the name (path) of the template being used to render.
# ``values`` should be regular keyword arguments to the function which will
# be passed to the template at render time.
# """
# # shpkpr ships with a number of built-in templates for each deployment type,
# # so we need to tell jinja where to look for them
# here = os.path.dirname(os.path.abspath(__file__))
# built_in_template_path = os.path.join(here, "resources", "templates")
#
# # build a new Jinja2 environment so we can inject some custom filters into
# # the template we're rendering.
# template_env = jinja2.Environment(
# undefined=jinja2.StrictUndefined,
# loader=jinja2.FileSystemLoader([
# built_in_template_path,
# template_path,
# ]),
# )
# template_env.filters['filter_items'] = template_filters.filter_items
# template_env.filters['require_int'] = template_filters.require_int
# template_env.filters['require_float'] = template_filters.require_float
# template_env.filters['slugify'] = template_filters.slugify
#
# template = template_env.get_template(template_name)
# rendered_template = template.render(_all_env=values, **values)
# return json.loads(rendered_template)
#
# Path: shpkpr/vault.py
# def resolve_secrets(vault_client, rendered_template):
# """Parse a rendered template, extract any secret definitions, retrieve them
# from vault and return them to the caller.
#
# This is used in situations where direct Vault support is not available e.g.
# Chronos.
# """
# resolved_secrets = {}
#
# secrets = rendered_template.get("secrets", {})
# for name, definition in secrets.items():
# # parse the secret source and retrieve from vault
# path, key = definition["source"].split(":")
# secret = vault_client.read(path)
# if secret:
# resolved_secrets[name] = secret["data"][key]
# else:
# logger.info("Couldn't locate secret in Vault: {0}".format(path))
#
# return resolved_secrets
. Output only the next line. | @options.chronos_client |
Given snippet: <|code_start|>
def _inject_secrets(template, secrets):
"""Given an object containing secrets, inject them into a Chronos job prior
to deployment.
"""
if not template.get("environmentVariables"):
template["environmentVariables"] = []
for key, secret in secrets.items():
template["environmentVariables"].append({
"name": key,
"value": secret,
})
return template
@cli.command('set', short_help='Add or Update a Chronos Job', context_settings=CONTEXT_SETTINGS)
@arguments.env_pairs
@options.chronos_client
@options.vault_client
@options.template_names
@options.template_path
@options.env_prefix
def set(chronos_client, vault_client, template_path, template_names, env_prefix, env_pairs, **kw):
"""Add or Update a job in chronos.
"""
# use the default template if none was specified
if not template_names:
template_names = ["chronos/default/job.json.tmpl"]
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging
import click
from shpkpr.cli import arguments, options
from shpkpr.cli.entrypoint import CONTEXT_SETTINGS
from shpkpr.template import load_values_from_environment
from shpkpr.template import render_json_template
from shpkpr.vault import resolve_secrets
and context:
# Path: shpkpr/cli/arguments.py
# def _env_pairs_to_dict(ctx, param, value):
#
# Path: shpkpr/cli/options.py
# def _validate_chronos_version(ctx, param, value):
# def _validate_authentication(ctx, url, service_name):
# def _validate_marathon_client(ctx, _, __):
# def _validate_chronos_client(ctx, _, __):
# def _validate_vault_client(ctx, _, __):
#
# Path: shpkpr/cli/entrypoint.py
# CONTEXT_SETTINGS = dict(auto_envvar_prefix='SHPKPR')
#
# Path: shpkpr/template.py
# def load_values_from_environment(prefix="", overrides=None):
# """Reads values from the environment.
#
# If ``prefix`` is a non-empty string, only environment variables with the
# given prefix will be returned. The prefix, if given, will be stripped from
# any returned keys.
#
# If ``overrides`` is a dict-like object, the key/value pairs it contains
# will be added to the returned dictionary. Any values specified by
# overrides will take precedence over values pulled from the environment
# where the key names clash.
# """
# values = {}
#
# # add a trailing underscore to the prefix if there isn't one
# prefix = prefix + "_" if prefix and not prefix.endswith("_") else prefix
#
# # load values from the environment
# for k, v in os.environ.items():
# if k.startswith(prefix):
# values[k.replace(prefix, "", 1)] = v
#
# # add override values if any passed in
# try:
# for k, v in overrides.items():
# values[k] = v
# except AttributeError:
# pass
#
# return values
#
# Path: shpkpr/template.py
# @exceptions.rewrap(ValueError, InvalidJSONError)
# @exceptions.rewrap(jinja2.UndefinedError, UndefinedError)
# @exceptions.rewrap(jinja2.TemplateNotFound, MissingTemplateError)
# def render_json_template(template_path, template_name, **values):
# """Initialise a jinja2 template and render it with the passed-in values.
#
# The template, once rendered is treated as JSON and converted into a python
# dictionary. If the template is not valid JSON after rendering then an
# exception will be raised.
#
# If a template defines a placeholder for a variable that is not included in
# `values` an `UndefinedError` will be raised.
#
# ``template_path`` should be the base directory in which your templates are
# stored.
# ``template_name`` is the name (path) of the template being used to render.
# ``values`` should be regular keyword arguments to the function which will
# be passed to the template at render time.
# """
# # shpkpr ships with a number of built-in templates for each deployment type,
# # so we need to tell jinja where to look for them
# here = os.path.dirname(os.path.abspath(__file__))
# built_in_template_path = os.path.join(here, "resources", "templates")
#
# # build a new Jinja2 environment so we can inject some custom filters into
# # the template we're rendering.
# template_env = jinja2.Environment(
# undefined=jinja2.StrictUndefined,
# loader=jinja2.FileSystemLoader([
# built_in_template_path,
# template_path,
# ]),
# )
# template_env.filters['filter_items'] = template_filters.filter_items
# template_env.filters['require_int'] = template_filters.require_int
# template_env.filters['require_float'] = template_filters.require_float
# template_env.filters['slugify'] = template_filters.slugify
#
# template = template_env.get_template(template_name)
# rendered_template = template.render(_all_env=values, **values)
# return json.loads(rendered_template)
#
# Path: shpkpr/vault.py
# def resolve_secrets(vault_client, rendered_template):
# """Parse a rendered template, extract any secret definitions, retrieve them
# from vault and return them to the caller.
#
# This is used in situations where direct Vault support is not available e.g.
# Chronos.
# """
# resolved_secrets = {}
#
# secrets = rendered_template.get("secrets", {})
# for name, definition in secrets.items():
# # parse the secret source and retrieve from vault
# path, key = definition["source"].split(":")
# secret = vault_client.read(path)
# if secret:
# resolved_secrets[name] = secret["data"][key]
# else:
# logger.info("Couldn't locate secret in Vault: {0}".format(path))
#
# return resolved_secrets
which might include code, classes, or functions. Output only the next line. | values = load_values_from_environment(prefix=env_prefix, overrides=env_pairs) |
Using the snippet: <|code_start|> to deployment.
"""
if not template.get("environmentVariables"):
template["environmentVariables"] = []
for key, secret in secrets.items():
template["environmentVariables"].append({
"name": key,
"value": secret,
})
return template
@cli.command('set', short_help='Add or Update a Chronos Job', context_settings=CONTEXT_SETTINGS)
@arguments.env_pairs
@options.chronos_client
@options.vault_client
@options.template_names
@options.template_path
@options.env_prefix
def set(chronos_client, vault_client, template_path, template_names, env_prefix, env_pairs, **kw):
"""Add or Update a job in chronos.
"""
# use the default template if none was specified
if not template_names:
template_names = ["chronos/default/job.json.tmpl"]
values = load_values_from_environment(prefix=env_prefix, overrides=env_pairs)
current_jobs = chronos_client.list()
for template_name in template_names:
<|code_end|>
, determine the next line of code. You have imports:
import logging
import click
from shpkpr.cli import arguments, options
from shpkpr.cli.entrypoint import CONTEXT_SETTINGS
from shpkpr.template import load_values_from_environment
from shpkpr.template import render_json_template
from shpkpr.vault import resolve_secrets
and context (class names, function names, or code) available:
# Path: shpkpr/cli/arguments.py
# def _env_pairs_to_dict(ctx, param, value):
#
# Path: shpkpr/cli/options.py
# def _validate_chronos_version(ctx, param, value):
# def _validate_authentication(ctx, url, service_name):
# def _validate_marathon_client(ctx, _, __):
# def _validate_chronos_client(ctx, _, __):
# def _validate_vault_client(ctx, _, __):
#
# Path: shpkpr/cli/entrypoint.py
# CONTEXT_SETTINGS = dict(auto_envvar_prefix='SHPKPR')
#
# Path: shpkpr/template.py
# def load_values_from_environment(prefix="", overrides=None):
# """Reads values from the environment.
#
# If ``prefix`` is a non-empty string, only environment variables with the
# given prefix will be returned. The prefix, if given, will be stripped from
# any returned keys.
#
# If ``overrides`` is a dict-like object, the key/value pairs it contains
# will be added to the returned dictionary. Any values specified by
# overrides will take precedence over values pulled from the environment
# where the key names clash.
# """
# values = {}
#
# # add a trailing underscore to the prefix if there isn't one
# prefix = prefix + "_" if prefix and not prefix.endswith("_") else prefix
#
# # load values from the environment
# for k, v in os.environ.items():
# if k.startswith(prefix):
# values[k.replace(prefix, "", 1)] = v
#
# # add override values if any passed in
# try:
# for k, v in overrides.items():
# values[k] = v
# except AttributeError:
# pass
#
# return values
#
# Path: shpkpr/template.py
# @exceptions.rewrap(ValueError, InvalidJSONError)
# @exceptions.rewrap(jinja2.UndefinedError, UndefinedError)
# @exceptions.rewrap(jinja2.TemplateNotFound, MissingTemplateError)
# def render_json_template(template_path, template_name, **values):
# """Initialise a jinja2 template and render it with the passed-in values.
#
# The template, once rendered is treated as JSON and converted into a python
# dictionary. If the template is not valid JSON after rendering then an
# exception will be raised.
#
# If a template defines a placeholder for a variable that is not included in
# `values` an `UndefinedError` will be raised.
#
# ``template_path`` should be the base directory in which your templates are
# stored.
# ``template_name`` is the name (path) of the template being used to render.
# ``values`` should be regular keyword arguments to the function which will
# be passed to the template at render time.
# """
# # shpkpr ships with a number of built-in templates for each deployment type,
# # so we need to tell jinja where to look for them
# here = os.path.dirname(os.path.abspath(__file__))
# built_in_template_path = os.path.join(here, "resources", "templates")
#
# # build a new Jinja2 environment so we can inject some custom filters into
# # the template we're rendering.
# template_env = jinja2.Environment(
# undefined=jinja2.StrictUndefined,
# loader=jinja2.FileSystemLoader([
# built_in_template_path,
# template_path,
# ]),
# )
# template_env.filters['filter_items'] = template_filters.filter_items
# template_env.filters['require_int'] = template_filters.require_int
# template_env.filters['require_float'] = template_filters.require_float
# template_env.filters['slugify'] = template_filters.slugify
#
# template = template_env.get_template(template_name)
# rendered_template = template.render(_all_env=values, **values)
# return json.loads(rendered_template)
#
# Path: shpkpr/vault.py
# def resolve_secrets(vault_client, rendered_template):
# """Parse a rendered template, extract any secret definitions, retrieve them
# from vault and return them to the caller.
#
# This is used in situations where direct Vault support is not available e.g.
# Chronos.
# """
# resolved_secrets = {}
#
# secrets = rendered_template.get("secrets", {})
# for name, definition in secrets.items():
# # parse the secret source and retrieve from vault
# path, key = definition["source"].split(":")
# secret = vault_client.read(path)
# if secret:
# resolved_secrets[name] = secret["data"][key]
# else:
# logger.info("Couldn't locate secret in Vault: {0}".format(path))
#
# return resolved_secrets
. Output only the next line. | rendered_template = render_json_template(template_path, template_name, **values) |
Predict the next line for this snippet: <|code_start|> """
if not template.get("environmentVariables"):
template["environmentVariables"] = []
for key, secret in secrets.items():
template["environmentVariables"].append({
"name": key,
"value": secret,
})
return template
@cli.command('set', short_help='Add or Update a Chronos Job', context_settings=CONTEXT_SETTINGS)
@arguments.env_pairs
@options.chronos_client
@options.vault_client
@options.template_names
@options.template_path
@options.env_prefix
def set(chronos_client, vault_client, template_path, template_names, env_prefix, env_pairs, **kw):
"""Add or Update a job in chronos.
"""
# use the default template if none was specified
if not template_names:
template_names = ["chronos/default/job.json.tmpl"]
values = load_values_from_environment(prefix=env_prefix, overrides=env_pairs)
current_jobs = chronos_client.list()
for template_name in template_names:
rendered_template = render_json_template(template_path, template_name, **values)
<|code_end|>
with the help of current file imports:
import logging
import click
from shpkpr.cli import arguments, options
from shpkpr.cli.entrypoint import CONTEXT_SETTINGS
from shpkpr.template import load_values_from_environment
from shpkpr.template import render_json_template
from shpkpr.vault import resolve_secrets
and context from other files:
# Path: shpkpr/cli/arguments.py
# def _env_pairs_to_dict(ctx, param, value):
#
# Path: shpkpr/cli/options.py
# def _validate_chronos_version(ctx, param, value):
# def _validate_authentication(ctx, url, service_name):
# def _validate_marathon_client(ctx, _, __):
# def _validate_chronos_client(ctx, _, __):
# def _validate_vault_client(ctx, _, __):
#
# Path: shpkpr/cli/entrypoint.py
# CONTEXT_SETTINGS = dict(auto_envvar_prefix='SHPKPR')
#
# Path: shpkpr/template.py
# def load_values_from_environment(prefix="", overrides=None):
# """Reads values from the environment.
#
# If ``prefix`` is a non-empty string, only environment variables with the
# given prefix will be returned. The prefix, if given, will be stripped from
# any returned keys.
#
# If ``overrides`` is a dict-like object, the key/value pairs it contains
# will be added to the returned dictionary. Any values specified by
# overrides will take precedence over values pulled from the environment
# where the key names clash.
# """
# values = {}
#
# # add a trailing underscore to the prefix if there isn't one
# prefix = prefix + "_" if prefix and not prefix.endswith("_") else prefix
#
# # load values from the environment
# for k, v in os.environ.items():
# if k.startswith(prefix):
# values[k.replace(prefix, "", 1)] = v
#
# # add override values if any passed in
# try:
# for k, v in overrides.items():
# values[k] = v
# except AttributeError:
# pass
#
# return values
#
# Path: shpkpr/template.py
# @exceptions.rewrap(ValueError, InvalidJSONError)
# @exceptions.rewrap(jinja2.UndefinedError, UndefinedError)
# @exceptions.rewrap(jinja2.TemplateNotFound, MissingTemplateError)
# def render_json_template(template_path, template_name, **values):
# """Initialise a jinja2 template and render it with the passed-in values.
#
# The template, once rendered is treated as JSON and converted into a python
# dictionary. If the template is not valid JSON after rendering then an
# exception will be raised.
#
# If a template defines a placeholder for a variable that is not included in
# `values` an `UndefinedError` will be raised.
#
# ``template_path`` should be the base directory in which your templates are
# stored.
# ``template_name`` is the name (path) of the template being used to render.
# ``values`` should be regular keyword arguments to the function which will
# be passed to the template at render time.
# """
# # shpkpr ships with a number of built-in templates for each deployment type,
# # so we need to tell jinja where to look for them
# here = os.path.dirname(os.path.abspath(__file__))
# built_in_template_path = os.path.join(here, "resources", "templates")
#
# # build a new Jinja2 environment so we can inject some custom filters into
# # the template we're rendering.
# template_env = jinja2.Environment(
# undefined=jinja2.StrictUndefined,
# loader=jinja2.FileSystemLoader([
# built_in_template_path,
# template_path,
# ]),
# )
# template_env.filters['filter_items'] = template_filters.filter_items
# template_env.filters['require_int'] = template_filters.require_int
# template_env.filters['require_float'] = template_filters.require_float
# template_env.filters['slugify'] = template_filters.slugify
#
# template = template_env.get_template(template_name)
# rendered_template = template.render(_all_env=values, **values)
# return json.loads(rendered_template)
#
# Path: shpkpr/vault.py
# def resolve_secrets(vault_client, rendered_template):
# """Parse a rendered template, extract any secret definitions, retrieve them
# from vault and return them to the caller.
#
# This is used in situations where direct Vault support is not available e.g.
# Chronos.
# """
# resolved_secrets = {}
#
# secrets = rendered_template.get("secrets", {})
# for name, definition in secrets.items():
# # parse the secret source and retrieve from vault
# path, key = definition["source"].split(":")
# secret = vault_client.read(path)
# if secret:
# resolved_secrets[name] = secret["data"][key]
# else:
# logger.info("Couldn't locate secret in Vault: {0}".format(path))
#
# return resolved_secrets
, which may contain function names, class names, or code. Output only the next line. | resolved_secrets = resolve_secrets(vault_client, rendered_template) |
Based on the snippet: <|code_start|># third-party imports
# local imports
@pytest.fixture
@freeze_time("2011-11-11T11:11:11.111111Z")
def app_definition(json_fixture):
"""Prepare an app definition for deployment.
This fixture prepares an app and assumes that no existing stack is present
on Marathon.
"""
app_definition_fixture = json_fixture("marathon/bluegreen_app_new")
app_states_fixture = json_fixture("valid_apps")
<|code_end|>
, predict the immediate next line with the help of imports:
import pytest
from freezegun import freeze_time
from shpkpr.deployment.bluegreen.prepare import prepare_app_definition
and context (classes, functions, sometimes code) from other files:
# Path: shpkpr/deployment/bluegreen/prepare.py
# def prepare_app_definition(app_definition, old_app_definition=None, apps_state=None, marathon_info=None):
# """Prepares ``app_definition`` for blue/green deployment by adding the
# necessary Marathon labels and setting the ID and service port.
#
# If an old app definition is passed in, it will be used to correctly pick the
# color and port for the new deployment, otherwise we default to "blue" and
# use the ports as defined in the new app definition.
# """
# # make a deep copy of the new app definition before we start mutating it so
# # we can refer back to properties of the original as needed.
# new_app_definition = copy.deepcopy(app_definition)
#
# if old_app_definition is not None:
# _rotate_colour(new_app_definition, old_app_definition)
# _rotate_port(new_app_definition, old_app_definition)
# else:
# _set_app_colour(new_app_definition, 'blue')
# _init_ports(new_app_definition, apps_state, marathon_info)
#
# new_app_definition['id'] = new_app_definition['id'] + '-' + _get_app_colour(new_app_definition)
# new_app_definition['labels']['HAPROXY_APP_ID'] = app_definition['id']
# new_app_definition['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'] = datetime.now().isoformat()
# new_app_definition['labels']['HAPROXY_DEPLOYMENT_TARGET_INSTANCES'] = str(new_app_definition['instances'])
# return new_app_definition
. Output only the next line. | return prepare_app_definition(app_definition_fixture, None, app_states_fixture['apps']) |
Based on the snippet: <|code_start|># stdlib imports
# third-party imports
# local imports
@pytest.fixture
def runner():
@click.command()
<|code_end|>
, predict the immediate next line with the help of imports:
import functools
import click
import pytest
from click.testing import CliRunner
from shpkpr.cli import options
and context (classes, functions, sometimes code) from other files:
# Path: shpkpr/cli/options.py
# def _validate_chronos_version(ctx, param, value):
# def _validate_authentication(ctx, url, service_name):
# def _validate_marathon_client(ctx, _, __):
# def _validate_chronos_client(ctx, _, __):
# def _validate_vault_client(ctx, _, __):
. Output only the next line. | @options.marathon_client |
Given snippet: <|code_start|># third-party imports
# local imports
@pytest.fixture
def valid_app_definition(json_fixture):
return json_fixture("marathon/bluegreen_app_new")
def test_valid_app(valid_app_definition):
"""Test that an app validates successfully when all the required labels are
present.
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import pytest
from shpkpr.deployment.bluegreen.validate import AppDefinitionValidator
from shpkpr.deployment.bluegreen.validate import ValidationError
and context:
# Path: shpkpr/deployment/bluegreen/validate.py
# class AppDefinitionValidator(object):
# """Ensure that an app definition has the minimum set of labels required for
# a bluegreen deploy.
#
# These labels ensure shpkpr is able to manage and track "groups" of otherwise
# independent/standalone applications that have previously been deployed to
# Marathon.
# """
#
# REQUIRED_LABELS = [
# "HAPROXY_DEPLOYMENT_GROUP"
# ]
#
# ERROR_MESSAGE = "Missing label(s) from application definition: {0}"
#
# def __init__(self, *args, **kwargs):
# pass
#
# def validate(self, app_definition):
# missing_labels = []
# labels = app_definition.get('labels', {})
# for required_label in self.REQUIRED_LABELS:
# if required_label in labels:
# continue
# missing_labels.append(required_label)
# if missing_labels:
# msg = self.ERROR_MESSAGE.format(", ".join(missing_labels))
# raise ValidationError(msg)
#
# Path: shpkpr/deployment/bluegreen/validate.py
# class ValidationError(exceptions.ShpkprException):
# exit_code = 2
#
# def format_message(self):
# return 'Unable to validate deployment: %s' % self.message
which might include code, classes, or functions. Output only the next line. | validator = AppDefinitionValidator() |
Predict the next line for this snippet: <|code_start|># third-party imports
# local imports
@pytest.fixture
def valid_app_definition(json_fixture):
return json_fixture("marathon/bluegreen_app_new")
def test_valid_app(valid_app_definition):
"""Test that an app validates successfully when all the required labels are
present.
"""
validator = AppDefinitionValidator()
try:
validator.validate(valid_app_definition)
<|code_end|>
with the help of current file imports:
import pytest
from shpkpr.deployment.bluegreen.validate import AppDefinitionValidator
from shpkpr.deployment.bluegreen.validate import ValidationError
and context from other files:
# Path: shpkpr/deployment/bluegreen/validate.py
# class AppDefinitionValidator(object):
# """Ensure that an app definition has the minimum set of labels required for
# a bluegreen deploy.
#
# These labels ensure shpkpr is able to manage and track "groups" of otherwise
# independent/standalone applications that have previously been deployed to
# Marathon.
# """
#
# REQUIRED_LABELS = [
# "HAPROXY_DEPLOYMENT_GROUP"
# ]
#
# ERROR_MESSAGE = "Missing label(s) from application definition: {0}"
#
# def __init__(self, *args, **kwargs):
# pass
#
# def validate(self, app_definition):
# missing_labels = []
# labels = app_definition.get('labels', {})
# for required_label in self.REQUIRED_LABELS:
# if required_label in labels:
# continue
# missing_labels.append(required_label)
# if missing_labels:
# msg = self.ERROR_MESSAGE.format(", ".join(missing_labels))
# raise ValidationError(msg)
#
# Path: shpkpr/deployment/bluegreen/validate.py
# class ValidationError(exceptions.ShpkprException):
# exit_code = 2
#
# def format_message(self):
# return 'Unable to validate deployment: %s' % self.message
, which may contain function names, class names, or code. Output only the next line. | except ValidationError as e: |
Next line prediction: <|code_start|># stdlib imports
# third-party imports
# local imports
@pytest.fixture
def haproxy_stats(file_fixture):
csv_data = file_fixture("haproxy/stats.csv")
<|code_end|>
. Use current file imports:
(import collections
import pytest
from shpkpr.marathon_lb.stats import Stats)
and context including class names, function names, or small code snippets from other files:
# Path: shpkpr/marathon_lb/stats.py
# class Stats(object):
#
# def __init__(self, *instance_stats):
# self._raw_stats = instance_stats
#
# def __iter__(self):
# return (r for r in self._stats)
#
# def __getitem__(self, i):
# return self._stats[i]
#
# def __len__(self):
# return len(self._stats)
#
# @cached_property
# def _stats(self):
# """Parse and aggregate all stats files, caching the result
# """
# _stats = []
# for instance_stats in self._raw_stats:
# _stats.extend(self._parse_instance_stats(instance_stats))
# return _stats
#
# def _parse_instance_stats(self, instance_stats):
# """Parse a single HAProxy stats CSV file.
# """
# rows = instance_stats.splitlines()
# headers = rows[0].lstrip('# ').rstrip(',\n').split(',')
# csv_reader = csv.reader(rows[1:], quotechar="'")
#
# Row = namedtuple('Row', headers)
# return [Row(*row[0:-1]) for row in csv_reader if row[0][0] != '#']
. Output only the next line. | return Stats(csv_data) |
Given the following code snippet before the placeholder: <|code_start|># third-party imports
# local imports
@pytest.fixture
def valid_app_definition(json_fixture):
return json_fixture("marathon/bluegreen_app_new")
def test_valid_state(valid_app_definition):
"""Test that an app validates successfully when no existing stacks are
present on Marathon.
"""
marathon_client = mock.Mock()
# we use a lambda here because mock interprets the empty list as list of
# return values or exceptions to be raised and a single empty list causes an
# error. the two options are wrapping the empty list in an outer list, or a
# function that returns the empty list.
marathon_client.list_applications.side_effect = lambda: []
<|code_end|>
, predict the next line using imports from the current file:
import mock
import pytest
from shpkpr.deployment.bluegreen.validate import MarathonStateValidator
from shpkpr.deployment.bluegreen.validate import ValidationError
and context including class names, function names, and sometimes code from other files:
# Path: shpkpr/deployment/bluegreen/validate.py
# class MarathonStateValidator(object):
# """Ensure that the application is in a clean state on Marathon before
# deploying.
#
# This validator checks that exactly one application is running in the target
# deployment group. If more than one application stack (typically both blue
# and green) are detected then it may indicate that another deploy is in
# progress or that a previous failed but wasn't cleaned up. Either way a
# deployment should not continue under those circumstances.
# """
#
# ERROR_MESSAGE = (
# "More than one active application stack detected on Marathon, please "
# "resolve before continuing with your deploy. This may indicate that "
# "another deploy is already in progress or that a previous one has "
# "failed."
# )
#
# def __init__(self, marathon_client, *args, **kwargs):
# self.marathon_client = marathon_client
#
# def validate(self, app_definition):
# def get_deployment_group(app_definition):
# labels = app_definition.get('labels', {})
# return labels.get('HAPROXY_DEPLOYMENT_GROUP')
#
# app_group = get_deployment_group(app_definition)
#
# remote_apps = self.marathon_client.list_applications()
# remote_groups = [get_deployment_group(app) for app in remote_apps]
#
# if Counter(remote_groups)[app_group] > 1:
# raise ValidationError(self.ERROR_MESSAGE)
#
# Path: shpkpr/deployment/bluegreen/validate.py
# class ValidationError(exceptions.ShpkprException):
# exit_code = 2
#
# def format_message(self):
# return 'Unable to validate deployment: %s' % self.message
. Output only the next line. | validator = MarathonStateValidator(marathon_client) |
Predict the next line after this snippet: <|code_start|># third-party imports
# local imports
@pytest.fixture
def valid_app_definition(json_fixture):
return json_fixture("marathon/bluegreen_app_new")
def test_valid_state(valid_app_definition):
"""Test that an app validates successfully when no existing stacks are
present on Marathon.
"""
marathon_client = mock.Mock()
# we use a lambda here because mock interprets the empty list as list of
# return values or exceptions to be raised and a single empty list causes an
# error. the two options are wrapping the empty list in an outer list, or a
# function that returns the empty list.
marathon_client.list_applications.side_effect = lambda: []
validator = MarathonStateValidator(marathon_client)
try:
validator.validate(valid_app_definition)
<|code_end|>
using the current file's imports:
import mock
import pytest
from shpkpr.deployment.bluegreen.validate import MarathonStateValidator
from shpkpr.deployment.bluegreen.validate import ValidationError
and any relevant context from other files:
# Path: shpkpr/deployment/bluegreen/validate.py
# class MarathonStateValidator(object):
# """Ensure that the application is in a clean state on Marathon before
# deploying.
#
# This validator checks that exactly one application is running in the target
# deployment group. If more than one application stack (typically both blue
# and green) are detected then it may indicate that another deploy is in
# progress or that a previous failed but wasn't cleaned up. Either way a
# deployment should not continue under those circumstances.
# """
#
# ERROR_MESSAGE = (
# "More than one active application stack detected on Marathon, please "
# "resolve before continuing with your deploy. This may indicate that "
# "another deploy is already in progress or that a previous one has "
# "failed."
# )
#
# def __init__(self, marathon_client, *args, **kwargs):
# self.marathon_client = marathon_client
#
# def validate(self, app_definition):
# def get_deployment_group(app_definition):
# labels = app_definition.get('labels', {})
# return labels.get('HAPROXY_DEPLOYMENT_GROUP')
#
# app_group = get_deployment_group(app_definition)
#
# remote_apps = self.marathon_client.list_applications()
# remote_groups = [get_deployment_group(app) for app in remote_apps]
#
# if Counter(remote_groups)[app_group] > 1:
# raise ValidationError(self.ERROR_MESSAGE)
#
# Path: shpkpr/deployment/bluegreen/validate.py
# class ValidationError(exceptions.ShpkprException):
# exit_code = 2
#
# def format_message(self):
# return 'Unable to validate deployment: %s' % self.message
. Output only the next line. | except ValidationError as e: |
Predict the next line after this snippet: <|code_start|># third-party imports
# local imports
@pytest.fixture(scope="session")
def env():
# read the required environment variables into a dictionary and assert
# that they're set appropriately
env = {
"SHPKPR_MARATHON_APP_ID": os.environ.get("SHPKPR_MARATHON_APP_ID", None),
"SHPKPR_MARATHON_URL": os.environ.get("SHPKPR_MARATHON_URL", None),
"SHPKPR_MARATHON_LB_URL": os.environ.get("SHPKPR_MARATHON_LB_URL", None),
"SHPKPR_APPLICATION": os.environ.get("SHPKPR_APPLICATION", None),
"SHPKPR_DEPLOYMENT_GROUP": os.environ.get("SHPKPR_DEPLOYMENT_GROUP", None),
"SHPKPR_DOCKER_REPOTAG": os.environ.get("SHPKPR_DOCKER_REPOTAG", None),
"SHPKPR_DOCKER_EXPOSED_PORT": os.environ.get("SHPKPR_DOCKER_EXPOSED_PORT", None),
"SHPKPR_DEPLOY_DOMAIN": os.environ.get("SHPKPR_DEPLOY_DOMAIN", None),
"SHPKPR_CHRONOS_URL": os.environ.get("SHPKPR_CHRONOS_URL", None),
"SHPKPR_CHRONOS_VERSION": os.environ.get("SHPKPR_CHRONOS_VERSION", None),
"SHPKPR_VAULT_ADDR": os.environ.get("SHPKPR_VAULT_ADDR", None),
"SHPKPR_VAULT_TOKEN": os.environ.get("SHPKPR_VAULT_TOKEN", None),
}
assert None not in env.values()
return env
@pytest.fixture
def runner():
runner = CliRunner()
<|code_end|>
using the current file's imports:
import functools
import os
import pytest
from click.testing import CliRunner
from shpkpr.cli.entrypoint import cli
and any relevant context from other files:
# Path: shpkpr/cli/entrypoint.py
# @click.command(cls=ShpkprCLI, context_settings=CONTEXT_SETTINGS)
# def cli():
# """A tool to manage applications running on Marathon."""
# logger.configure()
. Output only the next line. | return functools.partial(runner.invoke, cli) |
Here is a snippet: <|code_start|># third-party imports
# local imports
@pytest.fixture
def app_definition(json_fixture):
"""Prepare an app definition for deployment.
This fixture prepares an app and assumes that an existing stack is present
on Marathon.
"""
new_app_definition_fixture = json_fixture("marathon/bluegreen_app_new")
old_app_definition_fixture = json_fixture("marathon/bluegreen_app_existing")
app_states_fixture = json_fixture("valid_apps")
info_definition_fixture = json_fixture("marathon/info")
<|code_end|>
. Write the next line using the current file imports:
import pytest
from shpkpr.deployment.bluegreen.prepare import prepare_app_definition
and context from other files:
# Path: shpkpr/deployment/bluegreen/prepare.py
# def prepare_app_definition(app_definition, old_app_definition=None, apps_state=None, marathon_info=None):
# """Prepares ``app_definition`` for blue/green deployment by adding the
# necessary Marathon labels and setting the ID and service port.
#
# If an old app definition is passed in, it will be used to correctly pick the
# color and port for the new deployment, otherwise we default to "blue" and
# use the ports as defined in the new app definition.
# """
# # make a deep copy of the new app definition before we start mutating it so
# # we can refer back to properties of the original as needed.
# new_app_definition = copy.deepcopy(app_definition)
#
# if old_app_definition is not None:
# _rotate_colour(new_app_definition, old_app_definition)
# _rotate_port(new_app_definition, old_app_definition)
# else:
# _set_app_colour(new_app_definition, 'blue')
# _init_ports(new_app_definition, apps_state, marathon_info)
#
# new_app_definition['id'] = new_app_definition['id'] + '-' + _get_app_colour(new_app_definition)
# new_app_definition['labels']['HAPROXY_APP_ID'] = app_definition['id']
# new_app_definition['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'] = datetime.now().isoformat()
# new_app_definition['labels']['HAPROXY_DEPLOYMENT_TARGET_INSTANCES'] = str(new_app_definition['instances'])
# return new_app_definition
, which may include functions, classes, or code. Output only the next line. | return prepare_app_definition(new_app_definition_fixture, |
Based on the snippet: <|code_start|> # app, otherwise we treat it as a list of multiple applications to be
# deployed together.
if isinstance(application_payload, (list, tuple)):
path = "/v2/apps/"
else:
path = "/v2/apps/" + application_payload['id']
params = {"force": "true"} if force else {}
response = self._make_request('PUT', path, params=params, json=application_payload)
if response.status_code in [200, 201]:
deployment = response.json()
return MarathonDeployment(self, deployment['deploymentId'])
# raise an appropriate error if something went wrong
if response.status_code == 409:
deployment_ids = ', '.join([x['id'] for x in response.json()['deployments']])
raise ClientError("App(s) locked by one or more deployments: %s" % deployment_ids)
raise ClientError("Unknown Marathon error: %s\n\n%s" % (response.status_code, response.text))
def get_deployment(self, deployment_id):
"""Returns detailed information for a single deploy
"""
response = self._make_request('GET', "/v2/deployments")
if response.status_code == 200:
for deployment in response.json():
if deployment['id'] == deployment_id:
return deployment
<|code_end|>
, predict the immediate next line with the help of imports:
import requests
from .deployment import DeploymentNotFound
from .deployment import MarathonDeployment
from shpkpr import exceptions
and context (classes, functions, sometimes code) from other files:
# Path: shpkpr/marathon/deployment.py
# class DeploymentNotFound(exceptions.ShpkprException):
# pass
#
# Path: shpkpr/marathon/deployment.py
# class MarathonDeployment(object):
# """Marathon deployment object.
#
# Allows the caller to check a deployment's status and cancel or rollback a
# deployment.
# """
#
# def __init__(self, client, deployment_id):
# self._client = client
# self.deployment_id = deployment_id
#
# def check(self):
# """Check if this deployment has completed.
#
# This method returns a True when a deployment is complete, False when a
# deployment is in progress.
# """
# try:
# self._client.get_deployment(self.deployment_id)
# except DeploymentNotFound:
# # if the deployment isn't listed, then we can consider the deployment as completed
# # successfully and return True. According to the marathon docs: "If the deployment
# # is gone from the list of deployments, then this means it is finished."
# return True
#
# return False
#
# def wait(self, timeout=900, check_interval_secs=5):
# """Waits for a deployment to finish
#
# If a deployment completes successfully, True is returned, if it fails
# for any reason a DeploymentFailed exception is raised. If the
# deployment does not complete within ``timeout`` seconds, a
# DeploymentFailed exception is raised.
#
# ``check_interval_secs`` is used to determine the delay between
# subsequent checks. The default of 5 seconds is adequate for normal
# use.
# """
# _started = datetime.datetime.utcnow()
#
# while True:
#
# time.sleep(check_interval_secs)
#
# # check if the deployment has completed, if it has we return True
# if self.check():
# return True
#
# # if the specified timeout has elapsed we raise a DeploymentFailed error
# delta = datetime.datetime.utcnow() - _started
# if delta.total_seconds() > timeout:
# raise DeploymentFailed('Timed out: %d seconds' % timeout)
#
# Path: shpkpr/exceptions.py
# class ShpkprException(ClickException):
# def _args_from_exception(exception):
# def rewrap(exceptions_to_catch, exception_to_rewrap_with=ShpkprException):
# def real_decorator(function):
# def wrapper(*args, **kwargs):
. Output only the next line. | raise DeploymentNotFound(deployment_id) |
Using the snippet: <|code_start|> applications = response.json()['apps']
application_list = []
for app in applications:
application_list.append(app)
return application_list
raise ClientError("Unknown Marathon error: %s\n\n%s" % (response.status_code, response.text))
def deploy(self, application_payload, force=False):
"""Deploys the given application(s) to Marathon.
"""
# if the payload is a list and is one element long then we extract it
# as we want to treat single app deploys differently. Doing this here
# helps keep the cmd implementation clean.
if isinstance(application_payload, (list, tuple)) and len(application_payload) == 1:
application_payload = application_payload[0]
# if at this point our payload is a dict then we treat it as a single
# app, otherwise we treat it as a list of multiple applications to be
# deployed together.
if isinstance(application_payload, (list, tuple)):
path = "/v2/apps/"
else:
path = "/v2/apps/" + application_payload['id']
params = {"force": "true"} if force else {}
response = self._make_request('PUT', path, params=params, json=application_payload)
if response.status_code in [200, 201]:
deployment = response.json()
<|code_end|>
, determine the next line of code. You have imports:
import requests
from .deployment import DeploymentNotFound
from .deployment import MarathonDeployment
from shpkpr import exceptions
and context (class names, function names, or code) available:
# Path: shpkpr/marathon/deployment.py
# class DeploymentNotFound(exceptions.ShpkprException):
# pass
#
# Path: shpkpr/marathon/deployment.py
# class MarathonDeployment(object):
# """Marathon deployment object.
#
# Allows the caller to check a deployment's status and cancel or rollback a
# deployment.
# """
#
# def __init__(self, client, deployment_id):
# self._client = client
# self.deployment_id = deployment_id
#
# def check(self):
# """Check if this deployment has completed.
#
# This method returns a True when a deployment is complete, False when a
# deployment is in progress.
# """
# try:
# self._client.get_deployment(self.deployment_id)
# except DeploymentNotFound:
# # if the deployment isn't listed, then we can consider the deployment as completed
# # successfully and return True. According to the marathon docs: "If the deployment
# # is gone from the list of deployments, then this means it is finished."
# return True
#
# return False
#
# def wait(self, timeout=900, check_interval_secs=5):
# """Waits for a deployment to finish
#
# If a deployment completes successfully, True is returned, if it fails
# for any reason a DeploymentFailed exception is raised. If the
# deployment does not complete within ``timeout`` seconds, a
# DeploymentFailed exception is raised.
#
# ``check_interval_secs`` is used to determine the delay between
# subsequent checks. The default of 5 seconds is adequate for normal
# use.
# """
# _started = datetime.datetime.utcnow()
#
# while True:
#
# time.sleep(check_interval_secs)
#
# # check if the deployment has completed, if it has we return True
# if self.check():
# return True
#
# # if the specified timeout has elapsed we raise a DeploymentFailed error
# delta = datetime.datetime.utcnow() - _started
# if delta.total_seconds() > timeout:
# raise DeploymentFailed('Timed out: %d seconds' % timeout)
#
# Path: shpkpr/exceptions.py
# class ShpkprException(ClickException):
# def _args_from_exception(exception):
# def rewrap(exceptions_to_catch, exception_to_rewrap_with=ShpkprException):
# def real_decorator(function):
# def wrapper(*args, **kwargs):
. Output only the next line. | return MarathonDeployment(self, deployment['deploymentId']) |
Predict the next line after this snippet: <|code_start|>
@mock.patch("shpkpr.cli.options.hvac.Client")
def test_resolve_secrets(mock_vault_client_class):
mock_vault_data = {
'secret/my_project/my_path': {
'my_key': 'some_secret_info'
}
}
mock_rendered_template = {
'secrets': {
'MY_SECRET_USING_REL_PATH': {'source': 'my_project/my_path:my_key'},
'MY_SECRET_USING_FULL_PATH': {'source': 'secret/my_project/my_path:my_key'},
}
}
def read_vault_data(path):
secrets = mock_vault_data.get(path, None)
return dict(data=secrets) if secrets else None
mock_vault_client = mock_vault_client_class.return_value
mock_vault_client.read.side_effect = read_vault_data
<|code_end|>
using the current file's imports:
import mock
from shpkpr.vault import resolve_secrets
and any relevant context from other files:
# Path: shpkpr/vault.py
# def resolve_secrets(vault_client, rendered_template):
# """Parse a rendered template, extract any secret definitions, retrieve them
# from vault and return them to the caller.
#
# This is used in situations where direct Vault support is not available e.g.
# Chronos.
# """
# resolved_secrets = {}
#
# secrets = rendered_template.get("secrets", {})
# for name, definition in secrets.items():
# # parse the secret source and retrieve from vault
# path, key = definition["source"].split(":")
# secret = vault_client.read(path)
# if secret:
# resolved_secrets[name] = secret["data"][key]
# else:
# logger.info("Couldn't locate secret in Vault: {0}".format(path))
#
# return resolved_secrets
. Output only the next line. | result = resolve_secrets(mock_vault_client, mock_rendered_template) |
Continue the code snippet: <|code_start|># Copyright 2017--2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
logger = logging.getLogger(__name__)
class LearningRateScheduler:
"""
:param base_lr: Base learning rate.
:param warmup: Number of initial updates during which the learning rate
linearly increases.
:param t_scale: Scaling factor for step number.
"""
def __init__(self, base_lr: float = 1.0, warmup: int = 0, t_scale: float = 1.0) -> None:
self.base_lr = base_lr
<|code_end|>
. Use current file imports:
import logging
import sockeye.constants as C
from math import sqrt
from typing import Optional
from sockeye.utils import check_condition
and context (classes, functions, or code) from other files:
# Path: sockeye/utils.py
# def check_condition(condition: bool, error_message: str):
# """
# Check the condition and if it is not met, exit with the given error message
# and error_code, similar to assertions.
#
# :param condition: Condition to check.
# :param error_message: Error message to show to the user.
# """
# if not condition:
# raise SockeyeError(error_message)
. Output only the next line. | check_condition(warmup >= 0, "warmup needs to be >= 0.") |
Here is a snippet: <|code_start|># Copyright 2017--2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
def test_batch_scorer():
# TODO: make this a useful test
batch = 2
seq = 4
nh = 6
logits = pt.ones(batch, seq, nh)
label = pt.ones(batch, seq).to(pt.long)
length_ratio = pt.ones(batch, )
source_length = pt.randint(0, seq, (batch,)).to(pt.float32)
target_length = source_length
<|code_end|>
. Write the next line using the current file imports:
import torch as pt
import sockeye.scoring
from sockeye.beam_search import CandidateScorer
and context from other files:
# Path: sockeye/beam_search.py
# class CandidateScorer(pt.nn.Module):
#
# def __init__(self,
# length_penalty_alpha: float = 1.0,
# length_penalty_beta: float = 0.0,
# brevity_penalty_weight: float = 0.0) -> None:
# super().__init__()
# self._lp = LengthPenalty(alpha=length_penalty_alpha, beta=length_penalty_beta)
# self._bp = None # type: Optional[BrevityPenalty]
# if brevity_penalty_weight > 0.0:
# self._bp = BrevityPenalty(weight=brevity_penalty_weight)
#
# def forward(self, scores, lengths, reference_lengths):
# lp = self._lp(lengths)
# if self._bp is not None:
# bp = self._bp(lengths, reference_lengths)
# else:
# bp = 0.0
# if isinstance(scores, (int, float)):
# return scores / lp - bp
# else:
# if isinstance(lp, pt.Tensor):
# lp = lp.to(scores.dtype)
# if isinstance(bp, pt.Tensor):
# bp = bp.to(scores.dtype)
# return (scores.squeeze(1) / lp - bp).unsqueeze(1)
#
# def unnormalize(self, scores, lengths, reference_lengths):
# bp = 0.0 if self._bp is None else self._bp(lengths, reference_lengths)
# if isinstance(scores, (int, float)):
# return (scores + bp) * self._lp(lengths)
# else:
# return ((scores.squeeze(1) + bp) * self._lp(lengths)).unsqueeze(1)
, which may include functions, classes, or code. Output only the next line. | b = sockeye.scoring.BatchScorer(scorer=CandidateScorer(1.0, 0.0, 0.0), |
Using the snippet: <|code_start|># Copyright 2017--2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
test_cases = [(
["this is a test", "another test case"], ["this is a test case", "another test case"], 0.9444444394753087,
0.928571423622449, 0.9338624338620563),
(["this is a single test case"], ["this is a single test case"], 0.999999995, 0.999999995,
0.9999999999995),
(["single test case"], ["another single test case"], 0.8571428522448981, 0.7999999952000001,
0.8241758241756372),
(["no overlap between sentences"], ["this is another test case"], 0.0, 0.0, 0.0),
(["exact match in the test case", "another exact match"],
["exact match in the test case", "another exact match"], 0.999999995, 0.999999995, 0.9999999999995)]
@pytest.mark.parametrize("hypotheses, references, rouge1_score, rouge2_score, rougel_score", test_cases)
def test_rouge_1(hypotheses, references, rouge1_score, rouge2_score, rougel_score):
<|code_end|>
, determine the next line of code. You have imports:
import pytest
from sockeye_contrib import rouge
and context (class names, function names, or code) available:
# Path: sockeye_contrib/rouge.py
# def rouge(hypotheses, references):
# """Calculates average rouge scores for a list of hypotheses and
# references"""
#
# # Filter out hyps that are of 0 length
# # hyps_and_refs = zip(hypotheses, references)
# # hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0]
# # hypotheses, references = zip(*hyps_and_refs)
#
# # Calculate ROUGE-1 F1, precision, recall scores
# rouge_1 = [
# rouge_n([hyp], [ref], 1) for hyp, ref in zip(hypotheses, references)
# ]
# rouge_1_f, rouge_1_p, rouge_1_r = map(np.mean, zip(*rouge_1))
#
# # Calculate ROUGE-2 F1, precision, recall scores
# rouge_2 = [
# rouge_n([hyp], [ref], 2) for hyp, ref in zip(hypotheses, references)
# ]
# rouge_2_f, rouge_2_p, rouge_2_r = map(np.mean, zip(*rouge_2))
#
# # Calculate ROUGE-L F1, precision, recall scores
# rouge_l = [
# rouge_l_sentence_level([hyp], [ref])
# for hyp, ref in zip(hypotheses, references)
# ]
# rouge_l_f, rouge_l_p, rouge_l_r = map(np.mean, zip(*rouge_l))
#
# return {
# "rouge_1/f_score": rouge_1_f,
# "rouge_1/r_score": rouge_1_r,
# "rouge_1/p_score": rouge_1_p,
# "rouge_2/f_score": rouge_2_f,
# "rouge_2/r_score": rouge_2_r,
# "rouge_2/p_score": rouge_2_p,
# "rouge_l/f_score": rouge_l_f,
# "rouge_l/r_score": rouge_l_r,
# "rouge_l/p_score": rouge_l_p,
# }
. Output only the next line. | rouge_score = rouge.rouge_1(hypotheses, references) |
Continue the code snippet: <|code_start|> 'encoder.layers.1.pre_ff.layer_norm.beta',
'encoder.layers.1.pre_ff.layer_norm.gamma',
'encoder.layers.1.pre_self_attention.layer_norm.beta',
'encoder.layers.1.pre_self_attention.layer_norm.gamma',
'encoder.layers.1.self_attention.ff_in.weight',
'encoder.layers.1.self_attention.ff_out.weight',
'encoder.layers.2.ff.ff1.bias',
'encoder.layers.2.ff.ff1.weight',
'encoder.layers.2.ff.ff2.bias',
'encoder.layers.2.ff.ff2.weight',
'encoder.layers.2.pre_ff.layer_norm.beta',
'encoder.layers.2.pre_ff.layer_norm.gamma',
'encoder.layers.2.pre_self_attention.layer_norm.beta',
'encoder.layers.2.pre_self_attention.layer_norm.gamma',
'encoder.layers.2.self_attention.ff_in.weight',
'encoder.layers.2.self_attention.ff_out.weight',
]
@pytest.mark.parametrize("param_names, strategy, expected_fixed_param_names", [
(ALL_PARAMS, C.FIXED_PARAM_STRATEGY_ALL_EXCEPT_DECODER, ALL_EXCEPT_DECODER_PARAMS),
(ALL_PARAMS, C.FIXED_PARAM_STRATEGY_ALL_EXCEPT_OUTER_LAYERS, ALL_EXCEPT_OUTER_LAYERS_PARAMS),
(ALL_PARAMS, C.FIXED_PARAM_STRATEGY_ALL_EXCEPT_EMBEDDINGS, ALL_EXCEPT_EMBED_PARAMS),
(ALL_PARAMS, C.FIXED_PARAM_STRATEGY_ALL_EXCEPT_OUTPUT_PROJ, ALL_EXCEPT_OUTPUT_PROJ_PARAMS),
])
def test_fixed_param_strategy(param_names, strategy, expected_fixed_param_names):
config = mock.Mock()
config.config_encoder.num_layers = NUM_LAYERS
config.config_decoder.num_layers = NUM_LAYERS
params = {name: None for name in ALL_PARAMS}
<|code_end|>
. Use current file imports:
from unittest import mock
from sockeye.train import fixed_param_names_from_strategy
import pytest
import sockeye.constants as C
and context (classes, functions, or code) from other files:
# Path: sockeye/train.py
# def fixed_param_names_from_strategy(config: model.ModelConfig,
# params: Dict[str, torch.nn.parameter.Parameter],
# strategy: str) -> List[str]:
# """
# Generate a fixed parameter list given a list of all parameter names and
# a strategy.
# """
# # Number of encoder/decoder layers in model.
# num_encoder_layers = config.config_encoder.num_layers
# num_decoder_layers = config.config_decoder.num_layers
#
# def is_fixed(name: str) -> bool:
# if strategy == C.FIXED_PARAM_STRATEGY_ALL_EXCEPT_DECODER:
# # Any decoder layer.
# return not name.startswith(C.DECODER_PREFIX)
# if strategy == C.FIXED_PARAM_STRATEGY_ALL_EXCEPT_OUTER_LAYERS:
# # First and last encoder and decoder layers.
# first_encoder_prefix = f'{C.ENCODER_PREFIX}.layers.{0}'
# last_encoder_prefix = f'{C.ENCODER_PREFIX}.layers.{num_encoder_layers - 1}'
# first_decoder_prefix = f'{C.DECODER_PREFIX}.layers.{0}'
# last_decoder_prefix = f'{C.DECODER_PREFIX}.layers.{num_decoder_layers - 1}'
# return not (name.startswith(first_encoder_prefix) or
# name.startswith(last_encoder_prefix) or
# name.startswith(first_decoder_prefix) or
# name.startswith(last_decoder_prefix))
# if strategy == C.FIXED_PARAM_STRATEGY_ALL_EXCEPT_EMBEDDINGS:
# # Any type of learned embedding.
# return not (name.startswith(C.SOURCE_EMBEDDING_PREFIX) or name.startswith(C.TARGET_EMBEDDING_PREFIX))
# if strategy == C.FIXED_PARAM_STRATEGY_ALL_EXCEPT_OUTPUT_PROJ:
# # Target output projection.
# return not name.startswith(C.DEFAULT_OUTPUT_LAYER_PREFIX)
# if strategy == C.FIXED_PARAM_STRATEGY_ALL_EXCEPT_FEED_FORWARD:
# return not (name.endswith("ff.ff1.bias") or name.endswith("ff.ff1.weight") or
# name.endswith("ff.ff2.bias") or name.endswith("ff.ff2.weight"))
# if strategy == C.FIXED_PARAM_STRATEGY_ENCODER_AND_SOURCE_EMBEDDINGS:
# return name.startswith(C.ENCODER_PREFIX) or name.startswith(C.SOURCE_EMBEDDING_PREFIX)
# if strategy == C.FIXED_PARAM_STRATEGY_ENCODER_HALF_AND_SOURCE_EMBEDDINGS:
# if name.startswith(C.ENCODER_PREFIX):
# for i in range(num_encoder_layers // 2):
# if name.startswith(f"{C.ENCODER_PREFIX}.layers.{i}"):
# return True
# return name.startswith(C.SOURCE_EMBEDDING_PREFIX)
# raise ValueError("Unknown fixed parameter strategy: %s" % strategy)
#
# return [name for name in params if is_fixed(name)]
. Output only the next line. | fixed_param_names = fixed_param_names_from_strategy(config, params, strategy) |
Given the following code snippet before the placeholder: <|code_start|> assert raw_vocab == Counter({"a": 1, "b": 1, "c": 2, "d": 1, "e": 1})
test_vocab = [
# Example 1
(["one two three", "one two three"], None, 1,
{"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3, "two": 4, "three": 5, "one": 6}),
(["one two three", "one two three"], 3, 1,
{"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3, "two": 4, "three": 5, "one": 6}),
(["one two three", "one two three"], 3, 2,
{"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3, "two": 4, "three": 5, "one": 6}),
(["one two three", "one two three"], 2, 2,
{"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3, "two": 4, "three": 5}),
# Example 2
(["one one two three ", "one two three"], 3, 1,
{"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3, "one": 4, "two": 5, "three": 6}),
(["one one two three ", "one two three"], 3, 2,
{"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3, "one": 4, "two": 5, "three": 6}),
(["one one two three ", "one two three"], 3, 3,
{"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3, "one": 4}),
(["one one two three ", "one two three"], 2, 1,
{"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3, "one": 4, "two": 5}),
# Example 3 (including special symbols)
(["one two three <s> <s>", "one two three <s> <s>"], None, 1,
{"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3, "two": 4, "three": 5, "one": 6}),
]
@pytest.mark.parametrize("data,size,min_count,expected", test_vocab)
def test_build_vocab(data, size, min_count, expected):
<|code_end|>
, predict the next line using imports from the current file:
import pytest
import sockeye.constants as C
from unittest import mock
from collections import Counter
from sockeye.vocab import (build_vocab, get_ordered_tokens_from_vocab, is_valid_vocab, \
_get_sorted_source_vocab_fnames, count_tokens)
and context including class names, function names, and sometimes code from other files:
# Path: sockeye/vocab.py
# def build_vocab(data: Iterable[str], num_words: Optional[int] = None, min_count: int = 1,
# pad_to_multiple_of: Optional[int] = None) -> Vocab:
# """
# Creates a vocabulary mapping from words to ids. Increasing integer ids are assigned by word frequency,
# using lexical sorting as a tie breaker. The only exception to this are special symbols such as the padding symbol
# (PAD).
#
# :param data: Sequence of sentences containing whitespace-delimited tokens.
# :param num_words: Optional maximum number of words in the vocabulary.
# :param min_count: Minimum occurrences of words to be included in the vocabulary.
# :param pad_to_multiple_of: If not None, pads the vocabulary to a size that is the next multiple of this int.
# :return: Word-to-id mapping.
# """
# raw_vocab = count_tokens(data)
# return build_pruned_vocab(raw_vocab=raw_vocab,
# num_words=num_words,
# min_count=min_count,
# pad_to_multiple_of=pad_to_multiple_of)
#
# def get_ordered_tokens_from_vocab(vocab: Vocab) -> List[str]:
# """
# Returns the list of tokens in a vocabulary, ordered by increasing vocabulary id.
#
# :param vocab: Input vocabulary.
# :return: List of tokens.
# """
# return [token for token, token_id in sorted(vocab.items(), key=lambda i: i[1])]
#
# def is_valid_vocab(vocab: Vocab) -> bool:
# """
# Checks if a vocabulary is valid. We define valid as:
# 1. All indices from 0 to num_words - 1 are present without duplicates.
# 2. PAD_SYMBOL has word id 0, UNK_SYMBOL has word id 1, BOS_SYMBOL has word id 2, EOS_SYMBOL has word id 3.
# """
# if vocab[C.PAD_SYMBOL] != C.PAD_ID:
# logger.warning("PAD_SYMBOL does not have word id 0 in vocabulary.")
# return False
# if vocab[C.UNK_SYMBOL] != C.UNK_ID:
# logger.warning("UNK_SYMBOL does not have word id 1 in vocabulary.")
# return False
# if vocab[C.BOS_SYMBOL] != C.BOS_ID:
# logger.warning("BOS_SYMBOL does not have word id 2 in vocabulary.")
# return False
# if vocab[C.EOS_SYMBOL] != C.EOS_ID:
# logger.warning("EOS_SYMBOL does not have word id 3 in vocabulary.")
# return False
# word_ids = []
# for word, word_id in vocab.items():
# word_ids.append(word_id)
# word_ids_set = set(word_ids)
# if len(word_ids_set) != len(word_ids):
# logger.warning("Duplicate word_ids in vocabulary.")
# return False
#
# expected_word_ids = set(range(0, len(vocab)))
# if expected_word_ids != word_ids_set:
# logger.warning("Not all word_ids from 0 to len(vocabulary) present in vocabulary.")
# return False
#
# return True
#
# def _get_sorted_source_vocab_fnames(folder) -> List[str]:
# _key = lambda x: int(x.split('.', 3)[-2])
# return sorted([f for f in os.listdir(folder) if f.startswith(C.VOCAB_SRC_PREFIX)], key=_key)
#
# def count_tokens(data: Iterable[str]) -> Counter:
# """
# Count whitespace delimited tokens.
#
# :param data: Sequence of sentences containing whitespace-delimited tokens.
# :return: Token counter.
# """
# return Counter(token for line in data for token in utils.get_tokens(line))
. Output only the next line. | vocab = build_vocab(data=data, num_words=size, min_count=min_count) |
Using the snippet: <|code_start|> data = [" ".join('word%d' % i for i in range(num_types))]
size = None
min_count = 1
vocab = build_vocab(data, size, min_count, pad_to_multiple_of=pad_to_multiple_of)
assert len(vocab) == expected_vocab_size
test_constants = [
# Example 1
(["one two three", "one two three"], 3, 1, C.VOCAB_SYMBOLS),
(["one two three", "one two three"], 3, 2, C.VOCAB_SYMBOLS),
(["one two three", "one two three"], 2, 2, C.VOCAB_SYMBOLS),
# Example 2
(["one one two three ", "one two three"], 3, 1, C.VOCAB_SYMBOLS),
(["one one two three ", "one two three"], 3, 2, C.VOCAB_SYMBOLS),
(["one one two three ", "one two three"], 3, 3, C.VOCAB_SYMBOLS),
(["one one two three ", "one two three"], 2, 1, C.VOCAB_SYMBOLS),
]
@pytest.mark.parametrize("data,size,min_count,constants", test_constants)
def test_constants_in_vocab(data, size, min_count, constants):
vocab = build_vocab(data, size, min_count)
for const in constants:
assert const in vocab
@pytest.mark.parametrize("vocab, expected_output", [({"<pad>": 0, "a": 4, "b": 2}, ["<pad>", "b", "a"]),
({}, [])])
def test_get_ordered_tokens_from_vocab(vocab, expected_output):
<|code_end|>
, determine the next line of code. You have imports:
import pytest
import sockeye.constants as C
from unittest import mock
from collections import Counter
from sockeye.vocab import (build_vocab, get_ordered_tokens_from_vocab, is_valid_vocab, \
_get_sorted_source_vocab_fnames, count_tokens)
and context (class names, function names, or code) available:
# Path: sockeye/vocab.py
# def build_vocab(data: Iterable[str], num_words: Optional[int] = None, min_count: int = 1,
# pad_to_multiple_of: Optional[int] = None) -> Vocab:
# """
# Creates a vocabulary mapping from words to ids. Increasing integer ids are assigned by word frequency,
# using lexical sorting as a tie breaker. The only exception to this are special symbols such as the padding symbol
# (PAD).
#
# :param data: Sequence of sentences containing whitespace-delimited tokens.
# :param num_words: Optional maximum number of words in the vocabulary.
# :param min_count: Minimum occurrences of words to be included in the vocabulary.
# :param pad_to_multiple_of: If not None, pads the vocabulary to a size that is the next multiple of this int.
# :return: Word-to-id mapping.
# """
# raw_vocab = count_tokens(data)
# return build_pruned_vocab(raw_vocab=raw_vocab,
# num_words=num_words,
# min_count=min_count,
# pad_to_multiple_of=pad_to_multiple_of)
#
# def get_ordered_tokens_from_vocab(vocab: Vocab) -> List[str]:
# """
# Returns the list of tokens in a vocabulary, ordered by increasing vocabulary id.
#
# :param vocab: Input vocabulary.
# :return: List of tokens.
# """
# return [token for token, token_id in sorted(vocab.items(), key=lambda i: i[1])]
#
# def is_valid_vocab(vocab: Vocab) -> bool:
# """
# Checks if a vocabulary is valid. We define valid as:
# 1. All indices from 0 to num_words - 1 are present without duplicates.
# 2. PAD_SYMBOL has word id 0, UNK_SYMBOL has word id 1, BOS_SYMBOL has word id 2, EOS_SYMBOL has word id 3.
# """
# if vocab[C.PAD_SYMBOL] != C.PAD_ID:
# logger.warning("PAD_SYMBOL does not have word id 0 in vocabulary.")
# return False
# if vocab[C.UNK_SYMBOL] != C.UNK_ID:
# logger.warning("UNK_SYMBOL does not have word id 1 in vocabulary.")
# return False
# if vocab[C.BOS_SYMBOL] != C.BOS_ID:
# logger.warning("BOS_SYMBOL does not have word id 2 in vocabulary.")
# return False
# if vocab[C.EOS_SYMBOL] != C.EOS_ID:
# logger.warning("EOS_SYMBOL does not have word id 3 in vocabulary.")
# return False
# word_ids = []
# for word, word_id in vocab.items():
# word_ids.append(word_id)
# word_ids_set = set(word_ids)
# if len(word_ids_set) != len(word_ids):
# logger.warning("Duplicate word_ids in vocabulary.")
# return False
#
# expected_word_ids = set(range(0, len(vocab)))
# if expected_word_ids != word_ids_set:
# logger.warning("Not all word_ids from 0 to len(vocabulary) present in vocabulary.")
# return False
#
# return True
#
# def _get_sorted_source_vocab_fnames(folder) -> List[str]:
# _key = lambda x: int(x.split('.', 3)[-2])
# return sorted([f for f in os.listdir(folder) if f.startswith(C.VOCAB_SRC_PREFIX)], key=_key)
#
# def count_tokens(data: Iterable[str]) -> Counter:
# """
# Count whitespace delimited tokens.
#
# :param data: Sequence of sentences containing whitespace-delimited tokens.
# :return: Token counter.
# """
# return Counter(token for line in data for token in utils.get_tokens(line))
. Output only the next line. | assert get_ordered_tokens_from_vocab(vocab) == expected_output |
Continue the code snippet: <|code_start|> vocab = build_vocab(data, size, min_count)
for const in constants:
assert const in vocab
@pytest.mark.parametrize("vocab, expected_output", [({"<pad>": 0, "a": 4, "b": 2}, ["<pad>", "b", "a"]),
({}, [])])
def test_get_ordered_tokens_from_vocab(vocab, expected_output):
assert get_ordered_tokens_from_vocab(vocab) == expected_output
@pytest.mark.parametrize(
"vocab, expected_result",
[
({symbol: idx for idx, symbol in enumerate(C.VOCAB_SYMBOLS + ["w1", "w2"])}, True),
# A vocabulary with just the valid symbols doesn't make much sense but is technically valid
({symbol: idx for idx, symbol in enumerate(C.VOCAB_SYMBOLS)}, True),
# Manually specifying the list of required special symbol so that we avoid making a backwards-incompatible
# change by adding a new symbol to C.VOCAB_SYMBOLS
({symbol: idx for idx, symbol in enumerate([C.PAD_SYMBOL, C.UNK_SYMBOL, C.BOS_SYMBOL, C.EOS_SYMBOL])}, True),
# PAD_ID must have word id 0
({symbol: idx for idx, symbol in enumerate(reversed(C.VOCAB_SYMBOLS))}, False),
({symbol: idx for idx, symbol in enumerate(list(reversed(C.VOCAB_SYMBOLS)) + ["w1", "w2"])}, False),
# If there is a gap the vocabulary is not valid:
({symbol: idx if symbol != "w2" else idx + 1 for idx, symbol in enumerate(C.VOCAB_SYMBOLS + ["w1", "w2"])}, False),
# There shouldn't be any duplicate word ids
({symbol: idx if symbol != "w2" else idx - 1 for idx, symbol in enumerate(C.VOCAB_SYMBOLS + ["w1", "w2"])}, False),
]
)
def test_verify_valid_vocab(vocab, expected_result):
<|code_end|>
. Use current file imports:
import pytest
import sockeye.constants as C
from unittest import mock
from collections import Counter
from sockeye.vocab import (build_vocab, get_ordered_tokens_from_vocab, is_valid_vocab, \
_get_sorted_source_vocab_fnames, count_tokens)
and context (classes, functions, or code) from other files:
# Path: sockeye/vocab.py
# def build_vocab(data: Iterable[str], num_words: Optional[int] = None, min_count: int = 1,
# pad_to_multiple_of: Optional[int] = None) -> Vocab:
# """
# Creates a vocabulary mapping from words to ids. Increasing integer ids are assigned by word frequency,
# using lexical sorting as a tie breaker. The only exception to this are special symbols such as the padding symbol
# (PAD).
#
# :param data: Sequence of sentences containing whitespace-delimited tokens.
# :param num_words: Optional maximum number of words in the vocabulary.
# :param min_count: Minimum occurrences of words to be included in the vocabulary.
# :param pad_to_multiple_of: If not None, pads the vocabulary to a size that is the next multiple of this int.
# :return: Word-to-id mapping.
# """
# raw_vocab = count_tokens(data)
# return build_pruned_vocab(raw_vocab=raw_vocab,
# num_words=num_words,
# min_count=min_count,
# pad_to_multiple_of=pad_to_multiple_of)
#
# def get_ordered_tokens_from_vocab(vocab: Vocab) -> List[str]:
# """
# Returns the list of tokens in a vocabulary, ordered by increasing vocabulary id.
#
# :param vocab: Input vocabulary.
# :return: List of tokens.
# """
# return [token for token, token_id in sorted(vocab.items(), key=lambda i: i[1])]
#
# def is_valid_vocab(vocab: Vocab) -> bool:
# """
# Checks if a vocabulary is valid. We define valid as:
# 1. All indices from 0 to num_words - 1 are present without duplicates.
# 2. PAD_SYMBOL has word id 0, UNK_SYMBOL has word id 1, BOS_SYMBOL has word id 2, EOS_SYMBOL has word id 3.
# """
# if vocab[C.PAD_SYMBOL] != C.PAD_ID:
# logger.warning("PAD_SYMBOL does not have word id 0 in vocabulary.")
# return False
# if vocab[C.UNK_SYMBOL] != C.UNK_ID:
# logger.warning("UNK_SYMBOL does not have word id 1 in vocabulary.")
# return False
# if vocab[C.BOS_SYMBOL] != C.BOS_ID:
# logger.warning("BOS_SYMBOL does not have word id 2 in vocabulary.")
# return False
# if vocab[C.EOS_SYMBOL] != C.EOS_ID:
# logger.warning("EOS_SYMBOL does not have word id 3 in vocabulary.")
# return False
# word_ids = []
# for word, word_id in vocab.items():
# word_ids.append(word_id)
# word_ids_set = set(word_ids)
# if len(word_ids_set) != len(word_ids):
# logger.warning("Duplicate word_ids in vocabulary.")
# return False
#
# expected_word_ids = set(range(0, len(vocab)))
# if expected_word_ids != word_ids_set:
# logger.warning("Not all word_ids from 0 to len(vocabulary) present in vocabulary.")
# return False
#
# return True
#
# def _get_sorted_source_vocab_fnames(folder) -> List[str]:
# _key = lambda x: int(x.split('.', 3)[-2])
# return sorted([f for f in os.listdir(folder) if f.startswith(C.VOCAB_SRC_PREFIX)], key=_key)
#
# def count_tokens(data: Iterable[str]) -> Counter:
# """
# Count whitespace delimited tokens.
#
# :param data: Sequence of sentences containing whitespace-delimited tokens.
# :return: Token counter.
# """
# return Counter(token for line in data for token in utils.get_tokens(line))
. Output only the next line. | assert is_valid_vocab(vocab) == expected_result |
Based on the snippet: <|code_start|>def test_get_ordered_tokens_from_vocab(vocab, expected_output):
assert get_ordered_tokens_from_vocab(vocab) == expected_output
@pytest.mark.parametrize(
"vocab, expected_result",
[
({symbol: idx for idx, symbol in enumerate(C.VOCAB_SYMBOLS + ["w1", "w2"])}, True),
# A vocabulary with just the valid symbols doesn't make much sense but is technically valid
({symbol: idx for idx, symbol in enumerate(C.VOCAB_SYMBOLS)}, True),
# Manually specifying the list of required special symbol so that we avoid making a backwards-incompatible
# change by adding a new symbol to C.VOCAB_SYMBOLS
({symbol: idx for idx, symbol in enumerate([C.PAD_SYMBOL, C.UNK_SYMBOL, C.BOS_SYMBOL, C.EOS_SYMBOL])}, True),
# PAD_ID must have word id 0
({symbol: idx for idx, symbol in enumerate(reversed(C.VOCAB_SYMBOLS))}, False),
({symbol: idx for idx, symbol in enumerate(list(reversed(C.VOCAB_SYMBOLS)) + ["w1", "w2"])}, False),
# If there is a gap the vocabulary is not valid:
({symbol: idx if symbol != "w2" else idx + 1 for idx, symbol in enumerate(C.VOCAB_SYMBOLS + ["w1", "w2"])}, False),
# There shouldn't be any duplicate word ids
({symbol: idx if symbol != "w2" else idx - 1 for idx, symbol in enumerate(C.VOCAB_SYMBOLS + ["w1", "w2"])}, False),
]
)
def test_verify_valid_vocab(vocab, expected_result):
assert is_valid_vocab(vocab) == expected_result
def test_get_sorted_source_vocab_fnames():
expected_fnames = [C.VOCAB_SRC_NAME % i for i in [1, 2, 10]]
with mock.patch('os.listdir') as mocked_listdir:
mocked_listdir.return_value = [C.VOCAB_SRC_NAME % i for i in [2, 1, 10]]
<|code_end|>
, predict the immediate next line with the help of imports:
import pytest
import sockeye.constants as C
from unittest import mock
from collections import Counter
from sockeye.vocab import (build_vocab, get_ordered_tokens_from_vocab, is_valid_vocab, \
_get_sorted_source_vocab_fnames, count_tokens)
and context (classes, functions, sometimes code) from other files:
# Path: sockeye/vocab.py
# def build_vocab(data: Iterable[str], num_words: Optional[int] = None, min_count: int = 1,
# pad_to_multiple_of: Optional[int] = None) -> Vocab:
# """
# Creates a vocabulary mapping from words to ids. Increasing integer ids are assigned by word frequency,
# using lexical sorting as a tie breaker. The only exception to this are special symbols such as the padding symbol
# (PAD).
#
# :param data: Sequence of sentences containing whitespace-delimited tokens.
# :param num_words: Optional maximum number of words in the vocabulary.
# :param min_count: Minimum occurrences of words to be included in the vocabulary.
# :param pad_to_multiple_of: If not None, pads the vocabulary to a size that is the next multiple of this int.
# :return: Word-to-id mapping.
# """
# raw_vocab = count_tokens(data)
# return build_pruned_vocab(raw_vocab=raw_vocab,
# num_words=num_words,
# min_count=min_count,
# pad_to_multiple_of=pad_to_multiple_of)
#
# def get_ordered_tokens_from_vocab(vocab: Vocab) -> List[str]:
# """
# Returns the list of tokens in a vocabulary, ordered by increasing vocabulary id.
#
# :param vocab: Input vocabulary.
# :return: List of tokens.
# """
# return [token for token, token_id in sorted(vocab.items(), key=lambda i: i[1])]
#
# def is_valid_vocab(vocab: Vocab) -> bool:
# """
# Checks if a vocabulary is valid. We define valid as:
# 1. All indices from 0 to num_words - 1 are present without duplicates.
# 2. PAD_SYMBOL has word id 0, UNK_SYMBOL has word id 1, BOS_SYMBOL has word id 2, EOS_SYMBOL has word id 3.
# """
# if vocab[C.PAD_SYMBOL] != C.PAD_ID:
# logger.warning("PAD_SYMBOL does not have word id 0 in vocabulary.")
# return False
# if vocab[C.UNK_SYMBOL] != C.UNK_ID:
# logger.warning("UNK_SYMBOL does not have word id 1 in vocabulary.")
# return False
# if vocab[C.BOS_SYMBOL] != C.BOS_ID:
# logger.warning("BOS_SYMBOL does not have word id 2 in vocabulary.")
# return False
# if vocab[C.EOS_SYMBOL] != C.EOS_ID:
# logger.warning("EOS_SYMBOL does not have word id 3 in vocabulary.")
# return False
# word_ids = []
# for word, word_id in vocab.items():
# word_ids.append(word_id)
# word_ids_set = set(word_ids)
# if len(word_ids_set) != len(word_ids):
# logger.warning("Duplicate word_ids in vocabulary.")
# return False
#
# expected_word_ids = set(range(0, len(vocab)))
# if expected_word_ids != word_ids_set:
# logger.warning("Not all word_ids from 0 to len(vocabulary) present in vocabulary.")
# return False
#
# return True
#
# def _get_sorted_source_vocab_fnames(folder) -> List[str]:
# _key = lambda x: int(x.split('.', 3)[-2])
# return sorted([f for f in os.listdir(folder) if f.startswith(C.VOCAB_SRC_PREFIX)], key=_key)
#
# def count_tokens(data: Iterable[str]) -> Counter:
# """
# Count whitespace delimited tokens.
#
# :param data: Sequence of sentences containing whitespace-delimited tokens.
# :return: Token counter.
# """
# return Counter(token for line in data for token in utils.get_tokens(line))
. Output only the next line. | fnames = _get_sorted_source_vocab_fnames(None) |
Predict the next line after this snippet: <|code_start|># Copyright 2017--2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
@pytest.mark.parametrize('learning_rate_warmup,learning_rate_t_scale',
[(1, 1), (3, 2), (10, .5), (20, 1)])
def test_inv_sqrt_decay_scheduler(learning_rate_warmup, learning_rate_t_scale):
<|code_end|>
using the current file's imports:
import pytest
import numpy as np
from sockeye import lr_scheduler
and any relevant context from other files:
# Path: sockeye/lr_scheduler.py
# class LearningRateScheduler:
# class AdaptiveLearningRateScheduler(LearningRateScheduler):
# class LearningRateSchedulerInvSqrtDecay(LearningRateScheduler):
# class LearningRateSchedulerLinearDecay(LearningRateScheduler):
# class LearningRateSchedulerPlateauReduce(AdaptiveLearningRateScheduler):
# def __init__(self, base_lr: float = 1.0, warmup: int = 0, t_scale: float = 1.0) -> None:
# def __call__(self, t):
# def _warmup(self, scaled_t):
# def new_evaluation_result(self, has_improved: bool) -> bool:
# def __call__(self, t: int):
# def __init__(self, base_lr: float, total_steps: int, warmup: int = 0, t_scale: float = 1.0) -> None:
# def __call__(self, t: int):
# def __init__(self, base_lr: float, reduce_factor: float, reduce_num_not_improved: int, warmup: int = 0) -> None:
# def new_evaluation_result(self, has_improved: bool) -> bool:
# def __call__(self, t):
# def __repr__(self):
# def get_lr_scheduler(scheduler_type: str,
# base_learning_rate: float,
# learning_rate_t_scale: float,
# learning_rate_reduce_factor: float,
# learning_rate_reduce_num_not_improved: int,
# learning_rate_warmup: int = 0,
# max_updates: Optional[int] = None) -> Optional[LearningRateScheduler]:
. Output only the next line. | scheduler = lr_scheduler.get_lr_scheduler('inv-sqrt-decay', |
Given snippet: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
RESPONSE_STATUS = Choices(
('OK', 'OK'),
)
class MerchantHttpRequest(HttpRequest):
def __init__(self, merchant, order):
self.merchant = merchant
self.order = order
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging
from model_utils import Choices
from simptools.wrappers.http import HttpClient, HttpRequest
from requests.exceptions import ConnectionError
from payway.merchants.models import Merchant
and context:
# Path: payway/merchants/models.py
# class Merchant(RandomUIDAbstractModel):
#
# URL_METHODS = Choices(
# ('POST', 'POST'),
# ('GET', 'GET'),
# )
#
# user = models.ForeignKey(User, related_name='merchants', verbose_name=_('user'))
# name = models.CharField('Имя', max_length=255)
# secret_key = models.CharField(_('secret key'), max_length=50)
#
# result_url = models.URLField(_('result url'))
# result_url_method = models.CharField(_('result url method'), max_length=4, choices=URL_METHODS)
#
# success_url = models.URLField(_('success url'))
# fail_url = models.URLField(_('fail url'))
#
# class Meta:
# verbose_name = 'продавец'
# verbose_name_plural = 'продавцы'
# db_table = 'payway_merchants'
which might include code, classes, or functions. Output only the next line. | if self.merchant.result_url_method == Merchant.URL_METHODS.GET: |
Using the snippet: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
class OrderListView(TemplateView):
template_name = 'orders/list.html'
def get(self, request, *args, **kwargs):
orders_page = create_paginated_page(
query_set=Order.objects.filter(user=request.user).order_by("-id"),
page_number=request.GET.get('page') or 1,
<|code_end|>
, determine the next line of code. You have imports:
from django.views.generic.base import TemplateView
from django_simptools.shortcuts import create_paginated_page
from payway.orders.conf.settings import ORDERS_PER_PAGE
from payway.orders.models import Order
and context (class names, function names, or code) available:
# Path: payway/orders/conf/settings.py
# ORDERS_PER_PAGE = getattr(settings, 'ORDERS_PER_PAGE', 10)
#
# Path: payway/orders/models.py
# class Order(TimeStampedModel):
# PAYMENT_STATUS = Choices(
# (False, 'NOT_PAID', _('not paid')),
# (True, 'PAID', _('paid')),
# )
# user = models.ForeignKey(User, related_name='orders', verbose_name=_('user'))
# merchant = models.ForeignKey(Merchant, related_name='orders', verbose_name=_('merchant'))
# uid = models.PositiveIntegerField(unique=False, editable=False)
# sum = fields.MoneyField(_('sum'), max_digits=MAX_MONEY_DIGITS, decimal_places=MAX_MONEY_PLACES)
# account = models.ForeignKey(Account, related_name='orders', verbose_name=_('account'))
# description = models.CharField(_('description'), max_length=255, blank=True)
# is_paid = models.BooleanField(_('payment status'), default=False, choices=PAYMENT_STATUS)
# objects = ChainableQuerySetManager(ExtendedQuerySet)
#
# class Meta:
# verbose_name = _('order')
# verbose_name_plural = _('orders')
# db_table = 'payway_orders'
#
# def __init__(self, *args, **kwargs):
# account = kwargs.get('account') or None
# if account:
# kwargs.setdefault('user', account.user)
# super(Order, self).__init__(*args, **kwargs)
#
# def set_paid(self, is_paid=False):
# self.is_paid = is_paid
#
# def __unicode__(self):
# return 'Order {0}'.format(self.uid)
. Output only the next line. | objects_per_page=ORDERS_PER_PAGE |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
class OrderListView(TemplateView):
template_name = 'orders/list.html'
def get(self, request, *args, **kwargs):
orders_page = create_paginated_page(
<|code_end|>
, predict the next line using imports from the current file:
from django.views.generic.base import TemplateView
from django_simptools.shortcuts import create_paginated_page
from payway.orders.conf.settings import ORDERS_PER_PAGE
from payway.orders.models import Order
and context including class names, function names, and sometimes code from other files:
# Path: payway/orders/conf/settings.py
# ORDERS_PER_PAGE = getattr(settings, 'ORDERS_PER_PAGE', 10)
#
# Path: payway/orders/models.py
# class Order(TimeStampedModel):
# PAYMENT_STATUS = Choices(
# (False, 'NOT_PAID', _('not paid')),
# (True, 'PAID', _('paid')),
# )
# user = models.ForeignKey(User, related_name='orders', verbose_name=_('user'))
# merchant = models.ForeignKey(Merchant, related_name='orders', verbose_name=_('merchant'))
# uid = models.PositiveIntegerField(unique=False, editable=False)
# sum = fields.MoneyField(_('sum'), max_digits=MAX_MONEY_DIGITS, decimal_places=MAX_MONEY_PLACES)
# account = models.ForeignKey(Account, related_name='orders', verbose_name=_('account'))
# description = models.CharField(_('description'), max_length=255, blank=True)
# is_paid = models.BooleanField(_('payment status'), default=False, choices=PAYMENT_STATUS)
# objects = ChainableQuerySetManager(ExtendedQuerySet)
#
# class Meta:
# verbose_name = _('order')
# verbose_name_plural = _('orders')
# db_table = 'payway_orders'
#
# def __init__(self, *args, **kwargs):
# account = kwargs.get('account') or None
# if account:
# kwargs.setdefault('user', account.user)
# super(Order, self).__init__(*args, **kwargs)
#
# def set_paid(self, is_paid=False):
# self.is_paid = is_paid
#
# def __unicode__(self):
# return 'Order {0}'.format(self.uid)
. Output only the next line. | query_set=Order.objects.filter(user=request.user).order_by("-id"), |
Here is a snippet: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
class OrderPaymentViewTestCase(BaseViewTestCase):
fixtures = [
"payway_accounts_auth.json",
"payway_accounts_accounts.json",
"payway_merchants_merchants.json",
"payway_orders_orders.json",
]
def setUp(self):
self.create_new_user_with_account()
self.order = Order(
uid=1234,
sum=Money(10, self.account.currency),
description=u"Тестовый товар",
)
<|code_end|>
. Write the next line using the current file imports:
from django.core.urlresolvers import reverse
from moneyed.classes import Money
from payway.merchants.models import Merchant
from payway.orders.models import Order
from payway.orders.tests import mocks
from payway.utils.tests.base import BaseViewTestCase
and context from other files:
# Path: payway/merchants/models.py
# class Merchant(RandomUIDAbstractModel):
#
# URL_METHODS = Choices(
# ('POST', 'POST'),
# ('GET', 'GET'),
# )
#
# user = models.ForeignKey(User, related_name='merchants', verbose_name=_('user'))
# name = models.CharField('Имя', max_length=255)
# secret_key = models.CharField(_('secret key'), max_length=50)
#
# result_url = models.URLField(_('result url'))
# result_url_method = models.CharField(_('result url method'), max_length=4, choices=URL_METHODS)
#
# success_url = models.URLField(_('success url'))
# fail_url = models.URLField(_('fail url'))
#
# class Meta:
# verbose_name = 'продавец'
# verbose_name_plural = 'продавцы'
# db_table = 'payway_merchants'
#
# Path: payway/orders/models.py
# class Order(TimeStampedModel):
# PAYMENT_STATUS = Choices(
# (False, 'NOT_PAID', _('not paid')),
# (True, 'PAID', _('paid')),
# )
# user = models.ForeignKey(User, related_name='orders', verbose_name=_('user'))
# merchant = models.ForeignKey(Merchant, related_name='orders', verbose_name=_('merchant'))
# uid = models.PositiveIntegerField(unique=False, editable=False)
# sum = fields.MoneyField(_('sum'), max_digits=MAX_MONEY_DIGITS, decimal_places=MAX_MONEY_PLACES)
# account = models.ForeignKey(Account, related_name='orders', verbose_name=_('account'))
# description = models.CharField(_('description'), max_length=255, blank=True)
# is_paid = models.BooleanField(_('payment status'), default=False, choices=PAYMENT_STATUS)
# objects = ChainableQuerySetManager(ExtendedQuerySet)
#
# class Meta:
# verbose_name = _('order')
# verbose_name_plural = _('orders')
# db_table = 'payway_orders'
#
# def __init__(self, *args, **kwargs):
# account = kwargs.get('account') or None
# if account:
# kwargs.setdefault('user', account.user)
# super(Order, self).__init__(*args, **kwargs)
#
# def set_paid(self, is_paid=False):
# self.is_paid = is_paid
#
# def __unicode__(self):
# return 'Order {0}'.format(self.uid)
#
# Path: payway/orders/tests/mocks.py
# def mock_MerchantHttpClient_success_execute():
# def mock_MerchantHttpClient_fail_execute():
# def set_back_MerchantHttpClient_execute():
# class ResponseMock(object):
#
# Path: payway/utils/tests/base.py
# class BaseViewTestCase(AuthorizedViewTestCase):
#
# _url = ''
#
# def create_new_user_with_account(self):
# password = 'abc123'
# self.user = User.objects.create_user(
# 'user',
# 'user@example.com',
# password,
# )
# self.account = self.user.accounts.create(user=self.user, currency='RUB')
# self.account.add(Money(20, self.account.currency))
# self.client_login(username=self.user.username, password=password)
#
# def assert_url_available(self):
# response = self.client.get(self._url)
# self.failUnlessEqual(response.status_code, 200)
#
# def assert_redirects(self, expected_url):
# response = self.client.get(self._url)
# self.assertRedirects(response, expected_url)
#
# def assert_has_message(self, response, message, level):
# messages_list = CookieStorage(response)._decode(response.cookies['messages'].value)
# self.assertIn(Message(level, message), messages_list)
, which may include functions, classes, or code. Output only the next line. | self.merchant = Merchant.objects.get(uid=972855239) |
Based on the snippet: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
class OrderPaymentViewTestCase(BaseViewTestCase):
fixtures = [
"payway_accounts_auth.json",
"payway_accounts_accounts.json",
"payway_merchants_merchants.json",
"payway_orders_orders.json",
]
def setUp(self):
self.create_new_user_with_account()
<|code_end|>
, predict the immediate next line with the help of imports:
from django.core.urlresolvers import reverse
from moneyed.classes import Money
from payway.merchants.models import Merchant
from payway.orders.models import Order
from payway.orders.tests import mocks
from payway.utils.tests.base import BaseViewTestCase
and context (classes, functions, sometimes code) from other files:
# Path: payway/merchants/models.py
# class Merchant(RandomUIDAbstractModel):
#
# URL_METHODS = Choices(
# ('POST', 'POST'),
# ('GET', 'GET'),
# )
#
# user = models.ForeignKey(User, related_name='merchants', verbose_name=_('user'))
# name = models.CharField('Имя', max_length=255)
# secret_key = models.CharField(_('secret key'), max_length=50)
#
# result_url = models.URLField(_('result url'))
# result_url_method = models.CharField(_('result url method'), max_length=4, choices=URL_METHODS)
#
# success_url = models.URLField(_('success url'))
# fail_url = models.URLField(_('fail url'))
#
# class Meta:
# verbose_name = 'продавец'
# verbose_name_plural = 'продавцы'
# db_table = 'payway_merchants'
#
# Path: payway/orders/models.py
# class Order(TimeStampedModel):
# PAYMENT_STATUS = Choices(
# (False, 'NOT_PAID', _('not paid')),
# (True, 'PAID', _('paid')),
# )
# user = models.ForeignKey(User, related_name='orders', verbose_name=_('user'))
# merchant = models.ForeignKey(Merchant, related_name='orders', verbose_name=_('merchant'))
# uid = models.PositiveIntegerField(unique=False, editable=False)
# sum = fields.MoneyField(_('sum'), max_digits=MAX_MONEY_DIGITS, decimal_places=MAX_MONEY_PLACES)
# account = models.ForeignKey(Account, related_name='orders', verbose_name=_('account'))
# description = models.CharField(_('description'), max_length=255, blank=True)
# is_paid = models.BooleanField(_('payment status'), default=False, choices=PAYMENT_STATUS)
# objects = ChainableQuerySetManager(ExtendedQuerySet)
#
# class Meta:
# verbose_name = _('order')
# verbose_name_plural = _('orders')
# db_table = 'payway_orders'
#
# def __init__(self, *args, **kwargs):
# account = kwargs.get('account') or None
# if account:
# kwargs.setdefault('user', account.user)
# super(Order, self).__init__(*args, **kwargs)
#
# def set_paid(self, is_paid=False):
# self.is_paid = is_paid
#
# def __unicode__(self):
# return 'Order {0}'.format(self.uid)
#
# Path: payway/orders/tests/mocks.py
# def mock_MerchantHttpClient_success_execute():
# def mock_MerchantHttpClient_fail_execute():
# def set_back_MerchantHttpClient_execute():
# class ResponseMock(object):
#
# Path: payway/utils/tests/base.py
# class BaseViewTestCase(AuthorizedViewTestCase):
#
# _url = ''
#
# def create_new_user_with_account(self):
# password = 'abc123'
# self.user = User.objects.create_user(
# 'user',
# 'user@example.com',
# password,
# )
# self.account = self.user.accounts.create(user=self.user, currency='RUB')
# self.account.add(Money(20, self.account.currency))
# self.client_login(username=self.user.username, password=password)
#
# def assert_url_available(self):
# response = self.client.get(self._url)
# self.failUnlessEqual(response.status_code, 200)
#
# def assert_redirects(self, expected_url):
# response = self.client.get(self._url)
# self.assertRedirects(response, expected_url)
#
# def assert_has_message(self, response, message, level):
# messages_list = CookieStorage(response)._decode(response.cookies['messages'].value)
# self.assertIn(Message(level, message), messages_list)
. Output only the next line. | self.order = Order( |
Given snippet: <|code_start|> def setUp(self):
self.create_new_user_with_account()
self.order = Order(
uid=1234,
sum=Money(10, self.account.currency),
description=u"Тестовый товар",
)
self.merchant = Merchant.objects.get(uid=972855239)
self.merchant_account = self.merchant.user.accounts.get(uid=1111132330)
self.GET = self.POST = {
'uid': self.order.uid,
'sum': self.order.sum.amount,
'merchant_uid': self.merchant.uid,
'merchant_account': self.merchant_account.uid,
'description': self.order.description,
}
self.POST.setdefault('account', self.account.uid)
def test_get(self):
response = self.client.get(reverse('orders_payment'), self.GET)
self.failUnlessEqual(response.status_code, 200)
self.assertContainsTextItems(response, self.GET.values() + ['Pay order'])
def test_order_exists(self):
self.GET['uid'] = 1
response = self.client.get(reverse('orders_payment'), self.GET)
self.failUnlessEqual(response.status_code, 200)
self.assertContainsTextItems(response, self.GET.values() + ['Order is paid'])
def test_success_order_payment(self):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.core.urlresolvers import reverse
from moneyed.classes import Money
from payway.merchants.models import Merchant
from payway.orders.models import Order
from payway.orders.tests import mocks
from payway.utils.tests.base import BaseViewTestCase
and context:
# Path: payway/merchants/models.py
# class Merchant(RandomUIDAbstractModel):
#
# URL_METHODS = Choices(
# ('POST', 'POST'),
# ('GET', 'GET'),
# )
#
# user = models.ForeignKey(User, related_name='merchants', verbose_name=_('user'))
# name = models.CharField('Имя', max_length=255)
# secret_key = models.CharField(_('secret key'), max_length=50)
#
# result_url = models.URLField(_('result url'))
# result_url_method = models.CharField(_('result url method'), max_length=4, choices=URL_METHODS)
#
# success_url = models.URLField(_('success url'))
# fail_url = models.URLField(_('fail url'))
#
# class Meta:
# verbose_name = 'продавец'
# verbose_name_plural = 'продавцы'
# db_table = 'payway_merchants'
#
# Path: payway/orders/models.py
# class Order(TimeStampedModel):
# PAYMENT_STATUS = Choices(
# (False, 'NOT_PAID', _('not paid')),
# (True, 'PAID', _('paid')),
# )
# user = models.ForeignKey(User, related_name='orders', verbose_name=_('user'))
# merchant = models.ForeignKey(Merchant, related_name='orders', verbose_name=_('merchant'))
# uid = models.PositiveIntegerField(unique=False, editable=False)
# sum = fields.MoneyField(_('sum'), max_digits=MAX_MONEY_DIGITS, decimal_places=MAX_MONEY_PLACES)
# account = models.ForeignKey(Account, related_name='orders', verbose_name=_('account'))
# description = models.CharField(_('description'), max_length=255, blank=True)
# is_paid = models.BooleanField(_('payment status'), default=False, choices=PAYMENT_STATUS)
# objects = ChainableQuerySetManager(ExtendedQuerySet)
#
# class Meta:
# verbose_name = _('order')
# verbose_name_plural = _('orders')
# db_table = 'payway_orders'
#
# def __init__(self, *args, **kwargs):
# account = kwargs.get('account') or None
# if account:
# kwargs.setdefault('user', account.user)
# super(Order, self).__init__(*args, **kwargs)
#
# def set_paid(self, is_paid=False):
# self.is_paid = is_paid
#
# def __unicode__(self):
# return 'Order {0}'.format(self.uid)
#
# Path: payway/orders/tests/mocks.py
# def mock_MerchantHttpClient_success_execute():
# def mock_MerchantHttpClient_fail_execute():
# def set_back_MerchantHttpClient_execute():
# class ResponseMock(object):
#
# Path: payway/utils/tests/base.py
# class BaseViewTestCase(AuthorizedViewTestCase):
#
# _url = ''
#
# def create_new_user_with_account(self):
# password = 'abc123'
# self.user = User.objects.create_user(
# 'user',
# 'user@example.com',
# password,
# )
# self.account = self.user.accounts.create(user=self.user, currency='RUB')
# self.account.add(Money(20, self.account.currency))
# self.client_login(username=self.user.username, password=password)
#
# def assert_url_available(self):
# response = self.client.get(self._url)
# self.failUnlessEqual(response.status_code, 200)
#
# def assert_redirects(self, expected_url):
# response = self.client.get(self._url)
# self.assertRedirects(response, expected_url)
#
# def assert_has_message(self, response, message, level):
# messages_list = CookieStorage(response)._decode(response.cookies['messages'].value)
# self.assertIn(Message(level, message), messages_list)
which might include code, classes, or functions. Output only the next line. | mocks.mock_MerchantHttpClient_success_execute() |
Next line prediction: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
admin.site.register(Purse)
<|code_end|>
. Use current file imports:
(from django.contrib import admin
from payway.webmoney.models import Purse, ResultResponsePayment)
and context including class names, function names, or small code snippets from other files:
# Path: payway/webmoney/models.py
# class Purse(models.Model):
# purse = models.CharField(_('purse'), max_length=13, unique=True)
# secret_key = models.CharField(_('secret key'), max_length=50)
# is_active = models.BooleanField(_('is active'), default=False)
# currency = models.CharField(_('currency'), max_length=3, choices=CURRENCY_CHOICES)
#
# class Meta:
# db_table = 'payway_webmoney_purses'
#
# def __unicode__(self):
# return '%s' % self.purse
#
# class ResultResponsePayment(ResponsePayment):
#
# class Meta:
# db_table = 'payway_webmoney_resultresponsepayment'
#
# payment_system = _('Webmoney')
#
# PAYMENT_MODE = Choices((0, _('real')), (1, _('test')))
#
# LMI_MODE = models.PositiveSmallIntegerField(choices=PAYMENT_MODE)
# payee_purse = models.ForeignKey(Purse, related_name='success_payments')
#
# LMI_SYS_INVS_NO = models.PositiveIntegerField()
# LMI_SYS_TRANS_NO = models.PositiveIntegerField()
# LMI_SYS_TRANS_DATE = models.DateTimeField()
# LMI_PAYMENT_NO = models.PositiveIntegerField()
#
# LMI_PAYER_PURSE = models.CharField(max_length=13)
# LMI_PAYER_WM = models.CharField(max_length=12)
# LMI_HASH = models.CharField(max_length=255)
#
# LMI_CAPITALLER_WMID = models.CharField(max_length=12, blank=True)
# LMI_WMCHECK_NUMBER = models.CharField(max_length=20, blank=True)
# LMI_PAYMER_NUMBER = models.CharField(max_length=30, blank=True)
# LMI_PAYMER_EMAIL = models.EmailField(blank=True)
# LMI_TELEPAT_PHONENUMBER = models.CharField(max_length=30, blank=True)
# LMI_TELEPAT_ORDERID = models.CharField(max_length=30, blank=True)
# LMI_PAYMENT_DESC = models.CharField(max_length=255, blank=True)
# LMI_LANG = models.CharField(max_length=10, blank=True)
. Output only the next line. | admin.site.register(ResultResponsePayment) |
Predict the next line for this snippet: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
CURRENCY_CHOICES = [(CURRENCIES['RUB'].code, CURRENCIES['RUB'].name)]
class Purse(models.Model):
purse = models.CharField(_('purse'), max_length=13, unique=True)
secret_key = models.CharField(_('secret key'), max_length=50)
is_active = models.BooleanField(_('is active'), default=False)
currency = models.CharField(_('currency'), max_length=3, choices=CURRENCY_CHOICES)
class Meta:
db_table = 'payway_webmoney_purses'
def __unicode__(self):
return '%s' % self.purse
<|code_end|>
with the help of current file imports:
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils import Choices
from moneyed.classes import CURRENCIES
from payway.accounts.models import ResponsePayment
and context from other files:
# Path: payway/accounts/models.py
# class ResponsePayment(InheritanceCastModel, AbstractPayment):
# """
# Parent response model
# """
# OK_STATUS = Choices((True, 'SUCCESS', _('success')), (False, 'FAIL', _('fail')))
#
# is_OK = models.BooleanField(choices=OK_STATUS, default=OK_STATUS.FAIL)
#
# class Meta:
# db_table = 'payway_response_payments'
#
# def __unicode__(self):
# return u'id:{0} is_ok:{2}'.format(self.id, self.money_amount, self.is_OK)
, which may contain function names, class names, or code. Output only the next line. | class ResultResponsePayment(ResponsePayment): |
Based on the snippet: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
urlpatterns = patterns('',
<|code_end|>
, predict the immediate next line with the help of imports:
from django.conf.urls.defaults import patterns, url
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from payway.webmoney.views import WebmoneyView, WebmoneyResultView, WebmoneySuccessView, WebmoneyFailView
and context (classes, functions, sometimes code) from other files:
# Path: payway/webmoney/views.py
# class WebmoneyView(TemplateView):
# template_name = 'webmoney/add_money_request.html'
#
# def get(self, request, *args, **kwargs):
# context = super(WebmoneyView, self).get_context_data(**kwargs)
# invoice_uid = int(kwargs.get('invoice_uid', 0))
# invoice = get_object_or_404(Invoice, uid=invoice_uid)
# invoice.update_money_amount_with_percent(WEBMONEY_PERCENT)
#
# try:
# context['request_form'] = RequestPaymentForm(initial={'LMI_PAYMENT_AMOUNT': invoice.money_amount.amount,
# 'LMI_PAYMENT_NO': invoice.uid,
# 'LMI_PAYMENT_DESC': render_to_string(
# 'webmoney/payment_description.txt',
# {'invoice_uid': invoice.uid,
# 'user': request.user}
# ).strip()[:255],
# 'LMI_PAYEE_PURSE': Purse.objects.filter(is_active=True, currency=invoice.money_amount.currency)[0]
# })
# except IndexError:
# raise Http404("WebMoney purse number doesn't exists")
# return self.render_to_response(context)
#
# class WebmoneyResultView(View):
#
# def post(self, request, *args, **kwargs):
# form = PrerequestResponsePaymentForm(request.POST)
# if form.is_valid() and form.cleaned_data['LMI_PREREQUEST']:
# invoice_uid = int(form.cleaned_data['LMI_PAYMENT_NO'])
# try:
# Invoice.objects.get(uid=invoice_uid)
# except ObjectDoesNotExist:
# return HttpResponseBadRequest("Invoice with number %s not found." % invoice_uid)
# return HttpResponse("YES")
#
# form = ResultResponsePaymentForm(request.POST)
# if form.is_valid():
# purse = Purse.objects.get(purse=form.cleaned_data['LMI_PAYEE_PURSE'])
#
# key = u"{0}{1}{2}{3}{4}{5}{6}{7}{8}{9}".format(
# purse.purse,
# form.cleaned_data['LMI_PAYMENT_AMOUNT'],
# form.cleaned_data['LMI_PAYMENT_NO'],
# form.cleaned_data['LMI_MODE'],
# form.cleaned_data['LMI_SYS_INVS_NO'],
# form.cleaned_data['LMI_SYS_TRANS_NO'],
# form.cleaned_data['LMI_SYS_TRANS_DATE'].strftime('%Y%m%d %H:%M:%S'),
# purse.secret_key,
# form.cleaned_data['LMI_PAYER_PURSE'],
# form.cleaned_data['LMI_PAYER_WM']
# )
# generated_hash = md5(key).hexdigest().upper()
# if generated_hash == form.cleaned_data['LMI_HASH']:
# form.save()
# else:
# #TODO: log to somewhere
# return HttpResponseBadRequest("Incorrect hash")
# return HttpResponse("OK")
#
# return HttpResponseBadRequest("Unknown error!")
#
# class WebmoneySuccessView(TemplateView):
# template_name = 'webmoney/success.html'
#
# def post(self, request, *args, **kwargs):
# return self.render_to_response(self.get_context(request))
#
# def get(self, request, *args, **kwargs):
# return self.render_to_response(self.get_context(request))
#
# def get_context(self, request):
# context = super(WebmoneySuccessView, self).get_context_data()
# request_data = request.GET or request.POST
# context['LMI_PAYMENT_NO'] = request_data.get('LMI_PAYMENT_NO')
# return context
#
# class WebmoneyFailView(WebmoneySuccessView):
# template_name = 'webmoney/fail.html'
. Output only the next line. | url(r'add-money/(?P<invoice_uid>\d+)/$', login_required(WebmoneyView.as_view()), name='webmoney_add_money'), |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
urlpatterns = patterns('',
url(r'add-money/(?P<invoice_uid>\d+)/$', login_required(WebmoneyView.as_view()), name='webmoney_add_money'),
<|code_end|>
, predict the next line using imports from the current file:
from django.conf.urls.defaults import patterns, url
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from payway.webmoney.views import WebmoneyView, WebmoneyResultView, WebmoneySuccessView, WebmoneyFailView
and context including class names, function names, and sometimes code from other files:
# Path: payway/webmoney/views.py
# class WebmoneyView(TemplateView):
# template_name = 'webmoney/add_money_request.html'
#
# def get(self, request, *args, **kwargs):
# context = super(WebmoneyView, self).get_context_data(**kwargs)
# invoice_uid = int(kwargs.get('invoice_uid', 0))
# invoice = get_object_or_404(Invoice, uid=invoice_uid)
# invoice.update_money_amount_with_percent(WEBMONEY_PERCENT)
#
# try:
# context['request_form'] = RequestPaymentForm(initial={'LMI_PAYMENT_AMOUNT': invoice.money_amount.amount,
# 'LMI_PAYMENT_NO': invoice.uid,
# 'LMI_PAYMENT_DESC': render_to_string(
# 'webmoney/payment_description.txt',
# {'invoice_uid': invoice.uid,
# 'user': request.user}
# ).strip()[:255],
# 'LMI_PAYEE_PURSE': Purse.objects.filter(is_active=True, currency=invoice.money_amount.currency)[0]
# })
# except IndexError:
# raise Http404("WebMoney purse number doesn't exists")
# return self.render_to_response(context)
#
# class WebmoneyResultView(View):
#
# def post(self, request, *args, **kwargs):
# form = PrerequestResponsePaymentForm(request.POST)
# if form.is_valid() and form.cleaned_data['LMI_PREREQUEST']:
# invoice_uid = int(form.cleaned_data['LMI_PAYMENT_NO'])
# try:
# Invoice.objects.get(uid=invoice_uid)
# except ObjectDoesNotExist:
# return HttpResponseBadRequest("Invoice with number %s not found." % invoice_uid)
# return HttpResponse("YES")
#
# form = ResultResponsePaymentForm(request.POST)
# if form.is_valid():
# purse = Purse.objects.get(purse=form.cleaned_data['LMI_PAYEE_PURSE'])
#
# key = u"{0}{1}{2}{3}{4}{5}{6}{7}{8}{9}".format(
# purse.purse,
# form.cleaned_data['LMI_PAYMENT_AMOUNT'],
# form.cleaned_data['LMI_PAYMENT_NO'],
# form.cleaned_data['LMI_MODE'],
# form.cleaned_data['LMI_SYS_INVS_NO'],
# form.cleaned_data['LMI_SYS_TRANS_NO'],
# form.cleaned_data['LMI_SYS_TRANS_DATE'].strftime('%Y%m%d %H:%M:%S'),
# purse.secret_key,
# form.cleaned_data['LMI_PAYER_PURSE'],
# form.cleaned_data['LMI_PAYER_WM']
# )
# generated_hash = md5(key).hexdigest().upper()
# if generated_hash == form.cleaned_data['LMI_HASH']:
# form.save()
# else:
# #TODO: log to somewhere
# return HttpResponseBadRequest("Incorrect hash")
# return HttpResponse("OK")
#
# return HttpResponseBadRequest("Unknown error!")
#
# class WebmoneySuccessView(TemplateView):
# template_name = 'webmoney/success.html'
#
# def post(self, request, *args, **kwargs):
# return self.render_to_response(self.get_context(request))
#
# def get(self, request, *args, **kwargs):
# return self.render_to_response(self.get_context(request))
#
# def get_context(self, request):
# context = super(WebmoneySuccessView, self).get_context_data()
# request_data = request.GET or request.POST
# context['LMI_PAYMENT_NO'] = request_data.get('LMI_PAYMENT_NO')
# return context
#
# class WebmoneyFailView(WebmoneySuccessView):
# template_name = 'webmoney/fail.html'
. Output only the next line. | url(r'result/$', csrf_exempt(WebmoneyResultView.as_view()), name='webmoney_result'), |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
urlpatterns = patterns('',
url(r'add-money/(?P<invoice_uid>\d+)/$', login_required(WebmoneyView.as_view()), name='webmoney_add_money'),
url(r'result/$', csrf_exempt(WebmoneyResultView.as_view()), name='webmoney_result'),
<|code_end|>
, predict the next line using imports from the current file:
from django.conf.urls.defaults import patterns, url
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from payway.webmoney.views import WebmoneyView, WebmoneyResultView, WebmoneySuccessView, WebmoneyFailView
and context including class names, function names, and sometimes code from other files:
# Path: payway/webmoney/views.py
# class WebmoneyView(TemplateView):
# template_name = 'webmoney/add_money_request.html'
#
# def get(self, request, *args, **kwargs):
# context = super(WebmoneyView, self).get_context_data(**kwargs)
# invoice_uid = int(kwargs.get('invoice_uid', 0))
# invoice = get_object_or_404(Invoice, uid=invoice_uid)
# invoice.update_money_amount_with_percent(WEBMONEY_PERCENT)
#
# try:
# context['request_form'] = RequestPaymentForm(initial={'LMI_PAYMENT_AMOUNT': invoice.money_amount.amount,
# 'LMI_PAYMENT_NO': invoice.uid,
# 'LMI_PAYMENT_DESC': render_to_string(
# 'webmoney/payment_description.txt',
# {'invoice_uid': invoice.uid,
# 'user': request.user}
# ).strip()[:255],
# 'LMI_PAYEE_PURSE': Purse.objects.filter(is_active=True, currency=invoice.money_amount.currency)[0]
# })
# except IndexError:
# raise Http404("WebMoney purse number doesn't exists")
# return self.render_to_response(context)
#
# class WebmoneyResultView(View):
#
# def post(self, request, *args, **kwargs):
# form = PrerequestResponsePaymentForm(request.POST)
# if form.is_valid() and form.cleaned_data['LMI_PREREQUEST']:
# invoice_uid = int(form.cleaned_data['LMI_PAYMENT_NO'])
# try:
# Invoice.objects.get(uid=invoice_uid)
# except ObjectDoesNotExist:
# return HttpResponseBadRequest("Invoice with number %s not found." % invoice_uid)
# return HttpResponse("YES")
#
# form = ResultResponsePaymentForm(request.POST)
# if form.is_valid():
# purse = Purse.objects.get(purse=form.cleaned_data['LMI_PAYEE_PURSE'])
#
# key = u"{0}{1}{2}{3}{4}{5}{6}{7}{8}{9}".format(
# purse.purse,
# form.cleaned_data['LMI_PAYMENT_AMOUNT'],
# form.cleaned_data['LMI_PAYMENT_NO'],
# form.cleaned_data['LMI_MODE'],
# form.cleaned_data['LMI_SYS_INVS_NO'],
# form.cleaned_data['LMI_SYS_TRANS_NO'],
# form.cleaned_data['LMI_SYS_TRANS_DATE'].strftime('%Y%m%d %H:%M:%S'),
# purse.secret_key,
# form.cleaned_data['LMI_PAYER_PURSE'],
# form.cleaned_data['LMI_PAYER_WM']
# )
# generated_hash = md5(key).hexdigest().upper()
# if generated_hash == form.cleaned_data['LMI_HASH']:
# form.save()
# else:
# #TODO: log to somewhere
# return HttpResponseBadRequest("Incorrect hash")
# return HttpResponse("OK")
#
# return HttpResponseBadRequest("Unknown error!")
#
# class WebmoneySuccessView(TemplateView):
# template_name = 'webmoney/success.html'
#
# def post(self, request, *args, **kwargs):
# return self.render_to_response(self.get_context(request))
#
# def get(self, request, *args, **kwargs):
# return self.render_to_response(self.get_context(request))
#
# def get_context(self, request):
# context = super(WebmoneySuccessView, self).get_context_data()
# request_data = request.GET or request.POST
# context['LMI_PAYMENT_NO'] = request_data.get('LMI_PAYMENT_NO')
# return context
#
# class WebmoneyFailView(WebmoneySuccessView):
# template_name = 'webmoney/fail.html'
. Output only the next line. | url(r'success/$', login_required(csrf_exempt(WebmoneySuccessView.as_view())), name='webmoney_success'), |
Continue the code snippet: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
urlpatterns = patterns('',
url(r'add-money/(?P<invoice_uid>\d+)/$', login_required(WebmoneyView.as_view()), name='webmoney_add_money'),
url(r'result/$', csrf_exempt(WebmoneyResultView.as_view()), name='webmoney_result'),
url(r'success/$', login_required(csrf_exempt(WebmoneySuccessView.as_view())), name='webmoney_success'),
<|code_end|>
. Use current file imports:
from django.conf.urls.defaults import patterns, url
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from payway.webmoney.views import WebmoneyView, WebmoneyResultView, WebmoneySuccessView, WebmoneyFailView
and context (classes, functions, or code) from other files:
# Path: payway/webmoney/views.py
# class WebmoneyView(TemplateView):
# template_name = 'webmoney/add_money_request.html'
#
# def get(self, request, *args, **kwargs):
# context = super(WebmoneyView, self).get_context_data(**kwargs)
# invoice_uid = int(kwargs.get('invoice_uid', 0))
# invoice = get_object_or_404(Invoice, uid=invoice_uid)
# invoice.update_money_amount_with_percent(WEBMONEY_PERCENT)
#
# try:
# context['request_form'] = RequestPaymentForm(initial={'LMI_PAYMENT_AMOUNT': invoice.money_amount.amount,
# 'LMI_PAYMENT_NO': invoice.uid,
# 'LMI_PAYMENT_DESC': render_to_string(
# 'webmoney/payment_description.txt',
# {'invoice_uid': invoice.uid,
# 'user': request.user}
# ).strip()[:255],
# 'LMI_PAYEE_PURSE': Purse.objects.filter(is_active=True, currency=invoice.money_amount.currency)[0]
# })
# except IndexError:
# raise Http404("WebMoney purse number doesn't exists")
# return self.render_to_response(context)
#
# class WebmoneyResultView(View):
#
# def post(self, request, *args, **kwargs):
# form = PrerequestResponsePaymentForm(request.POST)
# if form.is_valid() and form.cleaned_data['LMI_PREREQUEST']:
# invoice_uid = int(form.cleaned_data['LMI_PAYMENT_NO'])
# try:
# Invoice.objects.get(uid=invoice_uid)
# except ObjectDoesNotExist:
# return HttpResponseBadRequest("Invoice with number %s not found." % invoice_uid)
# return HttpResponse("YES")
#
# form = ResultResponsePaymentForm(request.POST)
# if form.is_valid():
# purse = Purse.objects.get(purse=form.cleaned_data['LMI_PAYEE_PURSE'])
#
# key = u"{0}{1}{2}{3}{4}{5}{6}{7}{8}{9}".format(
# purse.purse,
# form.cleaned_data['LMI_PAYMENT_AMOUNT'],
# form.cleaned_data['LMI_PAYMENT_NO'],
# form.cleaned_data['LMI_MODE'],
# form.cleaned_data['LMI_SYS_INVS_NO'],
# form.cleaned_data['LMI_SYS_TRANS_NO'],
# form.cleaned_data['LMI_SYS_TRANS_DATE'].strftime('%Y%m%d %H:%M:%S'),
# purse.secret_key,
# form.cleaned_data['LMI_PAYER_PURSE'],
# form.cleaned_data['LMI_PAYER_WM']
# )
# generated_hash = md5(key).hexdigest().upper()
# if generated_hash == form.cleaned_data['LMI_HASH']:
# form.save()
# else:
# #TODO: log to somewhere
# return HttpResponseBadRequest("Incorrect hash")
# return HttpResponse("OK")
#
# return HttpResponseBadRequest("Unknown error!")
#
# class WebmoneySuccessView(TemplateView):
# template_name = 'webmoney/success.html'
#
# def post(self, request, *args, **kwargs):
# return self.render_to_response(self.get_context(request))
#
# def get(self, request, *args, **kwargs):
# return self.render_to_response(self.get_context(request))
#
# def get_context(self, request):
# context = super(WebmoneySuccessView, self).get_context_data()
# request_data = request.GET or request.POST
# context['LMI_PAYMENT_NO'] = request_data.get('LMI_PAYMENT_NO')
# return context
#
# class WebmoneyFailView(WebmoneySuccessView):
# template_name = 'webmoney/fail.html'
. Output only the next line. | url(r'fail/$', login_required(csrf_exempt(WebmoneyFailView.as_view())), name='webmoney_fail'), |
Continue the code snippet: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
class AddMoneyView(TemplateView):
template_name = 'accounts/add_money.html'
def get(self, request, *args, **kwargs):
account_uid = int(kwargs.get('account_uid', -1))
invoice_uid = int(kwargs.get('invoice_uid', -1))
account = get_object_or_404(Account, uid=account_uid)
invoice, created = Invoice.objects.get_or_create(
uid=invoice_uid,
account=account,
defaults={
'account': account,
<|code_end|>
. Use current file imports:
from django.shortcuts import redirect, get_object_or_404
from django.views.generic.base import TemplateView
from moneyed.classes import Money
from payway.accounts.conf.settings import ADD_MONEY_INITIAL_SUM
from payway.accounts.forms.add_money import AddMoneyForm, PAYMENT_SYSTEM_CHOICES
from payway.accounts.models import Invoice, Account
and context (classes, functions, or code) from other files:
# Path: payway/accounts/conf/settings.py
# ADD_MONEY_INITIAL_SUM = getattr(settings, 'ADD_MONEY_INITIAL_SUM', 10)
#
# Path: payway/accounts/forms/add_money.py
# class AddMoneyForm(forms.Form):
# money_amount = forms.DecimalField(
# label=_('money amount'),
# min_value=MIN_MONEY_VALUE,
# max_digits=MAX_MONEY_DIGITS,
# decimal_places=MAX_MONEY_PLACES,
# )
# invoice_uid = forms.IntegerField(max_value=RandomUIDAbstractModel.MAX_UID, widget=forms.HiddenInput())
#
# payment_system = ChoiceField(
# label=_('payment system'),
# widget=RadioSelect,
# choices=PAYMENT_SYSTEM_CHOICES
# )
#
# PAYMENT_SYSTEM_CHOICES = (
# ('webmoney_add_money', 'Webmoney WMR'),
# ('qiwi_add_money', 'Qiwi RUB'),
# )
#
# Path: payway/accounts/models.py
# class Invoice(RandomUIDAbstractModel, TimeStampedModel):
#
# account = models.ForeignKey(Account, related_name='invoices', verbose_name=_('account'))
# money_amount = fields.MoneyField(_('money amount'), max_digits=MAX_MONEY_DIGITS, decimal_places=MAX_MONEY_PLACES) # for transmit
# money_amount_without_percent = fields.MoneyField(_('money amount without percent'), max_digits=MAX_MONEY_DIGITS, decimal_places=MAX_MONEY_PLACES) # befor transmit
# objects = ChainableQuerySetManager(InvoiceQuerySet)
#
# class Meta:
# verbose_name = _('invoice')
# verbose_name_plural = _('invoices')
# db_table = 'payway_invoices'
#
# def __init__(self, *args, **kwargs):
# money_amount = kwargs.get('money_amount') or None
# if money_amount:
# kwargs.setdefault('money_amount_without_percent', money_amount)
# super(Invoice, self).__init__(*args, **kwargs)
#
# def get_success_response_payments(self):
# return self.accounts_responsepayment_related.filter(is_OK=ResponsePayment.OK_STATUS.SUCCESS)
#
# def __unicode__(self):
# return u'{0}'.format(self.uid)
#
# def update_money_amount_with_percent(self, percent=0.0):
# self.money_amount_without_percent = self.money_amount
# self.money_amount += round_down(percent % self.money_amount)
# self.save()
#
# class Account(RandomUIDAbstractModel):
# user = models.ForeignKey(User, related_name='accounts', verbose_name=_('user'))
# currency_code = models.CharField(_('currency code'), max_length=3, choices=CURRENCY_CHOICES, default=moneyed.RUB)
#
# class Meta:
# verbose_name = _('account')
# verbose_name_plural = _('accounts')
# permissions = (('can_view_account_report', 'Can view account report'),)
# db_table = 'payway_accounts'
#
# @property
# def currency(self):
# currency = None
# if self.currency_code:
# currency = get_currency(self.currency_code)
# return currency
#
# def __unicode__(self):
# return u"{0} {1}".format(self.uid, self.currency)
#
# def get_balance(self):
# sum = Transaction.objects.filter(account=self, money_amount_currency=self.currency).sum_values()
# return round_down(Money(sum, self.currency))
#
# def withdraw(self, money, allow_overdraft=False):
# self.assert_correct_currency(money)
#
# if money < Money(0, self.currency):
# raise ValueError("You can't withdraw a negative amount")
#
# if not allow_overdraft and (self.get_balance() - money) < Money(0, self.currency):
# raise Overdraft
#
# return Transaction.objects.create(
# account=self,
# money_amount=money * '-1.0',
# )
#
# def add(self, money):
# self.assert_correct_currency(money)
#
# if money < Money(0, self.currency):
# raise ValueError("You can't add a negative money amount")
#
# return Transaction.objects.create(
# account=self,
# money_amount=money,
# )
#
# def transfer(self, money, to_account):
# self.assert_correct_currency(money)
#
# if money < Money(0, self.currency):
# raise ValueError("You can't transfer a negative money amount")
#
# self.withdraw(money)
#
# return Transaction.objects.create(
# account=to_account,
# money_amount=money,
# )
#
# def assert_correct_currency(self, money):
# if money.currency != self.currency:
# raise ValueError("You can't add money with %s currency. Current currency %s" % (money.currency, self.currency))
. Output only the next line. | 'money_amount': Money(ADD_MONEY_INITIAL_SUM, account.currency) |
Using the snippet: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
class AddMoneyView(TemplateView):
template_name = 'accounts/add_money.html'
def get(self, request, *args, **kwargs):
account_uid = int(kwargs.get('account_uid', -1))
invoice_uid = int(kwargs.get('invoice_uid', -1))
account = get_object_or_404(Account, uid=account_uid)
invoice, created = Invoice.objects.get_or_create(
uid=invoice_uid,
account=account,
defaults={
'account': account,
'money_amount': Money(ADD_MONEY_INITIAL_SUM, account.currency)
}
)
context = super(AddMoneyView, self).get_context_data(**kwargs)
<|code_end|>
, determine the next line of code. You have imports:
from django.shortcuts import redirect, get_object_or_404
from django.views.generic.base import TemplateView
from moneyed.classes import Money
from payway.accounts.conf.settings import ADD_MONEY_INITIAL_SUM
from payway.accounts.forms.add_money import AddMoneyForm, PAYMENT_SYSTEM_CHOICES
from payway.accounts.models import Invoice, Account
and context (class names, function names, or code) available:
# Path: payway/accounts/conf/settings.py
# ADD_MONEY_INITIAL_SUM = getattr(settings, 'ADD_MONEY_INITIAL_SUM', 10)
#
# Path: payway/accounts/forms/add_money.py
# class AddMoneyForm(forms.Form):
# money_amount = forms.DecimalField(
# label=_('money amount'),
# min_value=MIN_MONEY_VALUE,
# max_digits=MAX_MONEY_DIGITS,
# decimal_places=MAX_MONEY_PLACES,
# )
# invoice_uid = forms.IntegerField(max_value=RandomUIDAbstractModel.MAX_UID, widget=forms.HiddenInput())
#
# payment_system = ChoiceField(
# label=_('payment system'),
# widget=RadioSelect,
# choices=PAYMENT_SYSTEM_CHOICES
# )
#
# PAYMENT_SYSTEM_CHOICES = (
# ('webmoney_add_money', 'Webmoney WMR'),
# ('qiwi_add_money', 'Qiwi RUB'),
# )
#
# Path: payway/accounts/models.py
# class Invoice(RandomUIDAbstractModel, TimeStampedModel):
#
# account = models.ForeignKey(Account, related_name='invoices', verbose_name=_('account'))
# money_amount = fields.MoneyField(_('money amount'), max_digits=MAX_MONEY_DIGITS, decimal_places=MAX_MONEY_PLACES) # for transmit
# money_amount_without_percent = fields.MoneyField(_('money amount without percent'), max_digits=MAX_MONEY_DIGITS, decimal_places=MAX_MONEY_PLACES) # befor transmit
# objects = ChainableQuerySetManager(InvoiceQuerySet)
#
# class Meta:
# verbose_name = _('invoice')
# verbose_name_plural = _('invoices')
# db_table = 'payway_invoices'
#
# def __init__(self, *args, **kwargs):
# money_amount = kwargs.get('money_amount') or None
# if money_amount:
# kwargs.setdefault('money_amount_without_percent', money_amount)
# super(Invoice, self).__init__(*args, **kwargs)
#
# def get_success_response_payments(self):
# return self.accounts_responsepayment_related.filter(is_OK=ResponsePayment.OK_STATUS.SUCCESS)
#
# def __unicode__(self):
# return u'{0}'.format(self.uid)
#
# def update_money_amount_with_percent(self, percent=0.0):
# self.money_amount_without_percent = self.money_amount
# self.money_amount += round_down(percent % self.money_amount)
# self.save()
#
# class Account(RandomUIDAbstractModel):
# user = models.ForeignKey(User, related_name='accounts', verbose_name=_('user'))
# currency_code = models.CharField(_('currency code'), max_length=3, choices=CURRENCY_CHOICES, default=moneyed.RUB)
#
# class Meta:
# verbose_name = _('account')
# verbose_name_plural = _('accounts')
# permissions = (('can_view_account_report', 'Can view account report'),)
# db_table = 'payway_accounts'
#
# @property
# def currency(self):
# currency = None
# if self.currency_code:
# currency = get_currency(self.currency_code)
# return currency
#
# def __unicode__(self):
# return u"{0} {1}".format(self.uid, self.currency)
#
# def get_balance(self):
# sum = Transaction.objects.filter(account=self, money_amount_currency=self.currency).sum_values()
# return round_down(Money(sum, self.currency))
#
# def withdraw(self, money, allow_overdraft=False):
# self.assert_correct_currency(money)
#
# if money < Money(0, self.currency):
# raise ValueError("You can't withdraw a negative amount")
#
# if not allow_overdraft and (self.get_balance() - money) < Money(0, self.currency):
# raise Overdraft
#
# return Transaction.objects.create(
# account=self,
# money_amount=money * '-1.0',
# )
#
# def add(self, money):
# self.assert_correct_currency(money)
#
# if money < Money(0, self.currency):
# raise ValueError("You can't add a negative money amount")
#
# return Transaction.objects.create(
# account=self,
# money_amount=money,
# )
#
# def transfer(self, money, to_account):
# self.assert_correct_currency(money)
#
# if money < Money(0, self.currency):
# raise ValueError("You can't transfer a negative money amount")
#
# self.withdraw(money)
#
# return Transaction.objects.create(
# account=to_account,
# money_amount=money,
# )
#
# def assert_correct_currency(self, money):
# if money.currency != self.currency:
# raise ValueError("You can't add money with %s currency. Current currency %s" % (money.currency, self.currency))
. Output only the next line. | context['add_money_form'] = AddMoneyForm({ |
Using the snippet: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
class AddMoneyView(TemplateView):
template_name = 'accounts/add_money.html'
def get(self, request, *args, **kwargs):
account_uid = int(kwargs.get('account_uid', -1))
invoice_uid = int(kwargs.get('invoice_uid', -1))
account = get_object_or_404(Account, uid=account_uid)
invoice, created = Invoice.objects.get_or_create(
uid=invoice_uid,
account=account,
defaults={
'account': account,
'money_amount': Money(ADD_MONEY_INITIAL_SUM, account.currency)
}
)
context = super(AddMoneyView, self).get_context_data(**kwargs)
context['add_money_form'] = AddMoneyForm({
'money_amount': invoice.money_amount_without_percent.amount,
<|code_end|>
, determine the next line of code. You have imports:
from django.shortcuts import redirect, get_object_or_404
from django.views.generic.base import TemplateView
from moneyed.classes import Money
from payway.accounts.conf.settings import ADD_MONEY_INITIAL_SUM
from payway.accounts.forms.add_money import AddMoneyForm, PAYMENT_SYSTEM_CHOICES
from payway.accounts.models import Invoice, Account
and context (class names, function names, or code) available:
# Path: payway/accounts/conf/settings.py
# ADD_MONEY_INITIAL_SUM = getattr(settings, 'ADD_MONEY_INITIAL_SUM', 10)
#
# Path: payway/accounts/forms/add_money.py
# class AddMoneyForm(forms.Form):
# money_amount = forms.DecimalField(
# label=_('money amount'),
# min_value=MIN_MONEY_VALUE,
# max_digits=MAX_MONEY_DIGITS,
# decimal_places=MAX_MONEY_PLACES,
# )
# invoice_uid = forms.IntegerField(max_value=RandomUIDAbstractModel.MAX_UID, widget=forms.HiddenInput())
#
# payment_system = ChoiceField(
# label=_('payment system'),
# widget=RadioSelect,
# choices=PAYMENT_SYSTEM_CHOICES
# )
#
# PAYMENT_SYSTEM_CHOICES = (
# ('webmoney_add_money', 'Webmoney WMR'),
# ('qiwi_add_money', 'Qiwi RUB'),
# )
#
# Path: payway/accounts/models.py
# class Invoice(RandomUIDAbstractModel, TimeStampedModel):
#
# account = models.ForeignKey(Account, related_name='invoices', verbose_name=_('account'))
# money_amount = fields.MoneyField(_('money amount'), max_digits=MAX_MONEY_DIGITS, decimal_places=MAX_MONEY_PLACES) # for transmit
# money_amount_without_percent = fields.MoneyField(_('money amount without percent'), max_digits=MAX_MONEY_DIGITS, decimal_places=MAX_MONEY_PLACES) # befor transmit
# objects = ChainableQuerySetManager(InvoiceQuerySet)
#
# class Meta:
# verbose_name = _('invoice')
# verbose_name_plural = _('invoices')
# db_table = 'payway_invoices'
#
# def __init__(self, *args, **kwargs):
# money_amount = kwargs.get('money_amount') or None
# if money_amount:
# kwargs.setdefault('money_amount_without_percent', money_amount)
# super(Invoice, self).__init__(*args, **kwargs)
#
# def get_success_response_payments(self):
# return self.accounts_responsepayment_related.filter(is_OK=ResponsePayment.OK_STATUS.SUCCESS)
#
# def __unicode__(self):
# return u'{0}'.format(self.uid)
#
# def update_money_amount_with_percent(self, percent=0.0):
# self.money_amount_without_percent = self.money_amount
# self.money_amount += round_down(percent % self.money_amount)
# self.save()
#
# class Account(RandomUIDAbstractModel):
# user = models.ForeignKey(User, related_name='accounts', verbose_name=_('user'))
# currency_code = models.CharField(_('currency code'), max_length=3, choices=CURRENCY_CHOICES, default=moneyed.RUB)
#
# class Meta:
# verbose_name = _('account')
# verbose_name_plural = _('accounts')
# permissions = (('can_view_account_report', 'Can view account report'),)
# db_table = 'payway_accounts'
#
# @property
# def currency(self):
# currency = None
# if self.currency_code:
# currency = get_currency(self.currency_code)
# return currency
#
# def __unicode__(self):
# return u"{0} {1}".format(self.uid, self.currency)
#
# def get_balance(self):
# sum = Transaction.objects.filter(account=self, money_amount_currency=self.currency).sum_values()
# return round_down(Money(sum, self.currency))
#
# def withdraw(self, money, allow_overdraft=False):
# self.assert_correct_currency(money)
#
# if money < Money(0, self.currency):
# raise ValueError("You can't withdraw a negative amount")
#
# if not allow_overdraft and (self.get_balance() - money) < Money(0, self.currency):
# raise Overdraft
#
# return Transaction.objects.create(
# account=self,
# money_amount=money * '-1.0',
# )
#
# def add(self, money):
# self.assert_correct_currency(money)
#
# if money < Money(0, self.currency):
# raise ValueError("You can't add a negative money amount")
#
# return Transaction.objects.create(
# account=self,
# money_amount=money,
# )
#
# def transfer(self, money, to_account):
# self.assert_correct_currency(money)
#
# if money < Money(0, self.currency):
# raise ValueError("You can't transfer a negative money amount")
#
# self.withdraw(money)
#
# return Transaction.objects.create(
# account=to_account,
# money_amount=money,
# )
#
# def assert_correct_currency(self, money):
# if money.currency != self.currency:
# raise ValueError("You can't add money with %s currency. Current currency %s" % (money.currency, self.currency))
. Output only the next line. | 'payment_system': dict(PAYMENT_SYSTEM_CHOICES).keys()[0], |
Continue the code snippet: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
class AddMoneyView(TemplateView):
template_name = 'accounts/add_money.html'
def get(self, request, *args, **kwargs):
account_uid = int(kwargs.get('account_uid', -1))
invoice_uid = int(kwargs.get('invoice_uid', -1))
account = get_object_or_404(Account, uid=account_uid)
<|code_end|>
. Use current file imports:
from django.shortcuts import redirect, get_object_or_404
from django.views.generic.base import TemplateView
from moneyed.classes import Money
from payway.accounts.conf.settings import ADD_MONEY_INITIAL_SUM
from payway.accounts.forms.add_money import AddMoneyForm, PAYMENT_SYSTEM_CHOICES
from payway.accounts.models import Invoice, Account
and context (classes, functions, or code) from other files:
# Path: payway/accounts/conf/settings.py
# ADD_MONEY_INITIAL_SUM = getattr(settings, 'ADD_MONEY_INITIAL_SUM', 10)
#
# Path: payway/accounts/forms/add_money.py
# class AddMoneyForm(forms.Form):
# money_amount = forms.DecimalField(
# label=_('money amount'),
# min_value=MIN_MONEY_VALUE,
# max_digits=MAX_MONEY_DIGITS,
# decimal_places=MAX_MONEY_PLACES,
# )
# invoice_uid = forms.IntegerField(max_value=RandomUIDAbstractModel.MAX_UID, widget=forms.HiddenInput())
#
# payment_system = ChoiceField(
# label=_('payment system'),
# widget=RadioSelect,
# choices=PAYMENT_SYSTEM_CHOICES
# )
#
# PAYMENT_SYSTEM_CHOICES = (
# ('webmoney_add_money', 'Webmoney WMR'),
# ('qiwi_add_money', 'Qiwi RUB'),
# )
#
# Path: payway/accounts/models.py
# class Invoice(RandomUIDAbstractModel, TimeStampedModel):
#
# account = models.ForeignKey(Account, related_name='invoices', verbose_name=_('account'))
# money_amount = fields.MoneyField(_('money amount'), max_digits=MAX_MONEY_DIGITS, decimal_places=MAX_MONEY_PLACES) # for transmit
# money_amount_without_percent = fields.MoneyField(_('money amount without percent'), max_digits=MAX_MONEY_DIGITS, decimal_places=MAX_MONEY_PLACES) # befor transmit
# objects = ChainableQuerySetManager(InvoiceQuerySet)
#
# class Meta:
# verbose_name = _('invoice')
# verbose_name_plural = _('invoices')
# db_table = 'payway_invoices'
#
# def __init__(self, *args, **kwargs):
# money_amount = kwargs.get('money_amount') or None
# if money_amount:
# kwargs.setdefault('money_amount_without_percent', money_amount)
# super(Invoice, self).__init__(*args, **kwargs)
#
# def get_success_response_payments(self):
# return self.accounts_responsepayment_related.filter(is_OK=ResponsePayment.OK_STATUS.SUCCESS)
#
# def __unicode__(self):
# return u'{0}'.format(self.uid)
#
# def update_money_amount_with_percent(self, percent=0.0):
# self.money_amount_without_percent = self.money_amount
# self.money_amount += round_down(percent % self.money_amount)
# self.save()
#
# class Account(RandomUIDAbstractModel):
# user = models.ForeignKey(User, related_name='accounts', verbose_name=_('user'))
# currency_code = models.CharField(_('currency code'), max_length=3, choices=CURRENCY_CHOICES, default=moneyed.RUB)
#
# class Meta:
# verbose_name = _('account')
# verbose_name_plural = _('accounts')
# permissions = (('can_view_account_report', 'Can view account report'),)
# db_table = 'payway_accounts'
#
# @property
# def currency(self):
# currency = None
# if self.currency_code:
# currency = get_currency(self.currency_code)
# return currency
#
# def __unicode__(self):
# return u"{0} {1}".format(self.uid, self.currency)
#
# def get_balance(self):
# sum = Transaction.objects.filter(account=self, money_amount_currency=self.currency).sum_values()
# return round_down(Money(sum, self.currency))
#
# def withdraw(self, money, allow_overdraft=False):
# self.assert_correct_currency(money)
#
# if money < Money(0, self.currency):
# raise ValueError("You can't withdraw a negative amount")
#
# if not allow_overdraft and (self.get_balance() - money) < Money(0, self.currency):
# raise Overdraft
#
# return Transaction.objects.create(
# account=self,
# money_amount=money * '-1.0',
# )
#
# def add(self, money):
# self.assert_correct_currency(money)
#
# if money < Money(0, self.currency):
# raise ValueError("You can't add a negative money amount")
#
# return Transaction.objects.create(
# account=self,
# money_amount=money,
# )
#
# def transfer(self, money, to_account):
# self.assert_correct_currency(money)
#
# if money < Money(0, self.currency):
# raise ValueError("You can't transfer a negative money amount")
#
# self.withdraw(money)
#
# return Transaction.objects.create(
# account=to_account,
# money_amount=money,
# )
#
# def assert_correct_currency(self, money):
# if money.currency != self.currency:
# raise ValueError("You can't add money with %s currency. Current currency %s" % (money.currency, self.currency))
. Output only the next line. | invoice, created = Invoice.objects.get_or_create( |
Next line prediction: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
class AddMoneyView(TemplateView):
template_name = 'accounts/add_money.html'
def get(self, request, *args, **kwargs):
account_uid = int(kwargs.get('account_uid', -1))
invoice_uid = int(kwargs.get('invoice_uid', -1))
<|code_end|>
. Use current file imports:
(from django.shortcuts import redirect, get_object_or_404
from django.views.generic.base import TemplateView
from moneyed.classes import Money
from payway.accounts.conf.settings import ADD_MONEY_INITIAL_SUM
from payway.accounts.forms.add_money import AddMoneyForm, PAYMENT_SYSTEM_CHOICES
from payway.accounts.models import Invoice, Account)
and context including class names, function names, or small code snippets from other files:
# Path: payway/accounts/conf/settings.py
# ADD_MONEY_INITIAL_SUM = getattr(settings, 'ADD_MONEY_INITIAL_SUM', 10)
#
# Path: payway/accounts/forms/add_money.py
# class AddMoneyForm(forms.Form):
# money_amount = forms.DecimalField(
# label=_('money amount'),
# min_value=MIN_MONEY_VALUE,
# max_digits=MAX_MONEY_DIGITS,
# decimal_places=MAX_MONEY_PLACES,
# )
# invoice_uid = forms.IntegerField(max_value=RandomUIDAbstractModel.MAX_UID, widget=forms.HiddenInput())
#
# payment_system = ChoiceField(
# label=_('payment system'),
# widget=RadioSelect,
# choices=PAYMENT_SYSTEM_CHOICES
# )
#
# PAYMENT_SYSTEM_CHOICES = (
# ('webmoney_add_money', 'Webmoney WMR'),
# ('qiwi_add_money', 'Qiwi RUB'),
# )
#
# Path: payway/accounts/models.py
# class Invoice(RandomUIDAbstractModel, TimeStampedModel):
#
# account = models.ForeignKey(Account, related_name='invoices', verbose_name=_('account'))
# money_amount = fields.MoneyField(_('money amount'), max_digits=MAX_MONEY_DIGITS, decimal_places=MAX_MONEY_PLACES) # for transmit
# money_amount_without_percent = fields.MoneyField(_('money amount without percent'), max_digits=MAX_MONEY_DIGITS, decimal_places=MAX_MONEY_PLACES) # befor transmit
# objects = ChainableQuerySetManager(InvoiceQuerySet)
#
# class Meta:
# verbose_name = _('invoice')
# verbose_name_plural = _('invoices')
# db_table = 'payway_invoices'
#
# def __init__(self, *args, **kwargs):
# money_amount = kwargs.get('money_amount') or None
# if money_amount:
# kwargs.setdefault('money_amount_without_percent', money_amount)
# super(Invoice, self).__init__(*args, **kwargs)
#
# def get_success_response_payments(self):
# return self.accounts_responsepayment_related.filter(is_OK=ResponsePayment.OK_STATUS.SUCCESS)
#
# def __unicode__(self):
# return u'{0}'.format(self.uid)
#
# def update_money_amount_with_percent(self, percent=0.0):
# self.money_amount_without_percent = self.money_amount
# self.money_amount += round_down(percent % self.money_amount)
# self.save()
#
# class Account(RandomUIDAbstractModel):
# user = models.ForeignKey(User, related_name='accounts', verbose_name=_('user'))
# currency_code = models.CharField(_('currency code'), max_length=3, choices=CURRENCY_CHOICES, default=moneyed.RUB)
#
# class Meta:
# verbose_name = _('account')
# verbose_name_plural = _('accounts')
# permissions = (('can_view_account_report', 'Can view account report'),)
# db_table = 'payway_accounts'
#
# @property
# def currency(self):
# currency = None
# if self.currency_code:
# currency = get_currency(self.currency_code)
# return currency
#
# def __unicode__(self):
# return u"{0} {1}".format(self.uid, self.currency)
#
# def get_balance(self):
# sum = Transaction.objects.filter(account=self, money_amount_currency=self.currency).sum_values()
# return round_down(Money(sum, self.currency))
#
# def withdraw(self, money, allow_overdraft=False):
# self.assert_correct_currency(money)
#
# if money < Money(0, self.currency):
# raise ValueError("You can't withdraw a negative amount")
#
# if not allow_overdraft and (self.get_balance() - money) < Money(0, self.currency):
# raise Overdraft
#
# return Transaction.objects.create(
# account=self,
# money_amount=money * '-1.0',
# )
#
# def add(self, money):
# self.assert_correct_currency(money)
#
# if money < Money(0, self.currency):
# raise ValueError("You can't add a negative money amount")
#
# return Transaction.objects.create(
# account=self,
# money_amount=money,
# )
#
# def transfer(self, money, to_account):
# self.assert_correct_currency(money)
#
# if money < Money(0, self.currency):
# raise ValueError("You can't transfer a negative money amount")
#
# self.withdraw(money)
#
# return Transaction.objects.create(
# account=to_account,
# money_amount=money,
# )
#
# def assert_correct_currency(self, money):
# if money.currency != self.currency:
# raise ValueError("You can't add money with %s currency. Current currency %s" % (money.currency, self.currency))
. Output only the next line. | account = get_object_or_404(Account, uid=account_uid) |
Using the snippet: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
TNX_ID_RE = re.compile(ur'^\w{1,30}$')
class RequestPaymentForm(forms.Form):
txn_id = forms.RegexField(regex=TNX_ID_RE, label=_(u'unique payment number'), widget=forms.HiddenInput())
to = forms.IntegerField(
label=_('10 digits phone number'),
max_value=9999999999,
widget=forms.TextInput(attrs={'maxlength':'10'})
)
summ = forms.DecimalField(
<|code_end|>
, determine the next line of code. You have imports:
import re
from django import forms
from django.utils.translation import ugettext_lazy as _
from payway.accounts.models import MAX_MONEY_DIGITS, MAX_MONEY_PLACES
from payway.qiwi.conf.settings import QIWI_PERCENT
and context (class names, function names, or code) available:
# Path: payway/accounts/models.py
# MAX_MONEY_DIGITS = 20
#
# MAX_MONEY_PLACES = 2
#
# Path: payway/qiwi/conf/settings.py
# QIWI_PERCENT = getattr(settings, 'QIWI_PERCENT', 4.0)
. Output only the next line. | max_digits=MAX_MONEY_DIGITS, |
Predict the next line for this snippet: <|code_start|># -*- coding: UTF-8 -*-
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
TNX_ID_RE = re.compile(ur'^\w{1,30}$')
class RequestPaymentForm(forms.Form):
txn_id = forms.RegexField(regex=TNX_ID_RE, label=_(u'unique payment number'), widget=forms.HiddenInput())
to = forms.IntegerField(
label=_('10 digits phone number'),
max_value=9999999999,
widget=forms.TextInput(attrs={'maxlength':'10'})
)
summ = forms.DecimalField(
max_digits=MAX_MONEY_DIGITS,
<|code_end|>
with the help of current file imports:
import re
from django import forms
from django.utils.translation import ugettext_lazy as _
from payway.accounts.models import MAX_MONEY_DIGITS, MAX_MONEY_PLACES
from payway.qiwi.conf.settings import QIWI_PERCENT
and context from other files:
# Path: payway/accounts/models.py
# MAX_MONEY_DIGITS = 20
#
# MAX_MONEY_PLACES = 2
#
# Path: payway/qiwi/conf/settings.py
# QIWI_PERCENT = getattr(settings, 'QIWI_PERCENT', 4.0)
, which may contain function names, class names, or code. Output only the next line. | decimal_places=MAX_MONEY_PLACES, |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.