| Instruction (string, length 362–7.83k) | output_code (string, length 1–945) |
|---|---|
Based on the snippet: <|code_start|># For each of step of the output sequence, decide which character should be chosen
model.add(TimeDistributed(Dense(INPUT_DIMENSION_SIZE+2)))
# model.add(Activation('softmax'))
# model.add(Activation('hard_sigmoid'))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
# loss='mse',
optimizer='adam',
metrics=['accuracy'])
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Model architecture")
plot(model, show_shapes=True, to_file=FOLDER+"lstm_associative_recall.png")
print("Model summary")
print(model.summary())
print("Model parameter count")
print(model.count_params())
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Training...")
# Train the model each generation and show predictions against the
# validation dataset
for iteration in range(1, 200):
print()
print('-' * 78)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print('Iteration', iteration)
<|code_end|>
, predict the immediate next line with the help of imports:
from keras.models import Sequential
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent
from keras.utils.visualize_util import plot # Add by Steven Robot
from keras.callbacks import ModelCheckpoint # Add by Steven Robot
from keras.callbacks import Callback # Add by Steven Robot
from algorithm_learning.util import LossHistory # Add by Steven Robot
import numpy as np
import dataset # Add by Steven Robot
import visualization # Add by Steven Robot
import time # Add by Steven Robot
import os # Add by Steven Robot
and context (classes, functions, sometimes code) from other files:
# Path: algorithm_learning/util.py
# class LossHistory(Callback):
# def on_train_begin(self, logs={}):
# self.losses = []
# self.acces = []
#
# def on_batch_end(self, batch, logs={}):
# self.losses.append(logs.get('loss'))
# self.acces.append(logs.get('acc'))
. Output only the next line. | history = LossHistory() |
Given snippet: <|code_start|>epsilon = 1e-8
ADAM_ = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
model.compile(loss='binary_crossentropy',
# loss='mse',
# optimizer='adam',
optimizer=ADAM_,
metrics=['accuracy'])
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Model architecture")
plot(model, show_shapes=True, to_file=FOLDER+"lstm_associative_recall.png")
print("Model summary")
print(model.summary())
print("Model parameter count")
print(model.count_params())
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Training...")
# Train the model each generation and show predictions against the
# validation dataset
losses = []
acces = []
for iteration in range(1, 2):
print()
print('-' * 78)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print('Iteration', iteration)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from keras.models import Sequential
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent
from keras.utils.visualize_util import plot # Add by Steven Robot
from keras.callbacks import ModelCheckpoint # Add by Steven Robot
from keras.callbacks import Callback # Add by Steven Robot
from algorithm_learning.util import LossHistory # Add by Steven Robot
from keras.optimizers import Adam # Add by Steven Robot
import numpy as np
import dataset # Add by Steven Robot
import visualization # Add by Steven Robot
import time # Add by Steven Robot
import os # Add by Steven Robot
import sys # Add by Steven Robot
and context:
# Path: algorithm_learning/util.py
# class LossHistory(Callback):
# def on_train_begin(self, logs={}):
# self.losses = []
# self.acces = []
#
# def on_batch_end(self, batch, logs={}):
# self.losses.append(logs.get('loss'))
# self.acces.append(logs.get('acc'))
which might include code, classes, or functions. Output only the next line. | history = LossHistory() |
Based on the snippet: <|code_start|># compile the model
model.compile(loss='binary_crossentropy',
# loss='mse',
# optimizer='adam',
optimizer=ADAM_,
metrics=['accuracy'])
# show the information of the model
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Model architecture")
plot(model, show_shapes=True, to_file=FOLDER+"lstm_associative_recall.png")
print("Model summary")
print(model.summary())
print("Model parameter count")
print(model.count_params())
# begain training
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Training...")
# Train the model each generation and show predictions against the
# validation dataset
losses = []
acces = []
for iteration in range(1, 3):
print()
print('-' * 78)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print('Iteration', iteration)
<|code_end|>
, predict the immediate next line with the help of imports:
from keras.models import Sequential
from keras.layers import Activation, TimeDistributed, Dense, recurrent
from keras.callbacks import ModelCheckpoint # Add by Steven Robot
from keras.utils.visualize_util import plot # Add by Steven Robot
from keras.optimizers import Adam # Add by Steven Robot
from util import LossHistory # Add by Steven Robot
from keras.callbacks import LambdaCallback # Add by Steven Robot
import numpy as np
import dataset # Add by Steven Robot
import time # Add by Steven Robot
import os # Add by Steven Robot
import sys # Add by Steven Robot
import matplotlib.pyplot as plt
import visualization
and context (classes, functions, sometimes code) from other files:
# Path: util.py
# class LossHistory(Callback):
# def on_train_begin(self, logs={}):
# self.losses = []
# self.acces = []
#
# def on_batch_end(self, batch, logs={}):
# self.losses.append(logs.get('loss'))
# self.acces.append(logs.get('acc'))
. Output only the next line. | history = LossHistory() |
Based on the snippet: <|code_start|>model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
# loss='mse',
optimizer='adam',
metrics=['accuracy'])
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Model architecture")
plot(model, show_shapes=True, to_file=FOLDER+"lstm_repeat_copying.png")
print("Model summary")
print(model.summary())
print("Model config")
print(model.get_config())
print("Model parameter count")
print(model.count_params())
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Training...")
# Train the model each generation and show predictions against the
# validation dataset
losses = []
acces = []
for iteration in range(1, 3):
print()
print('-' * 78)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print('Iteration', iteration)
<|code_end|>
, predict the immediate next line with the help of imports:
from keras.models import Sequential
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent
from keras.utils.visualize_util import plot # Add by Steven Robot
from keras.layers import Merge # Add by Steven Robot
from keras.callbacks import ModelCheckpoint # Add by Steven Robot
from keras.callbacks import Callback # Add by Steven Robot
from util import LossHistory # Add by Steven Robot
from keras_tqdm import TQDMNotebookCallback # Add by Steven Robot
import numpy as np
import dataset # Add by Steven Robot
import visualization # Add by Steven Robot
import time # Add by Steven Robot
import time # Add by Steven Robot
import os # Add by Steven Robot
import sys # Add by Steven Robot
and context (classes, functions, sometimes code) from other files:
# Path: util.py
# class LossHistory(Callback):
# def on_train_begin(self, logs={}):
# self.losses = []
# self.acces = []
#
# def on_batch_end(self, batch, logs={}):
# self.losses.append(logs.get('loss'))
# self.acces.append(logs.get('acc'))
. Output only the next line. | history = LossHistory() |
Given the code snippet: <|code_start|>model.add(TimeDistributed(Dense(INPUT_DIMENSION_SIZE)))
# model.add(Activation('softmax'))
# model.add(Activation('hard_sigmoid'))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
# loss='mse',
optimizer='adam',
metrics=['accuracy'])
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Model architecture")
plot(model, show_shapes=True, to_file=FOLDER+"simple_rnn_for_copying.png")
print("Model summary")
print(model.summary())
print("Model parameter count")
print(model.count_params())
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Training...")
# Train the model each generation and show predictions against the
# validation dataset
for iteration in range(1, 200):
print()
print('-' * 78)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print('Iteration', iteration)
<|code_end|>
, generate the next line using the imports in this file:
from keras.models import Sequential
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent
from keras.utils.visualize_util import plot # Add by Steven Robot
from keras.callbacks import ModelCheckpoint # Add by Steven Robot
from keras.callbacks import Callback # Add by Steven Robot
from algorithm_learning.util import LossHistory # Add by Steven Robot
import numpy as np
import dataset # Add by Steven Robot
import visualization # Add by Steven Robot
import time # Add by Steven Robot
import os # Add by Steven Robot
and context (functions, classes, or occasionally code) from other files:
# Path: algorithm_learning/util.py
# class LossHistory(Callback):
# def on_train_begin(self, logs={}):
# self.losses = []
# self.acces = []
#
# def on_batch_end(self, batch, logs={}):
# self.losses.append(logs.get('loss'))
# self.acces.append(logs.get('acc'))
. Output only the next line. | history = LossHistory() |
Given the following code snippet before the placeholder: <|code_start|>model.add(TimeDistributed(Dense(INPUT_DIMENSION_SIZE+2)))
# model.add(Activation('softmax'))
# model.add(Activation('hard_sigmoid'))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
#loss='mse',
#loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Model architecture")
plot(model, show_shapes=True, to_file=FOLDER+"lstm_priority_sort.png")
print("Model summary")
print(model.summary())
print("Model parameter count")
print(model.count_params())
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Training...")
# Train the model each generation and show predictions against the
# validation dataset
for iteration in range(1, 200):
print()
print('-' * 78)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print('Iteration', iteration)
<|code_end|>
, predict the next line using imports from the current file:
from keras.models import Sequential
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent
from keras.utils.visualize_util import plot # Add by Steven
from keras.layers import Merge # Add by Steven Robot
from keras.callbacks import ModelCheckpoint # Add by Steven Robot
from keras.callbacks import Callback # Add by Steven Robot
from algorithm_learning.util import LossHistory # Add by Steven Robot
import numpy as np
import dataset # Add by Steven Robot
import visualization # Add by Steven
import time # Add by Steven Robot
import os
and context including class names, function names, and sometimes code from other files:
# Path: algorithm_learning/util.py
# class LossHistory(Callback):
# def on_train_begin(self, logs={}):
# self.losses = []
# self.acces = []
#
# def on_batch_end(self, batch, logs={}):
# self.losses.append(logs.get('loss'))
# self.acces.append(logs.get('acc'))
. Output only the next line. | history = LossHistory() |
Using the snippet: <|code_start|># For each of step of the output sequence, decide which character should be chosen
model.add(TimeDistributed(Dense(INPUT_DIMENSION_SIZE)))
# model.add(Activation('softmax'))
# model.add(Activation('hard_sigmoid'))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
# loss='mse',
optimizer='adam',
metrics=['accuracy'])
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Model architecture")
plot(model, show_shapes=True, to_file=FOLDER+"lstm_repeat_copying.png")
print("Model summary")
print(model.summary())
print("Model parameter count")
print(model.count_params())
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Training...")
# Train the model each generation and show predictions against the
# validation dataset
for iteration in range(1, 200):
print()
print('-' * 78)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print('Iteration', iteration)
<|code_end|>
, determine the next line of code. You have imports:
from keras.models import Sequential
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent
from keras.utils.visualize_util import plot # Add by Steven Robot
from keras.layers import Merge # Add by Steven Robot
from keras.callbacks import ModelCheckpoint # Add by Steven Robot
from keras.callbacks import Callback # Add by Steven Robot
from algorithm_learning.util import LossHistory # Add by Steven Robot
import numpy as np
import dataset # Add by Steven Robot
import visualization # Add by Steven Robot
import time # Add by Steven Robot
import os # Add by Steven Robot
and context (class names, function names, or code) available:
# Path: algorithm_learning/util.py
# class LossHistory(Callback):
# def on_train_begin(self, logs={}):
# self.losses = []
# self.acces = []
#
# def on_batch_end(self, batch, logs={}):
# self.losses.append(logs.get('loss'))
# self.acces.append(logs.get('acc'))
. Output only the next line. | history = LossHistory() |
Predict the next line for this snippet: <|code_start|>model.add(TimeDistributed(Dense(INPUT_DIMENSION_SIZE+1)))
model.add(Activation('softmax'))
# model.add(Activation('hard_sigmoid'))
# model.add(Activation('sigmoid'))
model.compile(#loss='binary_crossentropy',
# loss='mse',
loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Model architecture")
plot(model, show_shapes=True, to_file=FOLDER+"lstm_n_gram.png")
print("Model summary")
print(model.summary())
print("Model parameter count")
print(model.count_params())
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Training...")
# Train the model each generation and show predictions against the
# validation dataset
for iteration in range(1, 200):
print()
print('-' * 78)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print('Iteration', iteration)
<|code_end|>
with the help of current file imports:
from keras.models import Sequential
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent
from keras.utils.visualize_util import plot # Add by Steven
from keras.layers import Merge # Add by Steven Robot
from keras.callbacks import ModelCheckpoint # Add by Steven Robot
from keras.callbacks import Callback # Add by Steven Robot
from algorithm_learning.util import LossHistory # Add by Steven Robot
import numpy as np
import dataset # Add by Steven Robot
import visualization # Add by Steven
import time # Add by Steven Robot
import cPickle as pickle
import random
import os
and context from other files:
# Path: algorithm_learning/util.py
# class LossHistory(Callback):
# def on_train_begin(self, logs={}):
# self.losses = []
# self.acces = []
#
# def on_batch_end(self, batch, logs={}):
# self.losses.append(logs.get('loss'))
# self.acces.append(logs.get('acc'))
, which may contain function names, class names, or code. Output only the next line. | history = LossHistory() |
Given the following code snippet before the placeholder: <|code_start|># model.add(TimeDistributed(Dense(INPUT_DIMENSION_SIZE+2)))
# # model.add(Activation('softmax'))
# # model.add(Activation('hard_sigmoid'))
# model.add(Activation('sigmoid'))
print('Compile...')
model.compile(loss='binary_crossentropy',
# loss='mse',
optimizer='adam',
metrics=['accuracy'])
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Model architecture")
plot(model, show_shapes=True, to_file=FOLDER+"lstm_associative_recall.png")
print("Model summary")
print(model.summary())
print("Model parameter count")
print(model.count_params())
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Training...")
# Train the model each generation and show predictions against the
# validation dataset
for iteration in range(1, 200):
print()
print('-' * 78)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print('Iteration', iteration)
<|code_end|>
, predict the next line using imports from the current file:
from keras.models import Sequential
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent
from keras.utils.visualize_util import plot # Add by Steven Robot
from keras.callbacks import ModelCheckpoint # Add by Steven Robot
from keras.callbacks import Callback # Add by Steven Robot
from util import LossHistory # Add by Steven Robot
from keras.layers import Input, Dense
from keras.models import Model
import numpy as np
import dataset # Add by Steven Robot
import visualization # Add by Steven Robot
import time # Add by Steven Robot
import os # Add by Steven Robot
import ntm # Add by Steven Robot
and context including class names, function names, and sometimes code from other files:
# Path: util.py
# class LossHistory(Callback):
# def on_train_begin(self, logs={}):
# self.losses = []
# self.acces = []
#
# def on_batch_end(self, batch, logs={}):
# self.losses.append(logs.get('loss'))
# self.acces.append(logs.get('acc'))
. Output only the next line. | history = LossHistory() |
Given the following code snippet before the placeholder: <|code_start|># For each of step of the output sequence, decide which character should be chosen
model.add(TimeDistributed(Dense(INPUT_DIMENSION_SIZE+2)))
# model.add(Activation('softmax'))
# model.add(Activation('hard_sigmoid'))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
# loss='mse',
optimizer='adam',
metrics=['accuracy'])
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Model architecture")
plot(model, show_shapes=True, to_file=FOLDER+"lstm_associative_recall.png")
print("Model summary")
print(model.summary())
print("Model parameter count")
print(model.count_params())
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Training...")
# Train the model each generation and show predictions against the
# validation dataset
for iteration in range(1, 200):
print()
print('-' * 78)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print('Iteration', iteration)
<|code_end|>
, predict the next line using imports from the current file:
from keras.models import Sequential
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent
from keras.utils.visualize_util import plot # Add by Steven Robot
from keras.callbacks import ModelCheckpoint # Add by Steven Robot
from keras.callbacks import Callback # Add by Steven Robot
from util import LossHistory # Add by Steven Robot
import numpy as np
import dataset # Add by Steven Robot
import visualization # Add by Steven Robot
import time # Add by Steven Robot
import os # Add by Steven Robot
import lstm2ntm # Add by Steven Robot
and context including class names, function names, and sometimes code from other files:
# Path: util.py
# class LossHistory(Callback):
# def on_train_begin(self, logs={}):
# self.losses = []
# self.acces = []
#
# def on_batch_end(self, batch, logs={}):
# self.losses.append(logs.get('loss'))
# self.acces.append(logs.get('acc'))
. Output only the next line. | history = LossHistory() |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/preferencesDialog.ui'
#
# Created: Sat Jul 25 12:17:10 2015
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_CanardPreferencesDialog(object):
def setupUi(self, CanardPreferencesDialog):
CanardPreferencesDialog.setObjectName(_fromUtf8("CanardPreferencesDialog"))
CanardPreferencesDialog.resize(512, 375)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/actions/preferences-system-2.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
CanardPreferencesDialog.setWindowIcon(icon)
self.verticalLayout = QtGui.QVBoxLayout(CanardPreferencesDialog)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
<|code_end|>
, predict the next line using imports from the current file:
from PyQt4 import QtCore, QtGui
from SQBLWidgets.FingerTabWidget import FingerTabWidget
import canard_rc
and context including class names, function names, and sometimes code from other files:
# Path: SQBLWidgets/FingerTabWidget.py
# class FingerTabWidget(QtGui.QTabWidget):
# """A QTabWidget equivalent which uses our FingerTabBarWidget"""
# def __init__(self, parent, **kwargs):
# QtGui.QTabWidget.__init__(self, parent, **kwargs)
# self.setTabBar(FingerTabBarWidget(self))
# self.tabBar().tabLayoutChange()
. Output only the next line. | self.tabWidget = FingerTabWidget(CanardPreferencesDialog) |
Predict the next line for this snippet: <|code_start|>
def createURL(self):
repository = self.repositoryURLs.currentText()
searchTerms = self.searchTerms.text()
if self.objectType.isVisible():
objectType = self.objectType.currentText()
else:
objectType = self.objectTypeValue
return "%s/%s/?s=%s"%(repository,objectType,searchTerms)
def updateDesc(self):
self.selected = self.retrievedItems.currentRow()
current = self.items[self.selected]
self.currentURI = current.get('uri')
name = current.xpath("t:TextComponent[@xml:lang='%s']/t:Name"%self.language,namespaces=_namespaces)[0].text
desc = current.xpath("t:TextComponent[@xml:lang='%s']/t:Description"%self.language,namespaces=_namespaces)[0].text
text = "<b><a href='{url}'>{name}</a></b><p>{desc}</p>".format(
url = current.get('additionalInformation'),
name = name,
desc = desc
)
self.itemDetails.setText(text)
def getValues(self):
# Add Object to the cache
repository = self.repositoryURLs.currentText()
if self.objectType.isVisible():
objectType = self.objectType.currentText()
else:
objectType = self.objectTypeValue
<|code_end|>
with the help of current file imports:
import sqblUI
import isoLangCodes
import logging
import urllib2
import os, sys
from PyQt4 import QtCore, QtGui
from SQBLmodel import _ns, _namespaces
from lxml import etree
from Canard_settings import TUSLObjectCache as cache
and context from other files:
# Path: Canard_settings.py
# class TUSLCache:
# def __init__(self):
# def __del__(self):
# def setProperty(self,uri,obj):
# def getProperty(self,uri=None):
# def setObjectClass(self,uri,obj):
# def getObjectClass(self,uri=None):
# def insertCache(self,uri,objType,obj):
# def retrieveCache(self,uri,objType):
# def getPref(key,default=None):
# def setPref(key,value):
, which may contain function names, class names, or code. Output only the next line. | cache.insertCache(uri=self.currentURI,objType=objectType,obj=etree.tostring(self.items[self.selected])) |
Using the snippet: <|code_start|> if node[-1]:
params = [s.str for s in node[-1]]
params = ", ".join(params) + ", "
else:
params = ""
returns = [s.str for s in node[:-1]]
returns = ", ".join(returns)
return "%(name)s(" + params + returns + ") ;"
def Returns(node):
"""Return value in function definition with zero or multiple returns
Adds type prefix and '&' (since they are to be placed in parameters)
Contains: Return*
Examples:
>>> print(matlab2cpp.qscript("function [a,b]=f(); a=1, b=2."))
void f(int& a, double& b)
{
a = 1 ;
b = 2. ;
}
"""
out = ""
for child in node[:]:
<|code_end|>
, determine the next line of code. You have imports:
import matlab2cpp
import doctest
from .function import type_string
from .variables import Get
and context (class names, function names, or code) available:
# Path: src/matlab2cpp/rules/function.py
# def type_string(node):
# """
# Determine string represnentation of type.
#
# Outside scalars and armadillo, the datatype name and their declaration do not
# match. This function converts simple datatype declaration and translate them to
# equivalent C++ declarations.
#
# +-----------------+-----------------------+
# | Input | Output |
# +=================+=======================+
# | numerical types | node.type |
# +-----------------+-----------------------+
# | struct, structs | struct container name |
# +-----------------+-----------------------+
# | func_lambda | std::function<...> |
# +-----------------+-----------------------+
# | string | std::string |
# +-----------------+-----------------------+
#
# Args:
# node (Node): location in tree
# Returns:
# str: String representation of node type
# """
#
# # lambda-function
# if node.type == "func_lambda":
#
# # link to actual lambda-function
# func = None
#
# if hasattr(node.declare, "reference"):
# func = node.declare.reference
#
# elif "_"+node.name in node.program[1].names:
# func = node.program[1]["_"+node.name]
#
# if not (func is None):
#
# # no returns in lambda
# if len(func[1]) == 0:
# ret = "void"
# prm = ", ".join([p.type for p in func[2]])
#
# # single return
# elif len(func[1]) == 1:
# ret = func[1][0].type
# prm = ", ".join([p.type for p in func[2]])
#
# # multiple return
# else:
# ret = "void"
# prm = ", ".join([p.type for p in func[2][:]+func[1][:]])
#
# return "std::function<" + ret + "(" + prm + ")>"
#
# else:
# node.warning("lambda function content not found")
# return "std::function"
#
# # struct scalar and array type
# elif node.type in ("struct", "structs"):
# declare = node.declare
# if declare.parent.cls == "Struct":
# declare = declare.parent
# return "_" + declare.name.capitalize()
#
# elif node.type == "string":
# return "std::string"
#
# return node.type
#
# Path: src/matlab2cpp/rules/variables.py
# def Get(node):
# return "%(name)s(", ", ", ")"
. Output only the next line. | out += ", " + type_string(child) + "& " + str(child) |
Given snippet: <|code_start|>%(3)s
}"""
return """void %(name)s(%(2)s%(1)s)
{
%(3)s
}"""
def Var(node):
"""Function call as variable
Writing a function as if a variable, is equivalent to calling the function
without arguments.
Property: name (of variable)
Examples:
>>> print(matlab2cpp.qscript("function f(); end; function g(); f"))
void f()
{
// Empty block
}
<BLANKLINE>
void g()
{
f() ;
}
"""
# push the job over to Get
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import matlab2cpp
import doctest
from .function import type_string
from .variables import Get
and context:
# Path: src/matlab2cpp/rules/function.py
# def type_string(node):
# """
# Determine string represnentation of type.
#
# Outside scalars and armadillo, the datatype name and their declaration do not
# match. This function converts simple datatype declaration and translate them to
# equivalent C++ declarations.
#
# +-----------------+-----------------------+
# | Input | Output |
# +=================+=======================+
# | numerical types | node.type |
# +-----------------+-----------------------+
# | struct, structs | struct container name |
# +-----------------+-----------------------+
# | func_lambda | std::function<...> |
# +-----------------+-----------------------+
# | string | std::string |
# +-----------------+-----------------------+
#
# Args:
# node (Node): location in tree
# Returns:
# str: String representation of node type
# """
#
# # lambda-function
# if node.type == "func_lambda":
#
# # link to actual lambda-function
# func = None
#
# if hasattr(node.declare, "reference"):
# func = node.declare.reference
#
# elif "_"+node.name in node.program[1].names:
# func = node.program[1]["_"+node.name]
#
# if not (func is None):
#
# # no returns in lambda
# if len(func[1]) == 0:
# ret = "void"
# prm = ", ".join([p.type for p in func[2]])
#
# # single return
# elif len(func[1]) == 1:
# ret = func[1][0].type
# prm = ", ".join([p.type for p in func[2]])
#
# # multiple return
# else:
# ret = "void"
# prm = ", ".join([p.type for p in func[2][:]+func[1][:]])
#
# return "std::function<" + ret + "(" + prm + ")>"
#
# else:
# node.warning("lambda function content not found")
# return "std::function"
#
# # struct scalar and array type
# elif node.type in ("struct", "structs"):
# declare = node.declare
# if declare.parent.cls == "Struct":
# declare = declare.parent
# return "_" + declare.name.capitalize()
#
# elif node.type == "string":
# return "std::string"
#
# return node.type
#
# Path: src/matlab2cpp/rules/variables.py
# def Get(node):
# return "%(name)s(", ", ", ")"
which might include code, classes, or functions. Output only the next line. | return Get(node) |
Here is a snippet: <|code_start|>
def vec(node):
if len(node) != 1:
if not len(node):
return
elif len(node) == 2 and node[1].cls == "Int" and node[1].value == "1":
pass
else:
return
<|code_end|>
. Write the next line using the current file imports:
from ..rules import armadillo as arma
and context from other files:
# Path: src/matlab2cpp/rules/armadillo.py
# def configure_arg(node, index):
# def scalar_assign(node):
# def include(node):
, which may include functions, classes, or code. Output only the next line. | arg, dim = arma.configure_arg(node[0], 0) |
Predict the next line for this snippet: <|code_start|> The RPCServer thread provides an API for external programs to interact
with MOM.
"""
def __init__(self, config, momFuncs):
threading.Thread.__init__(self, name="RPCServer")
self.setDaemon(True)
self.config = config
self.momFuncs = momFuncs
self.logger = logging.getLogger('mom.RPCServer')
self.server = None
self.start()
def thread_ok(self):
if self.server is None:
return True
return self.is_alive()
def create_server(self):
try:
unix_port = None
port = self.config.getint('main', 'rpc-port')
except ValueError:
port = None
unix_port = self.config.get('main', 'rpc-port')
self.logger.info("Using unix socket "+unix_port)
if unix_port is None and (port is None or port < 0):
return None
if unix_port:
<|code_end|>
with the help of current file imports:
import threading
import six
from six.moves.xmlrpc_client import Marshaller
from six.moves.xmlrpc_server import SimpleXMLRPCServer
from six.moves.xmlrpc_server import SimpleXMLRPCRequestHandler
from .unixrpc import UnixXmlRpcServer
from .LogUtils import *
and context from other files:
# Path: mom/unixrpc.py
# class UnixXmlRpcServer(socketserver.UnixStreamServer,
# xmlrpc_server.SimpleXMLRPCDispatcher):
# address_family = socket.AF_UNIX
# allow_address_reuse = True
#
# def __init__(self, sock_path, request_handler=UnixXmlRpcHandler,
# logRequests=0):
# if os.path.exists(sock_path):
# os.unlink(sock_path)
# self.logRequests = logRequests
# xmlrpc_server.SimpleXMLRPCDispatcher.__init__(self,
# encoding=None,
# allow_none=1)
# socketserver.UnixStreamServer.__init__(self, sock_path,
# request_handler)
, which may contain function names, class names, or code. Output only the next line. | self.server = UnixXmlRpcServer(unix_port) |
Based on the snippet: <|code_start|> self._check_status(response)
return response['vmList'][0]['pid']
except vdsmException as e:
e.handle_exception()
return None
def getVmList(self):
vmIds = []
try:
response = self.vdsm_api.getVMList()
self._check_status(response)
vm_list = response['vmList']
for vm in vm_list:
if self._vmIsRunning(vm):
vmIds.append(vm['vmId'])
self.logger.debug('VM List: %s', vmIds)
return vmIds
except vdsmException as e:
e.handle_exception()
return None
def getVmMemoryStats(self, uuid):
ret = {}
try:
vm = API.VM(uuid)
response = vm.getStats()
self._check_status(response)
usage = int(response['statsList'][0]['memUsage'])
if usage == 0:
msg = "VM %s - The ovirt-guest-agent is not active" % uuid
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import API
import supervdsm
import logging
import traceback
from mom.HypervisorInterfaces.HypervisorInterface import HypervisorInterface, \
HypervisorInterfaceError
and context (classes, functions, sometimes code) from other files:
# Path: mom/HypervisorInterfaces/HypervisorInterface.py
# class HypervisorInterface(object):
# """
# HypervisorInterface is an abstract class which defines all interfaces
# used by MOM to get guest memory statistics and control guest memory
# ballooning. Its sub classes libvirt and vdsm need implement all these
# interfaces by calling their respective API.
# """
# def getVmList(self):
# """
# This method returns a list, which is composed of the active guests'
# identifiers.
# """
# pass
#
# def getVmInfo(self, uuid):
# """
# This method returns basic information of a given guest, including
# name, uuid and pid.
# """
# pass
#
# def startVmMemoryStats(self, uuid):
# """
# This method activates the memory statistics of a given guest.
# """
# pass
#
# def getVmMemoryStats(self, uuid):
# """
# This method returns the memory statistics of a given guest. The stat
# fields are decided by the real hypervisor interface.
# """
# pass
#
# def getVmBalloonInfo(self, uuid):
# """
# This method returns the balloon info a given guest, which includes two
# fields:
# balloon_max - The maximum amount of memory the guest may use
# balloon_cur - The current memory limit (set by ballooning)
# """
# pass
#
# def setVmBalloonTarget(self, uuid, target):
# """
# This method sets the balloon target of a given guest. It's used by the
# controller Balloon to inflate or deflate the balloon according to this
# guest's memory usage.
# """
# pass
#
# def ksmTune(self, tuningParams):
# """
# This method is used to set KSM tuning parameters by the controller KSM.
# """
# pass
#
# class HypervisorInterfaceError(Exception):
# pass
. Output only the next line. | raise HypervisorInterfaceError(msg) |
Using the snippet: <|code_start|># Memory Overcommitment Manager
# Copyright (C) 2016 Martin Sivak, Red Hat
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
class JsonRpcVdsmBulkInterface(JsonRpcVdsmInterface):
"""
JsonRpcVdsmBulkInterface extends the JsonRpcVdsmInterface and
overrides the getIoTune and getIoTunePolicy methods so that
the new vdsm api can be utilized
"""
def __init__(self):
super(JsonRpcVdsmBulkInterface, self).__init__()
<|code_end|>
, determine the next line of code. You have imports:
from .vdsmCommon import memoize
from mom.HypervisorInterfaces.vdsmjsonrpcInterface import JsonRpcVdsmInterface, \
CACHE_EXPIRATION
and context (class names, function names, or code) available:
# Path: mom/HypervisorInterfaces/vdsmCommon.py
# def memoize(expiration):
# def decorator(obj):
# lock = threading.Lock()
# cache = obj._cache = {}
# timestamps = obj._timestamps = {}
#
# @functools.wraps(obj)
# def memoizer(*args, **kwargs):
# key = str(args) + str(kwargs)
# now = time.time()
#
# # use absolute value of the time difference to avoid issues
# # with time changing to the past
#
# with lock:
# if key not in cache or abs(now - timestamps[key]) > expiration:
# cache[key] = obj(*args, **kwargs)
# timestamps[key] = now
# return cache[key]
# return memoizer
# return decorator
#
# Path: mom/HypervisorInterfaces/vdsmjsonrpcInterface.py
# class JsonRpcVdsmInterface(VdsmRpcBase):
# """
# vdsmInterface provides a wrapper for the VDSM API so that VDSM-
# related error handling can be consolidated in one place. An instance of
# this class provides a single VDSM connection that can be shared by all
# threads.
# """
#
# def __init__(self):
# super(JsonRpcVdsmInterface, self).__init__()
# self._vdsm_api = self.checked_call(jsonrpcvdscli.connect)\
# .orRaise(ConnectionError, 'No connection to VDSM.')
#
# self.checked_call(self._vdsm_api.ping)
#
# def _check_status(self, response):
# try:
# if response['status']['code']:
# raise vdsmException(response, self._logger)
#
# # This does not look as RPC response, ignore this check
# except (AttributeError, TypeError):
# pass
#
#
# @memoize(expiration=CACHE_EXPIRATION)
# def getAllVmStats(self):
# vms = {}
# ret = self.checked_call(self._vdsm_api.getAllVmStats)
#
# # the possible missing key is handled
# # by the Optional result type transparently
# for vm in ret['result']:
# vms[vm['vmId']] = vm
#
# for vm in ret['items']:
# vms[vm['vmId']] = vm
#
# return vms
#
# def setVmBalloonTarget(self, uuid, target):
# self.checked_call(self._vdsm_api.setBalloonTarget, uuid, target)
#
# def setVmCpuTune(self, uuid, quota, period):
# self.checked_call(self._vdsm_api.setCpuTuneQuota, uuid, quota)
# self.checked_call(self._vdsm_api.setCpuTunePeriod, uuid, period)
#
# def getVmIoTunePolicy(self, vmId):
# result = self.checked_call(self._vdsm_api.getIoTunePolicy, vmId)
# return result.get('items', []).orNone()
#
# def getVmIoTune(self, vmId):
# result = self.checked_call(self._vdsm_api.getIoTune, vmId)
# return result.get('items', []).orNone()
#
# def setVmIoTune(self, vmId, tunables):
# self.checked_call(self._vdsm_api.setIoTune, vmId, tunables)
#
# def ksmTune(self, tuningParams):
# self.checked_call(self._vdsm_api.setKsmTune, tuningParams)
#
# def checked_call(self, vdsm_method, *args, **kwargs):
# try:
# response = vdsm_method(*args, **kwargs)
# self._check_status(response)
# return Optional(response)
# except socket.error as e:
# self._logger.error("Cannot connect to VDSM! {0}".format(e))
# return Optional.missing()
# except vdsmException as e:
# e.handle_exception()
# return Optional.missing()
# except jsonrpcvdscli.JsonRpcNoResponseError as e:
# self._logger.error("No response from VDSM arrived! {0}".format(e))
# return Optional.missing()
#
# CACHE_EXPIRATION = 5
. Output only the next line. | @memoize(expiration=CACHE_EXPIRATION) |
Next line prediction: <|code_start|># Memory Overcommitment Manager
# Copyright (C) 2016 Martin Sivak, Red Hat
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
class JsonRpcVdsmBulkInterface(JsonRpcVdsmInterface):
"""
JsonRpcVdsmBulkInterface extends the JsonRpcVdsmInterface and
overrides the getIoTune and getIoTunePolicy methods so that
the new vdsm api can be utilized
"""
def __init__(self):
super(JsonRpcVdsmBulkInterface, self).__init__()
<|code_end|>
. Use current file imports:
(from .vdsmCommon import memoize
from mom.HypervisorInterfaces.vdsmjsonrpcInterface import JsonRpcVdsmInterface, \
CACHE_EXPIRATION)
and context including class names, function names, or small code snippets from other files:
# Path: mom/HypervisorInterfaces/vdsmCommon.py
# def memoize(expiration):
# def decorator(obj):
# lock = threading.Lock()
# cache = obj._cache = {}
# timestamps = obj._timestamps = {}
#
# @functools.wraps(obj)
# def memoizer(*args, **kwargs):
# key = str(args) + str(kwargs)
# now = time.time()
#
# # use absolute value of the time difference to avoid issues
# # with time changing to the past
#
# with lock:
# if key not in cache or abs(now - timestamps[key]) > expiration:
# cache[key] = obj(*args, **kwargs)
# timestamps[key] = now
# return cache[key]
# return memoizer
# return decorator
#
# Path: mom/HypervisorInterfaces/vdsmjsonrpcInterface.py
# class JsonRpcVdsmInterface(VdsmRpcBase):
# """
# vdsmInterface provides a wrapper for the VDSM API so that VDSM-
# related error handling can be consolidated in one place. An instance of
# this class provides a single VDSM connection that can be shared by all
# threads.
# """
#
# def __init__(self):
# super(JsonRpcVdsmInterface, self).__init__()
# self._vdsm_api = self.checked_call(jsonrpcvdscli.connect)\
# .orRaise(ConnectionError, 'No connection to VDSM.')
#
# self.checked_call(self._vdsm_api.ping)
#
# def _check_status(self, response):
# try:
# if response['status']['code']:
# raise vdsmException(response, self._logger)
#
# # This does not look as RPC response, ignore this check
# except (AttributeError, TypeError):
# pass
#
#
# @memoize(expiration=CACHE_EXPIRATION)
# def getAllVmStats(self):
# vms = {}
# ret = self.checked_call(self._vdsm_api.getAllVmStats)
#
# # the possible missing key is handled
# # by the Optional result type transparently
# for vm in ret['result']:
# vms[vm['vmId']] = vm
#
# for vm in ret['items']:
# vms[vm['vmId']] = vm
#
# return vms
#
# def setVmBalloonTarget(self, uuid, target):
# self.checked_call(self._vdsm_api.setBalloonTarget, uuid, target)
#
# def setVmCpuTune(self, uuid, quota, period):
# self.checked_call(self._vdsm_api.setCpuTuneQuota, uuid, quota)
# self.checked_call(self._vdsm_api.setCpuTunePeriod, uuid, period)
#
# def getVmIoTunePolicy(self, vmId):
# result = self.checked_call(self._vdsm_api.getIoTunePolicy, vmId)
# return result.get('items', []).orNone()
#
# def getVmIoTune(self, vmId):
# result = self.checked_call(self._vdsm_api.getIoTune, vmId)
# return result.get('items', []).orNone()
#
# def setVmIoTune(self, vmId, tunables):
# self.checked_call(self._vdsm_api.setIoTune, vmId, tunables)
#
# def ksmTune(self, tuningParams):
# self.checked_call(self._vdsm_api.setKsmTune, tuningParams)
#
# def checked_call(self, vdsm_method, *args, **kwargs):
# try:
# response = vdsm_method(*args, **kwargs)
# self._check_status(response)
# return Optional(response)
# except socket.error as e:
# self._logger.error("Cannot connect to VDSM! {0}".format(e))
# return Optional.missing()
# except vdsmException as e:
# e.handle_exception()
# return Optional.missing()
# except jsonrpcvdscli.JsonRpcNoResponseError as e:
# self._logger.error("No response from VDSM arrived! {0}".format(e))
# return Optional.missing()
#
# CACHE_EXPIRATION = 5
. Output only the next line. | @memoize(expiration=CACHE_EXPIRATION) |
Based on the snippet: <|code_start|> # Parse the data string
result = {}
for item in data.split(","):
parts = item.split(":")
result[parts[0]] = int(parts[1])
# Construct the return dict
ret = {}
for key in self.getFields():
if key in result:
ret[key] = result[key]
return ret
def getFields(self):
return {'mem_available', 'mem_unused', 'major_fault', 'minor_fault',
'swap_in', 'swap_out'}
#
# Begin Server-side code that runs on the guest
#
class _Server:
"""
A simple TCP server that implements the guest side of the guest network
Collector.
"""
def __init__(self, config):
self.config = config
self.logger = logging.getLogger('mom.Collectors.GuestNetworkDaemon.Server')
# Borrow a HostMemory Collector to get the needed data
<|code_end|>
, predict the immediate next line with the help of imports:
import socket
from subprocess import *
from mom.Collectors.Collector import *
from mom.Collectors.HostMemory import HostMemory
and context (classes, functions, sometimes code) from other files:
# Path: mom/Collectors/HostMemory.py
# class HostMemory(Collector):
# """
# This Collctor returns memory statistics about the host by examining
# /proc/meminfo and /proc/vmstat. The fields provided are:
# mem_available - The total amount of available memory (kB)
# mem_unused - The amount of memory that is not being used for any purpose (kB)
# mem_free - The amount of free memory including some caches (kB)
# swap_in - The amount of memory swapped in since the last collection (pages)
# swap_out - The amount of memory swapped out since the last collection (pages)
# anon_pages - The amount of memory used for anonymous memory areas (kB)
# """
# def __init__(self, properties):
# self.meminfo = open_datafile("/proc/meminfo")
# self.vmstat = open_datafile("/proc/vmstat")
# self.swap_in_prev = None
# self.swap_in_cur = None
# self.swap_out_prev = None
# self.swap_out_cur = None
#
# def __del__(self):
# if self.meminfo is not None:
# self.meminfo.close()
# if self.vmstat is not None:
# self.vmstat.close()
#
# def collect(self):
# self.meminfo.seek(0)
# self.vmstat.seek(0)
#
# contents = self.meminfo.read()
# avail = parse_int("^MemTotal: (.*) kB", contents)
# anon = parse_int("^AnonPages: (.*) kB", contents)
# unused = parse_int("^MemFree: (.*) kB", contents)
# buffers = parse_int("^Buffers: (.*) kB", contents)
# cached = parse_int("^Cached: (.*) kB", contents)
# free = unused + buffers + cached
# swap_total = parse_int("^SwapTotal: (.*) kB", contents)
# swap_free = parse_int("^SwapFree: (.*) kB", contents)
#
# # /proc/vmstat reports cumulative statistics so we must subtract the
# # previous values to get the difference since the last collection.
# contents = self.vmstat.read()
# self.swap_in_prev = self.swap_in_cur
# self.swap_out_prev = self.swap_out_cur
# self.swap_in_cur = parse_int("^pswpin (.*)", contents)
# self.swap_out_cur = parse_int("^pswpout (.*)", contents)
# if self.swap_in_prev is None:
# self.swap_in_prev = self.swap_in_cur
# if self.swap_out_prev is None:
# self.swap_out_prev = self.swap_out_cur
# swap_in = self.swap_in_cur - self.swap_in_prev
# swap_out = self.swap_out_cur - self.swap_out_prev
#
#
# data = { 'mem_available': avail, 'mem_unused': unused, \
# 'mem_free': free, 'swap_in': swap_in, 'swap_out': swap_out, \
# 'anon_pages': anon, 'swap_total': swap_total, \
# 'swap_usage': swap_total - swap_free }
# return data
#
# def getFields(self):
# return {'mem_available', 'mem_unused', 'mem_free', 'swap_in', 'swap_out',
# 'anon_pages', 'swap_total', 'swap_usage'}
. Output only the next line. | self.collector = HostMemory(None) |
Given the code snippet: <|code_start|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
class VdsmRpcBase(HypervisorInterface):
def __init__(self):
self._logger = logging.getLogger('mom.VdsmRpcBase')
def getVmList(self):
vmIds = []
vm_list = self.getAllVmStats().values()
for vm in vm_list:
if vm['status'] == 'Up':
vmIds.append(vm['vmId'])
self._logger.debug('VM List: %s', vmIds)
return vmIds
def getVmMemoryStats(self, uuid):
vm = self._getVmStats(uuid)
usage = int(vm['memUsage'])
if usage == 0:
msg = "The ovirt-guest-agent is not active"
<|code_end|>
, generate the next line using the imports in this file:
import logging
from .HypervisorInterface import \
HypervisorInterface, HypervisorInterfaceError
and context (functions, classes, or occasionally code) from other files:
# Path: mom/HypervisorInterfaces/HypervisorInterface.py
# class HypervisorInterface(object):
# """
# HypervisorInterface is an abstract class which defines all interfaces
# used by MOM to get guest memory statistics and control guest memory
# ballooning. Its sub classes libvirt and vdsm need implement all these
# interfaces by calling their respective API.
# """
# def getVmList(self):
# """
# This method returns a list, which is composed of the active guests'
# identifiers.
# """
# pass
#
# def getVmInfo(self, uuid):
# """
# This method returns basic information of a given guest, including
# name, uuid and pid.
# """
# pass
#
# def startVmMemoryStats(self, uuid):
# """
# This method activates the memory statistics of a given guest.
# """
# pass
#
# def getVmMemoryStats(self, uuid):
# """
# This method returns the memory statistics of a given guest. The stat
# fields are decided by the real hypervisor interface.
# """
# pass
#
# def getVmBalloonInfo(self, uuid):
# """
# This method returns the balloon info a given guest, which includes two
# fields:
# balloon_max - The maximum amount of memory the guest may use
# balloon_cur - The current memory limit (set by ballooning)
# """
# pass
#
# def setVmBalloonTarget(self, uuid, target):
# """
# This method sets the balloon target of a given guest. It's used by the
# controller Balloon to inflate or deflate the balloon according to this
# guest's memory usage.
# """
# pass
#
# def ksmTune(self, tuningParams):
# """
# This method is used to set KSM tuning parameters by the controller KSM.
# """
# pass
#
# class HypervisorInterfaceError(Exception):
# pass
. Output only the next line. | raise HypervisorInterfaceError(msg) |
Based on the snippet: <|code_start|> self.logger.error("Guest Manager crashed", exc_info=True)
else:
self.logger.info("Guest Manager ending")
def _spawn_guest_monitors(self, domain_list):
"""
Get the list of running domains and spawn GuestMonitors for any guests
we are not already tracking. The GuestMonitor constructor might block
so don't hold guests_sem while calling it.
"""
with self.guests_lock:
spawn_list = set(domain_list) - set(self.guests)
new_guests = {}
for id in spawn_list:
info = self.hypervisor_iface.getVmInfo(id)
if info is None:
self.logger.error("Failed to get guest:%s information -- monitor "\
"can't start", id)
continue
new_guests[id] = self._create_monitor(info)
if new_guests:
with self.guests_lock:
for id, guest in new_guests.items():
if is_running(guest):
self._register_guest(id, guest)
def _create_monitor(self, info):
<|code_end|>
, predict the immediate next line with the help of imports:
from collections import namedtuple
from mom.GuestMonitor import GuestMonitor
from mom.GuestMonitor import GuestMonitorThread
import threading
import time
import logging
and context (classes, functions, sometimes code) from other files:
# Path: mom/GuestMonitor.py
# class GuestMonitor(Monitor):
# """
# A GuestMonitor thread collects and reports statistics about 1 running guest
# """
# def __init__(self, config, info, hypervisor_iface):
# self.config = config
# self.logger = logging.getLogger('mom.GuestMonitor')
# self.interval = self.config.getint('main', 'guest-monitor-interval')
#
# Monitor.__init__(self, config, info['name'])
# with self.data_lock:
# self.properties.update(info)
# self.properties['hypervisor_iface'] = hypervisor_iface
#
# collector_list = self.config.get('guest', 'collectors')
# self.collectors = Collector.get_collectors(collector_list,
# self.properties, self.config)
# if self.collectors is None:
# self.logger.error("Guest Monitor initialization failed")
#
# def getGuestName(self):
# """
# Provide structured access to the guest name without calling hypervisor
# interface.
# """
# return self.properties.get('name')
#
# Path: mom/GuestMonitor.py
# class GuestMonitorThread(threading.Thread):
# def __init__(self, info, monitor):
# threading.Thread.__init__(self)
#
# name = "GuestMonitor-%s" % info['name']
# if six.PY2:
# # In python 2 the name should not have type 'unicode'.
# # The name is only used for logging, so it should
# # be safe to represent it in utf-8 encoding.
# name = name.encode('utf-8')
#
# self.setName(name)
# self.setDaemon(True)
# self.logger = logging.getLogger('mom.GuestMonitor.Thread')
#
# self._mon = monitor
#
# def run(self):
# try:
# self.logger.info("%s starting", self.getName())
# while self._mon.should_run():
# self._mon.collect()
# time.sleep(self._mon.interval)
# except Exception:
# self.logger.exception("%s crashed", self.getName())
# else:
# self.logger.info("%s ending", self.getName())
. Output only the next line. | guest = GuestMonitor(self.config, info, self.hypervisor_iface) |
Given the code snippet: <|code_start|> self.logger.info("Guest Manager ending")
def _spawn_guest_monitors(self, domain_list):
"""
Get the list of running domains and spawn GuestMonitors for any guests
we are not already tracking. The GuestMonitor constructor might block
so don't hold guests_sem while calling it.
"""
with self.guests_lock:
spawn_list = set(domain_list) - set(self.guests)
new_guests = {}
for id in spawn_list:
info = self.hypervisor_iface.getVmInfo(id)
if info is None:
self.logger.error("Failed to get guest:%s information -- monitor "\
"can't start", id)
continue
new_guests[id] = self._create_monitor(info)
if new_guests:
with self.guests_lock:
for id, guest in new_guests.items():
if is_running(guest):
self._register_guest(id, guest)
def _create_monitor(self, info):
guest = GuestMonitor(self.config, info, self.hypervisor_iface)
if self._threaded:
<|code_end|>
, generate the next line using the imports in this file:
from collections import namedtuple
from mom.GuestMonitor import GuestMonitor
from mom.GuestMonitor import GuestMonitorThread
import threading
import time
import logging
and context (functions, classes, or occasionally code) from other files:
# Path: mom/GuestMonitor.py
# class GuestMonitor(Monitor):
# """
# A GuestMonitor thread collects and reports statistics about 1 running guest
# """
# def __init__(self, config, info, hypervisor_iface):
# self.config = config
# self.logger = logging.getLogger('mom.GuestMonitor')
# self.interval = self.config.getint('main', 'guest-monitor-interval')
#
# Monitor.__init__(self, config, info['name'])
# with self.data_lock:
# self.properties.update(info)
# self.properties['hypervisor_iface'] = hypervisor_iface
#
# collector_list = self.config.get('guest', 'collectors')
# self.collectors = Collector.get_collectors(collector_list,
# self.properties, self.config)
# if self.collectors is None:
# self.logger.error("Guest Monitor initialization failed")
#
# def getGuestName(self):
# """
# Provide structured access to the guest name without calling hypervisor
# interface.
# """
# return self.properties.get('name')
#
# Path: mom/GuestMonitor.py
# class GuestMonitorThread(threading.Thread):
# def __init__(self, info, monitor):
# threading.Thread.__init__(self)
#
# name = "GuestMonitor-%s" % info['name']
# if six.PY2:
# # In python 2 the name should not have type 'unicode'.
# # The name is only used for logging, so it should
# # be safe to represent it in utf-8 encoding.
# name = name.encode('utf-8')
#
# self.setName(name)
# self.setDaemon(True)
# self.logger = logging.getLogger('mom.GuestMonitor.Thread')
#
# self._mon = monitor
#
# def run(self):
# try:
# self.logger.info("%s starting", self.getName())
# while self._mon.should_run():
# self._mon.collect()
# time.sleep(self._mon.interval)
# except Exception:
# self.logger.exception("%s crashed", self.getName())
# else:
# self.logger.info("%s ending", self.getName())
. Output only the next line. | thread = GuestMonitorThread(info, guest) |
Based on the snippet: <|code_start|>
class PublicTest(BaseTest):
def test_public_anon(self):
factory = APIRequestFactory()
request = factory.get("/")
<|code_end|>
, predict the immediate next line with the help of imports:
from rest_framework.test import APIRequestFactory
from .base import BaseTest
from ..permissions import Public, StaffOnly
and context (classes, functions, sometimes code) from other files:
# Path: src/oscarapicheckout/tests/base.py
# class BaseTest(APITestCase):
# def setUp(self):
# Country.objects.create(
# display_order=0,
# is_shipping_country=True,
# iso_3166_1_a2="US",
# iso_3166_1_a3="USA",
# iso_3166_1_numeric="840",
# name="United States of America",
# printable_name="United States",
# )
#
# def login(self, is_staff=False, email="joe@example.com"):
# user = User.objects.create_user(username="joe", password="schmoe", email=email)
# user.is_staff = is_staff
# user.save()
# self.client.login(username="joe", password="schmoe")
# return user
#
# Path: src/oscarapicheckout/permissions.py
# class Public(PaymentMethodPermission):
# def is_permitted(self, request=None, user=None):
# return True
#
# class StaffOnly(PaymentMethodPermission):
# def is_permitted(self, request=None, user=None):
# return user and user.is_authenticated and user.is_staff
. Output only the next line. | self.assertTrue(Public().is_permitted(request)) |
Next line prediction: <|code_start|>
class PublicTest(BaseTest):
def test_public_anon(self):
factory = APIRequestFactory()
request = factory.get("/")
self.assertTrue(Public().is_permitted(request))
def test_public_not_staff(self):
user = self.login(is_staff=False)
factory = APIRequestFactory()
request = factory.get("/")
self.assertTrue(Public().is_permitted(request, user))
def test_public_is_staff(self):
user = self.login(is_staff=True)
factory = APIRequestFactory()
request = factory.get("/")
self.assertTrue(Public().is_permitted(request, user))
class StaffOnlyTest(BaseTest):
def test_staff_only_anon(self):
factory = APIRequestFactory()
request = factory.get("/")
<|code_end|>
. Use current file imports:
(from rest_framework.test import APIRequestFactory
from .base import BaseTest
from ..permissions import Public, StaffOnly)
and context including class names, function names, or small code snippets from other files:
# Path: src/oscarapicheckout/tests/base.py
# class BaseTest(APITestCase):
# def setUp(self):
# Country.objects.create(
# display_order=0,
# is_shipping_country=True,
# iso_3166_1_a2="US",
# iso_3166_1_a3="USA",
# iso_3166_1_numeric="840",
# name="United States of America",
# printable_name="United States",
# )
#
# def login(self, is_staff=False, email="joe@example.com"):
# user = User.objects.create_user(username="joe", password="schmoe", email=email)
# user.is_staff = is_staff
# user.save()
# self.client.login(username="joe", password="schmoe")
# return user
#
# Path: src/oscarapicheckout/permissions.py
# class Public(PaymentMethodPermission):
# def is_permitted(self, request=None, user=None):
# return True
#
# class StaffOnly(PaymentMethodPermission):
# def is_permitted(self, request=None, user=None):
# return user and user.is_authenticated and user.is_staff
. Output only the next line. | self.assertFalse(StaffOnly().is_permitted(request)) |
Given the following code snippet before the placeholder: <|code_start|>
logger = logging.getLogger(__name__)
order_status_changed = get_class("order.signals", "order_status_changed")
@receiver(order_payment_authorized)
def send_order_confirmation_message(sender, order, request, **kwargs):
<|code_end|>
, predict the next line using imports from the current file:
from django.dispatch import receiver
from oscar.core.loading import get_class
from .email import OrderMessageSender
from .settings import ORDER_STATUS_PAYMENT_DECLINED
from .signals import order_payment_authorized
import logging
and context including class names, function names, and sometimes code from other files:
# Path: src/oscarapicheckout/email.py
# class OrderMessageSender(OrderPlacementMixin):
# def __init__(self, request):
# self.request = request
#
# Path: src/oscarapicheckout/settings.py
# ORDER_STATUS_PAYMENT_DECLINED = overridable(
# "ORDER_STATUS_PAYMENT_DECLINED", "Payment Declined"
# )
#
# Path: src/oscarapicheckout/signals.py
. Output only the next line. | OrderMessageSender(request).send_order_placed_email(order) |
Given the code snippet: <|code_start|>
logger = logging.getLogger(__name__)
order_status_changed = get_class("order.signals", "order_status_changed")
@receiver(order_payment_authorized)
def send_order_confirmation_message(sender, order, request, **kwargs):
OrderMessageSender(request).send_order_placed_email(order)
@receiver(order_status_changed)
def update_basket_status_upon_order_status_change(
sender, order, old_status, new_status, **kwargs
):
"""
When an order transitions from "Payment Declined" to any other status, make sure
it's associated basket is not still editable.
"""
basket = order.basket
if not basket:
return
<|code_end|>
, generate the next line using the imports in this file:
from django.dispatch import receiver
from oscar.core.loading import get_class
from .email import OrderMessageSender
from .settings import ORDER_STATUS_PAYMENT_DECLINED
from .signals import order_payment_authorized
import logging
and context (functions, classes, or occasionally code) from other files:
# Path: src/oscarapicheckout/email.py
# class OrderMessageSender(OrderPlacementMixin):
# def __init__(self, request):
# self.request = request
#
# Path: src/oscarapicheckout/settings.py
# ORDER_STATUS_PAYMENT_DECLINED = overridable(
# "ORDER_STATUS_PAYMENT_DECLINED", "Payment Declined"
# )
#
# Path: src/oscarapicheckout/signals.py
. Output only the next line. | if new_status == ORDER_STATUS_PAYMENT_DECLINED: |
Given the code snippet: <|code_start|> )
self.assertPaymentSources(
order_resp.data["number"],
sources=[
dict(
source_name="Cash",
reference="",
allocated=D("10.00"),
debited=D("10.00"),
),
],
)
def test_signal_ordering(self):
self.login(is_staff=True)
_test = {"last_signal_called": None}
def handle_pre_calculate_total(*args, **kwargs):
self.assertEqual(_test["last_signal_called"], None)
_test["last_signal_called"] = "pre_calculate_total"
def handle_order_placed(*args, **kwargs):
self.assertEqual(_test["last_signal_called"], "pre_calculate_total")
_test["last_signal_called"] = "order_placed"
def handle_order_payment_authorized(*args, **kwargs):
self.assertEqual(_test["last_signal_called"], "order_placed")
_test["last_signal_called"] = "order_payment_authorized"
<|code_end|>
, generate the next line using the imports in this file:
from decimal import Decimal as D
from django.contrib.auth.models import User
from oscar.core.loading import get_model, get_class
from oscar.test import factories
from rest_framework import status
from rest_framework.reverse import reverse
from ..signals import pre_calculate_total, order_placed, order_payment_authorized
from ..utils import _set_order_payment_declined
from ..serializers import OrderTokenField
from .base import BaseTest
from unittest import mock
and context (functions, classes, or occasionally code) from other files:
# Path: src/oscarapicheckout/signals.py
#
# Path: src/oscarapicheckout/utils.py
# def _set_order_payment_declined(order, request):
# # Set the order status
# order.set_status(ORDER_STATUS_PAYMENT_DECLINED)
#
# voucher_applications = order.voucherapplication_set.all()
#
# for voucher_application in voucher_applications:
# voucher = voucher_application.voucher
#
# parent = getattr(voucher, "parent", None)
# if parent:
# parent.num_orders = F("num_orders") - 1
# parent.save(update_children=False)
#
# voucher.num_orders = F("num_orders") - 1
# voucher.save()
#
# # Delete some related objects
# order.discounts.all().delete()
# order.line_prices.all().delete()
# voucher_applications.delete()
#
# # Thaw the basket and put it back into the request.session so that it can be retried
# order.basket.thaw()
# operations.store_basket_in_session(order.basket, request.session)
#
# # Send a signal
# order_payment_declined.send(sender=order, order=order, request=request)
#
# Path: src/oscarapicheckout/serializers.py
# class OrderTokenField(SignedTokenRelatedField):
# @classmethod
# def get_order_token(cls, order):
# return cls().get_token(order)
#
# def __init__(self, **kwargs):
# kwargs["queryset"] = Order.objects.all()
# kwargs["slug_field"] = "number"
# super().__init__(**kwargs)
#
# Path: src/oscarapicheckout/tests/base.py
# class BaseTest(APITestCase):
# def setUp(self):
# Country.objects.create(
# display_order=0,
# is_shipping_country=True,
# iso_3166_1_a2="US",
# iso_3166_1_a3="USA",
# iso_3166_1_numeric="840",
# name="United States of America",
# printable_name="United States",
# )
#
# def login(self, is_staff=False, email="joe@example.com"):
# user = User.objects.create_user(username="joe", password="schmoe", email=email)
# user.is_staff = is_staff
# user.save()
# self.client.login(username="joe", password="schmoe")
# return user
. Output only the next line. | pre_calculate_total.connect(handle_pre_calculate_total) |
Given the following code snippet before the placeholder: <|code_start|> self.assertPaymentSources(
order_resp.data["number"],
sources=[
dict(
source_name="Cash",
reference="",
allocated=D("10.00"),
debited=D("10.00"),
),
],
)
def test_signal_ordering(self):
self.login(is_staff=True)
_test = {"last_signal_called": None}
def handle_pre_calculate_total(*args, **kwargs):
self.assertEqual(_test["last_signal_called"], None)
_test["last_signal_called"] = "pre_calculate_total"
def handle_order_placed(*args, **kwargs):
self.assertEqual(_test["last_signal_called"], "pre_calculate_total")
_test["last_signal_called"] = "order_placed"
def handle_order_payment_authorized(*args, **kwargs):
self.assertEqual(_test["last_signal_called"], "order_placed")
_test["last_signal_called"] = "order_payment_authorized"
pre_calculate_total.connect(handle_pre_calculate_total)
<|code_end|>
, predict the next line using imports from the current file:
from decimal import Decimal as D
from django.contrib.auth.models import User
from oscar.core.loading import get_model, get_class
from oscar.test import factories
from rest_framework import status
from rest_framework.reverse import reverse
from ..signals import pre_calculate_total, order_placed, order_payment_authorized
from ..utils import _set_order_payment_declined
from ..serializers import OrderTokenField
from .base import BaseTest
from unittest import mock
and context including class names, function names, and sometimes code from other files:
# Path: src/oscarapicheckout/signals.py
#
# Path: src/oscarapicheckout/utils.py
# def _set_order_payment_declined(order, request):
# # Set the order status
# order.set_status(ORDER_STATUS_PAYMENT_DECLINED)
#
# voucher_applications = order.voucherapplication_set.all()
#
# for voucher_application in voucher_applications:
# voucher = voucher_application.voucher
#
# parent = getattr(voucher, "parent", None)
# if parent:
# parent.num_orders = F("num_orders") - 1
# parent.save(update_children=False)
#
# voucher.num_orders = F("num_orders") - 1
# voucher.save()
#
# # Delete some related objects
# order.discounts.all().delete()
# order.line_prices.all().delete()
# voucher_applications.delete()
#
# # Thaw the basket and put it back into the request.session so that it can be retried
# order.basket.thaw()
# operations.store_basket_in_session(order.basket, request.session)
#
# # Send a signal
# order_payment_declined.send(sender=order, order=order, request=request)
#
# Path: src/oscarapicheckout/serializers.py
# class OrderTokenField(SignedTokenRelatedField):
# @classmethod
# def get_order_token(cls, order):
# return cls().get_token(order)
#
# def __init__(self, **kwargs):
# kwargs["queryset"] = Order.objects.all()
# kwargs["slug_field"] = "number"
# super().__init__(**kwargs)
#
# Path: src/oscarapicheckout/tests/base.py
# class BaseTest(APITestCase):
# def setUp(self):
# Country.objects.create(
# display_order=0,
# is_shipping_country=True,
# iso_3166_1_a2="US",
# iso_3166_1_a3="USA",
# iso_3166_1_numeric="840",
# name="United States of America",
# printable_name="United States",
# )
#
# def login(self, is_staff=False, email="joe@example.com"):
# user = User.objects.create_user(username="joe", password="schmoe", email=email)
# user.is_staff = is_staff
# user.save()
# self.client.login(username="joe", password="schmoe")
# return user
. Output only the next line. | order_placed.connect(handle_order_placed) |
Using the snippet: <|code_start|> order_resp.data["number"],
sources=[
dict(
source_name="Cash",
reference="",
allocated=D("10.00"),
debited=D("10.00"),
),
],
)
def test_signal_ordering(self):
self.login(is_staff=True)
_test = {"last_signal_called": None}
def handle_pre_calculate_total(*args, **kwargs):
self.assertEqual(_test["last_signal_called"], None)
_test["last_signal_called"] = "pre_calculate_total"
def handle_order_placed(*args, **kwargs):
self.assertEqual(_test["last_signal_called"], "pre_calculate_total")
_test["last_signal_called"] = "order_placed"
def handle_order_payment_authorized(*args, **kwargs):
self.assertEqual(_test["last_signal_called"], "order_placed")
_test["last_signal_called"] = "order_payment_authorized"
pre_calculate_total.connect(handle_pre_calculate_total)
order_placed.connect(handle_order_placed)
<|code_end|>
, determine the next line of code. You have imports:
from decimal import Decimal as D
from django.contrib.auth.models import User
from oscar.core.loading import get_model, get_class
from oscar.test import factories
from rest_framework import status
from rest_framework.reverse import reverse
from ..signals import pre_calculate_total, order_placed, order_payment_authorized
from ..utils import _set_order_payment_declined
from ..serializers import OrderTokenField
from .base import BaseTest
from unittest import mock
and context (class names, function names, or code) available:
# Path: src/oscarapicheckout/signals.py
#
# Path: src/oscarapicheckout/utils.py
# def _set_order_payment_declined(order, request):
# # Set the order status
# order.set_status(ORDER_STATUS_PAYMENT_DECLINED)
#
# voucher_applications = order.voucherapplication_set.all()
#
# for voucher_application in voucher_applications:
# voucher = voucher_application.voucher
#
# parent = getattr(voucher, "parent", None)
# if parent:
# parent.num_orders = F("num_orders") - 1
# parent.save(update_children=False)
#
# voucher.num_orders = F("num_orders") - 1
# voucher.save()
#
# # Delete some related objects
# order.discounts.all().delete()
# order.line_prices.all().delete()
# voucher_applications.delete()
#
# # Thaw the basket and put it back into the request.session so that it can be retried
# order.basket.thaw()
# operations.store_basket_in_session(order.basket, request.session)
#
# # Send a signal
# order_payment_declined.send(sender=order, order=order, request=request)
#
# Path: src/oscarapicheckout/serializers.py
# class OrderTokenField(SignedTokenRelatedField):
# @classmethod
# def get_order_token(cls, order):
# return cls().get_token(order)
#
# def __init__(self, **kwargs):
# kwargs["queryset"] = Order.objects.all()
# kwargs["slug_field"] = "number"
# super().__init__(**kwargs)
#
# Path: src/oscarapicheckout/tests/base.py
# class BaseTest(APITestCase):
# def setUp(self):
# Country.objects.create(
# display_order=0,
# is_shipping_country=True,
# iso_3166_1_a2="US",
# iso_3166_1_a3="USA",
# iso_3166_1_numeric="840",
# name="United States of America",
# printable_name="United States",
# )
#
# def login(self, is_staff=False, email="joe@example.com"):
# user = User.objects.create_user(username="joe", password="schmoe", email=email)
# user.is_staff = is_staff
# user.save()
# self.client.login(username="joe", password="schmoe")
# return user
. Output only the next line. | order_payment_authorized.connect(handle_order_payment_authorized) |
Here is a snippet: <|code_start|> # API should return a new Basket now
self.assertNotEqual(self._get_basket_id(), basket_id)
def test_voucher_for_order_payment_declined(self):
# Login as one user
self.login(is_staff=False)
# Make a basket
basket_id = self._prepare_basket()
voucher = factories.create_voucher()
self._add_voucher(voucher)
self.assertEqual(voucher.num_orders, 0)
data = self._get_checkout_data(basket_id)
data["payment"] = {
"credit-card": {
"enabled": True,
"pay_balance": True,
}
}
url = reverse("api-checkout")
order_resp = self.client.post(url, data, format="json")
order_number = order_resp.data["number"]
order = Order.objects.get(number=order_number)
voucher.refresh_from_db()
self.assertEqual(voucher.num_orders, 1)
<|code_end|>
. Write the next line using the current file imports:
from decimal import Decimal as D
from django.contrib.auth.models import User
from oscar.core.loading import get_model, get_class
from oscar.test import factories
from rest_framework import status
from rest_framework.reverse import reverse
from ..signals import pre_calculate_total, order_placed, order_payment_authorized
from ..utils import _set_order_payment_declined
from ..serializers import OrderTokenField
from .base import BaseTest
from unittest import mock
and context from other files:
# Path: src/oscarapicheckout/signals.py
#
# Path: src/oscarapicheckout/utils.py
# def _set_order_payment_declined(order, request):
# # Set the order status
# order.set_status(ORDER_STATUS_PAYMENT_DECLINED)
#
# voucher_applications = order.voucherapplication_set.all()
#
# for voucher_application in voucher_applications:
# voucher = voucher_application.voucher
#
# parent = getattr(voucher, "parent", None)
# if parent:
# parent.num_orders = F("num_orders") - 1
# parent.save(update_children=False)
#
# voucher.num_orders = F("num_orders") - 1
# voucher.save()
#
# # Delete some related objects
# order.discounts.all().delete()
# order.line_prices.all().delete()
# voucher_applications.delete()
#
# # Thaw the basket and put it back into the request.session so that it can be retried
# order.basket.thaw()
# operations.store_basket_in_session(order.basket, request.session)
#
# # Send a signal
# order_payment_declined.send(sender=order, order=order, request=request)
#
# Path: src/oscarapicheckout/serializers.py
# class OrderTokenField(SignedTokenRelatedField):
# @classmethod
# def get_order_token(cls, order):
# return cls().get_token(order)
#
# def __init__(self, **kwargs):
# kwargs["queryset"] = Order.objects.all()
# kwargs["slug_field"] = "number"
# super().__init__(**kwargs)
#
# Path: src/oscarapicheckout/tests/base.py
# class BaseTest(APITestCase):
# def setUp(self):
# Country.objects.create(
# display_order=0,
# is_shipping_country=True,
# iso_3166_1_a2="US",
# iso_3166_1_a3="USA",
# iso_3166_1_numeric="840",
# name="United States of America",
# printable_name="United States",
# )
#
# def login(self, is_staff=False, email="joe@example.com"):
# user = User.objects.create_user(username="joe", password="schmoe", email=email)
# user.is_staff = is_staff
# user.save()
# self.client.login(username="joe", password="schmoe")
# return user
, which may include functions, classes, or code. Output only the next line. | _set_order_payment_declined(order, order_resp.wsgi_request) |
Predict the next line after this snippet: <|code_start|> self.assertEqual(states_resp.status_code, status.HTTP_200_OK)
self.assertEqual(states_resp.data["order_status"], "Pending")
self.assertEqual(
states_resp.data["payment_method_states"].keys(), set(["pay-later"])
)
self.assertEqual(
states_resp.data["payment_method_states"]["pay-later"]["status"], "Deferred"
)
self.assertEqual(
states_resp.data["payment_method_states"]["pay-later"]["amount"], "0.00"
)
self.assertIsNone(
states_resp.data["payment_method_states"]["pay-later"]["required_action"]
)
self.assertPaymentSources(
order_resp.data["number"],
sources=[
dict(
source_name="Pay Later",
reference="",
allocated=D("0.00"),
debited=D("0.00"),
),
],
)
# Complete payment with cash method
order = Order.objects.get(number=order_resp.data["number"])
order_resp = self._complete_deferred_payment(
{
<|code_end|>
using the current file's imports:
from decimal import Decimal as D
from django.contrib.auth.models import User
from oscar.core.loading import get_model, get_class
from oscar.test import factories
from rest_framework import status
from rest_framework.reverse import reverse
from ..signals import pre_calculate_total, order_placed, order_payment_authorized
from ..utils import _set_order_payment_declined
from ..serializers import OrderTokenField
from .base import BaseTest
from unittest import mock
and any relevant context from other files:
# Path: src/oscarapicheckout/signals.py
#
# Path: src/oscarapicheckout/utils.py
# def _set_order_payment_declined(order, request):
# # Set the order status
# order.set_status(ORDER_STATUS_PAYMENT_DECLINED)
#
# voucher_applications = order.voucherapplication_set.all()
#
# for voucher_application in voucher_applications:
# voucher = voucher_application.voucher
#
# parent = getattr(voucher, "parent", None)
# if parent:
# parent.num_orders = F("num_orders") - 1
# parent.save(update_children=False)
#
# voucher.num_orders = F("num_orders") - 1
# voucher.save()
#
# # Delete some related objects
# order.discounts.all().delete()
# order.line_prices.all().delete()
# voucher_applications.delete()
#
# # Thaw the basket and put it back into the request.session so that it can be retried
# order.basket.thaw()
# operations.store_basket_in_session(order.basket, request.session)
#
# # Send a signal
# order_payment_declined.send(sender=order, order=order, request=request)
#
# Path: src/oscarapicheckout/serializers.py
# class OrderTokenField(SignedTokenRelatedField):
# @classmethod
# def get_order_token(cls, order):
# return cls().get_token(order)
#
# def __init__(self, **kwargs):
# kwargs["queryset"] = Order.objects.all()
# kwargs["slug_field"] = "number"
# super().__init__(**kwargs)
#
# Path: src/oscarapicheckout/tests/base.py
# class BaseTest(APITestCase):
# def setUp(self):
# Country.objects.create(
# display_order=0,
# is_shipping_country=True,
# iso_3166_1_a2="US",
# iso_3166_1_a3="USA",
# iso_3166_1_numeric="840",
# name="United States of America",
# printable_name="United States",
# )
#
# def login(self, is_staff=False, email="joe@example.com"):
# user = User.objects.create_user(username="joe", password="schmoe", email=email)
# user.is_staff = is_staff
# user.save()
# self.client.login(username="joe", password="schmoe")
# return user
. Output only the next line. | "order": OrderTokenField.get_order_token(order), |
Based on the snippet: <|code_start|>
Order = get_model("order", "Order")
Basket = get_model("basket", "Basket")
Default = get_class("partner.strategy", "Default")
OrderCreator = get_class("order.utils", "OrderCreator")
<|code_end|>
, predict the immediate next line with the help of imports:
from decimal import Decimal as D
from django.contrib.auth.models import User
from oscar.core.loading import get_model, get_class
from oscar.test import factories
from rest_framework import status
from rest_framework.reverse import reverse
from ..signals import pre_calculate_total, order_placed, order_payment_authorized
from ..utils import _set_order_payment_declined
from ..serializers import OrderTokenField
from .base import BaseTest
from unittest import mock
and context (classes, functions, sometimes code) from other files:
# Path: src/oscarapicheckout/signals.py
#
# Path: src/oscarapicheckout/utils.py
# def _set_order_payment_declined(order, request):
# # Set the order status
# order.set_status(ORDER_STATUS_PAYMENT_DECLINED)
#
# voucher_applications = order.voucherapplication_set.all()
#
# for voucher_application in voucher_applications:
# voucher = voucher_application.voucher
#
# parent = getattr(voucher, "parent", None)
# if parent:
# parent.num_orders = F("num_orders") - 1
# parent.save(update_children=False)
#
# voucher.num_orders = F("num_orders") - 1
# voucher.save()
#
# # Delete some related objects
# order.discounts.all().delete()
# order.line_prices.all().delete()
# voucher_applications.delete()
#
# # Thaw the basket and put it back into the request.session so that it can be retried
# order.basket.thaw()
# operations.store_basket_in_session(order.basket, request.session)
#
# # Send a signal
# order_payment_declined.send(sender=order, order=order, request=request)
#
# Path: src/oscarapicheckout/serializers.py
# class OrderTokenField(SignedTokenRelatedField):
# @classmethod
# def get_order_token(cls, order):
# return cls().get_token(order)
#
# def __init__(self, **kwargs):
# kwargs["queryset"] = Order.objects.all()
# kwargs["slug_field"] = "number"
# super().__init__(**kwargs)
#
# Path: src/oscarapicheckout/tests/base.py
# class BaseTest(APITestCase):
# def setUp(self):
# Country.objects.create(
# display_order=0,
# is_shipping_country=True,
# iso_3166_1_a2="US",
# iso_3166_1_a3="USA",
# iso_3166_1_numeric="840",
# name="United States of America",
# printable_name="United States",
# )
#
# def login(self, is_staff=False, email="joe@example.com"):
# user = User.objects.create_user(username="joe", password="schmoe", email=email)
# user.is_staff = is_staff
# user.save()
# self.client.login(username="joe", password="schmoe")
# return user
. Output only the next line. | class CheckoutAPITest(BaseTest): |
Next line prediction: <|code_start|>Order = get_model("order", "Order")
OrderCreator = get_class("order.utils", "OrderCreator")
CHECKOUT_ORDER_ID = "api_checkout_pending_order_id"
CHECKOUT_PAYMENT_STEPS = "api_checkout_payment_steps"
def _session_pickle(obj):
pickled = pickle.dumps(obj)
base64ed = base64.standard_b64encode(pickled)
utfed = base64ed.decode("utf8")
return utfed
def _session_unpickle(utfed):
base64ed = utfed.encode("utf8")
pickled = base64.standard_b64decode(base64ed)
obj = pickle.loads(pickled)
return obj
def _update_payment_method_state(request, method_key, state):
states = request.session.get(CHECKOUT_PAYMENT_STEPS, {})
states[method_key] = _session_pickle(state)
request.session[CHECKOUT_PAYMENT_STEPS] = states
request.session.modified = True
def _set_order_authorized(order, request):
# Set the order status
<|code_end|>
. Use current file imports:
(from decimal import Decimal
from django.db.models import F
from django.utils.translation import gettext_lazy as _
from oscar.core.loading import get_class, get_model
from oscarapi.basket import operations
from .settings import ORDER_STATUS_AUTHORIZED, ORDER_STATUS_PAYMENT_DECLINED
from .signals import order_payment_declined, order_payment_authorized
from .states import COMPLETE, DECLINED, CONSUMED, Complete, Declined, Consumed
import pickle
import base64)
and context including class names, function names, or small code snippets from other files:
# Path: src/oscarapicheckout/settings.py
# ORDER_STATUS_AUTHORIZED = overridable("ORDER_STATUS_AUTHORIZED", "Authorized")
#
# ORDER_STATUS_PAYMENT_DECLINED = overridable(
# "ORDER_STATUS_PAYMENT_DECLINED", "Payment Declined"
# )
#
# Path: src/oscarapicheckout/signals.py
#
# Path: src/oscarapicheckout/states.py
# COMPLETE = "Complete"
#
# DECLINED = "Declined"
#
# CONSUMED = "Consumed"
#
# class Complete(SourceBoundPaymentStatus):
# status = COMPLETE
#
# class Declined(SourceBoundPaymentStatus):
# status = DECLINED
#
# class Consumed(SourceBoundPaymentStatus):
# status = CONSUMED
. Output only the next line. | order.set_status(ORDER_STATUS_AUTHORIZED) |
Continue the code snippet: <|code_start|> pickled = base64.standard_b64decode(base64ed)
obj = pickle.loads(pickled)
return obj
def _update_payment_method_state(request, method_key, state):
states = request.session.get(CHECKOUT_PAYMENT_STEPS, {})
states[method_key] = _session_pickle(state)
request.session[CHECKOUT_PAYMENT_STEPS] = states
request.session.modified = True
def _set_order_authorized(order, request):
# Set the order status
order.set_status(ORDER_STATUS_AUTHORIZED)
# Mark the basket as submitted
order.basket.submit()
# Update the owner of the basket to match the order
if order.user != order.basket.owner:
order.basket.owner = order.user
order.basket.save()
# Send a signal
order_payment_authorized.send(sender=order, order=order, request=request)
def _set_order_payment_declined(order, request):
# Set the order status
<|code_end|>
. Use current file imports:
from decimal import Decimal
from django.db.models import F
from django.utils.translation import gettext_lazy as _
from oscar.core.loading import get_class, get_model
from oscarapi.basket import operations
from .settings import ORDER_STATUS_AUTHORIZED, ORDER_STATUS_PAYMENT_DECLINED
from .signals import order_payment_declined, order_payment_authorized
from .states import COMPLETE, DECLINED, CONSUMED, Complete, Declined, Consumed
import pickle
import base64
and context (classes, functions, or code) from other files:
# Path: src/oscarapicheckout/settings.py
# ORDER_STATUS_AUTHORIZED = overridable("ORDER_STATUS_AUTHORIZED", "Authorized")
#
# ORDER_STATUS_PAYMENT_DECLINED = overridable(
# "ORDER_STATUS_PAYMENT_DECLINED", "Payment Declined"
# )
#
# Path: src/oscarapicheckout/signals.py
#
# Path: src/oscarapicheckout/states.py
# COMPLETE = "Complete"
#
# DECLINED = "Declined"
#
# CONSUMED = "Consumed"
#
# class Complete(SourceBoundPaymentStatus):
# status = COMPLETE
#
# class Declined(SourceBoundPaymentStatus):
# status = DECLINED
#
# class Consumed(SourceBoundPaymentStatus):
# status = CONSUMED
. Output only the next line. | order.set_status(ORDER_STATUS_PAYMENT_DECLINED) |
Given snippet: <|code_start|> order_payment_authorized.send(sender=order, order=order, request=request)
def _set_order_payment_declined(order, request):
# Set the order status
order.set_status(ORDER_STATUS_PAYMENT_DECLINED)
voucher_applications = order.voucherapplication_set.all()
for voucher_application in voucher_applications:
voucher = voucher_application.voucher
parent = getattr(voucher, "parent", None)
if parent:
parent.num_orders = F("num_orders") - 1
parent.save(update_children=False)
voucher.num_orders = F("num_orders") - 1
voucher.save()
# Delete some related objects
order.discounts.all().delete()
order.line_prices.all().delete()
voucher_applications.delete()
# Thaw the basket and put it back into the request.session so that it can be retried
order.basket.thaw()
operations.store_basket_in_session(order.basket, request.session)
# Send a signal
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from decimal import Decimal
from django.db.models import F
from django.utils.translation import gettext_lazy as _
from oscar.core.loading import get_class, get_model
from oscarapi.basket import operations
from .settings import ORDER_STATUS_AUTHORIZED, ORDER_STATUS_PAYMENT_DECLINED
from .signals import order_payment_declined, order_payment_authorized
from .states import COMPLETE, DECLINED, CONSUMED, Complete, Declined, Consumed
import pickle
import base64
and context:
# Path: src/oscarapicheckout/settings.py
# ORDER_STATUS_AUTHORIZED = overridable("ORDER_STATUS_AUTHORIZED", "Authorized")
#
# ORDER_STATUS_PAYMENT_DECLINED = overridable(
# "ORDER_STATUS_PAYMENT_DECLINED", "Payment Declined"
# )
#
# Path: src/oscarapicheckout/signals.py
#
# Path: src/oscarapicheckout/states.py
# COMPLETE = "Complete"
#
# DECLINED = "Declined"
#
# CONSUMED = "Consumed"
#
# class Complete(SourceBoundPaymentStatus):
# status = COMPLETE
#
# class Declined(SourceBoundPaymentStatus):
# status = DECLINED
#
# class Consumed(SourceBoundPaymentStatus):
# status = CONSUMED
which might include code, classes, or functions. Output only the next line. | order_payment_declined.send(sender=order, order=order, request=request) |
Continue the code snippet: <|code_start|> return utfed
def _session_unpickle(utfed):
base64ed = utfed.encode("utf8")
pickled = base64.standard_b64decode(base64ed)
obj = pickle.loads(pickled)
return obj
def _update_payment_method_state(request, method_key, state):
states = request.session.get(CHECKOUT_PAYMENT_STEPS, {})
states[method_key] = _session_pickle(state)
request.session[CHECKOUT_PAYMENT_STEPS] = states
request.session.modified = True
def _set_order_authorized(order, request):
# Set the order status
order.set_status(ORDER_STATUS_AUTHORIZED)
# Mark the basket as submitted
order.basket.submit()
# Update the owner of the basket to match the order
if order.user != order.basket.owner:
order.basket.owner = order.user
order.basket.save()
# Send a signal
<|code_end|>
. Use current file imports:
from decimal import Decimal
from django.db.models import F
from django.utils.translation import gettext_lazy as _
from oscar.core.loading import get_class, get_model
from oscarapi.basket import operations
from .settings import ORDER_STATUS_AUTHORIZED, ORDER_STATUS_PAYMENT_DECLINED
from .signals import order_payment_declined, order_payment_authorized
from .states import COMPLETE, DECLINED, CONSUMED, Complete, Declined, Consumed
import pickle
import base64
and context (classes, functions, or code) from other files:
# Path: src/oscarapicheckout/settings.py
# ORDER_STATUS_AUTHORIZED = overridable("ORDER_STATUS_AUTHORIZED", "Authorized")
#
# ORDER_STATUS_PAYMENT_DECLINED = overridable(
# "ORDER_STATUS_PAYMENT_DECLINED", "Payment Declined"
# )
#
# Path: src/oscarapicheckout/signals.py
#
# Path: src/oscarapicheckout/states.py
# COMPLETE = "Complete"
#
# DECLINED = "Declined"
#
# CONSUMED = "Consumed"
#
# class Complete(SourceBoundPaymentStatus):
# status = COMPLETE
#
# class Declined(SourceBoundPaymentStatus):
# status = DECLINED
#
# class Consumed(SourceBoundPaymentStatus):
# status = CONSUMED
. Output only the next line. | order_payment_authorized.send(sender=order, order=order, request=request) |
Using the snippet: <|code_start|> voucher = voucher_application.voucher
parent = getattr(voucher, "parent", None)
if parent:
parent.num_orders = F("num_orders") - 1
parent.save(update_children=False)
voucher.num_orders = F("num_orders") - 1
voucher.save()
# Delete some related objects
order.discounts.all().delete()
order.line_prices.all().delete()
voucher_applications.delete()
# Thaw the basket and put it back into the request.session so that it can be retried
order.basket.thaw()
operations.store_basket_in_session(order.basket, request.session)
# Send a signal
order_payment_declined.send(sender=order, order=order, request=request)
def _update_order_status(order, request):
states = list_payment_method_states(request)
declined = [s for k, s in states.items() if s.status == DECLINED]
if len(declined) > 0:
_set_order_payment_declined(order, request)
<|code_end|>
, determine the next line of code. You have imports:
from decimal import Decimal
from django.db.models import F
from django.utils.translation import gettext_lazy as _
from oscar.core.loading import get_class, get_model
from oscarapi.basket import operations
from .settings import ORDER_STATUS_AUTHORIZED, ORDER_STATUS_PAYMENT_DECLINED
from .signals import order_payment_declined, order_payment_authorized
from .states import COMPLETE, DECLINED, CONSUMED, Complete, Declined, Consumed
import pickle
import base64
and context (class names, function names, or code) available:
# Path: src/oscarapicheckout/settings.py
# ORDER_STATUS_AUTHORIZED = overridable("ORDER_STATUS_AUTHORIZED", "Authorized")
#
# ORDER_STATUS_PAYMENT_DECLINED = overridable(
# "ORDER_STATUS_PAYMENT_DECLINED", "Payment Declined"
# )
#
# Path: src/oscarapicheckout/signals.py
#
# Path: src/oscarapicheckout/states.py
# COMPLETE = "Complete"
#
# DECLINED = "Declined"
#
# CONSUMED = "Consumed"
#
# class Complete(SourceBoundPaymentStatus):
# status = COMPLETE
#
# class Declined(SourceBoundPaymentStatus):
# status = DECLINED
#
# class Consumed(SourceBoundPaymentStatus):
# status = CONSUMED
. Output only the next line. | not_complete = [s for k, s in states.items() if s.status != COMPLETE] |
Here is a snippet: <|code_start|>
voucher_applications = order.voucherapplication_set.all()
for voucher_application in voucher_applications:
voucher = voucher_application.voucher
parent = getattr(voucher, "parent", None)
if parent:
parent.num_orders = F("num_orders") - 1
parent.save(update_children=False)
voucher.num_orders = F("num_orders") - 1
voucher.save()
# Delete some related objects
order.discounts.all().delete()
order.line_prices.all().delete()
voucher_applications.delete()
# Thaw the basket and put it back into the request.session so that it can be retried
order.basket.thaw()
operations.store_basket_in_session(order.basket, request.session)
# Send a signal
order_payment_declined.send(sender=order, order=order, request=request)
def _update_order_status(order, request):
states = list_payment_method_states(request)
<|code_end|>
. Write the next line using the current file imports:
from decimal import Decimal
from django.db.models import F
from django.utils.translation import gettext_lazy as _
from oscar.core.loading import get_class, get_model
from oscarapi.basket import operations
from .settings import ORDER_STATUS_AUTHORIZED, ORDER_STATUS_PAYMENT_DECLINED
from .signals import order_payment_declined, order_payment_authorized
from .states import COMPLETE, DECLINED, CONSUMED, Complete, Declined, Consumed
import pickle
import base64
and context from other files:
# Path: src/oscarapicheckout/settings.py
# ORDER_STATUS_AUTHORIZED = overridable("ORDER_STATUS_AUTHORIZED", "Authorized")
#
# ORDER_STATUS_PAYMENT_DECLINED = overridable(
# "ORDER_STATUS_PAYMENT_DECLINED", "Payment Declined"
# )
#
# Path: src/oscarapicheckout/signals.py
#
# Path: src/oscarapicheckout/states.py
# COMPLETE = "Complete"
#
# DECLINED = "Declined"
#
# CONSUMED = "Consumed"
#
# class Complete(SourceBoundPaymentStatus):
# status = COMPLETE
#
# class Declined(SourceBoundPaymentStatus):
# status = DECLINED
#
# class Consumed(SourceBoundPaymentStatus):
# status = CONSUMED
, which may include functions, classes, or code. Output only the next line. | declined = [s for k, s in states.items() if s.status == DECLINED] |
Given the code snippet: <|code_start|> not_complete = [s for k, s in states.items() if s.status != COMPLETE]
if len(not_complete) <= 0:
# Authorized the order and consume all the payments
_set_order_authorized(order, request)
for key, state in states.items():
mark_payment_method_consumed(
order,
request,
key,
state.amount,
source_id=getattr(state, "source_id", None),
)
def list_payment_method_states(request):
states = request.session.get(CHECKOUT_PAYMENT_STEPS, {})
return {
method_key: _session_unpickle(state) for method_key, state in states.items()
}
def clear_payment_method_states(request):
request.session[CHECKOUT_PAYMENT_STEPS] = {}
request.session.modified = True
def clear_consumed_payment_method_states(request):
curr_states = list_payment_method_states(request)
new_states = {}
for key, state in curr_states.items():
<|code_end|>
, generate the next line using the imports in this file:
from decimal import Decimal
from django.db.models import F
from django.utils.translation import gettext_lazy as _
from oscar.core.loading import get_class, get_model
from oscarapi.basket import operations
from .settings import ORDER_STATUS_AUTHORIZED, ORDER_STATUS_PAYMENT_DECLINED
from .signals import order_payment_declined, order_payment_authorized
from .states import COMPLETE, DECLINED, CONSUMED, Complete, Declined, Consumed
import pickle
import base64
and context (functions, classes, or occasionally code) from other files:
# Path: src/oscarapicheckout/settings.py
# ORDER_STATUS_AUTHORIZED = overridable("ORDER_STATUS_AUTHORIZED", "Authorized")
#
# ORDER_STATUS_PAYMENT_DECLINED = overridable(
# "ORDER_STATUS_PAYMENT_DECLINED", "Payment Declined"
# )
#
# Path: src/oscarapicheckout/signals.py
#
# Path: src/oscarapicheckout/states.py
# COMPLETE = "Complete"
#
# DECLINED = "Declined"
#
# CONSUMED = "Consumed"
#
# class Complete(SourceBoundPaymentStatus):
# status = COMPLETE
#
# class Declined(SourceBoundPaymentStatus):
# status = DECLINED
#
# class Consumed(SourceBoundPaymentStatus):
# status = CONSUMED
. Output only the next line. | if state.status != CONSUMED: |
Predict the next line after this snippet: <|code_start|>def clear_payment_method_states(request):
request.session[CHECKOUT_PAYMENT_STEPS] = {}
request.session.modified = True
def clear_consumed_payment_method_states(request):
curr_states = list_payment_method_states(request)
new_states = {}
for key, state in curr_states.items():
if state.status != CONSUMED:
new_states[key] = state
clear_payment_method_states(request)
for key, state in new_states.items():
_update_payment_method_state(request, key, state)
def update_payment_method_state(order, request, method_key, state):
_update_payment_method_state(request, method_key, state)
_update_order_status(order, request)
def set_payment_method_states(order, request, states):
clear_payment_method_states(request)
for method_key, state in states.items():
_update_payment_method_state(request, method_key, state)
_update_order_status(order, request)
def mark_payment_method_completed(order, request, method_key, amount, source_id=None):
update_payment_method_state(
<|code_end|>
using the current file's imports:
from decimal import Decimal
from django.db.models import F
from django.utils.translation import gettext_lazy as _
from oscar.core.loading import get_class, get_model
from oscarapi.basket import operations
from .settings import ORDER_STATUS_AUTHORIZED, ORDER_STATUS_PAYMENT_DECLINED
from .signals import order_payment_declined, order_payment_authorized
from .states import COMPLETE, DECLINED, CONSUMED, Complete, Declined, Consumed
import pickle
import base64
and any relevant context from other files:
# Path: src/oscarapicheckout/settings.py
# ORDER_STATUS_AUTHORIZED = overridable("ORDER_STATUS_AUTHORIZED", "Authorized")
#
# ORDER_STATUS_PAYMENT_DECLINED = overridable(
# "ORDER_STATUS_PAYMENT_DECLINED", "Payment Declined"
# )
#
# Path: src/oscarapicheckout/signals.py
#
# Path: src/oscarapicheckout/states.py
# COMPLETE = "Complete"
#
# DECLINED = "Declined"
#
# CONSUMED = "Consumed"
#
# class Complete(SourceBoundPaymentStatus):
# status = COMPLETE
#
# class Declined(SourceBoundPaymentStatus):
# status = DECLINED
#
# class Consumed(SourceBoundPaymentStatus):
# status = CONSUMED
. Output only the next line. | order, request, method_key, Complete(amount, source_id=source_id) |
Predict the next line for this snippet: <|code_start|> curr_states = list_payment_method_states(request)
new_states = {}
for key, state in curr_states.items():
if state.status != CONSUMED:
new_states[key] = state
clear_payment_method_states(request)
for key, state in new_states.items():
_update_payment_method_state(request, key, state)
def update_payment_method_state(order, request, method_key, state):
_update_payment_method_state(request, method_key, state)
_update_order_status(order, request)
def set_payment_method_states(order, request, states):
clear_payment_method_states(request)
for method_key, state in states.items():
_update_payment_method_state(request, method_key, state)
_update_order_status(order, request)
def mark_payment_method_completed(order, request, method_key, amount, source_id=None):
update_payment_method_state(
order, request, method_key, Complete(amount, source_id=source_id)
)
def mark_payment_method_declined(order, request, method_key, amount, source_id=None):
update_payment_method_state(
<|code_end|>
with the help of current file imports:
from decimal import Decimal
from django.db.models import F
from django.utils.translation import gettext_lazy as _
from oscar.core.loading import get_class, get_model
from oscarapi.basket import operations
from .settings import ORDER_STATUS_AUTHORIZED, ORDER_STATUS_PAYMENT_DECLINED
from .signals import order_payment_declined, order_payment_authorized
from .states import COMPLETE, DECLINED, CONSUMED, Complete, Declined, Consumed
import pickle
import base64
and context from other files:
# Path: src/oscarapicheckout/settings.py
# ORDER_STATUS_AUTHORIZED = overridable("ORDER_STATUS_AUTHORIZED", "Authorized")
#
# ORDER_STATUS_PAYMENT_DECLINED = overridable(
# "ORDER_STATUS_PAYMENT_DECLINED", "Payment Declined"
# )
#
# Path: src/oscarapicheckout/signals.py
#
# Path: src/oscarapicheckout/states.py
# COMPLETE = "Complete"
#
# DECLINED = "Declined"
#
# CONSUMED = "Consumed"
#
# class Complete(SourceBoundPaymentStatus):
# status = COMPLETE
#
# class Declined(SourceBoundPaymentStatus):
# status = DECLINED
#
# class Consumed(SourceBoundPaymentStatus):
# status = CONSUMED
, which may contain function names, class names, or code. Output only the next line. | order, request, method_key, Declined(amount, source_id=source_id) |
Predict the next line for this snippet: <|code_start|> for key, state in new_states.items():
_update_payment_method_state(request, key, state)
def update_payment_method_state(order, request, method_key, state):
_update_payment_method_state(request, method_key, state)
_update_order_status(order, request)
def set_payment_method_states(order, request, states):
clear_payment_method_states(request)
for method_key, state in states.items():
_update_payment_method_state(request, method_key, state)
_update_order_status(order, request)
def mark_payment_method_completed(order, request, method_key, amount, source_id=None):
update_payment_method_state(
order, request, method_key, Complete(amount, source_id=source_id)
)
def mark_payment_method_declined(order, request, method_key, amount, source_id=None):
update_payment_method_state(
order, request, method_key, Declined(amount, source_id=source_id)
)
def mark_payment_method_consumed(order, request, method_key, amount, source_id=None):
update_payment_method_state(
<|code_end|>
with the help of current file imports:
from decimal import Decimal
from django.db.models import F
from django.utils.translation import gettext_lazy as _
from oscar.core.loading import get_class, get_model
from oscarapi.basket import operations
from .settings import ORDER_STATUS_AUTHORIZED, ORDER_STATUS_PAYMENT_DECLINED
from .signals import order_payment_declined, order_payment_authorized
from .states import COMPLETE, DECLINED, CONSUMED, Complete, Declined, Consumed
import pickle
import base64
and context from other files:
# Path: src/oscarapicheckout/settings.py
# ORDER_STATUS_AUTHORIZED = overridable("ORDER_STATUS_AUTHORIZED", "Authorized")
#
# ORDER_STATUS_PAYMENT_DECLINED = overridable(
# "ORDER_STATUS_PAYMENT_DECLINED", "Payment Declined"
# )
#
# Path: src/oscarapicheckout/signals.py
#
# Path: src/oscarapicheckout/states.py
# COMPLETE = "Complete"
#
# DECLINED = "Declined"
#
# CONSUMED = "Consumed"
#
# class Complete(SourceBoundPaymentStatus):
# status = COMPLETE
#
# class Declined(SourceBoundPaymentStatus):
# status = DECLINED
#
# class Consumed(SourceBoundPaymentStatus):
# status = CONSUMED
, which may contain function names, class names, or code. Output only the next line. | order, request, method_key, Consumed(amount, source_id=source_id) |
Based on the snippet: <|code_start|>
Country = get_model("address", "Country")
class EmailAddressCacheTest(BaseTest):
def test_cache(self):
<|code_end|>
, predict the immediate next line with the help of imports:
from oscar.core.loading import get_model
from .base import BaseTest
from ..cache import (
EmailAddressCache,
ShippingAddressCache,
BillingAddressCache,
ShippingMethodCache,
)
and context (classes, functions, sometimes code) from other files:
# Path: src/oscarapicheckout/tests/base.py
# class BaseTest(APITestCase):
# def setUp(self):
# Country.objects.create(
# display_order=0,
# is_shipping_country=True,
# iso_3166_1_a2="US",
# iso_3166_1_a3="USA",
# iso_3166_1_numeric="840",
# name="United States of America",
# printable_name="United States",
# )
#
# def login(self, is_staff=False, email="joe@example.com"):
# user = User.objects.create_user(username="joe", password="schmoe", email=email)
# user.is_staff = is_staff
# user.save()
# self.client.login(username="joe", password="schmoe")
# return user
#
# Path: src/oscarapicheckout/cache.py
# class EmailAddressCache(AbstractCheckoutCache):
# serializer_class_path = pkgsettings.CHECKOUT_CACHE_SERIALIZERS.get(
# "email_address", "oscarapicheckout.cache.EmailAddressSerializer"
# )
#
# class ShippingAddressCache(AbstractCheckoutAddressCache):
# serializer_class_path = pkgsettings.CHECKOUT_CACHE_SERIALIZERS.get(
# "shipping_address", "oscarapi.serializers.checkout.ShippingAddressSerializer"
# )
#
# class BillingAddressCache(AbstractCheckoutAddressCache):
# serializer_class_path = pkgsettings.CHECKOUT_CACHE_SERIALIZERS.get(
# "billing_address", "oscarapi.serializers.checkout.BillingAddressSerializer"
# )
#
# class ShippingMethodCache(AbstractCheckoutCache):
# serializer_class_path = pkgsettings.CHECKOUT_CACHE_SERIALIZERS.get(
# "shipping_method", "oscarapicheckout.cache.ShippingMethodSerializer"
# )
. Output only the next line. | EmailAddressCache(1).set({"email": "foo1@example.com"}) |
Predict the next line for this snippet: <|code_start|>
Country = get_model("address", "Country")
class EmailAddressCacheTest(BaseTest):
def test_cache(self):
EmailAddressCache(1).set({"email": "foo1@example.com"})
EmailAddressCache(2).set({"email": "foo2@example.com"})
self.assertEqual(EmailAddressCache(1).get(), {"email": "foo1@example.com"})
self.assertEqual(EmailAddressCache(2).get(), {"email": "foo2@example.com"})
class ShippingAddressCacheTest(BaseTest):
def test_cache(self):
<|code_end|>
with the help of current file imports:
from oscar.core.loading import get_model
from .base import BaseTest
from ..cache import (
EmailAddressCache,
ShippingAddressCache,
BillingAddressCache,
ShippingMethodCache,
)
and context from other files:
# Path: src/oscarapicheckout/tests/base.py
# class BaseTest(APITestCase):
# def setUp(self):
# Country.objects.create(
# display_order=0,
# is_shipping_country=True,
# iso_3166_1_a2="US",
# iso_3166_1_a3="USA",
# iso_3166_1_numeric="840",
# name="United States of America",
# printable_name="United States",
# )
#
# def login(self, is_staff=False, email="joe@example.com"):
# user = User.objects.create_user(username="joe", password="schmoe", email=email)
# user.is_staff = is_staff
# user.save()
# self.client.login(username="joe", password="schmoe")
# return user
#
# Path: src/oscarapicheckout/cache.py
# class EmailAddressCache(AbstractCheckoutCache):
# serializer_class_path = pkgsettings.CHECKOUT_CACHE_SERIALIZERS.get(
# "email_address", "oscarapicheckout.cache.EmailAddressSerializer"
# )
#
# class ShippingAddressCache(AbstractCheckoutAddressCache):
# serializer_class_path = pkgsettings.CHECKOUT_CACHE_SERIALIZERS.get(
# "shipping_address", "oscarapi.serializers.checkout.ShippingAddressSerializer"
# )
#
# class BillingAddressCache(AbstractCheckoutAddressCache):
# serializer_class_path = pkgsettings.CHECKOUT_CACHE_SERIALIZERS.get(
# "billing_address", "oscarapi.serializers.checkout.BillingAddressSerializer"
# )
#
# class ShippingMethodCache(AbstractCheckoutCache):
# serializer_class_path = pkgsettings.CHECKOUT_CACHE_SERIALIZERS.get(
# "shipping_method", "oscarapicheckout.cache.ShippingMethodSerializer"
# )
, which may contain function names, class names, or code. Output only the next line. | ShippingAddressCache(1).set( |
Predict the next line for this snippet: <|code_start|>
class ShippingAddressCacheTest(BaseTest):
def test_cache(self):
ShippingAddressCache(1).set(
{
"first_name": "Bart",
"last_name": "Simpson",
"line1": "123 Evergreen Terrace",
"line4": "Springfield",
"state": "NY",
"postcode": "10001",
"country": Country.objects.get(iso_3166_1_a3="USA"),
}
)
self.assertEqual(
ShippingAddressCache(1).get(),
{
"first_name": "Bart",
"last_name": "Simpson",
"line1": "123 Evergreen Terrace",
"line4": "Springfield",
"state": "NY",
"postcode": "10001",
"country": Country.objects.get(iso_3166_1_a3="USA"),
},
)
class BillingAddressCacheTest(BaseTest):
def test_cache(self):
<|code_end|>
with the help of current file imports:
from oscar.core.loading import get_model
from .base import BaseTest
from ..cache import (
EmailAddressCache,
ShippingAddressCache,
BillingAddressCache,
ShippingMethodCache,
)
and context from other files:
# Path: src/oscarapicheckout/tests/base.py
# class BaseTest(APITestCase):
# def setUp(self):
# Country.objects.create(
# display_order=0,
# is_shipping_country=True,
# iso_3166_1_a2="US",
# iso_3166_1_a3="USA",
# iso_3166_1_numeric="840",
# name="United States of America",
# printable_name="United States",
# )
#
# def login(self, is_staff=False, email="joe@example.com"):
# user = User.objects.create_user(username="joe", password="schmoe", email=email)
# user.is_staff = is_staff
# user.save()
# self.client.login(username="joe", password="schmoe")
# return user
#
# Path: src/oscarapicheckout/cache.py
# class EmailAddressCache(AbstractCheckoutCache):
# serializer_class_path = pkgsettings.CHECKOUT_CACHE_SERIALIZERS.get(
# "email_address", "oscarapicheckout.cache.EmailAddressSerializer"
# )
#
# class ShippingAddressCache(AbstractCheckoutAddressCache):
# serializer_class_path = pkgsettings.CHECKOUT_CACHE_SERIALIZERS.get(
# "shipping_address", "oscarapi.serializers.checkout.ShippingAddressSerializer"
# )
#
# class BillingAddressCache(AbstractCheckoutAddressCache):
# serializer_class_path = pkgsettings.CHECKOUT_CACHE_SERIALIZERS.get(
# "billing_address", "oscarapi.serializers.checkout.BillingAddressSerializer"
# )
#
# class ShippingMethodCache(AbstractCheckoutCache):
# serializer_class_path = pkgsettings.CHECKOUT_CACHE_SERIALIZERS.get(
# "shipping_method", "oscarapicheckout.cache.ShippingMethodSerializer"
# )
, which may contain function names, class names, or code. Output only the next line. | BillingAddressCache(1).set( |
Here is a snippet: <|code_start|>
class BillingAddressCacheTest(BaseTest):
def test_cache(self):
BillingAddressCache(1).set(
{
"first_name": "Lisa",
"last_name": "Simpson",
"line1": "123 Evergreen Terrace",
"line4": "Springfield",
"state": "NY",
"postcode": "10001",
"country": Country.objects.get(iso_3166_1_a3="USA"),
}
)
self.assertEqual(
BillingAddressCache(1).get(),
{
"first_name": "Lisa",
"last_name": "Simpson",
"line1": "123 Evergreen Terrace",
"line4": "Springfield",
"state": "NY",
"postcode": "10001",
"country": Country.objects.get(iso_3166_1_a3="USA"),
},
)
class ShippingMethodCacheTest(BaseTest):
def test_cache(self):
<|code_end|>
. Write the next line using the current file imports:
from oscar.core.loading import get_model
from .base import BaseTest
from ..cache import (
EmailAddressCache,
ShippingAddressCache,
BillingAddressCache,
ShippingMethodCache,
)
and context from other files:
# Path: src/oscarapicheckout/tests/base.py
# class BaseTest(APITestCase):
# def setUp(self):
# Country.objects.create(
# display_order=0,
# is_shipping_country=True,
# iso_3166_1_a2="US",
# iso_3166_1_a3="USA",
# iso_3166_1_numeric="840",
# name="United States of America",
# printable_name="United States",
# )
#
# def login(self, is_staff=False, email="joe@example.com"):
# user = User.objects.create_user(username="joe", password="schmoe", email=email)
# user.is_staff = is_staff
# user.save()
# self.client.login(username="joe", password="schmoe")
# return user
#
# Path: src/oscarapicheckout/cache.py
# class EmailAddressCache(AbstractCheckoutCache):
# serializer_class_path = pkgsettings.CHECKOUT_CACHE_SERIALIZERS.get(
# "email_address", "oscarapicheckout.cache.EmailAddressSerializer"
# )
#
# class ShippingAddressCache(AbstractCheckoutAddressCache):
# serializer_class_path = pkgsettings.CHECKOUT_CACHE_SERIALIZERS.get(
# "shipping_address", "oscarapi.serializers.checkout.ShippingAddressSerializer"
# )
#
# class BillingAddressCache(AbstractCheckoutAddressCache):
# serializer_class_path = pkgsettings.CHECKOUT_CACHE_SERIALIZERS.get(
# "billing_address", "oscarapi.serializers.checkout.BillingAddressSerializer"
# )
#
# class ShippingMethodCache(AbstractCheckoutCache):
# serializer_class_path = pkgsettings.CHECKOUT_CACHE_SERIALIZERS.get(
# "shipping_method", "oscarapicheckout.cache.ShippingMethodSerializer"
# )
, which may include functions, classes, or code. Output only the next line. | ShippingMethodCache(1).set( |
Continue the code snippet: <|code_start|>
Order = get_model("order", "Order")
class GetCardTokenView(generics.GenericAPIView):
def post(self, request):
amount = Decimal(request.data["amount"])
order_number = request.data["reference_number"]
order = get_object_or_404(Order, number=order_number)
# Get the method key
method_key = Signer().unsign(request.data["transaction_id"])
# Decline the payment
if request.data.get("deny"):
utils.mark_payment_method_declined(
order, request, method_key, request.data["amount"]
)
return Response(
{
"status": "Declined",
}
)
# Require the client to do another form post
<|code_end|>
. Use current file imports:
from decimal import Decimal
from django.shortcuts import get_object_or_404
from django.core.signing import Signer
from rest_framework import generics
from rest_framework.response import Response
from oscar.core.loading import get_model
from oscarapicheckout import utils
from .methods import CreditCard
and context (classes, functions, or code) from other files:
# Path: sandbox/sandbox/creditcards/methods.py
# class CreditCard(PaymentMethod):
# """
# This is an example of how to implement a payment method that required some off-site
# interaction, like Cybersource Secure Acceptance, for example. It returns a pending
# status initially that requires the client app to make a form post, which in-turn
# redirects back to us. This is a common pattern in PCI SAQ A-EP ecommerce sites.
# """
#
# name = "Credit Card"
# code = "credit-card"
# serializer_class = PaymentMethodSerializer
#
# def _record_payment(self, request, order, method_key, amount, reference, **kwargs):
# """Payment Step 1"""
# fields = [
# {
# "key": "amount",
# "value": amount,
# },
# {
# "key": "reference_number",
# "value": order.number,
# },
# {
# "key": "transaction_id",
# "value": Signer().sign(method_key),
# },
# ]
# # Return a response showing we need to post some fields to the given
# # URL to finishing processing this payment method
# return FormPostRequired(
# amount=amount,
# name="get-token",
# url=reverse("creditcards-get-token"),
# fields=fields,
# )
#
# def require_authorization_post(self, order, method_key, amount):
# """Payment Step 2"""
# fields = [
# {
# "key": "amount",
# "value": amount,
# },
# {
# "key": "reference_number",
# "value": order.number,
# },
# {
# "key": "transaction_id",
# "value": Signer().sign(method_key),
# },
# ]
# return FormPostRequired(
# amount=amount,
# name="authorize",
# url=reverse("creditcards-authorize"),
# fields=fields,
# )
#
# def record_successful_authorization(self, order, amount, reference):
# """Payment Step 3 Succeeded"""
# source = self.get_source(order, reference)
#
# source.allocate(amount, reference=reference, status="ACCEPTED")
# event = self.make_authorize_event(order, amount, reference)
# for line in order.lines.all():
# self.make_event_quantity(event, line, line.quantity)
#
# return Complete(amount, source_id=source.pk)
#
# def record_declined_authorization(self, order, amount, reference):
# """Payment Step 3 Failed"""
# source = self.get_source(order, reference)
# source._create_transaction(
# Transaction.AUTHORISE, amount, reference=reference, status="DECLINED"
# )
# return Declined(amount, source_id=source.pk)
. Output only the next line. | new_state = CreditCard().require_authorization_post(order, method_key, amount) |
Given the following code snippet before the placeholder: <|code_start|> try:
host, port = sock.split(":")
port = int(port)
if not host:
raise
bind_param = (host, port)
except Exception:
logger.exception('error on sock params in socket endpoint')
raise
sock_obj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
bind_param = sock.split(":", 1)[1]
sock_obj = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if reuse_port:
SO_REUSEPORT = 15
sock_obj.setsockopt(socket.SOL_SOCKET, SO_REUSEPORT, 1)
sock_obj.bind(bind_param)
else:
sock_obj = sock
return sock_obj
def make_socket(self):
"""
make and bind socket if string object is passed
"""
self.sock_obj = self.mk_socket(self.sock, self.reuse_port)
<|code_end|>
, predict the next line using imports from the current file:
import asyncio
import logging
import socket
from pypeman.helpers import lazyload # noqa: E402
and context including class names, function names, and sometimes code from other files:
# Path: pypeman/helpers/lazyload.py
# def load_class(module, class_, deps):
# def load(selfmodname, module, class_, dep=None):
# def init(*args, **kwargs):
# def __init__(self, wrapped):
# def add_lazy(self, module, name, deps):
# def __getattr__(self, name):
# C = load_class(module, class_, dep)
# class Wrapper(object):
. Output only the next line. | wrap = lazyload.Wrapper(__name__) |
Based on the snippet: <|code_start|>
# TODO: might refactor to another file?
def mk_msgs_w_ctx(*ctx_ids):
"""
helper to create a msg with a few contexts
"""
msg = generate_msg(
message_content="test",
message_meta=dict(entry1="meta1"),
)
ctx_msgs = []
for ctx_id in ctx_ids:
meta = dict(entry1="meta_%s" % ctx_id)
ctx_msg = generate_msg(
message_content={"val_%s" % ctx_id: "data_%s" % ctx_id},
message_meta=meta,
)
msg.add_context(ctx_id, ctx_msg)
ctx_msgs.append(ctx_msg)
return msg, ctx_msgs
@pytest.mark.usefixtures("clear_graph")
def test_combine_ctx_not_two_names(event_loop):
with pytest.raises(NodeException):
<|code_end|>
, predict the immediate next line with the help of imports:
import asyncio
import pytest
from pypeman.contrib.ctx import CombineCtx
from pypeman.nodes import NodeException
from pypeman.tests.pytest_helpers import clear_graph # noqa: F401
from pypeman.tests.common import generate_msg
from pypeman.tests.test_nodes import FakeChannel
and context (classes, functions, sometimes code) from other files:
# Path: pypeman/contrib/ctx.py
# class CombineCtx(nodes.BaseNode):
# """
# creates payload dict with combination of different contexts
# """
#
# def __init__(self, ctx_names, meta_from=None, flatten=False, *args, **kwargs):
# """
# param ctx_names: list of context names to save or dict with mapping
# of context name to payload keys
# param: flatten: if true all ctx payloads (must be dicts) will be combined
# in one dict
# param: meta_from: specifies which meta the resulting message should have
# """
# super().__init__(*args, **kwargs)
# if not isinstance(ctx_names, dict):
# if flatten:
# ctx_dst = len(ctx_names) * [None]
# else:
# ctx_dst = ctx_names
# ctx_names = dict(zip(ctx_names, ctx_dst))
# self.ctx_names = ctx_names
# if len(ctx_names) < 2:
# raise NodeException("must have at least two contexts for combining")
# if meta_from is None:
# meta_from = next(iter(ctx_names.keys()))
# self.meta_from = meta_from
#
# def process(self, msg):
# payload = {}
# ch_logger = self.channel.logger
# ch_logger.debug(
# "combine ctx_names = %s / meta from %r",
# repr(self.ctx_names),
# self.meta_from,
# )
# # TODO: can we just copy the meta if the payload changed?
# # TODO: If not, then rmv meta_from param and create new meta
# msg.meta = msg.ctx[self.meta_from]['meta']
#
# for ctx_name, dst in self.ctx_names.items():
# ch_logger.debug("combine ctx = %s -> %s", ctx_name, dst)
# if dst is None:
# ctx_payload = msg.ctx[ctx_name]['payload']
# ch_logger.debug(
# "upd payload with ctx payload of %s ( %s )",
# ctx_name, repr(ctx_payload))
# payload.update(ctx_payload)
# else:
# payload[dst] = msg.ctx[ctx_name]['payload']
#
# msg.payload = payload
#
# return msg
#
# Path: pypeman/nodes.py
# class NodeException(Exception):
# """ custom exception """
#
# Path: pypeman/tests/pytest_helpers.py
# @pytest.fixture(scope="function")
# def clear_graph():
# """
# ensure, that before and after a pypeman test
# the pypeman graph is entirely cleared
# """
# n_nodes = len(pypeman.nodes.all_nodes)
# n_channels = len(pypeman.channels.all_channels)
# n_endpoints = len(pypeman.endpoints.all_endpoints)
# pypeman.nodes.reset_pypeman_nodes()
# pypeman.channels.reset_pypeman_channels()
# pypeman.endpoints.reset_pypeman_endpoints()
# yield n_nodes, n_channels, n_endpoints
# pypeman.nodes.reset_pypeman_nodes()
# pypeman.channels.reset_pypeman_channels()
# pypeman.endpoints.reset_pypeman_endpoints()
#
# Path: pypeman/tests/common.py
# def generate_msg(timestamp=None, message_content=default_message_content,
# message_meta=None, with_context=False):
# """ generates a default message """
# m = message.Message()
# if timestamp:
# if isinstance(timestamp, tuple):
# m.timestamp = datetime.datetime(*timestamp)
# else: # assume it's a datetime object
# m.timestamp = timestamp
# else: # just use current time
# m.timestamp = datetime.datetime.utcnow()
#
# m.payload = message_content
#
# if message_meta is None:
# m.meta = dict(default_message_meta)
# else:
# m.meta = message_meta
#
# if with_context:
# # Add context message
# mctx = generate_msg(message_content={'question': 'known'}, message_meta={'answer': 43})
# m.add_context('test', mctx)
#
# return m
#
# Path: pypeman/tests/test_nodes.py
# class FakeChannel():
# def __init__(self, loop):
# self.logger = mock.MagicMock()
# self.uuid = 'fakeChannel'
# self.name = 'fakeChannel'
# self.parent_uids = "parent_uid"
# self.parent_names = ["parent_names"]
#
# self.loop = loop
. Output only the next line. | CombineCtx([], name="ctx1") |
Given snippet: <|code_start|>
# TODO: might refactor to another file?
def mk_msgs_w_ctx(*ctx_ids):
"""
helper to create a msg with a few contexts
"""
msg = generate_msg(
message_content="test",
message_meta=dict(entry1="meta1"),
)
ctx_msgs = []
for ctx_id in ctx_ids:
meta = dict(entry1="meta_%s" % ctx_id)
ctx_msg = generate_msg(
message_content={"val_%s" % ctx_id: "data_%s" % ctx_id},
message_meta=meta,
)
msg.add_context(ctx_id, ctx_msg)
ctx_msgs.append(ctx_msg)
return msg, ctx_msgs
@pytest.mark.usefixtures("clear_graph")
def test_combine_ctx_not_two_names(event_loop):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import asyncio
import pytest
from pypeman.contrib.ctx import CombineCtx
from pypeman.nodes import NodeException
from pypeman.tests.pytest_helpers import clear_graph # noqa: F401
from pypeman.tests.common import generate_msg
from pypeman.tests.test_nodes import FakeChannel
and context:
# Path: pypeman/contrib/ctx.py
# class CombineCtx(nodes.BaseNode):
# """
# creates payload dict with combination of different contexts
# """
#
# def __init__(self, ctx_names, meta_from=None, flatten=False, *args, **kwargs):
# """
# param ctx_names: list of context names to save or dict with mapping
# of context name to payload keys
# param: flatten: if true all ctx payloads (must be dicts) will be combined
# in one dict
# param: meta_from: specifies which meta the resulting message should have
# """
# super().__init__(*args, **kwargs)
# if not isinstance(ctx_names, dict):
# if flatten:
# ctx_dst = len(ctx_names) * [None]
# else:
# ctx_dst = ctx_names
# ctx_names = dict(zip(ctx_names, ctx_dst))
# self.ctx_names = ctx_names
# if len(ctx_names) < 2:
# raise NodeException("must have at least two contexts for combining")
# if meta_from is None:
# meta_from = next(iter(ctx_names.keys()))
# self.meta_from = meta_from
#
# def process(self, msg):
# payload = {}
# ch_logger = self.channel.logger
# ch_logger.debug(
# "combine ctx_names = %s / meta from %r",
# repr(self.ctx_names),
# self.meta_from,
# )
# # TODO: can we just copy the meta if the payload changed?
# # TODO: If not, then rmv meta_from param and create new meta
# msg.meta = msg.ctx[self.meta_from]['meta']
#
# for ctx_name, dst in self.ctx_names.items():
# ch_logger.debug("combine ctx = %s -> %s", ctx_name, dst)
# if dst is None:
# ctx_payload = msg.ctx[ctx_name]['payload']
# ch_logger.debug(
# "upd payload with ctx payload of %s ( %s )",
# ctx_name, repr(ctx_payload))
# payload.update(ctx_payload)
# else:
# payload[dst] = msg.ctx[ctx_name]['payload']
#
# msg.payload = payload
#
# return msg
#
# Path: pypeman/nodes.py
# class NodeException(Exception):
# """ custom exception """
#
# Path: pypeman/tests/pytest_helpers.py
# @pytest.fixture(scope="function")
# def clear_graph():
# """
# ensure, that before and after a pypeman test
# the pypeman graph is entirely cleared
# """
# n_nodes = len(pypeman.nodes.all_nodes)
# n_channels = len(pypeman.channels.all_channels)
# n_endpoints = len(pypeman.endpoints.all_endpoints)
# pypeman.nodes.reset_pypeman_nodes()
# pypeman.channels.reset_pypeman_channels()
# pypeman.endpoints.reset_pypeman_endpoints()
# yield n_nodes, n_channels, n_endpoints
# pypeman.nodes.reset_pypeman_nodes()
# pypeman.channels.reset_pypeman_channels()
# pypeman.endpoints.reset_pypeman_endpoints()
#
# Path: pypeman/tests/common.py
# def generate_msg(timestamp=None, message_content=default_message_content,
# message_meta=None, with_context=False):
# """ generates a default message """
# m = message.Message()
# if timestamp:
# if isinstance(timestamp, tuple):
# m.timestamp = datetime.datetime(*timestamp)
# else: # assume it's a datetime object
# m.timestamp = timestamp
# else: # just use current time
# m.timestamp = datetime.datetime.utcnow()
#
# m.payload = message_content
#
# if message_meta is None:
# m.meta = dict(default_message_meta)
# else:
# m.meta = message_meta
#
# if with_context:
# # Add context message
# mctx = generate_msg(message_content={'question': 'known'}, message_meta={'answer': 43})
# m.add_context('test', mctx)
#
# return m
#
# Path: pypeman/tests/test_nodes.py
# class FakeChannel():
# def __init__(self, loop):
# self.logger = mock.MagicMock()
# self.uuid = 'fakeChannel'
# self.name = 'fakeChannel'
# self.parent_uids = "parent_uid"
# self.parent_names = ["parent_names"]
#
# self.loop = loop
which might include code, classes, or functions. Output only the next line. | with pytest.raises(NodeException): |
Given the code snippet: <|code_start|>"""
tests for pypeman.contrib.ctx
"""
# TODO: might refactor to another file?
def mk_msgs_w_ctx(*ctx_ids):
"""
helper to create a msg with a few contexts
"""
<|code_end|>
, generate the next line using the imports in this file:
import asyncio
import pytest
from pypeman.contrib.ctx import CombineCtx
from pypeman.nodes import NodeException
from pypeman.tests.pytest_helpers import clear_graph # noqa: F401
from pypeman.tests.common import generate_msg
from pypeman.tests.test_nodes import FakeChannel
and context (functions, classes, or occasionally code) from other files:
# Path: pypeman/contrib/ctx.py
# class CombineCtx(nodes.BaseNode):
# """
# creates payload dict with combination of different contexts
# """
#
# def __init__(self, ctx_names, meta_from=None, flatten=False, *args, **kwargs):
# """
# param ctx_names: list of context names to save or dict with mapping
# of context name to payload keys
# param: flatten: if true all ctx payloads (must be dicts) will be combined
# in one dict
# param: meta_from: specifies which meta the resulting message should have
# """
# super().__init__(*args, **kwargs)
# if not isinstance(ctx_names, dict):
# if flatten:
# ctx_dst = len(ctx_names) * [None]
# else:
# ctx_dst = ctx_names
# ctx_names = dict(zip(ctx_names, ctx_dst))
# self.ctx_names = ctx_names
# if len(ctx_names) < 2:
# raise NodeException("must have at least two contexts for combining")
# if meta_from is None:
# meta_from = next(iter(ctx_names.keys()))
# self.meta_from = meta_from
#
# def process(self, msg):
# payload = {}
# ch_logger = self.channel.logger
# ch_logger.debug(
# "combine ctx_names = %s / meta from %r",
# repr(self.ctx_names),
# self.meta_from,
# )
# # TODO: can we just copy the meta if the payload changed?
# # TODO: If not, then rmv meta_from param and create new meta
# msg.meta = msg.ctx[self.meta_from]['meta']
#
# for ctx_name, dst in self.ctx_names.items():
# ch_logger.debug("combine ctx = %s -> %s", ctx_name, dst)
# if dst is None:
# ctx_payload = msg.ctx[ctx_name]['payload']
# ch_logger.debug(
# "upd payload with ctx payload of %s ( %s )",
# ctx_name, repr(ctx_payload))
# payload.update(ctx_payload)
# else:
# payload[dst] = msg.ctx[ctx_name]['payload']
#
# msg.payload = payload
#
# return msg
#
# Path: pypeman/nodes.py
# class NodeException(Exception):
# """ custom exception """
#
# Path: pypeman/tests/pytest_helpers.py
# @pytest.fixture(scope="function")
# def clear_graph():
# """
# ensure, that before and after a pypeman test
# the pypeman graph is entirely cleared
# """
# n_nodes = len(pypeman.nodes.all_nodes)
# n_channels = len(pypeman.channels.all_channels)
# n_endpoints = len(pypeman.endpoints.all_endpoints)
# pypeman.nodes.reset_pypeman_nodes()
# pypeman.channels.reset_pypeman_channels()
# pypeman.endpoints.reset_pypeman_endpoints()
# yield n_nodes, n_channels, n_endpoints
# pypeman.nodes.reset_pypeman_nodes()
# pypeman.channels.reset_pypeman_channels()
# pypeman.endpoints.reset_pypeman_endpoints()
#
# Path: pypeman/tests/common.py
# def generate_msg(timestamp=None, message_content=default_message_content,
# message_meta=None, with_context=False):
# """ generates a default message """
# m = message.Message()
# if timestamp:
# if isinstance(timestamp, tuple):
# m.timestamp = datetime.datetime(*timestamp)
# else: # assume it's a datetime object
# m.timestamp = timestamp
# else: # just use current time
# m.timestamp = datetime.datetime.utcnow()
#
# m.payload = message_content
#
# if message_meta is None:
# m.meta = dict(default_message_meta)
# else:
# m.meta = message_meta
#
# if with_context:
# # Add context message
# mctx = generate_msg(message_content={'question': 'known'}, message_meta={'answer': 43})
# m.add_context('test', mctx)
#
# return m
#
# Path: pypeman/tests/test_nodes.py
# class FakeChannel():
# def __init__(self, loop):
# self.logger = mock.MagicMock()
# self.uuid = 'fakeChannel'
# self.name = 'fakeChannel'
# self.parent_uids = "parent_uid"
# self.parent_names = ["parent_names"]
#
# self.loop = loop
. Output only the next line. | msg = generate_msg( |
Next line prediction: <|code_start|> message_content="test",
message_meta=dict(entry1="meta1"),
)
ctx_msgs = []
for ctx_id in ctx_ids:
meta = dict(entry1="meta_%s" % ctx_id)
ctx_msg = generate_msg(
message_content={"val_%s" % ctx_id: "data_%s" % ctx_id},
message_meta=meta,
)
msg.add_context(ctx_id, ctx_msg)
ctx_msgs.append(ctx_msg)
return msg, ctx_msgs
@pytest.mark.usefixtures("clear_graph")
def test_combine_ctx_not_two_names(event_loop):
with pytest.raises(NodeException):
CombineCtx([], name="ctx1")
with pytest.raises(NodeException):
CombineCtx(["a"], name="ctx2")
@pytest.mark.usefixtures("clear_graph")
def test_combine_ctx_2_names(event_loop):
loop = event_loop
asyncio.set_event_loop(loop)
# nut == Node Under Test
nut = CombineCtx(["a", "b"], name="ctx1")
<|code_end|>
. Use current file imports:
(import asyncio
import pytest
from pypeman.contrib.ctx import CombineCtx
from pypeman.nodes import NodeException
from pypeman.tests.pytest_helpers import clear_graph # noqa: F401
from pypeman.tests.common import generate_msg
from pypeman.tests.test_nodes import FakeChannel)
and context including class names, function names, or small code snippets from other files:
# Path: pypeman/contrib/ctx.py
# class CombineCtx(nodes.BaseNode):
# """
# creates payload dict with combination of different contexts
# """
#
# def __init__(self, ctx_names, meta_from=None, flatten=False, *args, **kwargs):
# """
# param ctx_names: list of context names to save or dict with mapping
# of context name to payload keys
# param: flatten: if true all ctx payloads (must be dicts) will be combined
# in one dict
# param: meta_from: specifies which meta the resulting message should have
# """
# super().__init__(*args, **kwargs)
# if not isinstance(ctx_names, dict):
# if flatten:
# ctx_dst = len(ctx_names) * [None]
# else:
# ctx_dst = ctx_names
# ctx_names = dict(zip(ctx_names, ctx_dst))
# self.ctx_names = ctx_names
# if len(ctx_names) < 2:
# raise NodeException("must have at least two contexts for combining")
# if meta_from is None:
# meta_from = next(iter(ctx_names.keys()))
# self.meta_from = meta_from
#
# def process(self, msg):
# payload = {}
# ch_logger = self.channel.logger
# ch_logger.debug(
# "combine ctx_names = %s / meta from %r",
# repr(self.ctx_names),
# self.meta_from,
# )
# # TODO: can we just copy the meta if the payload changed?
# # TODO: If not, then rmv meta_from param and create new meta
# msg.meta = msg.ctx[self.meta_from]['meta']
#
# for ctx_name, dst in self.ctx_names.items():
# ch_logger.debug("combine ctx = %s -> %s", ctx_name, dst)
# if dst is None:
# ctx_payload = msg.ctx[ctx_name]['payload']
# ch_logger.debug(
# "upd payload with ctx payload of %s ( %s )",
# ctx_name, repr(ctx_payload))
# payload.update(ctx_payload)
# else:
# payload[dst] = msg.ctx[ctx_name]['payload']
#
# msg.payload = payload
#
# return msg
#
# Path: pypeman/nodes.py
# class NodeException(Exception):
# """ custom exception """
#
# Path: pypeman/tests/pytest_helpers.py
# @pytest.fixture(scope="function")
# def clear_graph():
# """
# ensure, that before and after a pypeman test
# the pypeman graph is entirely cleared
# """
# n_nodes = len(pypeman.nodes.all_nodes)
# n_channels = len(pypeman.channels.all_channels)
# n_endpoints = len(pypeman.endpoints.all_endpoints)
# pypeman.nodes.reset_pypeman_nodes()
# pypeman.channels.reset_pypeman_channels()
# pypeman.endpoints.reset_pypeman_endpoints()
# yield n_nodes, n_channels, n_endpoints
# pypeman.nodes.reset_pypeman_nodes()
# pypeman.channels.reset_pypeman_channels()
# pypeman.endpoints.reset_pypeman_endpoints()
#
# Path: pypeman/tests/common.py
# def generate_msg(timestamp=None, message_content=default_message_content,
# message_meta=None, with_context=False):
# """ generates a default message """
# m = message.Message()
# if timestamp:
# if isinstance(timestamp, tuple):
# m.timestamp = datetime.datetime(*timestamp)
# else: # assume it's a datetime object
# m.timestamp = timestamp
# else: # just use current time
# m.timestamp = datetime.datetime.utcnow()
#
# m.payload = message_content
#
# if message_meta is None:
# m.meta = dict(default_message_meta)
# else:
# m.meta = message_meta
#
# if with_context:
# # Add context message
# mctx = generate_msg(message_content={'question': 'known'}, message_meta={'answer': 43})
# m.add_context('test', mctx)
#
# return m
#
# Path: pypeman/tests/test_nodes.py
# class FakeChannel():
# def __init__(self, loop):
# self.logger = mock.MagicMock()
# self.uuid = 'fakeChannel'
# self.name = 'fakeChannel'
# self.parent_uids = "parent_uid"
# self.parent_names = ["parent_names"]
#
# self.loop = loop
. Output only the next line. | nut.channel = FakeChannel(loop) |
Using the snippet: <|code_start|> reverse = True
sort_key = order_by[1:]
else:
reverse = False
sort_key = order_by
result = []
for value in sorted(self.messages.values(), key=lambda x: x[sort_key], reverse=reverse):
resp = dict(value)
resp['message'] = Message.from_dict(resp['message'])
result.append(resp)
return result[start: start + count]
async def total(self):
return len(self.messages)
class FileMessageStoreFactory(MessageStoreFactory):
"""
Generate a FileMessageStore message store instance.
Store a file in `<base_path>/<store_id>/<month>/<day>/` hierachy.
"""
# TODO add an option to reguraly archive old file or delete them
def __init__(self, path):
super().__init__()
if path is None:
<|code_end|>
, determine the next line of code. You have imports:
import logging
import os
import re
from collections import OrderedDict
from pypeman.message import Message
from pypeman.errors import PypemanConfigError
and context (class names, function names, or code) available:
# Path: pypeman/errors.py
# class PypemanConfigError(PypemanError):
# """ custom error """
. Output only the next line. | raise PypemanConfigError('file message store requires a path') |
Based on the snippet: <|code_start|>"""
Global graph related pypeman functions.
On the long run some commands of pypeman.commands might be candidates
to be moved into this module
"""
def load_project():
<|code_end|>
, predict the immediate next line with the help of imports:
import importlib
import sys
import time
import traceback
from pypeman.conf import settings
from pypeman import channels
from pypeman.errors import PypemanError
from pypeman.plugin_mgr import manager as plugin_manager
and context (classes, functions, sometimes code) from other files:
# Path: pypeman/conf.py
# NOT_FOUND = object() # sentinel object
# class ConfigError(ImportError):
# class Settings():
# def __init__(self, module_name=None):
# def init_settings(self):
# def __getattr__(self, name):
# def __setattr__(self, name, value):
#
# Path: pypeman/channels.py
# class Dropped(Exception):
# class Rejected(Exception):
# class ChannelStopped(Exception):
# class BaseChannel:
# class SubChannel(BaseChannel):
# class ConditionSubChannel(BaseChannel):
# class Case():
# class FileWatcherChannel(BaseChannel):
# STARTING, WAITING, PROCESSING, STOPPING, STOPPED = range(5)
# STATE_NAMES = ['STARTING', 'WAITING', 'PROCESSING', 'STOPPING', 'STOPPED']
# NEW, UNCHANGED, MODIFIED, DELETED = range(4)
# def __init__(self, name=None, parent_channel=None, loop=None, message_store_factory=None):
# def status_id_to_str(cls, state_id):
# def status_str_to_id(cls, state):
# def status(self):
# def status(self, value):
# def is_stopped(self):
# async def start(self):
# def init_node_graph(self):
# async def stop(self):
# def _reset_test(self):
# def add(self, *args):
# def _register_node(self, node):
# def get_node(self, name):
# def append(self, *args):
# def fork(self, name=None, message_store_factory=None):
# def when(self, condition, name=None, message_store_factory=None):
# def case(self, *conditions, names=None, message_store_factory=None):
# def handle_and_wait(self, msg):
# async def handle(self, msg):
# async def subhandle(self, msg):
# async def process(self, msg):
# async def replay(self, msg_id):
# def to_dict(self):
# def subchannels(self):
# def graph(self, prefix='', dot=False):
# def graph_dot(self, end=''):
# def __str__(self):
# def __repr__(self):
# def reset_pypeman_channels():
# def callback(self, fut):
# async def process(self, msg):
# def __init__(self, condition=lambda x: True, **kwargs):
# def test_condition(self, msg):
# async def subhandle(self, msg):
# def __init__(self, *args, names=None, parent_channel=None, message_store_factory=None, loop=None):
# def _reset_test(self):
# def test_condition(self, condition, msg):
# async def handle(self, msg):
# def __init__(self, *args, basedir='', regex='.*', interval=1, binary_file=False, path='', **kwargs):
# async def start(self):
# def file_status(self, filename):
# def _handle_callback(self, future):
# async def watch_for_file(self):
#
# Path: pypeman/errors.py
# class PypemanError(Exception):
# """ custom error """
. Output only the next line. | settings.init_settings() |
Given the following code snippet before the placeholder: <|code_start|> importlib.import_module(project_module)
except ImportError as exc:
msg = str(exc)
if 'No module' not in msg:
print("IMPORT ERROR %s" % project_module)
raise
if project_module not in msg:
print("IMPORT ERROR %s" % project_module)
raise
print("Missing '%s' module !" % project_module)
sys.exit(-1)
except Exception:
traceback.print_exc()
raise
plugin_manager.init_plugins()
plugin_manager.ready_plugins()
def wait_for_loop(tmax=5.0):
"""
wait until the loop variable of a pypeman graph
has been initialized
can be used from any thread that's not the main thread
"""
# TODO: might factor out this function to a helper module
loop = None
steps = int(tmax / 0.1)
for i in range(steps, -1, -1):
try:
<|code_end|>
, predict the next line using imports from the current file:
import importlib
import sys
import time
import traceback
from pypeman.conf import settings
from pypeman import channels
from pypeman.errors import PypemanError
from pypeman.plugin_mgr import manager as plugin_manager
and context including class names, function names, and sometimes code from other files:
# Path: pypeman/conf.py
# NOT_FOUND = object() # sentinel object
# class ConfigError(ImportError):
# class Settings():
# def __init__(self, module_name=None):
# def init_settings(self):
# def __getattr__(self, name):
# def __setattr__(self, name, value):
#
# Path: pypeman/channels.py
# class Dropped(Exception):
# class Rejected(Exception):
# class ChannelStopped(Exception):
# class BaseChannel:
# class SubChannel(BaseChannel):
# class ConditionSubChannel(BaseChannel):
# class Case():
# class FileWatcherChannel(BaseChannel):
# STARTING, WAITING, PROCESSING, STOPPING, STOPPED = range(5)
# STATE_NAMES = ['STARTING', 'WAITING', 'PROCESSING', 'STOPPING', 'STOPPED']
# NEW, UNCHANGED, MODIFIED, DELETED = range(4)
# def __init__(self, name=None, parent_channel=None, loop=None, message_store_factory=None):
# def status_id_to_str(cls, state_id):
# def status_str_to_id(cls, state):
# def status(self):
# def status(self, value):
# def is_stopped(self):
# async def start(self):
# def init_node_graph(self):
# async def stop(self):
# def _reset_test(self):
# def add(self, *args):
# def _register_node(self, node):
# def get_node(self, name):
# def append(self, *args):
# def fork(self, name=None, message_store_factory=None):
# def when(self, condition, name=None, message_store_factory=None):
# def case(self, *conditions, names=None, message_store_factory=None):
# def handle_and_wait(self, msg):
# async def handle(self, msg):
# async def subhandle(self, msg):
# async def process(self, msg):
# async def replay(self, msg_id):
# def to_dict(self):
# def subchannels(self):
# def graph(self, prefix='', dot=False):
# def graph_dot(self, end=''):
# def __str__(self):
# def __repr__(self):
# def reset_pypeman_channels():
# def callback(self, fut):
# async def process(self, msg):
# def __init__(self, condition=lambda x: True, **kwargs):
# def test_condition(self, msg):
# async def subhandle(self, msg):
# def __init__(self, *args, names=None, parent_channel=None, message_store_factory=None, loop=None):
# def _reset_test(self):
# def test_condition(self, condition, msg):
# async def handle(self, msg):
# def __init__(self, *args, basedir='', regex='.*', interval=1, binary_file=False, path='', **kwargs):
# async def start(self):
# def file_status(self, filename):
# def _handle_callback(self, future):
# async def watch_for_file(self):
#
# Path: pypeman/errors.py
# class PypemanError(Exception):
# """ custom error """
. Output only the next line. | channel = channels.all_channels[0] |
Given the code snippet: <|code_start|> if project_module not in msg:
print("IMPORT ERROR %s" % project_module)
raise
print("Missing '%s' module !" % project_module)
sys.exit(-1)
except Exception:
traceback.print_exc()
raise
plugin_manager.init_plugins()
plugin_manager.ready_plugins()
def wait_for_loop(tmax=5.0):
"""
wait until the loop variable of a pypeman graph
has been initialized
can be used from any thread that's not the main thread
"""
# TODO: might factor out this function to a helper module
loop = None
steps = int(tmax / 0.1)
for i in range(steps, -1, -1):
try:
channel = channels.all_channels[0]
# print("channel =", channel)
loop = channel.loop
break
except Exception:
if i == 0:
<|code_end|>
, generate the next line using the imports in this file:
import importlib
import sys
import time
import traceback
from pypeman.conf import settings
from pypeman import channels
from pypeman.errors import PypemanError
from pypeman.plugin_mgr import manager as plugin_manager
and context (functions, classes, or occasionally code) from other files:
# Path: pypeman/conf.py
# NOT_FOUND = object() # sentinel object
# class ConfigError(ImportError):
# class Settings():
# def __init__(self, module_name=None):
# def init_settings(self):
# def __getattr__(self, name):
# def __setattr__(self, name, value):
#
# Path: pypeman/channels.py
# class Dropped(Exception):
# class Rejected(Exception):
# class ChannelStopped(Exception):
# class BaseChannel:
# class SubChannel(BaseChannel):
# class ConditionSubChannel(BaseChannel):
# class Case():
# class FileWatcherChannel(BaseChannel):
# STARTING, WAITING, PROCESSING, STOPPING, STOPPED = range(5)
# STATE_NAMES = ['STARTING', 'WAITING', 'PROCESSING', 'STOPPING', 'STOPPED']
# NEW, UNCHANGED, MODIFIED, DELETED = range(4)
# def __init__(self, name=None, parent_channel=None, loop=None, message_store_factory=None):
# def status_id_to_str(cls, state_id):
# def status_str_to_id(cls, state):
# def status(self):
# def status(self, value):
# def is_stopped(self):
# async def start(self):
# def init_node_graph(self):
# async def stop(self):
# def _reset_test(self):
# def add(self, *args):
# def _register_node(self, node):
# def get_node(self, name):
# def append(self, *args):
# def fork(self, name=None, message_store_factory=None):
# def when(self, condition, name=None, message_store_factory=None):
# def case(self, *conditions, names=None, message_store_factory=None):
# def handle_and_wait(self, msg):
# async def handle(self, msg):
# async def subhandle(self, msg):
# async def process(self, msg):
# async def replay(self, msg_id):
# def to_dict(self):
# def subchannels(self):
# def graph(self, prefix='', dot=False):
# def graph_dot(self, end=''):
# def __str__(self):
# def __repr__(self):
# def reset_pypeman_channels():
# def callback(self, fut):
# async def process(self, msg):
# def __init__(self, condition=lambda x: True, **kwargs):
# def test_condition(self, msg):
# async def subhandle(self, msg):
# def __init__(self, *args, names=None, parent_channel=None, message_store_factory=None, loop=None):
# def _reset_test(self):
# def test_condition(self, condition, msg):
# async def handle(self, msg):
# def __init__(self, *args, basedir='', regex='.*', interval=1, binary_file=False, path='', **kwargs):
# async def start(self):
# def file_status(self, filename):
# def _handle_callback(self, future):
# async def watch_for_file(self):
#
# Path: pypeman/errors.py
# class PypemanError(Exception):
# """ custom error """
. Output only the next line. | raise PypemanError("couldn't obtain graph's loop") |
Given the code snippet: <|code_start|>"""
This module contains all persistence related things.
"""
SENTINEL = object()
_backend = None
async def get_backend(loop):
"""
Return the configured backend instance.
:param loop: Asyncio loop to use. Passed backend instance.
"""
global _backend
if not _backend:
# Load backend on first use
<|code_end|>
, generate the next line using the imports in this file:
import asyncio
import importlib
from concurrent.futures import ThreadPoolExecutor
from collections import defaultdict
from sqlitedict import SqliteDict
from pypeman.conf import settings
and context (functions, classes, or occasionally code) from other files:
# Path: pypeman/conf.py
# NOT_FOUND = object() # sentinel object
# class ConfigError(ImportError):
# class Settings():
# def __init__(self, module_name=None):
# def init_settings(self):
# def __getattr__(self, name):
# def __setattr__(self, name, value):
. Output only the next line. | if not settings.PERSISTENCE_BACKEND: |
Predict the next line for this snippet: <|code_start|>
tst_lock = threading.Lock()
# migth refactor to a helper directory
@contextmanager
def chdir(path=None):
"""
enter a certain directory for a given test
"""
if path is None:
path = os.path.realpath(os.path.dirname(__file__))
prev_dir = os.getcwd()
os.chdir(path)
yield prev_dir
os.chdir(prev_dir)
def start_pm_graph():
try:
sys.argv[1:] = ["--no-daemon"]
pypeman.commands.start()
except SystemExit:
pass
def tst_thread(errors):
"""
testing thread for test_one
"""
<|code_end|>
with the help of current file imports:
import os
import sys
import threading
import time
import pypeman.commands
from contextlib import contextmanager
from pypeman.graph import wait_for_loop
and context from other files:
# Path: pypeman/graph.py
# def wait_for_loop(tmax=5.0):
# """
# wait until the loop variable of a pypeman graph
# has been initialized
#
# can be used from any thread that's not the main thread
# """
# # TODO: might factor out this function to a helper module
# loop = None
# steps = int(tmax / 0.1)
# for i in range(steps, -1, -1):
# try:
# channel = channels.all_channels[0]
# # print("channel =", channel)
# loop = channel.loop
# break
# except Exception:
# if i == 0:
# raise PypemanError("couldn't obtain graph's loop")
# time.sleep(0.1)
# return loop
, which may contain function names, class names, or code. Output only the next line. | loop = wait_for_loop() |
Given the following code snippet before the placeholder: <|code_start|>
It can also encourage development of plugins outside of pypeman, that can lateron
be merged into to pypeman.contrib.plugins or pypeman.plugins.
"""
logger = logging.getLogger(__name__)
class PluginManager():
def __init__(self):
self.imported = False
self.plugin_classes = [] # list of plugin modules
self.plugins = []
self.loop = None
def set_loop(self, loop):
self.loop = loop
for plugin in self.plugins:
plugin.set_loop(loop)
def import_plugins(self):
"""
import plugin modules and store the classes in a list
"""
if self.imported:
return
<|code_end|>
, predict the next line using imports from the current file:
import logging
from importlib import import_module
from pypeman.conf import settings
and context including class names, function names, and sometimes code from other files:
# Path: pypeman/conf.py
# NOT_FOUND = object() # sentinel object
# class ConfigError(ImportError):
# class Settings():
# def __init__(self, module_name=None):
# def init_settings(self):
# def __getattr__(self, name):
# def __setattr__(self, name, value):
. Output only the next line. | for plugin_name in settings.PLUGINS: |
Next line prediction: <|code_start|>
:param msg: incoming message
:return: modified message after a process call and some treatment
"""
# TODO : Make sure exceptions are well raised (does not happen if i.e 1/0 here atm)
if self.store_input_as:
msg.add_context(self.store_input_as, msg)
if self.passthrough:
old_msg = msg.copy()
# Allow process as coroutine function
if asyncio.iscoroutinefunction(self.process):
result = await self.async_run(msg)
else:
result = self.run(msg)
self.processed += 1
if isinstance(result, asyncio.Future):
result = await result
if self.next_node:
if isinstance(result, types.GeneratorType):
gene = result
result = msg # Necessary if all nodes result are dropped
for res in gene:
try:
result = await self.next_node.handle(res)
<|code_end|>
. Use current file imports:
(import asyncio
import base64
import json
import logging
import os
import smtplib
import types
import warnings
from datetime import datetime
from collections import OrderedDict
from fnmatch import fnmatch
from email.mime.text import MIMEText
from urllib import parse
from concurrent.futures import ThreadPoolExecutor
from pypeman.message import Message
from pypeman.channels import Dropped
from pypeman.persistence import get_backend
from pypeman.helpers import lazyload # noqa: E402)
and context including class names, function names, or small code snippets from other files:
# Path: pypeman/channels.py
# class Dropped(Exception):
# """ Used to stop process as message is processed. Default success should be returned.
# """
#
# Path: pypeman/persistence.py
# async def get_backend(loop):
# """
# Return the configured backend instance.
#
# :param loop: Asyncio loop to use. Passed backend instance.
# """
# global _backend
# if not _backend:
# # Load backend on first use
# if not settings.PERSISTENCE_BACKEND:
# raise Exception("Persistence backend not configured.")
#
# module, _, class_ = settings.PERSISTENCE_BACKEND.rpartition('.')
# loaded_module = importlib.import_module(module)
# _backend = getattr(loaded_module, class_)(loop=loop, **settings.PERSISTENCE_CONFIG)
#
# await _backend.start()
#
# return _backend
#
# Path: pypeman/helpers/lazyload.py
# def load_class(module, class_, deps):
# def load(selfmodname, module, class_, dep=None):
# def init(*args, **kwargs):
# def __init__(self, wrapped):
# def add_lazy(self, module, name, deps):
# def __getattr__(self, name):
# C = load_class(module, class_, dep)
# class Wrapper(object):
. Output only the next line. | except Dropped: |
Next line prediction: <|code_start|> result = await self._handle(msg)
return result
async def async_run(self, msg):
""" Used to overload behaviour like thread Node without rewriting handle process """
result = await self.process(msg)
return result
def run(self, msg):
""" Used to overload behaviour like thread Node without rewriting handle process """
result = self.process(msg)
return result
def process(self, msg):
""" Implement this function in child classes to create
a new Node.
:param msg: The incoming message
:return: The processed message
"""
return msg
async def save_data(self, key, value):
"""
Save data in configured persistence backend for next usage.
:param key: Key of saved data.
:param value: Value saved.
"""
<|code_end|>
. Use current file imports:
(import asyncio
import base64
import json
import logging
import os
import smtplib
import types
import warnings
from datetime import datetime
from collections import OrderedDict
from fnmatch import fnmatch
from email.mime.text import MIMEText
from urllib import parse
from concurrent.futures import ThreadPoolExecutor
from pypeman.message import Message
from pypeman.channels import Dropped
from pypeman.persistence import get_backend
from pypeman.helpers import lazyload # noqa: E402)
and context including class names, function names, or small code snippets from other files:
# Path: pypeman/channels.py
# class Dropped(Exception):
# """ Used to stop process as message is processed. Default success should be returned.
# """
#
# Path: pypeman/persistence.py
# async def get_backend(loop):
# """
# Return the configured backend instance.
#
# :param loop: Asyncio loop to use. Passed backend instance.
# """
# global _backend
# if not _backend:
# # Load backend on first use
# if not settings.PERSISTENCE_BACKEND:
# raise Exception("Persistence backend not configured.")
#
# module, _, class_ = settings.PERSISTENCE_BACKEND.rpartition('.')
# loaded_module = importlib.import_module(module)
# _backend = getattr(loaded_module, class_)(loop=loop, **settings.PERSISTENCE_CONFIG)
#
# await _backend.start()
#
# return _backend
#
# Path: pypeman/helpers/lazyload.py
# def load_class(module, class_, deps):
# def load(selfmodname, module, class_, dep=None):
# def init(*args, **kwargs):
# def __init__(self, wrapped):
# def add_lazy(self, module, name, deps):
# def __getattr__(self, name):
# C = load_class(module, class_, dep)
# class Wrapper(object):
. Output only the next line. | await (await get_backend(self.channel.loop)).store(self.fullpath(), key, value) |
Given the code snippet: <|code_start|>
def process(self, msg):
content = choose_first_not_none(self.content, msg.payload)
subject = choose_first_not_none(self.subject, msg.meta.get('subject'), 'No subject')
sender = choose_first_not_none(self.sender, msg.meta.get('sender'), 'pypeman@example.com')
recipients = choose_first_not_none(self.recipients, msg.meta.get('recipients'), [])
if isinstance(recipients, str):
recipients = [recipients]
self.send_email(subject, sender, recipients, content)
return msg
def reset_pypeman_nodes():
"""
clears book keeping of all channels
Can be useful for unit testing.
"""
logger.info("clearing all_nodes and BaseNode._used-names.")
all_nodes.clear()
BaseNode._used_names.clear()
# Contrib nodes
# TODO: can we move this line to top of file?
<|code_end|>
, generate the next line using the imports in this file:
import asyncio
import base64
import json
import logging
import os
import smtplib
import types
import warnings
from datetime import datetime
from collections import OrderedDict
from fnmatch import fnmatch
from email.mime.text import MIMEText
from urllib import parse
from concurrent.futures import ThreadPoolExecutor
from pypeman.message import Message
from pypeman.channels import Dropped
from pypeman.persistence import get_backend
from pypeman.helpers import lazyload # noqa: E402
and context (functions, classes, or occasionally code) from other files:
# Path: pypeman/channels.py
# class Dropped(Exception):
# """ Used to stop process as message is processed. Default success should be returned.
# """
#
# Path: pypeman/persistence.py
# async def get_backend(loop):
# """
# Return the configured backend instance.
#
# :param loop: Asyncio loop to use. Passed backend instance.
# """
# global _backend
# if not _backend:
# # Load backend on first use
# if not settings.PERSISTENCE_BACKEND:
# raise Exception("Persistence backend not configured.")
#
# module, _, class_ = settings.PERSISTENCE_BACKEND.rpartition('.')
# loaded_module = importlib.import_module(module)
# _backend = getattr(loaded_module, class_)(loop=loop, **settings.PERSISTENCE_CONFIG)
#
# await _backend.start()
#
# return _backend
#
# Path: pypeman/helpers/lazyload.py
# def load_class(module, class_, deps):
# def load(selfmodname, module, class_, dep=None):
# def init(*args, **kwargs):
# def __init__(self, wrapped):
# def add_lazy(self, module, name, deps):
# def __getattr__(self, name):
# C = load_class(module, class_, dep)
# class Wrapper(object):
. Output only the next line. | wrap = lazyload.Wrapper(__name__) |
Predict the next line for this snippet: <|code_start|>"""
Additional Nodes for working with the context
"""
class CombineCtx(nodes.BaseNode):
"""
creates payload dict with combination of different contexts
"""
def __init__(self, ctx_names, meta_from=None, flatten=False, *args, **kwargs):
"""
param ctx_names: list of context names to save or dict with mapping
of context name to payload keys
param: flatten: if true all ctx payloads (must be dicts) will be combined
in one dict
param: meta_from: specifies which meta the resulting message should have
"""
super().__init__(*args, **kwargs)
if not isinstance(ctx_names, dict):
if flatten:
ctx_dst = len(ctx_names) * [None]
else:
ctx_dst = ctx_names
ctx_names = dict(zip(ctx_names, ctx_dst))
self.ctx_names = ctx_names
if len(ctx_names) < 2:
<|code_end|>
with the help of current file imports:
import pypeman.nodes as nodes
from pypeman.nodes import NodeException
and context from other files:
# Path: pypeman/nodes.py
# class NodeException(Exception):
# """ custom exception """
, which may contain function names, class names, or code. Output only the next line. | raise NodeException("must have at least two contexts for combining") |
Continue the code snippet: <|code_start|>
class PolandSpecProvider(BaseSpecProvider):
"""Class that provides special data for Poland (pl)."""
def __init__(self, seed: Seed = None) -> None:
"""Initialize attributes."""
super().__init__(locale=Locale.PL, seed=seed)
class Meta:
"""The name of the provider."""
name: Final[str] = "poland_provider"
def nip(self) -> str:
"""Generate random valid 10-digit NIP.
:return: Valid 10-digit NIP
"""
nip_digits = [int(d) for d in str(self.random.randint(101, 998))]
nip_digits += [self.random.randint(0, 9) for _ in range(6)]
nip_coefficients = (6, 5, 7, 2, 3, 4, 5, 6, 7)
sum_v = sum(nc * nd for nc, nd in zip(nip_coefficients, nip_digits))
checksum_digit = sum_v % 11
if checksum_digit > 9:
return self.nip()
nip_digits.append(checksum_digit)
return "".join(map(str, nip_digits))
def pesel(
<|code_end|>
. Use current file imports:
from typing import Final, Optional
from mimesis.builtins.base import BaseSpecProvider
from mimesis.enums import Gender
from mimesis.locales import Locale
from mimesis.providers import Datetime
from mimesis.types import DateTime, Seed
and context (classes, functions, or code) from other files:
# Path: mimesis/builtins/base.py
# class BaseSpecProvider(BaseDataProvider):
# """Base provider for specific data providers."""
#
# def __init__(self, *args: Any, **kwargs: Any) -> None:
# """Initialize attributes of superclass."""
# super().__init__(*args, **kwargs)
# self._datafile = "builtin.json"
#
# Path: mimesis/types.py
# JSON = Dict[str, Any]
. Output only the next line. | self, birth_date: Optional[DateTime] = None, gender: Optional[Gender] = None |
Next line prediction: <|code_start|>
__all__ = ["BaseField", "Field", "Schema"]
class BaseField:
"""
BaseField is a class for generating data by the name of the method.
Instance of this object takes any string which represents the name
of any method of any supported data provider (:class:`~mimesis.Generic`)
and the ``**kwargs`` of the method.
See :class:`~mimesis.schema.BaseField.perform` for more details.
"""
class Meta:
base = True
def __init__(
self,
locale: Locale = Locale.DEFAULT,
seed: Seed = None,
providers: Optional[Sequence[Any]] = None,
) -> None:
"""Initialize field.
:param locale: Locale
:param seed: Seed for random.
"""
<|code_end|>
. Use current file imports:
(import csv
import json
import pickle
import warnings
from typing import Any, Callable, ClassVar, Iterator, List, Optional, Sequence
from mimesis.exceptions import FieldError, SchemaError
from mimesis.locales import Locale
from mimesis.providers.generic import Generic
from mimesis.types import JSON, SchemaType, Seed)
and context including class names, function names, or small code snippets from other files:
# Path: mimesis/providers/generic.py
# class Generic(BaseProvider):
# """Class which contain all providers at one."""
#
# _DEFAULT_PROVIDERS = (
# Address,
# BinaryFile,
# Finance,
# Choice,
# Code,
# Choice,
# Datetime,
# Development,
# File,
# Food,
# Hardware,
# Internet,
# Numeric,
# Path,
# Payment,
# Person,
# Science,
# Text,
# Transport,
# Cryptographic,
# )
#
# def __init__(self, locale: Locale = Locale.DEFAULT, seed: Seed = None) -> None:
# """Initialize attributes lazily."""
# super().__init__(seed=seed)
# self.locale = locale
#
# for provider in self._DEFAULT_PROVIDERS:
# name = getattr(provider.Meta, "name") # type: ignore
#
# # Check if a provider is locale-dependent.
# if issubclass(provider, BaseDataProvider):
# setattr(self, f"_{name}", provider)
# elif issubclass(provider, BaseProvider):
# setattr(self, name, provider(seed=self.seed))
#
# class Meta:
# """Class for metadata."""
#
# name: Final[str] = "generic"
#
# def __getattr__(self, attrname: str) -> Any:
# """Get attribute without underscore.
#
# :param attrname: Attribute name.
# :return: An attribute.
# """
# attribute = object.__getattribute__(self, "_" + attrname)
# if attribute and callable(attribute):
# self.__dict__[attrname] = attribute(
# self.locale,
# self.seed,
# )
# return self.__dict__[attrname]
#
# def __dir__(self) -> List[str]:
# """Available data providers.
#
# The list of result will be used in AbstractField to
# determine method's class.
#
# :return: List of attributes.
# """
# attributes = []
# exclude = BaseProvider().__dict__.keys()
#
# for a in self.__dict__:
# if a not in exclude:
# if a.startswith("_"):
# attribute = a.replace("_", "", 1)
# attributes.append(attribute)
# else:
# attributes.append(a)
# return attributes
#
# def reseed(self, seed: Seed = None) -> None:
# """Reseed the internal random generator.
#
# Overrides method `BaseProvider.reseed()`.
#
# :param seed: Seed for random.
# """
# # Ensure that we reseed the random generator on Generic itself.
# super().reseed(seed)
#
# for attr in self.__dir__():
# try:
# provider = getattr(self, attr)
# provider.reseed(seed)
# except AttributeError:
# continue
#
# def add_provider(self, cls: Type[BaseProvider], **kwargs: Any) -> None:
# """Add a custom provider to Generic() object.
#
# :param cls: Custom provider.
# :return: None
# :raises TypeError: if cls is not class or is not a subclass
# of BaseProvider.
# """
# if inspect.isclass(cls):
# if not issubclass(cls, BaseProvider):
# raise TypeError(
# "The provider must be a "
# "subclass of mimesis.providers.BaseProvider"
# )
# try:
# name = cls.Meta.name # type: ignore
# except AttributeError:
# name = cls.__name__.lower()
#
# if "seed" in kwargs:
# kwargs.pop("seed")
#
# setattr(self, name, cls(seed=self.seed, **kwargs))
# else:
# raise TypeError("The provider must be a class")
#
# def add_providers(self, *providers: Type[BaseProvider]) -> None:
# """Add a lot of custom providers to Generic() object.
#
# :param providers: Custom providers.
# :return: None
# """
# for provider in providers:
# self.add_provider(provider)
#
# def __str__(self) -> str:
# """Human-readable representation of locale."""
# return f"{self.__class__.__name__} <{self.locale}>"
#
# Path: mimesis/types.py
# JSON = Dict[str, Any]
. Output only the next line. | self._gen = Generic(locale, seed) |
Predict the next line for this snippet: <|code_start|> dict_writer = csv.DictWriter(fp, fieldnames, **kwargs)
dict_writer.writeheader()
dict_writer.writerows(data)
def to_json(self, file_path: str, iterations: int = 100, **kwargs: Any) -> None:
"""Export a schema as a JSON file.
:param file_path: File path.
:param iterations: The required number of rows.
:param kwargs: Extra keyword arguments for :py:func:`json.dump` class.
*New in version 5.3.0*
"""
data = self.create(iterations)
with open(file_path, "w") as fp:
json.dump(data, fp, **kwargs)
def to_pickle(self, file_path: str, iterations: int = 100, **kwargs: Any) -> None:
"""Export a schema as the pickled representation of the object to the file.
:param file_path: File path.
:param iterations: The required number of rows.
:param kwargs: Extra keyword arguments for :py:func:`pickle.dump` class.
*New in version 5.3.0*
"""
data = self.create(iterations)
with open(file_path, "wb") as fp:
pickle.dump(data, fp, **kwargs)
<|code_end|>
with the help of current file imports:
import csv
import json
import pickle
import warnings
from typing import Any, Callable, ClassVar, Iterator, List, Optional, Sequence
from mimesis.exceptions import FieldError, SchemaError
from mimesis.locales import Locale
from mimesis.providers.generic import Generic
from mimesis.types import JSON, SchemaType, Seed
and context from other files:
# Path: mimesis/providers/generic.py
# class Generic(BaseProvider):
# """Class which contain all providers at one."""
#
# _DEFAULT_PROVIDERS = (
# Address,
# BinaryFile,
# Finance,
# Choice,
# Code,
# Choice,
# Datetime,
# Development,
# File,
# Food,
# Hardware,
# Internet,
# Numeric,
# Path,
# Payment,
# Person,
# Science,
# Text,
# Transport,
# Cryptographic,
# )
#
# def __init__(self, locale: Locale = Locale.DEFAULT, seed: Seed = None) -> None:
# """Initialize attributes lazily."""
# super().__init__(seed=seed)
# self.locale = locale
#
# for provider in self._DEFAULT_PROVIDERS:
# name = getattr(provider.Meta, "name") # type: ignore
#
# # Check if a provider is locale-dependent.
# if issubclass(provider, BaseDataProvider):
# setattr(self, f"_{name}", provider)
# elif issubclass(provider, BaseProvider):
# setattr(self, name, provider(seed=self.seed))
#
# class Meta:
# """Class for metadata."""
#
# name: Final[str] = "generic"
#
# def __getattr__(self, attrname: str) -> Any:
# """Get attribute without underscore.
#
# :param attrname: Attribute name.
# :return: An attribute.
# """
# attribute = object.__getattribute__(self, "_" + attrname)
# if attribute and callable(attribute):
# self.__dict__[attrname] = attribute(
# self.locale,
# self.seed,
# )
# return self.__dict__[attrname]
#
# def __dir__(self) -> List[str]:
# """Available data providers.
#
# The list of result will be used in AbstractField to
# determine method's class.
#
# :return: List of attributes.
# """
# attributes = []
# exclude = BaseProvider().__dict__.keys()
#
# for a in self.__dict__:
# if a not in exclude:
# if a.startswith("_"):
# attribute = a.replace("_", "", 1)
# attributes.append(attribute)
# else:
# attributes.append(a)
# return attributes
#
# def reseed(self, seed: Seed = None) -> None:
# """Reseed the internal random generator.
#
# Overrides method `BaseProvider.reseed()`.
#
# :param seed: Seed for random.
# """
# # Ensure that we reseed the random generator on Generic itself.
# super().reseed(seed)
#
# for attr in self.__dir__():
# try:
# provider = getattr(self, attr)
# provider.reseed(seed)
# except AttributeError:
# continue
#
# def add_provider(self, cls: Type[BaseProvider], **kwargs: Any) -> None:
# """Add a custom provider to Generic() object.
#
# :param cls: Custom provider.
# :return: None
# :raises TypeError: if cls is not class or is not a subclass
# of BaseProvider.
# """
# if inspect.isclass(cls):
# if not issubclass(cls, BaseProvider):
# raise TypeError(
# "The provider must be a "
# "subclass of mimesis.providers.BaseProvider"
# )
# try:
# name = cls.Meta.name # type: ignore
# except AttributeError:
# name = cls.__name__.lower()
#
# if "seed" in kwargs:
# kwargs.pop("seed")
#
# setattr(self, name, cls(seed=self.seed, **kwargs))
# else:
# raise TypeError("The provider must be a class")
#
# def add_providers(self, *providers: Type[BaseProvider]) -> None:
# """Add a lot of custom providers to Generic() object.
#
# :param providers: Custom providers.
# :return: None
# """
# for provider in providers:
# self.add_provider(provider)
#
# def __str__(self) -> str:
# """Human-readable representation of locale."""
# return f"{self.__class__.__name__} <{self.locale}>"
#
# Path: mimesis/types.py
# JSON = Dict[str, Any]
, which may contain function names, class names, or code. Output only the next line. | def create(self, iterations: int = 1) -> List[JSON]: |
Given the following code snippet before the placeholder: <|code_start|> return result
except KeyError:
raise FieldError(name)
def __str__(self) -> str:
return f"{self.__class__.__name__} <{self._gen.locale}>"
class Field(BaseField):
"""Greedy field.
The field whcih evaluates immediately.
Example:
>>> _ = Field()
>>> _('username')
Dogtag_1836
"""
def __call__(self, *args: Any, **kwargs: Any) -> Any:
return self.perform(*args, **kwargs)
class Schema:
"""Class which return list of filled schemas."""
_MIN_ITERATIONS_VALUE: ClassVar[int] = 1
__slots__ = ("_schema",)
<|code_end|>
, predict the next line using imports from the current file:
import csv
import json
import pickle
import warnings
from typing import Any, Callable, ClassVar, Iterator, List, Optional, Sequence
from mimesis.exceptions import FieldError, SchemaError
from mimesis.locales import Locale
from mimesis.providers.generic import Generic
from mimesis.types import JSON, SchemaType, Seed
and context including class names, function names, and sometimes code from other files:
# Path: mimesis/providers/generic.py
# class Generic(BaseProvider):
# """Class which contain all providers at one."""
#
# _DEFAULT_PROVIDERS = (
# Address,
# BinaryFile,
# Finance,
# Choice,
# Code,
# Choice,
# Datetime,
# Development,
# File,
# Food,
# Hardware,
# Internet,
# Numeric,
# Path,
# Payment,
# Person,
# Science,
# Text,
# Transport,
# Cryptographic,
# )
#
# def __init__(self, locale: Locale = Locale.DEFAULT, seed: Seed = None) -> None:
# """Initialize attributes lazily."""
# super().__init__(seed=seed)
# self.locale = locale
#
# for provider in self._DEFAULT_PROVIDERS:
# name = getattr(provider.Meta, "name") # type: ignore
#
# # Check if a provider is locale-dependent.
# if issubclass(provider, BaseDataProvider):
# setattr(self, f"_{name}", provider)
# elif issubclass(provider, BaseProvider):
# setattr(self, name, provider(seed=self.seed))
#
# class Meta:
# """Class for metadata."""
#
# name: Final[str] = "generic"
#
# def __getattr__(self, attrname: str) -> Any:
# """Get attribute without underscore.
#
# :param attrname: Attribute name.
# :return: An attribute.
# """
# attribute = object.__getattribute__(self, "_" + attrname)
# if attribute and callable(attribute):
# self.__dict__[attrname] = attribute(
# self.locale,
# self.seed,
# )
# return self.__dict__[attrname]
#
# def __dir__(self) -> List[str]:
# """Available data providers.
#
# The list of result will be used in AbstractField to
# determine method's class.
#
# :return: List of attributes.
# """
# attributes = []
# exclude = BaseProvider().__dict__.keys()
#
# for a in self.__dict__:
# if a not in exclude:
# if a.startswith("_"):
# attribute = a.replace("_", "", 1)
# attributes.append(attribute)
# else:
# attributes.append(a)
# return attributes
#
# def reseed(self, seed: Seed = None) -> None:
# """Reseed the internal random generator.
#
# Overrides method `BaseProvider.reseed()`.
#
# :param seed: Seed for random.
# """
# # Ensure that we reseed the random generator on Generic itself.
# super().reseed(seed)
#
# for attr in self.__dir__():
# try:
# provider = getattr(self, attr)
# provider.reseed(seed)
# except AttributeError:
# continue
#
# def add_provider(self, cls: Type[BaseProvider], **kwargs: Any) -> None:
# """Add a custom provider to Generic() object.
#
# :param cls: Custom provider.
# :return: None
# :raises TypeError: if cls is not class or is not a subclass
# of BaseProvider.
# """
# if inspect.isclass(cls):
# if not issubclass(cls, BaseProvider):
# raise TypeError(
# "The provider must be a "
# "subclass of mimesis.providers.BaseProvider"
# )
# try:
# name = cls.Meta.name # type: ignore
# except AttributeError:
# name = cls.__name__.lower()
#
# if "seed" in kwargs:
# kwargs.pop("seed")
#
# setattr(self, name, cls(seed=self.seed, **kwargs))
# else:
# raise TypeError("The provider must be a class")
#
# def add_providers(self, *providers: Type[BaseProvider]) -> None:
# """Add a lot of custom providers to Generic() object.
#
# :param providers: Custom providers.
# :return: None
# """
# for provider in providers:
# self.add_provider(provider)
#
# def __str__(self) -> str:
# """Human-readable representation of locale."""
# return f"{self.__class__.__name__} <{self.locale}>"
#
# Path: mimesis/types.py
# JSON = Dict[str, Any]
. Output only the next line. | def __init__(self, schema: SchemaType) -> None: |
Given the following code snippet before the placeholder: <|code_start|>"""Implements classes for generating data by schema."""
__all__ = ["BaseField", "Field", "Schema"]
class BaseField:
"""
BaseField is a class for generating data by the name of the method.
Instance of this object takes any string which represents the name
of any method of any supported data provider (:class:`~mimesis.Generic`)
and the ``**kwargs`` of the method.
See :class:`~mimesis.schema.BaseField.perform` for more details.
"""
class Meta:
base = True
def __init__(
self,
locale: Locale = Locale.DEFAULT,
<|code_end|>
, predict the next line using imports from the current file:
import csv
import json
import pickle
import warnings
from typing import Any, Callable, ClassVar, Iterator, List, Optional, Sequence
from mimesis.exceptions import FieldError, SchemaError
from mimesis.locales import Locale
from mimesis.providers.generic import Generic
from mimesis.types import JSON, SchemaType, Seed
and context including class names, function names, and sometimes code from other files:
# Path: mimesis/providers/generic.py
# class Generic(BaseProvider):
# """Class which contain all providers at one."""
#
# _DEFAULT_PROVIDERS = (
# Address,
# BinaryFile,
# Finance,
# Choice,
# Code,
# Choice,
# Datetime,
# Development,
# File,
# Food,
# Hardware,
# Internet,
# Numeric,
# Path,
# Payment,
# Person,
# Science,
# Text,
# Transport,
# Cryptographic,
# )
#
# def __init__(self, locale: Locale = Locale.DEFAULT, seed: Seed = None) -> None:
# """Initialize attributes lazily."""
# super().__init__(seed=seed)
# self.locale = locale
#
# for provider in self._DEFAULT_PROVIDERS:
# name = getattr(provider.Meta, "name") # type: ignore
#
# # Check if a provider is locale-dependent.
# if issubclass(provider, BaseDataProvider):
# setattr(self, f"_{name}", provider)
# elif issubclass(provider, BaseProvider):
# setattr(self, name, provider(seed=self.seed))
#
# class Meta:
# """Class for metadata."""
#
# name: Final[str] = "generic"
#
# def __getattr__(self, attrname: str) -> Any:
# """Get attribute without underscore.
#
# :param attrname: Attribute name.
# :return: An attribute.
# """
# attribute = object.__getattribute__(self, "_" + attrname)
# if attribute and callable(attribute):
# self.__dict__[attrname] = attribute(
# self.locale,
# self.seed,
# )
# return self.__dict__[attrname]
#
# def __dir__(self) -> List[str]:
# """Available data providers.
#
# The list of result will be used in AbstractField to
# determine method's class.
#
# :return: List of attributes.
# """
# attributes = []
# exclude = BaseProvider().__dict__.keys()
#
# for a in self.__dict__:
# if a not in exclude:
# if a.startswith("_"):
# attribute = a.replace("_", "", 1)
# attributes.append(attribute)
# else:
# attributes.append(a)
# return attributes
#
# def reseed(self, seed: Seed = None) -> None:
# """Reseed the internal random generator.
#
# Overrides method `BaseProvider.reseed()`.
#
# :param seed: Seed for random.
# """
# # Ensure that we reseed the random generator on Generic itself.
# super().reseed(seed)
#
# for attr in self.__dir__():
# try:
# provider = getattr(self, attr)
# provider.reseed(seed)
# except AttributeError:
# continue
#
# def add_provider(self, cls: Type[BaseProvider], **kwargs: Any) -> None:
# """Add a custom provider to Generic() object.
#
# :param cls: Custom provider.
# :return: None
# :raises TypeError: if cls is not class or is not a subclass
# of BaseProvider.
# """
# if inspect.isclass(cls):
# if not issubclass(cls, BaseProvider):
# raise TypeError(
# "The provider must be a "
# "subclass of mimesis.providers.BaseProvider"
# )
# try:
# name = cls.Meta.name # type: ignore
# except AttributeError:
# name = cls.__name__.lower()
#
# if "seed" in kwargs:
# kwargs.pop("seed")
#
# setattr(self, name, cls(seed=self.seed, **kwargs))
# else:
# raise TypeError("The provider must be a class")
#
# def add_providers(self, *providers: Type[BaseProvider]) -> None:
# """Add a lot of custom providers to Generic() object.
#
# :param providers: Custom providers.
# :return: None
# """
# for provider in providers:
# self.add_provider(provider)
#
# def __str__(self) -> str:
# """Human-readable representation of locale."""
# return f"{self.__class__.__name__} <{self.locale}>"
#
# Path: mimesis/types.py
# JSON = Dict[str, Any]
. Output only the next line. | seed: Seed = None, |
Given snippet: <|code_start|>
def validate_pesel(pesel):
"""Validate PESEL.
:param pesel: pesel to validate
:return: True if pesel is valid, False otherwise
"""
pesel_digits = list(map(int, pesel))
args = (9, 7, 3, 1, 9, 7, 3, 1, 9, 7)
sum_v = sum(map(lambda x: x[0] * x[1], zip(args, pesel_digits)))
return pesel_digits[-1] == (sum_v % 10)
def validate_regon(regon):
"""Validate REGON.
:param regon: regon to validate
:return: True if pesel is valid, False otherwise
"""
regon_digits = list(map(int, regon))
args = (8, 9, 2, 3, 4, 5, 6, 7)
sum_v = sum(map(lambda x: x[0] * x[1], zip(args, regon_digits)))
checksum_digit = sum_v % 11
if checksum_digit > 9:
return regon_digits[-1] == 0
return regon_digits[-1] == checksum_digit
@pytest.fixture
def pl():
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import pytest
from mimesis.builtins.pl import PolandSpecProvider
from mimesis.enums import Gender
from mimesis.providers import Datetime
and context:
# Path: mimesis/builtins/pl.py
# class PolandSpecProvider(BaseSpecProvider):
# """Class that provides special data for Poland (pl)."""
#
# def __init__(self, seed: Seed = None) -> None:
# """Initialize attributes."""
# super().__init__(locale=Locale.PL, seed=seed)
#
# class Meta:
# """The name of the provider."""
#
# name: Final[str] = "poland_provider"
#
# def nip(self) -> str:
# """Generate random valid 10-digit NIP.
#
# :return: Valid 10-digit NIP
# """
# nip_digits = [int(d) for d in str(self.random.randint(101, 998))]
# nip_digits += [self.random.randint(0, 9) for _ in range(6)]
# nip_coefficients = (6, 5, 7, 2, 3, 4, 5, 6, 7)
# sum_v = sum(nc * nd for nc, nd in zip(nip_coefficients, nip_digits))
#
# checksum_digit = sum_v % 11
# if checksum_digit > 9:
# return self.nip()
# nip_digits.append(checksum_digit)
# return "".join(map(str, nip_digits))
#
# def pesel(
# self, birth_date: Optional[DateTime] = None, gender: Optional[Gender] = None
# ) -> str:
# """Generate random 11-digit PESEL.
#
# :param birth_date: Initial birth date (optional)
# :param gender: Gender of person
# :return: Valid 11-digit PESEL
# """
# date_object = birth_date
# if not date_object:
# date_object = Datetime().datetime(1940, 2018)
#
# date = date_object.date()
# year = date.year % 100
# month = date.month
# day = date.day
#
# if 1800 <= year <= 1899:
# month += 80
# elif 2000 <= year <= 2099:
# month += 20
# elif 2100 <= year <= 2199:
# month += 40
# elif 2200 <= year <= 2299:
# month += 60
#
# series_number = self.random.randint(0, 999)
#
# pesel_digits = list(
# map(int, f"{year:02d}{month:02d}{day:02d}{series_number:03d}")
# )
#
# if gender == Gender.MALE:
# gender_digit = self.random.choice((1, 3, 5, 7, 9))
# elif gender == Gender.FEMALE:
# gender_digit = self.random.choice((0, 2, 4, 6, 8))
# else:
# gender_digit = self.random.choice(range(10))
#
# pesel_digits.append(gender_digit)
# pesel_coeffs = (9, 7, 3, 1, 9, 7, 3, 1, 9, 7)
# sum_v = sum(nc * nd for nc, nd in zip(pesel_coeffs, pesel_digits))
# checksum_digit = sum_v % 10
# pesel_digits.append(checksum_digit)
# return "".join(map(str, pesel_digits))
#
# def regon(self) -> str:
# """Generate random valid 9-digit REGON.
#
# :return: Valid 9-digit REGON
# """
# regon_coeffs = (8, 9, 2, 3, 4, 5, 6, 7)
# regon_digits = [self.random.randint(0, 9) for _ in range(8)]
# sum_v = sum(nc * nd for nc, nd in zip(regon_coeffs, regon_digits))
# checksum_digit = sum_v % 11
# if checksum_digit > 9:
# checksum_digit = 0
# regon_digits.append(checksum_digit)
# return "".join(map(str, regon_digits))
which might include code, classes, or functions. Output only the next line. | return PolandSpecProvider() |
Here is a snippet: <|code_start|> :param enum: Enum object.
:return: Value of item.
:raises NonEnumerableError: if ``item`` not in ``enum``.
"""
if item is None:
result = get_random_item(enum, self.random)
elif item and isinstance(item, enum):
result = item
else:
raise NonEnumerableError(enum)
return result.value
def __str__(self) -> str:
"""Human-readable representation of locale."""
return self.__class__.__name__
class BaseDataProvider(BaseProvider):
"""This is a base class for all data providers."""
_LOCALE_SEPARATOR = "-"
def __init__(self, locale: Locale = Locale.DEFAULT, seed: Seed = None) -> None:
"""Initialize attributes for data providers.
:param locale: Current locale.
:param seed: Seed to all the random functions.
"""
super().__init__(seed=seed)
<|code_end|>
. Write the next line using the current file imports:
import contextlib
import functools
import json
import operator
from functools import reduce
from pathlib import Path
from typing import Any, Generator, List, Optional
from mimesis.exceptions import NonEnumerableError
from mimesis.locales import Locale, validate_locale
from mimesis.random import Random, get_random_item
from mimesis.types import JSON, Seed
and context from other files:
# Path: mimesis/types.py
# JSON = Dict[str, Any]
, which may include functions, classes, or code. Output only the next line. | self._data: JSON = {} |
Given the following code snippet before the placeholder: <|code_start|>
class HelloView(FormView):
form_class = HelloWorldForm
template_name = 'djam/form.html'
def form_valid(self, form):
url = self.riff.reverse('hello_finished',
slug=form.cleaned_data['name'])
return HttpResponseRedirect(url)
<|code_end|>
, predict the next line using imports from the current file:
from django.http import HttpResponseRedirect
from djam.views.generic import FormView, TemplateView
from test_project.example_app.forms import HelloWorldForm
and context including class names, function names, and sometimes code from other files:
# Path: djam/views/generic.py
# class FormView(FloppyformsMixin, RiffViewMixin, generic.FormView):
# pass
#
# class TemplateView(RiffViewMixin, generic.TemplateView):
# pass
. Output only the next line. | class HelloFinishedView(TemplateView): |
Given snippet: <|code_start|>
class HelloWorldForm(forms.Form):
name = forms.SlugField()
class ExampleModelForm(GFKForm):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from djam.forms import GFKField, GFKForm
from test_project.example_app.models import ExampleModel
import floppyforms as forms
and context:
# Path: djam/forms.py
# class GFKField(forms.MultiValueField):
# widget = GFKWidget
#
# def __init__(self, model, content_object_field, *args, **kwargs):
# for field in model._meta.virtual_fields:
# if field.name == content_object_field:
# break
# else:
# raise FieldDoesNotExist("No virtual field called {0} exists."
# "".format(content_object_field))
#
# if not isinstance(field, GenericForeignKey):
# raise FieldDoesNotExist("No generic foreign key called {0}"
# "exists.".format(content_object_field))
#
# fields = (
# model._meta.get_field(field.ct_field).formfield(),
# model._meta.get_field(field.fk_field).formfield()
# )
# super(GFKField, self).__init__(fields, *args, **kwargs)
# widgets = self.widget.widgets = tuple(f.widget for f in fields)
# widgets[0].attrs['data-required'] = int(self.required)
#
# def compress(self, data_list):
# ct, pk = data_list
# model = ct.model_class()
# try:
# return model.objects.get(pk=pk)
# except model.DoesNotExist:
# raise ValidationError("Choose a valid object id.")
#
# class GFKForm(forms.ModelForm):
# def __init__(self, *args, **kwargs):
# super(GFKForm, self).__init__(*args, **kwargs)
# object_data = {}
# for name, field in self.fields.iteritems():
# if isinstance(field, GFKField):
# object_data[name] = getattr(self.instance, name, None)
# object_data.update(self.initial)
# self.initial = object_data
#
# def _post_clean(self):
# for name, field in self.fields.iteritems():
# if isinstance(field, GFKField) and name in self.cleaned_data:
# setattr(self.instance, name, self.cleaned_data[name])
# super(GFKForm, self)._post_clean()
which might include code, classes, or functions. Output only the next line. | content_object = GFKField(ExampleModel, 'content_object') |
Continue the code snippet: <|code_start|>
# Heavier security check -- don't allow redirection to a different
# host.
elif netloc and netloc != self.request.get_host():
redirect_to = self.riff.base_riff.get_default_url()
# Okay, security checks complete. Log the user in.
login(self.request, form.get_user())
if self.request.session.test_cookie_worked():
self.request.session.delete_test_cookie()
return HttpResponseRedirect(redirect_to)
def get_context_data(self, **kwargs):
context = super(LoginView, self).get_context_data(**kwargs)
context.update({
'redirect_to': self.redirect_to,
'redirect_field_name': self.redirect_field_name,
'site': Site.objects.get_current(),
})
return context
def get_crumbs(self):
return [
(None, self.riff.base_riff.display_name),
(None, _('Login')),
]
<|code_end|>
. Use current file imports:
import urlparse
from django.contrib.auth import login, logout
from django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm
from django.contrib.sites.models import Site
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from djam.views.generic import FormView, TemplateView
and context (classes, functions, or code) from other files:
# Path: djam/views/generic.py
# class FormView(FloppyformsMixin, RiffViewMixin, generic.FormView):
# pass
#
# class TemplateView(RiffViewMixin, generic.TemplateView):
# pass
. Output only the next line. | class LogoutView(TemplateView): |
Given snippet: <|code_start|>from __future__ import unicode_literals
class Riff(object):
widgets = []
riff_classes = []
display_name = None
slug = None
namespace = None
app_name = None
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.conf.urls import patterns, include, url
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseForbidden
from django.template.defaultfilters import slugify
from django.utils.datastructures import SortedDict
from djam.views.base import DefaultRedirectView
and context:
# Path: djam/views/base.py
# class DefaultRedirectView(RedirectView):
# permanent = False
#
# def get_redirect_url(self, **kwargs):
# return self.riff.get_default_url()
which might include code, classes, or functions. Output only the next line. | default_redirect_view = DefaultRedirectView |
Based on the snippet: <|code_start|> if isinstance(col, basestring):
try:
value = getattr(obj, col)
except AttributeError:
value = ''
if callable(value):
try:
value = value()
except TypeError:
value = ''
elif callable(col):
try:
value = col(obj)
except TypeError:
value = ''
else:
value = ''
return value
@register.filter
def order(column, form):
try:
return form.order_fields[column]
except (KeyError, AttributeError):
return ''
@register.filter
def has_add_permission(riff, request):
<|code_end|>
, predict the immediate next line with the help of imports:
from urllib import urlencode
from urlparse import urlsplit, urlunsplit, parse_qs
from django import template
from django.conf import settings
from django.forms.forms import pretty_name
from django.forms.widgets import CheckboxInput
from django.template.base import kwarg_re, TemplateSyntaxError
from django.utils.encoding import force_unicode, smart_str
from floppyforms.templatetags.floppyforms import FormRowNode
from djam.riffs.base import Riff
and context (classes, functions, sometimes code) from other files:
# Path: djam/riffs/base.py
# class Riff(object):
# widgets = []
# riff_classes = []
# display_name = None
# slug = None
# namespace = None
# app_name = None
# default_redirect_view = DefaultRedirectView
# widget_template = 'djam/_widget.html'
#
# def __init__(self, parent=None, namespace=None, app_name=None):
# self.parent = parent
# if self.display_name is None:
# raise ImproperlyConfigured('Please give me a display name')
# if self.slug is None:
# self.slug = slugify(self.display_name)
# self.namespace = namespace or self.namespace or self.slug
# if parent is None:
# self.base_riff = self
# self.path = (self,)
# else:
# self.base_riff = parent.base_riff
# self.path = parent.path + (self,)
# self._riffs = SortedDict()
# for riff_class in self.riff_classes:
# self.register(riff_class)
#
# def __getitem__(self, key):
# return self._riffs[key]
#
# def sort_riffs(self, key=None, reverse=False):
# if key is None:
# key = lambda r: r.display_name
# riffs = sorted(self._riffs.itervalues(), key=key, reverse=reverse)
# self._riffs.keyOrder = [r.namespace for r in riffs]
#
# @property
# def riffs(self):
# return self._riffs.values()
#
# def get_default_url(self):
# """
# Returns the default base url for this riff. Must be implemented by
# subclasses.
#
# """
# raise NotImplementedError('Subclasses must implement get_default_url.')
#
# def get_urls(self):
# urlpatterns = self.get_extra_urls()
#
# for riff in self.riffs:
# pattern = r'^{0}/'.format(riff.slug) if riff.slug else r'^'
# urlpatterns += patterns('',
# url(pattern, include(riff.urls)),
# )
#
# if self.default_redirect_view is not None:
# urlpatterns += patterns('',
# url(r'^$', self.default_redirect_view.as_view(riff=self)),
# )
#
# return urlpatterns
#
# def get_extra_urls(self):
# return patterns('',)
#
# @property
# def urls(self):
# return self.get_urls(), self.app_name, self.namespace
#
# def get_view_kwargs(self):
# return {'riff': self}
#
# def has_permission(self, request):
# if self.parent:
# return self.parent.has_permission(request)
# return True
#
# def is_hidden(self, request):
# return not self.has_permission(request)
#
# def get_unauthorized_response(self, request):
# if self.base_riff is not self:
# return self.base_riff.get_unauthorized_response(request)
# return HttpResponseForbidden()
#
# def wrap_view(self, view):
# return view
#
# def reverse(self, name, args=None, kwargs=None):
# return reverse('{namespace}:{viewname}'.format(namespace=self.full_namespace, viewname=name),
# args=args, kwargs=kwargs)
#
# @property
# def full_namespace(self):
# return ":".join([r.namespace for r in self.path])
#
# def register(self, riff_class):
# riff = riff_class(parent=self)
# if riff.namespace in self._riffs:
# raise ValueError("Riff with namespace {0} already "
# "registered.".format(riff.namespace))
# self._riffs[riff.namespace] = riff
. Output only the next line. | if not hasattr(request, 'user') or not isinstance(riff, Riff): |
Here is a snippet: <|code_start|> mod, cls_name = widget.__module__, widget.__class__.__name__
if mod == 'django.forms.widgets' and 'widget' not in kwargs:
kwargs['widget'] = getattr(floppyforms, cls_name)
rebuild = True
return rebuild, kwargs
def _post_formfield(self, field, db_field):
field.widget.attrs['data-required'] = int(field.required)
if issubclass(db_field.__class__, models.ManyToManyField):
msg = _('Hold down "Control", or "Command" on a Mac, to select '
'more than one.')
msg = unicode(string_concat(' ', msg))
if field.help_text.endswith(msg):
field.help_text = field.help_text[:-len(msg)]
if (isinstance(field, forms.ChoiceField) and
hasattr(field, 'queryset')):
model = field.queryset.model
if isinstance(field, forms.MultipleChoiceField):
msg = string_concat(_("Choose some "),
model._meta.verbose_name_plural,
"...")
else:
msg = string_concat(_("Choose a "),
model._meta.verbose_name,
"...")
field.widget.attrs['data-placeholder'] = msg
for riff in self.riff.base_riff.riffs:
if getattr(riff, 'model', None) == model:
if riff.has_add_permission(self.request):
<|code_end|>
. Write the next line using the current file imports:
from django.contrib.admin.util import flatten_fieldsets
from django.db import models
from django import forms
from django.forms.models import modelform_factory
from django.utils.cache import add_never_cache_headers
from django.utils.translation import ugettext as _, string_concat
from django.views import generic
from djam.widgets import AddWrapper
import floppyforms
and context from other files:
# Path: djam/widgets.py
# class AddWrapper(Widget):
# template_name = 'floppyforms/addwrapper.html'
#
# def __init__(self, widget, riff):
# self.widget = widget
# self.riff = riff
# super(AddWrapper, self).__init__()
#
# def get_context(self, name, value, attrs=None, **kwargs):
# attrs = attrs or {}
# context = {
# 'rendered': self.widget.render(name, value, attrs, **kwargs),
# 'riff': self.riff,
# }
# context['attrs'] = self.build_attrs(attrs)
# return context
#
# def value_from_datadict(self, data, files, name):
# return self.widget.value_from_datadict(data, files, name)
#
# def _has_changed(self, initial, data):
# return self.widget._has_changed(initial, data)
#
# def render(self, name, value, attrs=None, **kwargs):
# context = self.get_context(name, value, attrs, **kwargs)
# return loader.render_to_string(
# self.template_name,
# dictionary=context,
# context_instance=None)
, which may include functions, classes, or code. Output only the next line. | field.widget = AddWrapper(field.widget, riff) |
Using the snippet: <|code_start|>
These are not related to the EOFs computed from the
correlation matrix.
**Optional argument:**
*neofs*
Number of EOFs to return. Defaults to all EOFs. If the
number of EOFs requested is more than the number that are
available, then all available EOFs will be returned.
**Returns:**
*eofs*
An array with the ordered EOFs along the first dimension.
**Examples:**
All EOFs::
eofs = solver.eofsAsCorrelation()
The leading EOF::
eof1 = solver.eofsAsCorrelation(neofs=1)
"""
# Retrieve the specified number of PCs.
pcs = self.pcs(npcs=neofs, pcscaling=1)
# Compute the correlation of the PCs with the input field.
<|code_end|>
, determine the next line of code. You have imports:
import collections
import warnings
import numpy as np
import numpy.ma as ma
import dask.array
from .tools.standard import correlation_map, covariance_map
and context (class names, function names, or code) available:
# Path: lib/eofs/tools/standard.py
# def correlation_map(pcs, field):
# """Correlation maps for a set of PCs and a spatial-temporal field.
#
# Given an array where the columns are PCs (e.g., as output from
# `~eofs.standard.Eof.pcs`) and an array containing spatial-temporal
# data where the first dimension represents time, one correlation map
# per PC is computed.
#
# The field must have the same temporal dimension as the PCs. Any
# number of spatial dimensions (including zero) are allowed in the
# field and there can be any number of PCs.
#
# **Arguments:**
#
# *pcs*
# PCs as the columns of an array.
#
# *field*
# Spatial-temporal field with time as the first dimension.
#
# **Returns:**
#
# *correlation_maps*
# An array with the correlation maps along the first dimension.
#
# **Example:**
#
# Compute correlation maps for each PC::
#
# pcs = solver.pcs(pcscaling=1)
# correlation_maps = correlation_maps(pcs, field)
#
# """
# # Check PCs and fields for validity, flatten the arrays ready for the
# # computation and remove the mean along the leading dimension.
# pcs_cent, field_cent, out_shape = _check_flat_center(pcs, field)
# # Compute the standard deviation of the PCs and the fields along the time
# # dimension (the leading dimension).
# pcs_std = pcs_cent.std(axis=0)
# field_std = field_cent.std(axis=0)
# # Set the divisor.
# div = np.float64(pcs_cent.shape[0])
# # Compute the correlation map.
# cor = ma.dot(field_cent.T, pcs_cent).T / div
# cor = ma.masked_invalid(cor)
# cor /= ma.outer(pcs_std, field_std)
# # Return the correlation with the appropriate shape.
# return cor.reshape(out_shape)
#
# def covariance_map(pcs, field, ddof=1):
# """Covariance maps for a set of PCs and a spatial-temporal field.
#
# Given an array where the columns are PCs (e.g., as output from
# `eofs.standard.Eof.pcs`) and an array containing spatial-temporal
# data where the first dimension represents time, one covariance map
# per PC is computed.
#
# The field must have the same temporal dimension as the PCs. Any
# number of spatial dimensions (including zero) are allowed in the
# field and there can be any number of PCs.
#
# **Arguments:**
#
# *pcs*
# PCs as the columns of an array.
#
# *field*
# Spatial-temporal field with time as the first dimension.
#
# **Optional arguments:**
#
# *ddof*
# 'Delta degrees of freedom'. The divisor used to normalize
# the covariance matrix is *N - ddof* where *N* is the
# number of samples. Defaults to *1*.
#
# **Returns:**
#
# *covariance_maps*
# An array with the covariance maps along the first dimension.
#
# **Example:**
#
# Compute covariance maps for each PC::
#
# pcs = solver.pcs(pcscaling=1)
# covariance_maps = covariance_maps(pcs, field)
#
# """
# # Check PCs and fields for validity, flatten the arrays ready for the
# # computation and remove the mean along the leading dimension.
# pcs_cent, field_cent, out_shape = _check_flat_center(pcs, field)
# # Set the divisor according to the specified delta-degrees of freedom.
# div = np.float64(pcs_cent.shape[0] - ddof)
# # Compute the covariance map, making sure it has the appropriate shape.
# cov = (ma.dot(field_cent.T, pcs_cent).T / div).reshape(out_shape)
# cov = ma.masked_invalid(cov)
# return cov
. Output only the next line. | c = correlation_map( |
Based on the snippet: <|code_start|> (option 1).
**Returns:**
*eofs*
An array with the ordered EOFs along the first dimension.
**Examples:**
All EOFs::
eofs = solver.eofsAsCovariance()
The leading EOF::
eof1 = solver.eofsAsCovariance(neofs=1)
The leading EOF using un-scaled PCs::
eof1 = solver.eofsAsCovariance(neofs=1, pcscaling=0)
"""
pcs = self.pcs(npcs=neofs, pcscaling=pcscaling)
# Divide the input data by the weighting (if any) before computing
# the covariance maps.
data = self._data.reshape((self._records,) + self._originalshape)
if self._weights is not None:
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
data /= self._weights
<|code_end|>
, predict the immediate next line with the help of imports:
import collections
import warnings
import numpy as np
import numpy.ma as ma
import dask.array
from .tools.standard import correlation_map, covariance_map
and context (classes, functions, sometimes code) from other files:
# Path: lib/eofs/tools/standard.py
# def correlation_map(pcs, field):
# """Correlation maps for a set of PCs and a spatial-temporal field.
#
# Given an array where the columns are PCs (e.g., as output from
# `~eofs.standard.Eof.pcs`) and an array containing spatial-temporal
# data where the first dimension represents time, one correlation map
# per PC is computed.
#
# The field must have the same temporal dimension as the PCs. Any
# number of spatial dimensions (including zero) are allowed in the
# field and there can be any number of PCs.
#
# **Arguments:**
#
# *pcs*
# PCs as the columns of an array.
#
# *field*
# Spatial-temporal field with time as the first dimension.
#
# **Returns:**
#
# *correlation_maps*
# An array with the correlation maps along the first dimension.
#
# **Example:**
#
# Compute correlation maps for each PC::
#
# pcs = solver.pcs(pcscaling=1)
# correlation_maps = correlation_maps(pcs, field)
#
# """
# # Check PCs and fields for validity, flatten the arrays ready for the
# # computation and remove the mean along the leading dimension.
# pcs_cent, field_cent, out_shape = _check_flat_center(pcs, field)
# # Compute the standard deviation of the PCs and the fields along the time
# # dimension (the leading dimension).
# pcs_std = pcs_cent.std(axis=0)
# field_std = field_cent.std(axis=0)
# # Set the divisor.
# div = np.float64(pcs_cent.shape[0])
# # Compute the correlation map.
# cor = ma.dot(field_cent.T, pcs_cent).T / div
# cor = ma.masked_invalid(cor)
# cor /= ma.outer(pcs_std, field_std)
# # Return the correlation with the appropriate shape.
# return cor.reshape(out_shape)
#
# def covariance_map(pcs, field, ddof=1):
# """Covariance maps for a set of PCs and a spatial-temporal field.
#
# Given an array where the columns are PCs (e.g., as output from
# `eofs.standard.Eof.pcs`) and an array containing spatial-temporal
# data where the first dimension represents time, one covariance map
# per PC is computed.
#
# The field must have the same temporal dimension as the PCs. Any
# number of spatial dimensions (including zero) are allowed in the
# field and there can be any number of PCs.
#
# **Arguments:**
#
# *pcs*
# PCs as the columns of an array.
#
# *field*
# Spatial-temporal field with time as the first dimension.
#
# **Optional arguments:**
#
# *ddof*
# 'Delta degrees of freedom'. The divisor used to normalize
# the covariance matrix is *N - ddof* where *N* is the
# number of samples. Defaults to *1*.
#
# **Returns:**
#
# *covariance_maps*
# An array with the covariance maps along the first dimension.
#
# **Example:**
#
# Compute covariance maps for each PC::
#
# pcs = solver.pcs(pcscaling=1)
# covariance_maps = covariance_maps(pcs, field)
#
# """
# # Check PCs and fields for validity, flatten the arrays ready for the
# # computation and remove the mean along the leading dimension.
# pcs_cent, field_cent, out_shape = _check_flat_center(pcs, field)
# # Set the divisor according to the specified delta-degrees of freedom.
# div = np.float64(pcs_cent.shape[0] - ddof)
# # Compute the covariance map, making sure it has the appropriate shape.
# cov = (ma.dot(field_cent.T, pcs_cent).T / div).reshape(out_shape)
# cov = ma.masked_invalid(cov)
# return cov
. Output only the next line. | c = covariance_map(pcs, data, ddof=self._ddof) |
Continue the code snippet: <|code_start|> pytest.skip('missing dependencies required to test '
'the {!s} interface'.format(cls.interface))
cls.modify_solution()
cls.neofs = cls.solution['eigenvalues'].shape[0]
if cls.alternate_weights_arg is not None:
weights = cls.alternate_weights_arg
else:
weights = cls.solution['weights']
try:
cls.solver = solvers[cls.interface](cls.solution['sst'],
weights=weights)
except KeyError:
pytest.skip('missing dependencies required to test '
'the {!s} interface'.format(cls.interface))
@classmethod
def modify_solution(cls):
pass
def test_eigenvalues(self):
self.assert_array_almost_equal(
self.solver.eigenvalues(neigs=self.neofs),
self.solution['eigenvalues'])
@pytest.mark.parametrize('eofscaling', (0, 1, 2))
def test_eofs(self, eofscaling):
# EOFs should match the (possibly scaled) reference solution
eofs = self._tomasked(self.solver.eofs(neofs=self.neofs,
eofscaling=eofscaling))
reofs = self._tomasked(self.solution['eofs']).copy()
<|code_end|>
. Use current file imports:
import numpy as np
import numpy.ma as ma
import pytest
import eofs
import dask
import dask
import dask
import dask
import dask
from iris.cube import Cube
from eofs.tests import EofsTest
from .utils import sign_adjustments
from .reference import reference_solution
and context (classes, functions, or code) from other files:
# Path: lib/eofs/tests/utils.py
# def sign_adjustments(eofset, refeofset):
# """Sign adjustments for EOFs/PCs.
#
# Create a matrix of sign weights used for adjusting the sign of a set
# of EOFs or PCs to the sign of a reference set.
#
# The first dimension is assumed to be modes.
#
# **Arguments:**
#
# *eofset*
# Set of EOFs.
#
# *refeofset*
# Reference set of EOFs.
#
# """
# if eofset.shape != refeofset.shape:
# raise ValueError('input set has different shape from reference set')
# eofset, refeofset = __tomasked(eofset, refeofset)
# nmodes = eofset.shape[0]
# signs = np.ones([nmodes])
# shape = [nmodes] + [1] * (eofset.ndim - 1)
# eofset = eofset.reshape([nmodes, np.prod(eofset.shape[1:], dtype=np.int)])
# refeofset = refeofset.reshape([nmodes,
# np.prod(refeofset.shape[1:],
# dtype=np.int)])
# for mode in range(nmodes):
# i = 0
# try:
# while _close(eofset[mode, i], 0.) or \
# _close(refeofset[mode, i], 0.) \
# or np.ma.is_masked(eofset[mode, i]) or \
# np.ma.is_masked(refeofset[mode, i]):
# i += 1
# except IndexError:
# i = 0
# if np.sign(eofset[mode, i]) != np.sign(refeofset[mode, i]):
# signs[mode] = -1
# return signs.reshape(shape)
#
# Path: lib/eofs/tests/reference.py
# def reference_solution(container_type, weights):
# """Obtain a reference EOF analysis solution.
#
# **Arguments:**
#
# *container_type*
# Either 'standard', 'cdms', 'iris' or 'xarray'.
#
# *weights*
# Weights method. One of 'equal', 'latitude', or 'area'.
#
# """
# container_type = container_type.lower()
# weights = weights.lower()
# if container_type not in ('standard', 'iris', 'cdms', 'xarray'):
# raise ValueError("unknown container type "
# "'{!s}'".format(container_type))
# solution = _read_reference_solution(weights)
# time_units = 'months since 0-1-1 00:00:0.0'
# neofs = len(solution['eigenvalues'])
# _get_wrapper(container_type)(solution, neofs, time_units)
# return solution
. Output only the next line. | eofs *= sign_adjustments(eofs, reofs) |
Next line prediction: <|code_start|> pass
# Create a mapping from interface name to solver class.
solvers = {'standard': eofs.standard.Eof}
try:
solvers['cdms'] = eofs.cdms.Eof
except AttributeError:
pass
try:
solvers['iris'] = eofs.iris.Eof
except AttributeError:
pass
try:
solvers['xarray'] = eofs.xarray.Eof
except AttributeError:
pass
class SolutionTest(EofsTest):
"""Base class for all solution test classes."""
interface = None
weights = None
alternate_weights_arg = None
@classmethod
def setup_class(cls):
try:
<|code_end|>
. Use current file imports:
(import numpy as np
import numpy.ma as ma
import pytest
import eofs
import dask
import dask
import dask
import dask
import dask
from iris.cube import Cube
from eofs.tests import EofsTest
from .utils import sign_adjustments
from .reference import reference_solution)
and context including class names, function names, or small code snippets from other files:
# Path: lib/eofs/tests/utils.py
# def sign_adjustments(eofset, refeofset):
# """Sign adjustments for EOFs/PCs.
#
# Create a matrix of sign weights used for adjusting the sign of a set
# of EOFs or PCs to the sign of a reference set.
#
# The first dimension is assumed to be modes.
#
# **Arguments:**
#
# *eofset*
# Set of EOFs.
#
# *refeofset*
# Reference set of EOFs.
#
# """
# if eofset.shape != refeofset.shape:
# raise ValueError('input set has different shape from reference set')
# eofset, refeofset = __tomasked(eofset, refeofset)
# nmodes = eofset.shape[0]
# signs = np.ones([nmodes])
# shape = [nmodes] + [1] * (eofset.ndim - 1)
# eofset = eofset.reshape([nmodes, np.prod(eofset.shape[1:], dtype=np.int)])
# refeofset = refeofset.reshape([nmodes,
# np.prod(refeofset.shape[1:],
# dtype=np.int)])
# for mode in range(nmodes):
# i = 0
# try:
# while _close(eofset[mode, i], 0.) or \
# _close(refeofset[mode, i], 0.) \
# or np.ma.is_masked(eofset[mode, i]) or \
# np.ma.is_masked(refeofset[mode, i]):
# i += 1
# except IndexError:
# i = 0
# if np.sign(eofset[mode, i]) != np.sign(refeofset[mode, i]):
# signs[mode] = -1
# return signs.reshape(shape)
#
# Path: lib/eofs/tests/reference.py
# def reference_solution(container_type, weights):
# """Obtain a reference EOF analysis solution.
#
# **Arguments:**
#
# *container_type*
# Either 'standard', 'cdms', 'iris' or 'xarray'.
#
# *weights*
# Weights method. One of 'equal', 'latitude', or 'area'.
#
# """
# container_type = container_type.lower()
# weights = weights.lower()
# if container_type not in ('standard', 'iris', 'cdms', 'xarray'):
# raise ValueError("unknown container type "
# "'{!s}'".format(container_type))
# solution = _read_reference_solution(weights)
# time_units = 'months since 0-1-1 00:00:0.0'
# neofs = len(solution['eigenvalues'])
# _get_wrapper(container_type)(solution, neofs, time_units)
# return solution
. Output only the next line. | cls.solution = reference_solution(cls.interface, cls.weights) |
Predict the next line for this snippet: <|code_start|>
# Create a mapping from interface name to tools module and solver class.
tools = {'standard': eofs.tools.standard}
solvers = {'standard': eofs.standard.Eof}
try:
tools['cdms'] = eofs.tools.cdms
solvers['cdms'] = eofs.cdms.Eof
except AttributeError:
pass
try:
tools['iris'] = eofs.tools.iris
solvers['iris'] = eofs.iris.Eof
except AttributeError:
pass
try:
tools['xarray'] = eofs.tools.xarray
solvers['xarray'] = eofs.xarray.Eof
except AttributeError:
pass
class ToolsTest(EofsTest):
""""""
interface = None
weights = None
@classmethod
def setup_class(cls):
try:
<|code_end|>
with the help of current file imports:
import numpy as np
import numpy.ma as ma
import pytest
import eofs
from iris.cube import Cube
from eofs.tests import EofsTest
from .reference import reference_solution
from .utils import sign_adjustments
and context from other files:
# Path: lib/eofs/tests/reference.py
# def reference_solution(container_type, weights):
# """Obtain a reference EOF analysis solution.
#
# **Arguments:**
#
# *container_type*
# Either 'standard', 'cdms', 'iris' or 'xarray'.
#
# *weights*
# Weights method. One of 'equal', 'latitude', or 'area'.
#
# """
# container_type = container_type.lower()
# weights = weights.lower()
# if container_type not in ('standard', 'iris', 'cdms', 'xarray'):
# raise ValueError("unknown container type "
# "'{!s}'".format(container_type))
# solution = _read_reference_solution(weights)
# time_units = 'months since 0-1-1 00:00:0.0'
# neofs = len(solution['eigenvalues'])
# _get_wrapper(container_type)(solution, neofs, time_units)
# return solution
#
# Path: lib/eofs/tests/utils.py
# def sign_adjustments(eofset, refeofset):
# """Sign adjustments for EOFs/PCs.
#
# Create a matrix of sign weights used for adjusting the sign of a set
# of EOFs or PCs to the sign of a reference set.
#
# The first dimension is assumed to be modes.
#
# **Arguments:**
#
# *eofset*
# Set of EOFs.
#
# *refeofset*
# Reference set of EOFs.
#
# """
# if eofset.shape != refeofset.shape:
# raise ValueError('input set has different shape from reference set')
# eofset, refeofset = __tomasked(eofset, refeofset)
# nmodes = eofset.shape[0]
# signs = np.ones([nmodes])
# shape = [nmodes] + [1] * (eofset.ndim - 1)
# eofset = eofset.reshape([nmodes, np.prod(eofset.shape[1:], dtype=np.int)])
# refeofset = refeofset.reshape([nmodes,
# np.prod(refeofset.shape[1:],
# dtype=np.int)])
# for mode in range(nmodes):
# i = 0
# try:
# while _close(eofset[mode, i], 0.) or \
# _close(refeofset[mode, i], 0.) \
# or np.ma.is_masked(eofset[mode, i]) or \
# np.ma.is_masked(refeofset[mode, i]):
# i += 1
# except IndexError:
# i = 0
# if np.sign(eofset[mode, i]) != np.sign(refeofset[mode, i]):
# signs[mode] = -1
# return signs.reshape(shape)
, which may contain function names, class names, or code. Output only the next line. | cls.solution = reference_solution(cls.interface, cls.weights) |
Continue the code snippet: <|code_start|>
class ToolsTest(EofsTest):
""""""
interface = None
weights = None
@classmethod
def setup_class(cls):
try:
cls.solution = reference_solution(cls.interface, cls.weights)
except ValueError:
pytest.skip('missing dependencies required to test '
'the {!s} interface'.format(cls.interface))
cls.neofs = cls.solution['eigenvalues'].shape[0]
try:
cls.solver = solvers[cls.interface](
cls.solution['sst'], weights=cls.solution['weights'])
cls.tools = {'covariance': tools[cls.interface].covariance_map,
'correlation': tools[cls.interface].correlation_map}
except KeyError:
pytest.skip('missing dependencies required to test '
'the {!s} interface'.format(cls.interface))
def test_covariance_map(self):
# covariance maps should match reference EOFs as covariance
pcs = self.solver.pcs(npcs=self.neofs, pcscaling=1)
cov = self.tools['covariance'](pcs, self.solution['sst'])
eofs = self._tomasked(self.solver.eofs(neofs=self.neofs))
reofs = self._tomasked(self.solution['eofs'])
<|code_end|>
. Use current file imports:
import numpy as np
import numpy.ma as ma
import pytest
import eofs
from iris.cube import Cube
from eofs.tests import EofsTest
from .reference import reference_solution
from .utils import sign_adjustments
and context (classes, functions, or code) from other files:
# Path: lib/eofs/tests/reference.py
# def reference_solution(container_type, weights):
# """Obtain a reference EOF analysis solution.
#
# **Arguments:**
#
# *container_type*
# Either 'standard', 'cdms', 'iris' or 'xarray'.
#
# *weights*
# Weights method. One of 'equal', 'latitude', or 'area'.
#
# """
# container_type = container_type.lower()
# weights = weights.lower()
# if container_type not in ('standard', 'iris', 'cdms', 'xarray'):
# raise ValueError("unknown container type "
# "'{!s}'".format(container_type))
# solution = _read_reference_solution(weights)
# time_units = 'months since 0-1-1 00:00:0.0'
# neofs = len(solution['eigenvalues'])
# _get_wrapper(container_type)(solution, neofs, time_units)
# return solution
#
# Path: lib/eofs/tests/utils.py
# def sign_adjustments(eofset, refeofset):
# """Sign adjustments for EOFs/PCs.
#
# Create a matrix of sign weights used for adjusting the sign of a set
# of EOFs or PCs to the sign of a reference set.
#
# The first dimension is assumed to be modes.
#
# **Arguments:**
#
# *eofset*
# Set of EOFs.
#
# *refeofset*
# Reference set of EOFs.
#
# """
# if eofset.shape != refeofset.shape:
# raise ValueError('input set has different shape from reference set')
# eofset, refeofset = __tomasked(eofset, refeofset)
# nmodes = eofset.shape[0]
# signs = np.ones([nmodes])
# shape = [nmodes] + [1] * (eofset.ndim - 1)
# eofset = eofset.reshape([nmodes, np.prod(eofset.shape[1:], dtype=np.int)])
# refeofset = refeofset.reshape([nmodes,
# np.prod(refeofset.shape[1:],
# dtype=np.int)])
# for mode in range(nmodes):
# i = 0
# try:
# while _close(eofset[mode, i], 0.) or \
# _close(refeofset[mode, i], 0.) \
# or np.ma.is_masked(eofset[mode, i]) or \
# np.ma.is_masked(refeofset[mode, i]):
# i += 1
# except IndexError:
# i = 0
# if np.sign(eofset[mode, i]) != np.sign(refeofset[mode, i]):
# signs[mode] = -1
# return signs.reshape(shape)
. Output only the next line. | cov = self._tomasked(cov) * sign_adjustments(eofs, reofs) |
Continue the code snippet: <|code_start|># You should have received a copy of the GNU General Public License
# along with eofs. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function) # noqa
try:
except ImportError:
pass
# Create a mapping from interface name to solver class.
solvers = {'standard': multivariate.standard.MultivariateEof}
try:
solvers['cdms'] = multivariate.cdms.MultivariateEof
except AttributeError:
pass
try:
solvers['iris'] = multivariate.iris.MultivariateEof
except AttributeError:
pass
class MVErrorHandlersTest(EofsTest):
interface = None
weights = None
@classmethod
def setup_class(cls):
try:
<|code_end|>
. Use current file imports:
import numpy as np
import cdms2
import pytest
import eofs.multivariate as multivariate
from eofs.tests import EofsTest
from .reference import reference_multivariate_solution
and context (classes, functions, or code) from other files:
# Path: lib/eofs/tests/reference.py
# def reference_multivariate_solution(container_type, weights):
# """Obtain a reference multivariate EOF analysis solution.
#
# **Arguments:**
#
# *container_type*
# Either 'standard', 'cdms', or 'iris'.
#
# *weights*
# Weights method. One of 'equal', 'latitude', 'area',
# 'area_multi', or 'area_multi_mix'.
#
# """
# if weights.lower() == 'area':
# weights = 'area_multi'
# if weights.lower() == 'none_area':
# weights = 'area_multi_mix'
# solution = reference_solution(container_type, weights)
# nlon = len(solution['longitude'])
# slice1 = slice(0, nlon // 2)
# slice2 = slice(nlon // 2, None)
# for var in ('longitude',
# 'sst',
# 'eofs',
# 'eofscor',
# 'eofscov',
# 'weights',
# 'rcon',):
# try:
# solution[var] = (solution[var][..., slice1],
# solution[var][..., slice2])
# except TypeError:
# solution[var] = None, None
# return solution
. Output only the next line. | cls.solution = reference_multivariate_solution(cls.interface, |
Given the following code snippet before the placeholder: <|code_start|> def setup_class(cls):
try:
cls.solution = reference_multivariate_solution(cls.interface,
cls.weights)
except ValueError:
pytest.skip('missing dependencies required to test '
'the {!s} interface'.format(cls.interface))
cls.neofs = cls.solution['eigenvalues'].shape[0]
if cls.alternate_weights_arg is not None:
weights = cls.alternate_weights_arg
else:
weights = cls.solution['weights']
try:
cls.solver = solvers[cls.interface](cls.solution['sst'],
weights=weights)
except KeyError:
pytest.skip('missing dependencies required to test '
'the {!s} interface'.format(cls.interface))
def test_eigenvalues(self):
self.assert_array_almost_equal(
self.solver.eigenvalues(neigs=self.neofs),
self.solution['eigenvalues'])
@pytest.mark.parametrize('eofscaling', (0, 1, 2))
def test_eofs(self, eofscaling):
eofs = [self._tomasked(e)
for e in self.solver.eofs(neofs=self.neofs,
eofscaling=eofscaling)]
reofs = [self._tomasked(e).copy() for e in self.solution['eofs']]
<|code_end|>
, predict the next line using imports from the current file:
import numpy as np
import pytest
import eofs.multivariate as multivariate
from iris.cube import Cube
from eofs.tests import EofsTest
from .utils import sign_adjustments
from .reference import reference_multivariate_solution
and context including class names, function names, and sometimes code from other files:
# Path: lib/eofs/tests/utils.py
# def sign_adjustments(eofset, refeofset):
# """Sign adjustments for EOFs/PCs.
#
# Create a matrix of sign weights used for adjusting the sign of a set
# of EOFs or PCs to the sign of a reference set.
#
# The first dimension is assumed to be modes.
#
# **Arguments:**
#
# *eofset*
# Set of EOFs.
#
# *refeofset*
# Reference set of EOFs.
#
# """
# if eofset.shape != refeofset.shape:
# raise ValueError('input set has different shape from reference set')
# eofset, refeofset = __tomasked(eofset, refeofset)
# nmodes = eofset.shape[0]
# signs = np.ones([nmodes])
# shape = [nmodes] + [1] * (eofset.ndim - 1)
# eofset = eofset.reshape([nmodes, np.prod(eofset.shape[1:], dtype=np.int)])
# refeofset = refeofset.reshape([nmodes,
# np.prod(refeofset.shape[1:],
# dtype=np.int)])
# for mode in range(nmodes):
# i = 0
# try:
# while _close(eofset[mode, i], 0.) or \
# _close(refeofset[mode, i], 0.) \
# or np.ma.is_masked(eofset[mode, i]) or \
# np.ma.is_masked(refeofset[mode, i]):
# i += 1
# except IndexError:
# i = 0
# if np.sign(eofset[mode, i]) != np.sign(refeofset[mode, i]):
# signs[mode] = -1
# return signs.reshape(shape)
#
# Path: lib/eofs/tests/reference.py
# def reference_multivariate_solution(container_type, weights):
# """Obtain a reference multivariate EOF analysis solution.
#
# **Arguments:**
#
# *container_type*
# Either 'standard', 'cdms', or 'iris'.
#
# *weights*
# Weights method. One of 'equal', 'latitude', 'area',
# 'area_multi', or 'area_multi_mix'.
#
# """
# if weights.lower() == 'area':
# weights = 'area_multi'
# if weights.lower() == 'none_area':
# weights = 'area_multi_mix'
# solution = reference_solution(container_type, weights)
# nlon = len(solution['longitude'])
# slice1 = slice(0, nlon // 2)
# slice2 = slice(nlon // 2, None)
# for var in ('longitude',
# 'sst',
# 'eofs',
# 'eofscor',
# 'eofscov',
# 'weights',
# 'rcon',):
# try:
# solution[var] = (solution[var][..., slice1],
# solution[var][..., slice2])
# except TypeError:
# solution[var] = None, None
# return solution
. Output only the next line. | eofs = [e * sign_adjustments(e, r) for e, r in zip(eofs, reofs)] |
Predict the next line after this snippet: <|code_start|>from __future__ import (absolute_import, division, print_function) # noqa
try:
except ImportError:
pass
# Create a mapping from interface name to solver class.
solvers = {'standard': multivariate.standard.MultivariateEof}
try:
solvers['cdms'] = multivariate.cdms.MultivariateEof
except AttributeError:
pass
try:
solvers['iris'] = multivariate.iris.MultivariateEof
except AttributeError:
pass
class MVSolutionTest(EofsTest):
"""Base class for all multivariate solution test classes."""
interface = None
weights = None
alternate_weights_arg = None
@classmethod
def setup_class(cls):
try:
<|code_end|>
using the current file's imports:
import numpy as np
import pytest
import eofs.multivariate as multivariate
from iris.cube import Cube
from eofs.tests import EofsTest
from .utils import sign_adjustments
from .reference import reference_multivariate_solution
and any relevant context from other files:
# Path: lib/eofs/tests/utils.py
# def sign_adjustments(eofset, refeofset):
# """Sign adjustments for EOFs/PCs.
#
# Create a matrix of sign weights used for adjusting the sign of a set
# of EOFs or PCs to the sign of a reference set.
#
# The first dimension is assumed to be modes.
#
# **Arguments:**
#
# *eofset*
# Set of EOFs.
#
# *refeofset*
# Reference set of EOFs.
#
# """
# if eofset.shape != refeofset.shape:
# raise ValueError('input set has different shape from reference set')
# eofset, refeofset = __tomasked(eofset, refeofset)
# nmodes = eofset.shape[0]
# signs = np.ones([nmodes])
# shape = [nmodes] + [1] * (eofset.ndim - 1)
# eofset = eofset.reshape([nmodes, np.prod(eofset.shape[1:], dtype=np.int)])
# refeofset = refeofset.reshape([nmodes,
# np.prod(refeofset.shape[1:],
# dtype=np.int)])
# for mode in range(nmodes):
# i = 0
# try:
# while _close(eofset[mode, i], 0.) or \
# _close(refeofset[mode, i], 0.) \
# or np.ma.is_masked(eofset[mode, i]) or \
# np.ma.is_masked(refeofset[mode, i]):
# i += 1
# except IndexError:
# i = 0
# if np.sign(eofset[mode, i]) != np.sign(refeofset[mode, i]):
# signs[mode] = -1
# return signs.reshape(shape)
#
# Path: lib/eofs/tests/reference.py
# def reference_multivariate_solution(container_type, weights):
# """Obtain a reference multivariate EOF analysis solution.
#
# **Arguments:**
#
# *container_type*
# Either 'standard', 'cdms', or 'iris'.
#
# *weights*
# Weights method. One of 'equal', 'latitude', 'area',
# 'area_multi', or 'area_multi_mix'.
#
# """
# if weights.lower() == 'area':
# weights = 'area_multi'
# if weights.lower() == 'none_area':
# weights = 'area_multi_mix'
# solution = reference_solution(container_type, weights)
# nlon = len(solution['longitude'])
# slice1 = slice(0, nlon // 2)
# slice2 = slice(nlon // 2, None)
# for var in ('longitude',
# 'sst',
# 'eofs',
# 'eofscor',
# 'eofscov',
# 'weights',
# 'rcon',):
# try:
# solution[var] = (solution[var][..., slice1],
# solution[var][..., slice2])
# except TypeError:
# solution[var] = None, None
# return solution
. Output only the next line. | cls.solution = reference_multivariate_solution(cls.interface, |
Given snippet: <|code_start|> field and there can be any number of PCs.
**Arguments:**
*pcs*
PCs in a `cdms2` variable.
*field*
Spatial-temporal field in a `cdms2` variable.
**Optional arguments:**
*ddof*
'Delta degrees of freedom'. The divisor used to normalize
the covariance matrix is *N - ddof* where *N* is the
number of samples. Defaults to *1*.
**Returns:**
*covariance_maps*
A `cdms2` variable containing the covariance maps.
**Examples:**
Compute covariance maps for each PC::
pcs = eofobj.pcs(pcscaling=1)
covmaps = covariance_map(pcs, field)
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import cdms2
import numpy as np
from .standard import covariance_map as standard_covmap
from .standard import correlation_map as standard_cormap
from .generic import covcor_dimensions
and context:
# Path: lib/eofs/tools/standard.py
# def covariance_map(pcs, field, ddof=1):
# """Covariance maps for a set of PCs and a spatial-temporal field.
#
# Given an array where the columns are PCs (e.g., as output from
# `eofs.standard.Eof.pcs`) and an array containing spatial-temporal
# data where the first dimension represents time, one covariance map
# per PC is computed.
#
# The field must have the same temporal dimension as the PCs. Any
# number of spatial dimensions (including zero) are allowed in the
# field and there can be any number of PCs.
#
# **Arguments:**
#
# *pcs*
# PCs as the columns of an array.
#
# *field*
# Spatial-temporal field with time as the first dimension.
#
# **Optional arguments:**
#
# *ddof*
# 'Delta degrees of freedom'. The divisor used to normalize
# the covariance matrix is *N - ddof* where *N* is the
# number of samples. Defaults to *1*.
#
# **Returns:**
#
# *covariance_maps*
# An array with the covariance maps along the first dimension.
#
# **Example:**
#
# Compute covariance maps for each PC::
#
# pcs = solver.pcs(pcscaling=1)
# covariance_maps = covariance_maps(pcs, field)
#
# """
# # Check PCs and fields for validity, flatten the arrays ready for the
# # computation and remove the mean along the leading dimension.
# pcs_cent, field_cent, out_shape = _check_flat_center(pcs, field)
# # Set the divisor according to the specified delta-degrees of freedom.
# div = np.float64(pcs_cent.shape[0] - ddof)
# # Compute the covariance map, making sure it has the appropriate shape.
# cov = (ma.dot(field_cent.T, pcs_cent).T / div).reshape(out_shape)
# cov = ma.masked_invalid(cov)
# return cov
#
# Path: lib/eofs/tools/standard.py
# def correlation_map(pcs, field):
# """Correlation maps for a set of PCs and a spatial-temporal field.
#
# Given an array where the columns are PCs (e.g., as output from
# `~eofs.standard.Eof.pcs`) and an array containing spatial-temporal
# data where the first dimension represents time, one correlation map
# per PC is computed.
#
# The field must have the same temporal dimension as the PCs. Any
# number of spatial dimensions (including zero) are allowed in the
# field and there can be any number of PCs.
#
# **Arguments:**
#
# *pcs*
# PCs as the columns of an array.
#
# *field*
# Spatial-temporal field with time as the first dimension.
#
# **Returns:**
#
# *correlation_maps*
# An array with the correlation maps along the first dimension.
#
# **Example:**
#
# Compute correlation maps for each PC::
#
# pcs = solver.pcs(pcscaling=1)
# correlation_maps = correlation_maps(pcs, field)
#
# """
# # Check PCs and fields for validity, flatten the arrays ready for the
# # computation and remove the mean along the leading dimension.
# pcs_cent, field_cent, out_shape = _check_flat_center(pcs, field)
# # Compute the standard deviation of the PCs and the fields along the time
# # dimension (the leading dimension).
# pcs_std = pcs_cent.std(axis=0)
# field_std = field_cent.std(axis=0)
# # Set the divisor.
# div = np.float64(pcs_cent.shape[0])
# # Compute the correlation map.
# cor = ma.dot(field_cent.T, pcs_cent).T / div
# cor = ma.masked_invalid(cor)
# cor /= ma.outer(pcs_std, field_std)
# # Return the correlation with the appropriate shape.
# return cor.reshape(out_shape)
#
# Path: lib/eofs/tools/generic.py
# def covcor_dimensions(pc_dims, field_dims):
# """
# Extract the appropriate dimensions from a set of PCs and a field for
# construction of covariance/correlation map dimensions.
#
# """
# spatial_dims = field_dims[1:]
# try:
# pc_dim = pc_dims[1]
# except IndexError:
# pc_dim = None
# covcor_dims = [d for d in [pc_dim] + spatial_dims if d is not None]
# return covcor_dims
which might include code, classes, or functions. Output only the next line. | cov = standard_covmap(pcs.asma(), field.asma(), ddof=ddof) |
Next line prediction: <|code_start|>
Given a set of PCs in a `cdms2` variable (e.g., as output from
`eofs.cdms.Eof.pcs`) and a spatial-temporal field in a `cdms`
variable, one correlation map per PC is computed.
The field must have the same temporal dimension as the PCs. Any
number of spatial dimensions (including zero) are allowed in the
field and there can be any number of PCs.
**Arguments:**
*pcs*
PCs in a `cdms2` variable.
*field*
Spatial-temporal field in a `cdms2` variable.
**Returns:**
*correlation_maps*
A `cdms2` variable containing the correlation maps.
**Examples:**
Compute correlation maps for each PC::
pcs = eofobj.pcs(pcscaling=1)
cormaps = correlation_map(pcs, field)
"""
<|code_end|>
. Use current file imports:
(import cdms2
import numpy as np
from .standard import covariance_map as standard_covmap
from .standard import correlation_map as standard_cormap
from .generic import covcor_dimensions)
and context including class names, function names, or small code snippets from other files:
# Path: lib/eofs/tools/standard.py
# def covariance_map(pcs, field, ddof=1):
# """Covariance maps for a set of PCs and a spatial-temporal field.
#
# Given an array where the columns are PCs (e.g., as output from
# `eofs.standard.Eof.pcs`) and an array containing spatial-temporal
# data where the first dimension represents time, one covariance map
# per PC is computed.
#
# The field must have the same temporal dimension as the PCs. Any
# number of spatial dimensions (including zero) are allowed in the
# field and there can be any number of PCs.
#
# **Arguments:**
#
# *pcs*
# PCs as the columns of an array.
#
# *field*
# Spatial-temporal field with time as the first dimension.
#
# **Optional arguments:**
#
# *ddof*
# 'Delta degrees of freedom'. The divisor used to normalize
# the covariance matrix is *N - ddof* where *N* is the
# number of samples. Defaults to *1*.
#
# **Returns:**
#
# *covariance_maps*
# An array with the covariance maps along the first dimension.
#
# **Example:**
#
# Compute covariance maps for each PC::
#
# pcs = solver.pcs(pcscaling=1)
# covariance_maps = covariance_maps(pcs, field)
#
# """
# # Check PCs and fields for validity, flatten the arrays ready for the
# # computation and remove the mean along the leading dimension.
# pcs_cent, field_cent, out_shape = _check_flat_center(pcs, field)
# # Set the divisor according to the specified delta-degrees of freedom.
# div = np.float64(pcs_cent.shape[0] - ddof)
# # Compute the covariance map, making sure it has the appropriate shape.
# cov = (ma.dot(field_cent.T, pcs_cent).T / div).reshape(out_shape)
# cov = ma.masked_invalid(cov)
# return cov
#
# Path: lib/eofs/tools/standard.py
# def correlation_map(pcs, field):
# """Correlation maps for a set of PCs and a spatial-temporal field.
#
# Given an array where the columns are PCs (e.g., as output from
# `~eofs.standard.Eof.pcs`) and an array containing spatial-temporal
# data where the first dimension represents time, one correlation map
# per PC is computed.
#
# The field must have the same temporal dimension as the PCs. Any
# number of spatial dimensions (including zero) are allowed in the
# field and there can be any number of PCs.
#
# **Arguments:**
#
# *pcs*
# PCs as the columns of an array.
#
# *field*
# Spatial-temporal field with time as the first dimension.
#
# **Returns:**
#
# *correlation_maps*
# An array with the correlation maps along the first dimension.
#
# **Example:**
#
# Compute correlation maps for each PC::
#
# pcs = solver.pcs(pcscaling=1)
# correlation_maps = correlation_maps(pcs, field)
#
# """
# # Check PCs and fields for validity, flatten the arrays ready for the
# # computation and remove the mean along the leading dimension.
# pcs_cent, field_cent, out_shape = _check_flat_center(pcs, field)
# # Compute the standard deviation of the PCs and the fields along the time
# # dimension (the leading dimension).
# pcs_std = pcs_cent.std(axis=0)
# field_std = field_cent.std(axis=0)
# # Set the divisor.
# div = np.float64(pcs_cent.shape[0])
# # Compute the correlation map.
# cor = ma.dot(field_cent.T, pcs_cent).T / div
# cor = ma.masked_invalid(cor)
# cor /= ma.outer(pcs_std, field_std)
# # Return the correlation with the appropriate shape.
# return cor.reshape(out_shape)
#
# Path: lib/eofs/tools/generic.py
# def covcor_dimensions(pc_dims, field_dims):
# """
# Extract the appropriate dimensions from a set of PCs and a field for
# construction of covariance/correlation map dimensions.
#
# """
# spatial_dims = field_dims[1:]
# try:
# pc_dim = pc_dims[1]
# except IndexError:
# pc_dim = None
# covcor_dims = [d for d in [pc_dim] + spatial_dims if d is not None]
# return covcor_dims
. Output only the next line. | cor = standard_cormap(pcs.asma(), field.asma()) |
Given the following code snippet before the placeholder: <|code_start|> Given a set of PCs in a `cdms2` variable (e.g., as output from
`eofs.cdms.Eof.pcs`) and a spatial-temporal field in a `cdms`
variable, one correlation map per PC is computed.
The field must have the same temporal dimension as the PCs. Any
number of spatial dimensions (including zero) are allowed in the
field and there can be any number of PCs.
**Arguments:**
*pcs*
PCs in a `cdms2` variable.
*field*
Spatial-temporal field in a `cdms2` variable.
**Returns:**
*correlation_maps*
A `cdms2` variable containing the correlation maps.
**Examples:**
Compute correlation maps for each PC::
pcs = eofobj.pcs(pcscaling=1)
cormaps = correlation_map(pcs, field)
"""
cor = standard_cormap(pcs.asma(), field.asma())
<|code_end|>
, predict the next line using imports from the current file:
import cdms2
import numpy as np
from .standard import covariance_map as standard_covmap
from .standard import correlation_map as standard_cormap
from .generic import covcor_dimensions
and context including class names, function names, and sometimes code from other files:
# Path: lib/eofs/tools/standard.py
# def covariance_map(pcs, field, ddof=1):
# """Covariance maps for a set of PCs and a spatial-temporal field.
#
# Given an array where the columns are PCs (e.g., as output from
# `eofs.standard.Eof.pcs`) and an array containing spatial-temporal
# data where the first dimension represents time, one covariance map
# per PC is computed.
#
# The field must have the same temporal dimension as the PCs. Any
# number of spatial dimensions (including zero) are allowed in the
# field and there can be any number of PCs.
#
# **Arguments:**
#
# *pcs*
# PCs as the columns of an array.
#
# *field*
# Spatial-temporal field with time as the first dimension.
#
# **Optional arguments:**
#
# *ddof*
# 'Delta degrees of freedom'. The divisor used to normalize
# the covariance matrix is *N - ddof* where *N* is the
# number of samples. Defaults to *1*.
#
# **Returns:**
#
# *covariance_maps*
# An array with the covariance maps along the first dimension.
#
# **Example:**
#
# Compute covariance maps for each PC::
#
# pcs = solver.pcs(pcscaling=1)
# covariance_maps = covariance_maps(pcs, field)
#
# """
# # Check PCs and fields for validity, flatten the arrays ready for the
# # computation and remove the mean along the leading dimension.
# pcs_cent, field_cent, out_shape = _check_flat_center(pcs, field)
# # Set the divisor according to the specified delta-degrees of freedom.
# div = np.float64(pcs_cent.shape[0] - ddof)
# # Compute the covariance map, making sure it has the appropriate shape.
# cov = (ma.dot(field_cent.T, pcs_cent).T / div).reshape(out_shape)
# cov = ma.masked_invalid(cov)
# return cov
#
# Path: lib/eofs/tools/standard.py
# def correlation_map(pcs, field):
# """Correlation maps for a set of PCs and a spatial-temporal field.
#
# Given an array where the columns are PCs (e.g., as output from
# `~eofs.standard.Eof.pcs`) and an array containing spatial-temporal
# data where the first dimension represents time, one correlation map
# per PC is computed.
#
# The field must have the same temporal dimension as the PCs. Any
# number of spatial dimensions (including zero) are allowed in the
# field and there can be any number of PCs.
#
# **Arguments:**
#
# *pcs*
# PCs as the columns of an array.
#
# *field*
# Spatial-temporal field with time as the first dimension.
#
# **Returns:**
#
# *correlation_maps*
# An array with the correlation maps along the first dimension.
#
# **Example:**
#
# Compute correlation maps for each PC::
#
# pcs = solver.pcs(pcscaling=1)
# correlation_maps = correlation_maps(pcs, field)
#
# """
# # Check PCs and fields for validity, flatten the arrays ready for the
# # computation and remove the mean along the leading dimension.
# pcs_cent, field_cent, out_shape = _check_flat_center(pcs, field)
# # Compute the standard deviation of the PCs and the fields along the time
# # dimension (the leading dimension).
# pcs_std = pcs_cent.std(axis=0)
# field_std = field_cent.std(axis=0)
# # Set the divisor.
# div = np.float64(pcs_cent.shape[0])
# # Compute the correlation map.
# cor = ma.dot(field_cent.T, pcs_cent).T / div
# cor = ma.masked_invalid(cor)
# cor /= ma.outer(pcs_std, field_std)
# # Return the correlation with the appropriate shape.
# return cor.reshape(out_shape)
#
# Path: lib/eofs/tools/generic.py
# def covcor_dimensions(pc_dims, field_dims):
# """
# Extract the appropriate dimensions from a set of PCs and a field for
# construction of covariance/correlation map dimensions.
#
# """
# spatial_dims = field_dims[1:]
# try:
# pc_dim = pc_dims[1]
# except IndexError:
# pc_dim = None
# covcor_dims = [d for d in [pc_dim] + spatial_dims if d is not None]
# return covcor_dims
. Output only the next line. | outdims = covcor_dimensions(pcs.getAxisList(), field.getAxisList()) |
Here is a snippet: <|code_start|>try:
except ImportError:
pass
# Create a mapping from interface name to solver class.
solvers = {'standard': eofs.standard.Eof}
try:
solvers['cdms'] = eofs.cdms.Eof
except AttributeError:
pass
try:
solvers['iris'] = eofs.iris.Eof
except AttributeError:
pass
try:
solvers['xarray'] = eofs.xarray.Eof
except AttributeError:
pass
class ErrorHandlersTest(EofsTest):
interface = None
weights = None
@classmethod
def setup_class(cls):
try:
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
import cdms2
import pytest
import eofs
from eofs.tests import EofsTest
from .reference import reference_solution
and context from other files:
# Path: lib/eofs/tests/reference.py
# def reference_solution(container_type, weights):
# """Obtain a reference EOF analysis solution.
#
# **Arguments:**
#
# *container_type*
# Either 'standard', 'cdms', 'iris' or 'xarray'.
#
# *weights*
# Weights method. One of 'equal', 'latitude', or 'area'.
#
# """
# container_type = container_type.lower()
# weights = weights.lower()
# if container_type not in ('standard', 'iris', 'cdms', 'xarray'):
# raise ValueError("unknown container type "
# "'{!s}'".format(container_type))
# solution = _read_reference_solution(weights)
# time_units = 'months since 0-1-1 00:00:0.0'
# neofs = len(solution['eigenvalues'])
# _get_wrapper(container_type)(solution, neofs, time_units)
# return solution
, which may include functions, classes, or code. Output only the next line. | cls.solution = reference_solution(cls.interface, cls.weights) |
Continue the code snippet: <|code_start|>
class PaginatedList(ListView):
template_name = 'showcase/paginated_list.html'
model = Task
paginate_by = 20
class FormExampleView(FormView):
template_name = 'showcase/form.html'
<|code_end|>
. Use current file imports:
from django.views.generic import ListView, FormView
from .forms import FormExample
from .models import Task
and context (classes, functions, or code) from other files:
# Path: showcase/forms.py
# class FormExample(forms.Form):
# text = forms.CharField()
# email = forms.EmailField()
# number = forms.CharField(widget=forms.NumberInput())
# url = forms.CharField(widget=forms.URLInput())
# password = forms.CharField(widget=forms.PasswordInput())
# select = forms.ChoiceField(choices=COLOR_CHOICES)
# multi_select = forms.MultipleChoiceField(choices=COLOR_CHOICES)
# textarea = forms.CharField(widget=forms.Textarea())
# checkbox = forms.BooleanField()
# checkboxes = forms.MultipleChoiceField(
# choices=COLOR_CHOICES,
# widget=forms.CheckboxSelectMultiple()
# )
# radios = forms.ChoiceField(
# choices=COLOR_CHOICES,
# widget=forms.RadioSelect()
# )
# file = forms.FileField(required=True)
#
# Path: showcase/models.py
# class Task(models.Model):
# name = models.CharField(max_length=128)
# done = models.BooleanField(default=False)
. Output only the next line. | form_class = FormExample |
Predict the next line after this snippet: <|code_start|> (forms.MultipleChoiceField(
choices=COLOR_CHOICES,
widget=forms.CheckboxSelectMultiple()
), "Checkboxes", 'input', {
'name': 'input',
'type': 'checkbox',
'value': 'red'
}),
(forms.ChoiceField(
choices=COLOR_CHOICES,
widget=forms.RadioSelect()
), "Radios", 'input', {
'name': 'input',
'type': 'radio',
'value': 'red'
}),
(forms.FileField(), "File input", 'input', {
'name': 'input',
'type': 'file',
'class': ['file-input']
})
])
def test_input_rendering(field, tag, label, attributes):
class TestForm(forms.Form):
input = field
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['input'].label = label
<|code_end|>
using the current file's imports:
import pytest
from django import forms
from .utils import render_form, get_dom, element_has_all_attributes
and any relevant context from other files:
# Path: bulma/tests/utils.py
# def render_form(form):
# return render_template(
# """
# {% extends 'bulma/base.html' %}
# {% load bulma_tags %}
#
# {% block content %}
# <form method="post" enctype="multipart/form-data" action="." novalidate>
# {% csrf_token %}
# {{ form|bulma }}
# <button class="button is-primary">Submit</button>
# </form>
# {% endblock %}
# """, context={
# 'form': form
# }
# )
#
# def get_dom(html):
# return BeautifulSoup(html, 'html.parser')
#
# def element_has_all_attributes(element, attributes):
# for attribute_name, attribute_value in attributes.items():
# assert element.has_attr(attribute_name) is True
# print(element.get(attribute_name))
# assert element.get(attribute_name) == attribute_value, f'Element {element} has attribute "{attribute_name}" with value {attribute_value}'
# #return False
# #return True
. Output only the next line. | output = render_form(TestForm()) |
Given the code snippet: <|code_start|> choices=COLOR_CHOICES,
widget=forms.CheckboxSelectMultiple()
), "Checkboxes", 'input', {
'name': 'input',
'type': 'checkbox',
'value': 'red'
}),
(forms.ChoiceField(
choices=COLOR_CHOICES,
widget=forms.RadioSelect()
), "Radios", 'input', {
'name': 'input',
'type': 'radio',
'value': 'red'
}),
(forms.FileField(), "File input", 'input', {
'name': 'input',
'type': 'file',
'class': ['file-input']
})
])
def test_input_rendering(field, tag, label, attributes):
class TestForm(forms.Form):
input = field
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['input'].label = label
output = render_form(TestForm())
<|code_end|>
, generate the next line using the imports in this file:
import pytest
from django import forms
from .utils import render_form, get_dom, element_has_all_attributes
and context (functions, classes, or occasionally code) from other files:
# Path: bulma/tests/utils.py
# def render_form(form):
# return render_template(
# """
# {% extends 'bulma/base.html' %}
# {% load bulma_tags %}
#
# {% block content %}
# <form method="post" enctype="multipart/form-data" action="." novalidate>
# {% csrf_token %}
# {{ form|bulma }}
# <button class="button is-primary">Submit</button>
# </form>
# {% endblock %}
# """, context={
# 'form': form
# }
# )
#
# def get_dom(html):
# return BeautifulSoup(html, 'html.parser')
#
# def element_has_all_attributes(element, attributes):
# for attribute_name, attribute_value in attributes.items():
# assert element.has_attr(attribute_name) is True
# print(element.get(attribute_name))
# assert element.get(attribute_name) == attribute_value, f'Element {element} has attribute "{attribute_name}" with value {attribute_value}'
# #return False
# #return True
. Output only the next line. | dom = get_dom(output) |
Predict the next line for this snippet: <|code_start|> ), "Checkboxes", 'input', {
'name': 'input',
'type': 'checkbox',
'value': 'red'
}),
(forms.ChoiceField(
choices=COLOR_CHOICES,
widget=forms.RadioSelect()
), "Radios", 'input', {
'name': 'input',
'type': 'radio',
'value': 'red'
}),
(forms.FileField(), "File input", 'input', {
'name': 'input',
'type': 'file',
'class': ['file-input']
})
])
def test_input_rendering(field, tag, label, attributes):
class TestForm(forms.Form):
input = field
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['input'].label = label
output = render_form(TestForm())
dom = get_dom(output)
<|code_end|>
with the help of current file imports:
import pytest
from django import forms
from .utils import render_form, get_dom, element_has_all_attributes
and context from other files:
# Path: bulma/tests/utils.py
# def render_form(form):
# return render_template(
# """
# {% extends 'bulma/base.html' %}
# {% load bulma_tags %}
#
# {% block content %}
# <form method="post" enctype="multipart/form-data" action="." novalidate>
# {% csrf_token %}
# {{ form|bulma }}
# <button class="button is-primary">Submit</button>
# </form>
# {% endblock %}
# """, context={
# 'form': form
# }
# )
#
# def get_dom(html):
# return BeautifulSoup(html, 'html.parser')
#
# def element_has_all_attributes(element, attributes):
# for attribute_name, attribute_value in attributes.items():
# assert element.has_attr(attribute_name) is True
# print(element.get(attribute_name))
# assert element.get(attribute_name) == attribute_value, f'Element {element} has attribute "{attribute_name}" with value {attribute_value}'
# #return False
# #return True
, which may contain function names, class names, or code. Output only the next line. | element_has_all_attributes(dom.find(tag), attributes), f"{label} has attributes {str(attributes)}" |
Here is a snippet: <|code_start|>#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
"""The Lanshark Daemon serves files, discovery, and search requests"""
try:
except ImportError:
logger = logging.getLogger('lanshark')
<|code_end|>
. Write the next line using the current file imports:
import BaseHTTPServer
import cgi
import mimetypes
import os
import posixpath
import re
import shutil, socket, SocketServer, stat
import threading
import urllib, urllib2
import simplejson
import logging
from cStringIO import StringIO
from StringIO import StringIO
from lanshark.config import config
from lanshark import icons
from lanshark import network
from lanshark import sendfile
from cache import cached
and context from other files:
# Path: lanshark/config.py
# def in_pathlist(file, pathlist = os.environ.get("PATH").split(os.pathsep)):
# def select_app(*apps):
# def get_mediaplayer():
# def get_registry_value (subkey, name):
# def get_imageviewer():
# def get_openfile():
# def get_sys_encoding():
# def __init__(self, path):
# def debug(self):
# def set_prefix(self, value):
# def save(self, path=None):
# class Config(configuration.Config):
# LOG_LEVEL = Enum('CRITICAL',
# ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'),
# 'Sets the verbosity of logging output')
# LOG_TARGET = String('-',
# 'Target of logging output. Set to - to log stderr')
# LOG_FORMAT = String('%(levelname)s:%(name)s:%(message)s',
# 'Format of logging output. '
# 'See http://docs.python.org/lib/node422.html '
# 'for format description')
# BROADCAST_IP = String("255.255.255.255",
# "IP to use for udp broadcasts")
# PORT = Integer(31337, "Port to use for both UDP and TCP")
# CLIENT_PORT = Integer(31338, "Port for the clients udp sockets")
# SEARCH_TIMEOUT = Integer(5,
# "Time to wait for search results in seconds")
# DISCOVER_TIMEOUT = Integer(5,
# "Time to wait for new hosts to answer in seconds")
# DISCOVER_INTERVAL = Integer(60,
# "Interval to search for new computers in seconds")
# NETWORK_NAME = String("HELO",
# "Word to use for discovery, might act as simple password")
# CACHE_TIMEOUT = Integer(600, "HTTP cache time to live")
# SHARE_PATH = String("", "Path to the files you want to share")
# INCOMING_PATH = String("", "Path to store the downloaded files")
# MAX_SEARCH_RESULTS = Integer(128,
# "Maximal number of search results per peer")
# FOLDER_IMAGES = StringList([r"\.?folder\.(png|jpg|gif|img)$",
# r"cover\.(png|jpg|gif)$",
# r"(cover\-)?front\.(png|jpg|gif)$",
# r"cover.*?\.(png|jpg|gif)$",
# r"albumart.*?large\.jpg$",
# r"albumart.*?\.jpg"],
# "regexps to match the folder images/covers")
# MAX_IMAGE_SIZE = Integer(250000,
# "Maximal size of preview images/covers to use")
# DAEMON_IN_GUI = Boolean(True,
# "Integrates daemon in the gui process")
# RESOLVE_HOSTS = Boolean(False, "Resolve hostnames")
# INDEX_INTERVAL = Integer(3600,
# "Interval to update the fileindex in seconds")
# GUI_ICON_SIZE = Integer(48, "Icon size in the gtkui")
# PID_FILE = String(os.path.join("$configdir", "lanshark.pid"),
# "Location of the pid file")
# SOCKET_TIMEOUT = Integer(5000, "The timeout of tcp sockets in ms")
# VIDEO_PLAYER = String(get_mediaplayer(),
# "Command to play video files %s gets replaced with "
# "the url to the video")
# AUDIO_PLAYER = String(get_mediaplayer(),
# "Command to play audio files %s gets replaced with"
# "the url to the audio file ")
# IMAGE_VIEWER = String(get_imageviewer(),
# "Command to view images files %s gets replaced with"
# "the url to the image ")
# DISABLE_WEBINTERFACE = Boolean(False, "Do not show html interface")
# FS_ENCODING = String(sys.getfilesystemencoding(), "Filesystem encoding")
# SYS_ENCODING = String(get_sys_encoding(), 'System Encoding')
# HOSTNAME = String(socket.gethostname(), "The name of your host/share")
# DOWNLOAD_RELPATH = Boolean(True, "Use relative paths for downloads"
# "instead of absolute ones")
# INVISIBLE = Boolean(False, "Do not answer to discovery or search requests")
# STATUSICON = Boolean(True, "Show icon in statusbar")
# STATICHOSTS = StringList([], "Static peer entries for networks where udp"
# "broadcasts are not avaible. "
# "Exmaple: example.com:31337, 192.168.1.2:31337")
# HIDDEN_FILES = StringList([r"\..*", r"Thumbs\.db"],
# "Regexps to match hidden files")
# PSYCO = Boolean(False, "Enable psyco JIT")
# OPENFILE = String(get_openfile(), "The application used to start/open files")
# LANGUAGES = [locale.getdefaultlocale()[0] or "en_US", "en_US"]
# WEBSITE = "http://lanshark.29a.ch/"
# VERSION = Integer(6, "Version of the config file")
# DOWNLOAD_BS = 65536
#
# Path: lanshark/icons.py
# class IconFactory:
# class URLIconFactory(IconFactory):
# def guess_icon_name(self, filename):
# def guess_icon(self, filename):
# def has_icon(self, name):
# def get_icon(self, name):
# def __init__(self, documentroot, urlroot, ext=".png"):
# def get_icon(self, name):
#
# Path: lanshark/network.py
# def broadcast_dgram_socket(port):
#
# Path: lanshark/sendfile.py
# def sendfile(sock, fileobj):
# if not hasattr(fileobj, "fileno"):
# _sendfile(sock, fileobj)
# return
# offset = c.c_longlong(fileobj.tell())
# # seek backwards
# fileobj.seek(0, 2)
# size = fileobj.tell()
# sock.setblocking(1)
# sendfile64(sock.fileno(), fileobj.fileno(), c.byref(offset), c.c_longlong(size - offset.value))
, which may include functions, classes, or code. Output only the next line. | socket.getaddrinfo = cached(config.CACHE_TIMEOUT, stats=config.debug)( |
Here is a snippet: <|code_start|> LOG_FORMAT = String('%(levelname)s:%(name)s:%(message)s',
'Format of logging output. '
'See http://docs.python.org/lib/node422.html '
'for format description')
BROADCAST_IP = String("255.255.255.255",
"IP to use for udp broadcasts")
PORT = Integer(31337, "Port to use for both UDP and TCP")
CLIENT_PORT = Integer(31338, "Port for the clients udp sockets")
SEARCH_TIMEOUT = Integer(5,
"Time to wait for search results in seconds")
DISCOVER_TIMEOUT = Integer(5,
"Time to wait for new hosts to answer in seconds")
DISCOVER_INTERVAL = Integer(60,
"Interval to search for new computers in seconds")
NETWORK_NAME = String("HELO",
"Word to use for discovery, might act as simple password")
CACHE_TIMEOUT = Integer(600, "HTTP cache time to live")
SHARE_PATH = String("", "Path to the files you want to share")
INCOMING_PATH = String("", "Path to store the downloaded files")
MAX_SEARCH_RESULTS = Integer(128,
"Maximal number of search results per peer")
FOLDER_IMAGES = StringList([r"\.?folder\.(png|jpg|gif|img)$",
r"cover\.(png|jpg|gif)$",
r"(cover\-)?front\.(png|jpg|gif)$",
r"cover.*?\.(png|jpg|gif)$",
r"albumart.*?large\.jpg$",
r"albumart.*?\.jpg"],
"regexps to match the folder images/covers")
MAX_IMAGE_SIZE = Integer(250000,
"Maximal size of preview images/covers to use")
<|code_end|>
. Write the next line using the current file imports:
import os, sys
import socket
import locale
import _winreg
import psyco
import logging
from lanshark.configuration import Boolean, Integer, String, StringList, Enum
from lanshark import configuration
and context from other files:
# Path: lanshark/configuration.py
# class Boolean(Key):
# """Represents a bool"""
# keytype = bool
# def parse(self, value):
# return (value.lower() != "false")
#
# class Integer(Key):
# """Represents a int"""
# keytype = int
#
# class String(Key):
# """Represents a str"""
# keytype = str
#
# class StringList(List, String):
# "list of strings"
# pass
#
# class Enum(String):
# """Represents an enum"""
# def __init__(self, default, values, doc):
# """Creates new instance. `values` is a tuple of all valid enum
# values"""
# String.__init__(self, default, doc)
# self.values = values
#
# def parse(self, value):
# retval = String.parse(self, value)
# if retval not in self.values:
# raise ValueError('Invalid enum value %s' % retval)
# return retval
#
# Path: lanshark/configuration.py
# class Error(Exception):
# class Config(observable.Observable):
# class Key:
# class String(Key):
# class Integer(Key):
# class Float(float):
# class Boolean(Key):
# class Enum(String):
# class List:
# class StringList(List, String):
# class JSON(Key):
# class TestConfig(Config):
# def __init__(self, lineno, line, cause):
# def __str__(self):
# def __repr__(self):
# def __init__(self):
# def __setattr__(self, attr, value):
# def load(self, f):
# def _load(self, f):
# def save(self, f):
# def _save(self, f):
# def __init__(self, default, doc):
# def parse(self, value):
# def dump(self, value):
# def __repr__(self):
# def parse(self, value):
# def __init__(self, default, values, doc):
# def parse(self, value):
# def parse(self, value):
# def parse(value):
# def dump(self, value):
# def dump(value):
# def parse(self, value):
# def dump(self, value):
# def test():
, which may include functions, classes, or code. Output only the next line. | DAEMON_IN_GUI = Boolean(True, |
Given the code snippet: <|code_start|> player = player + " %s"
return player
def get_imageviewer():
return sys.platform.startswith("win") and "browser" or select_app(
"kuickshow", "display") + " %s" or "browser"
def get_openfile():
if sys.platform == "darwin":
return "open"
else:
return select_app("xdg-open", "exo-open", "gnome-open") or ""
def get_sys_encoding():
if sys.platform.startswith("win"):
return "utf-8"
return locale.getpreferredencoding()
class Config(configuration.Config):
LOG_LEVEL = Enum('CRITICAL',
('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'),
'Sets the verbosity of logging output')
LOG_TARGET = String('-',
'Target of logging output. Set to - to log stderr')
LOG_FORMAT = String('%(levelname)s:%(name)s:%(message)s',
'Format of logging output. '
'See http://docs.python.org/lib/node422.html '
'for format description')
BROADCAST_IP = String("255.255.255.255",
"IP to use for udp broadcasts")
<|code_end|>
, generate the next line using the imports in this file:
import os, sys
import socket
import locale
import _winreg
import psyco
import logging
from lanshark.configuration import Boolean, Integer, String, StringList, Enum
from lanshark import configuration
and context (functions, classes, or occasionally code) from other files:
# Path: lanshark/configuration.py
# class Boolean(Key):
# """Represents a bool"""
# keytype = bool
# def parse(self, value):
# return (value.lower() != "false")
#
# class Integer(Key):
# """Represents a int"""
# keytype = int
#
# class String(Key):
# """Represents a str"""
# keytype = str
#
# class StringList(List, String):
# "list of strings"
# pass
#
# class Enum(String):
# """Represents an enum"""
# def __init__(self, default, values, doc):
# """Creates new instance. `values` is a tuple of all valid enum
# values"""
# String.__init__(self, default, doc)
# self.values = values
#
# def parse(self, value):
# retval = String.parse(self, value)
# if retval not in self.values:
# raise ValueError('Invalid enum value %s' % retval)
# return retval
#
# Path: lanshark/configuration.py
# class Error(Exception):
# class Config(observable.Observable):
# class Key:
# class String(Key):
# class Integer(Key):
# class Float(float):
# class Boolean(Key):
# class Enum(String):
# class List:
# class StringList(List, String):
# class JSON(Key):
# class TestConfig(Config):
# def __init__(self, lineno, line, cause):
# def __str__(self):
# def __repr__(self):
# def __init__(self):
# def __setattr__(self, attr, value):
# def load(self, f):
# def _load(self, f):
# def save(self, f):
# def _save(self, f):
# def __init__(self, default, doc):
# def parse(self, value):
# def dump(self, value):
# def __repr__(self):
# def parse(self, value):
# def __init__(self, default, values, doc):
# def parse(self, value):
# def parse(self, value):
# def parse(value):
# def dump(self, value):
# def dump(value):
# def parse(self, value):
# def dump(self, value):
# def test():
. Output only the next line. | PORT = Integer(31337, "Port to use for both UDP and TCP") |
Here is a snippet: <|code_start|> except _winreg.error:
pass
return value
player = '"%s"' % get_registry_value('Software\\VideoLAN','VLC') or ''
else:
player = select_app("gmplayer", "gxine", "totem", "kaffeine", "vlc",
"mplayer", "xine")
if player:
player = player + " %s"
return player
def get_imageviewer():
return sys.platform.startswith("win") and "browser" or select_app(
"kuickshow", "display") + " %s" or "browser"
def get_openfile():
if sys.platform == "darwin":
return "open"
else:
return select_app("xdg-open", "exo-open", "gnome-open") or ""
def get_sys_encoding():
if sys.platform.startswith("win"):
return "utf-8"
return locale.getpreferredencoding()
class Config(configuration.Config):
LOG_LEVEL = Enum('CRITICAL',
('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'),
'Sets the verbosity of logging output')
<|code_end|>
. Write the next line using the current file imports:
import os, sys
import socket
import locale
import _winreg
import psyco
import logging
from lanshark.configuration import Boolean, Integer, String, StringList, Enum
from lanshark import configuration
and context from other files:
# Path: lanshark/configuration.py
# class Boolean(Key):
# """Represents a bool"""
# keytype = bool
# def parse(self, value):
# return (value.lower() != "false")
#
# class Integer(Key):
# """Represents a int"""
# keytype = int
#
# class String(Key):
# """Represents a str"""
# keytype = str
#
# class StringList(List, String):
# "list of strings"
# pass
#
# class Enum(String):
# """Represents an enum"""
# def __init__(self, default, values, doc):
# """Creates new instance. `values` is a tuple of all valid enum
# values"""
# String.__init__(self, default, doc)
# self.values = values
#
# def parse(self, value):
# retval = String.parse(self, value)
# if retval not in self.values:
# raise ValueError('Invalid enum value %s' % retval)
# return retval
#
# Path: lanshark/configuration.py
# class Error(Exception):
# class Config(observable.Observable):
# class Key:
# class String(Key):
# class Integer(Key):
# class Float(float):
# class Boolean(Key):
# class Enum(String):
# class List:
# class StringList(List, String):
# class JSON(Key):
# class TestConfig(Config):
# def __init__(self, lineno, line, cause):
# def __str__(self):
# def __repr__(self):
# def __init__(self):
# def __setattr__(self, attr, value):
# def load(self, f):
# def _load(self, f):
# def save(self, f):
# def _save(self, f):
# def __init__(self, default, doc):
# def parse(self, value):
# def dump(self, value):
# def __repr__(self):
# def parse(self, value):
# def __init__(self, default, values, doc):
# def parse(self, value):
# def parse(self, value):
# def parse(value):
# def dump(self, value):
# def dump(value):
# def parse(self, value):
# def dump(self, value):
# def test():
, which may include functions, classes, or code. Output only the next line. | LOG_TARGET = String('-', |
Here is a snippet: <|code_start|> return "utf-8"
return locale.getpreferredencoding()
class Config(configuration.Config):
LOG_LEVEL = Enum('CRITICAL',
('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'),
'Sets the verbosity of logging output')
LOG_TARGET = String('-',
'Target of logging output. Set to - to log stderr')
LOG_FORMAT = String('%(levelname)s:%(name)s:%(message)s',
'Format of logging output. '
'See http://docs.python.org/lib/node422.html '
'for format description')
BROADCAST_IP = String("255.255.255.255",
"IP to use for udp broadcasts")
PORT = Integer(31337, "Port to use for both UDP and TCP")
CLIENT_PORT = Integer(31338, "Port for the clients udp sockets")
SEARCH_TIMEOUT = Integer(5,
"Time to wait for search results in seconds")
DISCOVER_TIMEOUT = Integer(5,
"Time to wait for new hosts to answer in seconds")
DISCOVER_INTERVAL = Integer(60,
"Interval to search for new computers in seconds")
NETWORK_NAME = String("HELO",
"Word to use for discovery, might act as simple password")
CACHE_TIMEOUT = Integer(600, "HTTP cache time to live")
SHARE_PATH = String("", "Path to the files you want to share")
INCOMING_PATH = String("", "Path to store the downloaded files")
MAX_SEARCH_RESULTS = Integer(128,
"Maximal number of search results per peer")
<|code_end|>
. Write the next line using the current file imports:
import os, sys
import socket
import locale
import _winreg
import psyco
import logging
from lanshark.configuration import Boolean, Integer, String, StringList, Enum
from lanshark import configuration
and context from other files:
# Path: lanshark/configuration.py
# class Boolean(Key):
# """Represents a bool"""
# keytype = bool
# def parse(self, value):
# return (value.lower() != "false")
#
# class Integer(Key):
# """Represents a int"""
# keytype = int
#
# class String(Key):
# """Represents a str"""
# keytype = str
#
# class StringList(List, String):
# "list of strings"
# pass
#
# class Enum(String):
# """Represents an enum"""
# def __init__(self, default, values, doc):
# """Creates new instance. `values` is a tuple of all valid enum
# values"""
# String.__init__(self, default, doc)
# self.values = values
#
# def parse(self, value):
# retval = String.parse(self, value)
# if retval not in self.values:
# raise ValueError('Invalid enum value %s' % retval)
# return retval
#
# Path: lanshark/configuration.py
# class Error(Exception):
# class Config(observable.Observable):
# class Key:
# class String(Key):
# class Integer(Key):
# class Float(float):
# class Boolean(Key):
# class Enum(String):
# class List:
# class StringList(List, String):
# class JSON(Key):
# class TestConfig(Config):
# def __init__(self, lineno, line, cause):
# def __str__(self):
# def __repr__(self):
# def __init__(self):
# def __setattr__(self, attr, value):
# def load(self, f):
# def _load(self, f):
# def save(self, f):
# def _save(self, f):
# def __init__(self, default, doc):
# def parse(self, value):
# def dump(self, value):
# def __repr__(self):
# def parse(self, value):
# def __init__(self, default, values, doc):
# def parse(self, value):
# def parse(self, value):
# def parse(value):
# def dump(self, value):
# def dump(value):
# def parse(self, value):
# def dump(self, value):
# def test():
, which may include functions, classes, or code. Output only the next line. | FOLDER_IMAGES = StringList([r"\.?folder\.(png|jpg|gif|img)$", |
Given the code snippet: <|code_start|> reg = _winreg.OpenKey(hkey, subkey)
value = _winreg.QueryValue(reg, name)
_winreg.CloseKey(reg)
except _winreg.error:
pass
return value
player = '"%s"' % get_registry_value('Software\\VideoLAN','VLC') or ''
else:
player = select_app("gmplayer", "gxine", "totem", "kaffeine", "vlc",
"mplayer", "xine")
if player:
player = player + " %s"
return player
def get_imageviewer():
return sys.platform.startswith("win") and "browser" or select_app(
"kuickshow", "display") + " %s" or "browser"
def get_openfile():
if sys.platform == "darwin":
return "open"
else:
return select_app("xdg-open", "exo-open", "gnome-open") or ""
def get_sys_encoding():
if sys.platform.startswith("win"):
return "utf-8"
return locale.getpreferredencoding()
class Config(configuration.Config):
<|code_end|>
, generate the next line using the imports in this file:
import os, sys
import socket
import locale
import _winreg
import psyco
import logging
from lanshark.configuration import Boolean, Integer, String, StringList, Enum
from lanshark import configuration
and context (functions, classes, or occasionally code) from other files:
# Path: lanshark/configuration.py
# class Boolean(Key):
# """Represents a bool"""
# keytype = bool
# def parse(self, value):
# return (value.lower() != "false")
#
# class Integer(Key):
# """Represents a int"""
# keytype = int
#
# class String(Key):
# """Represents a str"""
# keytype = str
#
# class StringList(List, String):
# "list of strings"
# pass
#
# class Enum(String):
# """Represents an enum"""
# def __init__(self, default, values, doc):
# """Creates new instance. `values` is a tuple of all valid enum
# values"""
# String.__init__(self, default, doc)
# self.values = values
#
# def parse(self, value):
# retval = String.parse(self, value)
# if retval not in self.values:
# raise ValueError('Invalid enum value %s' % retval)
# return retval
#
# Path: lanshark/configuration.py
# class Error(Exception):
# class Config(observable.Observable):
# class Key:
# class String(Key):
# class Integer(Key):
# class Float(float):
# class Boolean(Key):
# class Enum(String):
# class List:
# class StringList(List, String):
# class JSON(Key):
# class TestConfig(Config):
# def __init__(self, lineno, line, cause):
# def __str__(self):
# def __repr__(self):
# def __init__(self):
# def __setattr__(self, attr, value):
# def load(self, f):
# def _load(self, f):
# def save(self, f):
# def _save(self, f):
# def __init__(self, default, doc):
# def parse(self, value):
# def dump(self, value):
# def __repr__(self):
# def parse(self, value):
# def __init__(self, default, values, doc):
# def parse(self, value):
# def parse(self, value):
# def parse(value):
# def dump(self, value):
# def dump(value):
# def parse(self, value):
# def dump(self, value):
# def test():
. Output only the next line. | LOG_LEVEL = Enum('CRITICAL', |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.