| Instruction | output_code |
|---|---|
Continue the code snippet: <|code_start|>
desc = '''
Visualize specific graphs, for a three-graph example:
python viewer.py 10 -i is_bipartite 1 -i is_integral 1 -i is_eulerian 1
'''
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('N', type=int, default=5,
help="Graph order (number of vertices) to query)")
parser.add_argument('--limit', type=int, default=15,
help="Max number of graphs to draw")
parser.add_argument('--cooling_step', type=float, default=.99,
help="Cooling step for the sfdp_layout")
parser.add_argument('--output', type=str, default=None,
help="If given, saves the image to this file")
parser.add_argument('-i', '--invariant_query', nargs=2, action='append', required=False,
help="Invariant to query (can be repeated)")
cargs = vars(parser.parse_args())
N = cargs["N"]
# Start the logger
logging.root.setLevel(logging.INFO)
# Connect to the database
conn = helper_functions.load_graph_database(N)
# Load the list of invariants to compute
<|code_end|>
. Use current file imports:
import logging
import argparse
import graph_tool as gt
import numpy as np
from src.helper_functions import grab_vector, load_options, load_graph_database
from src.invariants import convert_to_numpy
and context (classes, functions, or code) from other files:
# Path: src/helper_functions.py
# def grab_vector(connection, cmd, *args):
# return [x[0] for x in connection.execute(cmd, *args).fetchall()]
#
# def load_options(f_option_file="options_simple_connected.json"):
# # Load the file into a string
# try:
# with open(f_option_file) as FIN:
# raw_text = FIN.read()
# except:
# msg = "Couldn't find option file {}".format(f_option_file)
# raise IOError(msg)
#
# # Parse the text as json
# try:
# return json.loads(raw_text)
# except Exception as Ex:
# msg = "Couldn't parse JSON file {}, {}".format(f_option_file, Ex)
# raise IOError(msg)
#
# def load_graph_database(N, check_exist=True, special=False, timeout=5):
# ''' Given an input value of N, return a connection to the
# cooresponding database '''
#
# # Build the needed directories
# mkdir_p("database")
# mkdir_p("database/special")
#
# if not special:
# f_database = generate_database_name(N)
# else:
# f_database = generate_special_database_name(N)
#
# # Check if database exists, if so exit!
# if check_exist and not os.path.exists(f_database):
# err = "Database %s does not exist." % f_database
# logging.critical(err)
# exit()
#
# return sqlite3.connect(f_database, check_same_thread=False,
# timeout=timeout)
#
# Path: src/invariants.py
# def convert_to_numpy(adj, N, **kwargs):
# possible_edges = int ((N * (N + 1)) / 2)
#
# edge_map = np.binary_repr(adj, possible_edges)
# edge_int = [int(x) for x in edge_map]
#
# idx = np.triu_indices(N)
# A = np.zeros((N, N), dtype=np.int)
#
# A[idx] = edge_int
#
# # Works for loopless graphs only
# A += A.T
# return A
. Output only the next line. | options = load_options() |
Given the code snippet: <|code_start|>
desc = '''
Visualize specific graphs, for a three-graph example:
python viewer.py 10 -i is_bipartite 1 -i is_integral 1 -i is_eulerian 1
'''
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('N', type=int, default=5,
help="Graph order (number of vertices) to query)")
parser.add_argument('--limit', type=int, default=15,
help="Max number of graphs to draw")
parser.add_argument('--cooling_step', type=float, default=.99,
help="Cooling step for the sfdp_layout")
parser.add_argument('--output', type=str, default=None,
help="If given, saves the image to this file")
parser.add_argument('-i', '--invariant_query', nargs=2, action='append', required=False,
help="Invariant to query (can be repeated)")
cargs = vars(parser.parse_args())
N = cargs["N"]
# Start the logger
logging.root.setLevel(logging.INFO)
# Connect to the database
<|code_end|>
, generate the next line using the imports in this file:
import logging
import argparse
import graph_tool as gt
import numpy as np
from src.helper_functions import grab_vector, load_options, load_graph_database
from src.invariants import convert_to_numpy
and context (functions, classes, or occasionally code) from other files:
# Path: src/helper_functions.py
# def grab_vector(connection, cmd, *args):
# return [x[0] for x in connection.execute(cmd, *args).fetchall()]
#
# def load_options(f_option_file="options_simple_connected.json"):
# # Load the file into a string
# try:
# with open(f_option_file) as FIN:
# raw_text = FIN.read()
# except:
# msg = "Couldn't find option file {}".format(f_option_file)
# raise IOError(msg)
#
# # Parse the text as json
# try:
# return json.loads(raw_text)
# except Exception as Ex:
# msg = "Couldn't parse JSON file {}, {}".format(f_option_file, Ex)
# raise IOError(msg)
#
# def load_graph_database(N, check_exist=True, special=False, timeout=5):
# ''' Given an input value of N, return a connection to the
# cooresponding database '''
#
# # Build the needed directories
# mkdir_p("database")
# mkdir_p("database/special")
#
# if not special:
# f_database = generate_database_name(N)
# else:
# f_database = generate_special_database_name(N)
#
# # Check if database exists, if so exit!
# if check_exist and not os.path.exists(f_database):
# err = "Database %s does not exist." % f_database
# logging.critical(err)
# exit()
#
# return sqlite3.connect(f_database, check_same_thread=False,
# timeout=timeout)
#
# Path: src/invariants.py
# def convert_to_numpy(adj, N, **kwargs):
# possible_edges = int ((N * (N + 1)) / 2)
#
# edge_map = np.binary_repr(adj, possible_edges)
# edge_int = [int(x) for x in edge_map]
#
# idx = np.triu_indices(N)
# A = np.zeros((N, N), dtype=np.int)
#
# A[idx] = edge_int
#
# # Works for loopless graphs only
# A += A.T
# return A
. Output only the next line. | conn = helper_functions.load_graph_database(N) |
Using the snippet: <|code_start|> try:
int(val)
except Exception as ex:
err = "{}={} is {}".format(func_name, val, ex)
raise ValueError(err)
cmd_search = '''
SELECT adj FROM invariant_integer AS A
JOIN graph AS B ON A.graph_id = B.graph_id'''
constraints = ["{}={}".format(*items) for items in cargs["invariant_query"]]
if constraints:
cmd_search += ' WHERE ' + ' AND '.join(constraints)
cmd_search += " LIMIT {limit} ".format(**cargs)
# Add limit at some point
ADJ = grab_vector(conn, cmd_search)
logging.info("Found at least {} graphs matching the criteria".format(len(ADJ)))
#######################################################################################
def disjoint_graph_add(adj_list, N):
# Disjoint union of many graphs, assumes graphs are all size N
total_vertex = N * len(adj_list)
G = gt.Graph(directed=False)
G.add_vertex(total_vertex)
for k, adj in enumerate(adj_list):
<|code_end|>
, determine the next line of code. You have imports:
import logging
import argparse
import graph_tool as gt
import numpy as np
from src.helper_functions import grab_vector, load_options, load_graph_database
from src.invariants import convert_to_numpy
and context (class names, function names, or code) available:
# Path: src/helper_functions.py
# def grab_vector(connection, cmd, *args):
# return [x[0] for x in connection.execute(cmd, *args).fetchall()]
#
# def load_options(f_option_file="options_simple_connected.json"):
# # Load the file into a string
# try:
# with open(f_option_file) as FIN:
# raw_text = FIN.read()
# except:
# msg = "Couldn't find option file {}".format(f_option_file)
# raise IOError(msg)
#
# # Parse the text as json
# try:
# return json.loads(raw_text)
# except Exception as Ex:
# msg = "Couldn't parse JSON file {}, {}".format(f_option_file, Ex)
# raise IOError(msg)
#
# def load_graph_database(N, check_exist=True, special=False, timeout=5):
# ''' Given an input value of N, return a connection to the
# cooresponding database '''
#
# # Build the needed directories
# mkdir_p("database")
# mkdir_p("database/special")
#
# if not special:
# f_database = generate_database_name(N)
# else:
# f_database = generate_special_database_name(N)
#
# # Check if database exists, if so exit!
# if check_exist and not os.path.exists(f_database):
# err = "Database %s does not exist." % f_database
# logging.critical(err)
# exit()
#
# return sqlite3.connect(f_database, check_same_thread=False,
# timeout=timeout)
#
# Path: src/invariants.py
# def convert_to_numpy(adj, N, **kwargs):
# possible_edges = int ((N * (N + 1)) / 2)
#
# edge_map = np.binary_repr(adj, possible_edges)
# edge_int = [int(x) for x in edge_map]
#
# idx = np.triu_indices(N)
# A = np.zeros((N, N), dtype=np.int)
#
# A[idx] = edge_int
#
# # Works for loopless graphs only
# A += A.T
# return A
. Output only the next line. | A = convert_to_numpy(adj, N=N) |
Predict the next line for this snippet: <|code_start|>
desc = "Make a report of level 1 sequences"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-f', '--force', default=False, action='store_true')
cargs = vars(parser.parse_args())
# Start the logger
logging.root.setLevel(logging.INFO)
# Connect to the database and add structure info
f_seq_database = "database/sequence.db"
seq_conn = sqlite3.connect(f_seq_database, check_same_thread=False)
# Find the level 1 sequence_ids with at least 4 terms
cmd_search = '''SELECT sequence_id FROM stat_sequence
WHERE query_level=1 AND non_zero_terms>=4'''
<|code_end|>
with the help of current file imports:
import sqlite3
import logging
import argparse
import collections
from src.helper_functions import grab_vector, grab_all
and context from other files:
# Path: src/helper_functions.py
# def grab_vector(connection, cmd, *args):
# return [x[0] for x in connection.execute(cmd, *args).fetchall()]
#
# def grab_all(connection, cmd, *args):
# return connection.execute(cmd, *args).fetchall()
, which may contain function names, class names, or code. Output only the next line. | SID = grab_vector(seq_conn, cmd_search) |
Continue the code snippet: <|code_start|>
desc = "Make a report of level 1 sequences"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-f', '--force', default=False, action='store_true')
cargs = vars(parser.parse_args())
# Start the logger
logging.root.setLevel(logging.INFO)
# Connect to the database and add structure info
f_seq_database = "database/sequence.db"
seq_conn = sqlite3.connect(f_seq_database, check_same_thread=False)
# Find the level 1 sequence_ids with at least 4 terms
cmd_search = '''SELECT sequence_id FROM stat_sequence
WHERE query_level=1 AND non_zero_terms>=4'''
SID = grab_vector(seq_conn, cmd_search)
# Build the lookup table
cmd = '''SELECT function_name,invariant_id FROM ref_invariant_integer
ORDER BY invariant_id'''
<|code_end|>
. Use current file imports:
import sqlite3
import logging
import argparse
import collections
from src.helper_functions import grab_vector, grab_all
and context (classes, functions, or code) from other files:
# Path: src/helper_functions.py
# def grab_vector(connection, cmd, *args):
# return [x[0] for x in connection.execute(cmd, *args).fetchall()]
#
# def grab_all(connection, cmd, *args):
# return connection.execute(cmd, *args).fetchall()
. Output only the next line. | ref_lookup = dict(grab_all(seq_conn, cmd)) |
Predict the next line after this snippet: <|code_start|>
desc = "Make a report of the distinct sequences"
parser = argparse.ArgumentParser(description=desc)
cargs = vars(parser.parse_args())
# Start the logger
logging.root.setLevel(logging.INFO)
# Connect to the database
f_distinct_database = "database/distinct_seq.db"
conn = sqlite3.connect(f_distinct_database)
# Load the list of distinct invariants
options = load_options()
distinct_seq_names = options["distinct_sequences"]
cmd_build_seq = '''
SELECT N,coeff FROM distinct_sequence
WHERE function_name = "{}" ORDER BY N
'''
for name in distinct_seq_names:
cmd = cmd_build_seq.format(name)
<|code_end|>
using the current file's imports:
import sqlite3
import logging
import argparse
from src.helper_functions import grab_all, load_options
and any relevant context from other files:
# Path: src/helper_functions.py
# def grab_all(connection, cmd, *args):
# return connection.execute(cmd, *args).fetchall()
#
# def load_options(f_option_file="options_simple_connected.json"):
# # Load the file into a string
# try:
# with open(f_option_file) as FIN:
# raw_text = FIN.read()
# except:
# msg = "Couldn't find option file {}".format(f_option_file)
# raise IOError(msg)
#
# # Parse the text as json
# try:
# return json.loads(raw_text)
# except Exception as Ex:
# msg = "Couldn't parse JSON file {}, {}".format(f_option_file, Ex)
# raise IOError(msg)
. Output only the next line. | n_val, seq = zip(*grab_all(conn, cmd)) |
Given snippet: <|code_start|>
desc = "Make a report of the distinct sequences"
parser = argparse.ArgumentParser(description=desc)
cargs = vars(parser.parse_args())
# Start the logger
logging.root.setLevel(logging.INFO)
# Connect to the database
f_distinct_database = "database/distinct_seq.db"
conn = sqlite3.connect(f_distinct_database)
# Load the list of distinct invariants
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sqlite3
import logging
import argparse
from src.helper_functions import grab_all, load_options
and context:
# Path: src/helper_functions.py
# def grab_all(connection, cmd, *args):
# return connection.execute(cmd, *args).fetchall()
#
# def load_options(f_option_file="options_simple_connected.json"):
# # Load the file into a string
# try:
# with open(f_option_file) as FIN:
# raw_text = FIN.read()
# except:
# msg = "Couldn't find option file {}".format(f_option_file)
# raise IOError(msg)
#
# # Parse the text as json
# try:
# return json.loads(raw_text)
# except Exception as Ex:
# msg = "Couldn't parse JSON file {}, {}".format(f_option_file, Ex)
# raise IOError(msg)
which might include code, classes, or functions. Output only the next line. | options = load_options() |
Given the following code snippet before the placeholder: <|code_start|>
desc = "Make a report of level 2 sequences"
parser = argparse.ArgumentParser(description=desc)
cargs = vars(parser.parse_args())
# Start the logger
logging.root.setLevel(logging.INFO)
# Connect to the database and add structure info
f_seq_database = "database/sequence.db"
seq_conn = sqlite3.connect(f_seq_database)
# Build the lookup table
cmd = '''SELECT function_name,invariant_id FROM ref_invariant_integer
ORDER BY invariant_id'''
ref_lookup = dict(grab_all(seq_conn, cmd))
ref_lookup_inv = {v: k for k, v in ref_lookup.items()}
func_names = ref_lookup.keys()
logging.info("Loading level 2 sequences")
cmd_select_interesting = '''
SELECT sequence_id FROM stat_sequence
WHERE query_level=2 AND non_zero_terms>4'''
<|code_end|>
, predict the next line using imports from the current file:
import sqlite3
import logging
import argparse
import collections
from src.helper_functions import grab_vector, grab_all
and context including class names, function names, and sometimes code from other files:
# Path: src/helper_functions.py
# def grab_vector(connection, cmd, *args):
# return [x[0] for x in connection.execute(cmd, *args).fetchall()]
#
# def grab_all(connection, cmd, *args):
# return connection.execute(cmd, *args).fetchall()
. Output only the next line. | interesting_idx = set(grab_vector(seq_conn, cmd_select_interesting)) |
Next line prediction: <|code_start|>
desc = "Make a report of level 2 sequences"
parser = argparse.ArgumentParser(description=desc)
cargs = vars(parser.parse_args())
# Start the logger
logging.root.setLevel(logging.INFO)
# Connect to the database and add structure info
f_seq_database = "database/sequence.db"
seq_conn = sqlite3.connect(f_seq_database)
# Build the lookup table
cmd = '''SELECT function_name,invariant_id FROM ref_invariant_integer
ORDER BY invariant_id'''
<|code_end|>
. Use current file imports:
(import sqlite3
import logging
import argparse
import collections
from src.helper_functions import grab_vector, grab_all)
and context including class names, function names, or small code snippets from other files:
# Path: src/helper_functions.py
# def grab_vector(connection, cmd, *args):
# return [x[0] for x in connection.execute(cmd, *args).fetchall()]
#
# def grab_all(connection, cmd, *args):
# return connection.execute(cmd, *args).fetchall()
. Output only the next line. | ref_lookup = dict(grab_all(seq_conn, cmd)) |
Given the following code snippet before the placeholder: <|code_start|>
desc = "Output the computed relations between the invariant sequences"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-f', '--force', default=False, action='store_true')
cargs = vars(parser.parse_args())
# Start the logger
logging.root.setLevel(logging.INFO)
ignored_functions = []
# Connect to the database and add structure info
f_seq_database = "database/sequence.db"
seq_conn = sqlite3.connect(f_seq_database, check_same_thread=False)
# Load the list of invariants to compute
options = load_options()
func_names = options["invariant_function_names"]
# These will use a different operator
special_conditionals = options["sequence_info"]["special_conditionals"]
# These variants will not be used in the powerset construction
excluded_terms = options["sequence_info"]["excluded_terms"]
# Build the lookup table
cmd = '''SELECT function_name,invariant_id FROM ref_invariant_integer
ORDER BY invariant_id'''
<|code_end|>
, predict the next line using imports from the current file:
import sqlite3
import logging
import argparse
from src.helper_functions import grab_all, load_options
and context including class names, function names, and sometimes code from other files:
# Path: src/helper_functions.py
# def grab_all(connection, cmd, *args):
# return connection.execute(cmd, *args).fetchall()
#
# def load_options(f_option_file="options_simple_connected.json"):
# # Load the file into a string
# try:
# with open(f_option_file) as FIN:
# raw_text = FIN.read()
# except:
# msg = "Couldn't find option file {}".format(f_option_file)
# raise IOError(msg)
#
# # Parse the text as json
# try:
# return json.loads(raw_text)
# except Exception as Ex:
# msg = "Couldn't parse JSON file {}, {}".format(f_option_file, Ex)
# raise IOError(msg)
. Output only the next line. | ref_lookup = dict(grab_all(seq_conn, cmd)) |
Predict the next line after this snippet: <|code_start|>
desc = "Output the computed relations between the invariant sequences"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-f', '--force', default=False, action='store_true')
cargs = vars(parser.parse_args())
# Start the logger
logging.root.setLevel(logging.INFO)
ignored_functions = []
# Connect to the database and add structure info
f_seq_database = "database/sequence.db"
seq_conn = sqlite3.connect(f_seq_database, check_same_thread=False)
# Load the list of invariants to compute
<|code_end|>
using the current file's imports:
import sqlite3
import logging
import argparse
from src.helper_functions import grab_all, load_options
and any relevant context from other files:
# Path: src/helper_functions.py
# def grab_all(connection, cmd, *args):
# return connection.execute(cmd, *args).fetchall()
#
# def load_options(f_option_file="options_simple_connected.json"):
# # Load the file into a string
# try:
# with open(f_option_file) as FIN:
# raw_text = FIN.read()
# except:
# msg = "Couldn't find option file {}".format(f_option_file)
# raise IOError(msg)
#
# # Parse the text as json
# try:
# return json.loads(raw_text)
# except Exception as Ex:
# msg = "Couldn't parse JSON file {}, {}".format(f_option_file, Ex)
# raise IOError(msg)
. Output only the next line. | options = load_options() |
Continue the code snippet: <|code_start|>
def is_manager(user):
return 'manager' in (s.lower() for s in user.groups.values_list('name', flat=True))
class SprintView(DetailView):
template_name = 'scrum/sprint.html'
model = Sprint
def get_context_data(self, **kwargs):
context = super(SprintView, self).get_context_data(**kwargs)
sprint = kwargs.get('object')
#Get all tasks
sprint_tasks = sprint.tasks.all()
#Set template's data
context['sprint'] = sprint
context['todos'] = sprint_tasks.filter(status='TO')
context['doings'] = sprint_tasks.filter(status='IN')
context['dones'] = sprint_tasks.filter(status='DO')
context['bugs'] = sprint_tasks.filter(status='PR')
context['backlogs'] = sprint_tasks.filter(status='BA')
#Can close a sprint if the user is a manager, if the sprint is not closed and if there is at least one task
context['can_close_sprint'] = is_manager(self.request.user) and not sprint.is_closed and sprint.tasks.count() > 0
return context
class ProjectListView(ListView):
<|code_end|>
. Use current file imports:
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.db.models import Q
from .models import Project, Story, Task, Sprint, SprintTasks
from .forms import ProjectForm, StoryForm, TaskForm, SprintTasksForm
import json
and context (classes, functions, or code) from other files:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
#
# Path: scrum/forms.py
# class ProjectForm(ModelForm):
# class Meta:
# model = Project
#
# class StoryForm(ModelForm):
# class Meta:
# model = Story
#
# class TaskForm(ModelForm):
# class Meta:
# model = Task
#
# class SprintTasksForm(ModelForm):
# class Meta:
# model = SprintTasks
. Output only the next line. | model = Project |
Given the code snippet: <|code_start|> **response_kwargs
)
def add_story(request, pk_project):
if request.method == 'POST':
response_data = {}
#Add the project to post values
post_values = request.POST.copy()
post_values['project'] = pk_project
#Call form with post values
f = StoryForm(post_values)
if f.is_valid():
new_story = f.save()
response_data['story_pk'] = new_story.pk
else:
print f.errors
return HttpResponse(json.dumps(response_data), content_type="application/json")
else:
raise Http404
def update_story(request, pk_story):
if request.method == 'POST':
response_data = {}
if pk_story:
<|code_end|>
, generate the next line using the imports in this file:
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.db.models import Q
from .models import Project, Story, Task, Sprint, SprintTasks
from .forms import ProjectForm, StoryForm, TaskForm, SprintTasksForm
import json
and context (functions, classes, or occasionally code) from other files:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
#
# Path: scrum/forms.py
# class ProjectForm(ModelForm):
# class Meta:
# model = Project
#
# class StoryForm(ModelForm):
# class Meta:
# model = Story
#
# class TaskForm(ModelForm):
# class Meta:
# model = Task
#
# class SprintTasksForm(ModelForm):
# class Meta:
# model = SprintTasks
. Output only the next line. | story = Story.objects.get(pk=pk_story) |
Predict the next line for this snippet: <|code_start|> sprint_tasks = sprint.tasks.all()
#Set template's data
context['sprint'] = sprint
context['todos'] = sprint_tasks.filter(status='TO')
context['doings'] = sprint_tasks.filter(status='IN')
context['dones'] = sprint_tasks.filter(status='DO')
context['bugs'] = sprint_tasks.filter(status='PR')
context['backlogs'] = sprint_tasks.filter(status='BA')
#Can close a sprint if the user is a manager, if the sprint is not closed and if there is at least one task
context['can_close_sprint'] = is_manager(self.request.user) and not sprint.is_closed and sprint.tasks.count() > 0
return context
class ProjectListView(ListView):
model = Project
class WhiteBoardView(DetailView):
model = Project
def get_context_data(self, **kwargs):
context = super(WhiteBoardView, self).get_context_data(**kwargs)
#Get project's stories
project = kwargs.get('object')
#project_stories = Story.objects.filter(project=project)
#Get project's tasks
<|code_end|>
with the help of current file imports:
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.db.models import Q
from .models import Project, Story, Task, Sprint, SprintTasks
from .forms import ProjectForm, StoryForm, TaskForm, SprintTasksForm
import json
and context from other files:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
#
# Path: scrum/forms.py
# class ProjectForm(ModelForm):
# class Meta:
# model = Project
#
# class StoryForm(ModelForm):
# class Meta:
# model = Story
#
# class TaskForm(ModelForm):
# class Meta:
# model = Task
#
# class SprintTasksForm(ModelForm):
# class Meta:
# model = SprintTasks
, which may contain function names, class names, or code. Output only the next line. | project_tasks = Task.objects.unassigned().filter(story__project_id=project.id) |
Given snippet: <|code_start|>
def is_manager(user):
return 'manager' in (s.lower() for s in user.groups.values_list('name', flat=True))
class SprintView(DetailView):
template_name = 'scrum/sprint.html'
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.db.models import Q
from .models import Project, Story, Task, Sprint, SprintTasks
from .forms import ProjectForm, StoryForm, TaskForm, SprintTasksForm
import json
and context:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
#
# Path: scrum/forms.py
# class ProjectForm(ModelForm):
# class Meta:
# model = Project
#
# class StoryForm(ModelForm):
# class Meta:
# model = Story
#
# class TaskForm(ModelForm):
# class Meta:
# model = Task
#
# class SprintTasksForm(ModelForm):
# class Meta:
# model = SprintTasks
which might include code, classes, or functions. Output only the next line. | model = Sprint |
Predict the next line after this snippet: <|code_start|> #Set template's data
context['sprint'] = sprint
context['todos'] = sprint_tasks.filter(status='TO')
context['doings'] = sprint_tasks.filter(status='IN')
context['dones'] = sprint_tasks.filter(status='DO')
context['bugs'] = sprint_tasks.filter(status='PR')
context['backlogs'] = sprint_tasks.filter(status='BA')
#Can close a sprint if the user is a manager, if the sprint is not closed and if there is at least one task
context['can_close_sprint'] = is_manager(self.request.user) and not sprint.is_closed and sprint.tasks.count() > 0
return context
class ProjectListView(ListView):
model = Project
class WhiteBoardView(DetailView):
model = Project
def get_context_data(self, **kwargs):
context = super(WhiteBoardView, self).get_context_data(**kwargs)
#Get project's stories
project = kwargs.get('object')
#project_stories = Story.objects.filter(project=project)
#Get project's tasks
project_tasks = Task.objects.unassigned().filter(story__project_id=project.id)
#Get project's tasks with a status backlog in a sprint with a end status backlog
<|code_end|>
using the current file's imports:
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.db.models import Q
from .models import Project, Story, Task, Sprint, SprintTasks
from .forms import ProjectForm, StoryForm, TaskForm, SprintTasksForm
import json
and any relevant context from other files:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
#
# Path: scrum/forms.py
# class ProjectForm(ModelForm):
# class Meta:
# model = Project
#
# class StoryForm(ModelForm):
# class Meta:
# model = Story
#
# class TaskForm(ModelForm):
# class Meta:
# model = Task
#
# class SprintTasksForm(ModelForm):
# class Meta:
# model = SprintTasks
. Output only the next line. | project_tasks_backlog = Task.objects.filter(pk__in=SprintTasks.objects.filter(task_end_status='BA', task__status='BA', task__story__project_id=project.id).values_list('task', flat=True)) |
Predict the next line for this snippet: <|code_start|> if first_name or last_name:
response_data['full_name'] = ' '.join(filter(None, (first_name, last_name)))
else:
response_data['full_name'] = request.user.username
print response_data['full_name']
else:
print f.errors
return HttpResponse(json.dumps(response_data), content_type="application/json")
else:
raise Http404
def add_project(request):
if request.method == 'GET':
newProject = Project.objects.create()
return HttpResponseRedirect(reverse('project', args=(newProject.pk,)))
else:
raise Http404
def update_project(request, pk_project):
if request.method == 'POST':
response_data = {}
if pk_project:
project = Project.objects.get(pk=pk_project)
#Call form with post values and instance
<|code_end|>
with the help of current file imports:
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.db.models import Q
from .models import Project, Story, Task, Sprint, SprintTasks
from .forms import ProjectForm, StoryForm, TaskForm, SprintTasksForm
import json
and context from other files:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
#
# Path: scrum/forms.py
# class ProjectForm(ModelForm):
# class Meta:
# model = Project
#
# class StoryForm(ModelForm):
# class Meta:
# model = Story
#
# class TaskForm(ModelForm):
# class Meta:
# model = Task
#
# class SprintTasksForm(ModelForm):
# class Meta:
# model = SprintTasks
, which may contain function names, class names, or code. Output only the next line. | f = ProjectForm(request.POST, instance=project) |
Next line prediction: <|code_start|> context['tasks'] = project_tasks
context['backlogs'] = project_tasks_backlog
return context
def render_to_response(self, context, **response_kwargs):
#If the user is not a manager
if is_manager(self.request.user):
#Display whiteboard in read only
self.template_name = 'scrum/whiteboard.html'
else:
self.template_name = 'scrum/whiteboard_read.html'
return self.response_class(
request=self.request,
template=self.template_name,
context=context,
**response_kwargs
)
def add_story(request, pk_project):
if request.method == 'POST':
response_data = {}
#Add the project to post values
post_values = request.POST.copy()
post_values['project'] = pk_project
#Call form with post values
<|code_end|>
. Use current file imports:
(from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.db.models import Q
from .models import Project, Story, Task, Sprint, SprintTasks
from .forms import ProjectForm, StoryForm, TaskForm, SprintTasksForm
import json)
and context including class names, function names, or small code snippets from other files:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
#
# Path: scrum/forms.py
# class ProjectForm(ModelForm):
# class Meta:
# model = Project
#
# class StoryForm(ModelForm):
# class Meta:
# model = Story
#
# class TaskForm(ModelForm):
# class Meta:
# model = Task
#
# class SprintTasksForm(ModelForm):
# class Meta:
# model = SprintTasks
. Output only the next line. | f = StoryForm(post_values) |
Given snippet: <|code_start|>
if pk_story:
story = Story.objects.get(pk=pk_story)
#Add the project to post values
post_values = request.POST.copy()
post_values['project'] = story.project.pk
#Call form with post values and instance
f = StoryForm(post_values, instance=story)
if f.is_valid():
story = f.save()
response_data['story_pk'] = story.pk
else:
print f.errors
return HttpResponse(json.dumps(response_data), content_type="application/json")
else:
raise Http404
def add_task(request, pk_project):
if request.method == 'POST':
response_data = {}
post_values = request.POST.copy()
post_values['project'] = pk_project
post_values['status'] = 'TO'
#Call form with post values
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.db.models import Q
from .models import Project, Story, Task, Sprint, SprintTasks
from .forms import ProjectForm, StoryForm, TaskForm, SprintTasksForm
import json
and context:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
#
# Path: scrum/forms.py
# class ProjectForm(ModelForm):
# class Meta:
# model = Project
#
# class StoryForm(ModelForm):
# class Meta:
# model = Story
#
# class TaskForm(ModelForm):
# class Meta:
# model = Task
#
# class SprintTasksForm(ModelForm):
# class Meta:
# model = SprintTasks
which might include code, classes, or functions. Output only the next line. | f = TaskForm(post_values) |
Using the snippet: <|code_start|>def update_project(request, pk_project):
if request.method == 'POST':
response_data = {}
if pk_project:
project = Project.objects.get(pk=pk_project)
#Call form with post values and instance
f = ProjectForm(request.POST, instance=project)
if f.is_valid():
project = f.save()
response_data['project_pk'] = project.pk
else:
print f.errors
return HttpResponse(json.dumps(response_data), content_type="application/json")
else:
raise Http404
def add_sprint_task(request):
if request.method == 'POST':
post_data = {}
#Add the task, sprint and task_end_status to post values
post_data['task_end_status'] = 'DO'
post_data['sprint'] = request.POST.get('sprint', None)
post_data['task'] = request.POST.get('task', None)
#Call form with post values
<|code_end|>
, determine the next line of code. You have imports:
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.db.models import Q
from .models import Project, Story, Task, Sprint, SprintTasks
from .forms import ProjectForm, StoryForm, TaskForm, SprintTasksForm
import json
and context (class names, function names, or code) available:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
#
# Path: scrum/forms.py
# class ProjectForm(ModelForm):
# class Meta:
# model = Project
#
# class StoryForm(ModelForm):
# class Meta:
# model = Story
#
# class TaskForm(ModelForm):
# class Meta:
# model = Task
#
# class SprintTasksForm(ModelForm):
# class Meta:
# model = SprintTasks
. Output only the next line. | f = SprintTasksForm(post_data) |
Predict the next line for this snippet: <|code_start|>
class ProjectForm(ModelForm):
class Meta:
model = Project
class StoryForm(ModelForm):
class Meta:
<|code_end|>
with the help of current file imports:
from django.forms import ModelForm
from .models import Project, Story, Task, Sprint, SprintTasks
and context from other files:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
, which may contain function names, class names, or code. Output only the next line. | model = Story
|
Using the snippet: <|code_start|>
class ProjectForm(ModelForm):
class Meta:
model = Project
class StoryForm(ModelForm):
class Meta:
model = Story
class TaskForm(ModelForm):
class Meta:
<|code_end|>
, determine the next line of code. You have imports:
from django.forms import ModelForm
from .models import Project, Story, Task, Sprint, SprintTasks
and context (class names, function names, or code) available:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
. Output only the next line. | model = Task
|
Given the code snippet: <|code_start|>
class ProjectForm(ModelForm):
class Meta:
model = Project
class StoryForm(ModelForm):
class Meta:
model = Story
class TaskForm(ModelForm):
class Meta:
model = Task
class SprintForm(ModelForm):
class Meta:
<|code_end|>
, generate the next line using the imports in this file:
from django.forms import ModelForm
from .models import Project, Story, Task, Sprint, SprintTasks
and context (functions, classes, or occasionally code) from other files:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
. Output only the next line. | model = Sprint
|
Given the code snippet: <|code_start|>
class ProjectForm(ModelForm):
class Meta:
model = Project
class StoryForm(ModelForm):
class Meta:
model = Story
class TaskForm(ModelForm):
class Meta:
model = Task
class SprintForm(ModelForm):
class Meta:
model = Sprint
class SprintTasksForm(ModelForm):
class Meta:
<|code_end|>
, generate the next line using the imports in this file:
from django.forms import ModelForm
from .models import Project, Story, Task, Sprint, SprintTasks
and context (functions, classes, or occasionally code) from other files:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
. Output only the next line. | model = SprintTasks
|
Predict the next line after this snippet: <|code_start|>
class ImageInline(admin.StackedInline):
model = SprintTasks
class ProjectAdmin(admin.ModelAdmin):
<|code_end|>
using the current file's imports:
from django.contrib import admin
from .models import Project, Story, Task, Sprint, SprintTasks
from django.db import models
from django.contrib.admin.widgets import FilteredSelectMultiple
and any relevant context from other files:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
. Output only the next line. | model = Project
|
Predict the next line after this snippet: <|code_start|>
class ImageInline(admin.StackedInline):
model = SprintTasks
class ProjectAdmin(admin.ModelAdmin):
model = Project
class StoryAdmin(admin.ModelAdmin):
<|code_end|>
using the current file's imports:
from django.contrib import admin
from .models import Project, Story, Task, Sprint, SprintTasks
from django.db import models
from django.contrib.admin.widgets import FilteredSelectMultiple
and any relevant context from other files:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
. Output only the next line. | model = Story
|
Using the snippet: <|code_start|>
class ImageInline(admin.StackedInline):
model = SprintTasks
class ProjectAdmin(admin.ModelAdmin):
model = Project
class StoryAdmin(admin.ModelAdmin):
model = Story
class TaskAdmin(admin.ModelAdmin):
<|code_end|>
, determine the next line of code. You have imports:
from django.contrib import admin
from .models import Project, Story, Task, Sprint, SprintTasks
from django.db import models
from django.contrib.admin.widgets import FilteredSelectMultiple
and context (class names, function names, or code) available:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
. Output only the next line. | model = Task
|
Here is a snippet: <|code_start|>
class ImageInline(admin.StackedInline):
model = SprintTasks
class ProjectAdmin(admin.ModelAdmin):
model = Project
class StoryAdmin(admin.ModelAdmin):
model = Story
class TaskAdmin(admin.ModelAdmin):
model = Task
class SprintAdmin(admin.ModelAdmin):
<|code_end|>
. Write the next line using the current file imports:
from django.contrib import admin
from .models import Project, Story, Task, Sprint, SprintTasks
from django.db import models
from django.contrib.admin.widgets import FilteredSelectMultiple
and context from other files:
# Path: scrum/models.py
# class Project(models.Model):
# name = models.CharField(max_length=255, blank=True)
# description = models.TextField(blank=True)
# users = models.ManyToManyField(User, related_name='Project_users', blank=True, null=True)
#
# class Meta:
# ordering = ('id',)
#
# def __unicode__(self):
# return "%s" % (self.name, )
#
# class Story(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# last_modified = models.DateTimeField(auto_now=True)
# project = models.ForeignKey(Project, related_name='stories')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(5)])
#
# class Meta:
# ordering = ('title',)
#
# def __unicode__(self):
# return "%s" % (self.title, )
#
# class Task(models.Model):
# title = models.CharField(max_length=255)
# note = models.TextField(blank=True)
# status = models.CharField(max_length=2, choices=TASKS_STATUS)
# last_modified = models.DateTimeField(auto_now=True)
# assigned_to = models.ForeignKey(User, related_name='Task_users', blank=True, null=True)
# story = models.ForeignKey(Story, related_name='Task_story')
# estimated_time = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(8)])
# objects = TaskManager()
#
# class Meta:
# ordering = ('id',)
# verbose_name = "Task"
#
# def __unicode__(self):
# return "%s" % (self.title,)
#
# class Sprint(models.Model):
# number = models.IntegerField()
# is_closed = models.BooleanField()
# project = models.ForeignKey(Project, related_name='sprints')
# last_modified = models.DateTimeField(auto_now=True)
# tasks = models.ManyToManyField(Task, through='SprintTasks', blank=True, null=True)
# objects = SprintManager()
#
# class Meta:
# ordering = ('number',)
# unique_together = ('number', 'project')
#
# def __unicode__(self):
# return "sprint %s" % (self.number, )
#
# def save(self, *args, **kwargs):
# if self.id is None:
# self.number = (Sprint.objects.filter(project_id=self.project_id).aggregate(Max('number'))['number__max'] or 0) + 1
#
# super(Sprint, self).save(*args, **kwargs)
#
# class SprintTasks(models.Model):
# sprint = models.ForeignKey(Sprint, related_name='sprints')
# task = models.ForeignKey(Task, related_name='tasks')
# task_end_status = models.CharField(max_length=2, choices=TASKS_STATUS)
#
# class Meta:
# unique_together = ('sprint', 'task')
, which may include functions, classes, or code. Output only the next line. | model = Sprint
|
Next line prediction: <|code_start|>__author__ = 'chenzhao'
REST_PREFIX = '/rest'
class ReSTManager(object):
rest_models = []
@classmethod
def rest_url_get_single(cls, model_obj):
return '%s/%s/%d' % (REST_PREFIX, model_obj.urlname(), model_obj.id)
@classmethod
def init_rest(cls, app):
for model_cls in globals().values():
if inspect.isclass(model_cls) and issubclass(model_cls, db.Model):
# print model_cls
<|code_end|>
. Use current file imports:
(import inspect
from flask.ext import restless
from gmission.models import *
from .base import ReSTBase
from gmission.flask_app import db, app)
and context including class names, function names, or small code snippets from other files:
# Path: hkust-gmission/gmission/rest/base.py
# class ReSTBase(object):
# @classmethod
# def universal_before_post(cls, data):
# # print 'Universal before_post'
# data.pop('id', None)
#
# @classmethod
# def universal_after_get_many(cls, result=None, search_params=None, **kwargs):
# if request.method == 'HEAD':
# if result is not None:
# result.pop('objects', [])
# pass
#
# @classmethod
# @jwt_auth()
# def check_user_token(cls, **kw):
# return True
#
# @classmethod
# def processor_name_mapping(cls, prefix):
# exclude_list = []
# processors = {}
# processors_fields = ['GET_SINGLE', 'GET_MANY', 'PATCH_SINGLE', 'PATCH_MANY', 'PUT_SINGLE', 'PUT_MANY', 'POST',
# 'DELETE']
# for raw_method in inspect.getmembers(cls, predicate=inspect.ismethod):
# name, method = raw_method
# if name.startswith(prefix):
# processors[name[len(prefix) + 1:].upper()] = [method.__get__(cls), ]
#
# # if cls.__name__ not in exclude_list and prefix == 'before':
# # for key in processors_fields:
# # preprocessor = processors.get(key, [])
# # preprocessor.insert(0, cls.check_user_token)
# # processors[key] = preprocessor
# return processors
#
# @classmethod
# def universal_preprocessors(cls):
# prefix = 'universal_before'
# return ReSTBase.processor_name_mapping(prefix)
#
# @classmethod
# def universal_postprocessors(cls):
# prefix = 'universal_after'
# return ReSTBase.processor_name_mapping(prefix)
#
# @classmethod
# def rest_preprocessors(cls):
# prefix = 'before'
# return cls.processor_name_mapping(prefix)
#
# @classmethod
# def rest_postprocessors(cls):
# prefix = 'after'
# return cls.processor_name_mapping(prefix)
#
# @classmethod
# def rest_exclude_columns(cls):
# # r = [cln for cln in cls.__mapper__.columns if isinstance(cln, db.RelationshipProperty)]
# return [str(r).split('.')[1] for r in cls.__mapper__.relationships]
. Output only the next line. | if model_cls not in ReSTBase.__subclasses__(): |
Continue the code snippet: <|code_start|>#
# Mini-NDN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mini-NDN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mini-NDN, e.g., in COPYING.md file.
# If not, see <http://www.gnu.org/licenses/>.
class Application(object):
def __init__(self, node):
self.node = node
self.process = None
self.logfile = None
self.homeDir = self.node.params['params']['homeDir']
# Make directory for log file
self.logDir = '{}/log'.format(self.homeDir)
self.node.cmd('mkdir -p {}'.format(self.logDir))
def start(self, command, logfile, envDict=None):
if self.process is None:
self.logfile = open('{}/{}'.format(self.logDir, logfile), 'w')
<|code_end|>
. Use current file imports:
from minindn.util import getPopen
and context (classes, functions, or code) from other files:
# Path: minindn/util.py
# def getPopen(host, cmd, envDict=None, **params):
# return host.popen(cmd, cwd=host.params['params']['homeDir'],
# env=popenGetEnv(host, envDict), **params)
. Output only the next line. | self.process = getPopen(self.node, command.split(), envDict, |
Based on the snippet: <|code_start|> else:
plt.show()
return pos
def plot_rule(rule, filename=None, title=None):
"""Plot ReGraph's rule object.
This function plots a rule object, it produces three
separate plots: for the left-hand side of the rule,
preserved part and the right-hand side, where the two
homomorphsisms p->lhs, p->rhs are encoded with colors
of nodes.
Parameters
----------
rule : regraph.rules.Rule
Rule object to plot
filename : str, optional
Path to file to save the plot
"""
fig = plt.figure(figsize=(14, 3))
if title is not None:
st = fig.suptitle(title, fontsize=14)
# generate colors
p_colors_dict = {}
p_colors = []
for node in rule.p.nodes():
lhs_node = rule.p_lhs[node]
<|code_end|>
, predict the immediate next line with the help of imports:
import copy
import networkx as nx
import numpy as np
from regraph.utils import keys_by_value
from matplotlib import pyplot as plt
and context (classes, functions, sometimes code) from other files:
# Path: regraph/utils.py
# def keys_by_value(dictionary, val):
# """Get keys of a dictionary by a value."""
# res = []
# for key, value in dictionary.items():
# if value == val:
# res.append(key)
# return res
. Output only the next line. | all_p_keys = keys_by_value(rule.p_lhs, lhs_node) |
Given the following code snippet before the placeholder: <|code_start|> ----------
node_list : iterable
Iterable containing a collection of nodes, optionally,
with their attributes
"""
for n in node_list:
if type(n) != str:
try:
node_id, node_attrs = n
self.add_node(node_id, node_attrs)
except (TypeError, ValueError):
self.add_node(n)
else:
self.add_node(n)
def add_edges_from(self, edge_list):
"""Add edges from an edge list.
Parameters
----------
edge_list : iterable
Iterable containing a collection of edges, optionally,
with their attributes
"""
for e in edge_list:
if len(e) == 2:
self.add_edge(e[0], e[1])
elif len(e) == 3:
self.add_edge(e[0], e[1], e[2])
else:
<|code_end|>
, predict the next line using imports from the current file:
import json
import os
import warnings
from abc import ABC, abstractmethod
from regraph.exceptions import (ReGraphError,
GraphError,
GraphAttrsWarning,
)
from regraph.utils import (load_nodes_from_json,
load_edges_from_json,
generate_new_id,
normalize_attrs,
safe_deepcopy_dict,
set_attrs,
add_attrs,
remove_attrs,
merge_attributes,
keys_by_value,
)
and context including class names, function names, and sometimes code from other files:
# Path: regraph/exceptions.py
# class ReGraphError(ReGraphException):
# """Exception for errors in ReGraph."""
#
# class GraphError(ReGraphException):
# """Class for errors in graph transformation with primitives."""
#
# class GraphAttrsWarning(ReGraphWarning):
# """Class for warnings about empty attrs dict."""
#
# Path: regraph/utils.py
# def load_nodes_from_json(j_data):
# """Load nodes from json-like dict."""
# loaded_nodes = []
# if "nodes" in j_data.keys():
# j_nodes = j_data["nodes"]
# for node in j_nodes:
# if "id" in node.keys():
# node_id = node["id"]
# else:
# raise ReGraphError(
# "Error loading graph: node id is not specified!")
# attrs = None
# if "attrs" in node.keys():
# attrs = json_dict_to_attrs(node["attrs"])
# loaded_nodes.append((node_id, attrs))
# else:
# raise ReGraphError(
# "Error loading graph: no nodes specified!")
# return loaded_nodes
#
# def load_edges_from_json(j_data):
# """Load edges from json-like dict."""
# loaded_edges = []
# if "edges" in j_data.keys():
# j_edges = j_data["edges"]
# for edge in j_edges:
# if "from" in edge.keys():
# s_node = edge["from"]
# else:
# raise ReGraphError(
# "Error loading graph: edge source is not specified!")
# if "to" in edge.keys():
# t_node = edge["to"]
# else:
# raise ReGraphError(
# "Error loading graph: edge target is not specified!")
# if "attrs" in edge.keys():
# attrs = json_dict_to_attrs(edge["attrs"])
# loaded_edges.append((s_node, t_node, attrs))
# else:
# loaded_edges.append((s_node, t_node))
# return loaded_edges
#
# def generate_new_id(collection, basename):
# """Generate unique id for a node."""
# node_id = basename
# i = 1
# while node_id in collection:
# node_id = "{}_{}".format(basename, i)
# i += 1
# return node_id
#
# def normalize_attrs(attrs):
# """Normalize node attributes."""
# if attrs is not None:
# for k, v in list(attrs.items()):
# if not isinstance(v, AttributeSet):
# attrs[k] = FiniteSet(v)
# if attrs[k].is_empty():
# del attrs[k]
# return
#
# def safe_deepcopy_dict(d):
# """Util for safe deepcopy of a dict.
#
# Solves the issue with 'TypeError: can't pickle dict_items objects'
# of the default 'copy.deepcopy'.
# """
# try:
# new_d = copy.deepcopy(d)
# except TypeError:
# new_d = dict()
# for k, v in d.items():
# new_d[k] = copy.deepcopy(list(v.items()))
# return new_d
#
# def set_attrs(old_attrs, attrs, normalize=True, update=True):
# if normalize:
# normalize_attrs(attrs)
#
# for key in attrs:
# old_attrs[key] = attrs[key]
# if update:
# keys_to_remove = set()
# for key in old_attrs:
# if key not in attrs:
# keys_to_remove.add(key)
# for key in keys_to_remove:
# del old_attrs[key]
# return old_attrs
#
# def add_attrs(old_attrs, attrs, normalize=True):
# if normalize:
# normalize_attrs(attrs)
# for key in attrs:
# if key in old_attrs:
# old_attrs[key] = old_attrs[key].union(attrs[key])
# else:
# old_attrs[key] = attrs[key]
#
# def remove_attrs(old_attrs, attrs, normalize=True):
# if normalize:
# normalize_attrs(attrs)
# for key, value in attrs.items():
# if key in old_attrs:
# new_set = old_attrs[key].difference(value)
# if not new_set:
# del old_attrs[key]
# else:
# old_attrs[key] = new_set
#
# def merge_attributes(attr1, attr2, method="union"):
# """Merge two dictionaries of attributes."""
# if method == "union":
# return attrs_union(attr1, attr2)
# elif method == "intersection":
# return attrs_intersection(attr1, attr2)
# else:
# raise ReGraphError("Merging method %s is not defined!" % method)
#
# def keys_by_value(dictionary, val):
# """Get keys of a dictionary by a value."""
# res = []
# for key, value in dictionary.items():
# if value == val:
# res.append(key)
# return res
. Output only the next line. | raise ReGraphError( |
Predict the next line after this snippet: <|code_start|> s : hashable
Source node id.
t : hashable
Target node id.
"""
# print("\n\n\n\n\n\n\n\n\n\n", s, t, self.edges())
return((s, t) in self.edges())
def set_node_attrs(self, node_id, attrs, normalize=True, update=True):
"""Set node attrs.
Parameters
----------
node_id : hashable
Id of the node to update
attrs : dict
Dictionary with new attributes to set
normalize : bool, optional
Flag, when set to True attributes are normalized to be set-valued.
True by default
update : bool, optional
Flag, when set to True attributes whose keys are not present
in attrs are removed, True by default
Raises
------
GraphError
If a node `node_id` does not exist.
"""
if node_id not in self.nodes():
<|code_end|>
using the current file's imports:
import json
import os
import warnings
from abc import ABC, abstractmethod
from regraph.exceptions import (ReGraphError,
GraphError,
GraphAttrsWarning,
)
from regraph.utils import (load_nodes_from_json,
load_edges_from_json,
generate_new_id,
normalize_attrs,
safe_deepcopy_dict,
set_attrs,
add_attrs,
remove_attrs,
merge_attributes,
keys_by_value,
)
and any relevant context from other files:
# Path: regraph/exceptions.py
# class ReGraphError(ReGraphException):
# """Exception for errors in ReGraph."""
#
# class GraphError(ReGraphException):
# """Class for errors in graph transformation with primitives."""
#
# class GraphAttrsWarning(ReGraphWarning):
# """Class for warnings about empty attrs dict."""
#
# Path: regraph/utils.py
# def load_nodes_from_json(j_data):
# """Load nodes from json-like dict."""
# loaded_nodes = []
# if "nodes" in j_data.keys():
# j_nodes = j_data["nodes"]
# for node in j_nodes:
# if "id" in node.keys():
# node_id = node["id"]
# else:
# raise ReGraphError(
# "Error loading graph: node id is not specified!")
# attrs = None
# if "attrs" in node.keys():
# attrs = json_dict_to_attrs(node["attrs"])
# loaded_nodes.append((node_id, attrs))
# else:
# raise ReGraphError(
# "Error loading graph: no nodes specified!")
# return loaded_nodes
#
# def load_edges_from_json(j_data):
# """Load edges from json-like dict."""
# loaded_edges = []
# if "edges" in j_data.keys():
# j_edges = j_data["edges"]
# for edge in j_edges:
# if "from" in edge.keys():
# s_node = edge["from"]
# else:
# raise ReGraphError(
# "Error loading graph: edge source is not specified!")
# if "to" in edge.keys():
# t_node = edge["to"]
# else:
# raise ReGraphError(
# "Error loading graph: edge target is not specified!")
# if "attrs" in edge.keys():
# attrs = json_dict_to_attrs(edge["attrs"])
# loaded_edges.append((s_node, t_node, attrs))
# else:
# loaded_edges.append((s_node, t_node))
# return loaded_edges
#
# def generate_new_id(collection, basename):
# """Generate unique id for a node."""
# node_id = basename
# i = 1
# while node_id in collection:
# node_id = "{}_{}".format(basename, i)
# i += 1
# return node_id
#
# def normalize_attrs(attrs):
# """Normalize node attributes."""
# if attrs is not None:
# for k, v in list(attrs.items()):
# if not isinstance(v, AttributeSet):
# attrs[k] = FiniteSet(v)
# if attrs[k].is_empty():
# del attrs[k]
# return
#
# def safe_deepcopy_dict(d):
# """Util for safe deepcopy of a dict.
#
# Solves the issue with 'TypeError: can't pickle dict_items objects'
# of the default 'copy.deepcopy'.
# """
# try:
# new_d = copy.deepcopy(d)
# except TypeError:
# new_d = dict()
# for k, v in d.items():
# new_d[k] = copy.deepcopy(list(v.items()))
# return new_d
#
# def set_attrs(old_attrs, attrs, normalize=True, update=True):
# if normalize:
# normalize_attrs(attrs)
#
# for key in attrs:
# old_attrs[key] = attrs[key]
# if update:
# keys_to_remove = set()
# for key in old_attrs:
# if key not in attrs:
# keys_to_remove.add(key)
# for key in keys_to_remove:
# del old_attrs[key]
# return old_attrs
#
# def add_attrs(old_attrs, attrs, normalize=True):
# if normalize:
# normalize_attrs(attrs)
# for key in attrs:
# if key in old_attrs:
# old_attrs[key] = old_attrs[key].union(attrs[key])
# else:
# old_attrs[key] = attrs[key]
#
# def remove_attrs(old_attrs, attrs, normalize=True):
# if normalize:
# normalize_attrs(attrs)
# for key, value in attrs.items():
# if key in old_attrs:
# new_set = old_attrs[key].difference(value)
# if not new_set:
# del old_attrs[key]
# else:
# old_attrs[key] = new_set
#
# def merge_attributes(attr1, attr2, method="union"):
# """Merge two dictionaries of attributes."""
# if method == "union":
# return attrs_union(attr1, attr2)
# elif method == "intersection":
# return attrs_intersection(attr1, attr2)
# else:
# raise ReGraphError("Merging method %s is not defined!" % method)
#
# def keys_by_value(dictionary, val):
# """Get keys of a dictionary by a value."""
# res = []
# for key, value in dictionary.items():
# if value == val:
# res.append(key)
# return res
. Output only the next line. | raise GraphError("Node '{}' does not exist!".format(node_id)) |
Continue the code snippet: <|code_start|> edge_data[key] = list(edge_attrs[key])
j_data["links"].append(edge_data)
return j_data
def export(self, filename):
"""Export graph to JSON file.
Parameters
----------
filename : str
Name of the file to save the json serialization of the graph
"""
with open(filename, 'w') as f:
j_data = self.to_json()
json.dump(j_data, f)
return
@classmethod
def from_json(cls, json_data):
"""Create a NetworkX graph from a json-like dictionary.
Parameters
----------
json_data : dict
JSON-like dictionary with graph representation
"""
graph = cls()
<|code_end|>
. Use current file imports:
import json
import os
import warnings
from abc import ABC, abstractmethod
from regraph.exceptions import (ReGraphError,
GraphError,
GraphAttrsWarning,
)
from regraph.utils import (load_nodes_from_json,
load_edges_from_json,
generate_new_id,
normalize_attrs,
safe_deepcopy_dict,
set_attrs,
add_attrs,
remove_attrs,
merge_attributes,
keys_by_value,
)
and context (classes, functions, or code) from other files:
# Path: regraph/exceptions.py
# class ReGraphError(ReGraphException):
# """Exception for errors in ReGraph."""
#
# class GraphError(ReGraphException):
# """Class for errors in graph transformation with primitives."""
#
# class GraphAttrsWarning(ReGraphWarning):
# """Class for warnings about empty attrs dict."""
#
# Path: regraph/utils.py
# def load_nodes_from_json(j_data):
# """Load nodes from json-like dict."""
# loaded_nodes = []
# if "nodes" in j_data.keys():
# j_nodes = j_data["nodes"]
# for node in j_nodes:
# if "id" in node.keys():
# node_id = node["id"]
# else:
# raise ReGraphError(
# "Error loading graph: node id is not specified!")
# attrs = None
# if "attrs" in node.keys():
# attrs = json_dict_to_attrs(node["attrs"])
# loaded_nodes.append((node_id, attrs))
# else:
# raise ReGraphError(
# "Error loading graph: no nodes specified!")
# return loaded_nodes
#
# def load_edges_from_json(j_data):
# """Load edges from json-like dict."""
# loaded_edges = []
# if "edges" in j_data.keys():
# j_edges = j_data["edges"]
# for edge in j_edges:
# if "from" in edge.keys():
# s_node = edge["from"]
# else:
# raise ReGraphError(
# "Error loading graph: edge source is not specified!")
# if "to" in edge.keys():
# t_node = edge["to"]
# else:
# raise ReGraphError(
# "Error loading graph: edge target is not specified!")
# if "attrs" in edge.keys():
# attrs = json_dict_to_attrs(edge["attrs"])
# loaded_edges.append((s_node, t_node, attrs))
# else:
# loaded_edges.append((s_node, t_node))
# return loaded_edges
#
# def generate_new_id(collection, basename):
# """Generate unique id for a node."""
# node_id = basename
# i = 1
# while node_id in collection:
# node_id = "{}_{}".format(basename, i)
# i += 1
# return node_id
#
# def normalize_attrs(attrs):
# """Normalize node attributes."""
# if attrs is not None:
# for k, v in list(attrs.items()):
# if not isinstance(v, AttributeSet):
# attrs[k] = FiniteSet(v)
# if attrs[k].is_empty():
# del attrs[k]
# return
#
# def safe_deepcopy_dict(d):
# """Util for safe deepcopy of a dict.
#
# Solves the issue with 'TypeError: can't pickle dict_items objects'
# of the default 'copy.deepcopy'.
# """
# try:
# new_d = copy.deepcopy(d)
# except TypeError:
# new_d = dict()
# for k, v in d.items():
# new_d[k] = copy.deepcopy(list(v.items()))
# return new_d
#
# def set_attrs(old_attrs, attrs, normalize=True, update=True):
# if normalize:
# normalize_attrs(attrs)
#
# for key in attrs:
# old_attrs[key] = attrs[key]
# if update:
# keys_to_remove = set()
# for key in old_attrs:
# if key not in attrs:
# keys_to_remove.add(key)
# for key in keys_to_remove:
# del old_attrs[key]
# return old_attrs
#
# def add_attrs(old_attrs, attrs, normalize=True):
# if normalize:
# normalize_attrs(attrs)
# for key in attrs:
# if key in old_attrs:
# old_attrs[key] = old_attrs[key].union(attrs[key])
# else:
# old_attrs[key] = attrs[key]
#
# def remove_attrs(old_attrs, attrs, normalize=True):
# if normalize:
# normalize_attrs(attrs)
# for key, value in attrs.items():
# if key in old_attrs:
# new_set = old_attrs[key].difference(value)
# if not new_set:
# del old_attrs[key]
# else:
# old_attrs[key] = new_set
#
# def merge_attributes(attr1, attr2, method="union"):
# """Merge two dictionaries of attributes."""
# if method == "union":
# return attrs_union(attr1, attr2)
# elif method == "intersection":
# return attrs_intersection(attr1, attr2)
# else:
# raise ReGraphError("Merging method %s is not defined!" % method)
#
# def keys_by_value(dictionary, val):
# """Get keys of a dictionary by a value."""
# res = []
# for key, value in dictionary.items():
# if value == val:
# res.append(key)
# return res
. Output only the next line. | graph.add_nodes_from(load_nodes_from_json(json_data)) |
Here is a snippet: <|code_start|> j_data["links"].append(edge_data)
return j_data
def export(self, filename):
"""Export graph to JSON file.
Parameters
----------
filename : str
Name of the file to save the json serialization of the graph
"""
with open(filename, 'w') as f:
j_data = self.to_json()
json.dump(j_data, f)
return
@classmethod
def from_json(cls, json_data):
"""Create a NetworkX graph from a json-like dictionary.
Parameters
----------
json_data : dict
JSON-like dictionary with graph representation
"""
graph = cls()
graph.add_nodes_from(load_nodes_from_json(json_data))
<|code_end|>
. Write the next line using the current file imports:
import json
import os
import warnings
from abc import ABC, abstractmethod
from regraph.exceptions import (ReGraphError,
GraphError,
GraphAttrsWarning,
)
from regraph.utils import (load_nodes_from_json,
load_edges_from_json,
generate_new_id,
normalize_attrs,
safe_deepcopy_dict,
set_attrs,
add_attrs,
remove_attrs,
merge_attributes,
keys_by_value,
)
and context from other files:
# Path: regraph/exceptions.py
# class ReGraphError(ReGraphException):
# """Exception for errors in ReGraph."""
#
# class GraphError(ReGraphException):
# """Class for errors in graph transformation with primitives."""
#
# class GraphAttrsWarning(ReGraphWarning):
# """Class for warnings about empty attrs dict."""
#
# Path: regraph/utils.py
# def load_nodes_from_json(j_data):
# """Load nodes from json-like dict."""
# loaded_nodes = []
# if "nodes" in j_data.keys():
# j_nodes = j_data["nodes"]
# for node in j_nodes:
# if "id" in node.keys():
# node_id = node["id"]
# else:
# raise ReGraphError(
# "Error loading graph: node id is not specified!")
# attrs = None
# if "attrs" in node.keys():
# attrs = json_dict_to_attrs(node["attrs"])
# loaded_nodes.append((node_id, attrs))
# else:
# raise ReGraphError(
# "Error loading graph: no nodes specified!")
# return loaded_nodes
#
# def load_edges_from_json(j_data):
# """Load edges from json-like dict."""
# loaded_edges = []
# if "edges" in j_data.keys():
# j_edges = j_data["edges"]
# for edge in j_edges:
# if "from" in edge.keys():
# s_node = edge["from"]
# else:
# raise ReGraphError(
# "Error loading graph: edge source is not specified!")
# if "to" in edge.keys():
# t_node = edge["to"]
# else:
# raise ReGraphError(
# "Error loading graph: edge target is not specified!")
# if "attrs" in edge.keys():
# attrs = json_dict_to_attrs(edge["attrs"])
# loaded_edges.append((s_node, t_node, attrs))
# else:
# loaded_edges.append((s_node, t_node))
# return loaded_edges
#
# def generate_new_id(collection, basename):
# """Generate unique id for a node."""
# node_id = basename
# i = 1
# while node_id in collection:
# node_id = "{}_{}".format(basename, i)
# i += 1
# return node_id
#
# def normalize_attrs(attrs):
# """Normalize node attributes."""
# if attrs is not None:
# for k, v in list(attrs.items()):
# if not isinstance(v, AttributeSet):
# attrs[k] = FiniteSet(v)
# if attrs[k].is_empty():
# del attrs[k]
# return
#
# def safe_deepcopy_dict(d):
# """Util for safe deepcopy of a dict.
#
# Solves the issue with 'TypeError: can't pickle dict_items objects'
# of the default 'copy.deepcopy'.
# """
# try:
# new_d = copy.deepcopy(d)
# except TypeError:
# new_d = dict()
# for k, v in d.items():
# new_d[k] = copy.deepcopy(list(v.items()))
# return new_d
#
# def set_attrs(old_attrs, attrs, normalize=True, update=True):
# if normalize:
# normalize_attrs(attrs)
#
# for key in attrs:
# old_attrs[key] = attrs[key]
# if update:
# keys_to_remove = set()
# for key in old_attrs:
# if key not in attrs:
# keys_to_remove.add(key)
# for key in keys_to_remove:
# del old_attrs[key]
# return old_attrs
#
# def add_attrs(old_attrs, attrs, normalize=True):
# if normalize:
# normalize_attrs(attrs)
# for key in attrs:
# if key in old_attrs:
# old_attrs[key] = old_attrs[key].union(attrs[key])
# else:
# old_attrs[key] = attrs[key]
#
# def remove_attrs(old_attrs, attrs, normalize=True):
# if normalize:
# normalize_attrs(attrs)
# for key, value in attrs.items():
# if key in old_attrs:
# new_set = old_attrs[key].difference(value)
# if not new_set:
# del old_attrs[key]
# else:
# old_attrs[key] = new_set
#
# def merge_attributes(attr1, attr2, method="union"):
# """Merge two dictionaries of attributes."""
# if method == "union":
# return attrs_union(attr1, attr2)
# elif method == "intersection":
# return attrs_intersection(attr1, attr2)
# else:
# raise ReGraphError("Merging method %s is not defined!" % method)
#
# def keys_by_value(dictionary, val):
# """Get keys of a dictionary by a value."""
# res = []
# for key, value in dictionary.items():
# if value == val:
# res.append(key)
# return res
, which may include functions, classes, or code. Output only the next line. | graph.add_edges_from(load_edges_from_json(json_data)) |
Given the code snippet: <|code_start|>
try:
re._pattern_type = re.Pattern
except:
pass
def _hashify(d):
"""Hashify a dictionary to a list of tuples."""
result = []
for key, value in d.items():
if type(value) is dict:
result.append((key, _hashify(value)))
else:
result.append((key, value))
return tuple(result)
def _regex_to_string(a):
if isinstance(a, str):
return a
elif isinstance(a, re._pattern_type):
return a.pattern
elif isinstance(a, RegexSet):
if a.pattern is not None:
return a.pattern
else:
return None
else:
<|code_end|>
, generate the next line using the imports in this file:
import copy
import re
import numpy as np
import math
import sys
from greenery.lego import parse
from regraph.exceptions import AttributeSetError
and context (functions, classes, or occasionally code) from other files:
# Path: regraph/exceptions.py
# class AttributeSetError(ReGraphException):
# """Exceptions class for errors in attribute sets."""
. Output only the next line. | raise AttributeSetError("Cannot convert regex to string!") |
Given the following code snippet before the placeholder: <|code_start|> "Graph '%s' is not typed by '%s' specified "
"as a typing graph of the lhs of the rule." %
(graph_id, typing_graph))
def _check_rule_instance_typing(hierarchy, graph_id, rule, instance,
p_typing, rhs_typing, strict):
"""Check consistency of the input."""
lhs_typing = {}
# Autocomplete typings
lhs_typing, p_typing, rhs_typing =\
_autocomplete_typing(
hierarchy, graph_id,
instance=instance,
lhs_typing=lhs_typing,
p_typing=p_typing,
rhs_typing_rel=rhs_typing,
p_lhs=rule.p_lhs,
p_rhs=rule.p_rhs)
# Check the instance
_check_instance(
hierarchy, graph_id, rule.lhs, instance, lhs_typing)
# Check consistency of the (autocompleted) rhs/p/lhs typings
if lhs_typing is not None and rhs_typing is not None:
try:
_check_self_consistency(
hierarchy, lhs_typing)
<|code_end|>
, predict the next line using imports from the current file:
import networkx as nx
from networkx.exception import NetworkXNoPath
from regraph.exceptions import (ReGraphError,
RewritingError,
HierarchyError,
InvalidHomomorphism)
from regraph.utils import (keys_by_value,
format_typing,
normalize_typing_relation)
from regraph.networkx.category_utils import (check_homomorphism,
compose,
is_monic)
and context including class names, function names, and sometimes code from other files:
# Path: regraph/exceptions.py
# class ReGraphError(ReGraphException):
# """Exception for errors in ReGraph."""
#
# class RewritingError(ReGraphException):
# """Exceptions class for errors in rewriting in a hierarchy."""
#
# class HierarchyError(ReGraphException):
# """Exceptions class for hierarchy handling."""
#
# class InvalidHomomorphism(ReGraphException):
# """Exceptions class for invalid homomorphisms."""
#
# Path: regraph/utils.py
# def keys_by_value(dictionary, val):
# """Get keys of a dictionary by a value."""
# res = []
# for key, value in dictionary.items():
# if value == val:
# res.append(key)
# return res
#
# def format_typing(typing):
# if typing is None:
# typing = dict()
# new_typing = dict()
# for key, value in typing.items():
# if type(value) == dict:
# new_typing[key] = copy.deepcopy(value)
# else:
# try:
# if len(value) == 2:
# new_typing[key] = copy.deepcopy(value)
# elif len(value) == 1:
# new_typing[key] = copy.deepcopy(value[0])
# except:
# raise ReGraphError("Typing format is not valid!")
# return new_typing
#
# def normalize_typing_relation(typing_rel):
# new_typing_rel = format_typing(typing_rel)
# for g, typing_rel in new_typing_rel.items():
# for key, values in typing_rel.items():
# value_set = set()
# if type(values) == str:
# value_set.add(values)
# else:
# try:
# for v in values:
# value_set.add(v)
# except TypeError:
# value_set.add(values)
# if len(value_set) > 0:
# new_typing_rel[g][key] = value_set
# return new_typing_rel
. Output only the next line. | except ReGraphError as e: |
Predict the next line after this snippet: <|code_start|> if p_rhs[p_node] not in new_rhs_typing_rel[typing_graph].keys():
new_rhs_typing_rel[typing_graph][p_rhs[p_node]] = set()
new_rhs_typing_rel[typing_graph][p_rhs[p_node]].add(
new_lhs_typing[typing_graph][l_node])
# Second step of autocompletion of rhs typing
for graph, typing in new_rhs_typing_rel.items():
ancestors = hierarchy.get_descendants(graph)
for ancestor, ancestor_typing in ancestors.items():
dif = set(typing.keys()) -\
set(new_rhs_typing_rel[ancestor].keys())
for node in dif:
type_set = set()
for el in new_rhs_typing_rel[graph][node]:
type_set.add(ancestor_typing[el])
new_rhs_typing_rel[ancestor][node] = type_set
return (new_lhs_typing, new_p_typing, new_rhs_typing_rel)
def _check_self_consistency(hierarchy, typing, strict=True):
for typing_graph, mapping in typing.items():
ancestors = hierarchy.get_descendants(typing_graph)
for anc, anc_typing in ancestors.items():
if anc in typing.keys():
for key, value in mapping.items():
if key in typing[anc].keys():
if type(value) == str:
if value in anc_typing.keys() and\
anc_typing[value] != typing[anc][key]:
<|code_end|>
using the current file's imports:
import networkx as nx
from networkx.exception import NetworkXNoPath
from regraph.exceptions import (ReGraphError,
RewritingError,
HierarchyError,
InvalidHomomorphism)
from regraph.utils import (keys_by_value,
format_typing,
normalize_typing_relation)
from regraph.networkx.category_utils import (check_homomorphism,
compose,
is_monic)
and any relevant context from other files:
# Path: regraph/exceptions.py
# class ReGraphError(ReGraphException):
# """Exception for errors in ReGraph."""
#
# class RewritingError(ReGraphException):
# """Exceptions class for errors in rewriting in a hierarchy."""
#
# class HierarchyError(ReGraphException):
# """Exceptions class for hierarchy handling."""
#
# class InvalidHomomorphism(ReGraphException):
# """Exceptions class for invalid homomorphisms."""
#
# Path: regraph/utils.py
# def keys_by_value(dictionary, val):
# """Get keys of a dictionary by a value."""
# res = []
# for key, value in dictionary.items():
# if value == val:
# res.append(key)
# return res
#
# def format_typing(typing):
# if typing is None:
# typing = dict()
# new_typing = dict()
# for key, value in typing.items():
# if type(value) == dict:
# new_typing[key] = copy.deepcopy(value)
# else:
# try:
# if len(value) == 2:
# new_typing[key] = copy.deepcopy(value)
# elif len(value) == 1:
# new_typing[key] = copy.deepcopy(value[0])
# except:
# raise ReGraphError("Typing format is not valid!")
# return new_typing
#
# def normalize_typing_relation(typing_rel):
# new_typing_rel = format_typing(typing_rel)
# for g, typing_rel in new_typing_rel.items():
# for key, values in typing_rel.items():
# value_set = set()
# if type(values) == str:
# value_set.add(values)
# else:
# try:
# for v in values:
# value_set.add(v)
# except TypeError:
# value_set.add(values)
# if len(value_set) > 0:
# new_typing_rel[g][key] = value_set
# return new_typing_rel
. Output only the next line. | raise RewritingError( |
Using the snippet: <|code_start|>"""A collection of (internal usage) utils for rule type checking."""
def _check_rule_typing(hierarchy, rule_id, graph_id, lhs_mapping, rhs_mapping):
all_paths = dict(nx.all_pairs_shortest_path(hierarchy))
paths_from_target = {}
for s in hierarchy.nodes():
if s == graph_id:
for key in all_paths[graph_id].keys():
paths_from_target[key] = all_paths[graph_id][key]
for t in paths_from_target.keys():
if t != graph_id:
new_lhs_h = compose(
lhs_mapping,
hierarchy.compose_path_typing(paths_from_target[t]))
new_rhs_h = compose(
rhs_mapping,
hierarchy.compose_path_typing(paths_from_target[t]))
try:
# find homomorphisms from s to t via other paths
s_t_paths = nx.all_shortest_paths(hierarchy, rule_id, t)
for path in s_t_paths:
lhs_h, rhs_h = hierarchy.compose_path_typing(path)
if lhs_h != new_lhs_h:
<|code_end|>
, determine the next line of code. You have imports:
import networkx as nx
from networkx.exception import NetworkXNoPath
from regraph.exceptions import (ReGraphError,
RewritingError,
HierarchyError,
InvalidHomomorphism)
from regraph.utils import (keys_by_value,
format_typing,
normalize_typing_relation)
from regraph.networkx.category_utils import (check_homomorphism,
compose,
is_monic)
and context (class names, function names, or code) available:
# Path: regraph/exceptions.py
# class ReGraphError(ReGraphException):
# """Exception for errors in ReGraph."""
#
# class RewritingError(ReGraphException):
# """Exceptions class for errors in rewriting in a hierarchy."""
#
# class HierarchyError(ReGraphException):
# """Exceptions class for hierarchy handling."""
#
# class InvalidHomomorphism(ReGraphException):
# """Exceptions class for invalid homomorphisms."""
#
# Path: regraph/utils.py
# def keys_by_value(dictionary, val):
# """Get keys of a dictionary by a value."""
# res = []
# for key, value in dictionary.items():
# if value == val:
# res.append(key)
# return res
#
# def format_typing(typing):
# if typing is None:
# typing = dict()
# new_typing = dict()
# for key, value in typing.items():
# if type(value) == dict:
# new_typing[key] = copy.deepcopy(value)
# else:
# try:
# if len(value) == 2:
# new_typing[key] = copy.deepcopy(value)
# elif len(value) == 1:
# new_typing[key] = copy.deepcopy(value[0])
# except:
# raise ReGraphError("Typing format is not valid!")
# return new_typing
#
# def normalize_typing_relation(typing_rel):
# new_typing_rel = format_typing(typing_rel)
# for g, typing_rel in new_typing_rel.items():
# for key, values in typing_rel.items():
# value_set = set()
# if type(values) == str:
# value_set.add(values)
# else:
# try:
# for v in values:
# value_set.add(v)
# except TypeError:
# value_set.add(values)
# if len(value_set) > 0:
# new_typing_rel[g][key] = value_set
# return new_typing_rel
. Output only the next line. | raise HierarchyError( |
Based on the snippet: <|code_start|> # node will be merged
all_untyped = True
for p_node in p_nodes:
if instance[rule.p_lhs[p_node]] in typing.keys():
all_untyped = False
break
if all_untyped:
continue
if typing_graph in rhs_typing.keys() and\
node in rhs_typing[typing_graph].keys():
continue
else:
raise RewritingError(
"Rewriting is strict (no propagation of types is "
"allowed), typing of the node `%s` "
"in rhs is required (typing by the following "
"graph stays unresolved: '%s')!" %
(node, typing_graph))
def _check_instance(hierarchy, graph_id, pattern, instance, pattern_typing):
# Check that the homomorphism is valid
try:
check_homomorphism(
pattern,
hierarchy.get_graph(graph_id),
instance,
total=True
)
<|code_end|>
, predict the immediate next line with the help of imports:
import networkx as nx
from networkx.exception import NetworkXNoPath
from regraph.exceptions import (ReGraphError,
RewritingError,
HierarchyError,
InvalidHomomorphism)
from regraph.utils import (keys_by_value,
format_typing,
normalize_typing_relation)
from regraph.networkx.category_utils import (check_homomorphism,
compose,
is_monic)
and context (classes, functions, sometimes code) from other files:
# Path: regraph/exceptions.py
# class ReGraphError(ReGraphException):
# """Exception for errors in ReGraph."""
#
# class RewritingError(ReGraphException):
# """Exceptions class for errors in rewriting in a hierarchy."""
#
# class HierarchyError(ReGraphException):
# """Exceptions class for hierarchy handling."""
#
# class InvalidHomomorphism(ReGraphException):
# """Exceptions class for invalid homomorphisms."""
#
# Path: regraph/utils.py
# def keys_by_value(dictionary, val):
# """Get keys of a dictionary by a value."""
# res = []
# for key, value in dictionary.items():
# if value == val:
# res.append(key)
# return res
#
# def format_typing(typing):
# if typing is None:
# typing = dict()
# new_typing = dict()
# for key, value in typing.items():
# if type(value) == dict:
# new_typing[key] = copy.deepcopy(value)
# else:
# try:
# if len(value) == 2:
# new_typing[key] = copy.deepcopy(value)
# elif len(value) == 1:
# new_typing[key] = copy.deepcopy(value[0])
# except:
# raise ReGraphError("Typing format is not valid!")
# return new_typing
#
# def normalize_typing_relation(typing_rel):
# new_typing_rel = format_typing(typing_rel)
# for g, typing_rel in new_typing_rel.items():
# for key, values in typing_rel.items():
# value_set = set()
# if type(values) == str:
# value_set.add(values)
# else:
# try:
# for v in values:
# value_set.add(v)
# except TypeError:
# value_set.add(values)
# if len(value_set) > 0:
# new_typing_rel[g][key] = value_set
# return new_typing_rel
. Output only the next line. | except InvalidHomomorphism as e: |
Next line prediction: <|code_start|> "Homomorphism does not commute with an " +
"existing path from '%s' to '%s'!" % (s, t)
)
except(nx.NetworkXNoPath):
pass
def _autocomplete_typing(hierarchy, graph_id, instance,
lhs_typing, p_typing, rhs_typing_rel, p_lhs, p_rhs):
if lhs_typing is None:
new_lhs_typing = dict()
else:
new_lhs_typing = format_typing(lhs_typing)
if p_typing is None:
new_p_typing = dict()
else:
new_p_typing = normalize_typing_relation(p_typing)
if rhs_typing_rel is None:
new_rhs_typing_rel = dict()
else:
new_rhs_typing_rel = normalize_typing_relation(rhs_typing_rel)
successors = list(hierarchy.successors(graph_id))
if len(successors) > 0:
ancestors = hierarchy.get_descendants(graph_id)
for anc, anc_typing in ancestors.items():
if anc not in new_rhs_typing_rel.keys():
new_rhs_typing_rel[anc] = dict()
merged_nodes = set()
for r_node in p_rhs.values():
<|code_end|>
. Use current file imports:
(import networkx as nx
from networkx.exception import NetworkXNoPath
from regraph.exceptions import (ReGraphError,
RewritingError,
HierarchyError,
InvalidHomomorphism)
from regraph.utils import (keys_by_value,
format_typing,
normalize_typing_relation)
from regraph.networkx.category_utils import (check_homomorphism,
compose,
is_monic))
and context including class names, function names, or small code snippets from other files:
# Path: regraph/exceptions.py
# class ReGraphError(ReGraphException):
# """Exception for errors in ReGraph."""
#
# class RewritingError(ReGraphException):
# """Exceptions class for errors in rewriting in a hierarchy."""
#
# class HierarchyError(ReGraphException):
# """Exceptions class for hierarchy handling."""
#
# class InvalidHomomorphism(ReGraphException):
# """Exceptions class for invalid homomorphisms."""
#
# Path: regraph/utils.py
# def keys_by_value(dictionary, val):
# """Get keys of a dictionary by a value."""
# res = []
# for key, value in dictionary.items():
# if value == val:
# res.append(key)
# return res
#
# def format_typing(typing):
# if typing is None:
# typing = dict()
# new_typing = dict()
# for key, value in typing.items():
# if type(value) == dict:
# new_typing[key] = copy.deepcopy(value)
# else:
# try:
# if len(value) == 2:
# new_typing[key] = copy.deepcopy(value)
# elif len(value) == 1:
# new_typing[key] = copy.deepcopy(value[0])
# except:
# raise ReGraphError("Typing format is not valid!")
# return new_typing
#
# def normalize_typing_relation(typing_rel):
# new_typing_rel = format_typing(typing_rel)
# for g, typing_rel in new_typing_rel.items():
# for key, values in typing_rel.items():
# value_set = set()
# if type(values) == str:
# value_set.add(values)
# else:
# try:
# for v in values:
# value_set.add(v)
# except TypeError:
# value_set.add(values)
# if len(value_set) > 0:
# new_typing_rel[g][key] = value_set
# return new_typing_rel
. Output only the next line. | p_nodes = keys_by_value(p_rhs, r_node) |
Predict the next line after this snippet: <|code_start|> new_homomorphism = dict([(key, key)
for key, _ in mapping.items()])
new_homomorphism = compose(
new_homomorphism, mapping)
if t != target:
new_homomorphism = compose(
new_homomorphism,
hierarchy.compose_path_typing(paths_from_target[t])
)
# find homomorphisms from s to t via other paths
s_t_paths = nx.all_shortest_paths(hierarchy, s, t)
try:
# check only the first path
for path in s_t_paths:
path_homomorphism = hierarchy.compose_path_typing(path)
if path_homomorphism != new_homomorphism:
raise HierarchyError(
"Homomorphism does not commute with an " +
"existing path from '%s' to '%s'!" % (s, t)
)
except(nx.NetworkXNoPath):
pass
def _autocomplete_typing(hierarchy, graph_id, instance,
lhs_typing, p_typing, rhs_typing_rel, p_lhs, p_rhs):
if lhs_typing is None:
new_lhs_typing = dict()
else:
<|code_end|>
using the current file's imports:
import networkx as nx
from networkx.exception import NetworkXNoPath
from regraph.exceptions import (ReGraphError,
RewritingError,
HierarchyError,
InvalidHomomorphism)
from regraph.utils import (keys_by_value,
format_typing,
normalize_typing_relation)
from regraph.networkx.category_utils import (check_homomorphism,
compose,
is_monic)
and any relevant context from other files:
# Path: regraph/exceptions.py
# class ReGraphError(ReGraphException):
# """Exception for errors in ReGraph."""
#
# class RewritingError(ReGraphException):
# """Exceptions class for errors in rewriting in a hierarchy."""
#
# class HierarchyError(ReGraphException):
# """Exceptions class for hierarchy handling."""
#
# class InvalidHomomorphism(ReGraphException):
# """Exceptions class for invalid homomorphisms."""
#
# Path: regraph/utils.py
# def keys_by_value(dictionary, val):
# """Get keys of a dictionary by a value."""
# res = []
# for key, value in dictionary.items():
# if value == val:
# res.append(key)
# return res
#
# def format_typing(typing):
# if typing is None:
# typing = dict()
# new_typing = dict()
# for key, value in typing.items():
# if type(value) == dict:
# new_typing[key] = copy.deepcopy(value)
# else:
# try:
# if len(value) == 2:
# new_typing[key] = copy.deepcopy(value)
# elif len(value) == 1:
# new_typing[key] = copy.deepcopy(value[0])
# except:
# raise ReGraphError("Typing format is not valid!")
# return new_typing
#
# def normalize_typing_relation(typing_rel):
# new_typing_rel = format_typing(typing_rel)
# for g, typing_rel in new_typing_rel.items():
# for key, values in typing_rel.items():
# value_set = set()
# if type(values) == str:
# value_set.add(values)
# else:
# try:
# for v in values:
# value_set.add(v)
# except TypeError:
# value_set.add(values)
# if len(value_set) > 0:
# new_typing_rel[g][key] = value_set
# return new_typing_rel
. Output only the next line. | new_lhs_typing = format_typing(lhs_typing) |
Using the snippet: <|code_start|> if t != target:
new_homomorphism = compose(
new_homomorphism,
hierarchy.compose_path_typing(paths_from_target[t])
)
# find homomorphisms from s to t via other paths
s_t_paths = nx.all_shortest_paths(hierarchy, s, t)
try:
# check only the first path
for path in s_t_paths:
path_homomorphism = hierarchy.compose_path_typing(path)
if path_homomorphism != new_homomorphism:
raise HierarchyError(
"Homomorphism does not commute with an " +
"existing path from '%s' to '%s'!" % (s, t)
)
except(nx.NetworkXNoPath):
pass
def _autocomplete_typing(hierarchy, graph_id, instance,
lhs_typing, p_typing, rhs_typing_rel, p_lhs, p_rhs):
if lhs_typing is None:
new_lhs_typing = dict()
else:
new_lhs_typing = format_typing(lhs_typing)
if p_typing is None:
new_p_typing = dict()
else:
<|code_end|>
, determine the next line of code. You have imports:
import networkx as nx
from networkx.exception import NetworkXNoPath
from regraph.exceptions import (ReGraphError,
RewritingError,
HierarchyError,
InvalidHomomorphism)
from regraph.utils import (keys_by_value,
format_typing,
normalize_typing_relation)
from regraph.networkx.category_utils import (check_homomorphism,
compose,
is_monic)
and context (class names, function names, or code) available:
# Path: regraph/exceptions.py
# class ReGraphError(ReGraphException):
# """Exception for errors in ReGraph."""
#
# class RewritingError(ReGraphException):
# """Exceptions class for errors in rewriting in a hierarchy."""
#
# class HierarchyError(ReGraphException):
# """Exceptions class for hierarchy handling."""
#
# class InvalidHomomorphism(ReGraphException):
# """Exceptions class for invalid homomorphisms."""
#
# Path: regraph/utils.py
# def keys_by_value(dictionary, val):
# """Get keys of a dictionary by a value."""
# res = []
# for key, value in dictionary.items():
# if value == val:
# res.append(key)
# return res
#
# def format_typing(typing):
# if typing is None:
# typing = dict()
# new_typing = dict()
# for key, value in typing.items():
# if type(value) == dict:
# new_typing[key] = copy.deepcopy(value)
# else:
# try:
# if len(value) == 2:
# new_typing[key] = copy.deepcopy(value)
# elif len(value) == 1:
# new_typing[key] = copy.deepcopy(value[0])
# except:
# raise ReGraphError("Typing format is not valid!")
# return new_typing
#
# def normalize_typing_relation(typing_rel):
# new_typing_rel = format_typing(typing_rel)
# for g, typing_rel in new_typing_rel.items():
# for key, values in typing_rel.items():
# value_set = set()
# if type(values) == str:
# value_set.add(values)
# else:
# try:
# for v in values:
# value_set.add(v)
# except TypeError:
# value_set.add(values)
# if len(value_set) > 0:
# new_typing_rel[g][key] = value_set
# return new_typing_rel
. Output only the next line. | new_p_typing = normalize_typing_relation(p_typing) |
Given the code snippet: <|code_start|> new_lemmas.append(new_lemma)
while len(new_lemmas) > 0:
all_lemmas_for_new_words = []
for new_lemma in new_lemmas:
word = new_lemma.name()
all_lemmas_for_this_word = [lemma for ss in wn.synsets(word)
for lemma in ss.lemmas()
if lemma.name() == word]
for lemma in all_lemmas_for_this_word:
if not belongs(lemma, all_lemmas_for_new_words):
all_lemmas_for_new_words.append(lemma)
all_related_lemmas += all_lemmas_for_new_words
new_lemmas = []
for lemma in all_lemmas_for_new_words:
for new_lemma in (lemma.derivationally_related_forms() +
lemma.pertainyms()):
if (not belongs(new_lemma, all_related_lemmas) and
not belongs(new_lemma, new_lemmas)):
new_lemmas.append(new_lemma)
return all_related_lemmas
def singularize(noun):
"""
args
- noun : a noun e.g "man"
returns the singular form of the word if it finds one. Otherwise,
returns the word itself.
"""
singular = inflect.engine().singular_noun(noun)
<|code_end|>
, generate the next line using the imports in this file:
from nltk.corpus import wordnet as wn
from .constants import (ALL_WORDNET_WORDS, CONJUGATED_VERB_LIST,
ADJECTIVE_TO_ADVERB)
import nltk
import inflect
and context (functions, classes, or occasionally code) from other files:
# Path: OSMTagFinder/wordforms/constants.py
# ALL_WORDNET_WORDS = set()
#
# CONJUGATED_VERB_LIST = []
#
# ADJECTIVE_TO_ADVERB = {"good" : "well", "fast" : "fast", "hard" : "hard",
# "late" : "late", "early" : "early", "daily" : "daily",
# "straight" : "straight"}
. Output only the next line. | if singular in ALL_WORDNET_WORDS: |
Here is a snippet: <|code_start|> return noun
def get_word_forms(word):
"""
args
word : a word e.g "love"
returns the related word forms corresponding to the input word. the output
is a dictionary with four keys "n" (noun), "a" (adjective), "v" (verb)
and "r" (adverb). The value for each key is a python Set containing
related word forms with that part of speech.
e.g. {'a': {'lovable', 'loveable'},
'n': {'love', 'lover', 'lovers', 'loves'},
'r': set(),
'v': {'love', 'loved', 'loves', 'loving'}}
"""
word = singularize(word)
related_lemmas = get_related_lemmas(word)
related_words_dict = {"n" : set(), "a" : set(), "v" : set(), "r" : set()}
for lemma in related_lemmas:
pos = lemma.synset().pos()
if pos == "s":
pos = "a"
related_words_dict[pos].add(lemma.name())
noun_set = [noun for noun in related_words_dict["n"]]
for noun in noun_set:
related_words_dict["n"].add(inflect.engine().plural_noun(noun))
verb_set = [verb for verb in related_words_dict["v"]]
for verb in verb_set:
<|code_end|>
. Write the next line using the current file imports:
from nltk.corpus import wordnet as wn
from .constants import (ALL_WORDNET_WORDS, CONJUGATED_VERB_LIST,
ADJECTIVE_TO_ADVERB)
import nltk
import inflect
and context from other files:
# Path: OSMTagFinder/wordforms/constants.py
# ALL_WORDNET_WORDS = set()
#
# CONJUGATED_VERB_LIST = []
#
# ADJECTIVE_TO_ADVERB = {"good" : "well", "fast" : "fast", "hard" : "hard",
# "late" : "late", "early" : "early", "daily" : "daily",
# "straight" : "straight"}
, which may include functions, classes, or code. Output only the next line. | for conjugated_verbs in CONJUGATED_VERB_LIST: |
Given the code snippet: <|code_start|> returns the related word forms corresponding to the input word. the output
is a dictionary with four keys "n" (noun), "a" (adjective), "v" (verb)
and "r" (adverb). The value for each key is a python Set containing
related word forms with that part of speech.
e.g. {'a': {'lovable', 'loveable'},
'n': {'love', 'lover', 'lovers', 'loves'},
'r': set(),
'v': {'love', 'loved', 'loves', 'loving'}}
"""
word = singularize(word)
related_lemmas = get_related_lemmas(word)
related_words_dict = {"n" : set(), "a" : set(), "v" : set(), "r" : set()}
for lemma in related_lemmas:
pos = lemma.synset().pos()
if pos == "s":
pos = "a"
related_words_dict[pos].add(lemma.name())
noun_set = [noun for noun in related_words_dict["n"]]
for noun in noun_set:
related_words_dict["n"].add(inflect.engine().plural_noun(noun))
verb_set = [verb for verb in related_words_dict["v"]]
for verb in verb_set:
for conjugated_verbs in CONJUGATED_VERB_LIST:
if verb in conjugated_verbs:
for conjugated_verb in conjugated_verbs:
related_words_dict["v"].add(conjugated_verb)
adjective_set = [adjective for adjective in related_words_dict["a"]]
for adjective in adjective_set:
try:
<|code_end|>
, generate the next line using the imports in this file:
from nltk.corpus import wordnet as wn
from .constants import (ALL_WORDNET_WORDS, CONJUGATED_VERB_LIST,
ADJECTIVE_TO_ADVERB)
import nltk
import inflect
and context (functions, classes, or occasionally code) from other files:
# Path: OSMTagFinder/wordforms/constants.py
# ALL_WORDNET_WORDS = set()
#
# CONJUGATED_VERB_LIST = []
#
# ADJECTIVE_TO_ADVERB = {"good" : "well", "fast" : "fast", "hard" : "hard",
# "late" : "late", "early" : "early", "daily" : "daily",
# "straight" : "straight"}
. Output only the next line. | related_words_dict["r"].add(ADJECTIVE_TO_ADVERB[adjective]) |
Predict the next line after this snippet: <|code_start|> draw_axes()
text('x and y relative to width', x=0.5, y=0.5)
text('x and y relative to width, with aspect ratio for y', x=0.5,
y=0.5*(3/4.), width=300)
text('x and y given in pixels', x=100, y=100)
text('x and y given in centimetres', x='2cm', y='5cm')
display_matplotlib(gcs())
######################################################
#
#Anchors
#+++++++
#
#We could also change the anchor of the Beampy module using the `center`,
#`right`, `bottom` function in the coordinate.
#
with slide():
draw_axes()
t1 = text('Top-left absolute positioning $$x=x^2$$', x=400, y=100)
t2 = text('Top-right absolute positioning $$x=x^2$$', x=right(400), y=200)
t3 = text('Middle-middle absolute positioning $$x=x^2$$', x=center(400), y=center(300))
t4 = text('Bottom-right absolute positioning $$x=x^2$$', x=right(0.5), y=bottom(0.6))
for t in [t1, t2, t3, t4]:
<|code_end|>
using the current file's imports:
from beampy import *
from beampy.utils import bounding_box, draw_axes
and any relevant context from other files:
# Path: beampy/utils.py
# def bounding_box(element):
# """
# Function to add a bounding-box (border + anchors) to the given element.
#
# Parameters
# ----------
#
# element : Beampy Module
# The Beampy module class of the element to add the bounding box
#
# """
#
# # Add the border
# element.add_border()
#
# # Create rectangles to show anchors of the text box
# rw, rh = 10, 10
#
# anchor_selected = element.positionner.x['anchor'] + element.positionner.y['anchor']
#
# corners = [('lefttop', element.left, element.top),
# ('righttop', element.right, element.top),
# ('leftbottom', element.left, element.bottom),
# ('rightbottom', element.right, element.bottom),
# ('leftmiddle', element.left, element.center),
# ('middletop', element.center, element.top),
# ('rightmiddle', element.right, element.center),
# ('middlebottom', element.center, element.bottom),
# ('middlemiddle', element.center, element.center)]
#
# for args in corners:
# label, ex, ey = args
# rc = 'gray'
# if anchor_selected == label:
# rc = 'red'
#
# rectangle(x=ex + center(0), y=ey + center(0), width=rw, height=rh,
# color=rc, edgecolor=None)
#
# def draw_axes(dx=100, dy=100, show_ticks=False, grid_color='lightgray'):
# """
# Function to draw Beampy axis with a grid
# """
#
# grid(dx=dx, dy=dy, color=grid_color)
# ax=arrow(5, 5, document._width-35, 0)
# ay=arrow(5, 5, 0, document._height-35)
# text('$x$', x=ax.right+5, y=ax.top+0)
# text('$y$', x=ay.left+0, y=ay.bottom+5)
#
# if show_ticks:
# # add the positions in pixels
# for xt in range(0, document._width, dx):
# text('$x=%ipx$' % xt, x=xt, y=15, size=10)
#
# for yt in range(0, document._height, dx):
# text('$y=%ipx$' % yt, x=15, y=yt, size=10)
. Output only the next line. | bounding_box(t) |
Given snippet: <|code_start|>relative positioning. The default behavior is set by the theme used in the
presentation.
The default theme sets the coordinates to:
* `x='center'` which means that element is centered in the horizontal direction
* x element anchor is set to left, which means that the horizontal distance is
computed between to left side of the slide and the left border of the element
bounding-box.
* `y='auto'` which means that elements are equally spaced on the vertical
direction.
* y element anchor is set to top, which means that the vertical distance is
computed between the top of the slide and the top border of the element
bounding-box.
* The reference for computing coordinates as percent is the page or group width
for both `x` and `y`.
Slide coordinate system
-----------------------
The origin of the coordinate coordinate system is the upper-left corner of the
slide or the current group. And is positive when moving toward the bottom-right
corner.
"""
doc = document(quiet=True)
with slide():
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from beampy import *
from beampy.utils import bounding_box, draw_axes
and context:
# Path: beampy/utils.py
# def bounding_box(element):
# """
# Function to add a bounding-box (border + anchors) to the given element.
#
# Parameters
# ----------
#
# element : Beampy Module
# The Beampy module class of the element to add the bounding box
#
# """
#
# # Add the border
# element.add_border()
#
# # Create rectangles to show anchors of the text box
# rw, rh = 10, 10
#
# anchor_selected = element.positionner.x['anchor'] + element.positionner.y['anchor']
#
# corners = [('lefttop', element.left, element.top),
# ('righttop', element.right, element.top),
# ('leftbottom', element.left, element.bottom),
# ('rightbottom', element.right, element.bottom),
# ('leftmiddle', element.left, element.center),
# ('middletop', element.center, element.top),
# ('rightmiddle', element.right, element.center),
# ('middlebottom', element.center, element.bottom),
# ('middlemiddle', element.center, element.center)]
#
# for args in corners:
# label, ex, ey = args
# rc = 'gray'
# if anchor_selected == label:
# rc = 'red'
#
# rectangle(x=ex + center(0), y=ey + center(0), width=rw, height=rh,
# color=rc, edgecolor=None)
#
# def draw_axes(dx=100, dy=100, show_ticks=False, grid_color='lightgray'):
# """
# Function to draw Beampy axis with a grid
# """
#
# grid(dx=dx, dy=dy, color=grid_color)
# ax=arrow(5, 5, document._width-35, 0)
# ay=arrow(5, 5, 0, document._height-35)
# text('$x$', x=ax.right+5, y=ax.top+0)
# text('$y$', x=ay.left+0, y=ay.bottom+5)
#
# if show_ticks:
# # add the positions in pixels
# for xt in range(0, document._width, dx):
# text('$x=%ipx$' % xt, x=xt, y=15, size=10)
#
# for yt in range(0, document._height, dx):
# text('$y=%ipx$' % yt, x=15, y=yt, size=10)
which might include code, classes, or functions. Output only the next line. | draw_axes(show_ticks=True) |
Using the snippet: <|code_start|> dest="subcommand",
metavar="[subcommand]")
self.__generic_arguments(self.parser)
def add_command(self, command):
command_name = command.__class__.__name__.lower()
command.setup(command_name, self.config, self.subparser)
self.commands[command_name] = command
def parse(self):
for key, command in self.commands.items():
command.parse()
def execute(self, args):
if args.subcommand is None:
self.parser.print_help()
return None
if (args.subcommand in self.commands):
self.commands[args.subcommand].execute(args)
def __generic_arguments(self, parser):
parser.add_argument(
"-d", "--debug",
action="store_true",
help="show debug informations")
parser.add_argument(
"--version",
action="version",
<|code_end|>
, determine the next line of code. You have imports:
import argparse
from yoda.version import get_version
and context (class names, function names, or code) available:
# Path: yoda/version.py
# def get_version():
# """Get version from package resources."""
# requirement = pkg_resources.Requirement.parse("yoda")
# provider = pkg_resources.get_provider(requirement)
# return provider.version
. Output only the next line. | version="%(prog)s {}".format(get_version())) |
Given snippet: <|code_start|># the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yoda is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Yoda. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
class TestAdapter(Abstract):
def __init__(self, path):
super(TestAdapter, self).__init__(path)
def status(self):
pass
def update(self):
pass
def show(self):
pass
def clone(self):
pass
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import mock
import os
import subprocess
from mock import patch
from testfixtures import LogCapture
from tests.helpers import YodaTestHelper
from yoda.adapter import Abstract
from yoda.adapter import ExecutableNotFoundException
and context:
# Path: tests/helpers.py
# class YodaTestHelper(unittest.TestCase):
# """Yoda test helper class.
# This class provides custom assertions for yoda's tests suite.
# """
# sandbox = None
#
# def setUp(self):
# """Setup sandbox."""
# self.sandbox = Sandbox()
#
# def tearDown(self):
# """Destroy sandbox."""
# self.sandbox.destroy()
#
# def assert_config_file_contains(self, config_file, expected):
# """Custom assert to check content of config_file."""
# file = open(config_file)
# config = yaml.safe_load(file.read())
# file.close()
# self.assertEqual(config, expected)
#
# Path: yoda/adapter/abstract.py
# class Abstract:
# """SCM Adapter interface."""
# __metaclass__ = ABCMeta
#
# path = None
# executable = None
#
# def __init__(self, path):
# self.path = path
#
# def execute(self, command, path=None):
# """Execute command with os.popen and return output."""
# logger = logging.getLogger(__name__)
#
# self.check_executable()
# logger.debug("Executing command `%s` (cwd: %s)" % (command, path))
# process = subprocess.Popen(
# command,
# shell=True,
# cwd=path,
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE
# )
# stdout, stderr = process.communicate()
# exit_code = process.wait()
#
# if stdout:
# logger.info(stdout.decode("utf-8"))
#
# if stderr:
# if exit_code != 0:
# logger.error(stderr.decode("utf-8"))
# else:
# logger.info(stderr.decode("utf-8"))
#
# return process
#
# def exec_on_path(self, command):
# """Execute command in repository path."""
# self.execute("%s" % command, self.path)
#
# def check_executable(self):
# """Check adapter executable exists."""
# if not find_executable(self.executable):
# raise ExecutableNotFoundException(
# "Executable %s not found" % self.executable
# )
#
# @abstractmethod
# def status(self):
# pass
#
# @abstractmethod
# def update(self):
# pass
#
# @abstractmethod
# def show(self):
# pass
#
# @abstractmethod
# def clone(self):
# pass
#
# Path: yoda/adapter/abstract.py
# class ExecutableNotFoundException(OSError):
# pass
which might include code, classes, or functions. Output only the next line. | class TestAdapterAbstract(YodaTestHelper): |
Predict the next line for this snippet: <|code_start|>
@patch("yoda.adapter.abstract.subprocess.Popen",
return_value=mock.create_autospec(
subprocess.Popen, return_value=mock.Mock()))
def test_execute_failure(self, mock_proc):
"""Test execute with error."""
mock_com = mock_proc.return_value.communicate
mock_com.return_value = [b"Yoda", b"Rosk"]
mock_wait = mock_proc.return_value.wait
mock_wait.return_value = 1
with patch("yoda.adapter.abstract.find_executable",
return_value=True):
with LogCapture() as lcap:
self.assertEqual(
self.adapter.execute("git log"),
mock_proc.return_value)
lcap.check(("yoda.adapter.abstract", "DEBUG",
"Executing command `git log` (cwd: None)"),
("yoda.adapter.abstract", "INFO",
"Yoda"),
("yoda.adapter.abstract", "ERROR",
"Rosk"))
def test_check_executable_with_wrong_executable(self):
"""Test check executable with wrong command."""
self.adapter.executable = "wrong_executable"
with patch("yoda.adapter.abstract.find_executable",
return_value=False):
self.assertRaises(
<|code_end|>
with the help of current file imports:
import mock
import os
import subprocess
from mock import patch
from testfixtures import LogCapture
from tests.helpers import YodaTestHelper
from yoda.adapter import Abstract
from yoda.adapter import ExecutableNotFoundException
and context from other files:
# Path: tests/helpers.py
# class YodaTestHelper(unittest.TestCase):
# """Yoda test helper class.
# This class provides custom assertions for yoda's tests suite.
# """
# sandbox = None
#
# def setUp(self):
# """Setup sandbox."""
# self.sandbox = Sandbox()
#
# def tearDown(self):
# """Destroy sandbox."""
# self.sandbox.destroy()
#
# def assert_config_file_contains(self, config_file, expected):
# """Custom assert to check content of config_file."""
# file = open(config_file)
# config = yaml.safe_load(file.read())
# file.close()
# self.assertEqual(config, expected)
#
# Path: yoda/adapter/abstract.py
# class Abstract:
# """SCM Adapter interface."""
# __metaclass__ = ABCMeta
#
# path = None
# executable = None
#
# def __init__(self, path):
# self.path = path
#
# def execute(self, command, path=None):
# """Execute command with os.popen and return output."""
# logger = logging.getLogger(__name__)
#
# self.check_executable()
# logger.debug("Executing command `%s` (cwd: %s)" % (command, path))
# process = subprocess.Popen(
# command,
# shell=True,
# cwd=path,
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE
# )
# stdout, stderr = process.communicate()
# exit_code = process.wait()
#
# if stdout:
# logger.info(stdout.decode("utf-8"))
#
# if stderr:
# if exit_code != 0:
# logger.error(stderr.decode("utf-8"))
# else:
# logger.info(stderr.decode("utf-8"))
#
# return process
#
# def exec_on_path(self, command):
# """Execute command in repository path."""
# self.execute("%s" % command, self.path)
#
# def check_executable(self):
# """Check adapter executable exists."""
# if not find_executable(self.executable):
# raise ExecutableNotFoundException(
# "Executable %s not found" % self.executable
# )
#
# @abstractmethod
# def status(self):
# pass
#
# @abstractmethod
# def update(self):
# pass
#
# @abstractmethod
# def show(self):
# pass
#
# @abstractmethod
# def clone(self):
# pass
#
# Path: yoda/adapter/abstract.py
# class ExecutableNotFoundException(OSError):
# pass
, which may contain function names, class names, or code. Output only the next line. | ExecutableNotFoundException, |
Given the following code snippet before the placeholder: <|code_start|> pass
class RepositoryAdapterNotFound(RepositoryError):
"""Repository invalid because adapter not found."""
pass
class Repository:
path = None
adapter = None
scm_dirs = [".git", ".svn", ".bzr", ".hg"]
def __init__(self, path):
if not os.path.exists(path) or not os.path.isdir(path):
raise RepositoryPathInvalid(
"Path doesn't exists or isn't a directory (%s)\n" % path)
try:
scm = (set(self.scm_dirs) & set(os.listdir(path))).pop()
except KeyError:
raise RepositoryAdapterNotFound("Can't define repository type")
else:
self.path = path
if scm == ".git":
self.adapter = Git(path)
if scm == ".svn":
self.adapter = Svn(path)
if scm == ".bzr":
<|code_end|>
, predict the next line using imports from the current file:
import os
from yoda.adapter import Bzr
from yoda.adapter import Git
from yoda.adapter import Hg
from yoda.adapter import Svn
and context including class names, function names, and sometimes code from other files:
# Path: yoda/adapter/bzr.py
# class Bzr(Abstract):
# """Bzr Adapter."""
# executable = "bzr"
#
# def status(self):
# """Show bzr status."""
# return self.exec_on_path("%s status" % self.executable)
#
# def update(self):
# """Update repository."""
# return self.exec_on_path("%s pull" % self.executable)
#
# def clone(self, url):
# """Clone repository from url."""
# return self.execute("%s branch %s %s" % (self.executable,
# url, self.path))
#
# def show(self):
# """Show repository details."""
# raise NotImplemented("TODO: Not implemented")
#
# Path: yoda/adapter/git.py
# class Git(Abstract):
# """Git Adapter."""
#
# executable = "git"
#
# def status(self):
# """Show git status."""
# return self.exec_on_path("git status")
#
# def clone(self, url):
# """Clone repository from url."""
# return self.execute("git clone %s %s" % (url, self.path))
#
# def update(self):
# """Update repository."""
# return self.exec_on_path("git pull --rebase")
#
# def show(self):
# """Show repository details."""
# raise NotImplemented("TODO: Not implemented")
#
# Path: yoda/adapter/hg.py
# class Hg(Abstract):
# """Mercurial Adapter."""
# executable = "hg"
#
# def status(self):
# """Show hg status."""
# return self.exec_on_path("%s status" % self.executable)
#
# def update(self):
# """Update repository."""
# return self.exec_on_path("%s pull -u" % self.executable)
#
# def clone(self, url):
# """Clone repository from url."""
# return self.execute("%s clone %s %s" % (self.executable,
# url, self.path))
#
# def show(self):
# """Show repository details."""
# raise NotImplemented("TODO: Not implemented")
#
# Path: yoda/adapter/svn.py
# class Svn(Abstract):
# """Svn Adapter."""
#
# executable = "svn"
#
# def status(self):
# """Show git status."""
# return self.exec_on_path("svn status")
#
# def clone(self, url):
# """Checkout repository from url."""
# return self.execute("svn checkout %s %s" % (url, self.path))
#
# def update(self):
# """Update repository."""
# return self.exec_on_path("svn update")
#
# def show(self):
# """Show repository details."""
# raise NotImplemented("TODO: Not implemented")
. Output only the next line. | self.adapter = Bzr(path) |
Predict the next line for this snippet: <|code_start|>
class RepositoryPathInvalid(RepositoryError):
"""Repository path is invalid, doesn't exists or is not a directory."""
pass
class RepositoryAdapterNotFound(RepositoryError):
"""Repository invalid because adapter not found."""
pass
class Repository:
path = None
adapter = None
scm_dirs = [".git", ".svn", ".bzr", ".hg"]
def __init__(self, path):
if not os.path.exists(path) or not os.path.isdir(path):
raise RepositoryPathInvalid(
"Path doesn't exists or isn't a directory (%s)\n" % path)
try:
scm = (set(self.scm_dirs) & set(os.listdir(path))).pop()
except KeyError:
raise RepositoryAdapterNotFound("Can't define repository type")
else:
self.path = path
if scm == ".git":
<|code_end|>
with the help of current file imports:
import os
from yoda.adapter import Bzr
from yoda.adapter import Git
from yoda.adapter import Hg
from yoda.adapter import Svn
and context from other files:
# Path: yoda/adapter/bzr.py
# class Bzr(Abstract):
# """Bzr Adapter."""
# executable = "bzr"
#
# def status(self):
# """Show bzr status."""
# return self.exec_on_path("%s status" % self.executable)
#
# def update(self):
# """Update repository."""
# return self.exec_on_path("%s pull" % self.executable)
#
# def clone(self, url):
# """Clone repository from url."""
# return self.execute("%s branch %s %s" % (self.executable,
# url, self.path))
#
# def show(self):
# """Show repository details."""
# raise NotImplemented("TODO: Not implemented")
#
# Path: yoda/adapter/git.py
# class Git(Abstract):
# """Git Adapter."""
#
# executable = "git"
#
# def status(self):
# """Show git status."""
# return self.exec_on_path("git status")
#
# def clone(self, url):
# """Clone repository from url."""
# return self.execute("git clone %s %s" % (url, self.path))
#
# def update(self):
# """Update repository."""
# return self.exec_on_path("git pull --rebase")
#
# def show(self):
# """Show repository details."""
# raise NotImplemented("TODO: Not implemented")
#
# Path: yoda/adapter/hg.py
# class Hg(Abstract):
# """Mercurial Adapter."""
# executable = "hg"
#
# def status(self):
# """Show hg status."""
# return self.exec_on_path("%s status" % self.executable)
#
# def update(self):
# """Update repository."""
# return self.exec_on_path("%s pull -u" % self.executable)
#
# def clone(self, url):
# """Clone repository from url."""
# return self.execute("%s clone %s %s" % (self.executable,
# url, self.path))
#
# def show(self):
# """Show repository details."""
# raise NotImplemented("TODO: Not implemented")
#
# Path: yoda/adapter/svn.py
# class Svn(Abstract):
# """Svn Adapter."""
#
# executable = "svn"
#
# def status(self):
# """Show git status."""
# return self.exec_on_path("svn status")
#
# def clone(self, url):
# """Checkout repository from url."""
# return self.execute("svn checkout %s %s" % (url, self.path))
#
# def update(self):
# """Update repository."""
# return self.exec_on_path("svn update")
#
# def show(self):
# """Show repository details."""
# raise NotImplemented("TODO: Not implemented")
, which may contain function names, class names, or code. Output only the next line. | self.adapter = Git(path) |
Next line prediction: <|code_start|>
class RepositoryAdapterNotFound(RepositoryError):
"""Repository invalid because adapter not found."""
pass
class Repository:
path = None
adapter = None
scm_dirs = [".git", ".svn", ".bzr", ".hg"]
def __init__(self, path):
if not os.path.exists(path) or not os.path.isdir(path):
raise RepositoryPathInvalid(
"Path doesn't exists or isn't a directory (%s)\n" % path)
try:
scm = (set(self.scm_dirs) & set(os.listdir(path))).pop()
except KeyError:
raise RepositoryAdapterNotFound("Can't define repository type")
else:
self.path = path
if scm == ".git":
self.adapter = Git(path)
if scm == ".svn":
self.adapter = Svn(path)
if scm == ".bzr":
self.adapter = Bzr(path)
if scm == ".hg":
<|code_end|>
. Use current file imports:
(import os
from yoda.adapter import Bzr
from yoda.adapter import Git
from yoda.adapter import Hg
from yoda.adapter import Svn)
and context including class names, function names, or small code snippets from other files:
# Path: yoda/adapter/bzr.py
# class Bzr(Abstract):
# """Bzr Adapter."""
# executable = "bzr"
#
# def status(self):
# """Show bzr status."""
# return self.exec_on_path("%s status" % self.executable)
#
# def update(self):
# """Update repository."""
# return self.exec_on_path("%s pull" % self.executable)
#
# def clone(self, url):
# """Clone repository from url."""
# return self.execute("%s branch %s %s" % (self.executable,
# url, self.path))
#
# def show(self):
# """Show repository details."""
# raise NotImplemented("TODO: Not implemented")
#
# Path: yoda/adapter/git.py
# class Git(Abstract):
# """Git Adapter."""
#
# executable = "git"
#
# def status(self):
# """Show git status."""
# return self.exec_on_path("git status")
#
# def clone(self, url):
# """Clone repository from url."""
# return self.execute("git clone %s %s" % (url, self.path))
#
# def update(self):
# """Update repository."""
# return self.exec_on_path("git pull --rebase")
#
# def show(self):
# """Show repository details."""
# raise NotImplemented("TODO: Not implemented")
#
# Path: yoda/adapter/hg.py
# class Hg(Abstract):
# """Mercurial Adapter."""
# executable = "hg"
#
# def status(self):
# """Show hg status."""
# return self.exec_on_path("%s status" % self.executable)
#
# def update(self):
# """Update repository."""
# return self.exec_on_path("%s pull -u" % self.executable)
#
# def clone(self, url):
# """Clone repository from url."""
# return self.execute("%s clone %s %s" % (self.executable,
# url, self.path))
#
# def show(self):
# """Show repository details."""
# raise NotImplemented("TODO: Not implemented")
#
# Path: yoda/adapter/svn.py
# class Svn(Abstract):
# """Svn Adapter."""
#
# executable = "svn"
#
# def status(self):
# """Show git status."""
# return self.exec_on_path("svn status")
#
# def clone(self, url):
# """Checkout repository from url."""
# return self.execute("svn checkout %s %s" % (url, self.path))
#
# def update(self):
# """Update repository."""
# return self.exec_on_path("svn update")
#
# def show(self):
# """Show repository details."""
# raise NotImplemented("TODO: Not implemented")
. Output only the next line. | self.adapter = Hg(path) |
Here is a snippet: <|code_start|>class RepositoryPathInvalid(RepositoryError):
"""Repository path is invalid, doesn't exists or is not a directory."""
pass
class RepositoryAdapterNotFound(RepositoryError):
"""Repository invalid because adapter not found."""
pass
class Repository:
path = None
adapter = None
scm_dirs = [".git", ".svn", ".bzr", ".hg"]
def __init__(self, path):
if not os.path.exists(path) or not os.path.isdir(path):
raise RepositoryPathInvalid(
"Path doesn't exists or isn't a directory (%s)\n" % path)
try:
scm = (set(self.scm_dirs) & set(os.listdir(path))).pop()
except KeyError:
raise RepositoryAdapterNotFound("Can't define repository type")
else:
self.path = path
if scm == ".git":
self.adapter = Git(path)
if scm == ".svn":
<|code_end|>
. Write the next line using the current file imports:
import os
from yoda.adapter import Bzr
from yoda.adapter import Git
from yoda.adapter import Hg
from yoda.adapter import Svn
and context from other files:
# Path: yoda/adapter/bzr.py
# class Bzr(Abstract):
# """Bzr Adapter."""
# executable = "bzr"
#
# def status(self):
# """Show bzr status."""
# return self.exec_on_path("%s status" % self.executable)
#
# def update(self):
# """Update repository."""
# return self.exec_on_path("%s pull" % self.executable)
#
# def clone(self, url):
# """Clone repository from url."""
# return self.execute("%s branch %s %s" % (self.executable,
# url, self.path))
#
# def show(self):
# """Show repository details."""
# raise NotImplemented("TODO: Not implemented")
#
# Path: yoda/adapter/git.py
# class Git(Abstract):
# """Git Adapter."""
#
# executable = "git"
#
# def status(self):
# """Show git status."""
# return self.exec_on_path("git status")
#
# def clone(self, url):
# """Clone repository from url."""
# return self.execute("git clone %s %s" % (url, self.path))
#
# def update(self):
# """Update repository."""
# return self.exec_on_path("git pull --rebase")
#
# def show(self):
# """Show repository details."""
# raise NotImplemented("TODO: Not implemented")
#
# Path: yoda/adapter/hg.py
# class Hg(Abstract):
# """Mercurial Adapter."""
# executable = "hg"
#
# def status(self):
# """Show hg status."""
# return self.exec_on_path("%s status" % self.executable)
#
# def update(self):
# """Update repository."""
# return self.exec_on_path("%s pull -u" % self.executable)
#
# def clone(self, url):
# """Clone repository from url."""
# return self.execute("%s clone %s %s" % (self.executable,
# url, self.path))
#
# def show(self):
# """Show repository details."""
# raise NotImplemented("TODO: Not implemented")
#
# Path: yoda/adapter/svn.py
# class Svn(Abstract):
# """Svn Adapter."""
#
# executable = "svn"
#
# def status(self):
# """Show git status."""
# return self.exec_on_path("svn status")
#
# def clone(self, url):
# """Checkout repository from url."""
# return self.execute("svn checkout %s %s" % (url, self.path))
#
# def update(self):
# """Update repository."""
# return self.exec_on_path("svn update")
#
# def show(self):
# """Show repository details."""
# raise NotImplemented("TODO: Not implemented")
, which may include functions, classes, or code. Output only the next line. | self.adapter = Svn(path) |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
def default_args():
args = Mock()
args.conf_file = '.lamvery.yml'
return args
class InitActionTestCase(TestCase):
def test_action(self):
with patch('lamvery.actions.init.confirm_overwrite') as c:
c.return_value = True
<|code_end|>
, predict the next line using imports from the current file:
from unittest import TestCase
from nose.tools import eq_
from mock import Mock, patch
from lamvery.actions.init import InitAction
and context including class names, function names, and sometimes code from other files:
# Path: lamvery/actions/init.py
# class InitAction(BaseAction):
#
# def __init__(self, args):
# super(InitAction, self).__init__(args)
# self._conf_file = args.conf_file
#
# def action(self):
# if confirm_overwrite(self._conf_file):
# self._config.write(self._config.get_default(), self._conf_file)
# self._logger.info(
# 'Output initial file: {}'.format(self._conf_file))
#
# files = {
# self._config.get_event_file(): self._config.get_default_events(),
# self._config.get_secret_file(): self._config.get_default_secret(),
# self._config.get_exclude_file(): self._config.get_default_exclude(),
# self._config.get_hook_file(): self._config.get_default_hook(),
# self._config.get_api_file(): self._config.get_default_api(),
# }
#
# for f, c in files.items():
# if confirm_overwrite(f):
# self._config.write(c, f)
# self._logger.info(
# 'Output initial file: {}'.format(f))
. Output only the next line. | action = InitAction(default_args()) |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
EVENT_RULE_DIFF_KEYS = [
('Description', 'description',),
('EventPattern', 'pattern',),
('RoleArn', 'role',),
('ScheduleExpression', 'schedule',),
('State', 'state',),
]
EVENT_TARGET_DIFF_KEYS = [
('Input', 'input',),
('InputPath', 'input_path',),
]
<|code_end|>
. Use current file imports:
import json
from lamvery.actions.base import BaseAction
and context (classes, functions, or code) from other files:
# Path: lamvery/actions/base.py
# class BaseAction:
#
# __metaclass__ = ABCMeta
#
# _logger = None
#
# def __init__(self, args):
# self._config = Config(args.conf_file)
# self._dry_run = False
# self._alias = None
#
# if hasattr(args, 'alias'):
# self._alias = args.alias
#
# logger_name = 'lamvery'
# if hasattr(args, 'dry_run'):
# self._dry_run = args.dry_run
# if self._dry_run:
# logger_name = '(Dry run) lamvery'
#
# self._logger = get_logger(logger_name)
#
# @abstractmethod
# def action(self):
# raise NotImplementedError
#
# def _get_client(self, cls):
# return cls(
# region=self._config.get_region(),
# profile=self._config.get_profile(),
# dry_run=self._dry_run)
#
# def get_lambda_client(self):
# return self._get_client(LambdaClient)
#
# def get_kms_client(self):
# return self._get_client(KmsClient)
#
# def get_events_client(self):
# return self._get_client(EventsClient)
#
# def get_logs_client(self):
# return self._get_client(LogsClient)
#
# def get_apigateway_client(self):
# return self._get_client(ApiGatewayClient)
#
# def get_alias_name(self):
# if self._alias is not None:
# return self._alias
# return self._config.get_default_alias()
#
# def _get_diff(self, remote, local, keys):
# diff = {}
# for k in keys:
# r = remote.get(k[0])
# l = local.get(k[1])
# if r == l:
# diff[k[1]] = None
# else:
# diff[k[1]] = (r, l,)
# return diff
#
# def _print_diff(self, prefix, remote, local, keys):
# diff = self._get_diff(remote, local, keys)
# for k, v in diff.items():
# if v is not None:
# self._logger.warn(
# '{p} {k}: {r} -> {l}'.format(p=prefix, k=k, r=v[0], l=v[1]))
. Output only the next line. | class EventsAction(BaseAction): |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
def default_args():
args = Mock()
args.conf_file = '.lamvery.yml'
args.dry_run = True
args.no_libs = False
args.single_file = False
return args
class BuildActionTestCase(TestCase):
def tearDown(self):
if os.path.exists('test.zip'):
os.remove('test.zip')
def test_action(self):
<|code_end|>
, determine the next line of code. You have imports:
import os
from unittest import TestCase
from nose.tools import ok_
from mock import Mock
from lamvery.actions.build import BuildAction
and context (class names, function names, or code) available:
# Path: lamvery/actions/build.py
# class BuildAction(BaseAction):
#
# def __init__(self, args):
# super(BuildAction, self).__init__(args)
# self._single_file = args.single_file
# self._no_libs = args.no_libs
# self._env = parse_env_args(args.env)
#
# def action(self):
# archive_name = self._config.get_archive_name()
# function_filename = self._config.get_function_filename()
# secret = self._config.generate_lambda_secret()
# exclude = self._config.get_exclude()
#
# builder = Builder(
# filename=archive_name,
# function_filename=function_filename,
# secret=secret,
# single_file=self._single_file,
# no_libs=self._no_libs,
# exclude=exclude,
# runtime=self._config.get_runtime(),
# env=self._env,
# clean_build=self._config.is_clean_build(),
# hooks=self._config.get_build_hooks())
#
# zipfile = builder.build()
# with open(archive_name, 'w') as f:
# f.write(zipfile.read())
# zipfile.close()
#
# self._logger.info('Output archive(zip) to {}'.format(archive_name))
. Output only the next line. | action = BuildAction(default_args()) |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
def default_args():
args = Mock()
args.conf_file = '.lamvery.yml'
args.text = 'foo'
args.secret_name = 'bar'
args.store = False
return args
class EncryptActionTestCase(TestCase):
def test_action(self):
with patch('lamvery.actions.base.KmsClient'):
args = default_args()
<|code_end|>
. Write the next line using the current file imports:
from unittest import TestCase
from mock import Mock, patch
from lamvery.actions.encrypt import EncryptAction
and context from other files:
# Path: lamvery/actions/encrypt.py
# class EncryptAction(BaseAction):
#
# def __init__(self, args):
# super(EncryptAction, self).__init__(args)
# self._text = args.text
# self._name = args.secret_name
# self._store = args.store
#
# def action(self):
# cipher_text = self.get_kms_client().encrypt(
# self._config.get_secret().get('key_id'), self._text)
#
# if self._store:
# self._config.store_secret(self._name, cipher_text)
# else:
# print(cipher_text)
, which may include functions, classes, or code. Output only the next line. | action = EncryptAction(args) |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
def default_args():
args = Mock()
args.conf_file = '.lamvery.yml'
args.keep_empty_events = False
return args
class EventsActionTestCase(TestCase):
@raises(Exception)
def test_action_function_not_exists(self):
with patch('lamvery.actions.base.LambdaClient') as c:
c.get_function_conf = Mock(return_value={})
<|code_end|>
. Use current file imports:
from unittest import TestCase
from nose.tools import eq_, raises
from mock import Mock, patch
from lamvery.actions.events import EventsAction
and context (classes, functions, or code) from other files:
# Path: lamvery/actions/events.py
# class EventsAction(BaseAction):
#
# def __init__(self, args):
# super(EventsAction, self).__init__(args)
# self._keep_empty = args.keep_empty_events
#
# def action(self):
# lambda_client = self.get_lambda_client()
# events_client = self.get_events_client()
# func_name = self._config.get_function_name()
# alias_name = self.get_alias_name()
# conf = lambda_client.get_function_conf(func_name, alias_name)
#
# if len(conf) == 0:
# msg = '"{}" function does not exist. Please `deploy` first.'.format(func_name)
# raise Exception(msg)
#
# arn = conf['FunctionArn']
# local_rules = self._config.get_events().get('rules')
# remote_rules = events_client.get_rules_by_target(arn)
#
# self._clean(remote_rules, local_rules, arn, func_name, alias_name)
# self._put_rules(remote_rules, local_rules, func_name, alias_name)
# self._put_targets(local_rules, arn)
#
# def _put_rules(self, remote, local, function, alias):
# lambda_client = self.get_lambda_client()
# events_client = self.get_events_client()
#
# for l in local:
# l['state'] = self._convert_state(l.get('disabled'))
#
# r = self._search_rule(remote, l['name'])
# if len(r) == 0:
# self._logger.warn(
# '[EventRule] Create new event rule "{}"'.format(l['name']))
#
# self._print_diff(
# prefix='[EventRule] {}:'.format(l['name']),
# keys=EVENT_RULE_DIFF_KEYS,
# remote=r, local=l)
#
# ret = events_client.put_rule(l)
# lambda_client.add_permission(function, alias, l['name'], ret.get('RuleArn'))
#
# def _convert_state(self, disabled):
# if disabled:
# return 'DISABLED'
# else:
# return 'ENABLED'
#
# def _search_rule(self, rules, name):
# for r in rules:
# if name in [r.get('Name'), r.get('name')]:
# return r
# return {}
#
# def _exist_rule(self, rules, name):
# return len(self._search_rule(rules, name)) > 0
#
# def _put_targets(self, local, arn):
# client = self.get_events_client()
#
# for l in local:
# targets = client.get_targets_by_rule(l['name'])
#
# for lt in l['targets']:
# if 'input' in lt:
# lt['input'] = json.dumps(lt['input'])
#
# diff_r = {}
# for rt in targets:
# if rt['Id'] == lt['id']:
# diff_r = rt
# break
# self._logger.warn(
# '[EventRule] {}: Add "{}" to targets'.format(l['name'], lt['id']))
#
# self._print_diff(
# prefix='[EventTarget] {}:'.format(lt['id']),
# keys=EVENT_TARGET_DIFF_KEYS,
# remote=diff_r, local=lt)
#
# client.put_targets(
# rule=l['name'], targets=l['targets'], arn=arn)
#
# def _exist_target(self, targets, target_id):
# for t in targets:
# if target_id in [t.get('id'), t.get('Id')]:
# return True
# return False
#
# def _clean(self, remote, local, arn, function, alias):
# lambda_client = self.get_lambda_client()
# events_client = self.get_events_client()
#
# for r in remote:
# targets = events_client.get_targets_by_rule(r['Name'])
# target_ids = []
#
# l = self._search_rule(local, r['Name'])
#
# for rt in targets:
# msg = '[EventRule] {}: Remove undifined event target "{}"'.format(
# r['Name'], rt['Id'])
# if len(l) > 0:
# if not self._exist_target(l['targets'], rt['Id']):
# self._logger.warn(msg)
# target_ids.append(rt['Id'])
# elif rt['Arn'] == arn:
# self._logger.warn(msg)
# target_ids.append(rt['Id'])
#
# if len(target_ids) > 0:
# events_client.remove_targets(r['Name'], target_ids)
#
# if len(targets) == len(target_ids) and not self._keep_empty:
# self._logger.warn(
# '[EventRule] Delete the event rule "{}" that does not have any targets'.format(
# r['Name']))
# events_client.delete_rule(r['Name'])
# lambda_client.remove_permission(function, alias, r['Name'])
. Output only the next line. | action = EventsAction(default_args()) |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
def default_args():
args = Mock()
args.conf_file = '.lamvery.yml'
args.follow = False
args.interval = 1
args.start = '-1h'
return args
class LogsActionTestCase(TestCase):
def test_action(self):
c = Mock()
ret = [{'message': 'foo', 'eventId': 'bar', 'timestamp': int(time.time() * 1000)}]
c.get_log_events = Mock(return_value=ret)
<|code_end|>
with the help of current file imports:
import time
from unittest import TestCase
from mock import Mock
from lamvery.actions.logs import LogsAction
and context from other files:
# Path: lamvery/actions/logs.py
# class LogsAction(BaseAction):
#
# def __init__(self, args):
# super(LogsAction, self).__init__(args)
# self._follow = args.follow
# self._filter = args.filter
# self._interval = args.interval
# self._exit = False
# self._start = args.start
#
# def action(self):
# self._logger.info('Start viewing the log events...')
#
# def _exit(signum, frame):
# self._logger.info('Exit by code {} ...'.format(signum))
# self._exit = True
#
# signal.signal(signal.SIGTERM, _exit)
# signal.signal(signal.SIGINT, _exit)
#
# start = time.time()
# if self._start is not None:
# time_struct, _ = Calendar().parse(self._start)
# start = mktime(time_struct)
#
# start = int(start * 1000)
#
# client = self.get_logs_client()
# function = self._config.get_function_name()
# event_ids = {}
#
# while self._exit is False:
# events = client.get_log_events(function, start, self._filter)
#
# for e in events:
# if e['eventId'] not in event_ids:
# event_ids[e['eventId']] = None
# print(e['message'])
#
# if e['timestamp'] > start:
# start = e['timestamp']
# event_ids = {}
#
# if not self._follow:
# break
#
# time.sleep(self._interval)
, which may contain function names, class names, or code. Output only the next line. | action = LogsAction(default_args()) |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
class UtilsActionTestCase(TestCase):
def test_previous_alias(self):
eq_(previous_alias('foo'), 'foo-pre')
def test_parse_env_args(self):
<|code_end|>
, generate the next line using the imports in this file:
from unittest import TestCase
from nose.tools import eq_, raises
from mock import Mock, patch
from lamvery.utils import (
previous_alias,
parse_env_args,
confirm_overwrite
)
and context (functions, classes, or occasionally code) from other files:
# Path: lamvery/utils.py
# def previous_alias(alias):
# return '{}-pre'.format(alias)
#
# def parse_env_args(env):
# if not isinstance(env, list):
# return None
#
# ret = {}
# for e in env:
# matches = ENV_PATTERN.match(e)
#
# if matches is None:
# raise Exception(
# 'The format of "env" option must be "NAME=VALUE": {}'.format(e))
#
# name = matches.group('name')
# value = matches.group('value')
# k, v = shlex.split('{} {}'.format(name, value))
# ret[k] = v
#
# return ret
#
# def confirm_overwrite(path):
# ret = True
# if os.path.exists(path):
# print('Overwrite {}? [y/n]: '.format(path))
# y_n = sys.stdin.readline()
# if not y_n.startswith('y'):
# ret = False
# return ret
. Output only the next line. | eq_(parse_env_args('foo'), None) |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
class UtilsActionTestCase(TestCase):
def test_previous_alias(self):
eq_(previous_alias('foo'), 'foo-pre')
def test_parse_env_args(self):
eq_(parse_env_args('foo'), None)
eq_(parse_env_args(['foo=bar']), {'foo': 'bar'})
eq_(parse_env_args(['foo="bar"']), {'foo': 'bar'})
eq_(parse_env_args(['foo=\'bar baz\'']), {'foo': 'bar baz'})
@raises(Exception)
def test_parse_env_args_invalid(self):
parse_env_args(['foobar'])
def test_confirm_overwrite(self):
with patch('sys.stdin') as r:
# Overwrite yes
r.readline = Mock(return_value='y')
<|code_end|>
. Use current file imports:
from unittest import TestCase
from nose.tools import eq_, raises
from mock import Mock, patch
from lamvery.utils import (
previous_alias,
parse_env_args,
confirm_overwrite
)
and context (classes, functions, or code) from other files:
# Path: lamvery/utils.py
# def previous_alias(alias):
# return '{}-pre'.format(alias)
#
# def parse_env_args(env):
# if not isinstance(env, list):
# return None
#
# ret = {}
# for e in env:
# matches = ENV_PATTERN.match(e)
#
# if matches is None:
# raise Exception(
# 'The format of "env" option must be "NAME=VALUE": {}'.format(e))
#
# name = matches.group('name')
# value = matches.group('value')
# k, v = shlex.split('{} {}'.format(name, value))
# ret[k] = v
#
# return ret
#
# def confirm_overwrite(path):
# ret = True
# if os.path.exists(path):
# print('Overwrite {}? [y/n]: '.format(path))
# y_n = sys.stdin.readline()
# if not y_n.startswith('y'):
# ret = False
# return ret
. Output only the next line. | eq_(confirm_overwrite('.lamvery.yml'), True) |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
def default_args():
args = Mock()
args.conf_file = '.lamvery.yml'
args.secret_name = 'bar'
return args
class DecryptActionTestCase(TestCase):
def test_action(self):
with patch('lamvery.actions.base.KmsClient'):
<|code_end|>
, predict the next line using imports from the current file:
from unittest import TestCase
from mock import Mock, patch
from lamvery.actions.decrypt import DecryptAction
and context including class names, function names, and sometimes code from other files:
# Path: lamvery/actions/decrypt.py
# class DecryptAction(BaseAction):
#
# def __init__(self, args):
# super(DecryptAction, self).__init__(args)
# self._name = args.secret_name
#
# def action(self):
# text = self.get_kms_client().decrypt(
# self._config.get_secret().get('cipher_texts').get(self._name))
# print(text)
. Output only the next line. | action = DecryptAction(default_args()) |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
def default_args():
args = Mock()
args.conf_file = '.lamvery.yml'
args.alias = 'foo'
args.version = '1'
args.json = '{"foo": "bar"}'
return args
class InvokeActionTestCase(TestCase):
def test_action(self):
with patch('lamvery.actions.base.LambdaClient') as c:
m = Mock()
m.invoke = Mock(return_value={'LogResult': base64.b64encode('foo')})
c.return_value = m
<|code_end|>
. Write the next line using the current file imports:
from unittest import TestCase
from mock import Mock, patch
from lamvery.actions.invoke import InvokeAction
import base64
and context from other files:
# Path: lamvery/actions/invoke.py
# class InvokeAction(BaseAction):
#
# def __init__(self, args):
# super(InvokeAction, self).__init__(args)
#
# if os.path.exists(args.json):
# self._json = open(args.json, 'r').read()
# else:
# self._json = args.json
#
# if args.alias is None:
# self._alias = self._config.get_default_alias()
# else:
# self._alias = args.alias
#
# self._version = args.version
#
# def action(self):
# qualifier = self._alias
# client = self.get_lambda_client()
#
# if self._version is not None:
# qualifier = self._version
#
# ret = client.invoke(
# name=self._config.get_function_name(),
# qualifier=qualifier,
# payload=self._json)
#
# if ret.get('FunctionError') is None:
# cprint(base64.b64decode(ret.get('LogResult')), 'green')
# else:
# self._logger.error('{} error occurred'.format(ret.get('FunctionError')))
# cprint(base64.b64decode(ret.get('LogResult')), 'red')
, which may include functions, classes, or code. Output only the next line. | action = InvokeAction(default_args()) |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
def default_args():
args = Mock()
args.conf_file = '.lamvery.yml'
args.dry_run = True
return args
class ConfigureActionTestCase(TestCase):
@raises(Exception)
def test_action_not_exists(self):
with patch('lamvery.actions.base.LambdaClient') as c:
<|code_end|>
, determine the next line of code. You have imports:
from unittest import TestCase
from nose.tools import raises
from mock import Mock, patch
from lamvery.actions.configure import ConfigureAction
and context (class names, function names, or code) available:
# Path: lamvery/actions/configure.py
# class ConfigureAction(BaseAction):
#
# def __init__(self, args):
# super(ConfigureAction, self).__init__(args)
#
# def action(self):
# func_name = self._config.get_function_name()
# local_conf = self._config.get_configuration()
# client = self.get_lambda_client()
# remote_conf = client.get_function_conf(func_name)
# vpc_config = self._config.get_vpc_configuration()
#
# if len(remote_conf) > 0:
# self._print_diff(
# prefix='[Function]',
# remote=remote_conf, local=local_conf, keys=CONF_DIFF_KEYS)
#
# if vpc_config is not None:
# self._print_diff(
# prefix='[Function]',
# remote=remote_conf.get('VpcConfig', {}), local=vpc_config, keys=VPC_DIFF_KEYS)
#
# client.update_function_conf(local_conf)
# else:
# msg = '"{}" function does not exist. Please `deploy` first.'.format(func_name)
# raise Exception(msg)
. Output only the next line. | action = ConfigureAction(default_args()) |
Next line prediction: <|code_start|>
def remove_permission(self, function, alias, rule):
kwargs = {}
kwargs['FunctionName'] = function
kwargs['StatementId'] = self._generate_statement_id(function, rule, alias)
if alias is not None:
kwargs['Qualifier'] = alias
if not self._dry_run:
self._lambda.remove_permission(**kwargs)
def _generate_statement_id(self, function, rule, alias):
return hashlib.sha256(
'lamvery-{}-{}-{}'.format(function, rule, alias)).hexdigest()
def invoke(self, name, qualifier=None, payload=None):
kwargs = {}
kwargs['FunctionName'] = name
kwargs['InvocationType'] = 'RequestResponse'
kwargs['LogType'] = 'Tail'
if payload is not None:
kwargs['Payload'] = payload
if qualifier is not None:
kwargs['Qualifier'] = qualifier
return self._lambda.invoke(**kwargs)
def get_previous_version(self, function, alias):
<|code_end|>
. Use current file imports:
(import botocore
import hashlib
import lamvery.config
from lamvery.clients.base import BaseClient
from lamvery.utils import previous_alias)
and context including class names, function names, or small code snippets from other files:
# Path: lamvery/clients/base.py
# class BaseClient:
#
# __metaclass__ = ABCMeta
#
# def __init__(self, region=None, profile=None, dry_run=False):
# self._session = boto3.session.Session(
# profile_name=profile, region_name=region)
# self._dry_run = dry_run
# self._sts = self._session.client('sts')
#
# def get_account_id(self):
# return self._sts.get_caller_identity().get('Account')
#
# Path: lamvery/utils.py
# def previous_alias(alias):
# return '{}-pre'.format(alias)
. Output only the next line. | ver = self.get_alias(function, previous_alias(alias)) |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
def default_args():
args = Mock()
args.conf_file = '.lamvery.yml'
args.kind = 'function'
return args
class GenerateActionTestCase(TestCase):
def tearDown(self):
for suffix in ['py', 'js']:
filename = 'namespace.{}'.format(suffix)
if os.path.exists(filename):
os.remove(filename)
def test_action(self):
with patch('lamvery.actions.init.confirm_overwrite') as c:
c.return_value = True
<|code_end|>
. Write the next line using the current file imports:
import os
from unittest import TestCase
from nose.tools import eq_, raises
from mock import Mock, patch
from lamvery.actions.generate import GenerateAction
and context from other files:
# Path: lamvery/actions/generate.py
# class GenerateAction(BaseAction):
#
# def __init__(self, args):
# super(GenerateAction, self).__init__(args)
# self._kind = args.kind
#
# def action(self):
# if self._kind == 'function':
# self._generate_function(
# self._config.get_handler_namespace(),
# self._config.get_handler_function(),
# self._config.get_runtime())
# else:
# raise Exception('"{}" kind is not supported.'.format(self._kind))
#
# def _generate_function(self, namespace, function, runtime):
# if 'python' in runtime:
# path = '{}.py'.format(namespace)
# content = PY_CODE.format(function)
# elif 'nodejs' in runtime:
# path = '{}.js'.format(namespace)
# content = JS_CODE.format(function)
# else:
# raise Exception('Runtime "{}" is not supported.'.format(runtime))
#
# if confirm_overwrite(path):
# open(path, 'w').write(content)
# self._logger.info('Output skeleton function: {}'.format(path))
, which may include functions, classes, or code. Output only the next line. | action = GenerateAction(default_args()) |
Continue the code snippet: <|code_start|> def is_exclude_file(self, name):
if self.is_exclude(name):
return True
if name == self._filename:
return True
return False
def is_exclude_dir(self, name):
if self.is_exclude(name):
return True
for ex in EXCLUDE_DIR:
if name == ex:
return True
return False
def is_source_file(self, name):
return PYFILE_PATTERN.match(name) is not None
def _get_paths(self):
paths = []
if self._single_file:
f = self._function_filename
return [os.path.join(os.getcwd(), f)]
if self._clean_build:
for p in os.listdir(self._clean_build_dir):
paths.append(os.path.join(self._clean_build_dir, p))
return paths
<|code_end|>
. Use current file imports:
import os
import sys
import tempfile
import shutil
import re
import warnings
import json
import lamvery.secret
import lamvery.config
from zipfile import PyZipFile, ZIP_DEFLATED
from lamvery.log import get_logger
from lamvery.utils import run_commands
and context (classes, functions, or code) from other files:
# Path: lamvery/log.py
# def get_logger(name):
# global logger
# if logger is None:
# logger = logging.getLogger(name)
# logger.setLevel(logging.INFO)
# handler = ColoredStreamHandler(stream=sys.stderr)
# handler.setLevel(logging.INFO)
# handler.setFormatter(
# logging.Formatter('%(name)s: %(message)s'))
# logger.removeHandler(handler)
# logger.addHandler(handler)
# return logger
#
# Path: lamvery/utils.py
# def run_commands(commands, working_dir=os.getcwd()):
# cwd = os.getcwd()
# os.chdir(working_dir)
#
# for c in commands:
# try:
# subprocess.check_output(
# c, stderr=subprocess.STDOUT, shell=True)
# except subprocess.CalledProcessError as e:
# os.chdir(cwd)
# raise Exception(e.output)
#
# os.chdir(cwd)
. Output only the next line. | logger = get_logger(__name__) |
Here is a snippet: <|code_start|> self._archive_dir(zipfile, p)
else:
self._archive_file(zipfile, p)
if not self._single_file:
secret_path = os.path.join(self._tmpdir, lamvery.secret.SECRET_FILE_NAME)
env_path = os.path.join(self._tmpdir, lamvery.env.ENV_FILE_NAME)
self._generate_json(secret_path, self._secret)
self._generate_json(env_path, self._env)
self._archive_file(zipfile, secret_path)
self._archive_file(zipfile, env_path)
if self._runtime == lamvery.config.RUNTIME_NODE_JS:
self._archive_dist(zipfile, 'lamvery.js')
self._run_hooks(self._hooks.get('post', []))
return open(self._zippath, 'rb')
def _prepare_clean_build(self):
for p in os.listdir(os.getcwd()):
path = os.path.join(os.getcwd(), p)
if not path.startswith(os.environ.get('VIRTUAL_ENV')):
if os.path.isdir(path):
shutil.copytree(path, os.path.join(self._clean_build_dir, p))
else:
shutil.copyfile(path, os.path.join(self._clean_build_dir, p))
def _run_hooks(self, hooks):
if self._clean_build:
<|code_end|>
. Write the next line using the current file imports:
import os
import sys
import tempfile
import shutil
import re
import warnings
import json
import lamvery.secret
import lamvery.config
from zipfile import PyZipFile, ZIP_DEFLATED
from lamvery.log import get_logger
from lamvery.utils import run_commands
and context from other files:
# Path: lamvery/log.py
# def get_logger(name):
# global logger
# if logger is None:
# logger = logging.getLogger(name)
# logger.setLevel(logging.INFO)
# handler = ColoredStreamHandler(stream=sys.stderr)
# handler.setLevel(logging.INFO)
# handler.setFormatter(
# logging.Formatter('%(name)s: %(message)s'))
# logger.removeHandler(handler)
# logger.addHandler(handler)
# return logger
#
# Path: lamvery/utils.py
# def run_commands(commands, working_dir=os.getcwd()):
# cwd = os.getcwd()
# os.chdir(working_dir)
#
# for c in commands:
# try:
# subprocess.check_output(
# c, stderr=subprocess.STDOUT, shell=True)
# except subprocess.CalledProcessError as e:
# os.chdir(cwd)
# raise Exception(e.output)
#
# os.chdir(cwd)
, which may include functions, classes, or code. Output only the next line. | return run_commands(hooks, self._clean_build_dir) |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
def default_args():
args = Mock()
args.conf_file = '.lamvery.yml'
args.dry_run = True
args.publish = True
args.alias = None
args.version = None
args.target = None
return args
class SetAliasActionTestCase(TestCase):
@patch('lamvery.actions.base.LambdaClient')
@raises(Exception)
def test_action_not_exists(self, c):
<|code_end|>
with the help of current file imports:
from unittest import TestCase
from nose.tools import eq_, raises
from mock import Mock, patch
from lamvery.actions.set_alias import SetAliasAction
and context from other files:
# Path: lamvery/actions/set_alias.py
# class SetAliasAction(BaseAction):
#
# def __init__(self, args):
# super(SetAliasAction, self).__init__(args)
#
# if hasattr(args, 'target'):
# self._target = args.target
# else:
# self._target = None
#
# if hasattr(args, 'version'):
# self._version = args.version
# else:
# self._version = None
#
# def action(self):
# alias_name = self.get_alias_name()
# func_name = self._config.get_function_name()
# version = self.get_version(func_name)
# client = self.get_lambda_client()
#
# if alias_name is None:
# raise Exception(
# 'Please specify an alias by `-a` option or `default_alias` configuration.')
#
# current_alias = client.get_alias(func_name, alias_name)
# self._print_alias_diff(alias_name, current_alias, version)
#
# if len(current_alias) > 0:
# client.update_alias(func_name, alias_name, version)
# else:
# client.create_alias(func_name, alias_name, version)
#
# def _print_alias_diff(self, name, current, version):
# self._logger.warn(
# '[Alias] {name}: {cur} -> {new}'.format(
# name=name, cur=current.get('FunctionVersion'), new=version))
#
# def get_version(self, function):
# version = '$LATEST'
#
# if self._version is not None:
# version = self._version
#
# elif self._target is not None:
# target_alias = self.get_lambda_client().get_alias(function, self._target)
# version = target_alias.get('FunctionVersion')
#
# if version is None:
# raise Exception(
# 'Target alias "{}" does not exist in "{}" function.'.format(
# self._target, function))
#
# return version
, which may contain function names, class names, or code. Output only the next line. | action = SetAliasAction(default_args()) |
Next line prediction: <|code_start|>"stage-variables" : {
#foreach($key in $stageVariables.keySet())
"$key" : "$util.escapeJavaScript($stageVariables.get($key))"
#if($foreach.hasNext),#end
#end
},
"context" : {
"account-id" : "$context.identity.accountId",
"api-id" : "$context.apiId",
"api-key" : "$context.identity.apiKey",
"authorizer-principal-id" : "$context.authorizer.principalId",
"caller" : "$context.identity.caller",
"cognito-authentication-provider" : "$context.identity.cognitoAuthenticationProvider",
"cognito-authentication-type" : "$context.identity.cognitoAuthenticationType",
"cognito-identity-id" : "$context.identity.cognitoIdentityId",
"cognito-identity-pool-id" : "$context.identity.cognitoIdentityPoolId",
"http-method" : "$context.httpMethod",
"stage" : "$context.stage",
"source-ip" : "$context.identity.sourceIp",
"user" : "$context.identity.user",
"user-agent" : "$context.identity.userAgent",
"user-arn" : "$context.identity.userArn",
"request-id" : "$context.requestId",
"resource-id" : "$context.resourceId",
"resource-path" : "$context.resourcePath"
}
}
'''
<|code_end|>
. Use current file imports:
(import json
import re
import hashlib
from datetime import datetime
from datadiff import diff
from lamvery.actions.base import BaseAction
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import TerminalFormatter)
and context including class names, function names, or small code snippets from other files:
# Path: lamvery/actions/base.py
# class BaseAction:
#
# __metaclass__ = ABCMeta
#
# _logger = None
#
# def __init__(self, args):
# self._config = Config(args.conf_file)
# self._dry_run = False
# self._alias = None
#
# if hasattr(args, 'alias'):
# self._alias = args.alias
#
# logger_name = 'lamvery'
# if hasattr(args, 'dry_run'):
# self._dry_run = args.dry_run
# if self._dry_run:
# logger_name = '(Dry run) lamvery'
#
# self._logger = get_logger(logger_name)
#
# @abstractmethod
# def action(self):
# raise NotImplementedError
#
# def _get_client(self, cls):
# return cls(
# region=self._config.get_region(),
# profile=self._config.get_profile(),
# dry_run=self._dry_run)
#
# def get_lambda_client(self):
# return self._get_client(LambdaClient)
#
# def get_kms_client(self):
# return self._get_client(KmsClient)
#
# def get_events_client(self):
# return self._get_client(EventsClient)
#
# def get_logs_client(self):
# return self._get_client(LogsClient)
#
# def get_apigateway_client(self):
# return self._get_client(ApiGatewayClient)
#
# def get_alias_name(self):
# if self._alias is not None:
# return self._alias
# return self._config.get_default_alias()
#
# def _get_diff(self, remote, local, keys):
# diff = {}
# for k in keys:
# r = remote.get(k[0])
# l = local.get(k[1])
# if r == l:
# diff[k[1]] = None
# else:
# diff[k[1]] = (r, l,)
# return diff
#
# def _print_diff(self, prefix, remote, local, keys):
# diff = self._get_diff(remote, local, keys)
# for k, v in diff.items():
# if v is not None:
# self._logger.warn(
# '{p} {k}: {r} -> {l}'.format(p=prefix, k=k, r=v[0], l=v[1]))
. Output only the next line. | class ApiAction(BaseAction): |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
def default_args():
args = Mock()
args.conf_file = '.lamvery.yml'
args.dry_run = True
args.publish = True
args.no_libs = False
args.single_file = False
return args
class DeployActionTestCase(TestCase):
@patch('lamvery.actions.deploy.SetAliasAction')
def test_action(self, a):
# Dry run
with patch('lamvery.actions.base.LambdaClient') as c:
<|code_end|>
. Use current file imports:
from unittest import TestCase
from nose.tools import eq_
from mock import Mock, patch
from lamvery.actions.deploy import DeployAction
and context (classes, functions, or code) from other files:
# Path: lamvery/actions/deploy.py
# class DeployAction(BaseAction):
#
# def __init__(self, args):
# super(DeployAction, self).__init__(args)
# self._publish = args.publish
# self._set_alias = SetAliasAction(args)
# self._single_file = args.single_file
# self._no_libs = args.no_libs
# self._env = parse_env_args(args.env)
#
# def action(self):
# archive_name = self._config.get_archive_name()
# function_filename = self._config.get_function_filename()
# secret = self._config.generate_lambda_secret()
# exclude = self._config.get_exclude()
#
# builder = Builder(
# filename=archive_name,
# function_filename=function_filename,
# secret=secret,
# single_file=self._single_file,
# no_libs=self._no_libs,
# exclude=exclude,
# runtime=self._config.get_runtime(),
# env=self._env,
# clean_build=self._config.is_clean_build(),
# hooks=self._config.get_build_hooks())
#
# func_name = self._config.get_function_name()
# local_conf = self._config.get_configuration()
# zipfile = builder.build()
# client = self.get_lambda_client()
# remote_conf = client.get_function_conf(func_name)
# alias_name = self._set_alias.get_alias_name()
# remote_size = client.calculate_capacity()
# local_size = builder.get_size()
# new_version = None
# cur_version = None
# vpc_config = self._config.get_vpc_configuration()
#
# if len(remote_conf) == 0:
# self._logger.info(
# '[Function] Create new function "{}"'.format(func_name))
#
# self._print_diff(
# prefix='[Function]',
# remote=remote_conf, local=local_conf, keys=CONF_DIFF_KEYS)
#
# self._print_diff(
# prefix='[Function-VPC]',
# remote=remote_conf.get('VpcConfig', {}), local=vpc_config, keys=VPC_DIFF_KEYS)
#
# if len(remote_conf) > 0:
#
# if self._enable_versioning():
# cur_version = client.get_alias(
# func_name, alias_name).get('FunctionVersion')
# else:
# local_size -= remote_conf['CodeSize']
#
# client.update_function_conf(local_conf)
# self._print_capacity(remote=remote_size, local=local_size)
# new_version = client.update_function_code(
# zipfile, local_conf, self._enable_versioning())
#
# else:
# if self._enable_versioning():
# local_size *= 2
#
# self._print_capacity(
# remote=remote_size, local=local_size)
# new_version = client.create_function(
# zipfile, local_conf, self._enable_versioning())
#
# zipfile.close()
#
# if new_version is not None:
# self._logger.info(
# '[Function] Deployed version: {}'.format(new_version))
#
# if cur_version is not None:
# self._logger.info(
# '[Function] Previous version: {}'.format(cur_version))
# self._set_alias._alias = previous_alias(alias_name)
# self._set_alias._version = cur_version
# self._set_alias.action()
#
# if alias_name is not None:
# self._set_alias._alias = alias_name
# self._set_alias._version = new_version
# self._set_alias.action()
#
# def _enable_versioning(self):
# if self._publish:
# return True
# return self._config.enable_versioning()
#
# def _print_capacity(self, remote, local):
# self._logger.warn(
# '[Function] Capacity: {r} Bytes -> {t} Bytes'.format(
# r='{:,d}'.format(remote), t='{:,d}'.format(remote + local)))
. Output only the next line. | action = DeployAction(default_args()) |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
def default_args():
args = Mock()
args.conf_file = '.lamvery.yml'
args.dry_run = True
return args
class RollbackActionTestCase(TestCase):
@raises(Exception)
def test_action_function_not_exists(self):
with patch('lamvery.actions.base.LambdaClient') as c:
c.get_function_conf = Mock(return_value={})
c.get_function_conf = Mock(return_value=None)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from unittest import TestCase
from nose.tools import raises
from mock import Mock, patch
from lamvery.actions.rollback import RollbackAction
and context:
# Path: lamvery/actions/rollback.py
# class RollbackAction(BaseAction):
#
# def __init__(self, args):
# super(RollbackAction, self).__init__(args)
# self._set_alias = SetAliasAction(args)
#
# def action(self):
# func_name = self._config.get_function_name()
# client = self.get_lambda_client()
# remote_conf = client.get_function_conf(func_name)
# alias_name = self._set_alias.get_alias_name()
#
# if len(remote_conf) == 0:
# raise Exception(
# '"{}" function does not exist. Please `deploy` first.'.format(func_name))
#
# pre_version = client.get_previous_version(func_name, alias_name)
# if pre_version is None:
# raise Exception(
# 'There is no previous version. ' +
# 'Please `deploy` with `publish` option or `versioning` configuration.')
#
# self._logger.info(
# '[Function] Previous version: {}'.format(pre_version))
#
# self._set_alias._version = pre_version
# self._set_alias.action()
which might include code, classes, or functions. Output only the next line. | action = RollbackAction(default_args()) |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
CONF_DIFF_KEYS = [
('Runtime', 'runtime',),
('Role', 'role',),
('Handler', 'handler',),
('Description', 'description',),
('Timeout', 'timeout',),
('MemorySize', 'memory_size',),
]
VPC_DIFF_KEYS = [
('SubnetIds', 'subnets',),
('SecurityGroupIds', 'security_groups',),
]
<|code_end|>
with the help of current file imports:
from lamvery.actions.base import BaseAction
and context from other files:
# Path: lamvery/actions/base.py
# class BaseAction:
#
# __metaclass__ = ABCMeta
#
# _logger = None
#
# def __init__(self, args):
# self._config = Config(args.conf_file)
# self._dry_run = False
# self._alias = None
#
# if hasattr(args, 'alias'):
# self._alias = args.alias
#
# logger_name = 'lamvery'
# if hasattr(args, 'dry_run'):
# self._dry_run = args.dry_run
# if self._dry_run:
# logger_name = '(Dry run) lamvery'
#
# self._logger = get_logger(logger_name)
#
# @abstractmethod
# def action(self):
# raise NotImplementedError
#
# def _get_client(self, cls):
# return cls(
# region=self._config.get_region(),
# profile=self._config.get_profile(),
# dry_run=self._dry_run)
#
# def get_lambda_client(self):
# return self._get_client(LambdaClient)
#
# def get_kms_client(self):
# return self._get_client(KmsClient)
#
# def get_events_client(self):
# return self._get_client(EventsClient)
#
# def get_logs_client(self):
# return self._get_client(LogsClient)
#
# def get_apigateway_client(self):
# return self._get_client(ApiGatewayClient)
#
# def get_alias_name(self):
# if self._alias is not None:
# return self._alias
# return self._config.get_default_alias()
#
# def _get_diff(self, remote, local, keys):
# diff = {}
# for k in keys:
# r = remote.get(k[0])
# l = local.get(k[1])
# if r == l:
# diff[k[1]] = None
# else:
# diff[k[1]] = (r, l,)
# return diff
#
# def _print_diff(self, prefix, remote, local, keys):
# diff = self._get_diff(remote, local, keys)
# for k, v in diff.items():
# if v is not None:
# self._logger.warn(
# '{p} {k}: {r} -> {l}'.format(p=prefix, k=k, r=v[0], l=v[1]))
, which may contain function names, class names, or code. Output only the next line. | class ConfigureAction(BaseAction): |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
def default_args():
args = Mock()
args.conf_file = '.lamvery.yml'
args.path = 'requirements.txt'
args.name = 'bar.txt'
args.store = False
return args
class EncryptActionTestCase(TestCase):
def test_action(self):
with patch('lamvery.actions.base.KmsClient'):
args = default_args()
<|code_end|>
, determine the next line of code. You have imports:
from unittest import TestCase
from mock import Mock, patch
from lamvery.actions.encrypt_file import EncryptFileAction
and context (class names, function names, or code) available:
# Path: lamvery/actions/encrypt_file.py
# class EncryptFileAction(BaseAction):
#
# def __init__(self, args):
# super(EncryptFileAction, self).__init__(args)
# self._file = args.name
# self._path = args.path
# self._store = args.store
#
# def action(self):
# cipher_text = self.get_kms_client().encrypt(
# self._config.get_secret().get('key_id'), open(self._path, 'r').read())
#
# if self._store:
# self._config.store_secret_file(self._file, cipher_text)
# else:
# print(cipher_text)
. Output only the next line. | action = EncryptFileAction(args) |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
class RollbackAction(BaseAction):
def __init__(self, args):
super(RollbackAction, self).__init__(args)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from lamvery.actions.base import BaseAction
from lamvery.actions.set_alias import SetAliasAction
and context:
# Path: lamvery/actions/base.py
# class BaseAction:
#
# __metaclass__ = ABCMeta
#
# _logger = None
#
# def __init__(self, args):
# self._config = Config(args.conf_file)
# self._dry_run = False
# self._alias = None
#
# if hasattr(args, 'alias'):
# self._alias = args.alias
#
# logger_name = 'lamvery'
# if hasattr(args, 'dry_run'):
# self._dry_run = args.dry_run
# if self._dry_run:
# logger_name = '(Dry run) lamvery'
#
# self._logger = get_logger(logger_name)
#
# @abstractmethod
# def action(self):
# raise NotImplementedError
#
# def _get_client(self, cls):
# return cls(
# region=self._config.get_region(),
# profile=self._config.get_profile(),
# dry_run=self._dry_run)
#
# def get_lambda_client(self):
# return self._get_client(LambdaClient)
#
# def get_kms_client(self):
# return self._get_client(KmsClient)
#
# def get_events_client(self):
# return self._get_client(EventsClient)
#
# def get_logs_client(self):
# return self._get_client(LogsClient)
#
# def get_apigateway_client(self):
# return self._get_client(ApiGatewayClient)
#
# def get_alias_name(self):
# if self._alias is not None:
# return self._alias
# return self._config.get_default_alias()
#
# def _get_diff(self, remote, local, keys):
# diff = {}
# for k in keys:
# r = remote.get(k[0])
# l = local.get(k[1])
# if r == l:
# diff[k[1]] = None
# else:
# diff[k[1]] = (r, l,)
# return diff
#
# def _print_diff(self, prefix, remote, local, keys):
# diff = self._get_diff(remote, local, keys)
# for k, v in diff.items():
# if v is not None:
# self._logger.warn(
# '{p} {k}: {r} -> {l}'.format(p=prefix, k=k, r=v[0], l=v[1]))
#
# Path: lamvery/actions/set_alias.py
# class SetAliasAction(BaseAction):
#
# def __init__(self, args):
# super(SetAliasAction, self).__init__(args)
#
# if hasattr(args, 'target'):
# self._target = args.target
# else:
# self._target = None
#
# if hasattr(args, 'version'):
# self._version = args.version
# else:
# self._version = None
#
# def action(self):
# alias_name = self.get_alias_name()
# func_name = self._config.get_function_name()
# version = self.get_version(func_name)
# client = self.get_lambda_client()
#
# if alias_name is None:
# raise Exception(
# 'Please specify an alias by `-a` option or `default_alias` configuration.')
#
# current_alias = client.get_alias(func_name, alias_name)
# self._print_alias_diff(alias_name, current_alias, version)
#
# if len(current_alias) > 0:
# client.update_alias(func_name, alias_name, version)
# else:
# client.create_alias(func_name, alias_name, version)
#
# def _print_alias_diff(self, name, current, version):
# self._logger.warn(
# '[Alias] {name}: {cur} -> {new}'.format(
# name=name, cur=current.get('FunctionVersion'), new=version))
#
# def get_version(self, function):
# version = '$LATEST'
#
# if self._version is not None:
# version = self._version
#
# elif self._target is not None:
# target_alias = self.get_lambda_client().get_alias(function, self._target)
# version = target_alias.get('FunctionVersion')
#
# if version is None:
# raise Exception(
# 'Target alias "{}" does not exist in "{}" function.'.format(
# self._target, function))
#
# return version
which might include code, classes, or functions. Output only the next line. | self._set_alias = SetAliasAction(args) |
Predict the next line for this snippet: <|code_start|>RUNTIME_NODE_JS_43 = 'nodejs4.3'
DEFAULT_RUNTIME_NODE_JS = RUNTIME_NODE_JS_43
RUNTIME_AND_EXTENSION = {
RUNTIME_PY_27: 'py',
RUNTIME_NODE_JS: 'js',
RUNTIME_NODE_JS_43: 'js'
}
def represent_odict(dumper, instance):
return dumper.represent_mapping(u'tag:yaml.org,2002:map', instance.items())
yaml.add_representer(OrderedDict, represent_odict)
yaml.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
lambda loader, node: OrderedDict(loader.construct_pairs(node)))
class Config:
def __init__(self, conf_file):
self._file = conf_file
self._template_env = Environment(loader=FileSystemLoader('./', encoding='utf8'))
def load(self, file, default={}):
try:
tmpl = self._template_env.get_template(file)
return yaml.load(tmpl.render({'env': os.environ}))
except TemplateNotFound:
<|code_end|>
with the help of current file imports:
import yaml
import os
import re
from collections import OrderedDict
from jinja2 import Environment, FileSystemLoader
from jinja2.exceptions import TemplateNotFound
from lamvery.log import get_logger
and context from other files:
# Path: lamvery/log.py
# def get_logger(name):
# global logger
# if logger is None:
# logger = logging.getLogger(name)
# logger.setLevel(logging.INFO)
# handler = ColoredStreamHandler(stream=sys.stderr)
# handler.setLevel(logging.INFO)
# handler.setFormatter(
# logging.Formatter('%(name)s: %(message)s'))
# logger.removeHandler(handler)
# logger.addHandler(handler)
# return logger
, which may contain function names, class names, or code. Output only the next line. | get_logger(__name__).warn( |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
class InitAction(BaseAction):
def __init__(self, args):
super(InitAction, self).__init__(args)
self._conf_file = args.conf_file
def action(self):
<|code_end|>
. Use current file imports:
(from lamvery.actions.base import BaseAction
from lamvery.utils import confirm_overwrite)
and context including class names, function names, or small code snippets from other files:
# Path: lamvery/actions/base.py
# class BaseAction:
#
# __metaclass__ = ABCMeta
#
# _logger = None
#
# def __init__(self, args):
# self._config = Config(args.conf_file)
# self._dry_run = False
# self._alias = None
#
# if hasattr(args, 'alias'):
# self._alias = args.alias
#
# logger_name = 'lamvery'
# if hasattr(args, 'dry_run'):
# self._dry_run = args.dry_run
# if self._dry_run:
# logger_name = '(Dry run) lamvery'
#
# self._logger = get_logger(logger_name)
#
# @abstractmethod
# def action(self):
# raise NotImplementedError
#
# def _get_client(self, cls):
# return cls(
# region=self._config.get_region(),
# profile=self._config.get_profile(),
# dry_run=self._dry_run)
#
# def get_lambda_client(self):
# return self._get_client(LambdaClient)
#
# def get_kms_client(self):
# return self._get_client(KmsClient)
#
# def get_events_client(self):
# return self._get_client(EventsClient)
#
# def get_logs_client(self):
# return self._get_client(LogsClient)
#
# def get_apigateway_client(self):
# return self._get_client(ApiGatewayClient)
#
# def get_alias_name(self):
# if self._alias is not None:
# return self._alias
# return self._config.get_default_alias()
#
# def _get_diff(self, remote, local, keys):
# diff = {}
# for k in keys:
# r = remote.get(k[0])
# l = local.get(k[1])
# if r == l:
# diff[k[1]] = None
# else:
# diff[k[1]] = (r, l,)
# return diff
#
# def _print_diff(self, prefix, remote, local, keys):
# diff = self._get_diff(remote, local, keys)
# for k, v in diff.items():
# if v is not None:
# self._logger.warn(
# '{p} {k}: {r} -> {l}'.format(p=prefix, k=k, r=v[0], l=v[1]))
#
# Path: lamvery/utils.py
# def confirm_overwrite(path):
# ret = True
# if os.path.exists(path):
# print('Overwrite {}? [y/n]: '.format(path))
# y_n = sys.stdin.readline()
# if not y_n.startswith('y'):
# ret = False
# return ret
. Output only the next line. | if confirm_overwrite(self._conf_file): |
Next line prediction: <|code_start|>PY_CODE = """import lamvery
def {}(event, context):
# Use environment variables
# lamvery.env.load()
# print(os.environ['FOO'])
# Use KMS encryption
# print(lamvery.secret.get('foo'))
print('This is a skeleton function.')
"""
JS_CODE = """var lamvery = require('./lamvery.js');
exports.{} = function(event, context) {{
// Use environment variables
// lamvery.env.load();
// console.log(process.env.FOO);
// Use KMS encryption
// lamvery.secret.get('foo', function(err, data) {{
// console.log(data);
// }});
console.log('This is a skeleton function.');
}}
"""
<|code_end|>
. Use current file imports:
(from lamvery.actions.base import BaseAction
from lamvery.utils import confirm_overwrite)
and context including class names, function names, or small code snippets from other files:
# Path: lamvery/actions/base.py
# class BaseAction:
#
# __metaclass__ = ABCMeta
#
# _logger = None
#
# def __init__(self, args):
# self._config = Config(args.conf_file)
# self._dry_run = False
# self._alias = None
#
# if hasattr(args, 'alias'):
# self._alias = args.alias
#
# logger_name = 'lamvery'
# if hasattr(args, 'dry_run'):
# self._dry_run = args.dry_run
# if self._dry_run:
# logger_name = '(Dry run) lamvery'
#
# self._logger = get_logger(logger_name)
#
# @abstractmethod
# def action(self):
# raise NotImplementedError
#
# def _get_client(self, cls):
# return cls(
# region=self._config.get_region(),
# profile=self._config.get_profile(),
# dry_run=self._dry_run)
#
# def get_lambda_client(self):
# return self._get_client(LambdaClient)
#
# def get_kms_client(self):
# return self._get_client(KmsClient)
#
# def get_events_client(self):
# return self._get_client(EventsClient)
#
# def get_logs_client(self):
# return self._get_client(LogsClient)
#
# def get_apigateway_client(self):
# return self._get_client(ApiGatewayClient)
#
# def get_alias_name(self):
# if self._alias is not None:
# return self._alias
# return self._config.get_default_alias()
#
# def _get_diff(self, remote, local, keys):
# diff = {}
# for k in keys:
# r = remote.get(k[0])
# l = local.get(k[1])
# if r == l:
# diff[k[1]] = None
# else:
# diff[k[1]] = (r, l,)
# return diff
#
# def _print_diff(self, prefix, remote, local, keys):
# diff = self._get_diff(remote, local, keys)
# for k, v in diff.items():
# if v is not None:
# self._logger.warn(
# '{p} {k}: {r} -> {l}'.format(p=prefix, k=k, r=v[0], l=v[1]))
#
# Path: lamvery/utils.py
# def confirm_overwrite(path):
# ret = True
# if os.path.exists(path):
# print('Overwrite {}? [y/n]: '.format(path))
# y_n = sys.stdin.readline()
# if not y_n.startswith('y'):
# ret = False
# return ret
. Output only the next line. | class GenerateAction(BaseAction): |
Next line prediction: <|code_start|> console.log('This is a skeleton function.');
}}
"""
class GenerateAction(BaseAction):
def __init__(self, args):
super(GenerateAction, self).__init__(args)
self._kind = args.kind
def action(self):
if self._kind == 'function':
self._generate_function(
self._config.get_handler_namespace(),
self._config.get_handler_function(),
self._config.get_runtime())
else:
raise Exception('"{}" kind is not supported.'.format(self._kind))
def _generate_function(self, namespace, function, runtime):
if 'python' in runtime:
path = '{}.py'.format(namespace)
content = PY_CODE.format(function)
elif 'nodejs' in runtime:
path = '{}.js'.format(namespace)
content = JS_CODE.format(function)
else:
raise Exception('Runtime "{}" is not supported.'.format(runtime))
<|code_end|>
. Use current file imports:
(from lamvery.actions.base import BaseAction
from lamvery.utils import confirm_overwrite)
and context including class names, function names, or small code snippets from other files:
# Path: lamvery/actions/base.py
# class BaseAction:
#
# __metaclass__ = ABCMeta
#
# _logger = None
#
# def __init__(self, args):
# self._config = Config(args.conf_file)
# self._dry_run = False
# self._alias = None
#
# if hasattr(args, 'alias'):
# self._alias = args.alias
#
# logger_name = 'lamvery'
# if hasattr(args, 'dry_run'):
# self._dry_run = args.dry_run
# if self._dry_run:
# logger_name = '(Dry run) lamvery'
#
# self._logger = get_logger(logger_name)
#
# @abstractmethod
# def action(self):
# raise NotImplementedError
#
# def _get_client(self, cls):
# return cls(
# region=self._config.get_region(),
# profile=self._config.get_profile(),
# dry_run=self._dry_run)
#
# def get_lambda_client(self):
# return self._get_client(LambdaClient)
#
# def get_kms_client(self):
# return self._get_client(KmsClient)
#
# def get_events_client(self):
# return self._get_client(EventsClient)
#
# def get_logs_client(self):
# return self._get_client(LogsClient)
#
# def get_apigateway_client(self):
# return self._get_client(ApiGatewayClient)
#
# def get_alias_name(self):
# if self._alias is not None:
# return self._alias
# return self._config.get_default_alias()
#
# def _get_diff(self, remote, local, keys):
# diff = {}
# for k in keys:
# r = remote.get(k[0])
# l = local.get(k[1])
# if r == l:
# diff[k[1]] = None
# else:
# diff[k[1]] = (r, l,)
# return diff
#
# def _print_diff(self, prefix, remote, local, keys):
# diff = self._get_diff(remote, local, keys)
# for k, v in diff.items():
# if v is not None:
# self._logger.warn(
# '{p} {k}: {r} -> {l}'.format(p=prefix, k=k, r=v[0], l=v[1]))
#
# Path: lamvery/utils.py
# def confirm_overwrite(path):
# ret = True
# if os.path.exists(path):
# print('Overwrite {}? [y/n]: '.format(path))
# y_n = sys.stdin.readline()
# if not y_n.startswith('y'):
# ret = False
# return ret
. Output only the next line. | if confirm_overwrite(path): |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
SECRET_FILE_NAME = '.lamvery_secret.json'
SECRET_DIR = '/tmp/.lamvery-secret'
def get(name):
data = json.load(open(SECRET_FILE_NAME, 'r'))
if 'cipher_texts' not in data:
return None
if name not in data['cipher_texts']:
return None
<|code_end|>
, generate the next line using the imports in this file:
import json
import os
from lamvery.clients.kms import KmsClient
and context (functions, classes, or occasionally code) from other files:
# Path: lamvery/clients/kms.py
# class KmsClient(BaseClient):
#
# def __init__(self, *args, **kwargs):
# super(KmsClient, self).__init__(*args, **kwargs)
# self._kms = self._session.client('kms')
#
# def encrypt(self, key_id, text):
# res = self._kms.encrypt(KeyId=key_id, Plaintext=text)
# return base64.b64encode(res.get('CiphertextBlob'))
#
# def decrypt(self, cipher_text):
# res = self._kms.decrypt(CiphertextBlob=base64.b64decode(cipher_text))
# return res.get('Plaintext')
. Output only the next line. | client = KmsClient(region=data.get('region')) |
Predict the next line after this snippet: <|code_start|># -*- coding: utf-8 -*-
def default_args():
args = Mock()
args.conf_file = '.lamvery.yml'
args.dry_run = True
args.alias = None
return args
<|code_end|>
using the current file's imports:
from unittest import TestCase
from nose.tools import eq_, raises
from mock import Mock
from lamvery.actions.base import BaseAction
from lamvery.actions.configure import CONF_DIFF_KEYS
and any relevant context from other files:
# Path: lamvery/actions/base.py
# class BaseAction:
#
# __metaclass__ = ABCMeta
#
# _logger = None
#
# def __init__(self, args):
# self._config = Config(args.conf_file)
# self._dry_run = False
# self._alias = None
#
# if hasattr(args, 'alias'):
# self._alias = args.alias
#
# logger_name = 'lamvery'
# if hasattr(args, 'dry_run'):
# self._dry_run = args.dry_run
# if self._dry_run:
# logger_name = '(Dry run) lamvery'
#
# self._logger = get_logger(logger_name)
#
# @abstractmethod
# def action(self):
# raise NotImplementedError
#
# def _get_client(self, cls):
# return cls(
# region=self._config.get_region(),
# profile=self._config.get_profile(),
# dry_run=self._dry_run)
#
# def get_lambda_client(self):
# return self._get_client(LambdaClient)
#
# def get_kms_client(self):
# return self._get_client(KmsClient)
#
# def get_events_client(self):
# return self._get_client(EventsClient)
#
# def get_logs_client(self):
# return self._get_client(LogsClient)
#
# def get_apigateway_client(self):
# return self._get_client(ApiGatewayClient)
#
# def get_alias_name(self):
# if self._alias is not None:
# return self._alias
# return self._config.get_default_alias()
#
# def _get_diff(self, remote, local, keys):
# diff = {}
# for k in keys:
# r = remote.get(k[0])
# l = local.get(k[1])
# if r == l:
# diff[k[1]] = None
# else:
# diff[k[1]] = (r, l,)
# return diff
#
# def _print_diff(self, prefix, remote, local, keys):
# diff = self._get_diff(remote, local, keys)
# for k, v in diff.items():
# if v is not None:
# self._logger.warn(
# '{p} {k}: {r} -> {l}'.format(p=prefix, k=k, r=v[0], l=v[1]))
#
# Path: lamvery/actions/configure.py
# CONF_DIFF_KEYS = [
# ('Runtime', 'runtime',),
# ('Role', 'role',),
# ('Handler', 'handler',),
# ('Description', 'description',),
# ('Timeout', 'timeout',),
# ('MemorySize', 'memory_size',),
# ]
. Output only the next line. | class TestAction(BaseAction): |
Given the code snippet: <|code_start|>
class BaseActionTestCase(TestCase):
@raises(TypeError)
def test_action(self):
BaseAction(default_args())
def test_get_client(self):
TestAction(default_args())._get_client(Mock)
def test_get_diff(self):
remote = {
'Runtime': 'python2.7',
'Role': 'foo',
'Handler': 'bar',
}
local = {
'runtime': 'python2.7',
'role': 'bar',
}
ret = {
'runtime': None,
'role': ('foo', 'bar',),
'handler': ('bar', None,),
'description': None,
'timeout': None,
'memory_size': None,
}
action = TestAction(default_args())
<|code_end|>
, generate the next line using the imports in this file:
from unittest import TestCase
from nose.tools import eq_, raises
from mock import Mock
from lamvery.actions.base import BaseAction
from lamvery.actions.configure import CONF_DIFF_KEYS
and context (functions, classes, or occasionally code) from other files:
# Path: lamvery/actions/base.py
# class BaseAction:
#
# __metaclass__ = ABCMeta
#
# _logger = None
#
# def __init__(self, args):
# self._config = Config(args.conf_file)
# self._dry_run = False
# self._alias = None
#
# if hasattr(args, 'alias'):
# self._alias = args.alias
#
# logger_name = 'lamvery'
# if hasattr(args, 'dry_run'):
# self._dry_run = args.dry_run
# if self._dry_run:
# logger_name = '(Dry run) lamvery'
#
# self._logger = get_logger(logger_name)
#
# @abstractmethod
# def action(self):
# raise NotImplementedError
#
# def _get_client(self, cls):
# return cls(
# region=self._config.get_region(),
# profile=self._config.get_profile(),
# dry_run=self._dry_run)
#
# def get_lambda_client(self):
# return self._get_client(LambdaClient)
#
# def get_kms_client(self):
# return self._get_client(KmsClient)
#
# def get_events_client(self):
# return self._get_client(EventsClient)
#
# def get_logs_client(self):
# return self._get_client(LogsClient)
#
# def get_apigateway_client(self):
# return self._get_client(ApiGatewayClient)
#
# def get_alias_name(self):
# if self._alias is not None:
# return self._alias
# return self._config.get_default_alias()
#
# def _get_diff(self, remote, local, keys):
# diff = {}
# for k in keys:
# r = remote.get(k[0])
# l = local.get(k[1])
# if r == l:
# diff[k[1]] = None
# else:
# diff[k[1]] = (r, l,)
# return diff
#
# def _print_diff(self, prefix, remote, local, keys):
# diff = self._get_diff(remote, local, keys)
# for k, v in diff.items():
# if v is not None:
# self._logger.warn(
# '{p} {k}: {r} -> {l}'.format(p=prefix, k=k, r=v[0], l=v[1]))
#
# Path: lamvery/actions/configure.py
# CONF_DIFF_KEYS = [
# ('Runtime', 'runtime',),
# ('Role', 'role',),
# ('Handler', 'handler',),
# ('Description', 'description',),
# ('Timeout', 'timeout',),
# ('MemorySize', 'memory_size',),
# ]
. Output only the next line. | eq_(action._get_diff(remote, local, CONF_DIFF_KEYS), ret) |
Based on the snippet: <|code_start|> logger.debug('Skipping subtitle: %r already downloaded', subtitle.language)
continue
if subtitle.provider_name in discarded_providers:
logger.debug('Skipping subtitle from discarded provider %r', subtitle.provider_name)
continue
# initialize provider
if subtitle.provider_name in initialized_providers:
provider = initialized_providers[subtitle.provider_name]
else:
provider = providers_by_name[subtitle.provider_name](**provider_configs.get(subtitle.provider_name, {}))
try:
provider.initialize()
except ProviderNotAvailable as err:
logger.warning('Provider %r is not available, discarding it', subtitle.provider_name)
logger.debug('ProviderNotAvailable error: %r', str(err))
discarded_providers.add(subtitle.provider_name)
continue
except socket_error as err:
logger.warning('Provider %r is not responding, discarding it', subtitle.provider_name)
logger.debug('Provider socket error: %r', str(err))
discarded_providers.add(subtitle.provider_name)
continue
except:
logger.exception('Unexpected error in provider %r', subtitle.provider_name)
discarded_providers.add(subtitle.provider_name)
continue
initialized_providers[subtitle.provider_name] = provider
# download subtitles
<|code_end|>
, predict the immediate next line with the help of imports:
import collections
import io
import logging
import operator
import babelfish
import pkg_resources
from os.path import basename
from .exceptions import ProviderNotAvailable, InvalidSubtitle
from .subtitle import get_subtitle_path
from socket import error as socket_error
and context (classes, functions, sometimes code) from other files:
# Path: Subliminal/subliminal/subtitle.py
# def get_subtitle_path(video_path, language=None):
# """Create the subtitle path from the given `video_path` and `language`
#
# :param string video_path: path to the video
# :param language: language of the subtitle to put in the path
# :type language: :class:`babelfish.Language` or None
# :return: path of the subtitle
# :rtype: string
#
# """
# subtitle_path = os.path.splitext(video_path)[0]
# if isinstance(subtitle_path, str):
# try:
# subtitle_path = subtitle_path.decode('utf-8', errors='ignore')
# except TypeError:
# # python <= 2.6
# subtitle_path = subtitle_path.decode('utf-8', 'ignore')
#
# if language is not None:
# try:
# return subtitle_path + '.%s.%s' % (language.alpha2, 'srt')
# except babelfish.LanguageConvertError:
# return subtitle_path + '.%s.%s' % (language.alpha3, 'srt')
# return subtitle_path + '.srt'
. Output only the next line. | subtitle_path = get_subtitle_path(video.name, None if single else subtitle.language) |
Next line prediction: <|code_start|> raise NotImplementedError
def compute_score(self, video, hi_score_adjust=0):
"""Compute the score of the subtitle against the `video`
There are equivalent matches so that a provider can match one element or its equivalent. This is
to give all provider a chance to have a score in the same range without hurting quality.
* Matching :class:`~subliminal.video.Video`'s `hashes` is equivalent to matching everything else
* Matching :class:`~subliminal.video.Episode`'s `season` and `episode`
is equivalent to matching :class:`~subliminal.video.Episode`'s `title`
* Matching :class:`~subliminal.video.Episode`'s `tvdb_id` is equivalent to matching
:class:`~subliminal.video.Episode`'s `series`
:param video: the video to compute the score against
:type video: :class:`~subliminal.video.Video`
:param hi_score_adjust: adjust hearing impaired matched videos by this value
:return: score of the subtitle
:rtype: int
"""
score = 0
# compute matches
initial_matches = self.compute_matches(video)
matches = initial_matches.copy()
# hash is the perfect match
if 'hash' in matches:
score = video.scores['hash']
else:
# remove equivalences
<|code_end|>
. Use current file imports:
(import logging
import os.path
import babelfish
import pysrt
import re
from .video import Episode, Movie
from chardet import detect as chardet_detect
from chared.detector import get_model_path, EncodingDetector)
and context including class names, function names, or small code snippets from other files:
# Path: Subliminal/subliminal/video.py
# class Episode(Video):
# """Episode :class:`Video`
#
# Scores are defined by a set of equations, see :func:`~subliminal.score.get_episode_equations`
#
# :param string series: series of the episode
# :param int season: season number of the episode
# :param int episode: episode number of the episode
# :param string title: title of the episode
# :param int tvdb_id: TheTVDB id of the episode
#
# """
# scores = {'title': 12, 'video_codec': 2, 'imdb_id': 35, 'audio_codec': 1, 'tvdb_id': 23, 'resolution': 2,
# 'season': 6, 'release_group': 6, 'series': 23, 'episode': 6, 'hash': 46}
#
# def __init__(self, name, series, season, episode, release_group=None, resolution=None, video_codec=None,
# audio_codec=None, imdb_id=None, hashes=None, size=None, subtitle_languages=None, title=None,
# tvdb_id=None):
# super(Episode, self).__init__(name, release_group, resolution, video_codec, audio_codec, imdb_id, hashes,
# size, subtitle_languages)
# self.series = series
# self.season = season
# self.episode = episode
# self.title = title
# self.tvdb_id = tvdb_id
#
# @classmethod
# def fromguess(cls, name, guess):
# if guess['type'] != 'episode':
# raise ValueError('The guess must be an episode guess')
# if 'series' not in guess or 'season' not in guess or 'episodeNumber' not in guess:
# raise ValueError('Insufficient data to process the guess')
# return cls(name, guess['series'], guess['season'], guess['episodeNumber'],
# release_group=guess.get('releaseGroup'), resolution=guess.get('screenSize'),
# video_codec=guess.get('videoCodec'), audio_codec=guess.get('audioCodec'),
# title=guess.get('title'))
#
# def __repr__(self):
# return '<%s [%r, %rx%r]>' % (self.__class__.__name__, self.series, self.season, self.episode)
#
# def __hash__(self):
# return hash((
# self.series,
# self.season,
# self.episode,
# ))
#
# def __eq__(self, other):
# return self.__class__.__name__ == other.__class__.__name__\
# and self.series == other.series\
# and self.season == other.season\
# and self.episode == other.episode
#
# class Movie(Video):
# """Movie :class:`Video`
#
# Scores are defined by a set of equations, see :func:`~subliminal.score.get_movie_equations`
#
# :param string title: title of the movie
# :param int year: year of the movie
#
# """
# scores = {'title': 13, 'video_codec': 2, 'resolution': 2, 'audio_codec': 1, 'year': 7, 'imdb_id': 31,
# 'release_group': 6, 'hash': 31}
#
# def __init__(self, name, title, release_group=None, resolution=None, video_codec=None, audio_codec=None,
# imdb_id=None, hashes=None, size=None, subtitle_languages=None, year=None):
# super(Movie, self).__init__(name, release_group, resolution, video_codec, audio_codec, imdb_id, hashes,
# size, subtitle_languages)
# self.title = title
# self.year = year
#
# @classmethod
# def fromguess(cls, name, guess):
# if guess['type'] != 'movie':
# raise ValueError('The guess must be a movie guess')
# if 'title' not in guess:
# raise ValueError('Insufficient data to process the guess')
# return cls(name, guess['title'], release_group=guess.get('releaseGroup'), resolution=guess.get('screenSize'),
# video_codec=guess.get('videoCodec'), audio_codec=guess.get('audioCodec'),
# year=guess.get('year'))
#
# def __repr__(self):
# if self.year is None:
# return '<%s [%r]>' % (self.__class__.__name__, self.title)
# return '<%s [%r, %r]>' % (self.__class__.__name__, self.title, self.year)
#
# def __hash__(self):
# if self.year is None:
# return hash((
# self.title,
# self.year,
# ))
# return hash(self.title)
#
# def __eq__(self, other):
# return self.__class__.__name__ == other.__class__.__name__\
# and self.title == other.title\
# and self.year == other.year
. Output only the next line. | if isinstance(video, Episode): |
Using the snippet: <|code_start|> except pysrt.Error as e:
if e.args[0] > 80:
return True
except:
logger.exception('Unexpected error when validating subtitle')
return False
def compute_guess_matches(video, guess):
"""Compute matches between a `video` and a `guess`
:param video: the video to compute the matches on
:type video: :class:`~subliminal.video.Video`
:param guess: the guess to compute the matches on
:type guess: :class:`guessit.Guess`
:return: matches of the `guess`
:rtype: set
"""
matches = set()
if isinstance(video, Episode):
# Series
if video.series and 'series' in guess and guess['series'].lower() == video.series.lower():
matches.add('series')
# Season
if video.season and 'seasonNumber' in guess and guess['seasonNumber'] == video.season:
matches.add('season')
# Episode
if video.episode and 'episodeNumber' in guess and guess['episodeNumber'] == video.episode:
matches.add('episode')
<|code_end|>
, determine the next line of code. You have imports:
import logging
import os.path
import babelfish
import pysrt
import re
from .video import Episode, Movie
from chardet import detect as chardet_detect
from chared.detector import get_model_path, EncodingDetector
and context (class names, function names, or code) available:
# Path: Subliminal/subliminal/video.py
# class Episode(Video):
# """Episode :class:`Video`
#
# Scores are defined by a set of equations, see :func:`~subliminal.score.get_episode_equations`
#
# :param string series: series of the episode
# :param int season: season number of the episode
# :param int episode: episode number of the episode
# :param string title: title of the episode
# :param int tvdb_id: TheTVDB id of the episode
#
# """
# scores = {'title': 12, 'video_codec': 2, 'imdb_id': 35, 'audio_codec': 1, 'tvdb_id': 23, 'resolution': 2,
# 'season': 6, 'release_group': 6, 'series': 23, 'episode': 6, 'hash': 46}
#
# def __init__(self, name, series, season, episode, release_group=None, resolution=None, video_codec=None,
# audio_codec=None, imdb_id=None, hashes=None, size=None, subtitle_languages=None, title=None,
# tvdb_id=None):
# super(Episode, self).__init__(name, release_group, resolution, video_codec, audio_codec, imdb_id, hashes,
# size, subtitle_languages)
# self.series = series
# self.season = season
# self.episode = episode
# self.title = title
# self.tvdb_id = tvdb_id
#
# @classmethod
# def fromguess(cls, name, guess):
# if guess['type'] != 'episode':
# raise ValueError('The guess must be an episode guess')
# if 'series' not in guess or 'season' not in guess or 'episodeNumber' not in guess:
# raise ValueError('Insufficient data to process the guess')
# return cls(name, guess['series'], guess['season'], guess['episodeNumber'],
# release_group=guess.get('releaseGroup'), resolution=guess.get('screenSize'),
# video_codec=guess.get('videoCodec'), audio_codec=guess.get('audioCodec'),
# title=guess.get('title'))
#
# def __repr__(self):
# return '<%s [%r, %rx%r]>' % (self.__class__.__name__, self.series, self.season, self.episode)
#
# def __hash__(self):
# return hash((
# self.series,
# self.season,
# self.episode,
# ))
#
# def __eq__(self, other):
# return self.__class__.__name__ == other.__class__.__name__\
# and self.series == other.series\
# and self.season == other.season\
# and self.episode == other.episode
#
# class Movie(Video):
# """Movie :class:`Video`
#
# Scores are defined by a set of equations, see :func:`~subliminal.score.get_movie_equations`
#
# :param string title: title of the movie
# :param int year: year of the movie
#
# """
# scores = {'title': 13, 'video_codec': 2, 'resolution': 2, 'audio_codec': 1, 'year': 7, 'imdb_id': 31,
# 'release_group': 6, 'hash': 31}
#
# def __init__(self, name, title, release_group=None, resolution=None, video_codec=None, audio_codec=None,
# imdb_id=None, hashes=None, size=None, subtitle_languages=None, year=None):
# super(Movie, self).__init__(name, release_group, resolution, video_codec, audio_codec, imdb_id, hashes,
# size, subtitle_languages)
# self.title = title
# self.year = year
#
# @classmethod
# def fromguess(cls, name, guess):
# if guess['type'] != 'movie':
# raise ValueError('The guess must be a movie guess')
# if 'title' not in guess:
# raise ValueError('Insufficient data to process the guess')
# return cls(name, guess['title'], release_group=guess.get('releaseGroup'), resolution=guess.get('screenSize'),
# video_codec=guess.get('videoCodec'), audio_codec=guess.get('audioCodec'),
# year=guess.get('year'))
#
# def __repr__(self):
# if self.year is None:
# return '<%s [%r]>' % (self.__class__.__name__, self.title)
# return '<%s [%r, %r]>' % (self.__class__.__name__, self.title, self.year)
#
# def __hash__(self):
# if self.year is None:
# return hash((
# self.title,
# self.year,
# ))
# return hash(self.title)
#
# def __eq__(self, other):
# return self.__class__.__name__ == other.__class__.__name__\
# and self.title == other.title\
# and self.year == other.year
. Output only the next line. | elif isinstance(video, Movie): |
Given snippet: <|code_start|>
# load from system path
if launch_config.startswith('~') or launch_config.startswith('/'):
launchpath = os.path.expanduser(launch_config)
# load from package path
elif re.match(r'.+\.launch$', launch_config):
rp = rospkg.RosPack()
pkgpath = rp.get_path(launch_config.split('/')[0])
launchpath = os.path.join(pkgpath, '/'.join(launch_config.split('/')[1:]))
# load from config definition
else:
launchcontent = launch_config
launchconfig = roslaunch.config.ROSLaunchConfig()
loader = roslaunch.xmlloader.XmlLoader()
if launchpath is not None:
loader.load(launchpath, launchconfig, verbose=False)
else:
loader.load_string(launchcontent, launchconfig, verbose=False)
self._launchrunner = roslaunch.launch.ROSLaunchRunner(self._run_id, launchconfig)
def store(process_name, exit_code):
self._exit_codes[process_name] = exit_code
self._launchrunner.add_process_listener(Callback(store))
self._wait_cond = wait_cond
self._valid = True
def __enter__(self):
self._launchrunner.launch()
self._launchrunner.spin_once()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import re
import rospy
import rospkg
import roslaunch
from .logger import Logger
and context:
# Path: flexbe_testing/src/flexbe_testing/logger.py
# class Logger(object):
# """ Bundles static methods for test case logging. """
#
# @classmethod
# def _param_positive(cls):
# return not cls._param_compact() and rospy.get_param('~print_debug_positive', True)
#
# @classmethod
# def _param_negative(cls):
# return cls._param_compact() or rospy.get_param('~print_debug_negative', True)
#
# @classmethod
# def _param_compact(cls):
# return rospy.get_param('~compact_format', False)
#
# @classmethod
# def _prefix(cls):
# return ' >' if cls._param_compact() else '>>>'
#
# @classmethod
# def _counter(cls):
# cls._counter_value += 1
# return cls._counter_value
# _counter_value = 0
#
# @classmethod
# def mute_rospy(cls):
# """ Conditionally mute the rospy logging channels. """
# if cls._param_compact() or rospy.get_param('~mute_info', False):
# rospy.loginfo = rospy.logdebug
# if cls._param_compact() or rospy.get_param('~mute_warn', False):
# rospy.logwarn = rospy.logdebug
# if not cls._param_compact() and rospy.get_param('~mute_error', False):
# rospy.logerr = rospy.logdebug
#
# @classmethod
# def print_positive(cls, text):
# """ Print a positive intermediate result. """
# if cls._param_positive():
# print('\033[0m\033[1m +\033[0m %s' % str(text))
#
# @classmethod
# def print_negative(cls, text):
# """ Print a negative intermediate result. """
# if cls._param_negative():
# print('\033[0m\033[1m -\033[0m %s' % str(text))
#
# @classmethod
# def print_title(cls, test_name, test_class, result=None):
# """ Print the title of the test, should be called once and before any other print method. """
# test_result = ' > %s' % result if result is not None else ''
# print('\033[34;1m#%2d %s \033[0m\033[34m(%s%s)\033[0m' % (
# cls._counter(), test_name, test_class, test_result
# ))
#
# @classmethod
# def print_result(cls, test_name, success):
# """ Print the result, should be called once and after any other print method. """
# test_result = 'completed' if success else 'failed'
# color = '32' if success else '31'
# print('\033[%s;1m%s\033[0m\033[%sm %s %s!\033[0m' % (color, cls._prefix(), color, test_name, test_result))
#
# @classmethod
# def print_failure(cls, text):
# """ Instead of a result, print the failure of a test case once after any other print method. """
# traceback.print_exc()
# print('\033[31;1m%s\033[0m\033[31m %s\033[0m' % (cls._prefix(), str(text)))
#
# @classmethod
# def print_error(cls, text):
# """ Print an internal error that might cause unexpected behavior, but does not cause failure itself. """
# print('\033[33;1m \033[0m\033[33m %s\033[0m' % str(text))
#
# def __init__(self):
# """ DO NOT USE: use class print methods instead. """
# raise NotImplementedError("use static methods and attributes")
which might include code, classes, or functions. Output only the next line. | Logger.print_positive('launchfile running') |
Given snippet: <|code_start|>#!/usr/bin/env python
class DataProvider(object):
""" Provides an interface for required test case data. """
def __init__(self, bagfile=None):
self._bag = None
if bagfile is not None:
bagpath = ''
# absolute path
if bagfile.startswith('~') or bagfile.startswith('/'):
bagpath = os.path.expanduser(bagfile)
# package-relative path
else:
rp = rospkg.RosPack()
pkgpath = rp.get_path(bagfile.split('/')[0])
bagpath = os.path.join(pkgpath, '/'.join(bagfile.split('/')[1:]))
self._bag = rosbag.Bag(bagpath)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import rospkg
import rosbag
from .logger import Logger
and context:
# Path: flexbe_testing/src/flexbe_testing/logger.py
# class Logger(object):
# """ Bundles static methods for test case logging. """
#
# @classmethod
# def _param_positive(cls):
# return not cls._param_compact() and rospy.get_param('~print_debug_positive', True)
#
# @classmethod
# def _param_negative(cls):
# return cls._param_compact() or rospy.get_param('~print_debug_negative', True)
#
# @classmethod
# def _param_compact(cls):
# return rospy.get_param('~compact_format', False)
#
# @classmethod
# def _prefix(cls):
# return ' >' if cls._param_compact() else '>>>'
#
# @classmethod
# def _counter(cls):
# cls._counter_value += 1
# return cls._counter_value
# _counter_value = 0
#
# @classmethod
# def mute_rospy(cls):
# """ Conditionally mute the rospy logging channels. """
# if cls._param_compact() or rospy.get_param('~mute_info', False):
# rospy.loginfo = rospy.logdebug
# if cls._param_compact() or rospy.get_param('~mute_warn', False):
# rospy.logwarn = rospy.logdebug
# if not cls._param_compact() and rospy.get_param('~mute_error', False):
# rospy.logerr = rospy.logdebug
#
# @classmethod
# def print_positive(cls, text):
# """ Print a positive intermediate result. """
# if cls._param_positive():
# print('\033[0m\033[1m +\033[0m %s' % str(text))
#
# @classmethod
# def print_negative(cls, text):
# """ Print a negative intermediate result. """
# if cls._param_negative():
# print('\033[0m\033[1m -\033[0m %s' % str(text))
#
# @classmethod
# def print_title(cls, test_name, test_class, result=None):
# """ Print the title of the test, should be called once and before any other print method. """
# test_result = ' > %s' % result if result is not None else ''
# print('\033[34;1m#%2d %s \033[0m\033[34m(%s%s)\033[0m' % (
# cls._counter(), test_name, test_class, test_result
# ))
#
# @classmethod
# def print_result(cls, test_name, success):
# """ Print the result, should be called once and after any other print method. """
# test_result = 'completed' if success else 'failed'
# color = '32' if success else '31'
# print('\033[%s;1m%s\033[0m\033[%sm %s %s!\033[0m' % (color, cls._prefix(), color, test_name, test_result))
#
# @classmethod
# def print_failure(cls, text):
# """ Instead of a result, print the failure of a test case once after any other print method. """
# traceback.print_exc()
# print('\033[31;1m%s\033[0m\033[31m %s\033[0m' % (cls._prefix(), str(text)))
#
# @classmethod
# def print_error(cls, text):
# """ Print an internal error that might cause unexpected behavior, but does not cause failure itself. """
# print('\033[33;1m \033[0m\033[33m %s\033[0m' % str(text))
#
# def __init__(self):
# """ DO NOT USE: use class print methods instead. """
# raise NotImplementedError("use static methods and attributes")
which might include code, classes, or functions. Output only the next line. | Logger.print_positive('using data source: %s' % bagpath) |
Predict the next line after this snippet: <|code_start|> for con_msg in msg.containers:
if con_msg.path == path:
container = con_msg
break
transitions = None
if container.transitions is not None:
transitions = {}
for i in range(len(container.transitions)):
transitions[container.outcomes[i]] = container.transitions[i] + '_mirror'
path_frags = path.split('/')
container_name = path_frags[len(path_frags)-1]
if len(container.children) > 0:
sm_outcomes = []
for outcome in container.outcomes:
sm_outcomes.append(outcome + '_mirror')
sm = PreemptableStateMachine(outcomes=sm_outcomes)
with sm:
for child in container.children:
self._add_node(msg, path+'/'+child)
if len(transitions) > 0:
container_transitions = {}
for i in range(len(container.transitions)):
container_transitions[sm_outcomes[i]] = transitions[container.outcomes[i]]
PreemptableStateMachine.add(container_name + '_mirror', sm, transitions=container_transitions)
else:
self._sm = sm
else:
PreemptableStateMachine.add(container_name + '_mirror',
<|code_end|>
using the current file's imports:
import rospy
import threading
import zlib
from flexbe_core.core import PreemptableState, PreemptableStateMachine, LockableStateMachine
from .mirror_state import MirrorState
from flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached
from flexbe_msgs.msg import ContainerStructure, BehaviorSync, BEStatus
from std_msgs.msg import Empty, String, Int32, UInt8
and any relevant context from other files:
# Path: flexbe_mirror/src/flexbe_mirror/mirror_state.py
# class MirrorState(EventState):
# '''
# This state will display its possible outcomes as buttons in the GUI
# and is designed in a way to be created dynamically.
# '''
#
# def __init__(self, target_name, target_path, given_outcomes, outcome_autonomy):
# super(MirrorState, self).__init__(outcomes=given_outcomes)
# self.set_rate(100)
# self._target_name = target_name
# self._target_path = target_path
#
# self._outcome_topic = 'flexbe/mirror/outcome'
#
# self._pub = ProxyPublisher()
# self._sub = ProxySubscriberCached({self._outcome_topic: UInt8})
#
# def execute(self, userdata):
# if self._sub.has_buffered(self._outcome_topic):
# msg = self._sub.get_from_buffer(self._outcome_topic)
# if msg.data < len(self.outcomes):
# rospy.loginfo("State update: %s > %s", self._target_name, self.outcomes[msg.data])
# return self.outcomes[msg.data]
# try:
# self.sleep()
# except ROSInterruptException:
# print('Interrupted mirror sleep.')
#
# def on_enter(self, userdata):
# self._pub.publish('flexbe/behavior_update', String("/" + "/".join(self._target_path.split("/")[1:])))
. Output only the next line. | MirrorState(container_name, path, container.outcomes, container.autonomy), |
Using the snippet: <|code_start|>#!/usr/bin/env python
class TestInterface(object):
""" Interface to states and behaviors that are subject to testing. """
def __init__(self, path, classname):
package = __import__(path, fromlist=[path])
clsmembers = inspect.getmembers(package, lambda member: (
inspect.isclass(member) and member.__module__ == package.__name__
))
self._class = next(c for name, c in clsmembers if name == classname)
self._instance = None
<|code_end|>
, determine the next line of code. You have imports:
import inspect
import rospy
from flexbe_core.core import EventState
from .logger import Logger
and context (class names, function names, or code) available:
# Path: flexbe_testing/src/flexbe_testing/logger.py
# class Logger(object):
# """ Bundles static methods for test case logging. """
#
# @classmethod
# def _param_positive(cls):
# return not cls._param_compact() and rospy.get_param('~print_debug_positive', True)
#
# @classmethod
# def _param_negative(cls):
# return cls._param_compact() or rospy.get_param('~print_debug_negative', True)
#
# @classmethod
# def _param_compact(cls):
# return rospy.get_param('~compact_format', False)
#
# @classmethod
# def _prefix(cls):
# return ' >' if cls._param_compact() else '>>>'
#
# @classmethod
# def _counter(cls):
# cls._counter_value += 1
# return cls._counter_value
# _counter_value = 0
#
# @classmethod
# def mute_rospy(cls):
# """ Conditionally mute the rospy logging channels. """
# if cls._param_compact() or rospy.get_param('~mute_info', False):
# rospy.loginfo = rospy.logdebug
# if cls._param_compact() or rospy.get_param('~mute_warn', False):
# rospy.logwarn = rospy.logdebug
# if not cls._param_compact() and rospy.get_param('~mute_error', False):
# rospy.logerr = rospy.logdebug
#
# @classmethod
# def print_positive(cls, text):
# """ Print a positive intermediate result. """
# if cls._param_positive():
# print('\033[0m\033[1m +\033[0m %s' % str(text))
#
# @classmethod
# def print_negative(cls, text):
# """ Print a negative intermediate result. """
# if cls._param_negative():
# print('\033[0m\033[1m -\033[0m %s' % str(text))
#
# @classmethod
# def print_title(cls, test_name, test_class, result=None):
# """ Print the title of the test, should be called once and before any other print method. """
# test_result = ' > %s' % result if result is not None else ''
# print('\033[34;1m#%2d %s \033[0m\033[34m(%s%s)\033[0m' % (
# cls._counter(), test_name, test_class, test_result
# ))
#
# @classmethod
# def print_result(cls, test_name, success):
# """ Print the result, should be called once and after any other print method. """
# test_result = 'completed' if success else 'failed'
# color = '32' if success else '31'
# print('\033[%s;1m%s\033[0m\033[%sm %s %s!\033[0m' % (color, cls._prefix(), color, test_name, test_result))
#
# @classmethod
# def print_failure(cls, text):
# """ Instead of a result, print the failure of a test case once after any other print method. """
# traceback.print_exc()
# print('\033[31;1m%s\033[0m\033[31m %s\033[0m' % (cls._prefix(), str(text)))
#
# @classmethod
# def print_error(cls, text):
# """ Print an internal error that might cause unexpected behavior, but does not cause failure itself. """
# print('\033[33;1m \033[0m\033[33m %s\033[0m' % str(text))
#
# def __init__(self):
# """ DO NOT USE: use class print methods instead. """
# raise NotImplementedError("use static methods and attributes")
. Output only the next line. | Logger.print_positive('%s imported' % self.get_base_name()) |
Given the code snippet: <|code_start|> if request.user.is_authenticated:
return redirect('/lobby/')
super(HomeView, self).dispatch(request, *args, **kwargs)
class CreateUserView(CreateView):
template_name = 'register.html'
form_class = UserCreationForm
success_url = '/lobby/'
def form_valid(self, form):
valid = super(CreateUserView, self).form_valid(form)
username, password = form.cleaned_data.get('username'), form.cleaned_data.get('password1')
new_user = authenticate(username=username, password=password)
login(self.request, new_user)
return valid
class LobbyView(TemplateView):
template_name = 'components/lobby/lobby.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(LobbyView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(LobbyView, self).get_context_data(**kwargs)
# get current open games to prepopulate the list
# we're creating a list of games that contains just the id (for the link) and the creator
<|code_end|>
, generate the next line using the imports in this file:
from django.contrib.auth.forms import PasswordResetForm
from django.shortcuts import redirect
from django.views.generic import CreateView, TemplateView, View, FormView
from django.contrib.auth import authenticate, login
from game.forms import *
from game.models import User, Game
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login, logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib import messages
from django.contrib.auth import get_user
from django.shortcuts import get_object_or_404
import json
and context (functions, classes, or occasionally code) from other files:
# Path: game/models.py
# class Game(models.Model):
# class GameSquare(models.Model):
# class GameLog(models.Model):
# def __unicode__(self):
# def get_available_games():
# def created_count(user):
# def get_games_for_player(user):
# def get_by_id(id):
# def create_new(user):
# def add_log(self, text, user=None):
# def get_all_game_squares(self):
# def get_game_square(row, col):
# def get_square_by_coords(self, coords):
# def get_game_log(self):
# def send_game_update(self):
# def next_player_turn(self):
# def mark_complete(self, winner):
# def __unicode__(self):
# def get_by_id(id):
# def get_surrounding(self):
# def claim(self, status_type, user):
# def __unicode__(self):
# STATUS_TYPES = (
# ('Free', 'Free'),
# ('Selected', 'Selected'),
# ('Surrounding', 'Surrounding')
# )
. Output only the next line. | available_games = [{'creator': game.creator.username, 'id': game.pk} for game in Game.get_available_games()] |
Using the snippet: <|code_start|> try:
d = load_sitespec(hostname)
except IOError:
d = {}
# If `d` is empty, file for `hostname` could not be loaded or was empty;
# in either case, we go on to the next superdomain.
if not d:
super_domain = hostname.partition(".")[2]
if super_domain:
return load_superdomains(super_domain)
else:
return None
return d
def load_sitespec(hostname):
""" Load sitespec file for a given host.
Args:
hostname (str): Hostname for which to load associated spec file.
Raises:
IOError if there is no sitespec file for the exact hostname.
"""
fpath = os.path.join("sitespecs", hostname + ".txt")
with io.open(fpath) as f:
<|code_end|>
, determine the next line of code. You have imports:
import io
import os
import urlparse
import markdown2
import html2text
from flask import Markup
from . import download
from . import instapaper
from . import newspaper
from saulify.sitespec import load_rules
and context (class names, function names, or code) available:
# Path: saulify/sitespec.py
# def load_rules(f):
# """
# Reads scraping rules from Instapaper spec file.
#
# The scraping rules are stored in a dictionary. For simple directives
# like `strip`, the rules are stored in a list using the directive name as
# the key. Exceptions to this storage format are detailed below.
#
# `find_string` / `replace_string` :
# These directives come in pairs; one find regular expression and one
# replace. Stored as a list of 2-tuples under the key `"find_replace"`.
#
# Boolean properties such as `prune`:
# Stored as `True` or `False` according to the last line specifying
# the value.
#
# Args:
# f (file): Spec file object
#
# Returns:
# Dictionary containing scraper rules.
# """
#
# rules = collections.defaultdict(list)
# find_string = None
#
# boolean_props = ["prune"]
#
# for label, content in parse_specfile(f):
#
# if label.startswith("test_"):
# continue
#
# if label == "find_string":
# find_string = content
#
# elif label == "replace_string":
# if not find_string:
# raise Exception("Invalid spec file")
# rules["find_replace"].append((find_string, content))
#
# elif label in boolean_props:
# if content == "yes":
# rules[label] = True
# elif content == "no":
# rules[label] = False
# else:
# raise Exception("Invalid spec file")
#
# else:
# rules[label].append(content)
#
# return rules
. Output only the next line. | return load_rules(f) |
Predict the next line for this snippet: <|code_start|>
result_colors = {
"PASS": colorama.Fore.GREEN,
"FAIL": colorama.Fore.RED,
"EXCEPTION": colorama.Fore.RED
}
print(result_colors[result] + "{0} : {1}".format(result, report["url"]))
if report["status"] == "EXCEPTION":
print(report["message"])
elif test_passed(report):
r = report["result"]
stats = ", ".join(["{0} {1}".format(len(r[c]["found"]), c) for c in r])
print("Found " + stats)
else:
for category, result in report["result"].items():
if result["missing"]:
count = len(result["missing"])
print("Missing {0} {1}:".format(count, category))
for item in result["missing"]:
print(item)
if __name__ == "__main__":
for fname in os.listdir(SPEC_DIRECTORY):
fpath = os.path.join(SPEC_DIRECTORY, fname)
with io.open(fpath, encoding="utf-8") as f:
<|code_end|>
with the help of current file imports:
import io
import os
import json
import argparse
import colorama
from saulify import sitespec
from saulify.testcase import TestCase
and context from other files:
# Path: saulify/sitespec.py
# def parse_specfile(f):
# def load_testcases(f):
# def load_rules(f):
#
# Path: saulify/testcase.py
# class TestCase(object):
# """
# Test case for the article scraper.
#
# Attributes:
# url (str): URL of the page being tested
# """
#
# def __init__(self, spec):
# """ Create a new TestCase object.
#
# Args:
# spec (defaultdict of list): Dictionary containing test directives
# as returned by `saulify.sitespec.load_testcases`. Must contain a
# `"test_url"` key.
# """
# self.url = spec["test_url"]
# self._spec = spec
#
# def run(self):
# try:
# output = clean_url(self.url)
# except Exception as e:
# return {
# "url": self.url,
# "status": "EXCEPTION",
# "message": e.message
# }
# else:
# norm_space = re.sub(r'\s+', ' ', output["markdown"])
# return {
# "url": self.url,
# "status": "OK",
# "result": {
# "fragments": self.check_fragments(norm_space),
# "images": self.check_images(output["html"]),
# }
# }
#
# def check_fragments(self, text):
# result = {"missing": [], "found": []}
# for s in self._spec["test_contains"]:
# if s in text:
# result["found"].append(s)
# else:
# result["missing"].append(s)
# return result
#
# def check_images(self, html):
# etree = lxml.html.fromstring(html)
# img_rel_urls = etree.xpath("//img/@src")
# img_abs_urls = [urlparse.urljoin(self.url, u) for u in img_rel_urls]
# result = {"missing": [], "found": []}
# for url in self._spec["test_contains_images"]:
# abs_url = urlparse.urljoin(self.url, url)
# if abs_url in img_abs_urls:
# result["found"].append(url)
# else:
# result["missing"].append(url)
# return result
, which may contain function names, class names, or code. Output only the next line. | test_specs = sitespec.load_testcases(f) |
Here is a snippet: <|code_start|> "PASS": colorama.Fore.GREEN,
"FAIL": colorama.Fore.RED,
"EXCEPTION": colorama.Fore.RED
}
print(result_colors[result] + "{0} : {1}".format(result, report["url"]))
if report["status"] == "EXCEPTION":
print(report["message"])
elif test_passed(report):
r = report["result"]
stats = ", ".join(["{0} {1}".format(len(r[c]["found"]), c) for c in r])
print("Found " + stats)
else:
for category, result in report["result"].items():
if result["missing"]:
count = len(result["missing"])
print("Missing {0} {1}:".format(count, category))
for item in result["missing"]:
print(item)
if __name__ == "__main__":
for fname in os.listdir(SPEC_DIRECTORY):
fpath = os.path.join(SPEC_DIRECTORY, fname)
with io.open(fpath, encoding="utf-8") as f:
test_specs = sitespec.load_testcases(f)
for test_spec in test_specs:
<|code_end|>
. Write the next line using the current file imports:
import io
import os
import json
import argparse
import colorama
from saulify import sitespec
from saulify.testcase import TestCase
and context from other files:
# Path: saulify/sitespec.py
# def parse_specfile(f):
# def load_testcases(f):
# def load_rules(f):
#
# Path: saulify/testcase.py
# class TestCase(object):
# """
# Test case for the article scraper.
#
# Attributes:
# url (str): URL of the page being tested
# """
#
# def __init__(self, spec):
# """ Create a new TestCase object.
#
# Args:
# spec (defaultdict of list): Dictionary containing test directives
# as returned by `saulify.sitespec.load_testcases`. Must contain a
# `"test_url"` key.
# """
# self.url = spec["test_url"]
# self._spec = spec
#
# def run(self):
# try:
# output = clean_url(self.url)
# except Exception as e:
# return {
# "url": self.url,
# "status": "EXCEPTION",
# "message": e.message
# }
# else:
# norm_space = re.sub(r'\s+', ' ', output["markdown"])
# return {
# "url": self.url,
# "status": "OK",
# "result": {
# "fragments": self.check_fragments(norm_space),
# "images": self.check_images(output["html"]),
# }
# }
#
# def check_fragments(self, text):
# result = {"missing": [], "found": []}
# for s in self._spec["test_contains"]:
# if s in text:
# result["found"].append(s)
# else:
# result["missing"].append(s)
# return result
#
# def check_images(self, html):
# etree = lxml.html.fromstring(html)
# img_rel_urls = etree.xpath("//img/@src")
# img_abs_urls = [urlparse.urljoin(self.url, u) for u in img_rel_urls]
# result = {"missing": [], "found": []}
# for url in self._spec["test_contains_images"]:
# abs_url = urlparse.urljoin(self.url, url)
# if abs_url in img_abs_urls:
# result["found"].append(url)
# else:
# result["missing"].append(url)
# return result
, which may include functions, classes, or code. Output only the next line. | test_case = TestCase(test_spec) |
Here is a snippet: <|code_start|>__all__ = ["TestCase"]
class TestCase(object):
"""
Test case for the article scraper.
Attributes:
url (str): URL of the page being tested
"""
def __init__(self, spec):
""" Create a new TestCase object.
Args:
spec (defaultdict of list): Dictionary containing test directives
as returned by `saulify.sitespec.load_testcases`. Must contain a
`"test_url"` key.
"""
self.url = spec["test_url"]
self._spec = spec
def run(self):
try:
<|code_end|>
. Write the next line using the current file imports:
import re
import urlparse
import lxml.html
from saulify.scrapers.cascade import clean_url
and context from other files:
# Path: saulify/scrapers/cascade.py
# def clean_url(url):
# """ Extract article from given url using `scraper_cascade`
#
# Args:
# url (str): Url of article to be scraped.
#
# Returns:
# Dictionary detailing the extracted article.
# """
#
# content = download.download_url(url)
# result = scraper_cascade(url, content)
#
# return result
, which may include functions, classes, or code. Output only the next line. | output = clean_url(self.url) |
Based on the snippet: <|code_start|>
def test_pickle_dumps():
data = {"hello": "world", "test": 123}
expected = [
b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00}\x94(\x8c\x05hello\x94\x8c\x05world\x94\x8c\x04test\x94K{u.",
b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00}\x94(\x8c\x04test\x94K{\x8c\x05hello\x94\x8c\x05world\x94u.",
b"\x80\x02}q\x00(X\x04\x00\x00\x00testq\x01K{X\x05\x00\x00\x00helloq\x02X\x05\x00\x00\x00worldq\x03u.",
b"\x80\x05\x95\x1e\x00\x00\x00\x00\x00\x00\x00}\x94(\x8c\x05hello\x94\x8c\x05world\x94\x8c\x04test\x94K{u.",
]
<|code_end|>
, predict the immediate next line with the help of imports:
from .._pickle_api import pickle_dumps, pickle_loads
and context (classes, functions, sometimes code) from other files:
# Path: srsly/_pickle_api.py
# def pickle_dumps(data: JSONInput, protocol: Optional[int] = None) -> bytes:
# """Serialize a Python object with pickle.
#
# data: The object to serialize.
# protocol (int): Protocol to use. -1 for highest.
# RETURNS (bytes): The serialized object.
# """
# return cloudpickle.dumps(data, protocol=protocol)
#
# def pickle_loads(data: bytes) -> JSONOutput:
# """Deserialize bytes with pickle.
#
# data (bytes): The data to deserialize.
# RETURNS: The deserialized Python object.
# """
# return cloudpickle.loads(data)
. Output only the next line. | msg = pickle_dumps(data) |
Here is a snippet: <|code_start|>
def test_pickle_dumps():
data = {"hello": "world", "test": 123}
expected = [
b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00}\x94(\x8c\x05hello\x94\x8c\x05world\x94\x8c\x04test\x94K{u.",
b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00}\x94(\x8c\x04test\x94K{\x8c\x05hello\x94\x8c\x05world\x94u.",
b"\x80\x02}q\x00(X\x04\x00\x00\x00testq\x01K{X\x05\x00\x00\x00helloq\x02X\x05\x00\x00\x00worldq\x03u.",
b"\x80\x05\x95\x1e\x00\x00\x00\x00\x00\x00\x00}\x94(\x8c\x05hello\x94\x8c\x05world\x94\x8c\x04test\x94K{u.",
]
msg = pickle_dumps(data)
assert msg in expected
def test_pickle_loads():
msg = pickle_dumps({"hello": "world", "test": 123})
<|code_end|>
. Write the next line using the current file imports:
from .._pickle_api import pickle_dumps, pickle_loads
and context from other files:
# Path: srsly/_pickle_api.py
# def pickle_dumps(data: JSONInput, protocol: Optional[int] = None) -> bytes:
# """Serialize a Python object with pickle.
#
# data: The object to serialize.
# protocol (int): Protocol to use. -1 for highest.
# RETURNS (bytes): The serialized object.
# """
# return cloudpickle.dumps(data, protocol=protocol)
#
# def pickle_loads(data: bytes) -> JSONOutput:
# """Deserialize bytes with pickle.
#
# data (bytes): The data to deserialize.
# RETURNS: The deserialized Python object.
# """
# return cloudpickle.loads(data)
, which may include functions, classes, or code. Output only the next line. | data = pickle_loads(msg) |
Next line prediction: <|code_start|>
def __init__(self, tag, value, start_mark, end_mark, comment=None, anchor=None):
# type: (Any, Any, Any, Any, Any, Any) -> None
self.tag = tag
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
self.comment = comment
self.anchor = anchor
def __repr__(self):
# type: () -> str
value = self.value
# if isinstance(value, list):
# if len(value) == 0:
# value = '<empty>'
# elif len(value) == 1:
# value = '<1 item>'
# else:
# value = '<%d items>' % len(value)
# else:
# if len(value) > 75:
# value = repr(value[:70]+u' ... ')
# else:
# value = repr(value)
value = repr(value)
return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
def dump(self, indent=0):
# type: (int) -> None
<|code_end|>
. Use current file imports:
(import sys
from .compat import string_types
from typing import Dict, Any, Text # NOQA)
and context including class names, function names, or small code snippets from other files:
# Path: srsly/ruamel_yaml/compat.py
# _DEFAULT_YAML_VERSION = (1, 2)
# PY2 = sys.version_info[0] == 2
# PY3 = sys.version_info[0] == 3
# MAXSIZE = sys.maxsize
# UNICODE_SIZE = 4 if sys.maxunicode > 65535 else 2
# DBG_TOKEN = 1
# DBG_EVENT = 2
# DBG_NODE = 4
# class ordereddict(OrderedDict): # type: ignore
# class ObjectCounter(object):
# class Nprint(object):
# class MutableSliceableSequence(MutableSequence): # type: ignore
# def insert(self, pos, key, value):
# def utf8(s):
# def to_str(s):
# def to_unicode(s):
# def utf8(s):
# def to_str(s):
# def to_unicode(s):
# def with_metaclass(meta, *bases):
# def __init__(self):
# def __call__(self, k):
# def dump(self):
# def dbg(val=None):
# def __init__(self, file_name=None):
# def __call__(self, *args, **kw):
# def set_max_print(self, i):
# def check_namespace_char(ch):
# def check_anchorname_char(ch):
# def version_tnf(t1, t2=None):
# def __getitem__(self, index):
# def __setitem__(self, index, value):
# def __delitem__(self, index):
# def __getsingleitem__(self, index):
# def __setsingleitem__(self, index, value):
# def __delsingleitem__(self, index):
. Output only the next line. | if isinstance(self.value, string_types): |
Next line prediction: <|code_start|>
class Monster(srsly.ruamel_yaml.YAMLObject):
yaml_tag = u'!Monster'
def __init__(self, name, hp, ac, attacks):
self.name = name
self.hp = hp
self.ac = ac
self.attacks = attacks
def __repr__(self):
return "%s(name=%r, hp=%r, ac=%r, attacks=%r)" % (
self.__class__.__name__, self.name, self.hp, self.ac, self.attacks)
data = srsly.ruamel_yaml.load(dedent("""\\
--- !Monster
name: Cave spider
hp: [2,6] # 2d6
ac: 16
attacks: [BITE, HURT]
"""), Loader=srsly.ruamel_yaml.Loader)
# normal dump, keys will be sorted
assert srsly.ruamel_yaml.dump(data) == dedent("""\\
!Monster
ac: 16
attacks: [BITE, HURT]
hp: [2, 6]
name: Cave spider
""")
'''
<|code_end|>
. Use current file imports:
(import sys
import pytest # NOQA
import srsly.ruamel_yaml.comments
from .roundtrip import save_and_run # NOQA
from srsly.ruamel_yaml import YAML
from srsly.ruamel_yaml.compat import StringIO)
and context including class names, function names, or small code snippets from other files:
# Path: srsly/tests/ruamel_yaml/roundtrip.py
# def save_and_run(program, base_dir=None, output=None, file_name=None, optimized=False):
# """
# safe and run a python program, thereby circumventing any restrictions on module level
# imports
# """
# from subprocess import check_output, STDOUT, CalledProcessError
#
# if not hasattr(base_dir, "hash"):
# base_dir = Path(str(base_dir))
# if file_name is None:
# file_name = "safe_and_run_tmp.py"
# file_name = base_dir / file_name
# file_name.write_text(dedent(program))
#
# try:
# cmd = [sys.executable]
# if optimized:
# cmd.append("-O")
# cmd.append(str(file_name))
# print("running:", *cmd)
# res = check_output(cmd, stderr=STDOUT, universal_newlines=True)
# if output is not None:
# if "__pypy__" in sys.builtin_module_names:
# res = res.splitlines(True)
# res = [line for line in res if "no version info" not in line]
# res = "".join(res)
# print("result: ", res, end="")
# print("expected:", output, end="")
# assert res == output
# except CalledProcessError as exception:
# print("##### Running '{} {}' FAILED #####".format(sys.executable, file_name))
# print(exception.output)
# return exception.returncode
# return 0
. Output only the next line. | assert save_and_run(program_src, tmpdir) == 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.