| Instruction | output_code |
|---|---|
Here is a snippet: <|code_start|> print '""".split()'
max_score = float(length) / max(lengths)
print 'for index, name in enumerate(POPULAR_%sES[%d]):' % (
upper, length)
print ' %s_SCORES[name] = (%d - index) / %.1f' % (
upper, len(names), len(names) / max_score)
def score_function(variable, slice):
func = """
def prefix_score(name):
best_score = 0.0
best_prefix = ''
for length in range(2, min(6, len(name)) + 1):
prefix = name[slice]
score = PREFIX_SCORES.get(prefix, None)
if score > best_score:
best_score = score
best_prefix = prefix
return best_score, best_prefix
""".strip()
func = func.replace('PREFIX', variable)
func = func.replace('prefix', variable.lower())
func = func.replace('[slice]', slice)
print '\n\n' + func
def main():
all_lengths(Prefix, LENGTHS)
print
<|code_end|>
. Write the next line using the current file imports:
import os
import sys
from common.appenginepatch.aecmd import setup_env
from prefixes.models import Prefix, Suffix
and context from other files:
# Path: prefixes/models.py
# class Prefix(db.Expando):
# """
# Count all domains in the datastore that start with the same
# prefix. The key name is the prefix, with a leading dot and
# optional property 'resume' if incomplete.
# """
# length = db.IntegerProperty(required=True)
# count = db.IntegerProperty(required=True, default=0)
# com = db.IntegerProperty()
# percentage = db.FloatProperty()
# timestamp = db.DateTimeProperty()
#
# @classmethod
# def name_cut(cls, name, length=None):
# """
# >>> Prefix.name_cut('123456789')
# '123456789'
# >>> Prefix.name_cut('123456789', 3)
# '123'
# """
# if length is None:
# return name
# else:
# return name[:length]
#
# class Suffix(db.Expando):
# """
# Count all domains in the datastore that end with the same suffix.
# The key name is the suffix backwards, with a leading dot
# and optional property 'resume' if incomplete.
# """
# length = db.IntegerProperty(required=True)
# count = db.IntegerProperty(required=True, default=0)
# com = db.IntegerProperty()
# percentage = db.FloatProperty()
# timestamp = db.DateTimeProperty()
#
# @classmethod
# def name_cut(cls, name, length=None):
# """
# >>> Suffix.name_cut('123456789')
# '987654321'
# >>> Suffix.name_cut('123456789', 3)
# '987'
# """
# if length is None:
# return name[::-1]
# else:
# return name[::-1][:length]
, which may include functions, classes, or code. Output only the next line. | all_lengths(Suffix, LENGTHS) |
Based on the snippet: <|code_start|>
def get_count(name):
"""
Retrieve the value for a given sharded counter.
"""
total = memcache.get(name)
if total is None:
total = 0
<|code_end|>
, predict the immediate next line with the help of imports:
import random
from google.appengine.ext import db
from google.appengine.api import memcache
from counters.models import Shard, Config
and context (classes, functions, sometimes code) from other files:
# Path: counters/models.py
# class Shard(db.Model):
# name = db.StringProperty(required=True)
# count = db.IntegerProperty(required=True, default=0)
#
# class Config(db.Model):
# name = db.StringProperty(required=True)
# num_shards = db.IntegerProperty(required=True, default=20)
. Output only the next line. | for counter in Shard.all().filter('name = ', name): |
Here is a snippet: <|code_start|>
def get_count(name):
"""
Retrieve the value for a given sharded counter.
"""
total = memcache.get(name)
if total is None:
total = 0
for counter in Shard.all().filter('name = ', name):
total += counter.count
memcache.add(name, str(total), 60)
return total
def increment(name, delta=1):
"""
Increment the value for a given sharded counter.
"""
<|code_end|>
. Write the next line using the current file imports:
import random
from google.appengine.ext import db
from google.appengine.api import memcache
from counters.models import Shard, Config
and context from other files:
# Path: counters/models.py
# class Shard(db.Model):
# name = db.StringProperty(required=True)
# count = db.IntegerProperty(required=True, default=0)
#
# class Config(db.Model):
# name = db.StringProperty(required=True)
# num_shards = db.IntegerProperty(required=True, default=20)
, which may include functions, classes, or code. Output only the next line. | config = Config.get_or_insert(name, name=name) |
Predict the next line after this snippet: <|code_start|>
class FeedbackAdmin(admin.ModelAdmin):
list_display = ('message', 'page', 'points', 'submitter', 'submitted')
search_fields = ('message', 'page')
<|code_end|>
using the current file's imports:
from django.contrib import admin
from feedback.models import Feedback
and any relevant context from other files:
# Path: feedback/models.py
# class Feedback(db.Model):
# page = db.StringProperty(required=True)
# message = db.StringProperty(required=True)
# points = db.IntegerProperty(default=1)
# ip = db.StringProperty()
# submitter = db.ReferenceProperty(User)
# submitted = db.DateTimeProperty(auto_now_add=True)
#
# def __unicode__(self):
# return self.message[:50]
. Output only the next line. | admin.site.register(Feedback, FeedbackAdmin) |
Based on the snippet: <|code_start|>
def statistics(path):
results = {'path': path}
missing = []
seconds1 = []
seconds2 = []
total = errors = failures = 0
<|code_end|>
, predict the immediate next line with the help of imports:
from datetime import datetime, timedelta
from ragendja.template import render_to_response
from ragendja.dbutils import get_object_or_404
from tests.models import Comparison
and context (classes, functions, sometimes code) from other files:
# Path: tests/models.py
# class Comparison(db.Model):
# message = db.StringProperty(default='')
# timestamp = db.DateTimeProperty(required=True)
# path = db.StringProperty(required=True)
# params = db.StringProperty(default='')
# # Query 1 and results.
# gql1 = db.StringProperty()
# seconds1 = db.FloatProperty()
# result1 = db.TextProperty()
# trunc1 = db.TextProperty()
# missing1 = db.IntegerProperty()
# # Query 2 and results.
# gql2 = db.StringProperty()
# seconds2 = db.FloatProperty()
# result2 = db.TextProperty()
# trunc2 = db.TextProperty()
# missing2 = db.IntegerProperty()
#
# def fetch1(self, gql, *args, **kwargs):
# self.messages = []
# limit = kwargs.get('limit', 100)
# start_time = time.time()
# self.keys1 = GqlQuery(gql, *args).fetch(limit)
# self.seconds1 = time.time() - start_time
# self.names1 = [key.name() for key in self.keys1]
# self.gql1 = '%s LIMIT %d' % (replace_args(gql, *args), limit)
# self.result1 = ' '.join(self.names1)
#
# def fetch2(self, gql, *args, **kwargs):
# limit = kwargs.get('limit', 100)
# start_time = time.time()
# self.keys2 = GqlQuery(gql, *args).fetch(limit)
# self.seconds2 = time.time() - start_time
# self.names2 = [key.name() for key in self.keys2]
# self.gql2 = '%s LIMIT %d' % (replace_args(gql, *args), limit)
# self.result2 = ' '.join(self.names2)
#
# def check_sort_order(self):
# sorted1 = sorted(self.names1, reverse='DESC' in self.gql1)
# if sorted1 != self.names1:
# self.messages.append(
# "the first query returned incorrect sort order")
# sorted2 = sorted(self.names2, reverse='DESC' in self.gql2)
# if sorted2 != self.names2:
# self.messages.append(
# "the second query returned incorrect sort order")
#
# def truncate_front_back(self):
# trunc1 = self.names1[:]
# trunc2 = self.names2[:]
# if trunc1 and trunc2 and trunc2[-1] < trunc1[0]:
# while trunc1 and trunc2 and trunc2[-1] < trunc1[0]:
# del trunc2[-1]
# elif trunc1 and trunc2 and trunc1[0] < trunc2[-1]:
# while trunc1 and trunc2 and trunc1[0] < trunc2[-1]:
# del trunc1[0]
# self.set1 = set(trunc1)
# self.set2 = set(trunc2)
# self.trunc1 = ' '.join(trunc1)
# self.trunc2 = ' '.join(trunc2)
# self.colored1 = color_names(self.names1, self.set1, self.set2)
# self.colored2 = color_names(self.names2, self.set2, self.set1)
#
# def count_missing_items(self):
# self.missing1 = sum([int(name not in self.set1) for name in self.set2])
# self.missing2 = sum([int(name not in self.set2) for name in self.set1])
# if self.missing1:
# self.messages.append(
# "the first query missed %d items" % self.missing1)
# if self.missing2:
# self.messages.append(
# "the second query missed %d items" % self.missing2)
#
# def update_and_put(self):
# self.message = ' and '.join(self.messages)
# self.timestamp = datetime.now()
# self.put()
. Output only the next line. | query = Comparison.all().filter('path', path) |
Continue the code snippet: <|code_start|>#!/usr/bin/env python
# Setup project environment in the parent directory.
sys.path[0] = os.path.dirname(sys.path[0])
setup_env()
LETTERS = 'abcdefghijklmnopqrstuvwxyz'
for c1 in LETTERS:
for c2 in LETTERS:
for c3 in LETTERS:
for c4 in LETTERS:
for c5 in LETTERS:
word = ''.join((c1, c2, c3, c4, c5))
<|code_end|>
. Use current file imports:
import os
import sys
from common.appenginepatch.aecmd import setup_env
from languages import english
from languages.utils import word_score
and context (classes, functions, or code) from other files:
# Path: languages/english.py
# TRIPLE_SCORES = {}
#
# Path: languages/utils.py
# def word_score(word, triple_scores):
# triples = list(word_triples(word))
# result = 0.0
# for triple in triples:
# result += triple_scores.get(triple, 0.0)
# return result / len(triples)
. Output only the next line. | score = word_score(word, english.TRIPLE_SCORES) |
Next line prediction: <|code_start|>#!/usr/bin/env python
# Setup project environment in the parent directory.
sys.path[0] = os.path.dirname(sys.path[0])
setup_env()
LETTERS = 'abcdefghijklmnopqrstuvwxyz'
for c1 in LETTERS:
for c2 in LETTERS:
for c3 in LETTERS:
for c4 in LETTERS:
for c5 in LETTERS:
word = ''.join((c1, c2, c3, c4, c5))
<|code_end|>
. Use current file imports:
(import os
import sys
from common.appenginepatch.aecmd import setup_env
from languages import english
from languages.utils import word_score)
and context including class names, function names, or small code snippets from other files:
# Path: languages/english.py
# TRIPLE_SCORES = {}
#
# Path: languages/utils.py
# def word_score(word, triple_scores):
# triples = list(word_triples(word))
# result = 0.0
# for triple in triples:
# result += triple_scores.get(triple, 0.0)
# return result / len(triples)
. Output only the next line. | score = word_score(word, english.TRIPLE_SCORES) |
Here is a snippet: <|code_start|>def fetch_file(Model, length):
kind = Model.kind()
filename = '.data/popular/%ses.%d.txt' % (kind.split('_')[-1], length)
outfile = open(filename, 'w')
start = db.Key.from_path(kind, '-')
while True:
query = Model.all().filter('length', length).order('__key__')
query.filter('__key__ >', start)
prefixes = retry(query.fetch, 1000)
for prefix in prefixes:
name = prefix.key().name()
if name.startswith('.'):
continue
if kind == 'prefixes_suffix':
name = name[::-1]
outfile.write('%d %s\n' % (prefix.com, name))
if len(prefixes) < 1000:
break
start = prefixes[-1].key()
outfile.close()
def auth_func():
return open('.passwd').read().split(':')
if __name__ == '__main__':
remote_api_stub.ConfigureRemoteDatastore(
'scoretool', '/remote_api_hidden', auth_func, 'www.nxdom.com')
for length in range(2, 7):
<|code_end|>
. Write the next line using the current file imports:
import os
import sys
from common.appenginepatch.aecmd import setup_env
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from prefixes.models import Prefix, Suffix
from tools.retry import retry
and context from other files:
# Path: prefixes/models.py
# class Prefix(db.Expando):
# """
# Count all domains in the datastore that start with the same
# prefix. The key name is the prefix, with a leading dot and
# optional property 'resume' if incomplete.
# """
# length = db.IntegerProperty(required=True)
# count = db.IntegerProperty(required=True, default=0)
# com = db.IntegerProperty()
# percentage = db.FloatProperty()
# timestamp = db.DateTimeProperty()
#
# @classmethod
# def name_cut(cls, name, length=None):
# """
# >>> Prefix.name_cut('123456789')
# '123456789'
# >>> Prefix.name_cut('123456789', 3)
# '123'
# """
# if length is None:
# return name
# else:
# return name[:length]
#
# class Suffix(db.Expando):
# """
# Count all domains in the datastore that end with the same suffix.
# The key name is the suffix backwards, with a leading dot
# and optional property 'resume' if incomplete.
# """
# length = db.IntegerProperty(required=True)
# count = db.IntegerProperty(required=True, default=0)
# com = db.IntegerProperty()
# percentage = db.FloatProperty()
# timestamp = db.DateTimeProperty()
#
# @classmethod
# def name_cut(cls, name, length=None):
# """
# >>> Suffix.name_cut('123456789')
# '987654321'
# >>> Suffix.name_cut('123456789', 3)
# '987'
# """
# if length is None:
# return name[::-1]
# else:
# return name[::-1][:length]
#
# Path: tools/retry.py
# def retry(func, *args, **kwargs):
# for attempt in range(MAX_ATTEMPTS):
# if attempt:
# seconds = min(300, 2 ** attempt)
# logging.warning(
# "Attempt %d of %d will start in %d seconds." % (
# attempt + 1, MAX_ATTEMPTS, seconds))
# time.sleep(seconds)
# try:
# return func(*args, **kwargs)
# except (datastore_errors.Timeout, apiproxy_errors.Error,
# urllib2.URLError, socket.error), error:
# logging.error(type(error))
# logging.error(str(error))
# if attempt + 1 >= MAX_ATTEMPTS:
# raise
, which may include functions, classes, or code. Output only the next line. | fetch_file(Prefix, length) |
Based on the snippet: <|code_start|> kind = Model.kind()
filename = '.data/popular/%ses.%d.txt' % (kind.split('_')[-1], length)
outfile = open(filename, 'w')
start = db.Key.from_path(kind, '-')
while True:
query = Model.all().filter('length', length).order('__key__')
query.filter('__key__ >', start)
prefixes = retry(query.fetch, 1000)
for prefix in prefixes:
name = prefix.key().name()
if name.startswith('.'):
continue
if kind == 'prefixes_suffix':
name = name[::-1]
outfile.write('%d %s\n' % (prefix.com, name))
if len(prefixes) < 1000:
break
start = prefixes[-1].key()
outfile.close()
def auth_func():
return open('.passwd').read().split(':')
if __name__ == '__main__':
remote_api_stub.ConfigureRemoteDatastore(
'scoretool', '/remote_api_hidden', auth_func, 'www.nxdom.com')
for length in range(2, 7):
fetch_file(Prefix, length)
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import sys
from common.appenginepatch.aecmd import setup_env
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from prefixes.models import Prefix, Suffix
from tools.retry import retry
and context (classes, functions, sometimes code) from other files:
# Path: prefixes/models.py
# class Prefix(db.Expando):
# """
# Count all domains in the datastore that start with the same
# prefix. The key name is the prefix, with a leading dot and
# optional property 'resume' if incomplete.
# """
# length = db.IntegerProperty(required=True)
# count = db.IntegerProperty(required=True, default=0)
# com = db.IntegerProperty()
# percentage = db.FloatProperty()
# timestamp = db.DateTimeProperty()
#
# @classmethod
# def name_cut(cls, name, length=None):
# """
# >>> Prefix.name_cut('123456789')
# '123456789'
# >>> Prefix.name_cut('123456789', 3)
# '123'
# """
# if length is None:
# return name
# else:
# return name[:length]
#
# class Suffix(db.Expando):
# """
# Count all domains in the datastore that end with the same suffix.
# The key name is the suffix backwards, with a leading dot
# and optional property 'resume' if incomplete.
# """
# length = db.IntegerProperty(required=True)
# count = db.IntegerProperty(required=True, default=0)
# com = db.IntegerProperty()
# percentage = db.FloatProperty()
# timestamp = db.DateTimeProperty()
#
# @classmethod
# def name_cut(cls, name, length=None):
# """
# >>> Suffix.name_cut('123456789')
# '987654321'
# >>> Suffix.name_cut('123456789', 3)
# '987'
# """
# if length is None:
# return name[::-1]
# else:
# return name[::-1][:length]
#
# Path: tools/retry.py
# def retry(func, *args, **kwargs):
# for attempt in range(MAX_ATTEMPTS):
# if attempt:
# seconds = min(300, 2 ** attempt)
# logging.warning(
# "Attempt %d of %d will start in %d seconds." % (
# attempt + 1, MAX_ATTEMPTS, seconds))
# time.sleep(seconds)
# try:
# return func(*args, **kwargs)
# except (datastore_errors.Timeout, apiproxy_errors.Error,
# urllib2.URLError, socket.error), error:
# logging.error(type(error))
# logging.error(str(error))
# if attempt + 1 >= MAX_ATTEMPTS:
# raise
. Output only the next line. | fetch_file(Suffix, length) |
Given the code snippet: <|code_start|>#!/usr/bin/env python
# Setup project environment in the parent directory.
sys.path[0] = os.path.dirname(sys.path[0])
setup_env()
def fetch_file(Model, length):
kind = Model.kind()
filename = '.data/popular/%ses.%d.txt' % (kind.split('_')[-1], length)
outfile = open(filename, 'w')
start = db.Key.from_path(kind, '-')
while True:
query = Model.all().filter('length', length).order('__key__')
query.filter('__key__ >', start)
<|code_end|>
, generate the next line using the imports in this file:
import os
import sys
from common.appenginepatch.aecmd import setup_env
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from prefixes.models import Prefix, Suffix
from tools.retry import retry
and context (functions, classes, or occasionally code) from other files:
# Path: prefixes/models.py
# class Prefix(db.Expando):
# """
# Count all domains in the datastore that start with the same
# prefix. The key name is the prefix, with a leading dot and
# optional property 'resume' if incomplete.
# """
# length = db.IntegerProperty(required=True)
# count = db.IntegerProperty(required=True, default=0)
# com = db.IntegerProperty()
# percentage = db.FloatProperty()
# timestamp = db.DateTimeProperty()
#
# @classmethod
# def name_cut(cls, name, length=None):
# """
# >>> Prefix.name_cut('123456789')
# '123456789'
# >>> Prefix.name_cut('123456789', 3)
# '123'
# """
# if length is None:
# return name
# else:
# return name[:length]
#
# class Suffix(db.Expando):
# """
# Count all domains in the datastore that end with the same suffix.
# The key name is the suffix backwards, with a leading dot
# and optional property 'resume' if incomplete.
# """
# length = db.IntegerProperty(required=True)
# count = db.IntegerProperty(required=True, default=0)
# com = db.IntegerProperty()
# percentage = db.FloatProperty()
# timestamp = db.DateTimeProperty()
#
# @classmethod
# def name_cut(cls, name, length=None):
# """
# >>> Suffix.name_cut('123456789')
# '987654321'
# >>> Suffix.name_cut('123456789', 3)
# '987'
# """
# if length is None:
# return name[::-1]
# else:
# return name[::-1][:length]
#
# Path: tools/retry.py
# def retry(func, *args, **kwargs):
# for attempt in range(MAX_ATTEMPTS):
# if attempt:
# seconds = min(300, 2 ** attempt)
# logging.warning(
# "Attempt %d of %d will start in %d seconds." % (
# attempt + 1, MAX_ATTEMPTS, seconds))
# time.sleep(seconds)
# try:
# return func(*args, **kwargs)
# except (datastore_errors.Timeout, apiproxy_errors.Error,
# urllib2.URLError, socket.error), error:
# logging.error(type(error))
# logging.error(str(error))
# if attempt + 1 >= MAX_ATTEMPTS:
# raise
. Output only the next line. | prefixes = retry(query.fetch, 1000) |
Predict the next line after this snippet: <|code_start|>
register = template.Library()
@register.simple_tag
def feedback_form(request):
page = request.META['PATH_INFO']
feedback_form = FeedbackForm(initial={'page': page})
return render_to_string('feedback/form.html', locals())
def render_query(request, query, limit=10):
feedback_list = []
for feedback in query.fetch(limit):
try:
submitter = feedback.submitter # Attempt to dereference.
feedback_list.append(feedback)
except datastore_errors.Error:
pass # Ignore feedback if the submitter doesn't exist.
already_voted = get_already_voted(request)
return render_to_string('feedback/messages.html', locals())
@register.simple_tag
def feedback_recently(request, page=None):
if page is None:
page = request.META['PATH_INFO']
<|code_end|>
using the current file's imports:
from django import template
from django.template.loader import render_to_string
from google.appengine.api import datastore_errors
from feedback.models import Feedback, Vote
from feedback.forms import FeedbackForm
from feedback.views import get_already_voted
and any relevant context from other files:
# Path: feedback/models.py
# class Feedback(db.Model):
# page = db.StringProperty(required=True)
# message = db.StringProperty(required=True)
# points = db.IntegerProperty(default=1)
# ip = db.StringProperty()
# submitter = db.ReferenceProperty(User)
# submitted = db.DateTimeProperty(auto_now_add=True)
#
# def __unicode__(self):
# return self.message[:50]
#
# class Vote(db.Model):
# feedback = db.ReferenceProperty(Feedback, required=True)
# ip = db.StringProperty(required=True)
#
# def feedback_id(self):
# return Vote.feedback.get_value_for_datastore(self).id()
#
# Path: feedback/forms.py
# class FeedbackForm(forms.Form):
# """
# Simple form for user feedback.
# """
# message = forms.CharField(max_length=400, required=True,
# label="How can we improve this page?",
# widget=forms.TextInput(attrs={'class': 'text span-6'}))
# page = forms.CharField(max_length=400, required=True,
# widget=forms.HiddenInput)
#
# Path: feedback/views.py
# def get_already_voted(request):
# """
# Don't show vote buttons if posted or voted from the same IP.
# """
# ip = request.META.get('REMOTE_ADDR', '0.0.0.0')
# posted = [feedback.id()
# for feedback in Feedback.all(keys_only=True).filter('ip', ip)]
# voted = [vote.feedback_id()
# for vote in Vote.all().filter('ip', ip)]
# # logging.debug('posted=%s voted=%s' % (posted, voted))
# return set(posted + voted)
. Output only the next line. | query = Feedback.all() |
Predict the next line for this snippet: <|code_start|>
register = template.Library()
@register.simple_tag
def feedback_form(request):
page = request.META['PATH_INFO']
<|code_end|>
with the help of current file imports:
from django import template
from django.template.loader import render_to_string
from google.appengine.api import datastore_errors
from feedback.models import Feedback, Vote
from feedback.forms import FeedbackForm
from feedback.views import get_already_voted
and context from other files:
# Path: feedback/models.py
# class Feedback(db.Model):
# page = db.StringProperty(required=True)
# message = db.StringProperty(required=True)
# points = db.IntegerProperty(default=1)
# ip = db.StringProperty()
# submitter = db.ReferenceProperty(User)
# submitted = db.DateTimeProperty(auto_now_add=True)
#
# def __unicode__(self):
# return self.message[:50]
#
# class Vote(db.Model):
# feedback = db.ReferenceProperty(Feedback, required=True)
# ip = db.StringProperty(required=True)
#
# def feedback_id(self):
# return Vote.feedback.get_value_for_datastore(self).id()
#
# Path: feedback/forms.py
# class FeedbackForm(forms.Form):
# """
# Simple form for user feedback.
# """
# message = forms.CharField(max_length=400, required=True,
# label="How can we improve this page?",
# widget=forms.TextInput(attrs={'class': 'text span-6'}))
# page = forms.CharField(max_length=400, required=True,
# widget=forms.HiddenInput)
#
# Path: feedback/views.py
# def get_already_voted(request):
# """
# Don't show vote buttons if posted or voted from the same IP.
# """
# ip = request.META.get('REMOTE_ADDR', '0.0.0.0')
# posted = [feedback.id()
# for feedback in Feedback.all(keys_only=True).filter('ip', ip)]
# voted = [vote.feedback_id()
# for vote in Vote.all().filter('ip', ip)]
# # logging.debug('posted=%s voted=%s' % (posted, voted))
# return set(posted + voted)
, which may contain function names, class names, or code. Output only the next line. | feedback_form = FeedbackForm(initial={'page': page}) |
Using the snippet: <|code_start|>
register = template.Library()
@register.simple_tag
def feedback_form(request):
page = request.META['PATH_INFO']
feedback_form = FeedbackForm(initial={'page': page})
return render_to_string('feedback/form.html', locals())
def render_query(request, query, limit=10):
feedback_list = []
for feedback in query.fetch(limit):
try:
submitter = feedback.submitter # Attempt to dereference.
feedback_list.append(feedback)
except datastore_errors.Error:
pass # Ignore feedback if the submitter doesn't exist.
<|code_end|>
, determine the next line of code. You have imports:
from django import template
from django.template.loader import render_to_string
from google.appengine.api import datastore_errors
from feedback.models import Feedback, Vote
from feedback.forms import FeedbackForm
from feedback.views import get_already_voted
and context (class names, function names, or code) available:
# Path: feedback/models.py
# class Feedback(db.Model):
# page = db.StringProperty(required=True)
# message = db.StringProperty(required=True)
# points = db.IntegerProperty(default=1)
# ip = db.StringProperty()
# submitter = db.ReferenceProperty(User)
# submitted = db.DateTimeProperty(auto_now_add=True)
#
# def __unicode__(self):
# return self.message[:50]
#
# class Vote(db.Model):
# feedback = db.ReferenceProperty(Feedback, required=True)
# ip = db.StringProperty(required=True)
#
# def feedback_id(self):
# return Vote.feedback.get_value_for_datastore(self).id()
#
# Path: feedback/forms.py
# class FeedbackForm(forms.Form):
# """
# Simple form for user feedback.
# """
# message = forms.CharField(max_length=400, required=True,
# label="How can we improve this page?",
# widget=forms.TextInput(attrs={'class': 'text span-6'}))
# page = forms.CharField(max_length=400, required=True,
# widget=forms.HiddenInput)
#
# Path: feedback/views.py
# def get_already_voted(request):
# """
# Don't show vote buttons if posted or voted from the same IP.
# """
# ip = request.META.get('REMOTE_ADDR', '0.0.0.0')
# posted = [feedback.id()
# for feedback in Feedback.all(keys_only=True).filter('ip', ip)]
# voted = [vote.feedback_id()
# for vote in Vote.all().filter('ip', ip)]
# # logging.debug('posted=%s voted=%s' % (posted, voted))
# return set(posted + voted)
. Output only the next line. | already_voted = get_already_voted(request) |
Predict the next line after this snippet: <|code_start|>#!/usr/bin/env python
# Setup project environment in the parent directory.
sys.path[0] = os.path.dirname(sys.path[0])
WORDS = """
ich mit einem wenig kostenlosen begabten verschraubenden
Fuchs Traum Menschenfresser Dampfschiffahrt Dampfschifffahrt
me with another little inexpensive ridiculous interlocking
Python beautiful illusionist Antidisestablishmentarianism
""".split()
print '%-8s %-8s %-8s %-8s %s' % tuple(
'english spanish french german word'.split())
for word in WORDS:
print '%-8d %-8d %-8d %-8d %s' % (
<|code_end|>
using the current file's imports:
import os
import sys
from languages.utils import word_score
from languages import english, spanish, french, german
and any relevant context from other files:
# Path: languages/utils.py
# def word_score(word, triple_scores):
# triples = list(word_triples(word))
# result = 0.0
# for triple in triples:
# result += triple_scores.get(triple, 0.0)
# return result / len(triples)
#
# Path: languages/english.py
# TRIPLE_SCORES = {}
#
# Path: languages/spanish.py
# TRIPLE_SCORES = {}
#
# Path: languages/french.py
# TRIPLE_SCORES = {}
#
# Path: languages/german.py
# TRIPLE_SCORES = {}
. Output only the next line. | word_score(word, english.TRIPLE_SCORES), |
Predict the next line after this snippet: <|code_start|>#!/usr/bin/env python
# Setup project environment in the parent directory.
sys.path[0] = os.path.dirname(sys.path[0])
WORDS = """
ich mit einem wenig kostenlosen begabten verschraubenden
Fuchs Traum Menschenfresser Dampfschiffahrt Dampfschifffahrt
me with another little inexpensive ridiculous interlocking
Python beautiful illusionist Antidisestablishmentarianism
""".split()
print '%-8s %-8s %-8s %-8s %s' % tuple(
'english spanish french german word'.split())
for word in WORDS:
print '%-8d %-8d %-8d %-8d %s' % (
<|code_end|>
using the current file's imports:
import os
import sys
from languages.utils import word_score
from languages import english, spanish, french, german
and any relevant context from other files:
# Path: languages/utils.py
# def word_score(word, triple_scores):
# triples = list(word_triples(word))
# result = 0.0
# for triple in triples:
# result += triple_scores.get(triple, 0.0)
# return result / len(triples)
#
# Path: languages/english.py
# TRIPLE_SCORES = {}
#
# Path: languages/spanish.py
# TRIPLE_SCORES = {}
#
# Path: languages/french.py
# TRIPLE_SCORES = {}
#
# Path: languages/german.py
# TRIPLE_SCORES = {}
. Output only the next line. | word_score(word, english.TRIPLE_SCORES), |
Given snippet: <|code_start|>#!/usr/bin/env python
# Setup project environment in the parent directory.
sys.path[0] = os.path.dirname(sys.path[0])
WORDS = """
ich mit einem wenig kostenlosen begabten verschraubenden
Fuchs Traum Menschenfresser Dampfschiffahrt Dampfschifffahrt
me with another little inexpensive ridiculous interlocking
Python beautiful illusionist Antidisestablishmentarianism
""".split()
print '%-8s %-8s %-8s %-8s %s' % tuple(
'english spanish french german word'.split())
for word in WORDS:
print '%-8d %-8d %-8d %-8d %s' % (
word_score(word, english.TRIPLE_SCORES),
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import sys
from languages.utils import word_score
from languages import english, spanish, french, german
and context:
# Path: languages/utils.py
# def word_score(word, triple_scores):
# triples = list(word_triples(word))
# result = 0.0
# for triple in triples:
# result += triple_scores.get(triple, 0.0)
# return result / len(triples)
#
# Path: languages/english.py
# TRIPLE_SCORES = {}
#
# Path: languages/spanish.py
# TRIPLE_SCORES = {}
#
# Path: languages/french.py
# TRIPLE_SCORES = {}
#
# Path: languages/german.py
# TRIPLE_SCORES = {}
which might include code, classes, or functions. Output only the next line. | word_score(word, spanish.TRIPLE_SCORES), |
Next line prediction: <|code_start|>#!/usr/bin/env python
# Setup project environment in the parent directory.
sys.path[0] = os.path.dirname(sys.path[0])
WORDS = """
ich mit einem wenig kostenlosen begabten verschraubenden
Fuchs Traum Menschenfresser Dampfschiffahrt Dampfschifffahrt
me with another little inexpensive ridiculous interlocking
Python beautiful illusionist Antidisestablishmentarianism
""".split()
print '%-8s %-8s %-8s %-8s %s' % tuple(
'english spanish french german word'.split())
for word in WORDS:
print '%-8d %-8d %-8d %-8d %s' % (
word_score(word, english.TRIPLE_SCORES),
word_score(word, spanish.TRIPLE_SCORES),
<|code_end|>
. Use current file imports:
(import os
import sys
from languages.utils import word_score
from languages import english, spanish, french, german)
and context including class names, function names, or small code snippets from other files:
# Path: languages/utils.py
# def word_score(word, triple_scores):
# triples = list(word_triples(word))
# result = 0.0
# for triple in triples:
# result += triple_scores.get(triple, 0.0)
# return result / len(triples)
#
# Path: languages/english.py
# TRIPLE_SCORES = {}
#
# Path: languages/spanish.py
# TRIPLE_SCORES = {}
#
# Path: languages/french.py
# TRIPLE_SCORES = {}
#
# Path: languages/german.py
# TRIPLE_SCORES = {}
. Output only the next line. | word_score(word, french.TRIPLE_SCORES), |
Predict the next line for this snippet: <|code_start|>#!/usr/bin/env python
# Setup project environment in the parent directory.
sys.path[0] = os.path.dirname(sys.path[0])
WORDS = """
ich mit einem wenig kostenlosen begabten verschraubenden
Fuchs Traum Menschenfresser Dampfschiffahrt Dampfschifffahrt
me with another little inexpensive ridiculous interlocking
Python beautiful illusionist Antidisestablishmentarianism
""".split()
print '%-8s %-8s %-8s %-8s %s' % tuple(
'english spanish french german word'.split())
for word in WORDS:
print '%-8d %-8d %-8d %-8d %s' % (
word_score(word, english.TRIPLE_SCORES),
word_score(word, spanish.TRIPLE_SCORES),
word_score(word, french.TRIPLE_SCORES),
<|code_end|>
with the help of current file imports:
import os
import sys
from languages.utils import word_score
from languages import english, spanish, french, german
and context from other files:
# Path: languages/utils.py
# def word_score(word, triple_scores):
# triples = list(word_triples(word))
# result = 0.0
# for triple in triples:
# result += triple_scores.get(triple, 0.0)
# return result / len(triples)
#
# Path: languages/english.py
# TRIPLE_SCORES = {}
#
# Path: languages/spanish.py
# TRIPLE_SCORES = {}
#
# Path: languages/french.py
# TRIPLE_SCORES = {}
#
# Path: languages/german.py
# TRIPLE_SCORES = {}
, which may contain function names, class names, or code. Output only the next line. | word_score(word, german.TRIPLE_SCORES), |
Here is a snippet: <|code_start|>#!/usr/bin/env python
# Setup project environment in the parent directory.
sys.path[0] = os.path.dirname(sys.path[0])
setup_env()
def auth_func():
return open('.passwd').read().split(':')
def print_counts(name, counts):
print name, '= {'
keys = counts.keys()
keys.sort()
for key in keys:
if counts[key]:
print "'%s': %d," % (key, counts[key])
print '}'
def main():
one = {}
two = {}
total = 0
<|code_end|>
. Write the next line using the current file imports:
import os
import sys
from common.appenginepatch.aecmd import setup_env
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.api.datastore_errors import Timeout
from domains.models import DOMAIN_CHARS
from prefixes.models import Prefix
from optparse import OptionParser
and context from other files:
# Path: domains/models.py
# DOMAIN_CHARS = 'abcdefghijklmnopqrstuvwxyz-0123456789'
#
# Path: prefixes/models.py
# class Prefix(db.Expando):
# """
# Count all domains in the datastore that start with the same
# prefix. The key name is the prefix, with a leading dot and
# optional property 'resume' if incomplete.
# """
# length = db.IntegerProperty(required=True)
# count = db.IntegerProperty(required=True, default=0)
# com = db.IntegerProperty()
# percentage = db.FloatProperty()
# timestamp = db.DateTimeProperty()
#
# @classmethod
# def name_cut(cls, name, length=None):
# """
# >>> Prefix.name_cut('123456789')
# '123456789'
# >>> Prefix.name_cut('123456789', 3)
# '123'
# """
# if length is None:
# return name
# else:
# return name[:length]
, which may include functions, classes, or code. Output only the next line. | for c1 in DOMAIN_CHARS: |
Predict the next line for this snippet: <|code_start|>#!/usr/bin/env python
# Setup project environment in the parent directory.
sys.path[0] = os.path.dirname(sys.path[0])
setup_env()
def auth_func():
return open('.passwd').read().split(':')
def print_counts(name, counts):
print name, '= {'
keys = counts.keys()
keys.sort()
for key in keys:
if counts[key]:
print "'%s': %d," % (key, counts[key])
print '}'
def main():
one = {}
two = {}
total = 0
for c1 in DOMAIN_CHARS:
<|code_end|>
with the help of the current file's imports:
import os
import sys
from common.appenginepatch.aecmd import setup_env
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.api.datastore_errors import Timeout
from domains.models import DOMAIN_CHARS
from prefixes.models import Prefix
from optparse import OptionParser
and context from other files:
# Path: domains/models.py
# DOMAIN_CHARS = 'abcdefghijklmnopqrstuvwxyz-0123456789'
#
# Path: prefixes/models.py
# class Prefix(db.Expando):
# """
# Count all domains in the datastore that start with the same
# prefix. The key name is the prefix, with a leading dot and
# optional property 'resume' if incomplete.
# """
# length = db.IntegerProperty(required=True)
# count = db.IntegerProperty(required=True, default=0)
# com = db.IntegerProperty()
# percentage = db.FloatProperty()
# timestamp = db.DateTimeProperty()
#
# @classmethod
# def name_cut(cls, name, length=None):
# """
# >>> Prefix.name_cut('123456789')
# '123456789'
# >>> Prefix.name_cut('123456789', 3)
# '123'
# """
# if length is None:
# return name
# else:
# return name[:length]
, which may contain function names, class names, or code. Output only the next line. | query = Prefix.all().filter('length', 2) |
Based on the snippet: <|code_start|>#!/usr/bin/env python
# Setup project environment in the parent directory.
sys.path[0] = os.path.dirname(sys.path[0])
ACCEPTABLE_WORD_REGEX = re.compile('^[a-z0-9-]+$')
def count_triples(words, counters):
rejfile = open('rejected.txt', 'w')
for word in words:
word = word.lower().strip()
word = word.lstrip('*').rstrip('#')
word = word.replace('\\', '').replace('/', '')
word = word.replace('~', '').replace('$', '')
word = word.replace('^', '').replace('`', '')
word = word.replace('"', '').replace("'", '')
if not ACCEPTABLE_WORD_REGEX.match(word):
print >> rejfile, word
continue
<|code_end|>
, predict the immediate next line with the help of the imports:
import os
import sys
import re
from languages.utils import word_triples
and context (classes, functions, sometimes code) from other files:
# Path: languages/utils.py
# def word_triples(word):
# """
# >>> list(word_triples('weight'))
# ['^wei', 'weight', 'eight$']
# >>> list(word_triples('eightyfive'))
# ['^eight', 'eighty', 'ghtyf', 'yfi', 'fiv', 'ive', 've$']
# """
# groups = ['^'] + list(word_groups(word)) + ['$']
# for start in range(len(groups) - 2):
# yield ''.join(groups[start:start + 3])
. Output only the next line. | for triple in word_triples(word): |
Here is a snippet: <|code_start|> .. code-block:: json
{
"jsonrpc": "2.0",
"method": "runCmds",
"params": {
"version": 1,
"cmds": [
<commands>
],
"format": [json, text],
}
"id": <reqid>
}
Args:
commands (list): A list of commands to include in the eAPI
request object
encoding (string): The encoding method passed as the `format`
parameter in the eAPI request
reqid (string): A custom value to assign to the request ID
field. This value is automatically generated if not passed
**kwargs: Additional keyword arguments for expanded eAPI
functionality. Only supported eAPI params are used in building
the request
Returns:
A JSON encoding request structure that can be send over eAPI
"""
<|code_end|>
. Write the next line using the current file imports:
import sys
import json
import socket
import base64
import logging
import ssl
import re
from http.client import HTTPConnection, HTTPSConnection
from httplib import HTTPConnection, HTTPSConnection
from pyeapi.utils import make_iterable
and context from other files:
# Path: pyeapi/utils.py
# def make_iterable(value):
# """Converts the supplied value to a list object
#
# This function will inspect the supplied value and return an
# iterable in the form of a list.
#
# Args:
# value (object): An valid Python object
#
# Returns:
# An iterable object of type list
# """
# if sys.version_info <= (3, 0):
# # Convert unicode values to strings for Python 2
# if isinstance(value, unicode):
# value = str(value)
# if isinstance(value, str) or isinstance(value, dict):
# value = [value]
#
# if sys.version_info <= (3, 3):
# if not isinstance(value, collections.Iterable):
# raise TypeError('value must be an iterable object')
# else:
# if not isinstance(value, collections.abc.Iterable):
# raise TypeError('value must be an iterable object')
#
# return value
, which may include functions, classes, or code. Output only the next line. | commands = make_iterable(commands) |
Next line prediction: <|code_start|> super(Acls, self).__init__(node, *args, **kwargs)
self._instances = dict()
def get(self, name):
return self.get_instance(name)[name]
def getall(self):
"""Returns all ACLs in a dict object.
Returns:
A Python dictionary object containing all ACL
configuration indexed by ACL name::
{
"<ACL1 name>": {...},
"<ACL2 name>": {...}
}
"""
acl_re = re.compile(r'^ip access-list (?:(standard) )?(.+)$', re.M)
response = {'standard': {}, 'extended': {}}
for acl_type, name in acl_re.findall(self.config):
acl = self.get(name)
if acl_type and acl_type == 'standard':
response['standard'][name] = acl
else:
response['extended'][name] = acl
return response
def __getattr__(self, name):
<|code_end|>
. Use current file imports:
(import re
import netaddr
from pyeapi.api import EntityCollection
from pyeapi.utils import ProxyCall)
and context including class names, function names, or small code snippets from other files:
# Path: pyeapi/utils.py
# class ProxyCall(object):
#
# def __init__(self, proxy, method):
# self.proxy = proxy
# self.method = method
#
# def __call__(self, *args, **kwargs):
# return self.proxy(self.method, *args, **kwargs)
. Output only the next line. | return ProxyCall(self.marshall, name) |
Continue the code snippet: <|code_start|> command = 'no vrf instance %s' % vrf_name
else:
command = 'no vrf definition %s' % vrf_name
return self.configure(command)
def default(self, vrf_name):
""" Defaults the VRF configuration for given name
Args:
vrf_name (str): The VRF name to default
Returns:
True if the operation was successful otherwise False
"""
if self.version_number >= '4.23':
command = 'default vrf instance %s' % vrf_name
else:
command = 'default vrf definition %s' % vrf_name
return self.configure(command)
def configure_vrf(self, vrf_name, commands):
""" Configures the specified VRF using commands
Args:
vrf_name (str): The VRF name to configure
commands: The list of commands to configure
Returns:
True if the commands completed successfully
"""
<|code_end|>
. Use current file imports:
import re
from pyeapi.api import EntityCollection
from pyeapi.utils import make_iterable
and context (classes, functions, or code) from other files:
# Path: pyeapi/utils.py
# def make_iterable(value):
# """Converts the supplied value to a list object
#
# This function will inspect the supplied value and return an
# iterable in the form of a list.
#
# Args:
# value (object): An valid Python object
#
# Returns:
# An iterable object of type list
# """
# if sys.version_info <= (3, 0):
# # Convert unicode values to strings for Python 2
# if isinstance(value, unicode):
# value = str(value)
# if isinstance(value, str) or isinstance(value, dict):
# value = [value]
#
# if sys.version_info <= (3, 3):
# if not isinstance(value, collections.Iterable):
# raise TypeError('value must be an iterable object')
# else:
# if not isinstance(value, collections.abc.Iterable):
# raise TypeError('value must be an iterable object')
#
# return value
. Output only the next line. | commands = make_iterable(commands) |
Given snippet: <|code_start|> True if the create operation succeeds otherwise False.
"""
string = 'switchport trunk allowed vlan'
command = self.command_builder(string, value=value, default=default,
disable=disable)
return self.configure_interface(name, command)
def set_trunk_groups(self, intf, value=None, default=False, disable=False):
"""Configures the switchport trunk group value
Args:
intf (str): The interface identifier to configure.
value (str): The set of values to configure the trunk group
default (bool): Configures the trunk group default value
disable (bool): Negates all trunk group settings
Returns:
True if the config operation succeeds otherwise False
"""
if default:
cmd = 'default switchport trunk group'
return self.configure_interface(intf, cmd)
if disable:
cmd = 'no switchport trunk group'
return self.configure_interface(intf, cmd)
current_value = self.get(intf)['trunk_groups']
failure = False
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
from pyeapi.api import EntityCollection
from pyeapi.utils import make_iterable
and context:
# Path: pyeapi/utils.py
# def make_iterable(value):
# """Converts the supplied value to a list object
#
# This function will inspect the supplied value and return an
# iterable in the form of a list.
#
# Args:
# value (object): An valid Python object
#
# Returns:
# An iterable object of type list
# """
# if sys.version_info <= (3, 0):
# # Convert unicode values to strings for Python 2
# if isinstance(value, unicode):
# value = str(value)
# if isinstance(value, str) or isinstance(value, dict):
# value = [value]
#
# if sys.version_info <= (3, 3):
# if not isinstance(value, collections.Iterable):
# raise TypeError('value must be an iterable object')
# else:
# if not isinstance(value, collections.abc.Iterable):
# raise TypeError('value must be an iterable object')
#
# return value
which might include code, classes, or functions. Output only the next line. | value = make_iterable(value) |
Next line prediction: <|code_start|> def __init__(self, node, *args, **kwargs):
super(Interfaces, self).__init__(node, *args, **kwargs)
self._instances = dict()
def get(self, name):
return self.get_instance(name)[name]
def getall(self):
"""Returns all interfaces in a dict object.
Returns:
A Python dictionary object containing all interface
configuration indexed by interface name::
{
"Ethernet1": {...},
"Ethernet2": {...}
}
"""
interfaces_re = re.compile(r'(?<=^interface\s)(.+)$', re.M)
response = dict()
for name in interfaces_re.findall(self.config):
interface = self.get(name)
if interface:
response[name] = interface
return response
def __getattr__(self, name):
<|code_end|>
. Use current file imports:
(import re
from pyeapi.api import EntityCollection
from pyeapi.utils import ProxyCall)
and context including class names, function names, or small code snippets from other files:
# Path: pyeapi/utils.py
# class ProxyCall(object):
#
# def __init__(self, proxy, method):
# self.proxy = proxy
# self.method = method
#
# def __call__(self, *args, **kwargs):
# return self.proxy(self.method, *args, **kwargs)
. Output only the next line. | return ProxyCall(self.marshall, name) |
Here is a snippet: <|code_start|> """
command = 'no vlan %s' % vid
return self.configure(command) if isvlan(vid) else False
def default(self, vid):
""" Defaults the VLAN configuration
.. code-block:: none
default vlan <vlanid>
Args:
vid (str): The VLAN ID to default
Returns:
True if the operation was successful otherwise False
"""
command = 'default vlan %s' % vid
return self.configure(command) if isvlan(vid) else False
def configure_vlan(self, vid, commands):
""" Configures the specified Vlan using commands
Args:
vid (str): The VLAN ID to configure
commands: The list of commands to configure
Returns:
True if the commands completed successfully
"""
<|code_end|>
. Write the next line using the current file imports:
import re
from pyeapi.api import EntityCollection
from pyeapi.utils import make_iterable
and context from other files:
# Path: pyeapi/utils.py
# def make_iterable(value):
# """Converts the supplied value to a list object
#
# This function will inspect the supplied value and return an
# iterable in the form of a list.
#
# Args:
# value (object): An valid Python object
#
# Returns:
# An iterable object of type list
# """
# if sys.version_info <= (3, 0):
# # Convert unicode values to strings for Python 2
# if isinstance(value, unicode):
# value = str(value)
# if isinstance(value, str) or isinstance(value, dict):
# value = [value]
#
# if sys.version_info <= (3, 3):
# if not isinstance(value, collections.Iterable):
# raise TypeError('value must be an iterable object')
# else:
# if not isinstance(value, collections.abc.Iterable):
# raise TypeError('value must be an iterable object')
#
# return value
, which may include functions, classes, or code. Output only the next line. | commands = make_iterable(commands) |
Given the following code snippet before the placeholder: <|code_start|> """
try:
parent = r'^%s$' % parent
return self.node.section(parent, config=config)
except TypeError:
return None
def configure(self, commands):
"""Sends the commands list to the node in config mode
This method performs configuration the node using the array of
commands specified. This method wraps the configuration commands
in a try/except block and stores any exceptions in the error
property.
Note:
If the return from this method is False, use the error property
to investigate the exception
Args:
commands (list): A list of commands to be sent to the node in
config mode
Returns:
True if the commands are executed without exception otherwise
False is returned
"""
try:
self.node.config(commands)
return True
<|code_end|>
, predict the next line using imports from the current file:
from collections.abc import Callable, Mapping
from pyeapi.eapilib import CommandError, ConnectionError
from pyeapi.utils import make_iterable
and context including class names, function names, and sometimes code from other files:
# Path: pyeapi/eapilib.py
# class CommandError(EapiError):
# """Base exception raised for command errors
#
# The CommandError instance provides a custom exception that can be used
# if the eAPI command(s) fail. It provides some additional information
# that can be used to understand what caused the exception.
#
# Args:
# error_code (int): The error code returned from the eAPI call.
# error_text (string): The error text message that coincides with the
# error_code
# commands (array): The list of commands that were sent to the node
# that generated the error
# message (string): The exception error message which is a concatenation
# of the error_code and error_text
# """
# def __init__(self, code, message, **kwargs):
# cmd_err = kwargs.get('command_error')
# if int(code) in [1000, 1002, 1004]:
# msg_fmt = 'Error [{}]: {} [{}]'.format(code, message, cmd_err)
# else:
# msg_fmt = 'Error [{}]: {}'.format(code, message)
#
# super(CommandError, self).__init__(msg_fmt)
# self.error_code = code
# self.error_text = message
# self.command_error = cmd_err
# self.commands = kwargs.get('commands')
# self.output = kwargs.get('output')
# self.message = msg_fmt
#
# @property
# def trace(self):
# return self.get_trace()
#
# def get_trace(self):
# trace = list()
# index = None
#
# for index, out in enumerate(self.output):
# _entry = {'command': self.commands[index], 'output': out}
# trace.append(_entry)
#
# if index:
# index += 1
# for cmd in self.commands[index:]:
# _entry = {'command': cmd, 'output': None}
# trace.append(_entry)
#
# return trace
#
# class ConnectionError(EapiError):
# """Base exception raised for connection errors
#
# Connection errors are raised when a connection object is unable to
# connect to the node. Typically these errors can result from using
# the wrong transport type or not providing valid credentials.
#
# Args:
# commands (array): The list of commands there were sent to the
# node that when the exception was raised
# connection_type (string): The string identifier for the connection
# object that generate the error
# message (string): The exception error message
# response (string): The message generate from the response packet
#
# """
# def __init__(self, connection_type, message, commands=None):
# self.message = message
# self.connection_type = connection_type
# self.commands = commands
# super(ConnectionError, self).__init__(message)
#
# Path: pyeapi/utils.py
# def make_iterable(value):
# """Converts the supplied value to a list object
#
# This function will inspect the supplied value and return an
# iterable in the form of a list.
#
# Args:
# value (object): An valid Python object
#
# Returns:
# An iterable object of type list
# """
# if sys.version_info <= (3, 0):
# # Convert unicode values to strings for Python 2
# if isinstance(value, unicode):
# value = str(value)
# if isinstance(value, str) or isinstance(value, dict):
# value = [value]
#
# if sys.version_info <= (3, 3):
# if not isinstance(value, collections.Iterable):
# raise TypeError('value must be an iterable object')
# else:
# if not isinstance(value, collections.abc.Iterable):
# raise TypeError('value must be an iterable object')
#
# return value
. Output only the next line. | except (CommandError): |
Next line prediction: <|code_start|> disable (bool): Specifies the command should use the no
keyword argument. Disable preempts value.
Returns:
A command string that can be used to configure the node
"""
if default:
return 'default %s' % string
elif disable:
return 'no %s' % string
elif value is True:
return string
elif value:
return '%s %s' % (string, value)
else:
return 'no %s' % string
# -- above line to be deprecated and replaced with the error below
# raise ValueError("abstract.command_builder: No value "
# "received '%s'" % value)
def configure_interface(self, name, commands):
"""Configures the specified interface with the commands
Args:
name (str): The interface name to configure
commands: The commands to configure in the interface
Returns:
True if the commands completed successfully
"""
<|code_end|>
. Use current file imports:
(from collections.abc import Callable, Mapping
from pyeapi.eapilib import CommandError, ConnectionError
from pyeapi.utils import make_iterable)
and context including class names, function names, or small code snippets from other files:
# Path: pyeapi/eapilib.py
# class CommandError(EapiError):
# """Base exception raised for command errors
#
# The CommandError instance provides a custom exception that can be used
# if the eAPI command(s) fail. It provides some additional information
# that can be used to understand what caused the exception.
#
# Args:
# error_code (int): The error code returned from the eAPI call.
# error_text (string): The error text message that coincides with the
# error_code
# commands (array): The list of commands that were sent to the node
# that generated the error
# message (string): The exception error message which is a concatenation
# of the error_code and error_text
# """
# def __init__(self, code, message, **kwargs):
# cmd_err = kwargs.get('command_error')
# if int(code) in [1000, 1002, 1004]:
# msg_fmt = 'Error [{}]: {} [{}]'.format(code, message, cmd_err)
# else:
# msg_fmt = 'Error [{}]: {}'.format(code, message)
#
# super(CommandError, self).__init__(msg_fmt)
# self.error_code = code
# self.error_text = message
# self.command_error = cmd_err
# self.commands = kwargs.get('commands')
# self.output = kwargs.get('output')
# self.message = msg_fmt
#
# @property
# def trace(self):
# return self.get_trace()
#
# def get_trace(self):
# trace = list()
# index = None
#
# for index, out in enumerate(self.output):
# _entry = {'command': self.commands[index], 'output': out}
# trace.append(_entry)
#
# if index:
# index += 1
# for cmd in self.commands[index:]:
# _entry = {'command': cmd, 'output': None}
# trace.append(_entry)
#
# return trace
#
# class ConnectionError(EapiError):
# """Base exception raised for connection errors
#
# Connection errors are raised when a connection object is unable to
# connect to the node. Typically these errors can result from using
# the wrong transport type or not providing valid credentials.
#
# Args:
# commands (array): The list of commands there were sent to the
# node that when the exception was raised
# connection_type (string): The string identifier for the connection
# object that generate the error
# message (string): The exception error message
# response (string): The message generate from the response packet
#
# """
# def __init__(self, connection_type, message, commands=None):
# self.message = message
# self.connection_type = connection_type
# self.commands = commands
# super(ConnectionError, self).__init__(message)
#
# Path: pyeapi/utils.py
# def make_iterable(value):
# """Converts the supplied value to a list object
#
# This function will inspect the supplied value and return an
# iterable in the form of a list.
#
# Args:
# value (object): An valid Python object
#
# Returns:
# An iterable object of type list
# """
# if sys.version_info <= (3, 0):
# # Convert unicode values to strings for Python 2
# if isinstance(value, unicode):
# value = str(value)
# if isinstance(value, str) or isinstance(value, dict):
# value = [value]
#
# if sys.version_info <= (3, 3):
# if not isinstance(value, collections.Iterable):
# raise TypeError('value must be an iterable object')
# else:
# if not isinstance(value, collections.abc.Iterable):
# raise TypeError('value must be an iterable object')
#
# return value
. Output only the next line. | commands = make_iterable(commands) |
Predict the next line for this snippet: <|code_start|> def create(self, ospf_process_id, vrf=None):
"""Creates a OSPF process in the specified VRF or the default VRF.
Args:
ospf_process_id (str): The OSPF process Id value
vrf (str): The VRF to apply this OSPF process to
Returns:
bool: True if the command completed successfully
Exception:
ValueError: If the ospf_process_id passed in less
than 0 or greater than 65536
"""
value = int(ospf_process_id)
if not 0 < value < 65536:
raise ValueError('ospf as must be between 1 and 65535')
command = 'router ospf {}'.format(ospf_process_id)
if vrf:
command += ' vrf %s' % vrf
return self.configure(command)
def configure_ospf(self, cmd):
"""Allows for a list of OSPF subcommands to be configured"
Args:
cmd: (list or str): Subcommand to be entered
Returns:
bool: True if all the commands completed successfully
"""
config = self.get()
cmds = ['router ospf {}'.format(config['ospf_process_id'])]
<|code_end|>
with the help of the current file's imports:
import re
from pyeapi.api import Entity
from pyeapi.utils import make_iterable
and context from other files:
# Path: pyeapi/utils.py
# def make_iterable(value):
# """Converts the supplied value to a list object
#
# This function will inspect the supplied value and return an
# iterable in the form of a list.
#
# Args:
# value (object): An valid Python object
#
# Returns:
# An iterable object of type list
# """
# if sys.version_info <= (3, 0):
# # Convert unicode values to strings for Python 2
# if isinstance(value, unicode):
# value = str(value)
# if isinstance(value, str) or isinstance(value, dict):
# value = [value]
#
# if sys.version_info <= (3, 3):
# if not isinstance(value, collections.Iterable):
# raise TypeError('value must be an iterable object')
# else:
# if not isinstance(value, collections.abc.Iterable):
# raise TypeError('value must be an iterable object')
#
# return value
, which may contain function names, class names, or code. Output only the next line. | cmds.extend(make_iterable(cmd)) |
Given the code snippet: <|code_start|> match = re.search(r'^router bgp (\d+)', config)
return dict(bgp_as=int(match.group(1)))
def _parse_router_id(self, config):
match = re.search(r'router-id ([^\s]+)', config)
value = match.group(1) if match else None
return dict(router_id=value)
def _parse_max_paths(self, config):
match = re.search(r'maximum-paths\s+(\d+)\s+ecmp\s+(\d+)', config)
paths = int(match.group(1)) if match else None
ecmp_paths = int(match.group(2)) if match else None
return dict(maximum_paths=paths, maximum_ecmp_paths=ecmp_paths)
def _parse_shutdown(self, config):
value = 'no shutdown' in config
return dict(shutdown=not value)
def _parse_networks(self, config):
networks = list()
regexp = r'network (.+)/(\d+)(?: route-map (\w+))*'
matches = re.findall(regexp, config)
for (prefix, mask, rmap) in matches:
rmap = None if rmap == '' else rmap
networks.append(dict(prefix=prefix, masklen=mask, route_map=rmap))
return dict(networks=networks)
def configure_bgp(self, cmd):
config = self.get()
cmds = ['router bgp {}'.format(config['bgp_as'])]
<|code_end|>
, generate the next line using the imports in this file:
import re
import netaddr
from collections import namedtuple
from pyeapi.api import Entity, EntityCollection
from pyeapi.utils import make_iterable
and context (functions, classes, or occasionally code) from other files:
# Path: pyeapi/utils.py
# def make_iterable(value):
# """Converts the supplied value to a list object
#
# This function will inspect the supplied value and return an
# iterable in the form of a list.
#
# Args:
# value (object): An valid Python object
#
# Returns:
# An iterable object of type list
# """
# if sys.version_info <= (3, 0):
# # Convert unicode values to strings for Python 2
# if isinstance(value, unicode):
# value = str(value)
# if isinstance(value, str) or isinstance(value, dict):
# value = [value]
#
# if sys.version_info <= (3, 3):
# if not isinstance(value, collections.Iterable):
# raise TypeError('value must be an iterable object')
# else:
# if not isinstance(value, collections.abc.Iterable):
# raise TypeError('value must be an iterable object')
#
# return value
. Output only the next line. | cmds.extend(make_iterable(cmd)) |
Based on the snippet: <|code_start|>#!/usr/bin/env python
#
# Patchwork - automated patch tracking system
# Copyright (C) 2008 Jeremy Kerr <jk@ozlabs.org>
# Copyright (C) 2015 Intel Corporation
#
# This file is part of the Patchwork package.
#
# Patchwork is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Patchwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchwork; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
class Command(BaseCommand):
help = 'Update the hashes on existing patches'
args = '[<patch_id>...]'
def handle(self, *args, **options):
<|code_end|>
, predict the immediate next line with the help of the imports:
from django.core.management.base import BaseCommand
from patchwork.models import Patch
and context (classes, functions, sometimes code) from other files:
# Path: patchwork/models.py
# class Patch(models.Model):
# project = models.ForeignKey(Project, on_delete=models.CASCADE)
# msgid = models.CharField(max_length=255)
# name = models.CharField(max_length=255)
# date = models.DateTimeField(default=datetime.datetime.now)
# last_updated = models.DateTimeField(auto_now=True)
# submitter = models.ForeignKey(Person, on_delete=models.CASCADE)
# delegate = models.ForeignKey(User, blank=True, null=True,
# on_delete=models.CASCADE)
# state = models.ForeignKey(State, null=True, on_delete=models.CASCADE)
# archived = models.BooleanField(default=False)
# headers = models.TextField(blank=True)
# content = models.TextField(null=True, blank=True)
# pull_url = models.CharField(max_length=255, null=True, blank=True)
# commit_ref = models.CharField(max_length=255, null=True, blank=True)
# hash = HashField(null=True, blank=True)
# tags = models.ManyToManyField(Tag, through=PatchTag)
#
# objects = PatchManager()
#
# def commit_message(self):
# """Retrieves the commit message"""
# return Comment.objects.filter(patch=self, msgid=self.msgid)
#
# def answers(self):
# """Retrieves the answers (ie all comments but the commit message)"""
# return Comment.objects.filter(Q(patch=self) & ~Q(msgid=self.msgid))
#
# def comments(self):
# """Retrieves all comments of this patch ie. the commit message and the
# answers"""
# return Comment.objects.filter(patch=self)
#
# def series(self):
# try:
# rev = SeriesRevisionPatch.objects.filter(patch=self)[0].revision
# return rev.series
# except Exception:
# return None
#
# def _set_tag(self, tag, count):
# if count == 0:
# self.patchtag_set.filter(tag=tag).delete()
# return
# (patchtag, _) = PatchTag.objects.get_or_create(patch=self, tag=tag)
# if patchtag.count != count:
# patchtag.count = count
# patchtag.save()
#
# def refresh_tag_counts(self):
# tags = self.project.tags
# counter = Counter()
# for comment in self.comment_set.all():
# counter = counter + extract_tags(comment.content, tags)
#
# for tag in tags:
# self._set_tag(tag, counter[tag])
#
# def save(self):
# if not hasattr(self, 'state') or not self.state:
# self.state = get_default_initial_patch_state()
#
# if self.hash is None and self.content is not None:
# self.hash = hash_patch(self.content).hexdigest()
#
# super(Patch, self).save()
#
# def filename(self):
# return filename(self.name, '.patch')
#
# def human_name(self):
# return self.name
#
# @models.permalink
# def get_absolute_url(self):
# return ('patch', (), {'patch_id': self.id})
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = 'Patches'
# ordering = ['date']
# unique_together = [('msgid', 'project')]
. Output only the next line. | query = Patch.objects |
Continue the code snippet: <|code_start|>#
# Patchwork is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Patchwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchwork; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
def settings(request):
return {'settings': settings}
def site(request):
return {'site': Site.objects.get_current()}
def bundle(request):
user = request.user
if not user.is_authenticated():
return {}
<|code_end|>
. Use current file imports:
from django.contrib.sites.models import Site
from patchwork.models import Bundle
and context (classes, functions, or code) from other files:
# Path: patchwork/models.py
# class Bundle(models.Model):
# owner = models.ForeignKey(User, on_delete=models.CASCADE)
# project = models.ForeignKey(Project, on_delete=models.CASCADE)
# name = models.CharField(max_length=50, null=False, blank=False)
# patches = models.ManyToManyField(Patch, through='BundlePatch')
# public = models.BooleanField(default=False)
#
# def n_patches(self):
# return self.patches.all().count()
#
# def ordered_patches(self):
# return self.patches.order_by('bundlepatch__order')
#
# def append_patch(self, patch):
# # todo: use the aggregate queries in django 1.1
# orders = BundlePatch.objects.filter(bundle=self).order_by('-order') \
# .values('order')
#
# if len(orders) > 0:
# max_order = orders[0]['order']
# else:
# max_order = 0
#
# # see if the patch is already in this bundle
# if BundlePatch.objects.filter(bundle=self, patch=patch).count():
# raise Exception("patch is already in bundle")
#
# bp = BundlePatch.objects.create(bundle=self, patch=patch,
# order=max_order + 1)
# bp.save()
#
# def public_url(self):
# if not self.public:
# return None
# site = Site.objects.get_current()
# return 'http://%s%s' % (site.domain,
# reverse('bundle',
# kwargs={
# 'username': self.owner.username,
# 'bundlename': self.name
# }))
#
# @models.permalink
# def get_absolute_url(self):
# return ('bundle', (), {
# 'username': self.owner.username,
# 'bundlename': self.name,
# })
#
# class Meta:
# unique_together = [('owner', 'name')]
. Output only the next line. | return {'bundles': Bundle.objects.filter(owner=user)} |
Here is a snippet: <|code_start|># but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchwork; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
def _confirmation_url(conf):
return reverse('confirm', kwargs={'key': conf.key})
class TestUser(object):
username = 'testuser'
email = 'test@example.com'
secondary_email = 'test2@example.com'
password = None
def __init__(self):
self.password = User.objects.make_random_password()
self.user = User.objects.create_user(self.username,
self.email, self.password)
class InvalidConfirmationTest(TestCase):
def setUp(self):
<|code_end|>
. Write the next line using the current file imports:
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from patchwork.models import EmailConfirmation, Person
and context from other files:
# Path: patchwork/models.py
# class EmailConfirmation(models.Model):
# validity = datetime.timedelta(days=settings.CONFIRMATION_VALIDITY_DAYS)
# type = models.CharField(max_length=20, choices=[
# ('userperson', 'User-Person association'),
# ('registration', 'Registration'),
# ('optout', 'Email opt-out'),
# ])
# email = models.CharField(max_length=200)
# user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
# key = HashField()
# date = models.DateTimeField(default=datetime.datetime.now)
# active = models.BooleanField(default=True)
#
# def deactivate(self):
# self.active = False
# self.save()
#
# def is_valid(self):
# return self.date + self.validity > datetime.datetime.now()
#
# def save(self):
# max = 1 << 32
# if self.key == '':
# str = '%s%s%d' % (self.user, self.email, random.randint(0, max))
# self.key = self._meta.get_field('key').construct(str).hexdigest()
# super(EmailConfirmation, self).save()
#
# class Person(models.Model):
# email = models.CharField(max_length=255, unique=True)
# name = models.CharField(max_length=255, null=True, blank=True)
# user = models.ForeignKey(User, null=True, blank=True,
# on_delete=models.SET_NULL)
#
# def display_name(self):
# if self.name:
# return self.name
# else:
# return self.email
#
# def email_name(self):
# if (self.name):
# return "\"%s\" <%s>" % (self.name, self.email)
# else:
# return self.email
#
# def link_to_user(self, user):
# self.name = user.profile.name()
# self.user = user
#
# def __str__(self):
# return self.display_name()
#
# class Meta:
# verbose_name_plural = 'People'
, which may include functions, classes, or code. Output only the next line. | EmailConfirmation.objects.all().delete() |
Next line prediction: <|code_start|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchwork; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
def _confirmation_url(conf):
return reverse('confirm', kwargs={'key': conf.key})
class TestUser(object):
username = 'testuser'
email = 'test@example.com'
secondary_email = 'test2@example.com'
password = None
def __init__(self):
self.password = User.objects.make_random_password()
self.user = User.objects.create_user(self.username,
self.email, self.password)
class InvalidConfirmationTest(TestCase):
def setUp(self):
EmailConfirmation.objects.all().delete()
<|code_end|>
. Use current file imports:
(from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from patchwork.models import EmailConfirmation, Person)
and context including class names, function names, or small code snippets from other files:
# Path: patchwork/models.py
# class EmailConfirmation(models.Model):
# validity = datetime.timedelta(days=settings.CONFIRMATION_VALIDITY_DAYS)
# type = models.CharField(max_length=20, choices=[
# ('userperson', 'User-Person association'),
# ('registration', 'Registration'),
# ('optout', 'Email opt-out'),
# ])
# email = models.CharField(max_length=200)
# user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
# key = HashField()
# date = models.DateTimeField(default=datetime.datetime.now)
# active = models.BooleanField(default=True)
#
# def deactivate(self):
# self.active = False
# self.save()
#
# def is_valid(self):
# return self.date + self.validity > datetime.datetime.now()
#
# def save(self):
# max = 1 << 32
# if self.key == '':
# str = '%s%s%d' % (self.user, self.email, random.randint(0, max))
# self.key = self._meta.get_field('key').construct(str).hexdigest()
# super(EmailConfirmation, self).save()
#
# class Person(models.Model):
# email = models.CharField(max_length=255, unique=True)
# name = models.CharField(max_length=255, null=True, blank=True)
# user = models.ForeignKey(User, null=True, blank=True,
# on_delete=models.SET_NULL)
#
# def display_name(self):
# if self.name:
# return self.name
# else:
# return self.email
#
# def email_name(self):
# if (self.name):
# return "\"%s\" <%s>" % (self.name, self.email)
# else:
# return self.email
#
# def link_to_user(self, user):
# self.name = user.profile.name()
# self.user = user
#
# def __str__(self):
# return self.display_name()
#
# class Meta:
# verbose_name_plural = 'People'
. Output only the next line. | Person.objects.all().delete() |
Based on the snippet: <|code_start|># Patchwork - automated patch tracking system
# Copyright (C) 2015 Intel Corporation
#
# This file is part of the Patchwork package.
#
# Patchwork is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Patchwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchwork; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
class LoginTestCase(SeleniumTestCase):
def setUp(self):
super(LoginTestCase, self).setUp()
<|code_end|>
, predict the immediate next line with the help of imports:
from patchwork.tests.browser import SeleniumTestCase
from patchwork.tests.test_user import TestUser
and context (classes, functions, sometimes code) from other files:
# Path: patchwork/tests/browser.py
# class SeleniumTestCase(StaticLiveServerTestCase):
# _SCREENSHOT_DIR = os.path.dirname(__file__) + '/../../selenium_screenshots'
#
# def setUp(self):
# self.skip = os.getenv('PATCHWORK_SKIP_BROWSER_TESTS', None)
# if self.skip:
# self.skipTest("Disabled by environment variable")
#
# super(SeleniumTestCase, self).setUp()
#
# self.browser = os.getenv('SELENIUM_BROWSER', 'chrome')
# if self.browser == 'firefox':
# self.selenium = webdriver.Firefox()
# if self.browser == 'chrome':
# options = webdriver.ChromeOptions()
# options.add_argument("--no-sandbox")
# options.set_headless(True)
# self.selenium = webdriver.Chrome(
# service_args=["--verbose", "--log-path=selenium.log"],
# options=options
# )
#
# mkdir(self._SCREENSHOT_DIR)
# self._screenshot_number = 1
#
# def tearDown(self):
# self.selenium.quit()
# super(SeleniumTestCase, self).tearDown()
#
# def screenshot(self):
# name = "%s_%d.png" % (self._testMethodName, self._screenshot_number)
# path = os.path.join(self._SCREENSHOT_DIR, name)
# self.selenium.get_screenshot_as_file(path)
# self._screenshot_number += 1
#
# def get(self, relative_url):
# self.selenium.get('%s%s' % (self.live_server_url, relative_url))
# self.screenshot()
#
# def find(self, selector):
# return self.selenium.find_element_by_css_selector(selector)
#
# def focused_element(self):
# active_element = self.selenium.switch_to.active_element
# # XXX: WA for Firefox driver
# # selenium does not perform w3c conformance negotiation
# # resulting in focused element being wrapped in a dict
# if isinstance(active_element, dict):
# active_element = active_element['value']
# return active_element
#
# def wait_until_present(self, name):
# def is_present(driver):
# return driver.find_element_by_name(name)
# msg = "An element named '%s' should be on the page" % name
# element = Wait(self.selenium).until(is_present, msg)
# self.screenshot()
# return element
#
# def wait_until_visible(self, selector):
# def is_visible(driver):
# return self.find(selector).is_displayed()
# msg = "The element matching '%s' should be visible" % selector
# Wait(self.selenium).until(is_visible, msg)
# self.screenshot()
# return self.find(selector)
#
# def wait_until_focused(self, selector):
# def is_focused(driver):
# return self.find(selector) == self.focused_element()
# msg = "The element matching '%s' should be focused" % selector
# Wait(self.selenium).until(is_focused, msg)
# self.screenshot()
# return self.find(selector)
#
# def enter_text(self, name, value):
# field = self.wait_until_present(name)
# field.send_keys(value)
# return field
#
# def click(self, selector):
# element = self.wait_until_visible(selector)
# element.click()
# return element
#
# Path: patchwork/tests/test_user.py
# class TestUser(object):
#
# def __init__(self, username='testuser', email='test@example.com',
# secondary_email='test2@example.com'):
# self.username = username
# self.email = email
# self.secondary_email = secondary_email
# self.password = User.objects.make_random_password()
# self.user = User.objects.create_user(
# self.username, self.email, self.password)
#
# def basic_auth_header(self):
# userpass = ("%s:%s" % (self.username, self.password)).encode('utf-8')
# return 'Basic ' + base64.b64encode(userpass).decode('utf-8')
#
# def add_to_maintainers(self, project):
# profile = self.user.profile
# profile.maintainer_projects.add(project)
# profile.save()
. Output only the next line. | self.user = TestUser() |
Predict the next line after this snippet: <|code_start|># (at your option) any later version.
#
# Patchwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchwork; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
class MultipleUpdateTest(TestCase):
fixtures = ['default_states', 'default_events']
def setUp(self):
defaults.project.save()
self.user = create_maintainer(defaults.project)
self.client.login(username=self.user.username,
password=self.user.username)
self.properties_form_id = 'patchform-properties'
self.url = reverse('patch_list', args=[defaults.project.linkname])
self.base_data = {
'action': 'Update', 'project': str(defaults.project.id),
'form': 'patchlistform', 'archived': '*', 'delegate': '*',
'state': '*'}
self.patches = []
for name in ['patch one', 'patch two', 'patch three']:
<|code_end|>
using the current file's imports:
from django.core.urlresolvers import reverse
from django.test import TestCase
from patchwork.models import Patch, Person, State
from patchwork.tests.utils import defaults, create_maintainer
and any relevant context from other files:
# Path: patchwork/models.py
# class Patch(models.Model):
# project = models.ForeignKey(Project, on_delete=models.CASCADE)
# msgid = models.CharField(max_length=255)
# name = models.CharField(max_length=255)
# date = models.DateTimeField(default=datetime.datetime.now)
# last_updated = models.DateTimeField(auto_now=True)
# submitter = models.ForeignKey(Person, on_delete=models.CASCADE)
# delegate = models.ForeignKey(User, blank=True, null=True,
# on_delete=models.CASCADE)
# state = models.ForeignKey(State, null=True, on_delete=models.CASCADE)
# archived = models.BooleanField(default=False)
# headers = models.TextField(blank=True)
# content = models.TextField(null=True, blank=True)
# pull_url = models.CharField(max_length=255, null=True, blank=True)
# commit_ref = models.CharField(max_length=255, null=True, blank=True)
# hash = HashField(null=True, blank=True)
# tags = models.ManyToManyField(Tag, through=PatchTag)
#
# objects = PatchManager()
#
# def commit_message(self):
# """Retrieves the commit message"""
# return Comment.objects.filter(patch=self, msgid=self.msgid)
#
# def answers(self):
# """Retrieves the answers (ie all comments but the commit message)"""
# return Comment.objects.filter(Q(patch=self) & ~Q(msgid=self.msgid))
#
# def comments(self):
# """Retrieves all comments of this patch ie. the commit message and the
# answers"""
# return Comment.objects.filter(patch=self)
#
# def series(self):
# try:
# rev = SeriesRevisionPatch.objects.filter(patch=self)[0].revision
# return rev.series
# except Exception:
# return None
#
# def _set_tag(self, tag, count):
# if count == 0:
# self.patchtag_set.filter(tag=tag).delete()
# return
# (patchtag, _) = PatchTag.objects.get_or_create(patch=self, tag=tag)
# if patchtag.count != count:
# patchtag.count = count
# patchtag.save()
#
# def refresh_tag_counts(self):
# tags = self.project.tags
# counter = Counter()
# for comment in self.comment_set.all():
# counter = counter + extract_tags(comment.content, tags)
#
# for tag in tags:
# self._set_tag(tag, counter[tag])
#
# def save(self):
# if not hasattr(self, 'state') or not self.state:
# self.state = get_default_initial_patch_state()
#
# if self.hash is None and self.content is not None:
# self.hash = hash_patch(self.content).hexdigest()
#
# super(Patch, self).save()
#
# def filename(self):
# return filename(self.name, '.patch')
#
# def human_name(self):
# return self.name
#
# @models.permalink
# def get_absolute_url(self):
# return ('patch', (), {'patch_id': self.id})
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = 'Patches'
# ordering = ['date']
# unique_together = [('msgid', 'project')]
#
# class Person(models.Model):
# email = models.CharField(max_length=255, unique=True)
# name = models.CharField(max_length=255, null=True, blank=True)
# user = models.ForeignKey(User, null=True, blank=True,
# on_delete=models.SET_NULL)
#
# def display_name(self):
# if self.name:
# return self.name
# else:
# return self.email
#
# def email_name(self):
# if (self.name):
# return "\"%s\" <%s>" % (self.name, self.email)
# else:
# return self.email
#
# def link_to_user(self, user):
# self.name = user.profile.name()
# self.user = user
#
# def __str__(self):
# return self.display_name()
#
# class Meta:
# verbose_name_plural = 'People'
#
# class State(models.Model):
# name = models.CharField(max_length=100)
# ordering = models.IntegerField(unique=True)
# action_required = models.BooleanField(default=True)
#
# @classmethod
# def from_string(cls, name):
# return State.objects.get(name__iexact=name)
#
# def __str__(self):
# return self.name
#
# class Meta:
# ordering = ['ordering']
. Output only the next line. | patch = Patch(project=defaults.project, msgid=name, |
Predict the next line after this snippet: <|code_start|># Patchwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchwork; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
class MultipleUpdateTest(TestCase):
fixtures = ['default_states', 'default_events']
def setUp(self):
defaults.project.save()
self.user = create_maintainer(defaults.project)
self.client.login(username=self.user.username,
password=self.user.username)
self.properties_form_id = 'patchform-properties'
self.url = reverse('patch_list', args=[defaults.project.linkname])
self.base_data = {
'action': 'Update', 'project': str(defaults.project.id),
'form': 'patchlistform', 'archived': '*', 'delegate': '*',
'state': '*'}
self.patches = []
for name in ['patch one', 'patch two', 'patch three']:
patch = Patch(project=defaults.project, msgid=name,
name=name, content='',
<|code_end|>
using the current file's imports:
from django.core.urlresolvers import reverse
from django.test import TestCase
from patchwork.models import Patch, Person, State
from patchwork.tests.utils import defaults, create_maintainer
and any relevant context from other files:
# Path: patchwork/models.py
# class Patch(models.Model):
# project = models.ForeignKey(Project, on_delete=models.CASCADE)
# msgid = models.CharField(max_length=255)
# name = models.CharField(max_length=255)
# date = models.DateTimeField(default=datetime.datetime.now)
# last_updated = models.DateTimeField(auto_now=True)
# submitter = models.ForeignKey(Person, on_delete=models.CASCADE)
# delegate = models.ForeignKey(User, blank=True, null=True,
# on_delete=models.CASCADE)
# state = models.ForeignKey(State, null=True, on_delete=models.CASCADE)
# archived = models.BooleanField(default=False)
# headers = models.TextField(blank=True)
# content = models.TextField(null=True, blank=True)
# pull_url = models.CharField(max_length=255, null=True, blank=True)
# commit_ref = models.CharField(max_length=255, null=True, blank=True)
# hash = HashField(null=True, blank=True)
# tags = models.ManyToManyField(Tag, through=PatchTag)
#
# objects = PatchManager()
#
# def commit_message(self):
# """Retrieves the commit message"""
# return Comment.objects.filter(patch=self, msgid=self.msgid)
#
# def answers(self):
# """Retrieves the answers (ie all comments but the commit message)"""
# return Comment.objects.filter(Q(patch=self) & ~Q(msgid=self.msgid))
#
# def comments(self):
# """Retrieves all comments of this patch ie. the commit message and the
# answers"""
# return Comment.objects.filter(patch=self)
#
# def series(self):
# try:
# rev = SeriesRevisionPatch.objects.filter(patch=self)[0].revision
# return rev.series
# except Exception:
# return None
#
# def _set_tag(self, tag, count):
# if count == 0:
# self.patchtag_set.filter(tag=tag).delete()
# return
# (patchtag, _) = PatchTag.objects.get_or_create(patch=self, tag=tag)
# if patchtag.count != count:
# patchtag.count = count
# patchtag.save()
#
# def refresh_tag_counts(self):
# tags = self.project.tags
# counter = Counter()
# for comment in self.comment_set.all():
# counter = counter + extract_tags(comment.content, tags)
#
# for tag in tags:
# self._set_tag(tag, counter[tag])
#
# def save(self):
# if not hasattr(self, 'state') or not self.state:
# self.state = get_default_initial_patch_state()
#
# if self.hash is None and self.content is not None:
# self.hash = hash_patch(self.content).hexdigest()
#
# super(Patch, self).save()
#
# def filename(self):
# return filename(self.name, '.patch')
#
# def human_name(self):
# return self.name
#
# @models.permalink
# def get_absolute_url(self):
# return ('patch', (), {'patch_id': self.id})
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = 'Patches'
# ordering = ['date']
# unique_together = [('msgid', 'project')]
#
# class Person(models.Model):
# email = models.CharField(max_length=255, unique=True)
# name = models.CharField(max_length=255, null=True, blank=True)
# user = models.ForeignKey(User, null=True, blank=True,
# on_delete=models.SET_NULL)
#
# def display_name(self):
# if self.name:
# return self.name
# else:
# return self.email
#
# def email_name(self):
# if (self.name):
# return "\"%s\" <%s>" % (self.name, self.email)
# else:
# return self.email
#
# def link_to_user(self, user):
# self.name = user.profile.name()
# self.user = user
#
# def __str__(self):
# return self.display_name()
#
# class Meta:
# verbose_name_plural = 'People'
#
# class State(models.Model):
# name = models.CharField(max_length=100)
# ordering = models.IntegerField(unique=True)
# action_required = models.BooleanField(default=True)
#
# @classmethod
# def from_string(cls, name):
# return State.objects.get(name__iexact=name)
#
# def __str__(self):
# return self.name
#
# class Meta:
# ordering = ['ordering']
. Output only the next line. | submitter=Person.objects.get(user=self.user)) |
Here is a snippet: <|code_start|> response = self.client.post(self.url, data)
self.assertContains(response, 'No patches to display',
status_code=200)
for patch in [Patch.objects.get(pk=p.pk) for p in self.patches]:
self.assertTrue(patch.archived)
def testUnArchivingPatches(self):
# Start with one patch archived and the remaining ones unarchived.
self.patches[0].archived = True
self.patches[0].save()
data = self.base_data.copy()
data.update({'archived': 'False'})
self._selectAllPatches(data)
response = self.client.post(self.url, data)
self.assertContains(response, self.properties_form_id,
status_code=200)
for patch in [Patch.objects.get(pk=p.pk) for p in self.patches]:
self.assertFalse(patch.archived)
def _testStateChange(self, state):
data = self.base_data.copy()
data.update({'state': str(state)})
self._selectAllPatches(data)
response = self.client.post(self.url, data)
self.assertContains(response, self.properties_form_id,
status_code=200)
return response
def testStateChangeValid(self):
states = [patch.state.pk for patch in self.patches]
<|code_end|>
. Write the next line using the current file imports:
from django.core.urlresolvers import reverse
from django.test import TestCase
from patchwork.models import Patch, Person, State
from patchwork.tests.utils import defaults, create_maintainer
and context from other files:
# Path: patchwork/models.py
# class Patch(models.Model):
# project = models.ForeignKey(Project, on_delete=models.CASCADE)
# msgid = models.CharField(max_length=255)
# name = models.CharField(max_length=255)
# date = models.DateTimeField(default=datetime.datetime.now)
# last_updated = models.DateTimeField(auto_now=True)
# submitter = models.ForeignKey(Person, on_delete=models.CASCADE)
# delegate = models.ForeignKey(User, blank=True, null=True,
# on_delete=models.CASCADE)
# state = models.ForeignKey(State, null=True, on_delete=models.CASCADE)
# archived = models.BooleanField(default=False)
# headers = models.TextField(blank=True)
# content = models.TextField(null=True, blank=True)
# pull_url = models.CharField(max_length=255, null=True, blank=True)
# commit_ref = models.CharField(max_length=255, null=True, blank=True)
# hash = HashField(null=True, blank=True)
# tags = models.ManyToManyField(Tag, through=PatchTag)
#
# objects = PatchManager()
#
# def commit_message(self):
# """Retrieves the commit message"""
# return Comment.objects.filter(patch=self, msgid=self.msgid)
#
# def answers(self):
# """Retrieves the answers (ie all comments but the commit message)"""
# return Comment.objects.filter(Q(patch=self) & ~Q(msgid=self.msgid))
#
# def comments(self):
# """Retrieves all comments of this patch ie. the commit message and the
# answers"""
# return Comment.objects.filter(patch=self)
#
# def series(self):
# try:
# rev = SeriesRevisionPatch.objects.filter(patch=self)[0].revision
# return rev.series
# except Exception:
# return None
#
# def _set_tag(self, tag, count):
# if count == 0:
# self.patchtag_set.filter(tag=tag).delete()
# return
# (patchtag, _) = PatchTag.objects.get_or_create(patch=self, tag=tag)
# if patchtag.count != count:
# patchtag.count = count
# patchtag.save()
#
# def refresh_tag_counts(self):
# tags = self.project.tags
# counter = Counter()
# for comment in self.comment_set.all():
# counter = counter + extract_tags(comment.content, tags)
#
# for tag in tags:
# self._set_tag(tag, counter[tag])
#
# def save(self):
# if not hasattr(self, 'state') or not self.state:
# self.state = get_default_initial_patch_state()
#
# if self.hash is None and self.content is not None:
# self.hash = hash_patch(self.content).hexdigest()
#
# super(Patch, self).save()
#
# def filename(self):
# return filename(self.name, '.patch')
#
# def human_name(self):
# return self.name
#
# @models.permalink
# def get_absolute_url(self):
# return ('patch', (), {'patch_id': self.id})
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = 'Patches'
# ordering = ['date']
# unique_together = [('msgid', 'project')]
#
# class Person(models.Model):
# email = models.CharField(max_length=255, unique=True)
# name = models.CharField(max_length=255, null=True, blank=True)
# user = models.ForeignKey(User, null=True, blank=True,
# on_delete=models.SET_NULL)
#
# def display_name(self):
# if self.name:
# return self.name
# else:
# return self.email
#
# def email_name(self):
# if (self.name):
# return "\"%s\" <%s>" % (self.name, self.email)
# else:
# return self.email
#
# def link_to_user(self, user):
# self.name = user.profile.name()
# self.user = user
#
# def __str__(self):
# return self.display_name()
#
# class Meta:
# verbose_name_plural = 'People'
#
# class State(models.Model):
# name = models.CharField(max_length=100)
# ordering = models.IntegerField(unique=True)
# action_required = models.BooleanField(default=True)
#
# @classmethod
# def from_string(cls, name):
# return State.objects.get(name__iexact=name)
#
# def __str__(self):
# return self.name
#
# class Meta:
# ordering = ['ordering']
, which may include functions, classes, or code. Output only the next line. | state = State.objects.exclude(pk__in=states)[0] |
Predict the next line after this snippet: <|code_start|>#
# Patchwork is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Patchwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchwork; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
class PatchNotificationModelTest(TestCase):
fixtures = ['default_states', 'default_events']
"""Tests for the creation & update of the PatchChangeNotification model"""
def setUp(self):
self.project = defaults.project
self.project.send_notifications = True
self.project.save()
self.submitter = defaults.patch_author_person
self.submitter.save()
<|code_end|>
using the current file's imports:
import datetime
from django.conf import settings
from django.core import mail
from django.test import TestCase
from patchwork.models import Patch, State, PatchChangeNotification, EmailOptout
from patchwork.tests.utils import defaults
from patchwork.utils import send_notifications
and any relevant context from other files:
# Path: patchwork/models.py
# class Patch(models.Model):
# project = models.ForeignKey(Project, on_delete=models.CASCADE)
# msgid = models.CharField(max_length=255)
# name = models.CharField(max_length=255)
# date = models.DateTimeField(default=datetime.datetime.now)
# last_updated = models.DateTimeField(auto_now=True)
# submitter = models.ForeignKey(Person, on_delete=models.CASCADE)
# delegate = models.ForeignKey(User, blank=True, null=True,
# on_delete=models.CASCADE)
# state = models.ForeignKey(State, null=True, on_delete=models.CASCADE)
# archived = models.BooleanField(default=False)
# headers = models.TextField(blank=True)
# content = models.TextField(null=True, blank=True)
# pull_url = models.CharField(max_length=255, null=True, blank=True)
# commit_ref = models.CharField(max_length=255, null=True, blank=True)
# hash = HashField(null=True, blank=True)
# tags = models.ManyToManyField(Tag, through=PatchTag)
#
# objects = PatchManager()
#
# def commit_message(self):
# """Retrieves the commit message"""
# return Comment.objects.filter(patch=self, msgid=self.msgid)
#
# def answers(self):
# """Retrieves the answers (ie all comments but the commit message)"""
# return Comment.objects.filter(Q(patch=self) & ~Q(msgid=self.msgid))
#
# def comments(self):
# """Retrieves all comments of this patch ie. the commit message and the
# answers"""
# return Comment.objects.filter(patch=self)
#
# def series(self):
# try:
# rev = SeriesRevisionPatch.objects.filter(patch=self)[0].revision
# return rev.series
# except Exception:
# return None
#
# def _set_tag(self, tag, count):
# if count == 0:
# self.patchtag_set.filter(tag=tag).delete()
# return
# (patchtag, _) = PatchTag.objects.get_or_create(patch=self, tag=tag)
# if patchtag.count != count:
# patchtag.count = count
# patchtag.save()
#
# def refresh_tag_counts(self):
# tags = self.project.tags
# counter = Counter()
# for comment in self.comment_set.all():
# counter = counter + extract_tags(comment.content, tags)
#
# for tag in tags:
# self._set_tag(tag, counter[tag])
#
# def save(self):
# if not hasattr(self, 'state') or not self.state:
# self.state = get_default_initial_patch_state()
#
# if self.hash is None and self.content is not None:
# self.hash = hash_patch(self.content).hexdigest()
#
# super(Patch, self).save()
#
# def filename(self):
# return filename(self.name, '.patch')
#
# def human_name(self):
# return self.name
#
# @models.permalink
# def get_absolute_url(self):
# return ('patch', (), {'patch_id': self.id})
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = 'Patches'
# ordering = ['date']
# unique_together = [('msgid', 'project')]
#
# class State(models.Model):
# name = models.CharField(max_length=100)
# ordering = models.IntegerField(unique=True)
# action_required = models.BooleanField(default=True)
#
# @classmethod
# def from_string(cls, name):
# return State.objects.get(name__iexact=name)
#
# def __str__(self):
# return self.name
#
# class Meta:
# ordering = ['ordering']
#
# class PatchChangeNotification(models.Model):
# patch = models.OneToOneField(Patch, primary_key=True,
# on_delete=models.CASCADE)
# last_modified = models.DateTimeField(default=datetime.datetime.now)
# orig_state = models.ForeignKey(State, on_delete=models.CASCADE)
#
# class EmailOptout(models.Model):
# email = models.CharField(max_length=200, primary_key=True)
#
# @classmethod
# def is_optout(cls, email):
# email = email.lower().strip()
# return cls.objects.filter(email=email).count() > 0
#
# def __str__(self):
# return self.email
. Output only the next line. | self.patch = Patch(project=self.project, msgid='testpatch', |
Predict the next line after this snippet: <|code_start|> self.project = defaults.project
self.project.send_notifications = True
self.project.save()
self.submitter = defaults.patch_author_person
self.submitter.save()
self.patch = Patch(project=self.project, msgid='testpatch',
name='testpatch', content='',
submitter=self.submitter)
def tearDown(self):
self.patch.delete()
self.submitter.delete()
self.project.delete()
def testPatchCreation(self):
"""Ensure we don't get a notification on create"""
self.patch.save()
self.assertEqual(PatchChangeNotification.objects.count(), 0)
def testPatchUninterestingChange(self):
"""Ensure we don't get a notification for "uninteresting" changes"""
self.patch.save()
self.patch.archived = True
self.patch.save()
self.assertEqual(PatchChangeNotification.objects.count(), 0)
def testPatchChange(self):
"""Ensure we get a notification for interesting patch changes"""
self.patch.save()
oldstate = self.patch.state
<|code_end|>
using the current file's imports:
import datetime
from django.conf import settings
from django.core import mail
from django.test import TestCase
from patchwork.models import Patch, State, PatchChangeNotification, EmailOptout
from patchwork.tests.utils import defaults
from patchwork.utils import send_notifications
and any relevant context from other files:
# Path: patchwork/models.py
# class Patch(models.Model):
# project = models.ForeignKey(Project, on_delete=models.CASCADE)
# msgid = models.CharField(max_length=255)
# name = models.CharField(max_length=255)
# date = models.DateTimeField(default=datetime.datetime.now)
# last_updated = models.DateTimeField(auto_now=True)
# submitter = models.ForeignKey(Person, on_delete=models.CASCADE)
# delegate = models.ForeignKey(User, blank=True, null=True,
# on_delete=models.CASCADE)
# state = models.ForeignKey(State, null=True, on_delete=models.CASCADE)
# archived = models.BooleanField(default=False)
# headers = models.TextField(blank=True)
# content = models.TextField(null=True, blank=True)
# pull_url = models.CharField(max_length=255, null=True, blank=True)
# commit_ref = models.CharField(max_length=255, null=True, blank=True)
# hash = HashField(null=True, blank=True)
# tags = models.ManyToManyField(Tag, through=PatchTag)
#
# objects = PatchManager()
#
# def commit_message(self):
# """Retrieves the commit message"""
# return Comment.objects.filter(patch=self, msgid=self.msgid)
#
# def answers(self):
# """Retrieves the answers (ie all comments but the commit message)"""
# return Comment.objects.filter(Q(patch=self) & ~Q(msgid=self.msgid))
#
# def comments(self):
# """Retrieves all comments of this patch ie. the commit message and the
# answers"""
# return Comment.objects.filter(patch=self)
#
# def series(self):
# try:
# rev = SeriesRevisionPatch.objects.filter(patch=self)[0].revision
# return rev.series
# except Exception:
# return None
#
# def _set_tag(self, tag, count):
# if count == 0:
# self.patchtag_set.filter(tag=tag).delete()
# return
# (patchtag, _) = PatchTag.objects.get_or_create(patch=self, tag=tag)
# if patchtag.count != count:
# patchtag.count = count
# patchtag.save()
#
# def refresh_tag_counts(self):
# tags = self.project.tags
# counter = Counter()
# for comment in self.comment_set.all():
# counter = counter + extract_tags(comment.content, tags)
#
# for tag in tags:
# self._set_tag(tag, counter[tag])
#
# def save(self):
# if not hasattr(self, 'state') or not self.state:
# self.state = get_default_initial_patch_state()
#
# if self.hash is None and self.content is not None:
# self.hash = hash_patch(self.content).hexdigest()
#
# super(Patch, self).save()
#
# def filename(self):
# return filename(self.name, '.patch')
#
# def human_name(self):
# return self.name
#
# @models.permalink
# def get_absolute_url(self):
# return ('patch', (), {'patch_id': self.id})
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = 'Patches'
# ordering = ['date']
# unique_together = [('msgid', 'project')]
#
# class State(models.Model):
# name = models.CharField(max_length=100)
# ordering = models.IntegerField(unique=True)
# action_required = models.BooleanField(default=True)
#
# @classmethod
# def from_string(cls, name):
# return State.objects.get(name__iexact=name)
#
# def __str__(self):
# return self.name
#
# class Meta:
# ordering = ['ordering']
#
# class PatchChangeNotification(models.Model):
# patch = models.OneToOneField(Patch, primary_key=True,
# on_delete=models.CASCADE)
# last_modified = models.DateTimeField(default=datetime.datetime.now)
# orig_state = models.ForeignKey(State, on_delete=models.CASCADE)
#
# class EmailOptout(models.Model):
# email = models.CharField(max_length=200, primary_key=True)
#
# @classmethod
# def is_optout(cls, email):
# email = email.lower().strip()
# return cls.objects.filter(email=email).count() > 0
#
# def __str__(self):
# return self.email
. Output only the next line. | state = State.objects.exclude(pk=oldstate.pk)[0] |
Predict the next line for this snippet: <|code_start|># along with Patchwork; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
class PatchNotificationModelTest(TestCase):
fixtures = ['default_states', 'default_events']
"""Tests for the creation & update of the PatchChangeNotification model"""
def setUp(self):
self.project = defaults.project
self.project.send_notifications = True
self.project.save()
self.submitter = defaults.patch_author_person
self.submitter.save()
self.patch = Patch(project=self.project, msgid='testpatch',
name='testpatch', content='',
submitter=self.submitter)
def tearDown(self):
self.patch.delete()
self.submitter.delete()
self.project.delete()
def testPatchCreation(self):
"""Ensure we don't get a notification on create"""
self.patch.save()
<|code_end|>
with the help of current file imports:
import datetime
from django.conf import settings
from django.core import mail
from django.test import TestCase
from patchwork.models import Patch, State, PatchChangeNotification, EmailOptout
from patchwork.tests.utils import defaults
from patchwork.utils import send_notifications
and context from other files:
# Path: patchwork/models.py
# class Patch(models.Model):
# project = models.ForeignKey(Project, on_delete=models.CASCADE)
# msgid = models.CharField(max_length=255)
# name = models.CharField(max_length=255)
# date = models.DateTimeField(default=datetime.datetime.now)
# last_updated = models.DateTimeField(auto_now=True)
# submitter = models.ForeignKey(Person, on_delete=models.CASCADE)
# delegate = models.ForeignKey(User, blank=True, null=True,
# on_delete=models.CASCADE)
# state = models.ForeignKey(State, null=True, on_delete=models.CASCADE)
# archived = models.BooleanField(default=False)
# headers = models.TextField(blank=True)
# content = models.TextField(null=True, blank=True)
# pull_url = models.CharField(max_length=255, null=True, blank=True)
# commit_ref = models.CharField(max_length=255, null=True, blank=True)
# hash = HashField(null=True, blank=True)
# tags = models.ManyToManyField(Tag, through=PatchTag)
#
# objects = PatchManager()
#
# def commit_message(self):
# """Retrieves the commit message"""
# return Comment.objects.filter(patch=self, msgid=self.msgid)
#
# def answers(self):
# """Retrieves the answers (ie all comments but the commit message)"""
# return Comment.objects.filter(Q(patch=self) & ~Q(msgid=self.msgid))
#
# def comments(self):
# """Retrieves all comments of this patch ie. the commit message and the
# answers"""
# return Comment.objects.filter(patch=self)
#
# def series(self):
# try:
# rev = SeriesRevisionPatch.objects.filter(patch=self)[0].revision
# return rev.series
# except Exception:
# return None
#
# def _set_tag(self, tag, count):
# if count == 0:
# self.patchtag_set.filter(tag=tag).delete()
# return
# (patchtag, _) = PatchTag.objects.get_or_create(patch=self, tag=tag)
# if patchtag.count != count:
# patchtag.count = count
# patchtag.save()
#
# def refresh_tag_counts(self):
# tags = self.project.tags
# counter = Counter()
# for comment in self.comment_set.all():
# counter = counter + extract_tags(comment.content, tags)
#
# for tag in tags:
# self._set_tag(tag, counter[tag])
#
# def save(self):
# if not hasattr(self, 'state') or not self.state:
# self.state = get_default_initial_patch_state()
#
# if self.hash is None and self.content is not None:
# self.hash = hash_patch(self.content).hexdigest()
#
# super(Patch, self).save()
#
# def filename(self):
# return filename(self.name, '.patch')
#
# def human_name(self):
# return self.name
#
# @models.permalink
# def get_absolute_url(self):
# return ('patch', (), {'patch_id': self.id})
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = 'Patches'
# ordering = ['date']
# unique_together = [('msgid', 'project')]
#
# class State(models.Model):
# name = models.CharField(max_length=100)
# ordering = models.IntegerField(unique=True)
# action_required = models.BooleanField(default=True)
#
# @classmethod
# def from_string(cls, name):
# return State.objects.get(name__iexact=name)
#
# def __str__(self):
# return self.name
#
# class Meta:
# ordering = ['ordering']
#
# class PatchChangeNotification(models.Model):
# patch = models.OneToOneField(Patch, primary_key=True,
# on_delete=models.CASCADE)
# last_modified = models.DateTimeField(default=datetime.datetime.now)
# orig_state = models.ForeignKey(State, on_delete=models.CASCADE)
#
# class EmailOptout(models.Model):
# email = models.CharField(max_length=200, primary_key=True)
#
# @classmethod
# def is_optout(cls, email):
# email = email.lower().strip()
# return cls.objects.filter(email=email).count() > 0
#
# def __str__(self):
# return self.email
, which may contain function names, class names, or code. Output only the next line. | self.assertEqual(PatchChangeNotification.objects.count(), 0) |
Predict the next line for this snippet: <|code_start|> orig_state=self.patch.state).save()
self._expireNotifications()
errors = send_notifications()
self.assertEqual(errors, [])
self.assertEqual(len(mail.outbox), 1)
msg = mail.outbox[0]
self.assertEqual(msg.to, [self.submitter.email])
self.assertIn(self.patch.get_absolute_url(), msg.body)
def testNotificationEscaping(self):
self.patch.name = 'Patch name with " character'
self.patch.save()
PatchChangeNotification(patch=self.patch,
orig_state=self.patch.state).save()
self._expireNotifications()
errors = send_notifications()
self.assertEqual(errors, [])
self.assertEqual(len(mail.outbox), 1)
msg = mail.outbox[0]
self.assertEqual(msg.to, [self.submitter.email])
self.assertNotIn('"', msg.body)
def testNotificationOptout(self):
"""ensure opt-out addresses don't get notifications"""
PatchChangeNotification(patch=self.patch,
orig_state=self.patch.state).save()
self._expireNotifications()
<|code_end|>
with the help of current file imports:
import datetime
from django.conf import settings
from django.core import mail
from django.test import TestCase
from patchwork.models import Patch, State, PatchChangeNotification, EmailOptout
from patchwork.tests.utils import defaults
from patchwork.utils import send_notifications
and context from other files:
# Path: patchwork/models.py
# class Patch(models.Model):
# project = models.ForeignKey(Project, on_delete=models.CASCADE)
# msgid = models.CharField(max_length=255)
# name = models.CharField(max_length=255)
# date = models.DateTimeField(default=datetime.datetime.now)
# last_updated = models.DateTimeField(auto_now=True)
# submitter = models.ForeignKey(Person, on_delete=models.CASCADE)
# delegate = models.ForeignKey(User, blank=True, null=True,
# on_delete=models.CASCADE)
# state = models.ForeignKey(State, null=True, on_delete=models.CASCADE)
# archived = models.BooleanField(default=False)
# headers = models.TextField(blank=True)
# content = models.TextField(null=True, blank=True)
# pull_url = models.CharField(max_length=255, null=True, blank=True)
# commit_ref = models.CharField(max_length=255, null=True, blank=True)
# hash = HashField(null=True, blank=True)
# tags = models.ManyToManyField(Tag, through=PatchTag)
#
# objects = PatchManager()
#
# def commit_message(self):
# """Retrieves the commit message"""
# return Comment.objects.filter(patch=self, msgid=self.msgid)
#
# def answers(self):
# """Retrieves the answers (ie all comments but the commit message)"""
# return Comment.objects.filter(Q(patch=self) & ~Q(msgid=self.msgid))
#
# def comments(self):
# """Retrieves all comments of this patch ie. the commit message and the
# answers"""
# return Comment.objects.filter(patch=self)
#
# def series(self):
# try:
# rev = SeriesRevisionPatch.objects.filter(patch=self)[0].revision
# return rev.series
# except Exception:
# return None
#
# def _set_tag(self, tag, count):
# if count == 0:
# self.patchtag_set.filter(tag=tag).delete()
# return
# (patchtag, _) = PatchTag.objects.get_or_create(patch=self, tag=tag)
# if patchtag.count != count:
# patchtag.count = count
# patchtag.save()
#
# def refresh_tag_counts(self):
# tags = self.project.tags
# counter = Counter()
# for comment in self.comment_set.all():
# counter = counter + extract_tags(comment.content, tags)
#
# for tag in tags:
# self._set_tag(tag, counter[tag])
#
# def save(self):
# if not hasattr(self, 'state') or not self.state:
# self.state = get_default_initial_patch_state()
#
# if self.hash is None and self.content is not None:
# self.hash = hash_patch(self.content).hexdigest()
#
# super(Patch, self).save()
#
# def filename(self):
# return filename(self.name, '.patch')
#
# def human_name(self):
# return self.name
#
# @models.permalink
# def get_absolute_url(self):
# return ('patch', (), {'patch_id': self.id})
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = 'Patches'
# ordering = ['date']
# unique_together = [('msgid', 'project')]
#
# class State(models.Model):
# name = models.CharField(max_length=100)
# ordering = models.IntegerField(unique=True)
# action_required = models.BooleanField(default=True)
#
# @classmethod
# def from_string(cls, name):
# return State.objects.get(name__iexact=name)
#
# def __str__(self):
# return self.name
#
# class Meta:
# ordering = ['ordering']
#
# class PatchChangeNotification(models.Model):
# patch = models.OneToOneField(Patch, primary_key=True,
# on_delete=models.CASCADE)
# last_modified = models.DateTimeField(default=datetime.datetime.now)
# orig_state = models.ForeignKey(State, on_delete=models.CASCADE)
#
# class EmailOptout(models.Model):
# email = models.CharField(max_length=200, primary_key=True)
#
# @classmethod
# def is_optout(cls, email):
# email = email.lower().strip()
# return cls.objects.filter(email=email).count() > 0
#
# def __str__(self):
# return self.email
, which may contain function names, class names, or code. Output only the next line. | EmailOptout(email=self.submitter.email).save() |
Based on the snippet: <|code_start|> def series(self):
try:
rev = SeriesRevisionPatch.objects.filter(patch=self)[0].revision
return rev.series
except Exception:
return None
def _set_tag(self, tag, count):
if count == 0:
self.patchtag_set.filter(tag=tag).delete()
return
(patchtag, _) = PatchTag.objects.get_or_create(patch=self, tag=tag)
if patchtag.count != count:
patchtag.count = count
patchtag.save()
def refresh_tag_counts(self):
tags = self.project.tags
counter = Counter()
for comment in self.comment_set.all():
counter = counter + extract_tags(comment.content, tags)
for tag in tags:
self._set_tag(tag, counter[tag])
def save(self):
if not hasattr(self, 'state') or not self.state:
self.state = get_default_initial_patch_state()
if self.hash is None and self.content is not None:
<|code_end|>
, predict the immediate next line with the help of imports:
from collections import Counter, OrderedDict
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.six.moves import filter
from patchwork.fields import HashField
from patchwork.parser import hash_patch, extract_tags
import datetime
import jsonfield
import random
import re
import patchwork.threadlocalrequest as threadlocalrequest
import django.dispatch
and context (classes, functions, sometimes code) from other files:
# Path: patchwork/parser.py
# def hash_patch(str):
# # normalise spaces
# str = str.replace('\r', '')
# str = str.strip() + '\n'
#
# prefixes = ['-', '+', ' ']
# hash = hashlib.sha1()
#
# for line in str.split('\n'):
#
# if len(line) <= 0:
# continue
#
# hunk_match = _hunk_re.match(line)
# filename_match = _filename_re.match(line)
#
# if filename_match:
# # normalise -p1 top-directories
# if filename_match.group(1) == '---':
# filename = 'a/'
# else:
# filename = 'b/'
# filename += '/'.join(filename_match.group(2).split('/')[1:])
#
# line = filename_match.group(1) + ' ' + filename
#
# elif hunk_match:
# # remove line numbers, but leave line counts
# def fn(x):
# if not x:
# return 1
# return int(x)
# line_nos = list(map(fn, hunk_match.groups()))
# line = '@@ -%d +%d @@' % tuple(line_nos)
#
# elif line[0] in prefixes:
# # if we have a +, - or context line, leave as-is
# pass
#
# else:
# # other lines are ignored
# continue
#
# hash.update((line + '\n').encode('utf-8'))
#
# return hash
#
# def extract_tags(content, tags):
# counts = Counter()
#
# for tag in tags:
# regex = re.compile(tag.pattern, re.MULTILINE | re.IGNORECASE)
# counts[tag] = len(regex.findall(content))
#
# return counts
. Output only the next line. | self.hash = hash_patch(self.content).hexdigest() |
Using the snippet: <|code_start|>
def answers(self):
"""Retrieves the answers (ie all comments but the commit message)"""
return Comment.objects.filter(Q(patch=self) & ~Q(msgid=self.msgid))
def comments(self):
"""Retrieves all comments of this patch ie. the commit message and the
answers"""
return Comment.objects.filter(patch=self)
def series(self):
try:
rev = SeriesRevisionPatch.objects.filter(patch=self)[0].revision
return rev.series
except Exception:
return None
def _set_tag(self, tag, count):
if count == 0:
self.patchtag_set.filter(tag=tag).delete()
return
(patchtag, _) = PatchTag.objects.get_or_create(patch=self, tag=tag)
if patchtag.count != count:
patchtag.count = count
patchtag.save()
def refresh_tag_counts(self):
tags = self.project.tags
counter = Counter()
for comment in self.comment_set.all():
<|code_end|>
, determine the next line of code. You have imports:
from collections import Counter, OrderedDict
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.six.moves import filter
from patchwork.fields import HashField
from patchwork.parser import hash_patch, extract_tags
import datetime
import jsonfield
import random
import re
import patchwork.threadlocalrequest as threadlocalrequest
import django.dispatch
and context (class names, function names, or code) available:
# Path: patchwork/parser.py
# def hash_patch(str):
# # normalise spaces
# str = str.replace('\r', '')
# str = str.strip() + '\n'
#
# prefixes = ['-', '+', ' ']
# hash = hashlib.sha1()
#
# for line in str.split('\n'):
#
# if len(line) <= 0:
# continue
#
# hunk_match = _hunk_re.match(line)
# filename_match = _filename_re.match(line)
#
# if filename_match:
# # normalise -p1 top-directories
# if filename_match.group(1) == '---':
# filename = 'a/'
# else:
# filename = 'b/'
# filename += '/'.join(filename_match.group(2).split('/')[1:])
#
# line = filename_match.group(1) + ' ' + filename
#
# elif hunk_match:
# # remove line numbers, but leave line counts
# def fn(x):
# if not x:
# return 1
# return int(x)
# line_nos = list(map(fn, hunk_match.groups()))
# line = '@@ -%d +%d @@' % tuple(line_nos)
#
# elif line[0] in prefixes:
# # if we have a +, - or context line, leave as-is
# pass
#
# else:
# # other lines are ignored
# continue
#
# hash.update((line + '\n').encode('utf-8'))
#
# return hash
#
# def extract_tags(content, tags):
# counts = Counter()
#
# for tag in tags:
# regex = re.compile(tag.pattern, re.MULTILINE | re.IGNORECASE)
# counts[tag] = len(regex.findall(content))
#
# return counts
. Output only the next line. | counter = counter + extract_tags(comment.content, tags) |
Next line prediction: <|code_start|> response = self.client.post(self.url, {'email': email})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'patchwork/mail-settings.html')
self.assertEqual(response.context['email'], email)
def testMailSettingsPOSTEmpty(self):
response = self.client.post(self.url, {'email': ''})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'patchwork/mail-form.html')
self.assertFormError(response, 'form', 'email',
'This field is required.')
def testMailSettingsPOSTInvalid(self):
response = self.client.post(self.url, {'email': 'foo'})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'patchwork/mail-form.html')
self.assertFormError(response, 'form', 'email', error_strings['email'])
def testMailSettingsPOSTOptedIn(self):
email = u'foo@example.com'
response = self.client.post(self.url, {'email': email})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'patchwork/mail-settings.html')
self.assertEqual(response.context['is_optout'], False)
self.assertContains(response, '<strong>may</strong>')
optout_url = reverse('mail_optout')
self.assertContains(response, ('action="%s"' % optout_url))
def testMailSettingsPOSTOptedOut(self):
email = u'foo@example.com'
<|code_end|>
. Use current file imports:
(import re
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from patchwork.models import EmailOptout, EmailConfirmation, Person
from patchwork.tests.utils import create_user, error_strings)
and context including class names, function names, or small code snippets from other files:
# Path: patchwork/models.py
# class EmailOptout(models.Model):
# email = models.CharField(max_length=200, primary_key=True)
#
# @classmethod
# def is_optout(cls, email):
# email = email.lower().strip()
# return cls.objects.filter(email=email).count() > 0
#
# def __str__(self):
# return self.email
#
# class EmailConfirmation(models.Model):
# validity = datetime.timedelta(days=settings.CONFIRMATION_VALIDITY_DAYS)
# type = models.CharField(max_length=20, choices=[
# ('userperson', 'User-Person association'),
# ('registration', 'Registration'),
# ('optout', 'Email opt-out'),
# ])
# email = models.CharField(max_length=200)
# user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
# key = HashField()
# date = models.DateTimeField(default=datetime.datetime.now)
# active = models.BooleanField(default=True)
#
# def deactivate(self):
# self.active = False
# self.save()
#
# def is_valid(self):
# return self.date + self.validity > datetime.datetime.now()
#
# def save(self):
# max = 1 << 32
# if self.key == '':
# str = '%s%s%d' % (self.user, self.email, random.randint(0, max))
# self.key = self._meta.get_field('key').construct(str).hexdigest()
# super(EmailConfirmation, self).save()
#
# class Person(models.Model):
# email = models.CharField(max_length=255, unique=True)
# name = models.CharField(max_length=255, null=True, blank=True)
# user = models.ForeignKey(User, null=True, blank=True,
# on_delete=models.SET_NULL)
#
# def display_name(self):
# if self.name:
# return self.name
# else:
# return self.email
#
# def email_name(self):
# if (self.name):
# return "\"%s\" <%s>" % (self.name, self.email)
# else:
# return self.email
#
# def link_to_user(self, user):
# self.name = user.profile.name()
# self.user = user
#
# def __str__(self):
# return self.display_name()
#
# class Meta:
# verbose_name_plural = 'People'
. Output only the next line. | EmailOptout(email=email).save() |
Predict the next line after this snippet: <|code_start|> self.assertContains(response, '<strong>may</strong>')
optout_url = reverse('mail_optout')
self.assertContains(response, ('action="%s"' % optout_url))
def testMailSettingsPOSTOptedOut(self):
email = u'foo@example.com'
EmailOptout(email=email).save()
response = self.client.post(self.url, {'email': email})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'patchwork/mail-settings.html')
self.assertEqual(response.context['is_optout'], True)
self.assertContains(response, '<strong>may not</strong>')
optin_url = reverse('mail_optin')
self.assertContains(response, ('action="%s"' % optin_url))
class OptoutRequestTest(TestCase):
def setUp(self):
self.url = reverse('mail_optout')
def testOptOutRequestGET(self):
response = self.client.get(self.url)
self.assertRedirects(response, reverse('mail_settings'))
def testOptoutRequestValidPOST(self):
email = u'foo@example.com'
response = self.client.post(self.url, {'email': email})
# check for a confirmation object
<|code_end|>
using the current file's imports:
import re
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from patchwork.models import EmailOptout, EmailConfirmation, Person
from patchwork.tests.utils import create_user, error_strings
and any relevant context from other files:
# Path: patchwork/models.py
# class EmailOptout(models.Model):
# email = models.CharField(max_length=200, primary_key=True)
#
# @classmethod
# def is_optout(cls, email):
# email = email.lower().strip()
# return cls.objects.filter(email=email).count() > 0
#
# def __str__(self):
# return self.email
#
# class EmailConfirmation(models.Model):
# validity = datetime.timedelta(days=settings.CONFIRMATION_VALIDITY_DAYS)
# type = models.CharField(max_length=20, choices=[
# ('userperson', 'User-Person association'),
# ('registration', 'Registration'),
# ('optout', 'Email opt-out'),
# ])
# email = models.CharField(max_length=200)
# user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
# key = HashField()
# date = models.DateTimeField(default=datetime.datetime.now)
# active = models.BooleanField(default=True)
#
# def deactivate(self):
# self.active = False
# self.save()
#
# def is_valid(self):
# return self.date + self.validity > datetime.datetime.now()
#
# def save(self):
# max = 1 << 32
# if self.key == '':
# str = '%s%s%d' % (self.user, self.email, random.randint(0, max))
# self.key = self._meta.get_field('key').construct(str).hexdigest()
# super(EmailConfirmation, self).save()
#
# class Person(models.Model):
# email = models.CharField(max_length=255, unique=True)
# name = models.CharField(max_length=255, null=True, blank=True)
# user = models.ForeignKey(User, null=True, blank=True,
# on_delete=models.SET_NULL)
#
# def display_name(self):
# if self.name:
# return self.name
# else:
# return self.email
#
# def email_name(self):
# if (self.name):
# return "\"%s\" <%s>" % (self.name, self.email)
# else:
# return self.email
#
# def link_to_user(self, user):
# self.name = user.profile.name()
# self.user = user
#
# def __str__(self):
# return self.display_name()
#
# class Meta:
# verbose_name_plural = 'People'
. Output only the next line. | self.assertEqual(EmailConfirmation.objects.count(), 1) |
Here is a snippet: <|code_start|> self.url = reverse('user')
self.optout_url = reverse('mail_optout')
self.optin_url = reverse('mail_optin')
self.form_re_template = (r'<form\s+[^>]*action="%(url)s"[^>]*>'
r'.*?<input\s+[^>]*value="%(email)s"[^>]*>.*?'
r'</form>')
self.secondary_email = 'test2@example.com'
self.user = create_user()
self.client.login(username=self.user.username,
password=self.user.username)
def _form_re(self, url, email):
return re.compile(self.form_re_template % {'url': url, 'email': email},
re.DOTALL)
def testMainEmailOptoutForm(self):
form_re = self._form_re(self.optout_url, self.user.email)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTrue(form_re.search(response.content.decode()) is not None)
def testMainEmailOptinForm(self):
EmailOptout(email=self.user.email).save()
form_re = self._form_re(self.optin_url, self.user.email)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTrue(form_re.search(response.content.decode()) is not None)
def testSecondaryEmailOptoutForm(self):
<|code_end|>
. Write the next line using the current file imports:
import re
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from patchwork.models import EmailOptout, EmailConfirmation, Person
from patchwork.tests.utils import create_user, error_strings
and context from other files:
# Path: patchwork/models.py
# class EmailOptout(models.Model):
# email = models.CharField(max_length=200, primary_key=True)
#
# @classmethod
# def is_optout(cls, email):
# email = email.lower().strip()
# return cls.objects.filter(email=email).count() > 0
#
# def __str__(self):
# return self.email
#
# class EmailConfirmation(models.Model):
# validity = datetime.timedelta(days=settings.CONFIRMATION_VALIDITY_DAYS)
# type = models.CharField(max_length=20, choices=[
# ('userperson', 'User-Person association'),
# ('registration', 'Registration'),
# ('optout', 'Email opt-out'),
# ])
# email = models.CharField(max_length=200)
# user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
# key = HashField()
# date = models.DateTimeField(default=datetime.datetime.now)
# active = models.BooleanField(default=True)
#
# def deactivate(self):
# self.active = False
# self.save()
#
# def is_valid(self):
# return self.date + self.validity > datetime.datetime.now()
#
# def save(self):
# max = 1 << 32
# if self.key == '':
# str = '%s%s%d' % (self.user, self.email, random.randint(0, max))
# self.key = self._meta.get_field('key').construct(str).hexdigest()
# super(EmailConfirmation, self).save()
#
# class Person(models.Model):
# email = models.CharField(max_length=255, unique=True)
# name = models.CharField(max_length=255, null=True, blank=True)
# user = models.ForeignKey(User, null=True, blank=True,
# on_delete=models.SET_NULL)
#
# def display_name(self):
# if self.name:
# return self.name
# else:
# return self.email
#
# def email_name(self):
# if (self.name):
# return "\"%s\" <%s>" % (self.name, self.email)
# else:
# return self.email
#
# def link_to_user(self, user):
# self.name = user.profile.name()
# self.user = user
#
# def __str__(self):
# return self.display_name()
#
# class Meta:
# verbose_name_plural = 'People'
, which may include functions, classes, or code. Output only the next line. | p = Person(email=self.secondary_email, user=self.user) |
Next line prediction: <|code_start|> ('WGirwRXgK', 'WGSrwRXgK@TriIETY.GhE',
d(2014, 2, 14, 13, 4, 50, 169305)),
('isjNIuiAc', 'issNIuiAc@OsEirYx.EJh',
d(2014, 3, 15, 13, 4, 50, 176264)),
('XkAQpYGws', 'XkFQpYGws@hzntTcm.JSE',
d(2014, 1, 18, 13, 4, 50, 182493)),
('uJuCPWMvi', 'uJACPWMvi@AVRBOBl.ecy',
d(2014, 3, 12, 13, 4, 50, 189554)),
('TyQmWtcbg', 'TylmWtcbg@DzrNeNH.JuB',
d(2014, 2, 3, 13, 4, 50, 195685)),
('FpvAhWRdX', 'FpKAhWRdX@agxnCAI.wFO',
d(2014, 3, 15, 13, 4, 50, 201398)),
('bmoYvnyWa', 'bmdYvnyWa@aeoPnlX.juy',
d(2014, 3, 4, 13, 4, 50, 206800)),
('CiReUQsAq', 'CiieUQsAq@DnOYRuf.TTI',
d(2014, 3, 28, 13, 4, 50, 212169)),
]
def setUp(self):
defaults.project.save()
for (name, email, date) in self.patchmeta:
patch_name = 'testpatch' + name
<|code_end|>
. Use current file imports:
(import datetime
import re
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.six.moves import zip
from patchwork.models import Person, Patch
from patchwork.tests.utils import defaults)
and context including class names, function names, or small code snippets from other files:
# Path: patchwork/models.py
# class Person(models.Model):
# email = models.CharField(max_length=255, unique=True)
# name = models.CharField(max_length=255, null=True, blank=True)
# user = models.ForeignKey(User, null=True, blank=True,
# on_delete=models.SET_NULL)
#
# def display_name(self):
# if self.name:
# return self.name
# else:
# return self.email
#
# def email_name(self):
# if (self.name):
# return "\"%s\" <%s>" % (self.name, self.email)
# else:
# return self.email
#
# def link_to_user(self, user):
# self.name = user.profile.name()
# self.user = user
#
# def __str__(self):
# return self.display_name()
#
# class Meta:
# verbose_name_plural = 'People'
#
# class Patch(models.Model):
# project = models.ForeignKey(Project, on_delete=models.CASCADE)
# msgid = models.CharField(max_length=255)
# name = models.CharField(max_length=255)
# date = models.DateTimeField(default=datetime.datetime.now)
# last_updated = models.DateTimeField(auto_now=True)
# submitter = models.ForeignKey(Person, on_delete=models.CASCADE)
# delegate = models.ForeignKey(User, blank=True, null=True,
# on_delete=models.CASCADE)
# state = models.ForeignKey(State, null=True, on_delete=models.CASCADE)
# archived = models.BooleanField(default=False)
# headers = models.TextField(blank=True)
# content = models.TextField(null=True, blank=True)
# pull_url = models.CharField(max_length=255, null=True, blank=True)
# commit_ref = models.CharField(max_length=255, null=True, blank=True)
# hash = HashField(null=True, blank=True)
# tags = models.ManyToManyField(Tag, through=PatchTag)
#
# objects = PatchManager()
#
# def commit_message(self):
# """Retrieves the commit message"""
# return Comment.objects.filter(patch=self, msgid=self.msgid)
#
# def answers(self):
# """Retrieves the answers (ie all comments but the commit message)"""
# return Comment.objects.filter(Q(patch=self) & ~Q(msgid=self.msgid))
#
# def comments(self):
# """Retrieves all comments of this patch ie. the commit message and the
# answers"""
# return Comment.objects.filter(patch=self)
#
# def series(self):
# try:
# rev = SeriesRevisionPatch.objects.filter(patch=self)[0].revision
# return rev.series
# except Exception:
# return None
#
# def _set_tag(self, tag, count):
# if count == 0:
# self.patchtag_set.filter(tag=tag).delete()
# return
# (patchtag, _) = PatchTag.objects.get_or_create(patch=self, tag=tag)
# if patchtag.count != count:
# patchtag.count = count
# patchtag.save()
#
# def refresh_tag_counts(self):
# tags = self.project.tags
# counter = Counter()
# for comment in self.comment_set.all():
# counter = counter + extract_tags(comment.content, tags)
#
# for tag in tags:
# self._set_tag(tag, counter[tag])
#
# def save(self):
# if not hasattr(self, 'state') or not self.state:
# self.state = get_default_initial_patch_state()
#
# if self.hash is None and self.content is not None:
# self.hash = hash_patch(self.content).hexdigest()
#
# super(Patch, self).save()
#
# def filename(self):
# return filename(self.name, '.patch')
#
# def human_name(self):
# return self.name
#
# @models.permalink
# def get_absolute_url(self):
# return ('patch', (), {'patch_id': self.id})
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = 'Patches'
# ordering = ['date']
# unique_together = [('msgid', 'project')]
. Output only the next line. | person = Person(name=name, email=email) |
Continue the code snippet: <|code_start|>
('isjNIuiAc', 'issNIuiAc@OsEirYx.EJh',
d(2014, 3, 15, 13, 4, 50, 176264)),
('XkAQpYGws', 'XkFQpYGws@hzntTcm.JSE',
d(2014, 1, 18, 13, 4, 50, 182493)),
('uJuCPWMvi', 'uJACPWMvi@AVRBOBl.ecy',
d(2014, 3, 12, 13, 4, 50, 189554)),
('TyQmWtcbg', 'TylmWtcbg@DzrNeNH.JuB',
d(2014, 2, 3, 13, 4, 50, 195685)),
('FpvAhWRdX', 'FpKAhWRdX@agxnCAI.wFO',
d(2014, 3, 15, 13, 4, 50, 201398)),
('bmoYvnyWa', 'bmdYvnyWa@aeoPnlX.juy',
d(2014, 3, 4, 13, 4, 50, 206800)),
('CiReUQsAq', 'CiieUQsAq@DnOYRuf.TTI',
d(2014, 3, 28, 13, 4, 50, 212169)),
]
def setUp(self):
defaults.project.save()
for (name, email, date) in self.patchmeta:
patch_name = 'testpatch' + name
person = Person(name=name, email=email)
person.save()
<|code_end|>
. Use current file imports:
import datetime
import re
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.six.moves import zip
from patchwork.models import Person, Patch
from patchwork.tests.utils import defaults
and context (classes, functions, or code) from other files:
# Path: patchwork/models.py
# class Person(models.Model):
# email = models.CharField(max_length=255, unique=True)
# name = models.CharField(max_length=255, null=True, blank=True)
# user = models.ForeignKey(User, null=True, blank=True,
# on_delete=models.SET_NULL)
#
# def display_name(self):
# if self.name:
# return self.name
# else:
# return self.email
#
# def email_name(self):
# if (self.name):
# return "\"%s\" <%s>" % (self.name, self.email)
# else:
# return self.email
#
# def link_to_user(self, user):
# self.name = user.profile.name()
# self.user = user
#
# def __str__(self):
# return self.display_name()
#
# class Meta:
# verbose_name_plural = 'People'
#
# class Patch(models.Model):
# project = models.ForeignKey(Project, on_delete=models.CASCADE)
# msgid = models.CharField(max_length=255)
# name = models.CharField(max_length=255)
# date = models.DateTimeField(default=datetime.datetime.now)
# last_updated = models.DateTimeField(auto_now=True)
# submitter = models.ForeignKey(Person, on_delete=models.CASCADE)
# delegate = models.ForeignKey(User, blank=True, null=True,
# on_delete=models.CASCADE)
# state = models.ForeignKey(State, null=True, on_delete=models.CASCADE)
# archived = models.BooleanField(default=False)
# headers = models.TextField(blank=True)
# content = models.TextField(null=True, blank=True)
# pull_url = models.CharField(max_length=255, null=True, blank=True)
# commit_ref = models.CharField(max_length=255, null=True, blank=True)
# hash = HashField(null=True, blank=True)
# tags = models.ManyToManyField(Tag, through=PatchTag)
#
# objects = PatchManager()
#
# def commit_message(self):
# """Retrieves the commit message"""
# return Comment.objects.filter(patch=self, msgid=self.msgid)
#
# def answers(self):
# """Retrieves the answers (ie all comments but the commit message)"""
# return Comment.objects.filter(Q(patch=self) & ~Q(msgid=self.msgid))
#
# def comments(self):
# """Retrieves all comments of this patch ie. the commit message and the
# answers"""
# return Comment.objects.filter(patch=self)
#
# def series(self):
# try:
# rev = SeriesRevisionPatch.objects.filter(patch=self)[0].revision
# return rev.series
# except Exception:
# return None
#
# def _set_tag(self, tag, count):
# if count == 0:
# self.patchtag_set.filter(tag=tag).delete()
# return
# (patchtag, _) = PatchTag.objects.get_or_create(patch=self, tag=tag)
# if patchtag.count != count:
# patchtag.count = count
# patchtag.save()
#
# def refresh_tag_counts(self):
# tags = self.project.tags
# counter = Counter()
# for comment in self.comment_set.all():
# counter = counter + extract_tags(comment.content, tags)
#
# for tag in tags:
# self._set_tag(tag, counter[tag])
#
# def save(self):
# if not hasattr(self, 'state') or not self.state:
# self.state = get_default_initial_patch_state()
#
# if self.hash is None and self.content is not None:
# self.hash = hash_patch(self.content).hexdigest()
#
# super(Patch, self).save()
#
# def filename(self):
# return filename(self.name, '.patch')
#
# def human_name(self):
# return self.name
#
# @models.permalink
# def get_absolute_url(self):
# return ('patch', (), {'patch_id': self.id})
#
# def __str__(self):
# return self.name
#
# class Meta:
# verbose_name_plural = 'Patches'
# ordering = ['date']
# unique_together = [('msgid', 'project')]
. Output only the next line. | patch = Patch(project=defaults.project, msgid=patch_name, |
Given the code snippet: <|code_start|>#! /usr/bin/python
#--------------------------------------------------------------------
# PROGRAM : get_path.py
# CREATED BY : hjkim @IIS.2015-07-13 13:02:05.942756
# MODIFED BY :
#
# USAGE : $ ./get_path.py
#
# DESCRIPTION:
#------------------------------------------------------cf0.2@20120401
def get_path(srcDir, sDTime, eDTime):
'''
select GPM(hdf5) and TRMM(hdf4) files and return their paths)
'''
prjName, prdLv, prdVer = srcDir.split(os.path.sep)[-3:]
<|code_end|>
, generate the next line using the imports in this file:
import os,sys
from optparse import OptionParser
from parse_fname_trmm import parse_fname_trmm
from parse_fname_gpm import parse_fname_gpm
and context (functions, classes, or occasionally code) from other files:
# Path: parse_fname_trmm.py
# def parse_fname_trmm(fName, ATTR):
# '''
# fName : TRMM HDF filename
# ATTR : list of attributes (i.e., 'sDTime' and/or 'eDTime')
# '''
#
# sDTime = datetime.strptime( re.findall(r'\d{8}', fName)[0], '%Y%m%d' )
#
# offset = timedelta( seconds=86400 )
#
# dictFunc= {'sDTime': sDTime,
# 'eDTime': sDTime+offset,
# }
#
# return [dictFunc[attr] for attr in ATTR]
#
# Path: parse_fname_gpm.py
# def parse_fname_gpm(fName, ATTR):
# '''
# fName : GPM HDF path
# ATTR : list of attributes (i.e., 'sDTime' and/or 'eDTime')
# '''
#
# fName = fName.split('_')
#
# dictFunc= {'sDTime': datetime.strptime(fName[2], '%y%m%d%H%M'),
# 'eDTime': datetime.strptime(fName[2][:6]+fName[3], '%y%m%d%H%M')
# }
#
# if dictFunc['eDTime'] < dictFunc['sDTime']:
# dictFunc['eDTime'] += timedelta( days=1 )
#
# return [dictFunc[attr] for attr in ATTR]
. Output only the next line. | parse_fname = {'TRMM': parse_fname_trmm, |
Given the following code snippet before the placeholder: <|code_start|>#! /usr/bin/python
#--------------------------------------------------------------------
# PROGRAM : get_path.py
# CREATED BY : hjkim @IIS.2015-07-13 13:02:05.942756
# MODIFED BY :
#
# USAGE : $ ./get_path.py
#
# DESCRIPTION:
#------------------------------------------------------cf0.2@20120401
def get_path(srcDir, sDTime, eDTime):
'''
select GPM(hdf5) and TRMM(hdf4) files and return their paths)
'''
prjName, prdLv, prdVer = srcDir.split(os.path.sep)[-3:]
parse_fname = {'TRMM': parse_fname_trmm,
<|code_end|>
, predict the next line using imports from the current file:
import os,sys
from optparse import OptionParser
from parse_fname_trmm import parse_fname_trmm
from parse_fname_gpm import parse_fname_gpm
and context including class names, function names, and sometimes code from other files:
# Path: parse_fname_trmm.py
# def parse_fname_trmm(fName, ATTR):
# '''
# fName : TRMM HDF filename
# ATTR : list of attributes (i.e., 'sDTime' and/or 'eDTime')
# '''
#
# sDTime = datetime.strptime( re.findall(r'\d{8}', fName)[0], '%Y%m%d' )
#
# offset = timedelta( seconds=86400 )
#
# dictFunc= {'sDTime': sDTime,
# 'eDTime': sDTime+offset,
# }
#
# return [dictFunc[attr] for attr in ATTR]
#
# Path: parse_fname_gpm.py
# def parse_fname_gpm(fName, ATTR):
# '''
# fName : GPM HDF path
# ATTR : list of attributes (i.e., 'sDTime' and/or 'eDTime')
# '''
#
# fName = fName.split('_')
#
# dictFunc= {'sDTime': datetime.strptime(fName[2], '%y%m%d%H%M'),
# 'eDTime': datetime.strptime(fName[2][:6]+fName[3], '%y%m%d%H%M')
# }
#
# if dictFunc['eDTime'] < dictFunc['sDTime']:
# dictFunc['eDTime'] += timedelta( days=1 )
#
# return [dictFunc[attr] for attr in ATTR]
. Output only the next line. | 'GPM' : parse_fname_gpm}[ prjName.split('.')[0] ] |
Here is a snippet: <|code_start|>#! /usr/bin/python
#--------------------------------------------------------------------
# PROGRAM : search_granules.py
# CREATED BY : hjkim @IIS.2015-07-13 12:59:51.759752
# MODIFED BY :
#
# USAGE : $ ./search_granules.py
#
# DESCRIPTION:
#------------------------------------------------------cf0.2@20120401
class SearchGranules( object ):
def search_granules(self, srcDir, sDTime, eDTime, BBox=[[-90,-180],[90,180]], thresh=0.001):
'''
BBox : [[lllat,lllon], [urlat,urlon]] /* lat: -90 ~ 90 */
/* lon: -180 ~ 180 */
'''
<|code_end|>
. Write the next line using the current file imports:
import os,sys
from optparse import OptionParser
from numpy import arange, ma
from get_path import get_path
from get_gtrack_dim import get_gtrack_dim
and context from other files:
# Path: get_path.py
# def get_path(srcDir, sDTime, eDTime):
# '''
# select GPM(hdf5) and TRMM(hdf4) files and return their paths)
# '''
#
# prjName, prdLv, prdVer = srcDir.split(os.path.sep)[-3:]
#
# parse_fname = {'TRMM': parse_fname_trmm,
# 'GPM' : parse_fname_gpm}[ prjName.split('.')[0] ]
#
#
# if sDTime == eDTime:
# raise ValueError, '%s == %s'%(sDTime, eDTime)
#
#
#
# # do not know the reason of implementation ++++++++++++++++++++++
# # consider to use trange
# #srcDIR = [os.path.join(srcDir, '%i/%02d'%(y,m))
# srcDIR = [os.path.join(srcDir, str(y), '%02d'%m)
# for y in range(sDTime.year,eDTime.year+1)
# for m in range(1,13)]
#
# srcDIR = srcDIR[sDTime.month-1 : eDTime.month-12 if eDTime.month != 12 else 12]
# # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# srcPATH = []
#
# for srcDir in srcDIR:
# if not os.path.exists(srcDir):
# print 'Warning [%s] directory does not exists!'%srcDir
# continue
#
# for srcFName in sorted( os.listdir(srcDir) ):
#
# sdt_gtrk, edt_gtrk = parse_fname( srcFName, ['sDTime','eDTime'] )
#
# if sDTime <= edt_gtrk and eDTime >= sdt_gtrk:
# srcPATH.append( os.path.join(srcDir, srcFName) )
# else:
# continue
#
# return srcPATH
#
# Path: get_gtrack_dim.py
# def get_gtrack_dim(srcPath, fn_read, cache=False, cache_dir=None):
# '''
# scan granules and return dimension (T,Y,X) or ground tracks
#
# cache : mode of cf.devel.collection.cached
# ['cached', 'cached-verbose', 'skip', 'update']
# '''
#
# verbose = False if 'verbose' in cache \
# else True
# verbose = True
#
# prjName, prdLv, prdVer, yyyy, mm, srcFName = srcPath.split(os.path.sep)[-6:]
#
# get_dtime, get_location = {'TRMM': [get_dtime_trmm, get_location_trmm],
# 'GPM' : [get_dtime_gpm, get_location_gpm ],
# }[ prjName.split('.')[0] ]
#
#
# print '+ Get Groundtrack Dimension: {}'.format( srcPath )
#
# cache_dir = os.path.join( cache_dir, prjName, prdLv, prdVer, yyyy, mm )
#
# Lat, Lon = cached( srcFName + '.latlon',
# cache_dir,
# mode=cache,
# verbose=verbose )(get_location)(srcPath, fn_read)#, cache, cache_dir)
#
# Timetuple = cached( srcFName + '.timetuple',
# cache_dir,
# mode=cache,
# verbose=verbose )(get_dtime )(srcPath, fn_read)#, cache, cache_dir)
#
#
# # exception handling for us 1000000 instead of 0 ------------------------------------
# DTime = []
# for y,m,d,H,M,S,uS in Timetuple:
#
# if uS == 1000000:
# DTime.append( datetime(y,m,d,H,M,S,0)+timedelta(seconds=1) )
# print 'Warning [NS/ScanTime/Millisecond] == 1000 : %i %i %i %i %i %i %i' \
# %(y,m,d,H,M,S,uS/1000)
#
# else:
# DTime.append( datetime(y,m,d,H,M,S,uS) )
# # -----------------------------------------------------------------------------------
#
# DTime = array( DTime )
#
# return DTime, Lat, Lon
, which may include functions, classes, or code. Output only the next line. | srcPATH = get_path(srcDir, sDTime, eDTime) |
Next line prediction: <|code_start|>#! /usr/bin/python
#--------------------------------------------------------------------
# PROGRAM : search_granules.py
# CREATED BY : hjkim @IIS.2015-07-13 12:59:51.759752
# MODIFED BY :
#
# USAGE : $ ./search_granules.py
#
# DESCRIPTION:
#------------------------------------------------------cf0.2@20120401
class SearchGranules( object ):
def search_granules(self, srcDir, sDTime, eDTime, BBox=[[-90,-180],[90,180]], thresh=0.001):
'''
BBox : [[lllat,lllon], [urlat,urlon]] /* lat: -90 ~ 90 */
/* lon: -180 ~ 180 */
'''
srcPATH = get_path(srcDir, sDTime, eDTime)
<|code_end|>
. Use current file imports:
(import os,sys
from optparse import OptionParser
from numpy import arange, ma
from get_path import get_path
from get_gtrack_dim import get_gtrack_dim)
and context including class names, function names, or small code snippets from other files:
# Path: get_path.py
# def get_path(srcDir, sDTime, eDTime):
# '''
# select GPM(hdf5) and TRMM(hdf4) files and return their paths)
# '''
#
# prjName, prdLv, prdVer = srcDir.split(os.path.sep)[-3:]
#
# parse_fname = {'TRMM': parse_fname_trmm,
# 'GPM' : parse_fname_gpm}[ prjName.split('.')[0] ]
#
#
# if sDTime == eDTime:
# raise ValueError, '%s == %s'%(sDTime, eDTime)
#
#
#
# # do not know the reason of implementation ++++++++++++++++++++++
# # consider to use trange
# #srcDIR = [os.path.join(srcDir, '%i/%02d'%(y,m))
# srcDIR = [os.path.join(srcDir, str(y), '%02d'%m)
# for y in range(sDTime.year,eDTime.year+1)
# for m in range(1,13)]
#
# srcDIR = srcDIR[sDTime.month-1 : eDTime.month-12 if eDTime.month != 12 else 12]
# # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# srcPATH = []
#
# for srcDir in srcDIR:
# if not os.path.exists(srcDir):
# print 'Warning [%s] directory does not exists!'%srcDir
# continue
#
# for srcFName in sorted( os.listdir(srcDir) ):
#
# sdt_gtrk, edt_gtrk = parse_fname( srcFName, ['sDTime','eDTime'] )
#
# if sDTime <= edt_gtrk and eDTime >= sdt_gtrk:
# srcPATH.append( os.path.join(srcDir, srcFName) )
# else:
# continue
#
# return srcPATH
#
# Path: get_gtrack_dim.py
# def get_gtrack_dim(srcPath, fn_read, cache=False, cache_dir=None):
# '''
# scan granules and return dimension (T,Y,X) or ground tracks
#
# cache : mode of cf.devel.collection.cached
# ['cached', 'cached-verbose', 'skip', 'update']
# '''
#
# verbose = False if 'verbose' in cache \
# else True
# verbose = True
#
# prjName, prdLv, prdVer, yyyy, mm, srcFName = srcPath.split(os.path.sep)[-6:]
#
# get_dtime, get_location = {'TRMM': [get_dtime_trmm, get_location_trmm],
# 'GPM' : [get_dtime_gpm, get_location_gpm ],
# }[ prjName.split('.')[0] ]
#
#
# print '+ Get Groundtrack Dimension: {}'.format( srcPath )
#
# cache_dir = os.path.join( cache_dir, prjName, prdLv, prdVer, yyyy, mm )
#
# Lat, Lon = cached( srcFName + '.latlon',
# cache_dir,
# mode=cache,
# verbose=verbose )(get_location)(srcPath, fn_read)#, cache, cache_dir)
#
# Timetuple = cached( srcFName + '.timetuple',
# cache_dir,
# mode=cache,
# verbose=verbose )(get_dtime )(srcPath, fn_read)#, cache, cache_dir)
#
#
# # exception handling for us 1000000 instead of 0 ------------------------------------
# DTime = []
# for y,m,d,H,M,S,uS in Timetuple:
#
# if uS == 1000000:
# DTime.append( datetime(y,m,d,H,M,S,0)+timedelta(seconds=1) )
# print 'Warning [NS/ScanTime/Millisecond] == 1000 : %i %i %i %i %i %i %i' \
# %(y,m,d,H,M,S,uS/1000)
#
# else:
# DTime.append( datetime(y,m,d,H,M,S,uS) )
# # -----------------------------------------------------------------------------------
#
# DTime = array( DTime )
#
# return DTime, Lat, Lon
. Output only the next line. | gtrkDim = [get_gtrack_dim(path, self.func_read, self.cached, self.cacheDir) |
Using the snippet: <|code_start|>"""
GridEYE_test.py
Alexander Hiam <alex@graycat.io>
Example program for PyBBIO's GridEYE library. Reads the current
thermal sensor data and saves to thermal-image.png in the current
directory.
Requires the Python Imaging Library:
# pip install PIL
This example program is in the public domain.
"""
# Width and height of final image, 8x8 thermal image scaled to this size:
WIDTH = 600
HEIGHT = 600
I2C1.open()
<|code_end|>
, determine the next line of code. You have imports:
from bbio import *
from bbio.libraries.GridEYE import GridEYE
from PIL import Image
import colorsys
and context (class names, function names, or code) available:
# Path: bbio/libraries/GridEYE/GridEYE.py
# class GridEYE(object):
# AMG88_ADDR = 0x68
# AMG88_ALT_ADDR = 0x69
#
# REG_PCTL = 0x00
# PCTL_NORMAL = 0x00
# PCTL_SLEEP = 0x10
# PCTL_STANDBY_60 = 0x20
# PCTL_STANDBY_10 = 0x21
#
# REG_RST = 0x01
# RST_FLAG = 0x30
# RST_INIT = 0x3f
#
# REG_FPSC = 0x02
# FPSC_1FPS = 0x01
# FPSC_10FPS = 0x00
#
#
# REG_AVE = 0x07
# AVE_ENABLE = 0x20
# AVE_DISABLE = 0x00
#
# REG_TTHL = 0x0e
# REG_TTHH = 0x0f
#
# REG_TEMP_START = 0x80
# TEMP_N_BYTES = 128 # 64 px, 2 byte/px
#
# def __init__(self, i2c_bus, addr=AMG88_ADDR):
# self.i2c_bus = i2c_bus
# self.addr = addr
# self.reset()
# bbio.delay(100)
#
# def getFrame(self):
# """ Reads and returns the current temperature map as a 1-dimensional list
# of values in Celsius.
# """
# data = self.i2c_bus.readTransaction(self.addr, self.REG_TEMP_START,
# self.TEMP_N_BYTES)
# frame = []
# for i in range(0, self.TEMP_N_BYTES, 2):
# temp = data[i+1]<<8 | data[i]
# if temp & (0x1<<11):
# printit = True
# # do 12-bit 2's compliment conversion
# temp -= 4096
# temp *= 0.25 # convert to C
# frame.append(temp)
# return frame
#
# def getAmbientTemp(self):
# """ Reads and returns the temperature of the AMG88's internal thermistor.
# """
# low, high = self.i2c_bus.readTransaction(self.addr, self.REG_TTHL, 2)
# temp = high<<8 | low
# if temp & (0x1<<11):
# # do 12-bit 2's compliment conversion
# temp -= 4096
# temp *= 0.0625 # convert to C
# return temp
#
# def enableAveraging(self):
# """ Enables the AMG88's built-in moving average.
# """
# # This sequence is from a Grid-EYE specifications document, I haven't
# # seen it described anywhere:
# self.i2c_bus.write(self.addr, [0x1f, 0x50])
# self.i2c_bus.write(self.addr, [0x1f, 0x45])
# self.i2c_bus.write(self.addr, [0x1f, 0x57])
# self.i2c_bus.write(self.addr, [self.REG_AVE, self.AVE_ENABLE])
# self.i2c_bus.write(self.addr, [0x1f, 0x00])
#
# def disableAveraging(self):
# """ Disables the AMG88's built-in moving average.
# """
# self.i2c_bus.write(self.addr, [0x1f, 0x50])
# self.i2c_bus.write(self.addr, [0x1f, 0x45])
# self.i2c_bus.write(self.addr, [0x1f, 0x57])
# self.i2c_bus.write(self.addr, [self.REG_AVE, self.AVE_DISABLE])
# self.i2c_bus.write(self.addr, [0x1f, 0x00])
#
# def reset(self):
# """ Resets all registers to initial settings.
# """
# self.i2c_bus.write(self.addr, [self.REG_RST, self.RST_INIT])
. Output only the next line. | grideye = GridEYE(I2C1) |
Continue the code snippet: <|code_start|> exiting.
*This should generally not be called directly from user code. """
if not gpio_pin:
print "*unknown pinmux pin: %s" % gpio_pin
return
if SLOTS_FILE:
status = pinMux_dtOverlays(gpio_pin, mode, preserve_mode_on_exit)
else:
status = pinMux_universalIO(gpio_pin, mode, preserve_mode_on_exit)
if not status:
print "*could not configure pinmux for pin %s" % gpio_pin
def export(gpio_pin, unexport_on_exit=False):
""" Reserves a pin for userspace use with sysfs /sys/class/gpio interface.
If unexport_on_exit=True unexport(gpio_pin) will be called automatically
when the program exits. Returns True if pin was exported, False if it was
already under userspace control. """
if ("USR" in gpio_pin):
# The user LEDs are already under userspace control
return True
gpio_num = GPIO[gpio_pin]['gpio_num']
gpio_file = '%s/gpio%i' % (GPIO_FILE_BASE, gpio_num)
if (os.path.exists(gpio_file)):
# Pin already under userspace control
return True
with open(EXPORT_FILE, 'wb') as f:
f.write(str(gpio_num))
if unexport_on_exit:
<|code_end|>
. Use current file imports:
from config import OCP_PATH, GPIO, GPIO_FILE_BASE, EXPORT_FILE, UNEXPORT_FILE,\
SLOTS_FILE
from bbio.common import addToCleanup
import glob, os, cape_manager, bbio
and context (classes, functions, or code) from other files:
# Path: bbio/common.py
# def addToCleanup(routine):
# """ Takes a callable object to be called during the cleanup once a
# program has stopped, e.g. a function to close a log file, kill
# a thread, etc. """
# ADDITIONAL_CLEANUP.append(routine)
. Output only the next line. | addToCleanup(lambda: unexport(gpio_pin)) |
Here is a snippet: <|code_start|>"""
GridEYE_test.py
Alexander Hiam <alex@graycat.io>
Example program for PyBBIO's GridEYE library.
This example program is in the public domain.
"""
# Initialize the I2C bus:
I2C1.open()
# Create a GridEYE object:
<|code_end|>
. Write the next line using the current file imports:
from bbio import *
from bbio.libraries.GridEYE import GridEYE
and context from other files:
# Path: bbio/libraries/GridEYE/GridEYE.py
# class GridEYE(object):
# AMG88_ADDR = 0x68
# AMG88_ALT_ADDR = 0x69
#
# REG_PCTL = 0x00
# PCTL_NORMAL = 0x00
# PCTL_SLEEP = 0x10
# PCTL_STANDBY_60 = 0x20
# PCTL_STANDBY_10 = 0x21
#
# REG_RST = 0x01
# RST_FLAG = 0x30
# RST_INIT = 0x3f
#
# REG_FPSC = 0x02
# FPSC_1FPS = 0x01
# FPSC_10FPS = 0x00
#
#
# REG_AVE = 0x07
# AVE_ENABLE = 0x20
# AVE_DISABLE = 0x00
#
# REG_TTHL = 0x0e
# REG_TTHH = 0x0f
#
# REG_TEMP_START = 0x80
# TEMP_N_BYTES = 128 # 64 px, 2 byte/px
#
# def __init__(self, i2c_bus, addr=AMG88_ADDR):
# self.i2c_bus = i2c_bus
# self.addr = addr
# self.reset()
# bbio.delay(100)
#
# def getFrame(self):
# """ Reads and returns the current temperature map as a 1-dimensional list
# of values in Celsius.
# """
# data = self.i2c_bus.readTransaction(self.addr, self.REG_TEMP_START,
# self.TEMP_N_BYTES)
# frame = []
# for i in range(0, self.TEMP_N_BYTES, 2):
# temp = data[i+1]<<8 | data[i]
# if temp & (0x1<<11):
# printit = True
# # do 12-bit 2's compliment conversion
# temp -= 4096
# temp *= 0.25 # convert to C
# frame.append(temp)
# return frame
#
# def getAmbientTemp(self):
# """ Reads and returns the temperature of the AMG88's internal thermistor.
# """
# low, high = self.i2c_bus.readTransaction(self.addr, self.REG_TTHL, 2)
# temp = high<<8 | low
# if temp & (0x1<<11):
# # do 12-bit 2's compliment conversion
# temp -= 4096
# temp *= 0.0625 # convert to C
# return temp
#
# def enableAveraging(self):
# """ Enables the AMG88's built-in moving average.
# """
# # This sequence is from a Grid-EYE specifications document, I haven't
# # seen it described anywhere:
# self.i2c_bus.write(self.addr, [0x1f, 0x50])
# self.i2c_bus.write(self.addr, [0x1f, 0x45])
# self.i2c_bus.write(self.addr, [0x1f, 0x57])
# self.i2c_bus.write(self.addr, [self.REG_AVE, self.AVE_ENABLE])
# self.i2c_bus.write(self.addr, [0x1f, 0x00])
#
# def disableAveraging(self):
# """ Disables the AMG88's built-in moving average.
# """
# self.i2c_bus.write(self.addr, [0x1f, 0x50])
# self.i2c_bus.write(self.addr, [0x1f, 0x45])
# self.i2c_bus.write(self.addr, [0x1f, 0x57])
# self.i2c_bus.write(self.addr, [self.REG_AVE, self.AVE_DISABLE])
# self.i2c_bus.write(self.addr, [0x1f, 0x00])
#
# def reset(self):
# """ Resets all registers to initial settings.
# """
# self.i2c_bus.write(self.addr, [self.REG_RST, self.RST_INIT])
, which may include functions, classes, or code. Output only the next line. | grideye = GridEYE(I2C1) |
Given snippet: <|code_start|> if not self.term.protocol:
# Empty protocol list means any protocol, but any protocol in HF is
# represented as "all"
protocols_and_ports = [{'ipProtocol': 'all'}]
else:
for proto in filtered_protocols:
# If the protocol name is not supported, use the protocol number.
if proto not in self._ALLOW_PROTO_NAME:
proto = str(self.PROTO_MAP[proto])
logging.info('INFO: Term %s is being rendered using protocol number',
self.term.name)
proto_ports = {'ipProtocol': proto}
if self.term.destination_port:
ports = self._GetPorts()
if ports: # Only set when non-empty.
proto_ports['ports'] = ports
protocols_and_ports.append(proto_ports)
if self.api_version == 'ga':
term_dict['match'] = {layer_4_config: protocols_and_ports}
else:
term_dict['match'] = {'config': {layer_4_config: protocols_and_ports}}
# match needs a field called versionedExpr with value FIREWALL
# See documentation:
# https://cloud.google.com/compute/docs/reference/rest/beta/organizationSecurityPolicies/addRule
term_dict['match']['versionedExpr'] = 'FIREWALL'
ip_version = self.AF_MAP[self.address_family]
if ip_version == 4:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import copy
import re
from typing import Dict, Any
from absl import logging
from capirca.lib import gcp
from capirca.lib import nacaddr
and context:
# Path: capirca/lib/gcp.py
# class Error(Exception):
# class TermError(Error):
# class HeaderError(Error):
# class UnsupportedFilterTypeError(Error):
# class Term(aclgenerator.Term):
# class GCP(aclgenerator.ACLGenerator):
# _ALLOW_PROTO_NAME = frozenset(
# ['tcp', 'udp', 'icmp', 'esp', 'ah', 'ipip', 'sctp',
# 'all'
# ])
# _GOOD_DIRECTION = ['INGRESS', 'EGRESS']
# def _GetPorts(self):
# def _GetLoggingSetting(self):
# def __str__(self):
# def IsDefaultDeny(term):
# def IsProjectIDValid(project):
# def IsVPCNameValid(vpc):
# def TruncateString(raw_string, max_length):
# def GetIpv6TermName(term_name):
#
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
which might include code, classes, or functions. Output only the next line. | any_ip = [nacaddr.IP('0.0.0.0/0')] |
Given snippet: <|code_start|> self.destination = []
self.sport = []
self.dport = []
self.action = []
self.option = []
self.protocol = []
def __str__(self):
rval = []
rval.append(' Term: %s' % self.name)
rval.append(' Source-address:: %s' % ' '.join(self.source))
rval.append(' Destination-address:: %s' % ' '.join(self.destination))
rval.append(' Source-port:: %s' % ' '.join(self.sport))
rval.append(' Destination-port:: %s' % ' '.join(self.dport))
rval.append(' Protocol:: %s' % ' '.join(self.protocol))
rval.append(' Option:: %s' % ' '.join(self.option))
rval.append(' Action:: %s' % ' '.join(self.action))
return '\n'.join(rval)
class Policy:
"""Holds basic attributes of an unexpanded policy definition file."""
def __init__(self, filename, defs_data=None):
"""Build policy object and naming definitions from provided filenames.
Args:
filename: location of a .pol file
defs_data: location of naming definitions directory, if any
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from capirca.lib import naming
and context:
# Path: capirca/lib/naming.py
# class Error(Exception):
# class NamespaceCollisionError(Error):
# class BadNetmaskTypeError(Error):
# class NoDefinitionsError(Error):
# class ParseError(Error):
# class UndefinedAddressError(Error):
# class UndefinedServiceError(Error):
# class UndefinedPortError(Error):
# class UnexpectedDefinitionTypeError(Error):
# class NamingSyntaxError(Error):
# class _ItemUnit:
# class Naming:
# def __init__(self, symbol):
# def __init__(self, naming_dir=None, naming_file=None, naming_type=None):
# def _CheckUnseen(self, def_type):
# def GetIpParents(self, query):
# def GetServiceParents(self, query):
# def GetNetParents(self, query):
# def _GetParents(self, query, query_group):
# def GetNetChildren(self, query):
# def _GetChildren(self, query, query_group):
# def _IsIpFormat(self, item):
# def GetServiceNames(self):
# def GetService(self, query):
# def GetPortParents(self, query, proto):
# def GetServiceByProto(self, query, proto):
# def GetNetAddr(self, token):
# def GetNet(self, query):
# def _Parse(self, defdirectory, def_type):
# def _ParseFile(self, file_handle, def_type):
# def ParseServiceList(self, data):
# def ParseNetworkList(self, data):
# def _ParseLine(self, line, definition_type):
which might include code, classes, or functions. Output only the next line. | self.defs = naming.Naming(defs_data) |
Predict the next line after this snippet: <|code_start|> self._Parse(naming_dir, 'services')
self._CheckUnseen('services')
self._Parse(naming_dir, 'networks')
self._CheckUnseen('networks')
def _CheckUnseen(self, def_type):
if def_type == 'services':
if self.unseen_services:
raise UndefinedServiceError('%s %s' % (
'The following tokens were nested as a values, but not defined',
self.unseen_services))
if def_type == 'networks':
if self.unseen_networks:
raise UndefinedAddressError('%s %s' % (
'The following tokens were nested as a values, but not defined',
self.unseen_networks))
def GetIpParents(self, query):
"""Return network tokens that contain IP in query.
Args:
query: an ip string ('10.1.1.1') or nacaddr.IP object
Returns:
A sorted list of unique parent tokens.
"""
base_parents = []
recursive_parents = []
# convert string to nacaddr, if arg is ipaddr then convert str() to nacaddr
<|code_end|>
using the current file's imports:
import glob
import os
import re
from absl import logging
from capirca.lib import nacaddr
from capirca.lib import port as portlib
and any relevant context from other files:
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
#
# Path: capirca/lib/port.py
# class Error(Exception):
# class BadPortValue(Error):
# class BadPortRange(Error):
# class InvalidRange(Error):
# class NotSinglePort(Error):
# class PPP:
# def __init__(self, service):
# def is_range(self):
# def is_single_port(self):
# def start(self):
# def end(self):
# def __contains__(self, other):
# def __lt__(self, other):
# def __gt__(self, other):
# def __le__(self, other):
# def __ge__(self, other):
# def __eq__(self, other):
# def Port(port):
. Output only the next line. | if (not isinstance(query, nacaddr.IPv4) and |
Next line prediction: <|code_start|> service = next_item.split('#')[0].strip()
# Recognized token, not a value.
if '/' not in service:
# Make sure we are not descending into recursion hell.
if service not in already_done:
already_done.add(service)
try:
expandset.update(self.GetService(service))
except UndefinedServiceError as e:
# One of the services in query is undefined, refine the error msg.
raise UndefinedServiceError('%s (in %s)' % (e, query))
else:
expandset.add(service)
return sorted(expandset)
def GetPortParents(self, query, proto):
"""Returns a list of all service tokens containing the port/protocol pair.
Args:
query: port number ('22') as str
proto: protocol name ('tcp') as str
Returns:
A list of service tokens: ['SSH', 'HTTPS']
Raises:
UndefinedPortError: If the port/protocol pair isn't used in any
service tokens.
"""
# turn the given port and protocol into a PortProtocolPair object
<|code_end|>
. Use current file imports:
(import glob
import os
import re
from absl import logging
from capirca.lib import nacaddr
from capirca.lib import port as portlib)
and context including class names, function names, or small code snippets from other files:
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
#
# Path: capirca/lib/port.py
# class Error(Exception):
# class BadPortValue(Error):
# class BadPortRange(Error):
# class InvalidRange(Error):
# class NotSinglePort(Error):
# class PPP:
# def __init__(self, service):
# def is_range(self):
# def is_single_port(self):
# def start(self):
# def end(self):
# def __contains__(self, other):
# def __lt__(self, other):
# def __gt__(self, other):
# def __le__(self, other):
# def __ge__(self, other):
# def __eq__(self, other):
# def Port(port):
. Output only the next line. | given_ppp = portlib.PPP(query + '/' + proto) |
Predict the next line after this snippet: <|code_start|>
To find the difference of network tokens to which 2 IPs belong use
$ cgrep.py -g 1.1.1.1 2.2.2.2
To find which IPs are in the 'FOO' network token use
$ cgrep.py -o FOO
To find which port & protocol pairs are in a service token 'FOO' use
$ cgrep.py -s FOO
To find which service tokens contain port '22' and protocol 'tcp' use
$ cgrep.py -p 22 tcp
"""
def is_valid_ip(arg):
"""Validates a value to be an IP or not.
Args:
arg: potential IP address as a string.
Returns:
arg as IP object (if arg is an IP)
Raises:
Error (if arg is not an IP)
"""
try:
<|code_end|>
using the current file's imports:
import argparse
import pprint
import sys
from absl import app
from absl import logging
from capirca.lib import nacaddr
from capirca.lib import naming
and any relevant context from other files:
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
#
# Path: capirca/lib/naming.py
# class Error(Exception):
# class NamespaceCollisionError(Error):
# class BadNetmaskTypeError(Error):
# class NoDefinitionsError(Error):
# class ParseError(Error):
# class UndefinedAddressError(Error):
# class UndefinedServiceError(Error):
# class UndefinedPortError(Error):
# class UnexpectedDefinitionTypeError(Error):
# class NamingSyntaxError(Error):
# class _ItemUnit:
# class Naming:
# def __init__(self, symbol):
# def __init__(self, naming_dir=None, naming_file=None, naming_type=None):
# def _CheckUnseen(self, def_type):
# def GetIpParents(self, query):
# def GetServiceParents(self, query):
# def GetNetParents(self, query):
# def _GetParents(self, query, query_group):
# def GetNetChildren(self, query):
# def _GetChildren(self, query, query_group):
# def _IsIpFormat(self, item):
# def GetServiceNames(self):
# def GetService(self, query):
# def GetPortParents(self, query, proto):
# def GetServiceByProto(self, query, proto):
# def GetNetAddr(self, token):
# def GetNet(self, query):
# def _Parse(self, defdirectory, def_type):
# def _ParseFile(self, file_handle, def_type):
# def ParseServiceList(self, data):
# def ParseNetworkList(self, data):
# def _ParseLine(self, line, definition_type):
. Output only the next line. | nacaddr.IP(arg) |
Predict the next line for this snippet: <|code_start|> metavar=('OBJ', 'OBJ'),
help=('Compare the two given network '
'definition tokens'))
exclusive_group.add_argument('-g', '--gmp', dest='gmp', nargs=2,
type=is_valid_ip, metavar=('IP', 'IP'),
help=('Diff the network objects to'
' which the given IP(s) belong'))
exclusive_group.add_argument('-o', '--obj', dest='obj', nargs='+',
help=('Return list of IP(s) contained within '
'the given token(s)'))
exclusive_group.add_argument('-s', '--svc', dest='svc', nargs='+',
help=('Return list of port(s) contained '
'within given token(s)'))
exclusive_group.add_argument('-p', '--port', dest='port', nargs=2,
metavar=('PORT', 'PROTO'),
help=('Returns a list of tokens containing '
'the given port and protocol'))
return parser
def main(argv):
"""Determines the code path based on the arguments passed."""
del argv # Unused.
parser = cli_options()
options = parser.parse_args()
<|code_end|>
with the help of current file imports:
import argparse
import pprint
import sys
from absl import app
from absl import logging
from capirca.lib import nacaddr
from capirca.lib import naming
and context from other files:
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
#
# Path: capirca/lib/naming.py
# class Error(Exception):
# class NamespaceCollisionError(Error):
# class BadNetmaskTypeError(Error):
# class NoDefinitionsError(Error):
# class ParseError(Error):
# class UndefinedAddressError(Error):
# class UndefinedServiceError(Error):
# class UndefinedPortError(Error):
# class UnexpectedDefinitionTypeError(Error):
# class NamingSyntaxError(Error):
# class _ItemUnit:
# class Naming:
# def __init__(self, symbol):
# def __init__(self, naming_dir=None, naming_file=None, naming_type=None):
# def _CheckUnseen(self, def_type):
# def GetIpParents(self, query):
# def GetServiceParents(self, query):
# def GetNetParents(self, query):
# def _GetParents(self, query, query_group):
# def GetNetChildren(self, query):
# def _GetChildren(self, query, query_group):
# def _IsIpFormat(self, item):
# def GetServiceNames(self):
# def GetService(self, query):
# def GetPortParents(self, query, proto):
# def GetServiceByProto(self, query, proto):
# def GetNetAddr(self, token):
# def GetNet(self, query):
# def _Parse(self, defdirectory, def_type):
# def _ParseFile(self, file_handle, def_type):
# def ParseServiceList(self, data):
# def ParseNetworkList(self, data):
# def _ParseLine(self, line, definition_type):
, which may contain function names, class names, or code. Output only the next line. | db = naming.Naming(options.defs) |
Given the code snippet: <|code_start|># Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Aruba generator."""
_PLATFORM = 'aruba'
_COMMENT_MARKER = '#'
_TERMINATOR_MARKER = '!'
class Error(Exception):
"""Base error class."""
<|code_end|>
, generate the next line using the imports in this file:
import datetime
import logging
from capirca.lib import aclgenerator
and context (functions, classes, or occasionally code) from other files:
# Path: capirca/lib/aclgenerator.py
# class Error(Exception):
# class NoPlatformPolicyError(Error):
# class UnknownIcmpTypeError(Error):
# class MismatchIcmpInetError(Error):
# class EstablishedError(Error):
# class UnsupportedAFError(Error):
# class DuplicateTermError(Error):
# class UnsupportedFilterError(Error):
# class UnsupportedTargetOptionError(Error):
# class TermNameTooLongError(Error):
# class Term:
# class ACLGenerator:
# ICMP_TYPE = policy.Term.ICMP_TYPE
# PROTO_MAP = {'hopopt': 0,
# 'icmp': 1,
# 'igmp': 2,
# 'ggp': 3,
# 'ipip': 4,
# 'tcp': 6,
# 'egp': 8,
# 'igp': 9,
# 'udp': 17,
# 'rdp': 27,
# 'ipv6': 41,
# 'ipv6-route': 43,
# 'fragment': 44,
# 'rsvp': 46,
# 'gre': 47,
# 'esp': 50,
# 'ah': 51,
# 'icmpv6': 58,
# 'ipv6-nonxt': 59,
# 'ipv6-opts': 60,
# 'ospf': 89,
# 'pim': 103,
# 'vrrp': 112,
# 'l2tp': 115,
# 'sctp': 132,
# 'udplite': 136,
# 'all': -1, # Used for GCE default deny, do not use in pol file.
# }
# AF_MAP = {'inet': 4,
# 'inet6': 6,
# 'bridge': 4 # if this doesn't exist, output includes v4 & v6
# }
# ALWAYS_PROTO_NUM = ['ipip']
# PROTO_MAP_BY_NUMBER = dict([(v, k) for (k, v) in PROTO_MAP.items()])
# AF_MAP_BY_NUMBER = dict([(v, k) for (k, v) in AF_MAP.items()])
# NO_AF_LOG_ADDR = string.Template('Term $term will not be rendered, as it has'
# ' $direction address match specified but no'
# ' $direction addresses of $af address family'
# ' are present.')
# NO_AF_LOG_PROTO = string.Template('Term $term will not be rendered, as it has'
# ' $proto match specified but the ACL is of'
# ' $af address family.')
# _PLATFORM = None
# _DEFAULT_PROTOCOL = 'ip'
# _SUPPORTED_AF = {'inet', 'inet6'}
# _FILTER_BLACKLIST = {}
# WARN_IF_UNSUPPORTED = {
# 'restrict_address_family',
# 'counter',
# 'destination_tag',
# 'filter_term',
# 'logging',
# 'loss_priority',
# 'owner',
# 'qos',
# 'routing_instance',
# 'policer',
# 'source_tag'
# }
# _ABBREVIATION_TABLE = [
# # Service abbreviations first.
# ('experiment', 'EXP'),
# ('wifi-radius', 'W-R'),
# ('customer', 'CUST'),
# ('server', 'SRV'),
# # Next, common routing terms
# ('global', 'GBL'),
# ('google', 'GOOG'),
# ('service', 'SVC'),
# ('router', 'RTR'),
# ('internal', 'INT'),
# ('external', 'EXT'),
# ('transit', 'TRNS'),
# ('management', 'MGMT'),
# # State info
# ('established', 'EST'),
# ('unreachable', 'UNR'),
# ('fragment', 'FRAG'),
# ('accept', 'ACC'),
# ('discard', 'DISC'),
# ('reject', 'REJ'),
# ('replies', 'RPL'),
# ('request', 'REQ'),
# ]
# _TERM_MAX_LENGTH = 62
# def __init__(self, term):
# def NormalizeAddressFamily(self, af):
# def NormalizeIcmpTypes(self, icmp_types, protocols, af):
# def __init__(self, pol, exp_info):
# def _TranslatePolicy(self, pol, exp_info):
# def _BuildTokens(self):
# def _GetSupportedTokens(self):
# def FixHighPorts(self, term, af='inet', all_protocols_stateful=False):
# def FixTermLength(self, term_name, abbreviate=False, truncate=False):
# def ProtocolNameToNumber(protocols, proto_to_num, name_to_num_map):
# def AddRepositoryTags(prefix='', rid=True, date=True, revision=True,
# wrap=False):
# def WrapWords(textlist, size, joiner='\n'):
. Output only the next line. | class Term(aclgenerator.Term): |
Using the snippet: <|code_start|># Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Juniper SRX generator for loopback ACLs.
This is a subclass of Juniper generator. Juniper SRX loopback filter
uses the same syntax as regular Juniper stateless ACLs, with minor
differences. This subclass effects those differences.
"""
<|code_end|>
, determine the next line of code. You have imports:
from capirca.lib import juniper
and context (class names, function names, or code) available:
# Path: capirca/lib/juniper.py
# class Error(Exception):
# class JuniperTermPortProtocolError(Error):
# class TcpEstablishedWithNonTcpError(Error):
# class JuniperDuplicateTermError(Error):
# class UnsupportedFilterError(Error):
# class PrecedenceError(Error):
# class JuniperIndentationError(Error):
# class JuniperNextIpError(Error):
# class JuniperMultipleTerminatingActionError(Error):
# class JuniperFragmentInV6Error(Error):
# class Config:
# class Term(aclgenerator.Term):
# class Juniper(aclgenerator.ACLGenerator):
# def __init__(self, indent=0, tabstop=4):
# def __str__(self):
# def Append(self, line, verbatim=False):
# def __init__(self, term, term_type, enable_dsmo, noverbose):
# def __str__(self):
# def NextIpCheck(next_ip, term_name):
# def CheckTerminatingAction(self):
# def _MinimizePrefixes(self, include, exclude):
# def _Comment(self, addr, exclude=False, line_length=132):
# def _Group(self, group, lc=True):
# def _FormattedGroup(el, lc=True):
# def _BuildTokens(self):
# def _TranslatePolicy(self, pol, exp_info):
# def __str__(self):
# _PLATFORM = 'juniper'
# _DEFAULT_INDENT = 12
# ACTIONS = {'accept': 'accept',
# 'deny': 'discard',
# 'reject': 'reject',
# 'next': 'next term',
# 'reject-with-tcp-rst': 'reject tcp-reset',
# 'encapsulate': 'encapsulate'}
# _TERM_TYPE = {'inet': {'addr': 'address',
# 'saddr': 'source-address',
# 'daddr': 'destination-address',
# 'protocol': 'protocol',
# 'protocol-except': 'protocol-except',
# 'tcp-est': 'tcp-established'},
# 'inet6': {'addr': 'address',
# 'saddr': 'source-address',
# 'daddr': 'destination-address',
# 'protocol': 'next-header',
# 'protocol-except': 'next-header-except',
# 'tcp-est': 'tcp-established'},
# 'bridge': {'addr': 'ip-address',
# 'saddr': 'ip-source-address',
# 'daddr': 'ip-destination-address',
# 'protocol': 'ip-protocol',
# 'protocol-except': 'ip-protocol-except',
# 'tcp-est': 'tcp-flags "(ack|rst)"'}
# }
# _PLATFORM = 'juniper'
# _DEFAULT_PROTOCOL = 'ip'
# _SUPPORTED_AF = frozenset(('inet', 'inet6', 'bridge', 'mixed'))
# _TERM = Term
# SUFFIX = '.jcl'
. Output only the next line. | class Term(juniper.Term): |
Given snippet: <|code_start|>
a text representation of the config can be extracted with str().
attributes:
indent: The number of leading spaces on the current line.
lines: the text lines of the configuration.
"""
def __init__(self):
self.lines = []
def __str__(self):
return "\n".join(self.lines)
def Append(self, line_indent, line, verbatim=False):
"""append one line to the configuration.
Args:
line_indent: config specific spaces prepended to the line
line: the configuratoin string to append to the config.
verbatim: append line without adjusting indentation. Default False.
"""
if verbatim:
self.lines.append(line)
return
self.lines.append(line_indent + line.strip())
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import copy
import datetime
import re
import textwrap
import six
from absl import logging
from capirca.lib import aclgenerator
and context:
# Path: capirca/lib/aclgenerator.py
# class Error(Exception):
# class NoPlatformPolicyError(Error):
# class UnknownIcmpTypeError(Error):
# class MismatchIcmpInetError(Error):
# class EstablishedError(Error):
# class UnsupportedAFError(Error):
# class DuplicateTermError(Error):
# class UnsupportedFilterError(Error):
# class UnsupportedTargetOptionError(Error):
# class TermNameTooLongError(Error):
# class Term:
# class ACLGenerator:
# ICMP_TYPE = policy.Term.ICMP_TYPE
# PROTO_MAP = {'hopopt': 0,
# 'icmp': 1,
# 'igmp': 2,
# 'ggp': 3,
# 'ipip': 4,
# 'tcp': 6,
# 'egp': 8,
# 'igp': 9,
# 'udp': 17,
# 'rdp': 27,
# 'ipv6': 41,
# 'ipv6-route': 43,
# 'fragment': 44,
# 'rsvp': 46,
# 'gre': 47,
# 'esp': 50,
# 'ah': 51,
# 'icmpv6': 58,
# 'ipv6-nonxt': 59,
# 'ipv6-opts': 60,
# 'ospf': 89,
# 'pim': 103,
# 'vrrp': 112,
# 'l2tp': 115,
# 'sctp': 132,
# 'udplite': 136,
# 'all': -1, # Used for GCE default deny, do not use in pol file.
# }
# AF_MAP = {'inet': 4,
# 'inet6': 6,
# 'bridge': 4 # if this doesn't exist, output includes v4 & v6
# }
# ALWAYS_PROTO_NUM = ['ipip']
# PROTO_MAP_BY_NUMBER = dict([(v, k) for (k, v) in PROTO_MAP.items()])
# AF_MAP_BY_NUMBER = dict([(v, k) for (k, v) in AF_MAP.items()])
# NO_AF_LOG_ADDR = string.Template('Term $term will not be rendered, as it has'
# ' $direction address match specified but no'
# ' $direction addresses of $af address family'
# ' are present.')
# NO_AF_LOG_PROTO = string.Template('Term $term will not be rendered, as it has'
# ' $proto match specified but the ACL is of'
# ' $af address family.')
# _PLATFORM = None
# _DEFAULT_PROTOCOL = 'ip'
# _SUPPORTED_AF = {'inet', 'inet6'}
# _FILTER_BLACKLIST = {}
# WARN_IF_UNSUPPORTED = {
# 'restrict_address_family',
# 'counter',
# 'destination_tag',
# 'filter_term',
# 'logging',
# 'loss_priority',
# 'owner',
# 'qos',
# 'routing_instance',
# 'policer',
# 'source_tag'
# }
# _ABBREVIATION_TABLE = [
# # Service abbreviations first.
# ('experiment', 'EXP'),
# ('wifi-radius', 'W-R'),
# ('customer', 'CUST'),
# ('server', 'SRV'),
# # Next, common routing terms
# ('global', 'GBL'),
# ('google', 'GOOG'),
# ('service', 'SVC'),
# ('router', 'RTR'),
# ('internal', 'INT'),
# ('external', 'EXT'),
# ('transit', 'TRNS'),
# ('management', 'MGMT'),
# # State info
# ('established', 'EST'),
# ('unreachable', 'UNR'),
# ('fragment', 'FRAG'),
# ('accept', 'ACC'),
# ('discard', 'DISC'),
# ('reject', 'REJ'),
# ('replies', 'RPL'),
# ('request', 'REQ'),
# ]
# _TERM_MAX_LENGTH = 62
# def __init__(self, term):
# def NormalizeAddressFamily(self, af):
# def NormalizeIcmpTypes(self, icmp_types, protocols, af):
# def __init__(self, pol, exp_info):
# def _TranslatePolicy(self, pol, exp_info):
# def _BuildTokens(self):
# def _GetSupportedTokens(self):
# def FixHighPorts(self, term, af='inet', all_protocols_stateful=False):
# def FixTermLength(self, term_name, abbreviate=False, truncate=False):
# def ProtocolNameToNumber(protocols, proto_to_num, name_to_num_map):
# def AddRepositoryTags(prefix='', rid=True, date=True, revision=True,
# wrap=False):
# def WrapWords(textlist, size, joiner='\n'):
which might include code, classes, or functions. Output only the next line. | class Term(aclgenerator.Term): |
Continue the code snippet: <|code_start|># Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Ipset iptables generator. This is a subclass of Iptables generator.
ipset is a system inside the Linux kernel, which can very efficiently store
and match IPv4 and IPv6 addresses. This can be used to dramatically increase
performace of iptables firewall.
"""
class Error(Exception):
"""Base error class."""
<|code_end|>
. Use current file imports:
import string
from capirca.lib import iptables
from capirca.lib import nacaddr
and context (classes, functions, or code) from other files:
# Path: capirca/lib/iptables.py
# class Term(aclgenerator.Term):
# class Iptables(aclgenerator.ACLGenerator):
# class Error(Exception):
# class BadPortsError(Error):
# class UnsupportedFilterError(Error):
# class NoIptablesPolicyError(Error):
# class TcpEstablishedError(Error):
# class EstablishedError(Error):
# class UnsupportedDefaultActionError(Error):
# class UnsupportedTargetOptionError(Error):
# class LimitButNoLogError(Error):
# _PLATFORM = 'iptables'
# _POSTJUMP_FORMAT = None
# _PREJUMP_FORMAT = Template('-A $filter -j $term')
# _TERM_FORMAT = Template('-N $term')
# _COMMENT_FORMAT = Template('-A $term -m comment --comment "$comment"')
# _FILTER_TOP_FORMAT = Template('-A $term')
# _LOG_FORMAT = Template('-j LOG --log-prefix $term')
# _PROTO_TABLE = {
# 'icmpv6': '-p ipv6-icmp',
# 'icmp': '-p icmp',
# 'tcp': '-p tcp',
# 'udp': '-p udp',
# 'all': '-p all',
# 'esp': '-p esp',
# 'ah': '-p ah',
# 'gre': '-p gre',
# }
# _TCP_FLAGS_TABLE = {
# 'syn': 'SYN',
# 'ack': 'ACK',
# 'fin': 'FIN',
# 'rst': 'RST',
# 'urg': 'URG',
# 'psh': 'PSH',
# 'all': 'ALL',
# 'none': 'NONE',
# }
# _KNOWN_OPTIONS_MATCHERS = {
# # '! -f' also matches non-fragmented packets.
# 'first-fragment': '-m u32 --u32 4&0x3FFF=0x2000',
# 'initial': '--syn',
# 'tcp-initial': '--syn',
# 'sample': '',
# }
# _PLATFORM = 'iptables'
# _DEFAULT_PROTOCOL = 'all'
# SUFFIX = ''
# _RENDER_PREFIX = None
# _RENDER_SUFFIX = None
# _DEFAULTACTION_FORMAT = '-P %s %s'
# _DEFAULTACTION_FORMAT_CUSTOM_CHAIN = '-N %s'
# _DEFAULT_ACTION = 'DROP'
# _TERM = Term
# _TERM_MAX_LENGTH = 24
# _GOOD_FILTERS = ['INPUT', 'OUTPUT', 'FORWARD']
# _GOOD_OPTIONS = ['nostate', 'abbreviateterms', 'truncateterms', 'noverbose']
# def __init__(self, term, filter_name, trackstate, filter_action, af='inet',
# verbose=True):
# def __str__(self):
# def _CalculateAddresses(self, term_saddr, exclude_saddr,
# term_daddr, exclude_daddr):
# def _FormatPart(self, protocol, saddr, sport, daddr, dport, options,
# tcp_flags, icmp_type, code, track_flags, sint, dint, log_hits,
# action):
# def _GenerateAddressStatement(self, saddr, daddr):
# def _GeneratePortStatement(self, ports, source=False, dest=False):
# def _SetDefaultAction(self):
# def __init__(self, pol, exp_info):
# def _BuildTokens(self):
# def _WarnIfCustomTarget(self, target):
# def _TranslatePolicy(self, pol, exp_info):
# def SetTarget(self, target, action=None):
# def __str__(self):
#
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
. Output only the next line. | class Term(iptables.Term): |
Next line prediction: <|code_start|> """
target_af = self.AF_MAP[self.af]
src_addr_list = self._CalculateAddrList(src_addr_list,
src_addr_exclude_list, target_af,
'src')
dst_addr_list = self._CalculateAddrList(dst_addr_list,
dst_addr_exclude_list, target_af,
'dst')
return (src_addr_list, [], dst_addr_list, [])
def _CalculateAddrList(self, addr_list, addr_exclude_list,
target_af, direction):
"""Calculates and stores address list for target AF and direction.
Args:
addr_list: address list.
addr_exclude_list: address exclude list of the term.
target_af: target address family.
direction: direction in which address list will be used.
Returns:
calculated address list.
"""
if not addr_list:
addr_list = [self._all_ips]
addr_list = [addr for addr in addr_list if addr.version == target_af]
if addr_exclude_list:
addr_exclude_list = [addr_exclude for addr_exclude in addr_exclude_list if
addr_exclude.version == target_af]
<|code_end|>
. Use current file imports:
(import string
from capirca.lib import iptables
from capirca.lib import nacaddr)
and context including class names, function names, or small code snippets from other files:
# Path: capirca/lib/iptables.py
# class Term(aclgenerator.Term):
# class Iptables(aclgenerator.ACLGenerator):
# class Error(Exception):
# class BadPortsError(Error):
# class UnsupportedFilterError(Error):
# class NoIptablesPolicyError(Error):
# class TcpEstablishedError(Error):
# class EstablishedError(Error):
# class UnsupportedDefaultActionError(Error):
# class UnsupportedTargetOptionError(Error):
# class LimitButNoLogError(Error):
# _PLATFORM = 'iptables'
# _POSTJUMP_FORMAT = None
# _PREJUMP_FORMAT = Template('-A $filter -j $term')
# _TERM_FORMAT = Template('-N $term')
# _COMMENT_FORMAT = Template('-A $term -m comment --comment "$comment"')
# _FILTER_TOP_FORMAT = Template('-A $term')
# _LOG_FORMAT = Template('-j LOG --log-prefix $term')
# _PROTO_TABLE = {
# 'icmpv6': '-p ipv6-icmp',
# 'icmp': '-p icmp',
# 'tcp': '-p tcp',
# 'udp': '-p udp',
# 'all': '-p all',
# 'esp': '-p esp',
# 'ah': '-p ah',
# 'gre': '-p gre',
# }
# _TCP_FLAGS_TABLE = {
# 'syn': 'SYN',
# 'ack': 'ACK',
# 'fin': 'FIN',
# 'rst': 'RST',
# 'urg': 'URG',
# 'psh': 'PSH',
# 'all': 'ALL',
# 'none': 'NONE',
# }
# _KNOWN_OPTIONS_MATCHERS = {
# # '! -f' also matches non-fragmented packets.
# 'first-fragment': '-m u32 --u32 4&0x3FFF=0x2000',
# 'initial': '--syn',
# 'tcp-initial': '--syn',
# 'sample': '',
# }
# _PLATFORM = 'iptables'
# _DEFAULT_PROTOCOL = 'all'
# SUFFIX = ''
# _RENDER_PREFIX = None
# _RENDER_SUFFIX = None
# _DEFAULTACTION_FORMAT = '-P %s %s'
# _DEFAULTACTION_FORMAT_CUSTOM_CHAIN = '-N %s'
# _DEFAULT_ACTION = 'DROP'
# _TERM = Term
# _TERM_MAX_LENGTH = 24
# _GOOD_FILTERS = ['INPUT', 'OUTPUT', 'FORWARD']
# _GOOD_OPTIONS = ['nostate', 'abbreviateterms', 'truncateterms', 'noverbose']
# def __init__(self, term, filter_name, trackstate, filter_action, af='inet',
# verbose=True):
# def __str__(self):
# def _CalculateAddresses(self, term_saddr, exclude_saddr,
# term_daddr, exclude_daddr):
# def _FormatPart(self, protocol, saddr, sport, daddr, dport, options,
# tcp_flags, icmp_type, code, track_flags, sint, dint, log_hits,
# action):
# def _GenerateAddressStatement(self, saddr, daddr):
# def _GeneratePortStatement(self, ports, source=False, dest=False):
# def _SetDefaultAction(self):
# def __init__(self, pol, exp_info):
# def _BuildTokens(self):
# def _WarnIfCustomTarget(self, target):
# def _TranslatePolicy(self, pol, exp_info):
# def SetTarget(self, target, action=None):
# def __str__(self):
#
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
. Output only the next line. | addr_list = nacaddr.ExcludeAddrs(addr_list, addr_exclude_list) |
Continue the code snippet: <|code_start|># Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class FieldTest(absltest.TestCase):
def setUp(self):
super().setUp()
logging.debug('======> %s <======', self.id())
def testAppendAppends(self):
<|code_end|>
. Use current file imports:
from absl.testing import absltest
from absl import logging
from capirca.lib import policy_simple
and context (classes, functions, or code) from other files:
# Path: capirca/lib/policy_simple.py
# class Field:
# class IntegerField(Field):
# class NamingField(Field):
# class Action(Field):
# class Address(NamingField):
# class Port(NamingField):
# class Comment(Field):
# class Counter(Field):
# class Encapsulate(Field):
# class DestinationAddress(Address):
# class DestinationExclude(Address):
# class DestinationInterface(Field):
# class DestinationPort(Port):
# class DestinationPrefix(Field):
# class DestinationPrefixExcept(Field):
# class DestinationTag(Field):
# class DscpMatch(Field):
# class DscpSet(Field):
# class EtherType(Field):
# class Expiration(Field):
# class FragmentOffset(Field):
# class ForwardingClass(Field):
# class ForwardingClassExcept(Field):
# class IcmpCode(Field):
# class IcmpType(Field):
# class Logging(Field):
# class LossPriority(Field):
# class Option(Field):
# class Owner(Field):
# class NextIP(Field):
# class PacketLength(Field):
# class Platform(Field):
# class PlatformExclude(Field):
# class Policer(Field):
# class Precedence(Field):
# class Protocol(Field):
# class ProtocolExcept(Field):
# class Qos(Field):
# class PANApplication(Field):
# class RoutingInstance(Field):
# class SourceAddress(Address):
# class SourceExclude(Address):
# class SourceInterface(Field):
# class SourcePort(Port):
# class SourcePrefix(Field):
# class SourcePrefixExcept(Field):
# class SourceTag(Field):
# class Target(Field):
# class Timeout(IntegerField):
# class TrafficType(Field):
# class TrafficClassCount(Field):
# class Verbatim(Field):
# class Vpn(Field):
# class Block:
# class Header(Block):
# class Term(Block):
# class BlankLine:
# class CommentLine:
# class Include:
# class Policy:
# class PolicyParser:
# def __init__(self, value):
# def __str__(self):
# def __eq__(self, o):
# def __ne__(self, o):
# def Append(self, value):
# def ValueStr(self):
# def __init__(self, value):
# def __init__(self, value):
# def ParseString(self, value):
# def ValidatePart(self, part):
# def Append(self, value):
# def ValueStr(self):
# def ValueStr(self):
# def __init__(self):
# def __iter__(self):
# def __getitem__(self, i):
# def __str__(self):
# def AddField(self, field):
# def FieldsWithType(self, f_type):
# def Match(self, match_fn):
# def Name(self):
# def __eq__(self, o):
# def __ne__(self, o):
# def __init__(self, name):
# def Name(self):
# def __eq__(self, o):
# def Describe(self):
# def __str__(self):
# def __eq__(self, o):
# def __ne__(self, o):
# def __init__(self, data):
# def __str__(self):
# def __eq__(self, o):
# def __ne__(self, o):
# def __init__(self, identifier):
# def __str__(self):
# def __eq__(self, o):
# def __ne__(self, o):
# def __init__(self, identifier):
# def AddMember(self, member):
# def __str__(self):
# def __iter__(self):
# def __getitem__(self, i):
# def Match(self, match_fn):
# def MatchFields(self, block_match_fn, field_match_fn):
# def __init__(self, data, identifier):
# def Parse(self):
# def ParseTopLevel(self, line):
# def ParseCommentLine(self, line):
# def ParseIncludeLine(self, line):
# def ParseHeaderLine(self, line):
# def ParseTermLine(self, line):
# def ParseInBlock(self, line):
# def ParseField(self, line):
. Output only the next line. | f = policy_simple.Field('Testvalue') |
Predict the next line after this snippet: <|code_start|>"""Unittest for GCP Firewall Generator module."""
class HelperFunctionsTest(parameterized.TestCase):
@parameterized.named_parameters(
('lowercase', 'project'),
('lowercase_w_hyphen', 'project-id'),
('lowercase_w_numbers', 'project123'),
('lowercase_w_numbers_hyphens', 'project-1-2-3'))
def testIsProjectIDValidPasses(self, project):
<|code_end|>
using the current file's imports:
from absl.testing import absltest
from absl.testing import parameterized
from capirca.lib import gcp
and any relevant context from other files:
# Path: capirca/lib/gcp.py
# class Error(Exception):
# class TermError(Error):
# class HeaderError(Error):
# class UnsupportedFilterTypeError(Error):
# class Term(aclgenerator.Term):
# class GCP(aclgenerator.ACLGenerator):
# _ALLOW_PROTO_NAME = frozenset(
# ['tcp', 'udp', 'icmp', 'esp', 'ah', 'ipip', 'sctp',
# 'all'
# ])
# _GOOD_DIRECTION = ['INGRESS', 'EGRESS']
# def _GetPorts(self):
# def _GetLoggingSetting(self):
# def __str__(self):
# def IsDefaultDeny(term):
# def IsProjectIDValid(project):
# def IsVPCNameValid(vpc):
# def TruncateString(raw_string, max_length):
# def GetIpv6TermName(term_name):
. Output only the next line. | self.assertTrue(gcp.IsProjectIDValid(project)) |
Given snippet: <|code_start|># Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for nacaddr.py module."""
class NacaddrUnitTest(absltest.TestCase):
"""Unit Test for nacaddr.py.
nacaddr class extends ipaddr by adding .text fields to allow
comments for each of the IPv4 and IPv6 classes.
"""
def setUp(self):
super().setUp()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from absl.testing import absltest
from capirca.lib import nacaddr
and context:
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
which might include code, classes, or functions. Output only the next line. | self.addr1 = nacaddr.IPv4(u'10.0.0.0/8', 'The 10 block') |
Predict the next line after this snippet: <|code_start|>"""Google Cloud Armor Firewall Generator.
Refer to the links below for more information
https://cloud.google.com/armor/
https://cloud.google.com/armor/docs/
"""
# Generic error class
class Error(Exception):
"""Generic error class."""
class ExceededMaxTermsError(Error):
"""Raised when number of terms in a policy exceed _MAX_RULES_PER_POLICY."""
class UnsupportedFilterTypeError(Error):
"""Raised when unsupported filter type (i.e address family) is specified."""
<|code_end|>
using the current file's imports:
import copy
import json
import logging
import six
from capirca.lib import aclgenerator
and any relevant context from other files:
# Path: capirca/lib/aclgenerator.py
# class Error(Exception):
# class NoPlatformPolicyError(Error):
# class UnknownIcmpTypeError(Error):
# class MismatchIcmpInetError(Error):
# class EstablishedError(Error):
# class UnsupportedAFError(Error):
# class DuplicateTermError(Error):
# class UnsupportedFilterError(Error):
# class UnsupportedTargetOptionError(Error):
# class TermNameTooLongError(Error):
# class Term:
# class ACLGenerator:
# ICMP_TYPE = policy.Term.ICMP_TYPE
# PROTO_MAP = {'hopopt': 0,
# 'icmp': 1,
# 'igmp': 2,
# 'ggp': 3,
# 'ipip': 4,
# 'tcp': 6,
# 'egp': 8,
# 'igp': 9,
# 'udp': 17,
# 'rdp': 27,
# 'ipv6': 41,
# 'ipv6-route': 43,
# 'fragment': 44,
# 'rsvp': 46,
# 'gre': 47,
# 'esp': 50,
# 'ah': 51,
# 'icmpv6': 58,
# 'ipv6-nonxt': 59,
# 'ipv6-opts': 60,
# 'ospf': 89,
# 'pim': 103,
# 'vrrp': 112,
# 'l2tp': 115,
# 'sctp': 132,
# 'udplite': 136,
# 'all': -1, # Used for GCE default deny, do not use in pol file.
# }
# AF_MAP = {'inet': 4,
# 'inet6': 6,
# 'bridge': 4 # if this doesn't exist, output includes v4 & v6
# }
# ALWAYS_PROTO_NUM = ['ipip']
# PROTO_MAP_BY_NUMBER = dict([(v, k) for (k, v) in PROTO_MAP.items()])
# AF_MAP_BY_NUMBER = dict([(v, k) for (k, v) in AF_MAP.items()])
# NO_AF_LOG_ADDR = string.Template('Term $term will not be rendered, as it has'
# ' $direction address match specified but no'
# ' $direction addresses of $af address family'
# ' are present.')
# NO_AF_LOG_PROTO = string.Template('Term $term will not be rendered, as it has'
# ' $proto match specified but the ACL is of'
# ' $af address family.')
# _PLATFORM = None
# _DEFAULT_PROTOCOL = 'ip'
# _SUPPORTED_AF = {'inet', 'inet6'}
# _FILTER_BLACKLIST = {}
# WARN_IF_UNSUPPORTED = {
# 'restrict_address_family',
# 'counter',
# 'destination_tag',
# 'filter_term',
# 'logging',
# 'loss_priority',
# 'owner',
# 'qos',
# 'routing_instance',
# 'policer',
# 'source_tag'
# }
# _ABBREVIATION_TABLE = [
# # Service abbreviations first.
# ('experiment', 'EXP'),
# ('wifi-radius', 'W-R'),
# ('customer', 'CUST'),
# ('server', 'SRV'),
# # Next, common routing terms
# ('global', 'GBL'),
# ('google', 'GOOG'),
# ('service', 'SVC'),
# ('router', 'RTR'),
# ('internal', 'INT'),
# ('external', 'EXT'),
# ('transit', 'TRNS'),
# ('management', 'MGMT'),
# # State info
# ('established', 'EST'),
# ('unreachable', 'UNR'),
# ('fragment', 'FRAG'),
# ('accept', 'ACC'),
# ('discard', 'DISC'),
# ('reject', 'REJ'),
# ('replies', 'RPL'),
# ('request', 'REQ'),
# ]
# _TERM_MAX_LENGTH = 62
# def __init__(self, term):
# def NormalizeAddressFamily(self, af):
# def NormalizeIcmpTypes(self, icmp_types, protocols, af):
# def __init__(self, pol, exp_info):
# def _TranslatePolicy(self, pol, exp_info):
# def _BuildTokens(self):
# def _GetSupportedTokens(self):
# def FixHighPorts(self, term, af='inet', all_protocols_stateful=False):
# def FixTermLength(self, term_name, abbreviate=False, truncate=False):
# def ProtocolNameToNumber(protocols, proto_to_num, name_to_num_map):
# def AddRepositoryTags(prefix='', rid=True, date=True, revision=True,
# wrap=False):
# def WrapWords(textlist, size, joiner='\n'):
. Output only the next line. | class Term(aclgenerator.Term): |
Predict the next line for this snippet: <|code_start|>NO_OVERFLOW_LONG = \
"This " + \
"is a long header. It's long on purpose. It's " + \
"purpose is to test that the splitting works co" + \
"rrectly. It should be well over the line limit" + \
". If it is shorter, it would not test the limit."
NO_OVERFLOW_LONG_EXPECTED = [
"This is a long header. It's long on purpose. It's purpose is to test",
"that the splitting works correctly. It should be well over the line",
"limit. If it is shorter, it would not test the limit."
]
NO_OVERFLOW_SHORT = \
"This is a short line of text"
NO_OVERFLOW_SHORT_EXPECTED = [
"This is a short line of text"
]
@pytest.mark.parametrize("test_input,expected", [
(NO_OVERFLOW_SHORT, NO_OVERFLOW_SHORT_EXPECTED),
(NO_OVERFLOW_LONG, NO_OVERFLOW_LONG_EXPECTED),
(SINGLE_LINE_OVERFLOW_TEXT_LONG, SINGLE_LINE_OVERFLOW_TEXT_LONG_EXPECTED),
(MULTI_LINE_OVERFLOW_TEXT_LONG, MULTI_LINE_OVERFLOW_TEXT_LONG_EXPECTED)
]
)
def testWrapWords(test_input, expected):
<|code_end|>
with the help of current file imports:
from capirca.lib.aclgenerator import WrapWords
import pytest
and context from other files:
# Path: capirca/lib/aclgenerator.py
# def WrapWords(textlist, size, joiner='\n'):
# r"""Insert breaks into the listed strings at specified width.
#
# Args:
# textlist: a list of text strings
# size: width of reformated strings
# joiner: text to insert at break. eg. '\n ' to add an indent.
# Returns:
# list of strings
# """
# # \S{%d}(?!\s|\Z) collets the max size for words that are larger than the max
# # (?<=\S{%d})\S+ collects the remaining text for overflow words in their own
# # line
# # \S.{1,%d}(?=\s|\Z)) collects all words and spaces up to max size, breaking
# # at the last space
# rval = []
# linelength_re = re.compile(
# r'(\S{%d}(?!\s|\Z)|(?<=\S{%d})\S+|\S.{1,%d}(?=\s|\Z))' %
# (size, size, size - 1))
# for index in range(len(textlist)):
# if len(textlist[index]) > size:
# # insert joiner into the string at appropriate places.
# textlist[index] = joiner.join(linelength_re.findall(textlist[index]))
# # avoid empty comment lines
# rval.extend(x.strip() for x in textlist[index].strip().split(joiner) if x)
# return rval
, which may contain function names, class names, or code. Output only the next line. | result = WrapWords([test_input], 70) |
Given the code snippet: <|code_start|> """
counter = 0
for i in self.source_address:
if i.version == 6 and i.version in address_family:
counter += self._IPV6_BYTE_SIZE
elif i.version == 4 and i.version in address_family:
counter += self._IPV4_BYTE_SIZE
for i in self.destination_address:
if i.version == 6 and i.version in address_family:
counter += self._IPV6_BYTE_SIZE
elif i.version == 4 and i.version in address_family:
counter += self._IPV4_BYTE_SIZE
return counter
def FlattenAll(self):
"""Reduce source, dest, and address fields to their post-exclude state.
Populates the self.flattened_addr, self.flattened_saddr,
self.flattened_daddr by removing excludes from includes.
"""
# No excludes, set flattened attributes and move along.
self.flattened = True
if not (self.source_address_exclude or self.destination_address_exclude or
self.address_exclude):
self.flattened_saddr = self.source_address
self.flattened_daddr = self.destination_address
self.flattened_addr = self.address
return
if self.source_address_exclude:
<|code_end|>
, generate the next line using the imports in this file:
import datetime
import os
import sys
from absl import logging
from capirca.lib import nacaddr
from capirca.lib import naming
from ply import lex
from ply import yacc
and context (functions, classes, or occasionally code) from other files:
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
#
# Path: capirca/lib/naming.py
# class Error(Exception):
# class NamespaceCollisionError(Error):
# class BadNetmaskTypeError(Error):
# class NoDefinitionsError(Error):
# class ParseError(Error):
# class UndefinedAddressError(Error):
# class UndefinedServiceError(Error):
# class UndefinedPortError(Error):
# class UnexpectedDefinitionTypeError(Error):
# class NamingSyntaxError(Error):
# class _ItemUnit:
# class Naming:
# def __init__(self, symbol):
# def __init__(self, naming_dir=None, naming_file=None, naming_type=None):
# def _CheckUnseen(self, def_type):
# def GetIpParents(self, query):
# def GetServiceParents(self, query):
# def GetNetParents(self, query):
# def _GetParents(self, query, query_group):
# def GetNetChildren(self, query):
# def _GetChildren(self, query, query_group):
# def _IsIpFormat(self, item):
# def GetServiceNames(self):
# def GetService(self, query):
# def GetPortParents(self, query, proto):
# def GetServiceByProto(self, query, proto):
# def GetNetAddr(self, token):
# def GetNet(self, query):
# def _Parse(self, defdirectory, def_type):
# def _ParseFile(self, file_handle, def_type):
# def ParseServiceList(self, data):
# def ParseNetworkList(self, data):
# def _ParseLine(self, line, definition_type):
. Output only the next line. | self.flattened_saddr = nacaddr.AddressListExclude( |
Predict the next line after this snippet: <|code_start|> Returns:
policy object or False (if parse error).
"""
data = _ReadFile(filename)
p = ParsePolicy(data, definitions, optimize, base_dir=base_dir,
shade_check=shade_check, filename=filename)
return p
def ParsePolicy(data, definitions=None, optimize=True, base_dir='',
shade_check=False, filename=''):
"""Parse the policy in 'data', optionally provide a naming object.
Parse a blob of policy text into a policy object.
Args:
data: a string blob of policy data to parse.
definitions: optional naming library definitions object.
optimize: bool - whether to summarize networks and services.
base_dir: base path string to look for acls or include files.
shade_check: bool - whether to raise an exception when a term is shaded.
filename: string - filename used by the policy.
Returns:
policy object or False (if parse error).
"""
try:
if definitions:
globals()['DEFINITIONS'] = definitions
else:
<|code_end|>
using the current file's imports:
import datetime
import os
import sys
from absl import logging
from capirca.lib import nacaddr
from capirca.lib import naming
from ply import lex
from ply import yacc
and any relevant context from other files:
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
#
# Path: capirca/lib/naming.py
# class Error(Exception):
# class NamespaceCollisionError(Error):
# class BadNetmaskTypeError(Error):
# class NoDefinitionsError(Error):
# class ParseError(Error):
# class UndefinedAddressError(Error):
# class UndefinedServiceError(Error):
# class UndefinedPortError(Error):
# class UnexpectedDefinitionTypeError(Error):
# class NamingSyntaxError(Error):
# class _ItemUnit:
# class Naming:
# def __init__(self, symbol):
# def __init__(self, naming_dir=None, naming_file=None, naming_type=None):
# def _CheckUnseen(self, def_type):
# def GetIpParents(self, query):
# def GetServiceParents(self, query):
# def GetNetParents(self, query):
# def _GetParents(self, query, query_group):
# def GetNetChildren(self, query):
# def _GetChildren(self, query, query_group):
# def _IsIpFormat(self, item):
# def GetServiceNames(self):
# def GetService(self, query):
# def GetPortParents(self, query, proto):
# def GetServiceByProto(self, query, proto):
# def GetNetAddr(self, token):
# def GetNet(self, query):
# def _Parse(self, defdirectory, def_type):
# def _ParseFile(self, file_handle, def_type):
# def ParseServiceList(self, data):
# def ParseNetworkList(self, data):
# def _ParseLine(self, line, definition_type):
. Output only the next line. | globals()['DEFINITIONS'] = naming.Naming(DEFAULT_DEFINITIONS) |
Predict the next line after this snippet: <|code_start|>
    This generates a pcap packet filter expression that either:
1) Matches (i.e., captures), the packets that match the ACCEPT clauses
specified in a given policy, or
    2) Matches the packets that match the opposite of that, i.e., the DENY or REJECT
clauses.
Support tcp flags matching and icmptypes, including ipv6/icmpv6, but not much
    else past the standard address, port, and protocol conditions.
Note that this is still alpha and will likely require more testing prior to
having more confidence in it.
Stolen liberally from packetfilter.py.
"""
class Error(Exception):
"""Base error class."""
class UnsupportedActionError(Error):
"""Raised when we see an unsupported action."""
class UnsupportedTargetOptionError(Error):
"""Raised when we see an unsupported option."""
<|code_end|>
using the current file's imports:
import datetime
from absl import logging
from capirca.lib import aclgenerator
and any relevant context from other files:
# Path: capirca/lib/aclgenerator.py
# class Error(Exception):
# class NoPlatformPolicyError(Error):
# class UnknownIcmpTypeError(Error):
# class MismatchIcmpInetError(Error):
# class EstablishedError(Error):
# class UnsupportedAFError(Error):
# class DuplicateTermError(Error):
# class UnsupportedFilterError(Error):
# class UnsupportedTargetOptionError(Error):
# class TermNameTooLongError(Error):
# class Term:
# class ACLGenerator:
# ICMP_TYPE = policy.Term.ICMP_TYPE
# PROTO_MAP = {'hopopt': 0,
# 'icmp': 1,
# 'igmp': 2,
# 'ggp': 3,
# 'ipip': 4,
# 'tcp': 6,
# 'egp': 8,
# 'igp': 9,
# 'udp': 17,
# 'rdp': 27,
# 'ipv6': 41,
# 'ipv6-route': 43,
# 'fragment': 44,
# 'rsvp': 46,
# 'gre': 47,
# 'esp': 50,
# 'ah': 51,
# 'icmpv6': 58,
# 'ipv6-nonxt': 59,
# 'ipv6-opts': 60,
# 'ospf': 89,
# 'pim': 103,
# 'vrrp': 112,
# 'l2tp': 115,
# 'sctp': 132,
# 'udplite': 136,
# 'all': -1, # Used for GCE default deny, do not use in pol file.
# }
# AF_MAP = {'inet': 4,
# 'inet6': 6,
# 'bridge': 4 # if this doesn't exist, output includes v4 & v6
# }
# ALWAYS_PROTO_NUM = ['ipip']
# PROTO_MAP_BY_NUMBER = dict([(v, k) for (k, v) in PROTO_MAP.items()])
# AF_MAP_BY_NUMBER = dict([(v, k) for (k, v) in AF_MAP.items()])
# NO_AF_LOG_ADDR = string.Template('Term $term will not be rendered, as it has'
# ' $direction address match specified but no'
# ' $direction addresses of $af address family'
# ' are present.')
# NO_AF_LOG_PROTO = string.Template('Term $term will not be rendered, as it has'
# ' $proto match specified but the ACL is of'
# ' $af address family.')
# _PLATFORM = None
# _DEFAULT_PROTOCOL = 'ip'
# _SUPPORTED_AF = {'inet', 'inet6'}
# _FILTER_BLACKLIST = {}
# WARN_IF_UNSUPPORTED = {
# 'restrict_address_family',
# 'counter',
# 'destination_tag',
# 'filter_term',
# 'logging',
# 'loss_priority',
# 'owner',
# 'qos',
# 'routing_instance',
# 'policer',
# 'source_tag'
# }
# _ABBREVIATION_TABLE = [
# # Service abbreviations first.
# ('experiment', 'EXP'),
# ('wifi-radius', 'W-R'),
# ('customer', 'CUST'),
# ('server', 'SRV'),
# # Next, common routing terms
# ('global', 'GBL'),
# ('google', 'GOOG'),
# ('service', 'SVC'),
# ('router', 'RTR'),
# ('internal', 'INT'),
# ('external', 'EXT'),
# ('transit', 'TRNS'),
# ('management', 'MGMT'),
# # State info
# ('established', 'EST'),
# ('unreachable', 'UNR'),
# ('fragment', 'FRAG'),
# ('accept', 'ACC'),
# ('discard', 'DISC'),
# ('reject', 'REJ'),
# ('replies', 'RPL'),
# ('request', 'REQ'),
# ]
# _TERM_MAX_LENGTH = 62
# def __init__(self, term):
# def NormalizeAddressFamily(self, af):
# def NormalizeIcmpTypes(self, icmp_types, protocols, af):
# def __init__(self, pol, exp_info):
# def _TranslatePolicy(self, pol, exp_info):
# def _BuildTokens(self):
# def _GetSupportedTokens(self):
# def FixHighPorts(self, term, af='inet', all_protocols_stateful=False):
# def FixTermLength(self, term_name, abbreviate=False, truncate=False):
# def ProtocolNameToNumber(protocols, proto_to_num, name_to_num_map):
# def AddRepositoryTags(prefix='', rid=True, date=True, revision=True,
# wrap=False):
# def WrapWords(textlist, size, joiner='\n'):
. Output only the next line. | class Term(aclgenerator.Term): |
Given the following code snippet before the placeholder: <|code_start|>"""Generic Google Cloud Platform multi-product generator.
Base class for GCP firewalling products.
"""
class Error(Exception):
"""Generic error class."""
class TermError(Error):
"""Raised when a term is not valid."""
class HeaderError(Error):
"""Raised when a header is not valid."""
class UnsupportedFilterTypeError(Error):
"""Raised when an unsupported filter type is specified."""
<|code_end|>
, predict the next line using imports from the current file:
import json
import re
import six
from capirca.lib import aclgenerator
and context including class names, function names, and sometimes code from other files:
# Path: capirca/lib/aclgenerator.py
# class Error(Exception):
# class NoPlatformPolicyError(Error):
# class UnknownIcmpTypeError(Error):
# class MismatchIcmpInetError(Error):
# class EstablishedError(Error):
# class UnsupportedAFError(Error):
# class DuplicateTermError(Error):
# class UnsupportedFilterError(Error):
# class UnsupportedTargetOptionError(Error):
# class TermNameTooLongError(Error):
# class Term:
# class ACLGenerator:
# ICMP_TYPE = policy.Term.ICMP_TYPE
# PROTO_MAP = {'hopopt': 0,
# 'icmp': 1,
# 'igmp': 2,
# 'ggp': 3,
# 'ipip': 4,
# 'tcp': 6,
# 'egp': 8,
# 'igp': 9,
# 'udp': 17,
# 'rdp': 27,
# 'ipv6': 41,
# 'ipv6-route': 43,
# 'fragment': 44,
# 'rsvp': 46,
# 'gre': 47,
# 'esp': 50,
# 'ah': 51,
# 'icmpv6': 58,
# 'ipv6-nonxt': 59,
# 'ipv6-opts': 60,
# 'ospf': 89,
# 'pim': 103,
# 'vrrp': 112,
# 'l2tp': 115,
# 'sctp': 132,
# 'udplite': 136,
# 'all': -1, # Used for GCE default deny, do not use in pol file.
# }
# AF_MAP = {'inet': 4,
# 'inet6': 6,
# 'bridge': 4 # if this doesn't exist, output includes v4 & v6
# }
# ALWAYS_PROTO_NUM = ['ipip']
# PROTO_MAP_BY_NUMBER = dict([(v, k) for (k, v) in PROTO_MAP.items()])
# AF_MAP_BY_NUMBER = dict([(v, k) for (k, v) in AF_MAP.items()])
# NO_AF_LOG_ADDR = string.Template('Term $term will not be rendered, as it has'
# ' $direction address match specified but no'
# ' $direction addresses of $af address family'
# ' are present.')
# NO_AF_LOG_PROTO = string.Template('Term $term will not be rendered, as it has'
# ' $proto match specified but the ACL is of'
# ' $af address family.')
# _PLATFORM = None
# _DEFAULT_PROTOCOL = 'ip'
# _SUPPORTED_AF = {'inet', 'inet6'}
# _FILTER_BLACKLIST = {}
# WARN_IF_UNSUPPORTED = {
# 'restrict_address_family',
# 'counter',
# 'destination_tag',
# 'filter_term',
# 'logging',
# 'loss_priority',
# 'owner',
# 'qos',
# 'routing_instance',
# 'policer',
# 'source_tag'
# }
# _ABBREVIATION_TABLE = [
# # Service abbreviations first.
# ('experiment', 'EXP'),
# ('wifi-radius', 'W-R'),
# ('customer', 'CUST'),
# ('server', 'SRV'),
# # Next, common routing terms
# ('global', 'GBL'),
# ('google', 'GOOG'),
# ('service', 'SVC'),
# ('router', 'RTR'),
# ('internal', 'INT'),
# ('external', 'EXT'),
# ('transit', 'TRNS'),
# ('management', 'MGMT'),
# # State info
# ('established', 'EST'),
# ('unreachable', 'UNR'),
# ('fragment', 'FRAG'),
# ('accept', 'ACC'),
# ('discard', 'DISC'),
# ('reject', 'REJ'),
# ('replies', 'RPL'),
# ('request', 'REQ'),
# ]
# _TERM_MAX_LENGTH = 62
# def __init__(self, term):
# def NormalizeAddressFamily(self, af):
# def NormalizeIcmpTypes(self, icmp_types, protocols, af):
# def __init__(self, pol, exp_info):
# def _TranslatePolicy(self, pol, exp_info):
# def _BuildTokens(self):
# def _GetSupportedTokens(self):
# def FixHighPorts(self, term, af='inet', all_protocols_stateful=False):
# def FixTermLength(self, term_name, abbreviate=False, truncate=False):
# def ProtocolNameToNumber(protocols, proto_to_num, name_to_num_map):
# def AddRepositoryTags(prefix='', rid=True, date=True, revision=True,
# wrap=False):
# def WrapWords(textlist, size, joiner='\n'):
. Output only the next line. | class Term(aclgenerator.Term): |
Given the code snippet: <|code_start|>
file_directory = pathlib.Path(__file__).parent.absolute()
exclude_address_testcases = []
with open(str(file_directory)+"/address_exclude_test_cases.txt", 'r') as f:
for line in f:
ipstr, exstrs, restrs = line.strip().split(' ')
ip = nacaddr.IP(ipstr)
exclude_ips = list(map(nacaddr.IP, exstrs.split(',')))
expected_results = []
for i in restrs.split(';'):
result_strings = i.split(',')
ip_map = map(nacaddr.IP, result_strings)
ip_list = list(ip_map)
expected_results.append(ip_list)
for ex, res in zip(exclude_ips, expected_results):
exclude_address_testcases.append((ip, ex, res))
class TestIPUtils:
@pytest.mark.unit
@pytest.mark.parametrize("ip,exclude,expected", exclude_address_testcases)
def test_exclude_address(self, ip, exclude, expected):
<|code_end|>
, generate the next line using the imports in this file:
import pytest
import pathlib
from capirca.utils import iputils
from capirca.lib import nacaddr
and context (functions, classes, or occasionally code) from other files:
# Path: capirca/utils/iputils.py
# def exclude_address(
# base_net: ipaddress._BaseNetwork, # pylint disable=protected-access
# exclude_net: ipaddress._BaseNetwork # pylint disable=protected-access
# ):
#
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
. Output only the next line. | result = iputils.exclude_address(ip, exclude) |
Given the following code snippet before the placeholder: <|code_start|>
file_directory = pathlib.Path(__file__).parent.absolute()
exclude_address_testcases = []
with open(str(file_directory)+"/address_exclude_test_cases.txt", 'r') as f:
for line in f:
ipstr, exstrs, restrs = line.strip().split(' ')
<|code_end|>
, predict the next line using imports from the current file:
import pytest
import pathlib
from capirca.utils import iputils
from capirca.lib import nacaddr
and context including class names, function names, and sometimes code from other files:
# Path: capirca/utils/iputils.py
# def exclude_address(
# base_net: ipaddress._BaseNetwork, # pylint disable=protected-access
# exclude_net: ipaddress._BaseNetwork # pylint disable=protected-access
# ):
#
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
. Output only the next line. | ip = nacaddr.IP(ipstr) |
Based on the snippet: <|code_start|> ('192.168.0.64', '0.0.255.31'))
test_data = [(summarizer.DSMNet(3232235584, 4294967295), True,
('192.168.0.64', '32')),
(summarizer.DSMNet(3232235584, 4294901760), True,
('192.168.0.64', '16')),
(summarizer.DSMNet(3232235584, 4294967294), True,
('192.168.0.64', '31')),
(summarizer.DSMNet(3232235584, 4290772992), True,
('192.168.0.64', '10')),
(summarizer.DSMNet(3232235584, 4294966016), True,
('192.168.0.64', '255.255.251.0')),
(summarizer.DSMNet(3232235584, 4294901504), True,
('192.168.0.64', '255.254.255.0'))]
for net, nondsm, expected in test_data:
self.assertEqual(summarizer.ToDottedQuad(net, nondsm=nondsm), expected)
def testInt32ToDottedQuad(self):
self.assertEqual(summarizer._Int32ToDottedQuad(3232235584),
'192.168.0.64')
def testSummarizeEmptyList(self):
nets = []
result = summarizer.Summarize(nets)
self.assertEqual(result, [])
def testSummarizeNoNetworks(self):
nets = []
for octet in range(0, 256):
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import random
import time
from absl.testing import absltest
from absl import logging
from capirca.lib import nacaddr
from capirca.lib import summarizer
and context (classes, functions, sometimes code) from other files:
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
#
# Path: capirca/lib/summarizer.py
# class DSMNet:
# def __init__(self, address, netmask, text=''):
# def __eq__(self, other):
# def __ne__(self, other):
# def __le__(self, other):
# def __ge__(self, other):
# def __lt__(self, other):
# def __gt__(self, other):
# def __str__(self):
# def MergeText(self, text=''):
# def ToDottedQuad(net, negate=False, nondsm=False):
# def _PrefixlenForNonDSM(intmask):
# def _Int32ToDottedQuad(num):
# def _NacaddrNetToDSMNet(net):
# def _ToPrettyBinaryFormat(num):
# def Summarize(nets):
# def _SummarizeSameMask(nets):
. Output only the next line. | net = nacaddr.IPv4('192.' + str(255 - octet) + '.' + |
Next line prediction: <|code_start|># You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for discontinuous subnet mask summarizer."""
class SummarizerTest(absltest.TestCase):
def setUp(self):
super().setUp()
random_seed = int(time.time())
value = os.environ.get('TEST_RANDOM_SEED', '')
try:
random_seed = int(value)
except ValueError:
pass
logging.info('Seeding random generator with seed %d', random_seed)
random.seed(random_seed)
def testToDottedQuad(self):
<|code_end|>
. Use current file imports:
(import os
import random
import time
from absl.testing import absltest
from absl import logging
from capirca.lib import nacaddr
from capirca.lib import summarizer)
and context including class names, function names, or small code snippets from other files:
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
#
# Path: capirca/lib/summarizer.py
# class DSMNet:
# def __init__(self, address, netmask, text=''):
# def __eq__(self, other):
# def __ne__(self, other):
# def __le__(self, other):
# def __ge__(self, other):
# def __lt__(self, other):
# def __gt__(self, other):
# def __str__(self):
# def MergeText(self, text=''):
# def ToDottedQuad(net, negate=False, nondsm=False):
# def _PrefixlenForNonDSM(intmask):
# def _Int32ToDottedQuad(num):
# def _NacaddrNetToDSMNet(net):
# def _ToPrettyBinaryFormat(num):
# def Summarize(nets):
# def _SummarizeSameMask(nets):
. Output only the next line. | net = summarizer.DSMNet(1 << 32, 4294967264) |
Next line prediction: <|code_start|>
def IsDefaultDeny(term):
"""Returns true if a term is a default deny without IPs, ports, etc."""
skip_attrs = ['flattened', 'flattened_addr', 'flattened_saddr',
'flattened_daddr', 'action', 'comment', 'name', 'logging']
if 'deny' not in term.action:
return False
# This lc will look through all methods and attributes of the object.
# It returns only the attributes that need to be looked at to determine if
# this is a default deny.
for i in [a for a in dir(term) if not a.startswith('__') and
a.islower() and not callable(getattr(term, a))]:
if i in skip_attrs:
continue
v = getattr(term, i)
if isinstance(v, str) and v:
return False
if isinstance(v, list) and v:
return False
return True
def GetNextPriority(priority):
"""Get the priority for the next rule."""
return priority
<|code_end|>
. Use current file imports:
(import copy
import datetime
import ipaddress
import json
import logging
import re
import six
from typing import Dict, Any
from capirca.lib import gcp
from capirca.lib import nacaddr)
and context including class names, function names, or small code snippets from other files:
# Path: capirca/lib/gcp.py
# class Error(Exception):
# class TermError(Error):
# class HeaderError(Error):
# class UnsupportedFilterTypeError(Error):
# class Term(aclgenerator.Term):
# class GCP(aclgenerator.ACLGenerator):
# _ALLOW_PROTO_NAME = frozenset(
# ['tcp', 'udp', 'icmp', 'esp', 'ah', 'ipip', 'sctp',
# 'all'
# ])
# _GOOD_DIRECTION = ['INGRESS', 'EGRESS']
# def _GetPorts(self):
# def _GetLoggingSetting(self):
# def __str__(self):
# def IsDefaultDeny(term):
# def IsProjectIDValid(project):
# def IsVPCNameValid(vpc):
# def TruncateString(raw_string, max_length):
# def GetIpv6TermName(term_name):
#
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
. Output only the next line. | class Term(gcp.Term): |
Given the code snippet: <|code_start|> rule['sourceRanges'] = [str(saddr) for saddr in chunk]
rules.append(rule)
elif daddrs:
dest_addr_chunks = [
daddrs[x:x+self._TERM_ADDRESS_LIMIT] for x in range(
0, len(daddrs), self._TERM_ADDRESS_LIMIT)]
for i, chunk in enumerate(dest_addr_chunks):
rule = copy.deepcopy(proto_dict)
if len(dest_addr_chunks) > 1:
rule['name'] = '%s-%d' % (rule['name'], i+1)
rule['destinationRanges'] = [str(daddr) for daddr in chunk]
rules.append(rule)
else:
rules.append(proto_dict)
# Sanity checking term name lengths.
long_rules = [rule['name'] for rule in rules if len(rule['name']) > 63]
if long_rules:
raise GceFirewallError(
'GCE firewall name ended up being too long: %s' % long_rules)
return rules
class GCE(gcp.GCP):
"""A GCE firewall policy object."""
_PLATFORM = 'gce'
SUFFIX = '.gce'
_SUPPORTED_AF = frozenset(('inet', 'inet6', 'mixed'))
_ANY_IP = {
<|code_end|>
, generate the next line using the imports in this file:
import copy
import datetime
import ipaddress
import json
import logging
import re
import six
from typing import Dict, Any
from capirca.lib import gcp
from capirca.lib import nacaddr
and context (functions, classes, or occasionally code) from other files:
# Path: capirca/lib/gcp.py
# class Error(Exception):
# class TermError(Error):
# class HeaderError(Error):
# class UnsupportedFilterTypeError(Error):
# class Term(aclgenerator.Term):
# class GCP(aclgenerator.ACLGenerator):
# _ALLOW_PROTO_NAME = frozenset(
# ['tcp', 'udp', 'icmp', 'esp', 'ah', 'ipip', 'sctp',
# 'all'
# ])
# _GOOD_DIRECTION = ['INGRESS', 'EGRESS']
# def _GetPorts(self):
# def _GetLoggingSetting(self):
# def __str__(self):
# def IsDefaultDeny(term):
# def IsProjectIDValid(project):
# def IsVPCNameValid(vpc):
# def TruncateString(raw_string, max_length):
# def GetIpv6TermName(term_name):
#
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
. Output only the next line. | 'inet': nacaddr.IP('0.0.0.0/0'), |
Here is a snippet: <|code_start|> """Returns true if a term is a default deny without IPs, ports, etc."""
skip_attrs = [
'flattened', 'flattened_addr', 'flattened_saddr', 'flattened_daddr',
'action', 'comment', 'name', 'logging', 'direction'
]
if 'deny' not in term.action:
return False
# This lc will look through all methods and attributes of the object.
# It returns only the attributes that need to be looked at to determine if
# this is a default deny.
for i in [
a for a in dir(term) if not a.startswith('__') and a.islower() and
not callable(getattr(term, a))
]:
if i in skip_attrs:
continue
v = getattr(term, i)
if isinstance(v, str) and v:
return False
if isinstance(v, list) and v:
return False
return True
def GetNextPriority(priority):
"""Get the priority for the next rule."""
return priority
<|code_end|>
. Write the next line using the current file imports:
import copy
import datetime
import logging
import re
import yaml
from capirca.lib import aclgenerator
and context from other files:
# Path: capirca/lib/aclgenerator.py
# class Error(Exception):
# class NoPlatformPolicyError(Error):
# class UnknownIcmpTypeError(Error):
# class MismatchIcmpInetError(Error):
# class EstablishedError(Error):
# class UnsupportedAFError(Error):
# class DuplicateTermError(Error):
# class UnsupportedFilterError(Error):
# class UnsupportedTargetOptionError(Error):
# class TermNameTooLongError(Error):
# class Term:
# class ACLGenerator:
# ICMP_TYPE = policy.Term.ICMP_TYPE
# PROTO_MAP = {'hopopt': 0,
# 'icmp': 1,
# 'igmp': 2,
# 'ggp': 3,
# 'ipip': 4,
# 'tcp': 6,
# 'egp': 8,
# 'igp': 9,
# 'udp': 17,
# 'rdp': 27,
# 'ipv6': 41,
# 'ipv6-route': 43,
# 'fragment': 44,
# 'rsvp': 46,
# 'gre': 47,
# 'esp': 50,
# 'ah': 51,
# 'icmpv6': 58,
# 'ipv6-nonxt': 59,
# 'ipv6-opts': 60,
# 'ospf': 89,
# 'pim': 103,
# 'vrrp': 112,
# 'l2tp': 115,
# 'sctp': 132,
# 'udplite': 136,
# 'all': -1, # Used for GCE default deny, do not use in pol file.
# }
# AF_MAP = {'inet': 4,
# 'inet6': 6,
# 'bridge': 4 # if this doesn't exist, output includes v4 & v6
# }
# ALWAYS_PROTO_NUM = ['ipip']
# PROTO_MAP_BY_NUMBER = dict([(v, k) for (k, v) in PROTO_MAP.items()])
# AF_MAP_BY_NUMBER = dict([(v, k) for (k, v) in AF_MAP.items()])
# NO_AF_LOG_ADDR = string.Template('Term $term will not be rendered, as it has'
# ' $direction address match specified but no'
# ' $direction addresses of $af address family'
# ' are present.')
# NO_AF_LOG_PROTO = string.Template('Term $term will not be rendered, as it has'
# ' $proto match specified but the ACL is of'
# ' $af address family.')
# _PLATFORM = None
# _DEFAULT_PROTOCOL = 'ip'
# _SUPPORTED_AF = {'inet', 'inet6'}
# _FILTER_BLACKLIST = {}
# WARN_IF_UNSUPPORTED = {
# 'restrict_address_family',
# 'counter',
# 'destination_tag',
# 'filter_term',
# 'logging',
# 'loss_priority',
# 'owner',
# 'qos',
# 'routing_instance',
# 'policer',
# 'source_tag'
# }
# _ABBREVIATION_TABLE = [
# # Service abbreviations first.
# ('experiment', 'EXP'),
# ('wifi-radius', 'W-R'),
# ('customer', 'CUST'),
# ('server', 'SRV'),
# # Next, common routing terms
# ('global', 'GBL'),
# ('google', 'GOOG'),
# ('service', 'SVC'),
# ('router', 'RTR'),
# ('internal', 'INT'),
# ('external', 'EXT'),
# ('transit', 'TRNS'),
# ('management', 'MGMT'),
# # State info
# ('established', 'EST'),
# ('unreachable', 'UNR'),
# ('fragment', 'FRAG'),
# ('accept', 'ACC'),
# ('discard', 'DISC'),
# ('reject', 'REJ'),
# ('replies', 'RPL'),
# ('request', 'REQ'),
# ]
# _TERM_MAX_LENGTH = 62
# def __init__(self, term):
# def NormalizeAddressFamily(self, af):
# def NormalizeIcmpTypes(self, icmp_types, protocols, af):
# def __init__(self, pol, exp_info):
# def _TranslatePolicy(self, pol, exp_info):
# def _BuildTokens(self):
# def _GetSupportedTokens(self):
# def FixHighPorts(self, term, af='inet', all_protocols_stateful=False):
# def FixTermLength(self, term_name, abbreviate=False, truncate=False):
# def ProtocolNameToNumber(protocols, proto_to_num, name_to_num_map):
# def AddRepositoryTags(prefix='', rid=True, date=True, revision=True,
# wrap=False):
# def WrapWords(textlist, size, joiner='\n'):
, which may include functions, classes, or code. Output only the next line. | class Term(aclgenerator.Term): |
Next line prediction: <|code_start|> """Test proper handling of a non-existant service request."""
self.assertListEqual(self.defs.GetServiceByProto('SVC1', 'fud'), [])
def testGetServiceByProto(self):
self.assertListEqual(self.defs.GetServiceByProto('SVC1', 'tcp'),
['80', '82'])
def testGetServiceByProtoWithoutProtocols(self):
"""Ensure services with protocol are not returned when type is specified."""
self.assertListEqual(self.defs.GetServiceByProto('SVC3', 'tcp'), ['80'])
def testNetworkComment(self):
self.assertEqual(self.defs.GetNetAddr('NET1')[0].text, 'network1')
def testNestedNetworkComment(self):
self.assertEqual(self.defs.GetNetAddr('NET2')[1].text, 'network1')
def testUndefinedAddress(self):
self.assertRaises(naming.UndefinedAddressError, self.defs.GetNetAddr, 'FOO')
def testNamespaceCollisionError(self):
badservicedata = []
badservicedata.append('SVC1 = 80/tcp')
badservicedata.append('SVC1 = 81/udp')
testdefs = naming.Naming(None)
self.assertRaises(naming.NamespaceCollisionError,
testdefs.ParseServiceList, badservicedata)
def testNetworkAddress(self):
self.assertListEqual(self.defs.GetNetAddr('NET1'),
<|code_end|>
. Use current file imports:
(from absl.testing import absltest
from capirca.lib import nacaddr
from capirca.lib import naming)
and context including class names, function names, or small code snippets from other files:
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
#
# Path: capirca/lib/naming.py
# class Error(Exception):
# class NamespaceCollisionError(Error):
# class BadNetmaskTypeError(Error):
# class NoDefinitionsError(Error):
# class ParseError(Error):
# class UndefinedAddressError(Error):
# class UndefinedServiceError(Error):
# class UndefinedPortError(Error):
# class UnexpectedDefinitionTypeError(Error):
# class NamingSyntaxError(Error):
# class _ItemUnit:
# class Naming:
# def __init__(self, symbol):
# def __init__(self, naming_dir=None, naming_file=None, naming_type=None):
# def _CheckUnseen(self, def_type):
# def GetIpParents(self, query):
# def GetServiceParents(self, query):
# def GetNetParents(self, query):
# def _GetParents(self, query, query_group):
# def GetNetChildren(self, query):
# def _GetChildren(self, query, query_group):
# def _IsIpFormat(self, item):
# def GetServiceNames(self):
# def GetService(self, query):
# def GetPortParents(self, query, proto):
# def GetServiceByProto(self, query, proto):
# def GetNetAddr(self, token):
# def GetNet(self, query):
# def _Parse(self, defdirectory, def_type):
# def _ParseFile(self, file_handle, def_type):
# def ParseServiceList(self, data):
# def ParseNetworkList(self, data):
# def _ParseLine(self, line, definition_type):
. Output only the next line. | [nacaddr.IPv4('10.0.0.0/8')]) |
Next line prediction: <|code_start|># Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for naming.py module."""
class NamingUnitTest(absltest.TestCase):
"""Unit Test for naming.py.
The Naming class allows us to specify if we want to use arrays of text
instead of files. Most of the tests below create an empty Naming class.
To populate the class with data, we simply pass our test data in arrays
to the ParseList method, or in some cases, pass an io.BytesIO stream.
"""
def setUp(self):
super().setUp()
<|code_end|>
. Use current file imports:
(from absl.testing import absltest
from capirca.lib import nacaddr
from capirca.lib import naming)
and context including class names, function names, or small code snippets from other files:
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
#
# Path: capirca/lib/naming.py
# class Error(Exception):
# class NamespaceCollisionError(Error):
# class BadNetmaskTypeError(Error):
# class NoDefinitionsError(Error):
# class ParseError(Error):
# class UndefinedAddressError(Error):
# class UndefinedServiceError(Error):
# class UndefinedPortError(Error):
# class UnexpectedDefinitionTypeError(Error):
# class NamingSyntaxError(Error):
# class _ItemUnit:
# class Naming:
# def __init__(self, symbol):
# def __init__(self, naming_dir=None, naming_file=None, naming_type=None):
# def _CheckUnseen(self, def_type):
# def GetIpParents(self, query):
# def GetServiceParents(self, query):
# def GetNetParents(self, query):
# def _GetParents(self, query, query_group):
# def GetNetChildren(self, query):
# def _GetChildren(self, query, query_group):
# def _IsIpFormat(self, item):
# def GetServiceNames(self):
# def GetService(self, query):
# def GetPortParents(self, query, proto):
# def GetServiceByProto(self, query, proto):
# def GetNetAddr(self, token):
# def GetNet(self, query):
# def _Parse(self, defdirectory, def_type):
# def _ParseFile(self, file_handle, def_type):
# def ParseServiceList(self, data):
# def ParseNetworkList(self, data):
# def _ParseLine(self, line, definition_type):
. Output only the next line. | self.defs = naming.Naming(None) |
Here is a snippet: <|code_start|>
Useful for debugging.
Args:
num: number to be prettily formatted
Returns:
prettily formatted string
"""
# like ipaddr make assumption that this is ipv4
byte_strings = []
while num > 0 or len(byte_strings) < 4:
byte_strings.append('{0:08b}'.format(num & 0xff))
num >>= 8
return ' '.join(reversed(byte_strings))
def Summarize(nets):
"""Summarizes networks while allowing for discontinuous subnet mask.
Args:
nets: list of nacaddr.IPv4 or nacaddr.IPv6 objects.
Address family can be mixed, however there is no support for rendering
anything other than IPv4.
Returns:
sorted list of DSMIPNet objects.
"""
result = []
<|code_end|>
. Write the next line using the current file imports:
import collections
from capirca.lib import nacaddr
and context from other files:
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
, which may include functions, classes, or code. Output only the next line. | optimized_nets = nacaddr.CollapseAddrList(nets) |
Based on the snippet: <|code_start|>
class CgrepTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.db = naming.Naming(None)
self.db.ParseServiceList(_SERVICE.split('\n'))
self.db.ParseNetworkList(_NETWORK.split('\n'))
#
# test ip->token resolution (-i)
#
# 1.1.1.1 should only be in 'ANY'
def test_one_ip(self):
expected_results = [('ANY', ['0.0.0.0/0'])]
ip = '1.1.1.1'
results = cgrep.get_ip_parents(ip, self.db)
self.assertEqual(results, expected_results)
# 2001:db8::1 should only be in 'BOGON'
def test_one_ipv6(self):
expected_results = [('BOGON', ['2001:db8::/32'])]
ip = '2001:db8::1'
results = cgrep.get_ip_parents(ip, self.db)
self.assertEqual(results, expected_results)
# 1.1.1.1 should not be in CLASS-E
def test_one_ip_fail(self):
expected_results = [('CLASS-E', ['240.0.0.0/4'])]
<|code_end|>
, predict the immediate next line with the help of imports:
import argparse
from absl.testing import absltest
from capirca.lib import nacaddr
from capirca.lib import naming
from tools import cgrep
and context (classes, functions, sometimes code) from other files:
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
#
# Path: capirca/lib/naming.py
# class Error(Exception):
# class NamespaceCollisionError(Error):
# class BadNetmaskTypeError(Error):
# class NoDefinitionsError(Error):
# class ParseError(Error):
# class UndefinedAddressError(Error):
# class UndefinedServiceError(Error):
# class UndefinedPortError(Error):
# class UnexpectedDefinitionTypeError(Error):
# class NamingSyntaxError(Error):
# class _ItemUnit:
# class Naming:
# def __init__(self, symbol):
# def __init__(self, naming_dir=None, naming_file=None, naming_type=None):
# def _CheckUnseen(self, def_type):
# def GetIpParents(self, query):
# def GetServiceParents(self, query):
# def GetNetParents(self, query):
# def _GetParents(self, query, query_group):
# def GetNetChildren(self, query):
# def _GetChildren(self, query, query_group):
# def _IsIpFormat(self, item):
# def GetServiceNames(self):
# def GetService(self, query):
# def GetPortParents(self, query, proto):
# def GetServiceByProto(self, query, proto):
# def GetNetAddr(self, token):
# def GetNet(self, query):
# def _Parse(self, defdirectory, def_type):
# def _ParseFile(self, file_handle, def_type):
# def ParseServiceList(self, data):
# def ParseNetworkList(self, data):
# def _ParseLine(self, line, definition_type):
#
# Path: tools/cgrep.py
# def is_valid_ip(arg):
# def cli_options():
# def main(argv):
# def check_encapsulated(obj_type, first_obj, second_obj, db):
# def print_diff(ip, common, diff1, diff2):
# def group_diff(options, db):
# def get_ip_parents(ip, db):
# def get_nets_and_highest_prefix(ip, net_group, db):
# def get_nets(objects, db):
# def compare_tokens(options, db):
# def compare_ip_token(options, db):
# def get_ports(svc_group, db):
# def get_services(options, db):
. Output only the next line. | ip = nacaddr.IP('1.1.1.1/32') |
Given the code snippet: <|code_start|>LDAPS = 636/tcp
IMAPS = 993/tcp
POP_SSL = 995/tcp
HIGH_PORTS = 1024-65535/tcp 1024-65535/udp
MSSQL = 1433/tcp
MSSQL_MONITOR = 1434/tcp
RADIUS = 1812/tcp 1812/udp
HSRP = 1985/udp
NFSD = 2049/tcp 2049/udp
NETFLOW = 2056/udp
SQUID_PROXY = 3128/tcp
MYSQL = 3306/tcp
RDP = 3389/tcp
IPSEC = 4500/udp
POSTGRESQL = 5432/tcp
TRACEROUTE = 33434-33534/udp
"""
class Namespace:
def __init__(self, **kwargs):
for arg in kwargs:
setattr(self, arg, kwargs[arg])
class CgrepTest(absltest.TestCase):
def setUp(self):
super().setUp()
<|code_end|>
, generate the next line using the imports in this file:
import argparse
from absl.testing import absltest
from capirca.lib import nacaddr
from capirca.lib import naming
from tools import cgrep
and context (functions, classes, or occasionally code) from other files:
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
#
# Path: capirca/lib/naming.py
# class Error(Exception):
# class NamespaceCollisionError(Error):
# class BadNetmaskTypeError(Error):
# class NoDefinitionsError(Error):
# class ParseError(Error):
# class UndefinedAddressError(Error):
# class UndefinedServiceError(Error):
# class UndefinedPortError(Error):
# class UnexpectedDefinitionTypeError(Error):
# class NamingSyntaxError(Error):
# class _ItemUnit:
# class Naming:
# def __init__(self, symbol):
# def __init__(self, naming_dir=None, naming_file=None, naming_type=None):
# def _CheckUnseen(self, def_type):
# def GetIpParents(self, query):
# def GetServiceParents(self, query):
# def GetNetParents(self, query):
# def _GetParents(self, query, query_group):
# def GetNetChildren(self, query):
# def _GetChildren(self, query, query_group):
# def _IsIpFormat(self, item):
# def GetServiceNames(self):
# def GetService(self, query):
# def GetPortParents(self, query, proto):
# def GetServiceByProto(self, query, proto):
# def GetNetAddr(self, token):
# def GetNet(self, query):
# def _Parse(self, defdirectory, def_type):
# def _ParseFile(self, file_handle, def_type):
# def ParseServiceList(self, data):
# def ParseNetworkList(self, data):
# def _ParseLine(self, line, definition_type):
#
# Path: tools/cgrep.py
# def is_valid_ip(arg):
# def cli_options():
# def main(argv):
# def check_encapsulated(obj_type, first_obj, second_obj, db):
# def print_diff(ip, common, diff1, diff2):
# def group_diff(options, db):
# def get_ip_parents(ip, db):
# def get_nets_and_highest_prefix(ip, net_group, db):
# def get_nets(objects, db):
# def compare_tokens(options, db):
# def compare_ip_token(options, db):
# def get_ports(svc_group, db):
# def get_services(options, db):
. Output only the next line. | self.db = naming.Naming(None) |
Predict the next line for this snippet: <|code_start|>MYSQL = 3306/tcp
RDP = 3389/tcp
IPSEC = 4500/udp
POSTGRESQL = 5432/tcp
TRACEROUTE = 33434-33534/udp
"""
class Namespace:
def __init__(self, **kwargs):
for arg in kwargs:
setattr(self, arg, kwargs[arg])
class CgrepTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.db = naming.Naming(None)
self.db.ParseServiceList(_SERVICE.split('\n'))
self.db.ParseNetworkList(_NETWORK.split('\n'))
#
# test ip->token resolution (-i)
#
# 1.1.1.1 should only be in 'ANY'
def test_one_ip(self):
expected_results = [('ANY', ['0.0.0.0/0'])]
ip = '1.1.1.1'
<|code_end|>
with the help of current file imports:
import argparse
from absl.testing import absltest
from capirca.lib import nacaddr
from capirca.lib import naming
from tools import cgrep
and context from other files:
# Path: capirca/lib/nacaddr.py
# def IP(ip, comment='', token='', strict=True):
# def _is_subnet_of(a, b): # pylint: disable=invalid-name
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def AddComment(self, comment=''):
# def supernet(self, prefixlen_diff=1):
# def __init__(self, ip_string, comment='', token='', strict=True):
# def subnet_of(self, other):
# def supernet_of(self, other):
# def __deepcopy__(self, memo):
# def supernet(self, prefixlen_diff=1):
# def AddComment(self, comment=''):
# def _InNetList(adders, ip):
# def IsSuperNet(supernets, subnets):
# def CollapseAddrListPreserveTokens(addresses):
# def _SafeToMerge(address, merge_target, check_addresses):
# def _CollapseAddrListInternal(addresses, complements_by_network):
# def CollapseAddrList(addresses, complement_addresses=None):
# def SortAddrList(addresses):
# def RemoveAddressFromList(superset, exclude):
# def AddressListExclude(superset, excludes, collapse_addrs=True):
# class IPv4(ipaddress.IPv4Network):
# class IPv6(ipaddress.IPv6Network):
# class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError):
#
# Path: capirca/lib/naming.py
# class Error(Exception):
# class NamespaceCollisionError(Error):
# class BadNetmaskTypeError(Error):
# class NoDefinitionsError(Error):
# class ParseError(Error):
# class UndefinedAddressError(Error):
# class UndefinedServiceError(Error):
# class UndefinedPortError(Error):
# class UnexpectedDefinitionTypeError(Error):
# class NamingSyntaxError(Error):
# class _ItemUnit:
# class Naming:
# def __init__(self, symbol):
# def __init__(self, naming_dir=None, naming_file=None, naming_type=None):
# def _CheckUnseen(self, def_type):
# def GetIpParents(self, query):
# def GetServiceParents(self, query):
# def GetNetParents(self, query):
# def _GetParents(self, query, query_group):
# def GetNetChildren(self, query):
# def _GetChildren(self, query, query_group):
# def _IsIpFormat(self, item):
# def GetServiceNames(self):
# def GetService(self, query):
# def GetPortParents(self, query, proto):
# def GetServiceByProto(self, query, proto):
# def GetNetAddr(self, token):
# def GetNet(self, query):
# def _Parse(self, defdirectory, def_type):
# def _ParseFile(self, file_handle, def_type):
# def ParseServiceList(self, data):
# def ParseNetworkList(self, data):
# def _ParseLine(self, line, definition_type):
#
# Path: tools/cgrep.py
# def is_valid_ip(arg):
# def cli_options():
# def main(argv):
# def check_encapsulated(obj_type, first_obj, second_obj, db):
# def print_diff(ip, common, diff1, diff2):
# def group_diff(options, db):
# def get_ip_parents(ip, db):
# def get_nets_and_highest_prefix(ip, net_group, db):
# def get_nets(objects, db):
# def compare_tokens(options, db):
# def compare_ip_token(options, db):
# def get_ports(svc_group, db):
# def get_services(options, db):
, which may contain function names, class names, or code. Output only the next line. | results = cgrep.get_ip_parents(ip, self.db) |
Given the following code snippet before the placeholder: <|code_start|> def __str__(self):
return "[%d] %s" % (self.channel, self.payload)
class PayloadMeta(type):
def __new__(cls, name, bases, cls_members):
for req in ("encode", "decode", "type"):
if req not in cls_members:
raise TypeError("%s must define %s" % (name, req))
t = type.__new__(cls, name, bases, cls_members)
if t.type is not None:
Frame.DECODERS[t.type] = t
return t
class Payload(with_metaclass(PayloadMeta, object)):
type = None
def encode(self, enc): raise NotImplementedError
@staticmethod
def decode(spec, dec): raise NotImplementedError
class Method(Payload):
type = Frame.METHOD
def __init__(self, method, *args):
if len(args) != len(method.fields):
<|code_end|>
, predict the next line using imports from the current file:
from io import BytesIO
from twisted.python import log
from six import with_metaclass
from . import codec
from .spec import pythonize
and context including class names, function names, and sometimes code from other files:
# Path: src/txamqp/spec.py
# def pythonize(name):
# name = str(name)
# for key, val in REPLACE.items():
# name = name.replace(key, val)
# try:
# name = KEYWORDS[name]
# except KeyError:
# pass
# return name
. Output only the next line. | argspec = ["%s: %s" % (pythonize(f.name), f.type) |
Next line prediction: <|code_start|>#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
class Delegate(object):
def __init__(self):
self.handlers = {}
self.invokers = {}
# initialize all the mixins
self.invoke_all("init")
def invoke_all(self, meth, *args, **kwargs):
for cls in inspect.getmro(self.__class__):
if hasattr(cls, meth):
getattr(cls, meth)(self, *args, **kwargs)
def dispatch(self, channel, message):
method = message.method
try:
handler = self.handlers[method]
except KeyError:
<|code_end|>
. Use current file imports:
(import inspect
from .spec import pythonize)
and context including class names, function names, or small code snippets from other files:
# Path: src/txamqp/spec.py
# def pythonize(name):
# name = str(name)
# for key, val in REPLACE.items():
# name = name.replace(key, val)
# try:
# name = KEYWORDS[name]
# except KeyError:
# pass
# return name
. Output only the next line. | name = "%s_%s" % (pythonize(method.klass.name), |
Given the following code snippet before the placeholder: <|code_start|> ordering = ['-created']
db_table = 'poloniex_balance'
# def __str__(self):
# return '{usd:0>6} US$ | {btc:0>10} BTC'.format(
# usd=self.usd_balance,
# btc=self.btc_balance
# )
"""
BUY/SELL ORDER
{
"orderNumber": 31226040,
"resultingTrades": [
{
"amount": "338.8732",
"date": "2014-10-18 23:03:21",
"rate": "0.00000173",
"total": "0.00058625",
"tradeID": "16164",
"type": "buy"
}
]
}
"""
class Order(models.Model):
type = models.IntegerField(
<|code_end|>
, predict the next line using imports from the current file:
from datetime import datetime
from allauth.account.signals import user_signed_up
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from crumpet.profiles import constants
from crumpet.profiles.fields import PriceField, PercentField, AmountField
from crumpet.profiles.models import UserAccount
and context including class names, function names, and sometimes code from other files:
# Path: crumpet/profiles/constants.py
# CRYPTO_EXCHANGES = (
# ("poloniex", _("Poloniex")),
# ("bittrex", _("Bittrex")),
# ("bitstamp", _("Bitstamp"))
# )
# EXCHANGE_PERIODS = (
# (300, "5-min"),
# (900, "15-min"),
# (1800, "30-min"),
# (7200, "2-hr"),
# (14400, "4-hr"),
# (86400, "1-day")
# )
# STRATEGIES = (
# ("To The Moon", "toTheMoon"),
# ("Strategy 2", "strategy 2"),
# )
# INSTRUMENTS = (
# ("ETH", _("Ethereum")),
# ("DGB", _("DigiByte")),
# ("XRP", _("Ripple")),
# ("XMR", _("Monero")),
# ("LTC", _("LiteCoin")),
# ("LSK", _("LISK")),
# ("STEEM", _("STEEM"))
# )
# MODES = (
# ("backtest", _("Backtest")),
# ("livetest", _("Live Backtest")),
# ("live", _("Live"))
# )
# BUY = 0
# SELL = 1
# ORDER_TYPES = (
# (OrderType.BUY, 'buy'),
# (OrderType.SELL, 'sell')
# )
# OPEN, CANCELLED, PROCESSED = 'open', 'cancelled', 'processed'
# ORDER_STATES = (
# (OPEN, _('open')),
# (CANCELLED, _('cancelled')),
# (PROCESSED, _('processed'))
# )
# SMA, EMA, RSI = 'sma', 'ema', 'rsi'
# INDICATORS = (
# (SMA, _('sma')),
# (EMA, _('ema')),
# (RSI, _('rsi'))
# )
# class OrderType(object):
#
# Path: crumpet/profiles/fields.py
# class PriceField(models.DecimalField):
# def __init__(self, max_digits=30, decimal_places=2, default=0, **kwargs):
# super().__init__(max_digits=max_digits,
# decimal_places=decimal_places,
# default=default,
# **kwargs)
#
# class PercentField(models.DecimalField):
# def __init__(self, max_digits=8, decimal_places=3, **kwargs):
# super().__init__(max_digits=max_digits,
# decimal_places=decimal_places,
# **kwargs)
#
# class AmountField(models.DecimalField):
# def __init__(self, max_digits=30, decimal_places=8, default=0, **kwargs):
# super().__init__(max_digits=max_digits,
# decimal_places=decimal_places,
# default=default,
# **kwargs)
#
# Path: crumpet/profiles/models.py
# class UserAccount(models.Model):
# """User profile"""
# user = models.OneToOneField(
# settings.AUTH_USER_MODEL,
# related_name="user_account"
# )
# exchange = models.CharField(
# verbose_name="Crypto Exchanges",
# choices=constants.CRYPTO_EXCHANGES,
# default=constants.CRYPTO_EXCHANGES[0][0],
# max_length=30
# )
# api_key = models.CharField(max_length=255, blank=True)
# api_secret = models.CharField(max_length=255, blank=True)
# created = models.DateTimeField(auto_now_add=True)
# updated = models.DateTimeField(auto_now=True)
#
# class Meta:
# verbose_name = "User Account"
#
# def __str__(self):
# return u"%s %s" % (self.user.first_name, self.user.last_name)
. Output only the next line. | choices=constants.ORDER_TYPES, |
Based on the snippet: <|code_start|>
@python_2_unicode_compatible
class TradingStrategyProfile(models.Model):
"""Base trading strategy configuration models class."""
name = models.CharField(max_length=100)
note = models.CharField(max_length=255, blank=True)
account = models.ForeignKey(UserAccount)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
@python_2_unicode_compatible
class FixedStrategyProfile(TradingStrategyProfile):
"""Configuration for fixed trading strategy."""
<|code_end|>
, predict the immediate next line with the help of imports:
from datetime import datetime
from allauth.account.signals import user_signed_up
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from crumpet.profiles import constants
from crumpet.profiles.fields import PriceField, PercentField, AmountField
from crumpet.profiles.models import UserAccount
and context (classes, functions, sometimes code) from other files:
# Path: crumpet/profiles/constants.py
# CRYPTO_EXCHANGES = (
# ("poloniex", _("Poloniex")),
# ("bittrex", _("Bittrex")),
# ("bitstamp", _("Bitstamp"))
# )
# EXCHANGE_PERIODS = (
# (300, "5-min"),
# (900, "15-min"),
# (1800, "30-min"),
# (7200, "2-hr"),
# (14400, "4-hr"),
# (86400, "1-day")
# )
# STRATEGIES = (
# ("To The Moon", "toTheMoon"),
# ("Strategy 2", "strategy 2"),
# )
# INSTRUMENTS = (
# ("ETH", _("Ethereum")),
# ("DGB", _("DigiByte")),
# ("XRP", _("Ripple")),
# ("XMR", _("Monero")),
# ("LTC", _("LiteCoin")),
# ("LSK", _("LISK")),
# ("STEEM", _("STEEM"))
# )
# MODES = (
# ("backtest", _("Backtest")),
# ("livetest", _("Live Backtest")),
# ("live", _("Live"))
# )
# BUY = 0
# SELL = 1
# ORDER_TYPES = (
# (OrderType.BUY, 'buy'),
# (OrderType.SELL, 'sell')
# )
# OPEN, CANCELLED, PROCESSED = 'open', 'cancelled', 'processed'
# ORDER_STATES = (
# (OPEN, _('open')),
# (CANCELLED, _('cancelled')),
# (PROCESSED, _('processed'))
# )
# SMA, EMA, RSI = 'sma', 'ema', 'rsi'
# INDICATORS = (
# (SMA, _('sma')),
# (EMA, _('ema')),
# (RSI, _('rsi'))
# )
# class OrderType(object):
#
# Path: crumpet/profiles/fields.py
# class PriceField(models.DecimalField):
# def __init__(self, max_digits=30, decimal_places=2, default=0, **kwargs):
# super().__init__(max_digits=max_digits,
# decimal_places=decimal_places,
# default=default,
# **kwargs)
#
# class PercentField(models.DecimalField):
# def __init__(self, max_digits=8, decimal_places=3, **kwargs):
# super().__init__(max_digits=max_digits,
# decimal_places=decimal_places,
# **kwargs)
#
# class AmountField(models.DecimalField):
# def __init__(self, max_digits=30, decimal_places=8, default=0, **kwargs):
# super().__init__(max_digits=max_digits,
# decimal_places=decimal_places,
# default=default,
# **kwargs)
#
# Path: crumpet/profiles/models.py
# class UserAccount(models.Model):
# """User profile"""
# user = models.OneToOneField(
# settings.AUTH_USER_MODEL,
# related_name="user_account"
# )
# exchange = models.CharField(
# verbose_name="Crypto Exchanges",
# choices=constants.CRYPTO_EXCHANGES,
# default=constants.CRYPTO_EXCHANGES[0][0],
# max_length=30
# )
# api_key = models.CharField(max_length=255, blank=True)
# api_secret = models.CharField(max_length=255, blank=True)
# created = models.DateTimeField(auto_now_add=True)
# updated = models.DateTimeField(auto_now=True)
#
# class Meta:
# verbose_name = "User Account"
#
# def __str__(self):
# return u"%s %s" % (self.user.first_name, self.user.last_name)
. Output only the next line. | buy = PriceField() |
Predict the next line after this snippet: <|code_start|>
@python_2_unicode_compatible
class TradingStrategyProfile(models.Model):
"""Base trading strategy configuration models class."""
name = models.CharField(max_length=100)
note = models.CharField(max_length=255, blank=True)
account = models.ForeignKey(UserAccount)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
@python_2_unicode_compatible
class FixedStrategyProfile(TradingStrategyProfile):
"""Configuration for fixed trading strategy."""
buy = PriceField()
sell = PriceField()
class Meta:
db_table = 'strategy_profile_fixed'
def __str__(self):
return 'fixed buy at ${buy}, sell at ${sell}'.format(
buy=self.buy, sell=self.sell)
@python_2_unicode_compatible
class RelativeStrategyProfile(TradingStrategyProfile):
"""Configuration for relative trading strategy."""
<|code_end|>
using the current file's imports:
from datetime import datetime
from allauth.account.signals import user_signed_up
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from crumpet.profiles import constants
from crumpet.profiles.fields import PriceField, PercentField, AmountField
from crumpet.profiles.models import UserAccount
and any relevant context from other files:
# Path: crumpet/profiles/constants.py
# CRYPTO_EXCHANGES = (
# ("poloniex", _("Poloniex")),
# ("bittrex", _("Bittrex")),
# ("bitstamp", _("Bitstamp"))
# )
# EXCHANGE_PERIODS = (
# (300, "5-min"),
# (900, "15-min"),
# (1800, "30-min"),
# (7200, "2-hr"),
# (14400, "4-hr"),
# (86400, "1-day")
# )
# STRATEGIES = (
# ("To The Moon", "toTheMoon"),
# ("Strategy 2", "strategy 2"),
# )
# INSTRUMENTS = (
# ("ETH", _("Ethereum")),
# ("DGB", _("DigiByte")),
# ("XRP", _("Ripple")),
# ("XMR", _("Monero")),
# ("LTC", _("LiteCoin")),
# ("LSK", _("LISK")),
# ("STEEM", _("STEEM"))
# )
# MODES = (
# ("backtest", _("Backtest")),
# ("livetest", _("Live Backtest")),
# ("live", _("Live"))
# )
# BUY = 0
# SELL = 1
# ORDER_TYPES = (
# (OrderType.BUY, 'buy'),
# (OrderType.SELL, 'sell')
# )
# OPEN, CANCELLED, PROCESSED = 'open', 'cancelled', 'processed'
# ORDER_STATES = (
# (OPEN, _('open')),
# (CANCELLED, _('cancelled')),
# (PROCESSED, _('processed'))
# )
# SMA, EMA, RSI = 'sma', 'ema', 'rsi'
# INDICATORS = (
# (SMA, _('sma')),
# (EMA, _('ema')),
# (RSI, _('rsi'))
# )
# class OrderType(object):
#
# Path: crumpet/profiles/fields.py
# class PriceField(models.DecimalField):
# def __init__(self, max_digits=30, decimal_places=2, default=0, **kwargs):
# super().__init__(max_digits=max_digits,
# decimal_places=decimal_places,
# default=default,
# **kwargs)
#
# class PercentField(models.DecimalField):
# def __init__(self, max_digits=8, decimal_places=3, **kwargs):
# super().__init__(max_digits=max_digits,
# decimal_places=decimal_places,
# **kwargs)
#
# class AmountField(models.DecimalField):
# def __init__(self, max_digits=30, decimal_places=8, default=0, **kwargs):
# super().__init__(max_digits=max_digits,
# decimal_places=decimal_places,
# default=default,
# **kwargs)
#
# Path: crumpet/profiles/models.py
# class UserAccount(models.Model):
# """User profile"""
# user = models.OneToOneField(
# settings.AUTH_USER_MODEL,
# related_name="user_account"
# )
# exchange = models.CharField(
# verbose_name="Crypto Exchanges",
# choices=constants.CRYPTO_EXCHANGES,
# default=constants.CRYPTO_EXCHANGES[0][0],
# max_length=30
# )
# api_key = models.CharField(max_length=255, blank=True)
# api_secret = models.CharField(max_length=255, blank=True)
# created = models.DateTimeField(auto_now_add=True)
# updated = models.DateTimeField(auto_now=True)
#
# class Meta:
# verbose_name = "User Account"
#
# def __str__(self):
# return u"%s %s" % (self.user.first_name, self.user.last_name)
. Output only the next line. | buy = PercentField() |
Continue the code snippet: <|code_start|> # NEVER BUY OR SELL UNLESS THESE ASSERTS PASS
assert self.buy < 100 - min_fee
assert self.sell > 100 + min_fee
return super().save(*args, **kwargs)
class SimpleMovingAverage(models.Model):
period = models.CharField(max_length=100, default=25)
class Meta:
db_table = 'simple_moving_average_indicator'
def __str__(self):
return 'Simple moving average with a period of ${period}%'.format(
period=self.period)
class ExponentialMovingAverage(models.Model):
period = models.CharField(max_length=100, default=25)
class Meta:
db_table = 'simple_moving_average_indicator'
def __str__(self):
return 'Simple moving average with a period of ${period}%'.format(
period=self.period)
class IndicatorParameter(models.Model):
name = models.CharField(max_length=100)
<|code_end|>
. Use current file imports:
from datetime import datetime
from allauth.account.signals import user_signed_up
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from crumpet.profiles import constants
from crumpet.profiles.fields import PriceField, PercentField, AmountField
from crumpet.profiles.models import UserAccount
and context (classes, functions, or code) from other files:
# Path: crumpet/profiles/constants.py
# CRYPTO_EXCHANGES = (
# ("poloniex", _("Poloniex")),
# ("bittrex", _("Bittrex")),
# ("bitstamp", _("Bitstamp"))
# )
# EXCHANGE_PERIODS = (
# (300, "5-min"),
# (900, "15-min"),
# (1800, "30-min"),
# (7200, "2-hr"),
# (14400, "4-hr"),
# (86400, "1-day")
# )
# STRATEGIES = (
# ("To The Moon", "toTheMoon"),
# ("Strategy 2", "strategy 2"),
# )
# INSTRUMENTS = (
# ("ETH", _("Ethereum")),
# ("DGB", _("DigiByte")),
# ("XRP", _("Ripple")),
# ("XMR", _("Monero")),
# ("LTC", _("LiteCoin")),
# ("LSK", _("LISK")),
# ("STEEM", _("STEEM"))
# )
# MODES = (
# ("backtest", _("Backtest")),
# ("livetest", _("Live Backtest")),
# ("live", _("Live"))
# )
# BUY = 0
# SELL = 1
# ORDER_TYPES = (
# (OrderType.BUY, 'buy'),
# (OrderType.SELL, 'sell')
# )
# OPEN, CANCELLED, PROCESSED = 'open', 'cancelled', 'processed'
# ORDER_STATES = (
# (OPEN, _('open')),
# (CANCELLED, _('cancelled')),
# (PROCESSED, _('processed'))
# )
# SMA, EMA, RSI = 'sma', 'ema', 'rsi'
# INDICATORS = (
# (SMA, _('sma')),
# (EMA, _('ema')),
# (RSI, _('rsi'))
# )
# class OrderType(object):
#
# Path: crumpet/profiles/fields.py
# class PriceField(models.DecimalField):
# def __init__(self, max_digits=30, decimal_places=2, default=0, **kwargs):
# super().__init__(max_digits=max_digits,
# decimal_places=decimal_places,
# default=default,
# **kwargs)
#
# class PercentField(models.DecimalField):
# def __init__(self, max_digits=8, decimal_places=3, **kwargs):
# super().__init__(max_digits=max_digits,
# decimal_places=decimal_places,
# **kwargs)
#
# class AmountField(models.DecimalField):
# def __init__(self, max_digits=30, decimal_places=8, default=0, **kwargs):
# super().__init__(max_digits=max_digits,
# decimal_places=decimal_places,
# default=default,
# **kwargs)
#
# Path: crumpet/profiles/models.py
# class UserAccount(models.Model):
# """User profile"""
# user = models.OneToOneField(
# settings.AUTH_USER_MODEL,
# related_name="user_account"
# )
# exchange = models.CharField(
# verbose_name="Crypto Exchanges",
# choices=constants.CRYPTO_EXCHANGES,
# default=constants.CRYPTO_EXCHANGES[0][0],
# max_length=30
# )
# api_key = models.CharField(max_length=255, blank=True)
# api_secret = models.CharField(max_length=255, blank=True)
# created = models.DateTimeField(auto_now_add=True)
# updated = models.DateTimeField(auto_now=True)
#
# class Meta:
# verbose_name = "User Account"
#
# def __str__(self):
# return u"%s %s" % (self.user.first_name, self.user.last_name)
. Output only the next line. | parameter = AmountField() |
Given snippet: <|code_start|>
@python_2_unicode_compatible
class TradingStrategyProfile(models.Model):
"""Base trading strategy configuration models class."""
name = models.CharField(max_length=100)
note = models.CharField(max_length=255, blank=True)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from datetime import datetime
from allauth.account.signals import user_signed_up
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from crumpet.profiles import constants
from crumpet.profiles.fields import PriceField, PercentField, AmountField
from crumpet.profiles.models import UserAccount
and context:
# Path: crumpet/profiles/constants.py
# CRYPTO_EXCHANGES = (
# ("poloniex", _("Poloniex")),
# ("bittrex", _("Bittrex")),
# ("bitstamp", _("Bitstamp"))
# )
# EXCHANGE_PERIODS = (
# (300, "5-min"),
# (900, "15-min"),
# (1800, "30-min"),
# (7200, "2-hr"),
# (14400, "4-hr"),
# (86400, "1-day")
# )
# STRATEGIES = (
# ("To The Moon", "toTheMoon"),
# ("Strategy 2", "strategy 2"),
# )
# INSTRUMENTS = (
# ("ETH", _("Ethereum")),
# ("DGB", _("DigiByte")),
# ("XRP", _("Ripple")),
# ("XMR", _("Monero")),
# ("LTC", _("LiteCoin")),
# ("LSK", _("LISK")),
# ("STEEM", _("STEEM"))
# )
# MODES = (
# ("backtest", _("Backtest")),
# ("livetest", _("Live Backtest")),
# ("live", _("Live"))
# )
# BUY = 0
# SELL = 1
# ORDER_TYPES = (
# (OrderType.BUY, 'buy'),
# (OrderType.SELL, 'sell')
# )
# OPEN, CANCELLED, PROCESSED = 'open', 'cancelled', 'processed'
# ORDER_STATES = (
# (OPEN, _('open')),
# (CANCELLED, _('cancelled')),
# (PROCESSED, _('processed'))
# )
# SMA, EMA, RSI = 'sma', 'ema', 'rsi'
# INDICATORS = (
# (SMA, _('sma')),
# (EMA, _('ema')),
# (RSI, _('rsi'))
# )
# class OrderType(object):
#
# Path: crumpet/profiles/fields.py
# class PriceField(models.DecimalField):
# def __init__(self, max_digits=30, decimal_places=2, default=0, **kwargs):
# super().__init__(max_digits=max_digits,
# decimal_places=decimal_places,
# default=default,
# **kwargs)
#
# class PercentField(models.DecimalField):
# def __init__(self, max_digits=8, decimal_places=3, **kwargs):
# super().__init__(max_digits=max_digits,
# decimal_places=decimal_places,
# **kwargs)
#
# class AmountField(models.DecimalField):
# def __init__(self, max_digits=30, decimal_places=8, default=0, **kwargs):
# super().__init__(max_digits=max_digits,
# decimal_places=decimal_places,
# default=default,
# **kwargs)
#
# Path: crumpet/profiles/models.py
# class UserAccount(models.Model):
# """User profile"""
# user = models.OneToOneField(
# settings.AUTH_USER_MODEL,
# related_name="user_account"
# )
# exchange = models.CharField(
# verbose_name="Crypto Exchanges",
# choices=constants.CRYPTO_EXCHANGES,
# default=constants.CRYPTO_EXCHANGES[0][0],
# max_length=30
# )
# api_key = models.CharField(max_length=255, blank=True)
# api_secret = models.CharField(max_length=255, blank=True)
# created = models.DateTimeField(auto_now_add=True)
# updated = models.DateTimeField(auto_now=True)
#
# class Meta:
# verbose_name = "User Account"
#
# def __str__(self):
# return u"%s %s" % (self.user.first_name, self.user.last_name)
which might include code, classes, or functions. Output only the next line. | account = models.ForeignKey(UserAccount) |
Given the code snippet: <|code_start|>
class UserAccountForm(forms.Form):
exchange = forms.ChoiceField(
required=True,
label=_("Exchange"),
<|code_end|>
, generate the next line using the imports in this file:
from django import forms
from crumpet.profiles import constants
from django.utils.translation import ugettext_lazy as _
and context (functions, classes, or occasionally code) from other files:
# Path: crumpet/profiles/constants.py
# CRYPTO_EXCHANGES = (
# ("poloniex", _("Poloniex")),
# ("bittrex", _("Bittrex")),
# ("bitstamp", _("Bitstamp"))
# )
# EXCHANGE_PERIODS = (
# (300, "5-min"),
# (900, "15-min"),
# (1800, "30-min"),
# (7200, "2-hr"),
# (14400, "4-hr"),
# (86400, "1-day")
# )
# STRATEGIES = (
# ("To The Moon", "toTheMoon"),
# ("Strategy 2", "strategy 2"),
# )
# INSTRUMENTS = (
# ("ETH", _("Ethereum")),
# ("DGB", _("DigiByte")),
# ("XRP", _("Ripple")),
# ("XMR", _("Monero")),
# ("LTC", _("LiteCoin")),
# ("LSK", _("LISK")),
# ("STEEM", _("STEEM"))
# )
# MODES = (
# ("backtest", _("Backtest")),
# ("livetest", _("Live Backtest")),
# ("live", _("Live"))
# )
# BUY = 0
# SELL = 1
# ORDER_TYPES = (
# (OrderType.BUY, 'buy'),
# (OrderType.SELL, 'sell')
# )
# OPEN, CANCELLED, PROCESSED = 'open', 'cancelled', 'processed'
# ORDER_STATES = (
# (OPEN, _('open')),
# (CANCELLED, _('cancelled')),
# (PROCESSED, _('processed'))
# )
# SMA, EMA, RSI = 'sma', 'ema', 'rsi'
# INDICATORS = (
# (SMA, _('sma')),
# (EMA, _('ema')),
# (RSI, _('rsi'))
# )
# class OrderType(object):
. Output only the next line. | choices=constants.CRYPTO_EXCHANGES |
Given the code snippet: <|code_start|>
class ToTheMoonStrategyForm(forms.Form):
exchange = forms.ChoiceField(
required=False,
label=_("Exchange"),
<|code_end|>
, generate the next line using the imports in this file:
from django import forms
from django.utils.translation import ugettext_lazy as _
from crumpet.profiles import constants
and context (functions, classes, or occasionally code) from other files:
# Path: crumpet/profiles/constants.py
# CRYPTO_EXCHANGES = (
# ("poloniex", _("Poloniex")),
# ("bittrex", _("Bittrex")),
# ("bitstamp", _("Bitstamp"))
# )
# EXCHANGE_PERIODS = (
# (300, "5-min"),
# (900, "15-min"),
# (1800, "30-min"),
# (7200, "2-hr"),
# (14400, "4-hr"),
# (86400, "1-day")
# )
# STRATEGIES = (
# ("To The Moon", "toTheMoon"),
# ("Strategy 2", "strategy 2"),
# )
# INSTRUMENTS = (
# ("ETH", _("Ethereum")),
# ("DGB", _("DigiByte")),
# ("XRP", _("Ripple")),
# ("XMR", _("Monero")),
# ("LTC", _("LiteCoin")),
# ("LSK", _("LISK")),
# ("STEEM", _("STEEM"))
# )
# MODES = (
# ("backtest", _("Backtest")),
# ("livetest", _("Live Backtest")),
# ("live", _("Live"))
# )
# BUY = 0
# SELL = 1
# ORDER_TYPES = (
# (OrderType.BUY, 'buy'),
# (OrderType.SELL, 'sell')
# )
# OPEN, CANCELLED, PROCESSED = 'open', 'cancelled', 'processed'
# ORDER_STATES = (
# (OPEN, _('open')),
# (CANCELLED, _('cancelled')),
# (PROCESSED, _('processed'))
# )
# SMA, EMA, RSI = 'sma', 'ema', 'rsi'
# INDICATORS = (
# (SMA, _('sma')),
# (EMA, _('ema')),
# (RSI, _('rsi'))
# )
# class OrderType(object):
. Output only the next line. | choices=constants.CRYPTO_EXCHANGES |
Continue the code snippet: <|code_start|>
urlpatterns = [
url(
r"^$",
<|code_end|>
. Use current file imports:
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from crumpet.backtests import views
and context (classes, functions, or code) from other files:
# Path: crumpet/backtests/views.py
# class BacktestingView(FormView):
# class LiveTestView(FormView):
# def form_valid(self, form):
# def form_valid(self, form):
. Output only the next line. | login_required(views.BacktestingView.as_view( |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.