Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Predict the next line after this snippet: <|code_start|> dtype=dtype,
initializer=init_ops.constant_initializer(
bias_start, dtype=dtype))
return res + bias_term
def linear(args,
output_size,
bias,
bias_start=0.0,
scope=None,
squeeze=False,
wd=0.0,
input_keep_prob=1.0,
is_train=None):
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
flat_args = [flatten(arg, 1) for arg in args]
if input_keep_prob < 1.0:
assert is_train is not None
flat_args = [
tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob),
lambda: arg) for arg in flat_args
]
flat_out = _linear(
flat_args, output_size, bias, bias_start=bias_start, scope=scope)
<|code_end|>
using the current file's imports:
import tensorflow as tf
from third_party.bi_att_flow.my.tensorflow.general import add_wd
from third_party.bi_att_flow.my.tensorflow.general import exp_mask
from third_party.bi_att_flow.my.tensorflow.general import flatten
from third_party.bi_att_flow.my.tensorflow.general import reconstruct
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
and any relevant context from other files:
# Path: third_party/bi_att_flow/my/tensorflow/general.py
# def add_wd(wd, scope=None):
# scope = scope or tf.get_variable_scope().name
# variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
# with tf.name_scope('weight_decay'):
# for var in variables:
# weight_decay = tf.mul(
# tf.nn.l2_loss(var), wd, name='{}/wd'.format(var.op.name))
# tf.add_to_collection('losses', weight_decay)
#
# Path: third_party/bi_att_flow/my/tensorflow/general.py
# def exp_mask(val, mask, name=None):
# """Give very negative number to unmasked elements in val.
#
# For example, [-3, -2, 10], [True, True, False] -> [-3, -2, -1e9].
# Typically, this effectively masks in exponential space (e.g. softmax)
# Args:
# val: values to be masked
# mask: masking boolean tensor, same shape as tensor
# name: name for output tensor
#
# Returns:
# Same shape as val, where some elements are very small (exponentially
# zero)
# """
# if name is None:
# name = 'exp_mask'
# return tf.add(
# val, (1 - tf.cast(mask, 'float')) * VERY_NEGATIVE_NUMBER, name=name)
#
# Path: third_party/bi_att_flow/my/tensorflow/general.py
# def flatten(tensor, keep):
# fixed_shape = tensor.get_shape().as_list()
# start = len(fixed_shape) - keep
# left = reduce(mul,
# [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start)])
# out_shape = [left] + [
# fixed_shape[i] or tf.shape(tensor)[i]
# for i in range(start, len(fixed_shape))
# ]
# flat = tf.reshape(tensor, out_shape)
# return flat
#
# Path: third_party/bi_att_flow/my/tensorflow/general.py
# def reconstruct(tensor, ref, keep):
# ref_shape = ref.get_shape().as_list()
# tensor_shape = tensor.get_shape().as_list()
# ref_stop = len(ref_shape) - keep
# tensor_start = len(tensor_shape) - keep
# pre_shape = [ref_shape[i] or tf.shape(ref)[i] for i in range(ref_stop)]
# keep_shape = [
# tensor_shape[i] or tf.shape(tensor)[i]
# for i in range(tensor_start, len(tensor_shape))
# ]
# # pre_shape = [tf.shape(ref)[i] for i in range(len(ref.get_shape().as_list()[:-keep]))]
# # keep_shape = tensor.get_shape().as_list()[-keep:]
# target_shape = pre_shape + keep_shape
# out = tf.reshape(tensor, target_shape)
# return out
. Output only the next line. | out = reconstruct(flat_out, args[0], 1) |
Next line prediction: <|code_start|> x: number to correct
dataset: string that identifies the correction to make
Returns:
The rescaled score x.
Raises:
ValueError: if dataset is none of train, dev, test.
"""
if dataset == 'train':
return x * 90843 / (90843 + 8977)
elif dataset == 'dev':
return x * 12635 / (12635 + 1258)
elif dataset == 'test':
return x * 24660 / (24660 + 2588)
else:
raise ValueError('Unexepected value for dataset: {}'.format(dataset))
def main(argv):
del argv # Unused.
if FLAGS.debug:
random.seed(0)
reformulator_instance = reformulator.Reformulator(
hparams_path=FLAGS.hparams_path,
source_prefix=FLAGS.source_prefix,
out_dir=FLAGS.out_dir,
environment_server_address=FLAGS.environment_server_address)
<|code_end|>
. Use current file imports:
(import codecs
import json
import os
import random
import time
import numpy as np
import tensorflow as tf
from absl import app
from absl import flags
from px.nmt import environment_client
from px.nmt import reformulator
from px.nmt.utils.logging_utils import safe_string
from px.proto import reformulator_pb2
from px.selector import selector_keras as selector
from px.utils import eval_utils
from third_party.nmt.utils import misc_utils)
and context including class names, function names, or small code snippets from other files:
# Path: px/nmt/environment_client.py
# def multi_call_environment(pool, stub, request, timeouts):
# def single_call_environment(args):
# def __init__(self, *args, **kwargs):
# def __setitem__(self, key, value):
# def _check_size_limit(self):
# def make_cache_key(question, docid):
# def make_environment_reward_fn(environment_server,
# timeouts=None,
# mode='squad',
# use_cache=False,
# cache_size=-1,
# env_call_parallelism=1):
# def environment_reward_fn(questions, doc_ids):
# class LimitedSizeDict(OrderedDict):
#
# Path: px/nmt/reformulator.py
# def load_hparams(hparams_file, out_dir):
# def __init__(self, hparams_path, source_prefix, out_dir,
# environment_server_address):
# def tokenize(self, questions, prefix=""):
# def detokenize(self, text):
# def reformulate(self, questions, inference_mode, trie_excludes=None):
# def train(self, sources, annotations):
# class Reformulator(object):
#
# Path: px/nmt/utils/logging_utils.py
# def safe_string(s):
# """Safely converts unicode and plain strings to byte strings."""
# if isinstance(s, unicode):
# try:
# s = s.encode('utf-8')
# except UnicodeDecodeError:
# s = repr(s)
# return s
#
# Path: px/selector/selector_keras.py
# FLAGS = flags.FLAGS
# class Selector(object):
# def __init__(self):
# def load(self, name):
# def save(self, name):
# def _get_checkpoint_paths(self, name):
# def _build_embedding_matrix(self):
# def _build_model(self, embedding_matrix):
# def encode_labels(self, labels):
# def encode_texts(self, texts):
# def encode_data(self, questions, original_questions, answers, labels):
# def encode_train(self, question_lists, answer_lists, score_lists):
# def train(self, questions, answers, scores):
# def eval(self, question_lists, answer_lists, score_lists):
#
# Path: px/utils/eval_utils.py
# def compute_f1_single(prediction, ground_truth):
# def compute_f1(prediction, gold_answers):
#
# Path: third_party/nmt/utils/misc_utils.py
# def check_tensorflow_version():
# def safe_exp(value):
# def print_time(s, start_time):
# def print_out(s, f=None, new_line=True):
# def print_hparams(hparams, skip_patterns=None, header=None):
# def load_hparams(model_dir):
# def maybe_parse_standard_hparams(hparams, hparams_path):
# def save_hparams(out_dir, hparams):
# def debug_tensor(s, msg=None, summarize=10):
# def add_summary(summary_writer, global_step, tag, value):
# def get_config_proto(log_device_placement=False, allow_soft_placement=True,
# num_intra_threads=0, num_inter_threads=0):
# def format_text(words):
# def format_bpe_text(symbols, delimiter=b"@@"):
# def format_spm_text(symbols):
. Output only the next line. | environment_fn = environment_client.make_environment_reward_fn( |
Continue the code snippet: <|code_start|>
def _correct_searchqa_score(x, dataset):
"""Method to correct for deleted datapoints in the sets.
Args:
x: number to correct
dataset: string that identifies the correction to make
Returns:
The rescaled score x.
Raises:
ValueError: if dataset is none of train, dev, test.
"""
if dataset == 'train':
return x * 90843 / (90843 + 8977)
elif dataset == 'dev':
return x * 12635 / (12635 + 1258)
elif dataset == 'test':
return x * 24660 / (24660 + 2588)
else:
raise ValueError('Unexepected value for dataset: {}'.format(dataset))
def main(argv):
del argv # Unused.
if FLAGS.debug:
random.seed(0)
<|code_end|>
. Use current file imports:
import codecs
import json
import os
import random
import time
import numpy as np
import tensorflow as tf
from absl import app
from absl import flags
from px.nmt import environment_client
from px.nmt import reformulator
from px.nmt.utils.logging_utils import safe_string
from px.proto import reformulator_pb2
from px.selector import selector_keras as selector
from px.utils import eval_utils
from third_party.nmt.utils import misc_utils
and context (classes, functions, or code) from other files:
# Path: px/nmt/environment_client.py
# def multi_call_environment(pool, stub, request, timeouts):
# def single_call_environment(args):
# def __init__(self, *args, **kwargs):
# def __setitem__(self, key, value):
# def _check_size_limit(self):
# def make_cache_key(question, docid):
# def make_environment_reward_fn(environment_server,
# timeouts=None,
# mode='squad',
# use_cache=False,
# cache_size=-1,
# env_call_parallelism=1):
# def environment_reward_fn(questions, doc_ids):
# class LimitedSizeDict(OrderedDict):
#
# Path: px/nmt/reformulator.py
# def load_hparams(hparams_file, out_dir):
# def __init__(self, hparams_path, source_prefix, out_dir,
# environment_server_address):
# def tokenize(self, questions, prefix=""):
# def detokenize(self, text):
# def reformulate(self, questions, inference_mode, trie_excludes=None):
# def train(self, sources, annotations):
# class Reformulator(object):
#
# Path: px/nmt/utils/logging_utils.py
# def safe_string(s):
# """Safely converts unicode and plain strings to byte strings."""
# if isinstance(s, unicode):
# try:
# s = s.encode('utf-8')
# except UnicodeDecodeError:
# s = repr(s)
# return s
#
# Path: px/selector/selector_keras.py
# FLAGS = flags.FLAGS
# class Selector(object):
# def __init__(self):
# def load(self, name):
# def save(self, name):
# def _get_checkpoint_paths(self, name):
# def _build_embedding_matrix(self):
# def _build_model(self, embedding_matrix):
# def encode_labels(self, labels):
# def encode_texts(self, texts):
# def encode_data(self, questions, original_questions, answers, labels):
# def encode_train(self, question_lists, answer_lists, score_lists):
# def train(self, questions, answers, scores):
# def eval(self, question_lists, answer_lists, score_lists):
#
# Path: px/utils/eval_utils.py
# def compute_f1_single(prediction, ground_truth):
# def compute_f1(prediction, gold_answers):
#
# Path: third_party/nmt/utils/misc_utils.py
# def check_tensorflow_version():
# def safe_exp(value):
# def print_time(s, start_time):
# def print_out(s, f=None, new_line=True):
# def print_hparams(hparams, skip_patterns=None, header=None):
# def load_hparams(model_dir):
# def maybe_parse_standard_hparams(hparams, hparams_path):
# def save_hparams(out_dir, hparams):
# def debug_tensor(s, msg=None, summarize=10):
# def add_summary(summary_writer, global_step, tag, value):
# def get_config_proto(log_device_placement=False, allow_soft_placement=True,
# num_intra_threads=0, num_inter_threads=0):
# def format_text(words):
# def format_bpe_text(symbols, delimiter=b"@@"):
# def format_spm_text(symbols):
. Output only the next line. | reformulator_instance = reformulator.Reformulator( |
Next line prediction: <|code_start|> f1s.extend(f1_scores)
return np.mean(f1s)
def _run_eval_with_selector(questions, annotations, docid_2_answer,
reformulator_instance, selector_model, batch_size,
environment_fn):
"""Runs a joined eval with the reformulator and selector model."""
f1s = []
for batch_id, (questions_batch, annotations_batch) in enumerate(
batch(questions, annotations, batch_size)):
responses = reformulator_instance.reformulate(
questions=questions_batch,
inference_mode=reformulator_pb2.ReformulatorRequest.BEAM_SEARCH)
# Discard answers.
reformulations = [[rf.reformulation for rf in rsp] for rsp in responses]
question_and_rewrites, answers, scores = query_environment(
original_questions=questions_batch,
rewrites=reformulations,
annotations=annotations_batch,
environment_fn=environment_fn,
docid_2_answer=docid_2_answer,
token_level_f1_scores=True)
f1s.append(selector_model.eval(question_and_rewrites, answers, scores))
if FLAGS.debug and batch_id == 0:
print('Running Eval...')
print('Questions: {}, Annotation: {}'.format(
<|code_end|>
. Use current file imports:
(import codecs
import json
import os
import random
import time
import numpy as np
import tensorflow as tf
from absl import app
from absl import flags
from px.nmt import environment_client
from px.nmt import reformulator
from px.nmt.utils.logging_utils import safe_string
from px.proto import reformulator_pb2
from px.selector import selector_keras as selector
from px.utils import eval_utils
from third_party.nmt.utils import misc_utils)
and context including class names, function names, or small code snippets from other files:
# Path: px/nmt/environment_client.py
# def multi_call_environment(pool, stub, request, timeouts):
# def single_call_environment(args):
# def __init__(self, *args, **kwargs):
# def __setitem__(self, key, value):
# def _check_size_limit(self):
# def make_cache_key(question, docid):
# def make_environment_reward_fn(environment_server,
# timeouts=None,
# mode='squad',
# use_cache=False,
# cache_size=-1,
# env_call_parallelism=1):
# def environment_reward_fn(questions, doc_ids):
# class LimitedSizeDict(OrderedDict):
#
# Path: px/nmt/reformulator.py
# def load_hparams(hparams_file, out_dir):
# def __init__(self, hparams_path, source_prefix, out_dir,
# environment_server_address):
# def tokenize(self, questions, prefix=""):
# def detokenize(self, text):
# def reformulate(self, questions, inference_mode, trie_excludes=None):
# def train(self, sources, annotations):
# class Reformulator(object):
#
# Path: px/nmt/utils/logging_utils.py
# def safe_string(s):
# """Safely converts unicode and plain strings to byte strings."""
# if isinstance(s, unicode):
# try:
# s = s.encode('utf-8')
# except UnicodeDecodeError:
# s = repr(s)
# return s
#
# Path: px/selector/selector_keras.py
# FLAGS = flags.FLAGS
# class Selector(object):
# def __init__(self):
# def load(self, name):
# def save(self, name):
# def _get_checkpoint_paths(self, name):
# def _build_embedding_matrix(self):
# def _build_model(self, embedding_matrix):
# def encode_labels(self, labels):
# def encode_texts(self, texts):
# def encode_data(self, questions, original_questions, answers, labels):
# def encode_train(self, question_lists, answer_lists, score_lists):
# def train(self, questions, answers, scores):
# def eval(self, question_lists, answer_lists, score_lists):
#
# Path: px/utils/eval_utils.py
# def compute_f1_single(prediction, ground_truth):
# def compute_f1(prediction, gold_answers):
#
# Path: third_party/nmt/utils/misc_utils.py
# def check_tensorflow_version():
# def safe_exp(value):
# def print_time(s, start_time):
# def print_out(s, f=None, new_line=True):
# def print_hparams(hparams, skip_patterns=None, header=None):
# def load_hparams(model_dir):
# def maybe_parse_standard_hparams(hparams, hparams_path):
# def save_hparams(out_dir, hparams):
# def debug_tensor(s, msg=None, summarize=10):
# def add_summary(summary_writer, global_step, tag, value):
# def get_config_proto(log_device_placement=False, allow_soft_placement=True,
# num_intra_threads=0, num_inter_threads=0):
# def format_text(words):
# def format_bpe_text(symbols, delimiter=b"@@"):
# def format_spm_text(symbols):
. Output only the next line. | safe_string(questions_batch[0]), safe_string(annotations_batch[0]))) |
Next line prediction: <|code_start|># Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-line-too-long
r"""Main for training the reformulator and the selector.
Additional flags defined in selector_keras.py:
--glove_path: Path to pretrained Glove embeddings.
--save_path: Directory where models will be saved to/loaded from.
"""
# pylint: enable=g-line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
<|code_end|>
. Use current file imports:
(import codecs
import json
import os
import random
import time
import numpy as np
import tensorflow as tf
from absl import app
from absl import flags
from px.nmt import environment_client
from px.nmt import reformulator
from px.nmt.utils.logging_utils import safe_string
from px.proto import reformulator_pb2
from px.selector import selector_keras as selector
from px.utils import eval_utils
from third_party.nmt.utils import misc_utils)
and context including class names, function names, or small code snippets from other files:
# Path: px/nmt/environment_client.py
# def multi_call_environment(pool, stub, request, timeouts):
# def single_call_environment(args):
# def __init__(self, *args, **kwargs):
# def __setitem__(self, key, value):
# def _check_size_limit(self):
# def make_cache_key(question, docid):
# def make_environment_reward_fn(environment_server,
# timeouts=None,
# mode='squad',
# use_cache=False,
# cache_size=-1,
# env_call_parallelism=1):
# def environment_reward_fn(questions, doc_ids):
# class LimitedSizeDict(OrderedDict):
#
# Path: px/nmt/reformulator.py
# def load_hparams(hparams_file, out_dir):
# def __init__(self, hparams_path, source_prefix, out_dir,
# environment_server_address):
# def tokenize(self, questions, prefix=""):
# def detokenize(self, text):
# def reformulate(self, questions, inference_mode, trie_excludes=None):
# def train(self, sources, annotations):
# class Reformulator(object):
#
# Path: px/nmt/utils/logging_utils.py
# def safe_string(s):
# """Safely converts unicode and plain strings to byte strings."""
# if isinstance(s, unicode):
# try:
# s = s.encode('utf-8')
# except UnicodeDecodeError:
# s = repr(s)
# return s
#
# Path: px/selector/selector_keras.py
# FLAGS = flags.FLAGS
# class Selector(object):
# def __init__(self):
# def load(self, name):
# def save(self, name):
# def _get_checkpoint_paths(self, name):
# def _build_embedding_matrix(self):
# def _build_model(self, embedding_matrix):
# def encode_labels(self, labels):
# def encode_texts(self, texts):
# def encode_data(self, questions, original_questions, answers, labels):
# def encode_train(self, question_lists, answer_lists, score_lists):
# def train(self, questions, answers, scores):
# def eval(self, question_lists, answer_lists, score_lists):
#
# Path: px/utils/eval_utils.py
# def compute_f1_single(prediction, ground_truth):
# def compute_f1(prediction, gold_answers):
#
# Path: third_party/nmt/utils/misc_utils.py
# def check_tensorflow_version():
# def safe_exp(value):
# def print_time(s, start_time):
# def print_out(s, f=None, new_line=True):
# def print_hparams(hparams, skip_patterns=None, header=None):
# def load_hparams(model_dir):
# def maybe_parse_standard_hparams(hparams, hparams_path):
# def save_hparams(out_dir, hparams):
# def debug_tensor(s, msg=None, summarize=10):
# def add_summary(summary_writer, global_step, tag, value):
# def get_config_proto(log_device_placement=False, allow_soft_placement=True,
# num_intra_threads=0, num_inter_threads=0):
# def format_text(words):
# def format_bpe_text(symbols, delimiter=b"@@"):
# def format_spm_text(symbols):
. Output only the next line. | from px.selector import selector_keras as selector |
Given snippet: <|code_start|> If False, F1 scores are from the BiDAF environment, which uses the
intersection between predicted answer span and ground-truth spans.
Returns:
originals_and_rewrites: a nested list of strings of lengths
[batch_size, n_rewrites + 1].
answers: a nested list of strings of lengths [batch_size, n_rewrites + 1].
f1_scores: a float32 array of shape [batch_size, n_rewrites + 1]
representing the scores for each answer.
"""
assert len(set(map(len, rewrites))) == 1, (
'Not all examples have the same number of rewrites: {}'.format(rewrites))
# Prepend original question to the list of rewrites.
originals_and_rewrites = np.array([
[original_question] + list(rewrite_list)
for original_question, rewrite_list in zip(original_questions, rewrites)
])
# Expand annotations so they have the same shape of rewrites:
# [batch_size, n_rewrites + 1].
annotations = np.array([[annotation] * originals_and_rewrites.shape[1]
for annotation in annotations]).flatten()
f1_scores, _, answers = environment_fn(originals_and_rewrites.flatten(),
annotations)
assert len(annotations) == len(answers)
if token_level_f1_scores:
f1_scores = np.array([
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import codecs
import json
import os
import random
import time
import numpy as np
import tensorflow as tf
from absl import app
from absl import flags
from px.nmt import environment_client
from px.nmt import reformulator
from px.nmt.utils.logging_utils import safe_string
from px.proto import reformulator_pb2
from px.selector import selector_keras as selector
from px.utils import eval_utils
from third_party.nmt.utils import misc_utils
and context:
# Path: px/nmt/environment_client.py
# def multi_call_environment(pool, stub, request, timeouts):
# def single_call_environment(args):
# def __init__(self, *args, **kwargs):
# def __setitem__(self, key, value):
# def _check_size_limit(self):
# def make_cache_key(question, docid):
# def make_environment_reward_fn(environment_server,
# timeouts=None,
# mode='squad',
# use_cache=False,
# cache_size=-1,
# env_call_parallelism=1):
# def environment_reward_fn(questions, doc_ids):
# class LimitedSizeDict(OrderedDict):
#
# Path: px/nmt/reformulator.py
# def load_hparams(hparams_file, out_dir):
# def __init__(self, hparams_path, source_prefix, out_dir,
# environment_server_address):
# def tokenize(self, questions, prefix=""):
# def detokenize(self, text):
# def reformulate(self, questions, inference_mode, trie_excludes=None):
# def train(self, sources, annotations):
# class Reformulator(object):
#
# Path: px/nmt/utils/logging_utils.py
# def safe_string(s):
# """Safely converts unicode and plain strings to byte strings."""
# if isinstance(s, unicode):
# try:
# s = s.encode('utf-8')
# except UnicodeDecodeError:
# s = repr(s)
# return s
#
# Path: px/selector/selector_keras.py
# FLAGS = flags.FLAGS
# class Selector(object):
# def __init__(self):
# def load(self, name):
# def save(self, name):
# def _get_checkpoint_paths(self, name):
# def _build_embedding_matrix(self):
# def _build_model(self, embedding_matrix):
# def encode_labels(self, labels):
# def encode_texts(self, texts):
# def encode_data(self, questions, original_questions, answers, labels):
# def encode_train(self, question_lists, answer_lists, score_lists):
# def train(self, questions, answers, scores):
# def eval(self, question_lists, answer_lists, score_lists):
#
# Path: px/utils/eval_utils.py
# def compute_f1_single(prediction, ground_truth):
# def compute_f1(prediction, gold_answers):
#
# Path: third_party/nmt/utils/misc_utils.py
# def check_tensorflow_version():
# def safe_exp(value):
# def print_time(s, start_time):
# def print_out(s, f=None, new_line=True):
# def print_hparams(hparams, skip_patterns=None, header=None):
# def load_hparams(model_dir):
# def maybe_parse_standard_hparams(hparams, hparams_path):
# def save_hparams(out_dir, hparams):
# def debug_tensor(s, msg=None, summarize=10):
# def add_summary(summary_writer, global_step, tag, value):
# def get_config_proto(log_device_placement=False, allow_soft_placement=True,
# num_intra_threads=0, num_inter_threads=0):
# def format_text(words):
# def format_bpe_text(symbols, delimiter=b"@@"):
# def format_spm_text(symbols):
which might include code, classes, or functions. Output only the next line. | eval_utils.compute_f1_single( |
Given snippet: <|code_start|> for epoch in range(FLAGS.epochs):
for batch_id, (questions_batch, annotations_batch) in enumerate(
batch(questions, annotations, FLAGS.batch_size_train)):
# Run eval every num_steps_per_eval batches.
if global_step % FLAGS.num_steps_per_eval is 0:
if FLAGS.debug:
print('Running eval...')
eval_start_time = time.time()
if not FLAGS.enable_selector_training:
eval_f1_avg = _run_reformulator_eval(
dev_questions, dev_annotations, reformulator_instance,
environment_fn, FLAGS.batch_size_eval)
else:
eval_f1_avg = _run_eval_with_selector(
questions=dev_questions,
annotations=dev_annotations,
docid_2_answer=dev_docid_2_answer,
reformulator_instance=reformulator_instance,
selector_model=selector_model,
batch_size=FLAGS.batch_size_eval,
environment_fn=eval_environment_fn)
# Correct the average F1 score for deleted datapoints in the SearchQA
# dataset.
if FLAGS.mode == 'searchqa':
eval_f1_avg = _correct_searchqa_score(eval_f1_avg, dataset='dev')
eval_time = time.time() - eval_start_time
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import codecs
import json
import os
import random
import time
import numpy as np
import tensorflow as tf
from absl import app
from absl import flags
from px.nmt import environment_client
from px.nmt import reformulator
from px.nmt.utils.logging_utils import safe_string
from px.proto import reformulator_pb2
from px.selector import selector_keras as selector
from px.utils import eval_utils
from third_party.nmt.utils import misc_utils
and context:
# Path: px/nmt/environment_client.py
# def multi_call_environment(pool, stub, request, timeouts):
# def single_call_environment(args):
# def __init__(self, *args, **kwargs):
# def __setitem__(self, key, value):
# def _check_size_limit(self):
# def make_cache_key(question, docid):
# def make_environment_reward_fn(environment_server,
# timeouts=None,
# mode='squad',
# use_cache=False,
# cache_size=-1,
# env_call_parallelism=1):
# def environment_reward_fn(questions, doc_ids):
# class LimitedSizeDict(OrderedDict):
#
# Path: px/nmt/reformulator.py
# def load_hparams(hparams_file, out_dir):
# def __init__(self, hparams_path, source_prefix, out_dir,
# environment_server_address):
# def tokenize(self, questions, prefix=""):
# def detokenize(self, text):
# def reformulate(self, questions, inference_mode, trie_excludes=None):
# def train(self, sources, annotations):
# class Reformulator(object):
#
# Path: px/nmt/utils/logging_utils.py
# def safe_string(s):
# """Safely converts unicode and plain strings to byte strings."""
# if isinstance(s, unicode):
# try:
# s = s.encode('utf-8')
# except UnicodeDecodeError:
# s = repr(s)
# return s
#
# Path: px/selector/selector_keras.py
# FLAGS = flags.FLAGS
# class Selector(object):
# def __init__(self):
# def load(self, name):
# def save(self, name):
# def _get_checkpoint_paths(self, name):
# def _build_embedding_matrix(self):
# def _build_model(self, embedding_matrix):
# def encode_labels(self, labels):
# def encode_texts(self, texts):
# def encode_data(self, questions, original_questions, answers, labels):
# def encode_train(self, question_lists, answer_lists, score_lists):
# def train(self, questions, answers, scores):
# def eval(self, question_lists, answer_lists, score_lists):
#
# Path: px/utils/eval_utils.py
# def compute_f1_single(prediction, ground_truth):
# def compute_f1(prediction, gold_answers):
#
# Path: third_party/nmt/utils/misc_utils.py
# def check_tensorflow_version():
# def safe_exp(value):
# def print_time(s, start_time):
# def print_out(s, f=None, new_line=True):
# def print_hparams(hparams, skip_patterns=None, header=None):
# def load_hparams(model_dir):
# def maybe_parse_standard_hparams(hparams, hparams_path):
# def save_hparams(out_dir, hparams):
# def debug_tensor(s, msg=None, summarize=10):
# def add_summary(summary_writer, global_step, tag, value):
# def get_config_proto(log_device_placement=False, allow_soft_placement=True,
# num_intra_threads=0, num_inter_threads=0):
# def format_text(words):
# def format_bpe_text(symbols, delimiter=b"@@"):
# def format_spm_text(symbols):
which might include code, classes, or functions. Output only the next line. | misc_utils.add_summary( |
Continue the code snippet: <|code_start|> annot_dataset = None
ctx_dataset = None
if use_placeholders:
src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
src_dataset = tf.data.Dataset.from_tensor_slices(src_placeholder)
tgt_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
tgt_dataset = tf.data.Dataset.from_tensor_slices(tgt_placeholder)
if hparams.use_rl:
annot_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
annot_dataset = tf.data.Dataset.from_tensor_slices(annot_placeholder)
else:
src_file = "%s.%s" % (hparams.train_prefix, hparams.src)
tgt_file = "%s.%s" % (hparams.train_prefix, hparams.tgt)
ctx_file = None
if hparams.ctx is not None:
ctx_file = "%s.%s" % (hparams.train_prefix, hparams.ctx)
src_dataset = tf.data.TextLineDataset(src_file)
tgt_dataset = tf.data.TextLineDataset(tgt_file)
if hparams.train_annotations is not None:
annot_dataset = tf.data.TextLineDataset(hparams.train_annotations)
if ctx_file is not None:
ctx_dataset = tf.data.TextLineDataset(ctx_file)
skip_count_placeholder = tf.placeholder(shape=(), dtype=tf.int64)
<|code_end|>
. Use current file imports:
import collections
import os
import six
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from third_party.nmt.utils import misc_utils as utils
from px.nmt.utils import iterator_utils
from px.nmt.utils import vocab_utils
and context (classes, functions, or code) from other files:
# Path: third_party/nmt/utils/misc_utils.py
# def check_tensorflow_version():
# def safe_exp(value):
# def print_time(s, start_time):
# def print_out(s, f=None, new_line=True):
# def print_hparams(hparams, skip_patterns=None, header=None):
# def load_hparams(model_dir):
# def maybe_parse_standard_hparams(hparams, hparams_path):
# def save_hparams(out_dir, hparams):
# def debug_tensor(s, msg=None, summarize=10):
# def add_summary(summary_writer, global_step, tag, value):
# def get_config_proto(log_device_placement=False, allow_soft_placement=True,
# num_intra_threads=0, num_inter_threads=0):
# def format_text(words):
# def format_bpe_text(symbols, delimiter=b"@@"):
# def format_spm_text(symbols):
#
# Path: px/nmt/utils/iterator_utils.py
# class BatchedInput(
# collections.namedtuple(
# "BatchedInput", ("initializer", "source_string", "source",
# "target_input", "target_output", "weights", "context",
# "annotation", "trie_exclude", "source_sequence_length",
# "target_sequence_length", "context_sequence_length"))):
# def get_infer_iterator(hparams,
# src_dataset,
# src_vocab_table,
# batch_size,
# eos,
# ctx_dataset=None,
# annot_dataset=None,
# trie_exclude_dataset=None,
# src_max_len=None,
# tgt_vocab_table=None):
# def batching_func(x):
# def get_iterator(hparams,
# src_dataset,
# tgt_dataset,
# src_vocab_table,
# tgt_vocab_table,
# batch_size,
# sos,
# eos,
# random_seed,
# num_buckets,
# wgt_dataset=None,
# ctx_dataset=None,
# annot_dataset=None,
# src_max_len=None,
# tgt_max_len=None,
# num_parallel_calls=4,
# output_buffer_size=None,
# skip_count=None,
# num_shards=1,
# shard_index=0,
# reshuffle_each_iteration=True):
# def batching_func(x):
# def key_func(unused_1, unused_2, unused_3, unused_4, unused_5, unused_6,
# unused_7, unused_8, src_len, tgt_len, unused_9):
# def reduce_func(unused_key, windowed_data):
#
# Path: px/nmt/utils/vocab_utils.py
# UNK = "<unk>"
# SOS = "<s>"
# EOS = "</s>"
# UNK_ID = 0
# def load_vocab(vocab_file):
# def check_vocab(vocab_file,
# out_dir,
# check_special_token=True,
# sos=None,
# eos=None,
# unk=None,
# context_delimiter=None):
# def create_vocab_tables(src_vocab_file, tgt_vocab_file, share_vocab):
# def load_embed_txt(embed_file):
. Output only the next line. | iterator = iterator_utils.get_iterator( |
Predict the next line after this snippet: <|code_start|> "ExtraArgs",
("single_cell_fn", "model_device_fn", "attention_mechanism_fn"))):
pass
class TrainModel(
collections.namedtuple(
"TrainModel",
("graph", "model", "iterator", "src_placeholder", "tgt_placeholder",
"annot_placeholder", "skip_count_placeholder"))):
pass
def create_train_model(model_creator,
hparams,
scope=None,
num_workers=1,
jobid=0,
graph=None,
extra_args=None,
trie=None,
use_placeholders=False):
"""Create train graph, model, and iterator."""
src_vocab_file = hparams.src_vocab_file
tgt_vocab_file = hparams.tgt_vocab_file
if not graph:
graph = tf.Graph()
with graph.as_default(), tf.container(scope or "train"):
<|code_end|>
using the current file's imports:
import collections
import os
import six
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from third_party.nmt.utils import misc_utils as utils
from px.nmt.utils import iterator_utils
from px.nmt.utils import vocab_utils
and any relevant context from other files:
# Path: third_party/nmt/utils/misc_utils.py
# def check_tensorflow_version():
# def safe_exp(value):
# def print_time(s, start_time):
# def print_out(s, f=None, new_line=True):
# def print_hparams(hparams, skip_patterns=None, header=None):
# def load_hparams(model_dir):
# def maybe_parse_standard_hparams(hparams, hparams_path):
# def save_hparams(out_dir, hparams):
# def debug_tensor(s, msg=None, summarize=10):
# def add_summary(summary_writer, global_step, tag, value):
# def get_config_proto(log_device_placement=False, allow_soft_placement=True,
# num_intra_threads=0, num_inter_threads=0):
# def format_text(words):
# def format_bpe_text(symbols, delimiter=b"@@"):
# def format_spm_text(symbols):
#
# Path: px/nmt/utils/iterator_utils.py
# class BatchedInput(
# collections.namedtuple(
# "BatchedInput", ("initializer", "source_string", "source",
# "target_input", "target_output", "weights", "context",
# "annotation", "trie_exclude", "source_sequence_length",
# "target_sequence_length", "context_sequence_length"))):
# def get_infer_iterator(hparams,
# src_dataset,
# src_vocab_table,
# batch_size,
# eos,
# ctx_dataset=None,
# annot_dataset=None,
# trie_exclude_dataset=None,
# src_max_len=None,
# tgt_vocab_table=None):
# def batching_func(x):
# def get_iterator(hparams,
# src_dataset,
# tgt_dataset,
# src_vocab_table,
# tgt_vocab_table,
# batch_size,
# sos,
# eos,
# random_seed,
# num_buckets,
# wgt_dataset=None,
# ctx_dataset=None,
# annot_dataset=None,
# src_max_len=None,
# tgt_max_len=None,
# num_parallel_calls=4,
# output_buffer_size=None,
# skip_count=None,
# num_shards=1,
# shard_index=0,
# reshuffle_each_iteration=True):
# def batching_func(x):
# def key_func(unused_1, unused_2, unused_3, unused_4, unused_5, unused_6,
# unused_7, unused_8, src_len, tgt_len, unused_9):
# def reduce_func(unused_key, windowed_data):
#
# Path: px/nmt/utils/vocab_utils.py
# UNK = "<unk>"
# SOS = "<s>"
# EOS = "</s>"
# UNK_ID = 0
# def load_vocab(vocab_file):
# def check_vocab(vocab_file,
# out_dir,
# check_special_token=True,
# sos=None,
# eos=None,
# unk=None,
# context_delimiter=None):
# def create_vocab_tables(src_vocab_file, tgt_vocab_file, share_vocab):
# def load_embed_txt(embed_file):
. Output only the next line. | src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables( |
Here is a snippet: <|code_start|> xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi] # process tokens
# given xi, add chars
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
# get words
qi = word_tokenize(qa['question'])
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
<|code_end|>
. Write the next line using the current file imports:
import argparse
import json
import os
import tensorflow as tf
import nltk
from collections import Counter
from tqdm import tqdm
from third_party.bi_att_flow.squad.utils import get_word_span, get_word_idx, process_tokens
from my.corenlp_interface import CoreNLPInterface
and context from other files:
# Path: third_party/bi_att_flow/squad/utils.py
# def get_word_span(context, wordss, start, stop):
# spanss = get_2d_spans(context, wordss)
# idxs = []
# for sent_idx, spans in enumerate(spanss):
# for word_idx, span in enumerate(spans):
# if not (stop <= span[0] or start >= span[1]):
# idxs.append((sent_idx, word_idx))
#
# assert len(idxs) > 0, "{} {} {} {}".format(context, spanss, start, stop)
# return idxs[0], (idxs[-1][0], idxs[-1][1] + 1)
#
# def get_word_idx(context, wordss, idx):
# spanss = get_2d_spans(context, wordss)
# return spanss[idx[0]][idx[1]][0]
#
# def process_tokens(temp_tokens):
# tokens = []
# for token in temp_tokens:
# l = (u"-", u"\u2212", u"\u2014", u"\u2013", u"/", u"~", '"', u"'",
# u"\u201C", u"\u2019", u"\u201D", u"\u2018", u"\u00B0")
# # \u2013 is en-dash. Used for number to number
# # l = ("-", "\u2212", "\u2014", "\u2013")
# # l = ("\u2013",)
# tokens.extend(re.split(u"([{}])".format(u"".join(l)), token))
# return tokens
, which may include functions, classes, or code. Output only the next line. | yi0, yi1 = get_word_span(context, xi, answer_start, answer_stop) |
Based on the snippet: <|code_start|>
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
# get words
qi = word_tokenize(qa['question'])
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
yi0, yi1 = get_word_span(context, xi, answer_start, answer_stop)
# yi0 = answer['answer_word_start'] or [0, 0]
# yi1 = answer['answer_word_stop'] or [0, 1]
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
<|code_end|>
, predict the immediate next line with the help of imports:
import argparse
import json
import os
import tensorflow as tf
import nltk
from collections import Counter
from tqdm import tqdm
from third_party.bi_att_flow.squad.utils import get_word_span, get_word_idx, process_tokens
from my.corenlp_interface import CoreNLPInterface
and context (classes, functions, sometimes code) from other files:
# Path: third_party/bi_att_flow/squad/utils.py
# def get_word_span(context, wordss, start, stop):
# spanss = get_2d_spans(context, wordss)
# idxs = []
# for sent_idx, spans in enumerate(spanss):
# for word_idx, span in enumerate(spans):
# if not (stop <= span[0] or start >= span[1]):
# idxs.append((sent_idx, word_idx))
#
# assert len(idxs) > 0, "{} {} {} {}".format(context, spanss, start, stop)
# return idxs[0], (idxs[-1][0], idxs[-1][1] + 1)
#
# def get_word_idx(context, wordss, idx):
# spanss = get_2d_spans(context, wordss)
# return spanss[idx[0]][idx[1]][0]
#
# def process_tokens(temp_tokens):
# tokens = []
# for token in temp_tokens:
# l = (u"-", u"\u2212", u"\u2014", u"\u2013", u"/", u"~", '"', u"'",
# u"\u201C", u"\u2019", u"\u201D", u"\u2018", u"\u00B0")
# # \u2013 is en-dash. Used for number to number
# # l = ("-", "\u2212", "\u2014", "\u2013")
# # l = ("\u2013",)
# tokens.extend(re.split(u"([{}])".format(u"".join(l)), token))
# return tokens
. Output only the next line. | i0 = get_word_idx(context, xi, yi0) |
Here is a snippet: <|code_start|> raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = in_path or os.path.join(args.source_dir,
'{}-v1.1.json'.format(data_type))
source_data = json.load(tf.gfile.Open(source_path, 'r'))
q, cq, y, rx, rcx, ids, idxs = [], [], [], [], [], [], []
cy = []
x, cx = [], []
answerss = []
p = []
word_counter, char_counter, lower_word_counter = Counter(), Counter(
), Counter()
start_ai = int(round(len(source_data['data']) * start_ratio))
stop_ai = int(round(len(source_data['data']) * stop_ratio))
for ai, article in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
xp, cxp = [], []
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for pi, para in enumerate(article['paragraphs']):
# wordss
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
<|code_end|>
. Write the next line using the current file imports:
import argparse
import json
import os
import tensorflow as tf
import nltk
from collections import Counter
from tqdm import tqdm
from third_party.bi_att_flow.squad.utils import get_word_span, get_word_idx, process_tokens
from my.corenlp_interface import CoreNLPInterface
and context from other files:
# Path: third_party/bi_att_flow/squad/utils.py
# def get_word_span(context, wordss, start, stop):
# spanss = get_2d_spans(context, wordss)
# idxs = []
# for sent_idx, spans in enumerate(spanss):
# for word_idx, span in enumerate(spans):
# if not (stop <= span[0] or start >= span[1]):
# idxs.append((sent_idx, word_idx))
#
# assert len(idxs) > 0, "{} {} {} {}".format(context, spanss, start, stop)
# return idxs[0], (idxs[-1][0], idxs[-1][1] + 1)
#
# def get_word_idx(context, wordss, idx):
# spanss = get_2d_spans(context, wordss)
# return spanss[idx[0]][idx[1]][0]
#
# def process_tokens(temp_tokens):
# tokens = []
# for token in temp_tokens:
# l = (u"-", u"\u2212", u"\u2014", u"\u2013", u"/", u"~", '"', u"'",
# u"\u201C", u"\u2019", u"\u201D", u"\u2018", u"\u00B0")
# # \u2013 is en-dash. Used for number to number
# # l = ("-", "\u2212", "\u2014", "\u2013")
# # l = ("\u2013",)
# tokens.extend(re.split(u"([{}])".format(u"".join(l)), token))
# return tokens
, which may include functions, classes, or code. Output only the next line. | xi = [process_tokens(tokens) for tokens in xi] # process tokens |
Given snippet: <|code_start|> """"Constructor for the BiDAF server."""
data_dir = kwargs.pop('squad_data_dir', None)
shared_file = kwargs.pop('bidaf_shared_file', None)
model_dir = kwargs.pop('bidaf_model_dir', None)
load_test = kwargs.pop('load_test', False)
load_impossible_questions = kwargs.pop('load_impossible_questions', False)
debug_mode = kwargs.pop('debug_mode', False)
self.debug_mode = debug_mode
self._InitializeEnvironment(
data_dir=data_dir,
shared_file=shared_file,
model_dir=model_dir,
load_test=load_test,
load_impossible_questions=load_impossible_questions,
debug_mode=debug_mode)
def _InitializeEnvironment(self, data_dir, shared_file, model_dir, load_test,
load_impossible_questions, debug_mode):
"""Initilizes the BiDAF model environment.
Args:
data_dir: Directory containing preprocessed SQuAD data.
shared_file: Path to shared data generated at training time.
model_dir: Directory contining parameters of a pre-trained BiDAF
model.
load_test: Whether the test set should be loaded as well.
load_impossible_questions: Whether info about impossibility of questions
should be loaded.
debug_mode: Whether to log debug information.
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from concurrent import futures
from absl import app
from absl import flags
from absl import logging
from px.environments import bidaf
from px.proto import aqa_pb2
from px.proto import aqa_pb2_grpc
import time
import grpc
and context:
# Path: px/environments/bidaf.py
# class BidafEnvironment(object):
# def __init__(self,
# data_dir,
# shared_path,
# model_dir,
# docid_separator='###',
# debug_mode=False,
# load_test=False,
# load_impossible_questions=False):
# def _ReadImpossiblities(self, dataset, data_dir):
# def _WordTokenize(self, text):
# def _PreprocessQaData(self, questions, document_ids):
# def IsImpossible(self, document_id):
# def GetAnswers(self, questions, document_ids):
which might include code, classes, or functions. Output only the next line. | self._environment = bidaf.BidafEnvironment( |
Here is a snippet: <|code_start|> out = defaultdict(list)
for key, val in self.data.items():
out[key].extend(val[idx] for idx in idxs)
return out
elif isinstance(self.data, Data):
return self.data.get_by_idxs(idxs)
raise Exception()
def get_batches(self,
batch_size,
num_batches=None,
shuffle=False,
cluster=False):
"""
:param batch_size:
:param num_batches:
:param shuffle:
:param cluster: cluster examples by their lengths; this might give
performance boost (i.e. faster training).
:return:
"""
num_batches_per_epoch = int(math.ceil(self.num_examples / batch_size))
if num_batches is None:
num_batches = num_batches_per_epoch
num_epochs = int(math.ceil(num_batches / num_batches_per_epoch))
if shuffle:
random_idxs = random.sample(self.valid_idxs, len(self.valid_idxs))
if cluster:
sorted_idxs = sorted(random_idxs, key=self._sort_key)
<|code_end|>
. Write the next line using the current file imports:
import json
import os
import random
import itertools
import math
import numpy as np
import tensorflow as tf
from collections import defaultdict
from third_party.bi_att_flow.my.tensorflow.general import grouper
from third_party.bi_att_flow.my.utils import index
and context from other files:
# Path: third_party/bi_att_flow/my/tensorflow/general.py
# def grouper(iterable, n, fillvalue=None, shorten=False, num_groups=None):
# args = [iter(iterable)] * n
# out = zip_longest(*args, fillvalue=fillvalue)
# out = list(out)
# if num_groups is not None:
# default = (fillvalue,) * n
# assert isinstance(num_groups, int)
# out = list(
# each
# for each, _ in zip_longest(out, range(num_groups), fillvalue=default))
# if shorten:
# assert fillvalue is None
# out = (tuple(e for e in each if e is not None) for each in out)
# return out
#
# Path: third_party/bi_att_flow/my/utils.py
# def index(l, i):
# return index(l[i[0]], i[1:]) if len(i) > 1 else l[i[0]]
, which may include functions, classes, or code. Output only the next line. | sorted_grouped = lambda: list(grouper(sorted_idxs, batch_size)) |
Predict the next line for this snippet: <|code_start|> num_batches_per_epoch = int(math.ceil(self.num_examples / batch_size))
if num_batches is None:
num_batches = num_batches_per_epoch
num_epochs = int(math.ceil(num_batches / num_batches_per_epoch))
if shuffle:
random_idxs = random.sample(self.valid_idxs, len(self.valid_idxs))
if cluster:
sorted_idxs = sorted(random_idxs, key=self._sort_key)
sorted_grouped = lambda: list(grouper(sorted_idxs, batch_size))
grouped = lambda: random.sample(sorted_grouped(), num_batches_per_epoch)
else:
random_grouped = lambda: list(grouper(random_idxs, batch_size))
grouped = random_grouped
else:
raw_grouped = lambda: list(grouper(self.valid_idxs, batch_size))
grouped = raw_grouped
batch_idx_tuples = itertools.chain.from_iterable(grouped()
for _ in range(num_epochs))
for _ in range(num_batches):
batch_idxs = tuple(i for i in next(batch_idx_tuples) if i is not None)
batch_data = self.get_by_idxs(batch_idxs)
shared_batch_data = {}
for key, val in batch_data.items():
if key.startswith('*'):
if key == '*cx' and 'cx' not in self.shared:
continue
assert self.shared is not None
shared_key = key[1:]
shared_batch_data[shared_key] = [
<|code_end|>
with the help of current file imports:
import json
import os
import random
import itertools
import math
import numpy as np
import tensorflow as tf
from collections import defaultdict
from third_party.bi_att_flow.my.tensorflow.general import grouper
from third_party.bi_att_flow.my.utils import index
and context from other files:
# Path: third_party/bi_att_flow/my/tensorflow/general.py
# def grouper(iterable, n, fillvalue=None, shorten=False, num_groups=None):
# args = [iter(iterable)] * n
# out = zip_longest(*args, fillvalue=fillvalue)
# out = list(out)
# if num_groups is not None:
# default = (fillvalue,) * n
# assert isinstance(num_groups, int)
# out = list(
# each
# for each, _ in zip_longest(out, range(num_groups), fillvalue=default))
# if shorten:
# assert fillvalue is None
# out = (tuple(e for e in each if e is not None) for each in out)
# return out
#
# Path: third_party/bi_att_flow/my/utils.py
# def index(l, i):
# return index(l[i[0]], i[1:]) if len(i) > 1 else l[i[0]]
, which may contain function names, class names, or code. Output only the next line. | index(self.shared[shared_key], each) for each in val |
Using the snippet: <|code_start|>
class MasterConfig(UserDict):
"""compile configuration data"""
def __init__(self, changes={}):
UserDict.__init__(self)
self.network_config=BikeNetworkConfigDirectFromCube()
self.outer_network_config=OuterNetworkConfig()
self.output_config=BikeOutputConfig()
self.choice_set_config=BikeChoiceSetConfig()
<|code_end|>
, determine the next line of code. You have imports:
import os
from UserDict import UserDict
from bike_model.config.bike_network_config_direct_from_cube import BikeNetworkConfigDirectFromCube
from bike_model.config.outer_network_config import OuterNetworkConfig
from bike_model.config.bike_output_config import BikeOutputConfig
from bike_model.config.bike_choice_set_config import BikeChoiceSetConfig
from route_model.config.assign_config import AssignConfig
and context (class names, function names, or code) available:
# Path: route_model/config/assign_config.py
# class AssignConfig(UserDict):
# """compile configuration data"""
#
# def __init__(self, changes={}):
# UserDict.__init__(self)
#
# """how to project outer trips to the county line"""
# self['max_inner']=981 #maximum zone id for SF county
# self['outer_importance_conditions']=[(982,1348),(2403,2455)] #zones with non-negligible trips to SF
# self['boundary_condition']='MTYPE' #network variable that indicates links which are inside SF
# self['outer_impedance']="DISTANCE" #netowrk variable to minimize when projecting trips to county line
#
# """trip matrices to assign"""
# self['matrix_filenames']=[r"X:\Projects\BikeModel\data\bike_model\input\matrix\am.csv",
# r"X:\Projects\BikeModel\data\bike_model\input\matrix\md.csv",
# #r"X:\Projects\BikeModel\data\bike_model\input\matrix\pm.csv",
# #r"X:\Projects\BikeModel\data\bike_model\input\matrix\ev.csv",
# #r"X:\Projects\BikeModel\data\bike_model\input\matrix\ea.csv"
# ]
# self['load_names']=['BIKE_AM','BIKE_PM']#'BIKE_MD','BIKE_EV','BIKE_EA'
#
# """override bound_file from choice_set_config"""
# self['bound_file']=r'X:\Projects\BikeModel\data\bike_model\input\bound\BoundPredict.csv'
#
# """use x times as many random seeds as needed for each source"""
# self['inverted_multiple']=2
#
# """path storage"""
# self['pickle_path']='C:/pickle_path' #directory to store path files
# self['delete_paths']=False #delete the paths after assignment is complete?
# self['load_paths_from_files']=False #use already generated paths rather than starting anew?
#
# """how to trace variables for utility function"""
# self['variables']=['DISTANCE',
# 'B1',
# 'B2',
# 'B3',
# 'TPER_RISE',
# 'WRONG_WAY',
# 'TURN']
# self['aliases']=['DISTANCE',
# 'BIKE_PCT_1',
# 'BIKE_PCT_2',
# 'BIKE_PCT_3',
# 'AVG_RISE',
# 'WRONG_WAY',
# 'TURNS_P_MI']
# self['weights']=[None,
# 'DISTANCE',
# 'DISTANCE',
# 'DISTANCE',
# 'DISTANCE',
# 'DISTANCE',
# None]
# self['trace_funs']=['sum',
# 'avg',
# 'avg',
# 'avg',
# 'avg',
# 'avg',
# 'sum']
# self['final_funs']=[None,None,None,None,None,None,None]
# self['path_size']=True
# self['path_size_log']=True
# self['path_size_alias']='lnpathsize'
# self['divisors']={'TURNS_P_MI':'DISTANCE'} # calculate this alias by dividing by this variable
#
# """fixed coefficients"""
# self['fixed_coefficients']=['DISTANCE','TURNS_P_MI','WRONG_WAY','BIKE_PCT_1','BIKE_PCT_2','BIKE_PCT_3','AVG_RISE','lnpathsize']
# self['alpha']=[-1.05,-0.21,-13.30,1.89,2.15,0.35,-154.0,1.0]
#
# """random coefficients"""
# self['use_random_coefficients']=False
# self['random_coefficients']=[]#['BIKE_PCT_1','BIKE_PCT_2','BIKE_PCT_3','AVG_RISE']
# self['random_transformations']=[]#[idenfun,idenfun,idenfun,idenfun]
# self['latent_mu']=[]#[1.82,2.49,0.76,-2.22]
# self['latent_sigma']=array([])
# """array( [ [24.95, 0., 6.58, 0. ],
# [0., 5.45, 2.91, 0. ],
# [0., 0., 4.19, 0. ],
# [0., 0., 0., 3.85 ] ] )"""
# self['mixing_granularity']=0.2 # number of trips to simulate as an individual
#
# """for debugging code"""
# self['test_small_matrix']=True
#
# for key in changes:
# self[key]=changes[key]
. Output only the next line. | self.assign_config=AssignConfig() |
Given the following code snippet before the placeholder: <|code_start|>
def path_size(G,choice_sets,choice_set_config):
result=[]
config=choice_set_config
the_network=G
if G.orig_network is not None:
the_network=G.orig_network
for path_list in choice_sets:
temp=[]
hashes=[]; lengths=[]
for cur_path in path_list:
use_path=cur_path
if G.orig_network is not None:
<|code_end|>
, predict the next line using imports from the current file:
from route_model.misc import get_orig_path
and context including class names, function names, and sometimes code from other files:
# Path: route_model/misc.py
# def get_orig_path(dual_path):
#
# orig_path=[dual_path[1][0]]
# for i in range(1,len(dual_path)-1):
# orig_path.append(dual_path[i][1])
#
# return orig_path
. Output only the next line. | use_path=get_orig_path(cur_path) |
Continue the code snippet: <|code_start|>
def param_worker(work_queue,done_queue,network,trip_data,master_config,trip_times,ext_bound,trip_ids):
this_network=network.copy()
#initialize link randomizer
link_randomizer=None
if master_config.choice_set_config['method']=='doubly_stochastic':
link_randomizer=master_config.choice_set_config.get_link_randomizer(network,master_config)
for kappa, sigma in iter(work_queue.get,'STOP'):
link_randomizer.set_scl(kappa)
link_randomizer.set_par(link_randomizer['zero']['par'],[1-sigma,1+sigma])
link_randomizer.update_denoms()
random.seed(0)
idx=0
overlap_sum=0
for trip_id in trip_ids:
idx=idx+1
print time.asctime(time.localtime()), "-", current_process().name, "-",idx, ", k: ", kappa, ", s: ", sigma ,". trip_id: ", trip_id[0], ", sub_trip: ", trip_id[1], ", stage: ", trip_id[2]
<|code_end|>
. Use current file imports:
import os, csv, sys
import route_model.misc as rm_misc
import route_model.input as rm_input
import route_model.output as rm_output
import route_model.choice_set.ds_generate as ds
import time
import random
import numpy
from optparse import OptionParser
from transport_network import TransportNetwork, create_pseudo_dual
from route_model.choice_set.generate_choice_set import generate_choice_set
from multiprocessing import Lock, Process, current_process, Queue
and context (classes, functions, or code) from other files:
# Path: route_model/choice_set/generate_choice_set.py
# def generate_choice_set(G,chosen,choice_set_config,link_randomizer=None,time_dependent_relation=None,trip_time=None,ext_bound=None):
#
# config=choice_set_config
#
# if config['method']=='link_elimination':
# return le_generate(G,chosen,config)
#
# if config['method']=='doubly_stochastic':
# return ds_generate(G,chosen,config,link_randomizer,ext_bound,time_dependent_relation,trip_time)
. Output only the next line. | the_set,chosen_overlap=generate_choice_set(this_network,trip_data[trip_id],master_config.choice_set_config,link_randomizer,master_config['time_dependent_relation'],trip_times[trip_id[0]],ext_bound) |
Predict the next line after this snippet: <|code_start|>
def generate_choice_set(G,chosen,choice_set_config,link_randomizer=None,time_dependent_relation=None,trip_time=None,ext_bound=None):
config=choice_set_config
if config['method']=='link_elimination':
<|code_end|>
using the current file's imports:
import random
from route_model.choice_set.link_elimination import le_generate
from route_model.choice_set.ds_generate import ds_generate
and any relevant context from other files:
# Path: route_model/choice_set/link_elimination.py
# def le_generate(G,chosen,choice_set_config):
#
# master_set=link_elimination(G,chosen[0],chosen[-1],G.euclid,G.config['dist_var'],choice_set_config['master_size'])
# chosen_overlap=calc_chosen_overlap(G,chosen,master_set,choice_set_config)
# sample_set=random.sample(master_set,choice_set_config['consider_size'])
# result_set=[chosen]
# chosen_seen=False
# for path in sample_set:
# if chosen_seen==False:
# if path==result_set[0]:
# chosen_seen=True
# if not choice_set_config['allow_duplicates_of_chosen_route']:
# continue
# result_set.append(path)
# if chosen_seen==False and not choice_set_config['allow_duplicates_of_chosen_route']:
# result_set.pop()
# return (result_set,chosen_overlap)
#
# Path: route_model/choice_set/ds_generate.py
# def ds_generate(G,chosen,choice_set_config,link_randomizer,ext_bound,time_dependent_relation=None,trip_time=None):
#
# if choice_set_config['allow_duplicates_of_chosen_route']:
# master_set=ds_generate_master(G,chosen,choice_set_config,link_randomizer,time_dependent_relation,trip_time,ext_bound)
# overlap=calc_chosen_overlap(G,chosen,master_set,choice_set_config)
# filtered_set=filter_master(G,None,master_set[1:],choice_set_config)
# return ([chosen]+filtered_set,overlap)
# else:
# return filter_master(G,chosen,ds_generate_master(G,chosen,choice_set_config,link_randomizer,time_dependent_relation,trip_time,ext_bound),choice_set_config)
. Output only the next line. | return le_generate(G,chosen,config) |
Next line prediction: <|code_start|>
def generate_choice_set(G,chosen,choice_set_config,link_randomizer=None,time_dependent_relation=None,trip_time=None,ext_bound=None):
config=choice_set_config
if config['method']=='link_elimination':
return le_generate(G,chosen,config)
if config['method']=='doubly_stochastic':
<|code_end|>
. Use current file imports:
(import random
from route_model.choice_set.link_elimination import le_generate
from route_model.choice_set.ds_generate import ds_generate)
and context including class names, function names, or small code snippets from other files:
# Path: route_model/choice_set/link_elimination.py
# def le_generate(G,chosen,choice_set_config):
#
# master_set=link_elimination(G,chosen[0],chosen[-1],G.euclid,G.config['dist_var'],choice_set_config['master_size'])
# chosen_overlap=calc_chosen_overlap(G,chosen,master_set,choice_set_config)
# sample_set=random.sample(master_set,choice_set_config['consider_size'])
# result_set=[chosen]
# chosen_seen=False
# for path in sample_set:
# if chosen_seen==False:
# if path==result_set[0]:
# chosen_seen=True
# if not choice_set_config['allow_duplicates_of_chosen_route']:
# continue
# result_set.append(path)
# if chosen_seen==False and not choice_set_config['allow_duplicates_of_chosen_route']:
# result_set.pop()
# return (result_set,chosen_overlap)
#
# Path: route_model/choice_set/ds_generate.py
# def ds_generate(G,chosen,choice_set_config,link_randomizer,ext_bound,time_dependent_relation=None,trip_time=None):
#
# if choice_set_config['allow_duplicates_of_chosen_route']:
# master_set=ds_generate_master(G,chosen,choice_set_config,link_randomizer,time_dependent_relation,trip_time,ext_bound)
# overlap=calc_chosen_overlap(G,chosen,master_set,choice_set_config)
# filtered_set=filter_master(G,None,master_set[1:],choice_set_config)
# return ([chosen]+filtered_set,overlap)
# else:
# return filter_master(G,chosen,ds_generate_master(G,chosen,choice_set_config,link_randomizer,time_dependent_relation,trip_time,ext_bound),choice_set_config)
. Output only the next line. | return ds_generate(G,chosen,config,link_randomizer,ext_bound,time_dependent_relation,trip_time) |
Given the code snippet: <|code_start|> self['tolerance']=0.01 # percentage threshold to stop binary search when extracting prior distribution
self['log_prior']=True # use log-uniform prior? (False means uniform prior)
"""variable configuration"""
self['variables']=['DISTANCE','BNE1','BNE2','BNE3','WRONG_WAY','TPER_RISE','TURN'] #network variables to use in choice set generation
self['ref']='DISTANCE' #reference variable (coef fixed to 1)
self['ranges']={'BNE1':[0.0000001,1000.0],'BNE2':[0.0000001,1000.0],'BNE3':[0.0000001,1000.0],'TPER_RISE':[0.00001,100000.0],'WRONG_WAY':[0.0000001,1000.0],'TURN':[0.0000001,1000.0]} #large initial boundary intervals
self['weights']={'BNE1':'DISTANCE','BNE2':'DISTANCE','BNE3':'DISTANCE','TPER_RISE':'DISTANCE','WRONG_WAY':'DISTANCE'} #to multiply each link attribute value by
self['median_compare']=['TURN'] #extract these coefficients with others at their medians (must appear last in self['variables'])
self['randomize_compare']=[] #extract these coefficients with link randomization (must appear last in self['variables'])
"""generate noisy output?"""
self['verbose']=True
"""speed up by randomizing whole network in outer loop and performing searches in inner loop using only comparisons/additions"""
self['inverted']=True # should we?
self['inverted_N_attr']=4 # when link attributes were randomized individually, this controlled the number of link randomizations, now just set it to the number of processors
self['inverted_N_param']=5 # when link attributes were randomized individually, this controlled the number of parameters to draw per link randomization, now just set it to the number of parameters desired divided by the number of processors (e.g. w/; N_attr=4 processors x N_param=5 == 20 random parameters)
self['inverted_nested']=False # when link attributes were randomized individually, True would nest the attribute and parameter randomization loops, now just leave set to False
"""link randomization parameters"""
self['randomize_after']=True # apply link randomization after generalized cost is calcluated rather than to attributes individually? Leave set to True.
self['randomize_after_dev']=0.4 # link randomization scale parameter
self['randomize_after_iters']=3 # number of link randomizations per coefficient (e.g. 20 random parameters x 3 randomize_after_iters == 60 max choice set size)
"""refrain from filtering out routes that overlap too much with chosen route (used to analyze choice set quality)"""
self['allow_duplicates_of_chosen_route']=False
"""deprecated"""
#parameters used to randomize link attributes individually
<|code_end|>
, generate the next line using the imports in this file:
import os, time, string
import random
import route_model.misc as rm_misc
from UserDict import UserDict
from math import sqrt, exp
from numpy import *
from route_model.choice_set.beta_unif_randomizer import BetaUnifRandomizer
and context (functions, classes, or occasionally code) from other files:
# Path: route_model/choice_set/beta_unif_randomizer.py
# class BetaUnifRandomizer(LinkRandomizer):
#
# def __init__(self,G,variables,no_randomize,b,s,scl):
#
# LinkRandomizer.__init__(self)
#
# self['variables']=variables
# self['no_randomize']=no_randomize
#
# self.set_scl(scl)
# self.set_fam(nr.beta,nr.uniform)
# self.set_par([1,b],[1-s,1+s])
#
# self.get_link_distribution(G)
#
# self.get_fam_means()
# self.update_denoms()
#
# def get_fam_means(self):
#
# self['zero']['mean']=self['zero']['par'][0]/(self['zero']['par'][0]+self['zero']['par'][1])
# self['pos']['mean']=(self['pos']['par'][0]+self['pos']['par'][1])/2
. Output only the next line. | self['randomizer_fun']=BetaUnifRandomizer |
Predict the next line for this snippet: <|code_start|> self['tolerance']=0.01 # percentage threshold to stop binary search when extracting prior distribution
self['log_prior']=True # use log-uniform prior? (False means uniform prior)
"""variable configuration"""
self['variables']=['DISTANCE','BNE1','BNE2','BNE3','WRONG_WAY','TPER_RISE','TURN'] #network variables to use in choice set generation
self['ref']='DISTANCE' #reference variable (coef fixed to 1)
self['ranges']={'BNE1':[0.0000001,1000.0],'BNE2':[0.0000001,1000.0],'BNE3':[0.0000001,1000.0],'TPER_RISE':[0.00001,100000.0],'WRONG_WAY':[0.0000001,1000.0],'TURN':[0.0000001,1000.0]} #large initial boundary intervals
self['weights']={'BNE1':'DISTANCE','BNE2':'DISTANCE','BNE3':'DISTANCE','TPER_RISE':'DISTANCE','WRONG_WAY':'DISTANCE'} #to multiply each link attribute value by
self['median_compare']=['TURN'] #extract these coefficients with others at their medians (must appear last in self['variables'])
self['randomize_compare']=[] #extract these coefficients with link randomization (must appear last in self['variables'])
"""generate noisy output?"""
self['verbose']=True
"""speed up by randomizing whole network in outer loop and performing searches in inner loop using only comparisons/additions"""
self['inverted']=True # should we?
self['inverted_N_attr']=4 # when link attributes were randomized individually, this controlled the number of link randomizations, now just set it to the number of processors
self['inverted_N_param']=5 # when link attributes were randomized individually, this controlled the number of parameters to draw per link randomization, now just set it to the number of parameters desired divided by the number of processors (e.g. w/; N_attr=4 processors x N_param=5 == 20 random parameters)
self['inverted_nested']=False # when link attributes were randomized individually, True would nest the attribute and parameter randomization loops, now just leave set to False
"""link randomization parameters"""
self['randomize_after']=True # apply link randomization after generalized cost is calcluated rather than to attributes individually? Leave set to True.
self['randomize_after_dev']=0.4 # link randomization scale parameter
self['randomize_after_iters']=3 # number of link randomizations per coefficient (e.g. 20 random parameters x 3 randomize_after_iters == 60 max choice set size)
"""refrain from filtering out routes that overlap too much with chosen route (used to analyze choice set quality)"""
self['allow_duplicates_of_chosen_route']=False
"""deprecated"""
#parameters used to randomize link attributes individually
<|code_end|>
with the help of current file imports:
import os
import random
import route_model.misc as rm_misc
from UserDict import UserDict
from route_model.choice_set.beta_unif_randomizer import BetaUnifRandomizer
from math import sqrt
and context from other files:
# Path: route_model/choice_set/beta_unif_randomizer.py
# class BetaUnifRandomizer(LinkRandomizer):
#
# def __init__(self,G,variables,no_randomize,b,s,scl):
#
# LinkRandomizer.__init__(self)
#
# self['variables']=variables
# self['no_randomize']=no_randomize
#
# self.set_scl(scl)
# self.set_fam(nr.beta,nr.uniform)
# self.set_par([1,b],[1-s,1+s])
#
# self.get_link_distribution(G)
#
# self.get_fam_means()
# self.update_denoms()
#
# def get_fam_means(self):
#
# self['zero']['mean']=self['zero']['par'][0]/(self['zero']['par'][0]+self['zero']['par'][1])
# self['pos']['mean']=(self['pos']['par'][0]+self['pos']['par'][1])/2
, which may contain function names, class names, or code. Output only the next line. | self['randomizer_fun']=BetaUnifRandomizer |
Using the snippet: <|code_start|> for e2 in G.edges([e1[1]],data=True):
dual_data=e2[2]
dual_data['TURN']=0
dual_data['L_TURN']=0
dual_data['R_TURN']=0
dual_data['U_TURN']=0
the_turn=G.turn_dir(e1,e2)
if the_turn:
dual_data['TURN']=1
if the_turn=='L':
dual_data['L_TURN']=1
if the_turn=='R':
dual_data['R_TURN']=1
if the_turn=='U':
dual_data['U_TURN']=1
H.add_edge((e1[0],e1[1]),(e2[0],e2[1]),attr_dict=dual_data)
H.orig_network=G.copy()
return H
class TransportNetworkError(Exception):
pass
class Tests(unittest.TestCase):
def setUp(self):
<|code_end|>
, determine the next line of code. You have imports:
from networkx.classes.digraph import DiGraph
from math import sqrt, acos, pi
from numpy import *
from route_model.config.network_config import NetworkConfig
import os, csv
import unittest
and context (class names, function names, or code) available:
# Path: route_model/config/network_config.py
# class NetworkConfig(UserDict):
# """store network configuration data"""
#
# def __init__(self, changes={}):
# UserDict.__init__(self)
# self['data_dir']=r"X:\Projects\BikeModel\data\route_model\input\network\test"
# self['link_file']=os.path.join(self['data_dir'],'links.csv')
# self['node_file']=os.path.join(self['data_dir'],'nodes.csv')
# self['dist_var']='DISTANCE'
# self['dist_scl']=1 #rescales with node distance x dist_scl= link distance
# self['max_centroid']=None
#
# for key in changes:
# self[key]=changes[key]
. Output only the next line. | network_config=NetworkConfig() |
Continue the code snippet: <|code_start|> name="event_slides"
),
path('<slug:slug>/videos/', video_list, name="event_video_list"),
path('<slug:slug>/all-comments/', event_all_comments_list, name="event_comments"),
path('<slug:slug>/map/', EventDetail,
{'template_name': 'happenings/event_map.html'},
name="event_map"
),
path('<slug:slug>/attending/', EventDetail,
{'template_name': 'happenings/attending/list.html'},
name='event_attending_list'
),
path('<slug:slug>/attending/add/', add_attending, name="attending_add"),
path('<slug:slug>/memories/', EventDetail,
{'template_name': 'happenings/memory_list.html'},
name="event_memories"
),
path('<slug:event_slug>/memories/<int:pk>/', MemoryDetail, name="memory_detail"),
path('<slug:slug>/memories/add/', add_memory, name="add_memory"),
# extra info pages
path('<slug:event_slug>/extra/<slug:slug>/', ExtraInfoDetail, name="special_event_extra"),
# updates
path('<slug:slug>/updates/', event_update_list, name="event_update_list"),
path('<slug:event_slug>/updates/<int:pk>/', EventUpdate, name="event_update_detail"),
path('<slug:event_slug>/updates/<int:pk>/slides/',
DetailView,
{
'template_name': "happenings/updates/update_slides.html",
<|code_end|>
. Use current file imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
EventList, EventDetail, EventsForPeriod, \
EventUpdate, MemoryDetail, ExtraInfoDetail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context (classes, functions, or code) from other files:
# Path: build/lib/happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# @models.permalink
# def get_absolute_url(self):
# return ('event_update_detail', [str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# @models.permalink
# def get_gallery_url(self):
# return ('update_slides', [self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: build/lib/happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | 'queryset': Update.objects.all() |
Using the snippet: <|code_start|>
# CRUD and admin functions
urlpatterns = [
# FORMS
path('add/', add_event, name="add_event"),
path('<slug:slug>/edit-event/', edit_event, name="edit-event"),
<|code_end|>
, determine the next line of code. You have imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
EventList, EventDetail, EventsForPeriod, \
EventUpdate, MemoryDetail, ExtraInfoDetail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context (class names, function names, or code) available:
# Path: build/lib/happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# @models.permalink
# def get_absolute_url(self):
# return ('event_update_detail', [str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# @models.permalink
# def get_gallery_url(self):
# return ('update_slides', [self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: build/lib/happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:slug>/add-recap/', add_recap, name="add_recap"), |
Based on the snippet: <|code_start|>
# CRUD and admin functions
urlpatterns = [
# FORMS
path('add/', add_event, name="add_event"),
path('<slug:slug>/edit-event/', edit_event, name="edit-event"),
path('<slug:slug>/add-recap/', add_recap, name="add_recap"),
# EVENT LISTS
path('', EventList, name="events_index"),
path('by-region/<slug:region>/', EventList, name="events_by_region"),
path('by-state/<slug:state>/', EventList, name="events_by_state"),
re_path(
r'^(?P<m>\d{2})/(?P<d>\d{2})/(?P<y>\d{4})/$',
EventsForPeriod,
name="events_for_day"
),
re_path(
r'^(?P<m>\d{2})/(?P<y>\d{4})/$',
EventsForPeriod,
name="events_for_month"
),
# ************* EVENT DETAILS *************/
path('<slug:slug>/', EventDetail, name="event_detail"),
<|code_end|>
, predict the immediate next line with the help of imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
EventList, EventDetail, EventsForPeriod, \
EventUpdate, MemoryDetail, ExtraInfoDetail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context (classes, functions, sometimes code) from other files:
# Path: build/lib/happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# @models.permalink
# def get_absolute_url(self):
# return ('event_update_detail', [str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# @models.permalink
# def get_gallery_url(self):
# return ('update_slides', [self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: build/lib/happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:slug>/ical/', create_ical, name="event_ical"), |
Continue the code snippet: <|code_start|>
# CRUD and admin functions
urlpatterns = [
# FORMS
path('add/', add_event, name="add_event"),
path('<slug:slug>/edit-event/', edit_event, name="edit-event"),
path('<slug:slug>/add-recap/', add_recap, name="add_recap"),
# EVENT LISTS
<|code_end|>
. Use current file imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
EventList, EventDetail, EventsForPeriod, \
EventUpdate, MemoryDetail, ExtraInfoDetail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context (classes, functions, or code) from other files:
# Path: build/lib/happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# @models.permalink
# def get_absolute_url(self):
# return ('event_update_detail', [str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# @models.permalink
# def get_gallery_url(self):
# return ('update_slides', [self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: build/lib/happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('', EventList, name="events_index"), |
Here is a snippet: <|code_start|>
# CRUD and admin functions
urlpatterns = [
# FORMS
path('add/', add_event, name="add_event"),
path('<slug:slug>/edit-event/', edit_event, name="edit-event"),
path('<slug:slug>/add-recap/', add_recap, name="add_recap"),
# EVENT LISTS
path('', EventList, name="events_index"),
path('by-region/<slug:region>/', EventList, name="events_by_region"),
path('by-state/<slug:state>/', EventList, name="events_by_state"),
re_path(
r'^(?P<m>\d{2})/(?P<d>\d{2})/(?P<y>\d{4})/$',
EventsForPeriod,
name="events_for_day"
),
re_path(
r'^(?P<m>\d{2})/(?P<y>\d{4})/$',
EventsForPeriod,
name="events_for_month"
),
# ************* EVENT DETAILS *************/
<|code_end|>
. Write the next line using the current file imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
EventList, EventDetail, EventsForPeriod, \
EventUpdate, MemoryDetail, ExtraInfoDetail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context from other files:
# Path: build/lib/happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# @models.permalink
# def get_absolute_url(self):
# return ('event_update_detail', [str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# @models.permalink
# def get_gallery_url(self):
# return ('update_slides', [self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: build/lib/happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
, which may include functions, classes, or code. Output only the next line. | path('<slug:slug>/', EventDetail, name="event_detail"), |
Given the following code snippet before the placeholder: <|code_start|>
# CRUD and admin functions
urlpatterns = [
# FORMS
path('add/', add_event, name="add_event"),
path('<slug:slug>/edit-event/', edit_event, name="edit-event"),
path('<slug:slug>/add-recap/', add_recap, name="add_recap"),
# EVENT LISTS
path('', EventList, name="events_index"),
path('by-region/<slug:region>/', EventList, name="events_by_region"),
path('by-state/<slug:state>/', EventList, name="events_by_state"),
re_path(
r'^(?P<m>\d{2})/(?P<d>\d{2})/(?P<y>\d{4})/$',
<|code_end|>
, predict the next line using imports from the current file:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
EventList, EventDetail, EventsForPeriod, \
EventUpdate, MemoryDetail, ExtraInfoDetail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context including class names, function names, and sometimes code from other files:
# Path: build/lib/happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# @models.permalink
# def get_absolute_url(self):
# return ('event_update_detail', [str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# @models.permalink
# def get_gallery_url(self):
# return ('update_slides', [self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: build/lib/happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | EventsForPeriod, |
Based on the snippet: <|code_start|> path('<slug:slug>/ical/', create_ical, name="event_ical"),
# **************** Event children ************/
path('<slug:slug>/slides/', EventDetail,
{'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
path('<slug:slug>/videos/', video_list, name="event_video_list"),
path('<slug:slug>/all-comments/', event_all_comments_list, name="event_comments"),
path('<slug:slug>/map/', EventDetail,
{'template_name': 'happenings/event_map.html'},
name="event_map"
),
path('<slug:slug>/attending/', EventDetail,
{'template_name': 'happenings/attending/list.html'},
name='event_attending_list'
),
path('<slug:slug>/attending/add/', add_attending, name="attending_add"),
path('<slug:slug>/memories/', EventDetail,
{'template_name': 'happenings/memory_list.html'},
name="event_memories"
),
path('<slug:event_slug>/memories/<int:pk>/', MemoryDetail, name="memory_detail"),
path('<slug:slug>/memories/add/', add_memory, name="add_memory"),
# extra info pages
path('<slug:event_slug>/extra/<slug:slug>/', ExtraInfoDetail, name="special_event_extra"),
# updates
path('<slug:slug>/updates/', event_update_list, name="event_update_list"),
<|code_end|>
, predict the immediate next line with the help of imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
EventList, EventDetail, EventsForPeriod, \
EventUpdate, MemoryDetail, ExtraInfoDetail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context (classes, functions, sometimes code) from other files:
# Path: build/lib/happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# @models.permalink
# def get_absolute_url(self):
# return ('event_update_detail', [str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# @models.permalink
# def get_gallery_url(self):
# return ('update_slides', [self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: build/lib/happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:event_slug>/updates/<int:pk>/', EventUpdate, name="event_update_detail"), |
Based on the snippet: <|code_start|> re_path(
r'^(?P<m>\d{2})/(?P<y>\d{4})/$',
EventsForPeriod,
name="events_for_month"
),
# ************* EVENT DETAILS *************/
path('<slug:slug>/', EventDetail, name="event_detail"),
path('<slug:slug>/ical/', create_ical, name="event_ical"),
# **************** Event children ************/
path('<slug:slug>/slides/', EventDetail,
{'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
path('<slug:slug>/videos/', video_list, name="event_video_list"),
path('<slug:slug>/all-comments/', event_all_comments_list, name="event_comments"),
path('<slug:slug>/map/', EventDetail,
{'template_name': 'happenings/event_map.html'},
name="event_map"
),
path('<slug:slug>/attending/', EventDetail,
{'template_name': 'happenings/attending/list.html'},
name='event_attending_list'
),
path('<slug:slug>/attending/add/', add_attending, name="attending_add"),
path('<slug:slug>/memories/', EventDetail,
{'template_name': 'happenings/memory_list.html'},
name="event_memories"
),
<|code_end|>
, predict the immediate next line with the help of imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
EventList, EventDetail, EventsForPeriod, \
EventUpdate, MemoryDetail, ExtraInfoDetail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context (classes, functions, sometimes code) from other files:
# Path: build/lib/happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# @models.permalink
# def get_absolute_url(self):
# return ('event_update_detail', [str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# @models.permalink
# def get_gallery_url(self):
# return ('update_slides', [self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: build/lib/happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:event_slug>/memories/<int:pk>/', MemoryDetail, name="memory_detail"), |
Next line prediction: <|code_start|> ),
# ************* EVENT DETAILS *************/
path('<slug:slug>/', EventDetail, name="event_detail"),
path('<slug:slug>/ical/', create_ical, name="event_ical"),
# **************** Event children ************/
path('<slug:slug>/slides/', EventDetail,
{'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
path('<slug:slug>/videos/', video_list, name="event_video_list"),
path('<slug:slug>/all-comments/', event_all_comments_list, name="event_comments"),
path('<slug:slug>/map/', EventDetail,
{'template_name': 'happenings/event_map.html'},
name="event_map"
),
path('<slug:slug>/attending/', EventDetail,
{'template_name': 'happenings/attending/list.html'},
name='event_attending_list'
),
path('<slug:slug>/attending/add/', add_attending, name="attending_add"),
path('<slug:slug>/memories/', EventDetail,
{'template_name': 'happenings/memory_list.html'},
name="event_memories"
),
path('<slug:event_slug>/memories/<int:pk>/', MemoryDetail, name="memory_detail"),
path('<slug:slug>/memories/add/', add_memory, name="add_memory"),
# extra info pages
<|code_end|>
. Use current file imports:
(from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
EventList, EventDetail, EventsForPeriod, \
EventUpdate, MemoryDetail, ExtraInfoDetail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory)
and context including class names, function names, or small code snippets from other files:
# Path: build/lib/happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# @models.permalink
# def get_absolute_url(self):
# return ('event_update_detail', [str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# @models.permalink
# def get_gallery_url(self):
# return ('update_slides', [self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: build/lib/happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:event_slug>/extra/<slug:slug>/', ExtraInfoDetail, name="special_event_extra"), |
Given snippet: <|code_start|> path('<slug:slug>/', EventDetail, name="event_detail"),
path('<slug:slug>/ical/', create_ical, name="event_ical"),
# **************** Event children ************/
path('<slug:slug>/slides/', EventDetail,
{'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
path('<slug:slug>/videos/', video_list, name="event_video_list"),
path('<slug:slug>/all-comments/', event_all_comments_list, name="event_comments"),
path('<slug:slug>/map/', EventDetail,
{'template_name': 'happenings/event_map.html'},
name="event_map"
),
path('<slug:slug>/attending/', EventDetail,
{'template_name': 'happenings/attending/list.html'},
name='event_attending_list'
),
path('<slug:slug>/attending/add/', add_attending, name="attending_add"),
path('<slug:slug>/memories/', EventDetail,
{'template_name': 'happenings/memory_list.html'},
name="event_memories"
),
path('<slug:event_slug>/memories/<int:pk>/', MemoryDetail, name="memory_detail"),
path('<slug:slug>/memories/add/', add_memory, name="add_memory"),
# extra info pages
path('<slug:event_slug>/extra/<slug:slug>/', ExtraInfoDetail, name="special_event_extra"),
# updates
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
EventList, EventDetail, EventsForPeriod, \
EventUpdate, MemoryDetail, ExtraInfoDetail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context:
# Path: build/lib/happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# @models.permalink
# def get_absolute_url(self):
# return ('event_update_detail', [str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# @models.permalink
# def get_gallery_url(self):
# return ('update_slides', [self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: build/lib/happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
which might include code, classes, or functions. Output only the next line. | path('<slug:slug>/updates/', event_update_list, name="event_update_list"), |
Given the following code snippet before the placeholder: <|code_start|>urlpatterns = [
# FORMS
path('add/', add_event, name="add_event"),
path('<slug:slug>/edit-event/', edit_event, name="edit-event"),
path('<slug:slug>/add-recap/', add_recap, name="add_recap"),
# EVENT LISTS
path('', EventList, name="events_index"),
path('by-region/<slug:region>/', EventList, name="events_by_region"),
path('by-state/<slug:state>/', EventList, name="events_by_state"),
re_path(
r'^(?P<m>\d{2})/(?P<d>\d{2})/(?P<y>\d{4})/$',
EventsForPeriod,
name="events_for_day"
),
re_path(
r'^(?P<m>\d{2})/(?P<y>\d{4})/$',
EventsForPeriod,
name="events_for_month"
),
# ************* EVENT DETAILS *************/
path('<slug:slug>/', EventDetail, name="event_detail"),
path('<slug:slug>/ical/', create_ical, name="event_ical"),
# **************** Event children ************/
path('<slug:slug>/slides/', EventDetail,
{'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
<|code_end|>
, predict the next line using imports from the current file:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
EventList, EventDetail, EventsForPeriod, \
EventUpdate, MemoryDetail, ExtraInfoDetail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context including class names, function names, and sometimes code from other files:
# Path: build/lib/happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# @models.permalink
# def get_absolute_url(self):
# return ('event_update_detail', [str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# @models.permalink
# def get_gallery_url(self):
# return ('update_slides', [self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: build/lib/happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:slug>/videos/', video_list, name="event_video_list"), |
Using the snippet: <|code_start|> # FORMS
path('add/', add_event, name="add_event"),
path('<slug:slug>/edit-event/', edit_event, name="edit-event"),
path('<slug:slug>/add-recap/', add_recap, name="add_recap"),
# EVENT LISTS
path('', EventList, name="events_index"),
path('by-region/<slug:region>/', EventList, name="events_by_region"),
path('by-state/<slug:state>/', EventList, name="events_by_state"),
re_path(
r'^(?P<m>\d{2})/(?P<d>\d{2})/(?P<y>\d{4})/$',
EventsForPeriod,
name="events_for_day"
),
re_path(
r'^(?P<m>\d{2})/(?P<y>\d{4})/$',
EventsForPeriod,
name="events_for_month"
),
# ************* EVENT DETAILS *************/
path('<slug:slug>/', EventDetail, name="event_detail"),
path('<slug:slug>/ical/', create_ical, name="event_ical"),
# **************** Event children ************/
path('<slug:slug>/slides/', EventDetail,
{'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
path('<slug:slug>/videos/', video_list, name="event_video_list"),
<|code_end|>
, determine the next line of code. You have imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
EventList, EventDetail, EventsForPeriod, \
EventUpdate, MemoryDetail, ExtraInfoDetail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context (class names, function names, or code) available:
# Path: build/lib/happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# @models.permalink
# def get_absolute_url(self):
# return ('event_update_detail', [str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# @models.permalink
# def get_gallery_url(self):
# return ('update_slides', [self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: build/lib/happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:slug>/all-comments/', event_all_comments_list, name="event_comments"), |
Here is a snippet: <|code_start|> re_path(
r'^(?P<m>\d{2})/(?P<d>\d{2})/(?P<y>\d{4})/$',
EventsForPeriod,
name="events_for_day"
),
re_path(
r'^(?P<m>\d{2})/(?P<y>\d{4})/$',
EventsForPeriod,
name="events_for_month"
),
# ************* EVENT DETAILS *************/
path('<slug:slug>/', EventDetail, name="event_detail"),
path('<slug:slug>/ical/', create_ical, name="event_ical"),
# **************** Event children ************/
path('<slug:slug>/slides/', EventDetail,
{'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
path('<slug:slug>/videos/', video_list, name="event_video_list"),
path('<slug:slug>/all-comments/', event_all_comments_list, name="event_comments"),
path('<slug:slug>/map/', EventDetail,
{'template_name': 'happenings/event_map.html'},
name="event_map"
),
path('<slug:slug>/attending/', EventDetail,
{'template_name': 'happenings/attending/list.html'},
name='event_attending_list'
),
<|code_end|>
. Write the next line using the current file imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
EventList, EventDetail, EventsForPeriod, \
EventUpdate, MemoryDetail, ExtraInfoDetail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context from other files:
# Path: build/lib/happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# @models.permalink
# def get_absolute_url(self):
# return ('event_update_detail', [str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# @models.permalink
# def get_gallery_url(self):
# return ('update_slides', [self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: build/lib/happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
, which may include functions, classes, or code. Output only the next line. | path('<slug:slug>/attending/add/', add_attending, name="attending_add"), |
Next line prediction: <|code_start|> r'^(?P<m>\d{2})/(?P<y>\d{4})/$',
EventsForPeriod,
name="events_for_month"
),
# ************* EVENT DETAILS *************/
path('<slug:slug>/', EventDetail, name="event_detail"),
path('<slug:slug>/ical/', create_ical, name="event_ical"),
# **************** Event children ************/
path('<slug:slug>/slides/', EventDetail,
{'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
path('<slug:slug>/videos/', video_list, name="event_video_list"),
path('<slug:slug>/all-comments/', event_all_comments_list, name="event_comments"),
path('<slug:slug>/map/', EventDetail,
{'template_name': 'happenings/event_map.html'},
name="event_map"
),
path('<slug:slug>/attending/', EventDetail,
{'template_name': 'happenings/attending/list.html'},
name='event_attending_list'
),
path('<slug:slug>/attending/add/', add_attending, name="attending_add"),
path('<slug:slug>/memories/', EventDetail,
{'template_name': 'happenings/memory_list.html'},
name="event_memories"
),
path('<slug:event_slug>/memories/<int:pk>/', MemoryDetail, name="memory_detail"),
<|code_end|>
. Use current file imports:
(from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
EventList, EventDetail, EventsForPeriod, \
EventUpdate, MemoryDetail, ExtraInfoDetail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory)
and context including class names, function names, or small code snippets from other files:
# Path: build/lib/happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# @models.permalink
# def get_absolute_url(self):
# return ('event_update_detail', [str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# @models.permalink
# def get_gallery_url(self):
# return ('update_slides', [self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: build/lib/happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:slug>/memories/add/', add_memory, name="add_memory"), |
Continue the code snippet: <|code_start|> {'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
path('<slug:slug>/videos/', video_list, name="event_video_list"),
path('<slug:slug>/all-comments/', event_all_comments_list, name="event_comments"),
path('<slug:slug>/map/', event_detail,
{'template_name': 'happenings/event_map.html'},
name="event_map"
),
path('<slug:slug>/attending/', event_detail,
{'template_name': 'happenings/attending/list.html'},
name='event_attending_list'
),
path('<slug:slug>/attending/add/', add_attending, name="attending_add"),
path('<slug:slug>/memories/', event_detail,
{'template_name': 'happenings/memory_list.html'},
name="event_memories"
),
path('<slug:event_slug>/memories/<int:pk>/', memory_detail, name="memory_detail"),
path('<slug:slug>/memories/add/', add_memory, name="add_memory"),
# extra info pages
path('<slug:event_slug>/extra/<slug:slug>/', extrainfo_detail, name="special_event_extra"),
# updates
path('<slug:slug>/updates/', event_update_list, name="event_update_list"),
path('<slug:event_slug>/updates/<int:pk>/', event_update, name="event_update_detail"),
path('<slug:event_slug>/updates/<int:pk>/slides/',
DetailView.as_view(
template_name="happenings/updates/update_slides.html",
<|code_end|>
. Use current file imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
event_list, event_detail, events_period, \
event_update, memory_detail, extrainfo_detail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context (classes, functions, or code) from other files:
# Path: happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# return reverse('event_update_detail', args=[str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# def get_gallery_url(self):
# return reverse('update_slides', args=[self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | queryset=Update.objects.all() |
Predict the next line after this snippet: <|code_start|>
# CRUD and admin functions
urlpatterns = [
# FORMS
path('add/', add_event, name="add_event"),
path('<slug:slug>/edit-event/', edit_event, name="edit-event"),
<|code_end|>
using the current file's imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
event_list, event_detail, events_period, \
event_update, memory_detail, extrainfo_detail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and any relevant context from other files:
# Path: happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# return reverse('event_update_detail', args=[str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# def get_gallery_url(self):
# return reverse('update_slides', args=[self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:slug>/add-recap/', add_recap, name="add_recap"), |
Based on the snippet: <|code_start|>
# CRUD and admin functions
urlpatterns = [
# FORMS
path('add/', add_event, name="add_event"),
path('<slug:slug>/edit-event/', edit_event, name="edit-event"),
path('<slug:slug>/add-recap/', add_recap, name="add_recap"),
# EVENT LISTS
path('', event_list, name="events_index"),
path('by-region/<slug:region>/', event_list, name="events_by_region"),
path('by-state/<slug:state>/', event_list, name="events_by_state"),
re_path(
r'^(?P<m>\d{2})/(?P<d>\d{2})/(?P<y>\d{4})/$',
events_period,
name="events_for_day"
),
re_path(
r'^(?P<m>\d{2})/(?P<y>\d{4})/$',
events_period,
name="events_for_month"
),
# ************* EVENT DETAILS *************/
path('<slug:slug>/', event_detail, name="event_detail"),
<|code_end|>
, predict the immediate next line with the help of imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
event_list, event_detail, events_period, \
event_update, memory_detail, extrainfo_detail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context (classes, functions, sometimes code) from other files:
# Path: happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# return reverse('event_update_detail', args=[str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# def get_gallery_url(self):
# return reverse('update_slides', args=[self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:slug>/ical/', create_ical, name="event_ical"), |
Given the code snippet: <|code_start|>
# CRUD and admin functions
urlpatterns = [
# FORMS
path('add/', add_event, name="add_event"),
path('<slug:slug>/edit-event/', edit_event, name="edit-event"),
path('<slug:slug>/add-recap/', add_recap, name="add_recap"),
# EVENT LISTS
<|code_end|>
, generate the next line using the imports in this file:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
event_list, event_detail, events_period, \
event_update, memory_detail, extrainfo_detail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context (functions, classes, or occasionally code) from other files:
# Path: happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# return reverse('event_update_detail', args=[str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# def get_gallery_url(self):
# return reverse('update_slides', args=[self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('', event_list, name="events_index"), |
Continue the code snippet: <|code_start|>
# CRUD and admin functions
urlpatterns = [
# FORMS
path('add/', add_event, name="add_event"),
path('<slug:slug>/edit-event/', edit_event, name="edit-event"),
path('<slug:slug>/add-recap/', add_recap, name="add_recap"),
# EVENT LISTS
path('', event_list, name="events_index"),
path('by-region/<slug:region>/', event_list, name="events_by_region"),
path('by-state/<slug:state>/', event_list, name="events_by_state"),
re_path(
r'^(?P<m>\d{2})/(?P<d>\d{2})/(?P<y>\d{4})/$',
events_period,
name="events_for_day"
),
re_path(
r'^(?P<m>\d{2})/(?P<y>\d{4})/$',
events_period,
name="events_for_month"
),
# ************* EVENT DETAILS *************/
<|code_end|>
. Use current file imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
event_list, event_detail, events_period, \
event_update, memory_detail, extrainfo_detail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context (classes, functions, or code) from other files:
# Path: happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# return reverse('event_update_detail', args=[str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# def get_gallery_url(self):
# return reverse('update_slides', args=[self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:slug>/', event_detail, name="event_detail"), |
Here is a snippet: <|code_start|>
# CRUD and admin functions
urlpatterns = [
# FORMS
path('add/', add_event, name="add_event"),
path('<slug:slug>/edit-event/', edit_event, name="edit-event"),
path('<slug:slug>/add-recap/', add_recap, name="add_recap"),
# EVENT LISTS
path('', event_list, name="events_index"),
path('by-region/<slug:region>/', event_list, name="events_by_region"),
path('by-state/<slug:state>/', event_list, name="events_by_state"),
re_path(
r'^(?P<m>\d{2})/(?P<d>\d{2})/(?P<y>\d{4})/$',
<|code_end|>
. Write the next line using the current file imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
event_list, event_detail, events_period, \
event_update, memory_detail, extrainfo_detail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context from other files:
# Path: happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# return reverse('event_update_detail', args=[str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# def get_gallery_url(self):
# return reverse('update_slides', args=[self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
, which may include functions, classes, or code. Output only the next line. | events_period, |
Predict the next line for this snippet: <|code_start|> path('<slug:slug>/ical/', create_ical, name="event_ical"),
# **************** Event children ************/
path('<slug:slug>/slides/', event_detail,
{'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
path('<slug:slug>/videos/', video_list, name="event_video_list"),
path('<slug:slug>/all-comments/', event_all_comments_list, name="event_comments"),
path('<slug:slug>/map/', event_detail,
{'template_name': 'happenings/event_map.html'},
name="event_map"
),
path('<slug:slug>/attending/', event_detail,
{'template_name': 'happenings/attending/list.html'},
name='event_attending_list'
),
path('<slug:slug>/attending/add/', add_attending, name="attending_add"),
path('<slug:slug>/memories/', event_detail,
{'template_name': 'happenings/memory_list.html'},
name="event_memories"
),
path('<slug:event_slug>/memories/<int:pk>/', memory_detail, name="memory_detail"),
path('<slug:slug>/memories/add/', add_memory, name="add_memory"),
# extra info pages
path('<slug:event_slug>/extra/<slug:slug>/', extrainfo_detail, name="special_event_extra"),
# updates
path('<slug:slug>/updates/', event_update_list, name="event_update_list"),
<|code_end|>
with the help of current file imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
event_list, event_detail, events_period, \
event_update, memory_detail, extrainfo_detail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context from other files:
# Path: happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# return reverse('event_update_detail', args=[str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# def get_gallery_url(self):
# return reverse('update_slides', args=[self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
, which may contain function names, class names, or code. Output only the next line. | path('<slug:event_slug>/updates/<int:pk>/', event_update, name="event_update_detail"), |
Predict the next line for this snippet: <|code_start|> re_path(
r'^(?P<m>\d{2})/(?P<y>\d{4})/$',
events_period,
name="events_for_month"
),
# ************* EVENT DETAILS *************/
path('<slug:slug>/', event_detail, name="event_detail"),
path('<slug:slug>/ical/', create_ical, name="event_ical"),
# **************** Event children ************/
path('<slug:slug>/slides/', event_detail,
{'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
path('<slug:slug>/videos/', video_list, name="event_video_list"),
path('<slug:slug>/all-comments/', event_all_comments_list, name="event_comments"),
path('<slug:slug>/map/', event_detail,
{'template_name': 'happenings/event_map.html'},
name="event_map"
),
path('<slug:slug>/attending/', event_detail,
{'template_name': 'happenings/attending/list.html'},
name='event_attending_list'
),
path('<slug:slug>/attending/add/', add_attending, name="attending_add"),
path('<slug:slug>/memories/', event_detail,
{'template_name': 'happenings/memory_list.html'},
name="event_memories"
),
<|code_end|>
with the help of current file imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
event_list, event_detail, events_period, \
event_update, memory_detail, extrainfo_detail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context from other files:
# Path: happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# return reverse('event_update_detail', args=[str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# def get_gallery_url(self):
# return reverse('update_slides', args=[self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
, which may contain function names, class names, or code. Output only the next line. | path('<slug:event_slug>/memories/<int:pk>/', memory_detail, name="memory_detail"), |
Using the snippet: <|code_start|> ),
# ************* EVENT DETAILS *************/
path('<slug:slug>/', event_detail, name="event_detail"),
path('<slug:slug>/ical/', create_ical, name="event_ical"),
# **************** Event children ************/
path('<slug:slug>/slides/', event_detail,
{'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
path('<slug:slug>/videos/', video_list, name="event_video_list"),
path('<slug:slug>/all-comments/', event_all_comments_list, name="event_comments"),
path('<slug:slug>/map/', event_detail,
{'template_name': 'happenings/event_map.html'},
name="event_map"
),
path('<slug:slug>/attending/', event_detail,
{'template_name': 'happenings/attending/list.html'},
name='event_attending_list'
),
path('<slug:slug>/attending/add/', add_attending, name="attending_add"),
path('<slug:slug>/memories/', event_detail,
{'template_name': 'happenings/memory_list.html'},
name="event_memories"
),
path('<slug:event_slug>/memories/<int:pk>/', memory_detail, name="memory_detail"),
path('<slug:slug>/memories/add/', add_memory, name="add_memory"),
# extra info pages
<|code_end|>
, determine the next line of code. You have imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
event_list, event_detail, events_period, \
event_update, memory_detail, extrainfo_detail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context (class names, function names, or code) available:
# Path: happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# return reverse('event_update_detail', args=[str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# def get_gallery_url(self):
# return reverse('update_slides', args=[self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:event_slug>/extra/<slug:slug>/', extrainfo_detail, name="special_event_extra"), |
Given the code snippet: <|code_start|> path('<slug:slug>/', event_detail, name="event_detail"),
path('<slug:slug>/ical/', create_ical, name="event_ical"),
# **************** Event children ************/
path('<slug:slug>/slides/', event_detail,
{'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
path('<slug:slug>/videos/', video_list, name="event_video_list"),
path('<slug:slug>/all-comments/', event_all_comments_list, name="event_comments"),
path('<slug:slug>/map/', event_detail,
{'template_name': 'happenings/event_map.html'},
name="event_map"
),
path('<slug:slug>/attending/', event_detail,
{'template_name': 'happenings/attending/list.html'},
name='event_attending_list'
),
path('<slug:slug>/attending/add/', add_attending, name="attending_add"),
path('<slug:slug>/memories/', event_detail,
{'template_name': 'happenings/memory_list.html'},
name="event_memories"
),
path('<slug:event_slug>/memories/<int:pk>/', memory_detail, name="memory_detail"),
path('<slug:slug>/memories/add/', add_memory, name="add_memory"),
# extra info pages
path('<slug:event_slug>/extra/<slug:slug>/', extrainfo_detail, name="special_event_extra"),
# updates
<|code_end|>
, generate the next line using the imports in this file:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
event_list, event_detail, events_period, \
event_update, memory_detail, extrainfo_detail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context (functions, classes, or occasionally code) from other files:
# Path: happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# return reverse('event_update_detail', args=[str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# def get_gallery_url(self):
# return reverse('update_slides', args=[self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:slug>/updates/', event_update_list, name="event_update_list"), |
Given the code snippet: <|code_start|>urlpatterns = [
# FORMS
path('add/', add_event, name="add_event"),
path('<slug:slug>/edit-event/', edit_event, name="edit-event"),
path('<slug:slug>/add-recap/', add_recap, name="add_recap"),
# EVENT LISTS
path('', event_list, name="events_index"),
path('by-region/<slug:region>/', event_list, name="events_by_region"),
path('by-state/<slug:state>/', event_list, name="events_by_state"),
re_path(
r'^(?P<m>\d{2})/(?P<d>\d{2})/(?P<y>\d{4})/$',
events_period,
name="events_for_day"
),
re_path(
r'^(?P<m>\d{2})/(?P<y>\d{4})/$',
events_period,
name="events_for_month"
),
# ************* EVENT DETAILS *************/
path('<slug:slug>/', event_detail, name="event_detail"),
path('<slug:slug>/ical/', create_ical, name="event_ical"),
# **************** Event children ************/
path('<slug:slug>/slides/', event_detail,
{'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
<|code_end|>
, generate the next line using the imports in this file:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
event_list, event_detail, events_period, \
event_update, memory_detail, extrainfo_detail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context (functions, classes, or occasionally code) from other files:
# Path: happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# return reverse('event_update_detail', args=[str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# def get_gallery_url(self):
# return reverse('update_slides', args=[self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:slug>/videos/', video_list, name="event_video_list"), |
Next line prediction: <|code_start|> # FORMS
path('add/', add_event, name="add_event"),
path('<slug:slug>/edit-event/', edit_event, name="edit-event"),
path('<slug:slug>/add-recap/', add_recap, name="add_recap"),
# EVENT LISTS
path('', event_list, name="events_index"),
path('by-region/<slug:region>/', event_list, name="events_by_region"),
path('by-state/<slug:state>/', event_list, name="events_by_state"),
re_path(
r'^(?P<m>\d{2})/(?P<d>\d{2})/(?P<y>\d{4})/$',
events_period,
name="events_for_day"
),
re_path(
r'^(?P<m>\d{2})/(?P<y>\d{4})/$',
events_period,
name="events_for_month"
),
# ************* EVENT DETAILS *************/
path('<slug:slug>/', event_detail, name="event_detail"),
path('<slug:slug>/ical/', create_ical, name="event_ical"),
# **************** Event children ************/
path('<slug:slug>/slides/', event_detail,
{'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
path('<slug:slug>/videos/', video_list, name="event_video_list"),
<|code_end|>
. Use current file imports:
(from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
event_list, event_detail, events_period, \
event_update, memory_detail, extrainfo_detail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory)
and context including class names, function names, or small code snippets from other files:
# Path: happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# return reverse('event_update_detail', args=[str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# def get_gallery_url(self):
# return reverse('update_slides', args=[self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:slug>/all-comments/', event_all_comments_list, name="event_comments"), |
Given the code snippet: <|code_start|> re_path(
r'^(?P<m>\d{2})/(?P<d>\d{2})/(?P<y>\d{4})/$',
events_period,
name="events_for_day"
),
re_path(
r'^(?P<m>\d{2})/(?P<y>\d{4})/$',
events_period,
name="events_for_month"
),
# ************* EVENT DETAILS *************/
path('<slug:slug>/', event_detail, name="event_detail"),
path('<slug:slug>/ical/', create_ical, name="event_ical"),
# **************** Event children ************/
path('<slug:slug>/slides/', event_detail,
{'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
path('<slug:slug>/videos/', video_list, name="event_video_list"),
path('<slug:slug>/all-comments/', event_all_comments_list, name="event_comments"),
path('<slug:slug>/map/', event_detail,
{'template_name': 'happenings/event_map.html'},
name="event_map"
),
path('<slug:slug>/attending/', event_detail,
{'template_name': 'happenings/attending/list.html'},
name='event_attending_list'
),
<|code_end|>
, generate the next line using the imports in this file:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
event_list, event_detail, events_period, \
event_update, memory_detail, extrainfo_detail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context (functions, classes, or occasionally code) from other files:
# Path: happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# return reverse('event_update_detail', args=[str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# def get_gallery_url(self):
# return reverse('update_slides', args=[self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:slug>/attending/add/', add_attending, name="attending_add"), |
Based on the snippet: <|code_start|> r'^(?P<m>\d{2})/(?P<y>\d{4})/$',
events_period,
name="events_for_month"
),
# ************* EVENT DETAILS *************/
path('<slug:slug>/', event_detail, name="event_detail"),
path('<slug:slug>/ical/', create_ical, name="event_ical"),
# **************** Event children ************/
path('<slug:slug>/slides/', event_detail,
{'template_name': 'happenings/event_slides.html'},
name="event_slides"
),
path('<slug:slug>/videos/', video_list, name="event_video_list"),
path('<slug:slug>/all-comments/', event_all_comments_list, name="event_comments"),
path('<slug:slug>/map/', event_detail,
{'template_name': 'happenings/event_map.html'},
name="event_map"
),
path('<slug:slug>/attending/', event_detail,
{'template_name': 'happenings/attending/list.html'},
name='event_attending_list'
),
path('<slug:slug>/attending/add/', add_attending, name="attending_add"),
path('<slug:slug>/memories/', event_detail,
{'template_name': 'happenings/memory_list.html'},
name="event_memories"
),
path('<slug:event_slug>/memories/<int:pk>/', memory_detail, name="memory_detail"),
<|code_end|>
, predict the immediate next line with the help of imports:
from django.urls import path, re_path
from django.views.generic import DetailView
from .models import Update
from .views import add_event, edit_event, add_recap, create_ical, \
event_list, event_detail, events_period, \
event_update, memory_detail, extrainfo_detail, event_update_list, \
video_list, event_all_comments_list, add_attending, add_memory
and context (classes, functions, sometimes code) from other files:
# Path: happenings/models.py
# class Update(models.Model):
# """
# Allows updating the event in near real-time, with blog-style content updates.
# """
# event = models.ForeignKey(
# Event,
# on_delete=models.CASCADE,
# limit_choices_to={'featured': True},
# db_index=True
# )
# title = models.CharField("Update title", max_length=200)
# author = models.ForeignKey(
# UserModel,
# on_delete=models.CASCADE,
# limit_choices_to={'is_staff': True})
# update = models.TextField()
# update_formatted = models.TextField(blank=True, editable=False)
# pub_time = models.DateTimeField(auto_now_add=True)
# last_updated = models.DateTimeField(auto_now=True)
# audio = models.FileField(
# upload_to='audio/events/special/',
# blank=True,
# null=True,
# help_text="Should be MP3 format"
# )
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# return reverse('event_update_detail', args=[str(self.event.slug), str(self.id)])
#
# def save(self, *args, **kwargs):
# self.update_formatted = sanetize_text(self.update)
# super(Update, self).save(*args, **kwargs)
#
# @cached_property
# def comments_open(self):
# """
# Based on the update's event's comments open status
# """
# return self.event.comments_open
#
# def has_image(self):
# if self.updateimage_set.count():
# return True
#
# def get_image(self):
# return self.updateimage_set.latest('id')
#
# def get_gallery_url(self):
# return reverse('update_slides', args=[self.event.slug, str(self.id)])
#
# def get_top_assets(self):
# return self.updateimage_set.all()
#
# Path: happenings/views.py
# class EventList(ListView):
# class EventsForPeriod(EventList):
# class EventDetail(DetailView):
# class EventUpdate(DetailView):
# class ExtraInfoDetail(EventUpdate):
# class EditEvent(UpdateView):
# class AddRecap(EditEvent):
# class MemoryDetail(DetailView):
# def get_queryset(self):
# def get_context_data(self, **kwargs):
# def get_queryset(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_context_data(self, **kwargs):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def get_object(self):
# def create_ical(request, slug):
# def event_all_comments_list(request, slug):
# def event_update_list(request, slug):
# def video_list(request, slug):
# def add_event(request):
# def dispatch(self, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def add_attending(request, slug):
# def add_memory(request, slug):
# def dispatch(self, request, *args, **kwargs):
# def get_context_data(self, **kwargs):
# def get_object(self):
# def process_upload(upload_file, instance, form, event, request):
. Output only the next line. | path('<slug:slug>/memories/add/', add_memory, name="add_memory"), |
Continue the code snippet: <|code_start|> try:
img_file = SimpleUploadedFile(clean_filename, temp_handle.read(), 'image/jpeg',)
except Exception:
img_file = None
if img_file is not None:
try:
Image.objects.get(image=dirstring + clean_filename)
except Image.DoesNotExist:
new_img = Image(
event=self.event,
image=img_file,
)
new_img.save()
return # note that we're not actually saving the zip. No good reason to.
if supports_video:
class EventVideo(models.Model):
video = models.ForeignKey(
'video.Video',
on_delete=models.CASCADE,
related_name="event_video"
)
event = models.ForeignKey(
Event,
on_delete=models.CASCADE
)
if supports_comments:
<|code_end|>
. Use current file imports:
import datetime
import os
import zipfile
from io import BytesIO
from itertools import chain
from PIL import Image as PIL_Image
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import models
from django.db.models import signals, Q
from django.urls import reverse
from django.utils.functional import cached_property
from tango_shared.models import ContentImage, BaseUserContentModel, BaseSidebarContentModel
from tango_shared.utils.maptools import get_geocode
from tango_shared.utils.sanetize import sanetize_text
from .signals import update_time
from tango_comments.models import Comment
and context (classes, functions, or code) from other files:
# Path: happenings/signals.py
# def update_time(sender, **kwargs):
# """
# When a Comment is added, updates the Update to set "last_updated" time
# """
# comment = kwargs['instance']
# if comment.content_type.app_label == "happenings" and comment.content_type.name == "Update":
# from .models import Update
# item = Update.objects.get(id=comment.object_pk)
# item.save()
. Output only the next line. | signals.post_save.connect(update_time, sender=Comment) |
Continue the code snippet: <|code_start|> """
Print the svg instructions to draw the axis to the output.
:arg output: Stream where to write.
:type output: file
:arg x_coord: x-coordinate where to start (bottom) the Y-axis in the svg.
:type x_coord: float
:arg y_coord: y-coordinate where to start (bottom) the Y-axis in the svg.
:type y_coord: float
"""
output.write("<line x1=\"%d\" y1=\"%d\" " % (x_coord, y_coord - 8))
output.write("x2=\"%d\" y2=\"%d\" " % (x_coord, y_coord - 110))
output.write("style=\"stroke:rgb(99,99,99);stroke-width:2\" />\n")
draw_rectangle(output, x_coord - 7, y_coord - 10, 2, 7)
output.write("<text x=\"%d\" y=\"%d\">\n0\n</text>\n" % (x_coord - 17,
y_coord - 5))
draw_rectangle(output, x_coord - 7, y_coord - 60, 2, 7)
output.write("<text x=\"%d\" y=\"%d\">\n1\n</text>\n" % (x_coord - 17,
y_coord - 55))
draw_rectangle(output, x_coord - 7, y_coord - 110, 2, 7)
output.write("<text x=\"%d\" y=\"%d\">\n2\n</text>\n" % (x_coord - 17,
y_coord - 105))
output.write("<text x=\"%d\" y=\"%d\"\n" % (x_coord - 30, y_coord - 55))
output.write("transform=\"rotate(-90, %d, %d)\">\n" % (x_coord - 30,
y_coord - 50))
output.write("bits\n</text>\n")
<|code_end|>
. Use current file imports:
from constants import BLACK, WHITE, BLUE, RED, GREEN, YELLOW, ALPHABET
from constants import TFFM_KIND, LOGO_TYPE
import tffm_module
and context (classes, functions, or code) from other files:
# Path: constants.py
# BLACK = (0, 0, 0)
#
# WHITE = (255, 255, 255)
#
# BLUE = (0, 0, 128)
#
# RED = (178, 34, 34)
#
# GREEN = (0, 100, 0)
#
# YELLOW = (255, 165, 0)
#
# ALPHABET = ['A', 'C', 'G', 'T']
#
# Path: constants.py
# TFFM_KIND = enum(FIRST_ORDER="1st-order", DETAILED="detailed",
# ZERO_ORDER="0-order")
#
# LOGO_TYPE = enum(SUMMARY="summary", DENSE="dense")
. Output only the next line. | def draw_ellipse(output, x_center, y_center, x_radius, y_radius, colour=BLACK, |
Predict the next line after this snippet: <|code_start|> :type width: float
:arg height: Height of the box containing the 'A'.
:type height: float
:arg colour: Color of the 'A' (default: black). The colour is given in
RGB.
:type colour: tuple (int, int, int)
:arg opacity: Opacity of the 'A' (default: 1.0)
:type opacity: float
:note: The 'A' is drawn by creating a red polygon giving the shape and a
white internal triangle above to make the bar in the middle of the 'A'
appear.
"""
output.write("<!-- Begin 'A' -->\n")
pt1 = (x_coord, y_coord + height)
pt2 = (x_coord + width * 0.42, y_coord)
pt3 = (x_coord + width * 0.58, y_coord)
pt4 = (x_coord + width, y_coord + height)
pt5 = (x_coord + 0.85 * width, y_coord + height)
pt6 = (x_coord + 0.725 * width, y_coord + 0.75 * height)
pt7 = (x_coord + 0.275 * width, y_coord + 0.75 * height)
pt8 = (x_coord + 0.15 * width, y_coord + height)
points = [pt1, pt2, pt3, pt4, pt5, pt6, pt7, pt8]
draw_polygon(output, points, colour, opacity)
if height > 8:
pt1 = (x_coord + 0.5 * width, y_coord + 0.2 * height)
pt2 = (x_coord + 0.34 * width, y_coord + 0.6 * height - 1)
pt3 = (x_coord + 0.64 * width, y_coord + 0.6 * height - 1)
<|code_end|>
using the current file's imports:
from constants import BLACK, WHITE, BLUE, RED, GREEN, YELLOW, ALPHABET
from constants import TFFM_KIND, LOGO_TYPE
import tffm_module
and any relevant context from other files:
# Path: constants.py
# BLACK = (0, 0, 0)
#
# WHITE = (255, 255, 255)
#
# BLUE = (0, 0, 128)
#
# RED = (178, 34, 34)
#
# GREEN = (0, 100, 0)
#
# YELLOW = (255, 165, 0)
#
# ALPHABET = ['A', 'C', 'G', 'T']
#
# Path: constants.py
# TFFM_KIND = enum(FIRST_ORDER="1st-order", DETAILED="detailed",
# ZERO_ORDER="0-order")
#
# LOGO_TYPE = enum(SUMMARY="summary", DENSE="dense")
. Output only the next line. | draw_polygon(output, [pt1, pt2, pt3], WHITE) |
Predict the next line after this snippet: <|code_start|> RGB.
:type colour: tuple (int, int, int)
:arg opacity: Opacity of the 'A' (default: 1.0)
:type opacity: float
:note: The 'A' is drawn by creating a red polygon giving the shape and a
white internal triangle above to make the bar in the middle of the 'A'
appear.
"""
output.write("<!-- Begin 'A' -->\n")
pt1 = (x_coord, y_coord + height)
pt2 = (x_coord + width * 0.42, y_coord)
pt3 = (x_coord + width * 0.58, y_coord)
pt4 = (x_coord + width, y_coord + height)
pt5 = (x_coord + 0.85 * width, y_coord + height)
pt6 = (x_coord + 0.725 * width, y_coord + 0.75 * height)
pt7 = (x_coord + 0.275 * width, y_coord + 0.75 * height)
pt8 = (x_coord + 0.15 * width, y_coord + height)
points = [pt1, pt2, pt3, pt4, pt5, pt6, pt7, pt8]
draw_polygon(output, points, colour, opacity)
if height > 8:
pt1 = (x_coord + 0.5 * width, y_coord + 0.2 * height)
pt2 = (x_coord + 0.34 * width, y_coord + 0.6 * height - 1)
pt3 = (x_coord + 0.64 * width, y_coord + 0.6 * height - 1)
draw_polygon(output, [pt1, pt2, pt3], WHITE)
output.write("<!-- End 'A' -->\n")
<|code_end|>
using the current file's imports:
from constants import BLACK, WHITE, BLUE, RED, GREEN, YELLOW, ALPHABET
from constants import TFFM_KIND, LOGO_TYPE
import tffm_module
and any relevant context from other files:
# Path: constants.py
# BLACK = (0, 0, 0)
#
# WHITE = (255, 255, 255)
#
# BLUE = (0, 0, 128)
#
# RED = (178, 34, 34)
#
# GREEN = (0, 100, 0)
#
# YELLOW = (255, 165, 0)
#
# ALPHABET = ['A', 'C', 'G', 'T']
#
# Path: constants.py
# TFFM_KIND = enum(FIRST_ORDER="1st-order", DETAILED="detailed",
# ZERO_ORDER="0-order")
#
# LOGO_TYPE = enum(SUMMARY="summary", DENSE="dense")
. Output only the next line. | def draw_letter_c(output, x_coord, y_coord, width, height, colour=BLUE, |
Given the following code snippet before the placeholder: <|code_start|> opacity=1.0):
"""
Print the svg instructions to draw an rectangle to the output.
:arg output: Stream where to write.
:type output: file
:arg x_coord: x-coordinate of the bottom-left corner of the rectangle.
:type x_coord: float
:arg y_coord: y-coordinate of the bottom-left corner of the rectangle.
:type y_coord: float
:arg height: Height of the rectangle.
:type height: float
:arg width: Width of the rectangle.
:type width: float
:arg colour: Color of the rectangle (default: black). The colour is given
in RGB.
:type colour: tuple (int, int, int)
:arg opacity: Opacity of the rectangle (default: 1.0)
:type opacity: float
:note: Look at the rectangle statement of an svg file for more information.
"""
output.write("<rect height=\"%f\" width=\"%f\" " % (height, width))
output.write("x=\"%f\" y=\"%f\" " % (x_coord, y_coord))
output.write("fill=\"rgb%s\" style=\"fill-opacity:%.2f\" />\n" %
(str(colour), opacity))
<|code_end|>
, predict the next line using imports from the current file:
from constants import BLACK, WHITE, BLUE, RED, GREEN, YELLOW, ALPHABET
from constants import TFFM_KIND, LOGO_TYPE
import tffm_module
and context including class names, function names, and sometimes code from other files:
# Path: constants.py
# BLACK = (0, 0, 0)
#
# WHITE = (255, 255, 255)
#
# BLUE = (0, 0, 128)
#
# RED = (178, 34, 34)
#
# GREEN = (0, 100, 0)
#
# YELLOW = (255, 165, 0)
#
# ALPHABET = ['A', 'C', 'G', 'T']
#
# Path: constants.py
# TFFM_KIND = enum(FIRST_ORDER="1st-order", DETAILED="detailed",
# ZERO_ORDER="0-order")
#
# LOGO_TYPE = enum(SUMMARY="summary", DENSE="dense")
. Output only the next line. | def draw_letter_a(output, x_coord, y_coord, width, height, colour=RED, |
Given snippet: <|code_start|> :type y_coord: float
:arg width: Width of the box containing the 'G'.
:type width: float
:arg height: Height of the box containing the 'G'.
:type height: float
:arg colour: Color of the box containing the 'G' (default: black). The
colour is given in RGB.
:type colour: tuple (int, int, int)
:arg opacity: Opacity of the box containing the 'G' (default: 1.0)
:type opacity: float
:note: The 'G' is drawn by creating a 'C' and then adding the two other
rectangles.
"""
output.write("<!-- Begin 'G' -->\n")
draw_ellipse(output, x_coord + width / 2, y_coord + height / 2, width / 2,
height / 2, colour, opacity)
draw_ellipse(output, x_coord + width / 2, y_coord + height / 2, width / 3,
height / 3, WHITE)
draw_rectangle(output, x_coord + width / 2, y_coord + height / 4, height /
2, width / 2, WHITE)
draw_rectangle(output, x_coord + width / 2, y_coord + 5 * height / 8,
height / 8, width / 2, colour, opacity)
draw_rectangle(output, x_coord + 7 * width / 8, y_coord + 5 * height / 8,
height - 5 * height / 8, width / 8, colour, opacity)
output.write("<!-- End 'G' -->\n")
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from constants import BLACK, WHITE, BLUE, RED, GREEN, YELLOW, ALPHABET
from constants import TFFM_KIND, LOGO_TYPE
import tffm_module
and context:
# Path: constants.py
# BLACK = (0, 0, 0)
#
# WHITE = (255, 255, 255)
#
# BLUE = (0, 0, 128)
#
# RED = (178, 34, 34)
#
# GREEN = (0, 100, 0)
#
# YELLOW = (255, 165, 0)
#
# ALPHABET = ['A', 'C', 'G', 'T']
#
# Path: constants.py
# TFFM_KIND = enum(FIRST_ORDER="1st-order", DETAILED="detailed",
# ZERO_ORDER="0-order")
#
# LOGO_TYPE = enum(SUMMARY="summary", DENSE="dense")
which might include code, classes, or functions. Output only the next line. | def draw_letter_t(output, x_coord, y_coord, width, height, colour=GREEN, |
Using the snippet: <|code_start|> :type x_coord: float
:arg y_coord: y-coordinate of the bottom left corner ot the box containing
the 'C'.
:type y_coord: float
:arg width: Width of the box containing the 'C'.
:type width: float
:arg height: Height of the box containing the 'C'.
:type height: float
:arg colour: Color of the box containing the 'C' (default: black). The
colour is given in RGB.
:type colour: tuple (int, int, int)
:arg opacity: Opacity of the box containing the 'C' (default: 1.0)
:type opacity: float
:note: The 'C' is drawn by creating the surrounding ellipse with the right
colour and then superposing an smaller white ellipse and a white
rectangle on the right side of the ellipe to create the 'C'.
"""
output.write("<!-- Begin 'C' -->\n")
draw_ellipse(output, x_coord + width / 2, y_coord + height / 2, width / 2,
height / 2, colour, opacity)
draw_ellipse(output, x_coord + width / 2, y_coord + height / 2, width / 3,
height / 3, WHITE)
draw_rectangle(output, x_coord + width / 2, y_coord + height / 4,
0.5 * height, width / 2, WHITE)
output.write("<!-- End 'C' -->\n")
<|code_end|>
, determine the next line of code. You have imports:
from constants import BLACK, WHITE, BLUE, RED, GREEN, YELLOW, ALPHABET
from constants import TFFM_KIND, LOGO_TYPE
import tffm_module
and context (class names, function names, or code) available:
# Path: constants.py
# BLACK = (0, 0, 0)
#
# WHITE = (255, 255, 255)
#
# BLUE = (0, 0, 128)
#
# RED = (178, 34, 34)
#
# GREEN = (0, 100, 0)
#
# YELLOW = (255, 165, 0)
#
# ALPHABET = ['A', 'C', 'G', 'T']
#
# Path: constants.py
# TFFM_KIND = enum(FIRST_ORDER="1st-order", DETAILED="detailed",
# ZERO_ORDER="0-order")
#
# LOGO_TYPE = enum(SUMMARY="summary", DENSE="dense")
. Output only the next line. | def draw_letter_g(output, x_coord, y_coord, width, height, colour=YELLOW, |
Based on the snippet: <|code_start|> :type step: float
:note: The computation of the information content is done within the
drawing since it follows the same algorithm computing the emission
probabilities. So we do not call the get_information_content method for
an algorithmic improvement.
"""
previous_position_proba = tffm.background_emission_proba()
information_content = 0.
start = tffm.get_position_start()
for position in range(start, len(tffm) + start):
position_proba = {'A': 0., 'C': 0., 'G': 0., 'T': 0.}
yposition = 10.
if tffm.kind == TFFM_KIND.ZERO_ORDER:
__ = tffm.get_emission_update_pos_proba(position_proba, position,
previous_position_proba, 0)
else:
for i in range(0, 4):
if logo_type == LOGO_TYPE.SUMMARY:
__ = tffm.get_emission_update_pos_proba(position_proba,
position,
previous_position_proba,
i)
else:
emissions = tffm.get_emission_update_pos_proba(
position_proba, position, previous_position_proba, i)
yposition = draw_dense_letters(output, emissions, width,
xposition, yposition, step,
previous_position_proba,
<|code_end|>
, predict the immediate next line with the help of imports:
from constants import BLACK, WHITE, BLUE, RED, GREEN, YELLOW, ALPHABET
from constants import TFFM_KIND, LOGO_TYPE
import tffm_module
and context (classes, functions, sometimes code) from other files:
# Path: constants.py
# BLACK = (0, 0, 0)
#
# WHITE = (255, 255, 255)
#
# BLUE = (0, 0, 128)
#
# RED = (178, 34, 34)
#
# GREEN = (0, 100, 0)
#
# YELLOW = (255, 165, 0)
#
# ALPHABET = ['A', 'C', 'G', 'T']
#
# Path: constants.py
# TFFM_KIND = enum(FIRST_ORDER="1st-order", DETAILED="detailed",
# ZERO_ORDER="0-order")
#
# LOGO_TYPE = enum(SUMMARY="summary", DENSE="dense")
. Output only the next line. | ALPHABET[i]) |
Next line prediction: <|code_start|> step=10.):
"""
Print the svg instructions to draw the letters of the logo and return the
information content to be printed.
:arg output: Stream where to write the svg instruction to draw the logo.
:type output: file
:arg tffm: The TFFM for which drawing the logo.
:type tffm: :class:`TFFM`
:arg logo_type: Kind of logo to draw (either 'summary' or 'dense')
:type logo_type: str
:arg xposition: x-coordinate where to start the logo (default: 90.)
:type xposition: float
:arg width: Width of the logo.
:type within: float
:arg step: Distance between two boxes containing letters in the logo.
:type step: float
:note: The computation of the information content is done within the
drawing since it follows the same algorithm computing the emission
probabilities. So we do not call the get_information_content method for
an algorithmic improvement.
"""
previous_position_proba = tffm.background_emission_proba()
information_content = 0.
start = tffm.get_position_start()
for position in range(start, len(tffm) + start):
position_proba = {'A': 0., 'C': 0., 'G': 0., 'T': 0.}
yposition = 10.
<|code_end|>
. Use current file imports:
(from constants import BLACK, WHITE, BLUE, RED, GREEN, YELLOW, ALPHABET
from constants import TFFM_KIND, LOGO_TYPE
import tffm_module)
and context including class names, function names, or small code snippets from other files:
# Path: constants.py
# BLACK = (0, 0, 0)
#
# WHITE = (255, 255, 255)
#
# BLUE = (0, 0, 128)
#
# RED = (178, 34, 34)
#
# GREEN = (0, 100, 0)
#
# YELLOW = (255, 165, 0)
#
# ALPHABET = ['A', 'C', 'G', 'T']
#
# Path: constants.py
# TFFM_KIND = enum(FIRST_ORDER="1st-order", DETAILED="detailed",
# ZERO_ORDER="0-order")
#
# LOGO_TYPE = enum(SUMMARY="summary", DENSE="dense")
. Output only the next line. | if tffm.kind == TFFM_KIND.ZERO_ORDER: |
Given snippet: <|code_start|> :arg output: Stream where to write the svg instruction to draw the logo.
:type output: file
:arg tffm: The TFFM for which drawing the logo.
:type tffm: :class:`TFFM`
:arg logo_type: Kind of logo to draw (either 'summary' or 'dense')
:type logo_type: str
:arg xposition: x-coordinate where to start the logo (default: 90.)
:type xposition: float
:arg width: Width of the logo.
:type within: float
:arg step: Distance between two boxes containing letters in the logo.
:type step: float
:note: The computation of the information content is done within the
drawing since it follows the same algorithm computing the emission
probabilities. So we do not call the get_information_content method for
an algorithmic improvement.
"""
previous_position_proba = tffm.background_emission_proba()
information_content = 0.
start = tffm.get_position_start()
for position in range(start, len(tffm) + start):
position_proba = {'A': 0., 'C': 0., 'G': 0., 'T': 0.}
yposition = 10.
if tffm.kind == TFFM_KIND.ZERO_ORDER:
__ = tffm.get_emission_update_pos_proba(position_proba, position,
previous_position_proba, 0)
else:
for i in range(0, 4):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from constants import BLACK, WHITE, BLUE, RED, GREEN, YELLOW, ALPHABET
from constants import TFFM_KIND, LOGO_TYPE
import tffm_module
and context:
# Path: constants.py
# BLACK = (0, 0, 0)
#
# WHITE = (255, 255, 255)
#
# BLUE = (0, 0, 128)
#
# RED = (178, 34, 34)
#
# GREEN = (0, 100, 0)
#
# YELLOW = (255, 165, 0)
#
# ALPHABET = ['A', 'C', 'G', 'T']
#
# Path: constants.py
# TFFM_KIND = enum(FIRST_ORDER="1st-order", DETAILED="detailed",
# ZERO_ORDER="0-order")
#
# LOGO_TYPE = enum(SUMMARY="summary", DENSE="dense")
which might include code, classes, or functions. Output only the next line. | if logo_type == LOGO_TYPE.SUMMARY: |
Next line prediction: <|code_start|> model excluding the background.
"""
if self.kind == TFFM_KIND.FIRST_ORDER:
return self.N - 2
elif self.kind == TFFM_KIND.DETAILED:
return self.N / 4 - 1
else: # 0-order HMM here
return self.N - 1
def background_emission_proba(self):
"""
Return the emission probabilities of the nucleotides in the background
state.
:returns: A dictionnary with characters 'A', 'C', 'G', and 'T' as keys
and the corresponding probabilities as values.
:rtype: dict
"""
emissions = {'A': 0., 'C': 0., 'G': 0., 'T': 0.}
if self.kind == TFFM_KIND.FIRST_ORDER:
emissions = background_emission_proba_1storder(self)
elif self.kind == TFFM_KIND.DETAILED:
emissions = background_emission_proba_detailed(self)
else: # 0-order HMM here
emissions = background_emission_proba_detailed(self)
for i in xrange(4):
<|code_end|>
. Use current file imports:
(import sys
import os
import math
import re
import ghmm
import drawing
import exceptions_errors
import utils
import hit_module
from Bio.Alphabet import IUPAC
from Bio import SeqIO
from Bio import motifs
from constants import ALPHABET, EXTENDED_ALPHABET, TFFM_KIND, LOGO_TYPE)
and context including class names, function names, or small code snippets from other files:
# Path: constants.py
# ALPHABET = ['A', 'C', 'G', 'T']
#
# EXTENDED_ALPHABET = ['A', 'C', 'G', 'T', 'N', 'M', 'K']
#
# TFFM_KIND = enum(FIRST_ORDER="1st-order", DETAILED="detailed",
# ZERO_ORDER="0-order")
#
# LOGO_TYPE = enum(SUMMARY="summary", DENSE="dense")
. Output only the next line. | emissions[ALPHABET[i]] = self.getEmission(0)[i] |
Next line prediction: <|code_start|> considered significant (given in this order).
:rtype: tuple
"""
pos_ic = self.get_positions_ic()
first = 1
last = len(self)
while first <= last and pos_ic[first - 1] < threshold:
first += 1
while last > 0 and pos_ic[last - 1] < threshold:
last -= 1
return first, last
def _get_posterior_proba(self, sequence_split):
"""
Get the posterior probabilities at each nucleotide position given the
TFFM.
:arg sequence_split: The sequence splitted in subsequences to not
consider non ACGT nucleotides.
:type sequence_split: list
:returns: The posterior probabilities at each position of the sequence.
:rtype: list of list
:note: One example of a sequence_split is ["ACT", "N", "ATC"].
"""
<|code_end|>
. Use current file imports:
(import sys
import os
import math
import re
import ghmm
import drawing
import exceptions_errors
import utils
import hit_module
from Bio.Alphabet import IUPAC
from Bio import SeqIO
from Bio import motifs
from constants import ALPHABET, EXTENDED_ALPHABET, TFFM_KIND, LOGO_TYPE)
and context including class names, function names, or small code snippets from other files:
# Path: constants.py
# ALPHABET = ['A', 'C', 'G', 'T']
#
# EXTENDED_ALPHABET = ['A', 'C', 'G', 'T', 'N', 'M', 'K']
#
# TFFM_KIND = enum(FIRST_ORDER="1st-order", DETAILED="detailed",
# ZERO_ORDER="0-order")
#
# LOGO_TYPE = enum(SUMMARY="summary", DENSE="dense")
. Output only the next line. | ghmm_extended_alphabet = ghmm.Alphabet(EXTENDED_ALPHABET) |
Predict the next line for this snippet: <|code_start|> functions :func:`tffm_from_xml` or :func:`tffm_from_meme`.
"""
def __init__(self, emission_domain, distribution, cmodel, kind,
name="TFFM"):
"""
Construct an instance of the TFFM class.
:arg emission_domain: The emission domain of the
underlying :class:`ghmm.HMM`.
:type emission_domain: ghmm.EmissionDomain
:arg distribution: The distribution over the emission domain.
:type distribution: :class:`ghmm.Distribution`
:arg cmodel: The cmodel (HMM itself implemented in C) of the underlying
:class:`ghmm.HMM`.
:arg kind: The TFFM can be either a 0-order, a 1st-order, or a detailed
TFFM, use `TFFM_KIND.ZERO_ORDER, or `TFFM_KIND.FIRST_ORDER`, or
`TFFM_KIND.DETAILED` respectively.
:type kind: Enum
:arg name: Give the name of the TFFM. 'TFFM' is given by default.
:type name: str
:raises: :class:`exceptions.TFFMKindError` when the given kind is
neither '1st-order' nor 'detailed'.
"""
# Construct the underlying ghmm.EmissionHMM
super(ghmm.DiscreteEmissionHMM, self).__init__(emission_domain,
distribution, cmodel)
<|code_end|>
with the help of current file imports:
import sys
import os
import math
import re
import ghmm
import drawing
import exceptions_errors
import utils
import hit_module
from Bio.Alphabet import IUPAC
from Bio import SeqIO
from Bio import motifs
from constants import ALPHABET, EXTENDED_ALPHABET, TFFM_KIND, LOGO_TYPE
and context from other files:
# Path: constants.py
# ALPHABET = ['A', 'C', 'G', 'T']
#
# EXTENDED_ALPHABET = ['A', 'C', 'G', 'T', 'N', 'M', 'K']
#
# TFFM_KIND = enum(FIRST_ORDER="1st-order", DETAILED="detailed",
# ZERO_ORDER="0-order")
#
# LOGO_TYPE = enum(SUMMARY="summary", DENSE="dense")
, which may contain function names, class names, or code. Output only the next line. | if(kind != TFFM_KIND.FIRST_ORDER and kind != TFFM_KIND.DETAILED and |
Using the snippet: <|code_start|> and the corresponding probabilities as values.
:rtype: dict
"""
emissions = {'A': 0., 'C': 0., 'G': 0., 'T': 0.}
if self.kind == TFFM_KIND.FIRST_ORDER:
emissions = background_emission_proba_1storder(self)
elif self.kind == TFFM_KIND.DETAILED:
emissions = background_emission_proba_detailed(self)
else: # 0-order HMM here
emissions = background_emission_proba_detailed(self)
for i in xrange(4):
emissions[ALPHABET[i]] = self.getEmission(0)[i]
return emissions
def print_summary_logo(self, output=sys.stdout):
"""
Print the svg code of the corresponding summary logo (i.e. similar to a
regular sequence logo).
:arg output: Stream where to output the svg
(defaut: :class:`sys.stdout`).
:type output: file
:note: The *output* argument is not a file name but it is an
**already** open file stream.
"""
<|code_end|>
, determine the next line of code. You have imports:
import sys
import os
import math
import re
import ghmm
import drawing
import exceptions_errors
import utils
import hit_module
from Bio.Alphabet import IUPAC
from Bio import SeqIO
from Bio import motifs
from constants import ALPHABET, EXTENDED_ALPHABET, TFFM_KIND, LOGO_TYPE
and context (class names, function names, or code) available:
# Path: constants.py
# ALPHABET = ['A', 'C', 'G', 'T']
#
# EXTENDED_ALPHABET = ['A', 'C', 'G', 'T', 'N', 'M', 'K']
#
# TFFM_KIND = enum(FIRST_ORDER="1st-order", DETAILED="detailed",
# ZERO_ORDER="0-order")
#
# LOGO_TYPE = enum(SUMMARY="summary", DENSE="dense")
. Output only the next line. | drawing.draw_logo(output, self, LOGO_TYPE.SUMMARY) |
Next line prediction: <|code_start|> linkingAgentIdentifierRole.text = 'implementer'
linkingAgentIdentifierType.text = i[0]
linkingAgentIdentifierValue.text = i[1]
def main():
print 'This is not a standalone script. It is a library of functions that other scripts can use'
sys.exit()
def setup_xml(source_file):
premisxml = os.path.dirname(os.path.dirname(source_file)) + '/metadata' '/' + os.path.basename(os.path.dirname(os.path.dirname(source_file))) + '_premis.xml'
namespace = '<premis:premis xmlns:premis="http://www.loc.gov/premis/v3" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.loc.gov/premis/v3 https://www.loc.gov/standards/premis/premis.xsd" version="3.0"></premis:premis>'
premis_namespace = "http://www.loc.gov/premis/v3"
xsi_namespace = "http://www.w3.org/2001/XMLSchema-instance"
print premisxml
if os.path.isfile(premisxml):
print 'looks like premis already exists?'
parser = ET.XMLParser(remove_blank_text=True)
doc = ET.parse(premisxml,parser=parser)
premis = doc.getroot()
else:
premis = ET.fromstring(namespace)
doc = ET.ElementTree(premis)
return premisxml, premis_namespace, doc, premis
def representation_uuid_csv(filmographic, source_accession, uuid):
uuid_csv = os.path.expanduser('~/Desktop/uuid.csv')
if not os.path.isfile(uuid_csv):
create_csv(uuid_csv, ('reference number','source accession number' 'uuid'))
<|code_end|>
. Use current file imports:
(import lxml.etree as ET
import lxml.builder as builder
import uuid
import time
import sys
import subprocess
import os
import hashlib
import csv
from glob import glob
from collections import OrderedDict
from ififuncs import append_csv
from ififuncs import create_csv)
and context including class names, function names, or small code snippets from other files:
# Path: ififuncs.py
# def append_csv(csv_file, *args):
# f = open(csv_file, 'a', newline='')
# try:
# writer = csv.writer(f)
# writer.writerow(*args)
# finally:
# f.close()
#
# Path: ififuncs.py
# def create_csv(csv_file, *args):
# f = open(csv_file, 'w', newline='')
# try:
# writer = csv.writer(f)
# writer.writerow(*args)
# finally:
# f.close()
. Output only the next line. | append_csv(uuid_csv, (filmographic, source_accession, uuid) ) |
Predict the next line after this snippet: <|code_start|> linkingAgentIdentifierRole = create_unit(2,linkingAgentIdentifier,'linkingAgentRole')
linkingAgentIdentifierRole.text = 'implementer'
linkingAgentIdentifierType.text = i[0]
linkingAgentIdentifierValue.text = i[1]
def main():
print 'This is not a standalone script. It is a library of functions that other scripts can use'
sys.exit()
def setup_xml(source_file):
premisxml = os.path.dirname(os.path.dirname(source_file)) + '/metadata' '/' + os.path.basename(os.path.dirname(os.path.dirname(source_file))) + '_premis.xml'
namespace = '<premis:premis xmlns:premis="http://www.loc.gov/premis/v3" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.loc.gov/premis/v3 https://www.loc.gov/standards/premis/premis.xsd" version="3.0"></premis:premis>'
premis_namespace = "http://www.loc.gov/premis/v3"
xsi_namespace = "http://www.w3.org/2001/XMLSchema-instance"
print premisxml
if os.path.isfile(premisxml):
print 'looks like premis already exists?'
parser = ET.XMLParser(remove_blank_text=True)
doc = ET.parse(premisxml,parser=parser)
premis = doc.getroot()
else:
premis = ET.fromstring(namespace)
doc = ET.ElementTree(premis)
return premisxml, premis_namespace, doc, premis
def representation_uuid_csv(filmographic, source_accession, uuid):
uuid_csv = os.path.expanduser('~/Desktop/uuid.csv')
if not os.path.isfile(uuid_csv):
<|code_end|>
using the current file's imports:
import lxml.etree as ET
import lxml.builder as builder
import uuid
import time
import sys
import subprocess
import os
import hashlib
import csv
from glob import glob
from collections import OrderedDict
from ififuncs import append_csv
from ififuncs import create_csv
and any relevant context from other files:
# Path: ififuncs.py
# def append_csv(csv_file, *args):
# f = open(csv_file, 'a', newline='')
# try:
# writer = csv.writer(f)
# writer.writerow(*args)
# finally:
# f.close()
#
# Path: ififuncs.py
# def create_csv(csv_file, *args):
# f = open(csv_file, 'w', newline='')
# try:
# writer = csv.writer(f)
# writer.writerow(*args)
# finally:
# f.close()
. Output only the next line. | create_csv(uuid_csv, ('reference number','source accession number' 'uuid')) |
Based on the snippet: <|code_start|>#!/usr/bin/env python
'''
Generates sidecar MD5 or SHA512 checksum manifest.
'''
def remove_bad_files(root_dir, log_name_source):
'''
Removes unwanted files.
Verify if this is different than the same function in ififuncs.
'''
rm_these = ['.DS_Store', 'Thumbs.db', 'desktop.ini']
for root, _, files in os.walk(root_dir):
for name in files:
path = os.path.join(root, name)
for i in rm_these:
if name == i:
print(('***********************' + 'removing: ' + path))
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import os
import argparse
import time
import shutil
import ififuncs
from ififuncs import generate_log
from ififuncs import manifest_file_count
from ififuncs import hashlib_manifest
from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir
and context (classes, functions, sometimes code) from other files:
# Path: ififuncs.py
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
#
# Path: ififuncs.py
# def manifest_file_count(manifest2check):
# '''
# Checks how many entries are in a manifest
# '''
# if os.path.isfile(manifest2check):
# print(' - A manifest already exists')
# with open(manifest2check, "r") as fo:
# manifest_lines = [line.split(',') for line in fo.readlines()]
# count_in_manifest = len(manifest_lines)
# return count_in_manifest
#
# Path: ififuncs.py
# def hashlib_manifest(manifest_dir, manifest_textfile, path_to_remove):
# '''
# Creates an MD5 manifest with relative filepaths.
# '''
# file_count = 0
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if not f[0] == '.']
# directories[:] = [d for d in directories if not d[0] == '.']
# for files in filenames:
# #print(" - Calculating number of files to process in current directory - %s files \r "% file_count)
# print("- Calculating number of files to process in current directory - {0} files ".format(file_count), end="\r")
# file_count += 1
# manifest_generator = ''
# md5_counter = 1
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if f[0] != '.']
# directories[:] = [d for d in directories if d[0] != '.']
# for files in filenames:
# print(' - Generating MD5 for %s - file %d of %d' % (os.path.join(root, files), md5_counter, file_count))
# md5 = hashlib_md5(os.path.join(root, files))
# md5_counter += 1
# root2 = os.path.abspath(root).replace(path_to_remove, '')
# try:
# if root2[0] == '/':
# root2 = root2[1:]
# if root2[0] == '\\':
# root2 = root2[1:]
# except: IndexError
# manifest_generator += md5[:32] + ' ' + os.path.join(root2, files).replace("\\", "/") + '\n'
# manifest_list = manifest_generator.splitlines()
# files_in_manifest = len(manifest_list)
# # http://stackoverflow.com/a/31306961/2188572
# manifest_list = sorted(manifest_list, key=lambda x: (x[34:]))
# with open(manifest_textfile, "w", encoding='utf-8') as fo:
# for i in manifest_list:
# fo.write((unicodedata.normalize('NFC', i) + '\n'))
#
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
#
# def make_desktop_manifest_dir():
# desktop_manifest_dir = os.path.expanduser("~/Desktop/moveit_manifests")
# if not os.path.isdir(desktop_manifest_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_manifest_dir)
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# else:
# if not os.path.isdir(os.path.join(desktop_manifest_dir, 'old_manifests')):
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# return desktop_manifest_dir
. Output only the next line. | generate_log( |
Given the following code snippet before the placeholder: <|code_start|> generate_log(
log_name_source,
'EVENT = Generating manifest: status=started, eventType=message digest calculation, module=%s, agent=OSX' % module
)
elif sys.platform == "linux2":
generate_log(
log_name_source,
'EVENT = Generating manifest: status=started, eventType=message digest calculation, module=%s, agent=Linux' % module
)
ififuncs.generate_log(
log_name_source,
'eventDetail=manifest.py %s' % ififuncs.get_script_version('manifest.py'))
generate_log(log_name_source, 'Source: %s' % source)
if os.path.isfile(source):
print('\nFile checksum is not currently supported, only directories.\n')
generate_log(log_name_source, 'Error: Attempted to generate manifest for file. Only Directories/Folders are currently supported')
generate_log(log_name_source, 'manifest.py exit')
sys.exit()
elif not os.path.isdir(source):
print((' %s is either not a directory or it does not exist' % source))
generate_log(log_name_source, ' %s is either not a directory or it does not exist' % source)
generate_log(log_name_source, 'manifest.py exit')
sys.exit()
remove_bad_files(source, log_name_source)
source_count = 0
for _, _, filenames in os.walk(source):
# There has to be a better way to count the files..
for _ in filenames:
source_count += 1 #works in windows at least
if os.path.isfile(manifest):
<|code_end|>
, predict the next line using imports from the current file:
import sys
import os
import argparse
import time
import shutil
import ififuncs
from ififuncs import generate_log
from ififuncs import manifest_file_count
from ififuncs import hashlib_manifest
from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir
and context including class names, function names, and sometimes code from other files:
# Path: ififuncs.py
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
#
# Path: ififuncs.py
# def manifest_file_count(manifest2check):
# '''
# Checks how many entries are in a manifest
# '''
# if os.path.isfile(manifest2check):
# print(' - A manifest already exists')
# with open(manifest2check, "r") as fo:
# manifest_lines = [line.split(',') for line in fo.readlines()]
# count_in_manifest = len(manifest_lines)
# return count_in_manifest
#
# Path: ififuncs.py
# def hashlib_manifest(manifest_dir, manifest_textfile, path_to_remove):
# '''
# Creates an MD5 manifest with relative filepaths.
# '''
# file_count = 0
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if not f[0] == '.']
# directories[:] = [d for d in directories if not d[0] == '.']
# for files in filenames:
# #print(" - Calculating number of files to process in current directory - %s files \r "% file_count)
# print("- Calculating number of files to process in current directory - {0} files ".format(file_count), end="\r")
# file_count += 1
# manifest_generator = ''
# md5_counter = 1
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if f[0] != '.']
# directories[:] = [d for d in directories if d[0] != '.']
# for files in filenames:
# print(' - Generating MD5 for %s - file %d of %d' % (os.path.join(root, files), md5_counter, file_count))
# md5 = hashlib_md5(os.path.join(root, files))
# md5_counter += 1
# root2 = os.path.abspath(root).replace(path_to_remove, '')
# try:
# if root2[0] == '/':
# root2 = root2[1:]
# if root2[0] == '\\':
# root2 = root2[1:]
# except: IndexError
# manifest_generator += md5[:32] + ' ' + os.path.join(root2, files).replace("\\", "/") + '\n'
# manifest_list = manifest_generator.splitlines()
# files_in_manifest = len(manifest_list)
# # http://stackoverflow.com/a/31306961/2188572
# manifest_list = sorted(manifest_list, key=lambda x: (x[34:]))
# with open(manifest_textfile, "w", encoding='utf-8') as fo:
# for i in manifest_list:
# fo.write((unicodedata.normalize('NFC', i) + '\n'))
#
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
#
# def make_desktop_manifest_dir():
# desktop_manifest_dir = os.path.expanduser("~/Desktop/moveit_manifests")
# if not os.path.isdir(desktop_manifest_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_manifest_dir)
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# else:
# if not os.path.isdir(os.path.join(desktop_manifest_dir, 'old_manifests')):
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# return desktop_manifest_dir
. Output only the next line. | count_in_manifest = manifest_file_count(manifest) |
Using the snippet: <|code_start|> if os.path.isfile(source):
print('\nFile checksum is not currently supported, only directories.\n')
generate_log(log_name_source, 'Error: Attempted to generate manifest for file. Only Directories/Folders are currently supported')
generate_log(log_name_source, 'manifest.py exit')
sys.exit()
elif not os.path.isdir(source):
print((' %s is either not a directory or it does not exist' % source))
generate_log(log_name_source, ' %s is either not a directory or it does not exist' % source)
generate_log(log_name_source, 'manifest.py exit')
sys.exit()
remove_bad_files(source, log_name_source)
source_count = 0
for _, _, filenames in os.walk(source):
# There has to be a better way to count the files..
for _ in filenames:
source_count += 1 #works in windows at least
if os.path.isfile(manifest):
count_in_manifest = manifest_file_count(manifest)
if source_count != count_in_manifest:
print('This manifest may be outdated as the number of files in your directory does not match the number of files in the manifest')
generate_log(log_name_source, 'EVENT = Existing source manifest check - Failure - The number of files in the source directory is not equal to the number of files in the source manifest ')
sys.exit()
if not os.path.isfile(manifest):
try:
print('Generating source manifest')
generate_log(log_name_source, 'EVENT = Generating source manifest')
if args.f:
if args.sha512:
ififuncs.sha512_manifest(source, manifest, source)
else:
<|code_end|>
, determine the next line of code. You have imports:
import sys
import os
import argparse
import time
import shutil
import ififuncs
from ififuncs import generate_log
from ififuncs import manifest_file_count
from ififuncs import hashlib_manifest
from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir
and context (class names, function names, or code) available:
# Path: ififuncs.py
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
#
# Path: ififuncs.py
# def manifest_file_count(manifest2check):
# '''
# Checks how many entries are in a manifest
# '''
# if os.path.isfile(manifest2check):
# print(' - A manifest already exists')
# with open(manifest2check, "r") as fo:
# manifest_lines = [line.split(',') for line in fo.readlines()]
# count_in_manifest = len(manifest_lines)
# return count_in_manifest
#
# Path: ififuncs.py
# def hashlib_manifest(manifest_dir, manifest_textfile, path_to_remove):
# '''
# Creates an MD5 manifest with relative filepaths.
# '''
# file_count = 0
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if not f[0] == '.']
# directories[:] = [d for d in directories if not d[0] == '.']
# for files in filenames:
# #print(" - Calculating number of files to process in current directory - %s files \r "% file_count)
# print("- Calculating number of files to process in current directory - {0} files ".format(file_count), end="\r")
# file_count += 1
# manifest_generator = ''
# md5_counter = 1
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if f[0] != '.']
# directories[:] = [d for d in directories if d[0] != '.']
# for files in filenames:
# print(' - Generating MD5 for %s - file %d of %d' % (os.path.join(root, files), md5_counter, file_count))
# md5 = hashlib_md5(os.path.join(root, files))
# md5_counter += 1
# root2 = os.path.abspath(root).replace(path_to_remove, '')
# try:
# if root2[0] == '/':
# root2 = root2[1:]
# if root2[0] == '\\':
# root2 = root2[1:]
# except: IndexError
# manifest_generator += md5[:32] + ' ' + os.path.join(root2, files).replace("\\", "/") + '\n'
# manifest_list = manifest_generator.splitlines()
# files_in_manifest = len(manifest_list)
# # http://stackoverflow.com/a/31306961/2188572
# manifest_list = sorted(manifest_list, key=lambda x: (x[34:]))
# with open(manifest_textfile, "w", encoding='utf-8') as fo:
# for i in manifest_list:
# fo.write((unicodedata.normalize('NFC', i) + '\n'))
#
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
#
# def make_desktop_manifest_dir():
# desktop_manifest_dir = os.path.expanduser("~/Desktop/moveit_manifests")
# if not os.path.isdir(desktop_manifest_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_manifest_dir)
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# else:
# if not os.path.isdir(os.path.join(desktop_manifest_dir, 'old_manifests')):
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# return desktop_manifest_dir
. Output only the next line. | hashlib_manifest(source, manifest, source) |
Using the snippet: <|code_start|> action='store_true',
help='Generates sha512 checksums instead of md5'
)
args = parser.parse_args(args_)
source = args.source
source_parent_dir = os.path.dirname(source)
normpath = os.path.normpath(source)
relative_path = normpath.split(os.sep)[-1]
log_name_source_ = os.path.basename(
args.source
) + time.strftime("_%Y_%m_%dT%H_%M_%S")
if args.s:
if args.sha512:
manifest = source_parent_dir + '/%s_manifest-sha512.txt' % relative_path
else:
manifest = source_parent_dir + '/%s_manifest.md5' % relative_path
log_name_source = source_parent_dir + '/%s.log' % log_name_source_
elif args.f:
if args.sha512:
manifest = source_parent_dir + '/%s_manifest-sha512.txt' % relative_path
else:
manifest = source + '/%s_manifest.md5' % relative_path
log_name_source = source_parent_dir + '/%s.log' % log_name_source_
else:
if args.sha512:
manifest_ = manifest_ = '/%s_manifest-sha512.txt' % relative_path
else:
manifest_ = '/%s_manifest.md5' % relative_path
desktop_manifest_dir = make_desktop_manifest_dir()
manifest = "%s/%s" % (desktop_manifest_dir, manifest_)
<|code_end|>
, determine the next line of code. You have imports:
import sys
import os
import argparse
import time
import shutil
import ififuncs
from ififuncs import generate_log
from ififuncs import manifest_file_count
from ififuncs import hashlib_manifest
from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir
and context (class names, function names, or code) available:
# Path: ififuncs.py
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
#
# Path: ififuncs.py
# def manifest_file_count(manifest2check):
# '''
# Checks how many entries are in a manifest
# '''
# if os.path.isfile(manifest2check):
# print(' - A manifest already exists')
# with open(manifest2check, "r") as fo:
# manifest_lines = [line.split(',') for line in fo.readlines()]
# count_in_manifest = len(manifest_lines)
# return count_in_manifest
#
# Path: ififuncs.py
# def hashlib_manifest(manifest_dir, manifest_textfile, path_to_remove):
# '''
# Creates an MD5 manifest with relative filepaths.
# '''
# file_count = 0
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if not f[0] == '.']
# directories[:] = [d for d in directories if not d[0] == '.']
# for files in filenames:
# #print(" - Calculating number of files to process in current directory - %s files \r "% file_count)
# print("- Calculating number of files to process in current directory - {0} files ".format(file_count), end="\r")
# file_count += 1
# manifest_generator = ''
# md5_counter = 1
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if f[0] != '.']
# directories[:] = [d for d in directories if d[0] != '.']
# for files in filenames:
# print(' - Generating MD5 for %s - file %d of %d' % (os.path.join(root, files), md5_counter, file_count))
# md5 = hashlib_md5(os.path.join(root, files))
# md5_counter += 1
# root2 = os.path.abspath(root).replace(path_to_remove, '')
# try:
# if root2[0] == '/':
# root2 = root2[1:]
# if root2[0] == '\\':
# root2 = root2[1:]
# except: IndexError
# manifest_generator += md5[:32] + ' ' + os.path.join(root2, files).replace("\\", "/") + '\n'
# manifest_list = manifest_generator.splitlines()
# files_in_manifest = len(manifest_list)
# # http://stackoverflow.com/a/31306961/2188572
# manifest_list = sorted(manifest_list, key=lambda x: (x[34:]))
# with open(manifest_textfile, "w", encoding='utf-8') as fo:
# for i in manifest_list:
# fo.write((unicodedata.normalize('NFC', i) + '\n'))
#
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
#
# def make_desktop_manifest_dir():
# desktop_manifest_dir = os.path.expanduser("~/Desktop/moveit_manifests")
# if not os.path.isdir(desktop_manifest_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_manifest_dir)
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# else:
# if not os.path.isdir(os.path.join(desktop_manifest_dir, 'old_manifests')):
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# return desktop_manifest_dir
. Output only the next line. | desktop_logs_dir = make_desktop_logs_dir() |
Given the following code snippet before the placeholder: <|code_start|> parser.add_argument(
'-sha512',
action='store_true',
help='Generates sha512 checksums instead of md5'
)
args = parser.parse_args(args_)
source = args.source
source_parent_dir = os.path.dirname(source)
normpath = os.path.normpath(source)
relative_path = normpath.split(os.sep)[-1]
log_name_source_ = os.path.basename(
args.source
) + time.strftime("_%Y_%m_%dT%H_%M_%S")
if args.s:
if args.sha512:
manifest = source_parent_dir + '/%s_manifest-sha512.txt' % relative_path
else:
manifest = source_parent_dir + '/%s_manifest.md5' % relative_path
log_name_source = source_parent_dir + '/%s.log' % log_name_source_
elif args.f:
if args.sha512:
manifest = source_parent_dir + '/%s_manifest-sha512.txt' % relative_path
else:
manifest = source + '/%s_manifest.md5' % relative_path
log_name_source = source_parent_dir + '/%s.log' % log_name_source_
else:
if args.sha512:
manifest_ = manifest_ = '/%s_manifest-sha512.txt' % relative_path
else:
manifest_ = '/%s_manifest.md5' % relative_path
<|code_end|>
, predict the next line using imports from the current file:
import sys
import os
import argparse
import time
import shutil
import ififuncs
from ififuncs import generate_log
from ififuncs import manifest_file_count
from ififuncs import hashlib_manifest
from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir
and context including class names, function names, and sometimes code from other files:
# Path: ififuncs.py
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
#
# Path: ififuncs.py
# def manifest_file_count(manifest2check):
# '''
# Checks how many entries are in a manifest
# '''
# if os.path.isfile(manifest2check):
# print(' - A manifest already exists')
# with open(manifest2check, "r") as fo:
# manifest_lines = [line.split(',') for line in fo.readlines()]
# count_in_manifest = len(manifest_lines)
# return count_in_manifest
#
# Path: ififuncs.py
# def hashlib_manifest(manifest_dir, manifest_textfile, path_to_remove):
# '''
# Creates an MD5 manifest with relative filepaths.
# '''
# file_count = 0
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if not f[0] == '.']
# directories[:] = [d for d in directories if not d[0] == '.']
# for files in filenames:
# #print(" - Calculating number of files to process in current directory - %s files \r "% file_count)
# print("- Calculating number of files to process in current directory - {0} files ".format(file_count), end="\r")
# file_count += 1
# manifest_generator = ''
# md5_counter = 1
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if f[0] != '.']
# directories[:] = [d for d in directories if d[0] != '.']
# for files in filenames:
# print(' - Generating MD5 for %s - file %d of %d' % (os.path.join(root, files), md5_counter, file_count))
# md5 = hashlib_md5(os.path.join(root, files))
# md5_counter += 1
# root2 = os.path.abspath(root).replace(path_to_remove, '')
# try:
# if root2[0] == '/':
# root2 = root2[1:]
# if root2[0] == '\\':
# root2 = root2[1:]
# except: IndexError
# manifest_generator += md5[:32] + ' ' + os.path.join(root2, files).replace("\\", "/") + '\n'
# manifest_list = manifest_generator.splitlines()
# files_in_manifest = len(manifest_list)
# # http://stackoverflow.com/a/31306961/2188572
# manifest_list = sorted(manifest_list, key=lambda x: (x[34:]))
# with open(manifest_textfile, "w", encoding='utf-8') as fo:
# for i in manifest_list:
# fo.write((unicodedata.normalize('NFC', i) + '\n'))
#
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
#
# def make_desktop_manifest_dir():
# desktop_manifest_dir = os.path.expanduser("~/Desktop/moveit_manifests")
# if not os.path.isdir(desktop_manifest_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_manifest_dir)
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# else:
# if not os.path.isdir(os.path.join(desktop_manifest_dir, 'old_manifests')):
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# return desktop_manifest_dir
. Output only the next line. | desktop_manifest_dir = make_desktop_manifest_dir() |
Continue the code snippet: <|code_start|> logs_dir = os.path.join(sip_dir, 'logs')
logfile = os.path.join(logs_dir, logname)
if os.path.isfile(logfile):
with open(log, 'r') as fo:
validate_log = fo.readlines()
with open(logfile, 'a') as ba:
for lines in validate_log:
ba.write(lines)
for possible_manifest in possible_manifests:
if os.path.isfile(possible_manifest):
with open(possible_manifest, 'r') as manifesto:
manifest_lines = manifesto.readlines()
for lines in manifest_lines:
if logname in lines:
if 'manifest-sha512.txt' in possible_manifest:
lines = lines[:127].replace(lines[:127], ififuncs.hashlib_sha512(logfile)) + lines[128:]
elif '_manifest.md5' in possible_manifest:
lines = lines[:31].replace(lines[:31], ififuncs.hashlib_md5(logfile)) + lines[32:]
updated_manifest.append(lines)
with open(possible_manifest, 'w') as fo:
for lines in updated_manifest:
fo.write(lines)
updated_manifest = []
def main(args_):
'''
Launches all other functions when called from the command line.
'''
args = make_parser(args_)
<|code_end|>
. Use current file imports:
import sys
import os
import argparse
import time
import unicodedata
import ififuncs
from ififuncs import make_desktop_logs_dir
and context (classes, functions, or code) from other files:
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
. Output only the next line. | desktop_logs_dir = make_desktop_logs_dir() |
Using the snippet: <|code_start|>#!/usr/bin/env python
'''
This script will ask mediainfo to get all durations with a folder
'''
def main():
'''
Recursively search for AV files and print duration in seconds
'''
all_files = sys.argv[1:]
duration = 0
for parent_directory in all_files:
for root, dirnames, filenames in os.walk(parent_directory):
for filename in filenames:
if filename.endswith(('.MP4', '.mov', '.mkv')):
<|code_end|>
, determine the next line of code. You have imports:
import os
import sys
from ififuncs import get_milliseconds
and context (class names, function names, or code) available:
# Path: ififuncs.py
# def get_milliseconds(filename):
# '''
# Returns a float with the duration of a file in milliseconds.
# '''
# milliseconds = get_mediainfo(
# 'miliseconds',
# '--inform=General;%Duration%',
# filename
# )
# return float(milliseconds)
. Output only the next line. | milliseconds = get_milliseconds( |
Continue the code snippet: <|code_start|> # make sure that the alternate log filename is more recent
if int(
os.path.basename(logs)[-12:-4].replace('_', '')) > int(os.path.basename(i)[-12:-4].replace('_', '')):
print(' - trying to analyze %s' % logs)
print(" - %-*s : %s" % (50, os.path.basename(logs)[:-24], analyze_log(os.path.join(desktop_logs_dir, logs))))
def main():
'''
Launches the other functions wihch attempt to run multiple copyit.py
instances if manifests and matching sidecar directories are found
inside of the input directory.
'''
args = parse_args()
all_files = find_manifest(args)
processed_dirs = []
log_names = []
print('\n\n - **** All of these folders will be copied to %s\n' % args.o)
for i in all_files:
absolute_path = os.path.join(args.o, os.path.basename(i))
if os.path.isdir(absolute_path):
print(' - %s already exists, skipping' % absolute_path)
else:
print(' - %s will be copied' % i)
time.sleep(2)
for i in all_files:
absolute_path = os.path.join(args.o, os.path.basename(i))
if os.path.isdir(absolute_path):
print(' - %s already exists, skipping' % absolute_path)
else:
<|code_end|>
. Use current file imports:
import os
import argparse
import time
import copyit
from ififuncs import make_desktop_logs_dir
and context (classes, functions, or code) from other files:
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
. Output only the next line. | desktop_logs_dir = make_desktop_logs_dir() |
Next line prediction: <|code_start|> output = filename + "_h264.mov"
ffmpeg_args = [
'ffmpeg',
'-i', filename,
]
if args.logo:
ffmpeg_args.extend(['-i', args.logo])
ffmpeg_args += [
'-c:a', 'aac',
'-c:v', 'libx264',
'-pix_fmt', 'yuv420p',
'-crf', crf_value
]
if args.wide:
ffmpeg_args.append('-aspect')
ffmpeg_args.append('16:9')
if not args.map:
ffmpeg_args.append('-map')
ffmpeg_args.append('0:a?')
ffmpeg_args.append('-map')
ffmpeg_args.append('0:v')
if len(filter_list) > 0:
for _filter in filter_list:
ffmpeg_args.append(_filter)
ffmpeg_args.append(output)
print(ffmpeg_args)
subprocess.call(ffmpeg_args)
if args.md5:
manifest = '%s_manifest.md5' % filename
print('Generating md5 sidecar...')
<|code_end|>
. Use current file imports:
(import argparse
import subprocess
import sys
import os
import ififuncs
from ififuncs import hashlib_md5)
and context including class names, function names, or small code snippets from other files:
# Path: ififuncs.py
# def hashlib_md5(filename):
# '''
# uses hashlib to return an MD5 checksum of an input filename
# '''
# read_size = 0
# last_percent_done = 0
# m = hashlib.md5()
# total_size = os.path.getsize(filename)
# with open(str(filename), 'rb') as f:
# while True:
# buf = f.read(2**20)
# if not buf:
# break
# read_size += len(buf)
# m.update(buf)
# percent_done = 100 * read_size / total_size
# if percent_done > last_percent_done:
# sys.stdout.write('[%d%%]\r' % percent_done)
# sys.stdout.flush()
# last_percent_done = percent_done
# md5_output = m.hexdigest()
# return md5_output
. Output only the next line. | h264_md5 = hashlib_md5(filename) |
Continue the code snippet: <|code_start|>#!/usr/bin/env python
'''
This script will create a new UUID
via ififuncs.create_uuid and print to terminal
'''
def main():
'''
Prints a new UUID to the terminal
'''
<|code_end|>
. Use current file imports:
from ififuncs import create_uuid
and context (classes, functions, or code) from other files:
# Path: ififuncs.py
# def create_uuid():
# '''
# Returns a randonly generated UUID as a string
# '''
# new_uuid = str(uuid.uuid4())
# return new_uuid
. Output only the next line. | new_uuid = create_uuid() |
Next line prediction: <|code_start|> if dirname == '':
rootpos = 'y'
'''
dirname = raw_input(
'What do you want your destination folder to be called?\n'
)
'''
relative_path = normpath.split(os.sep)[-1]
# or hardcode
destination_final_path = os.path.join(destination, dirname)
if rootpos == 'y':
manifest_destination = os.path.dirname(destination) + '/%s_manifest.md5' % os.path.basename(destination)
else:
manifest_destination = destination + '/%s_manifest.md5' % dirname
if os.path.isfile(manifest_destination):
print('Destination manifest already exists')
if rootpos == 'y':
manifest_filename = '%s_manifest.md5' % os.path.basename(destination)
else:
manifest_filename = '%s_manifest.md5' % dirname
desktop_manifest_dir = make_desktop_manifest_dir()
# manifest = desktop manifest, looks like this can get rewritten later.
manifest = os.path.join(
desktop_manifest_dir, manifest_filename
)
manifest_sidecar = os.path.join(
os.path.dirname(source), relative_path + '_manifest.md5'
)
manifest_root = source + '/%s_manifest.md5' % os.path.basename(source)
log_name_filename = dirname + time.strftime("_%Y_%m_%dT%H_%M_%S")
<|code_end|>
. Use current file imports:
(import sys
import subprocess
import os
import filecmp
import tempfile
import time
import argparse
import hashlib
import shutil
import unicodedata
import ififuncs
from builtins import input
from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir, generate_log)
and context including class names, function names, or small code snippets from other files:
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
#
# def make_desktop_manifest_dir():
# desktop_manifest_dir = os.path.expanduser("~/Desktop/moveit_manifests")
# if not os.path.isdir(desktop_manifest_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_manifest_dir)
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# else:
# if not os.path.isdir(os.path.join(desktop_manifest_dir, 'old_manifests')):
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# return desktop_manifest_dir
#
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
. Output only the next line. | desktop_logs_dir = make_desktop_logs_dir() |
Based on the snippet: <|code_start|> if os.path.isdir(dircheck):
source = check_for_sip(args.source)
destination = os.path.join(args.destination, os.path.basename(args.source))
os.makedirs(destination)
else:
source = os.path.abspath(args.source)
destination = args.destination
normpath = os.path.normpath(source)
#is there any benefit to this over os.path.basename
dirname = os.path.split(os.path.basename(source))[1]
if dirname == '':
rootpos = 'y'
'''
dirname = raw_input(
'What do you want your destination folder to be called?\n'
)
'''
relative_path = normpath.split(os.sep)[-1]
# or hardcode
destination_final_path = os.path.join(destination, dirname)
if rootpos == 'y':
manifest_destination = os.path.dirname(destination) + '/%s_manifest.md5' % os.path.basename(destination)
else:
manifest_destination = destination + '/%s_manifest.md5' % dirname
if os.path.isfile(manifest_destination):
print('Destination manifest already exists')
if rootpos == 'y':
manifest_filename = '%s_manifest.md5' % os.path.basename(destination)
else:
manifest_filename = '%s_manifest.md5' % dirname
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import subprocess
import os
import filecmp
import tempfile
import time
import argparse
import hashlib
import shutil
import unicodedata
import ififuncs
from builtins import input
from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir, generate_log
and context (classes, functions, sometimes code) from other files:
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
#
# def make_desktop_manifest_dir():
# desktop_manifest_dir = os.path.expanduser("~/Desktop/moveit_manifests")
# if not os.path.isdir(desktop_manifest_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_manifest_dir)
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# else:
# if not os.path.isdir(os.path.join(desktop_manifest_dir, 'old_manifests')):
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# return desktop_manifest_dir
#
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
. Output only the next line. | desktop_manifest_dir = make_desktop_manifest_dir() |
Predict the next line after this snippet: <|code_start|> last_percent_done = 0
md5_object = hashlib.md5()
total_size = os.path.getsize(filename)
with open(str(filename), 'rb') as file_object:
while True:
buf = file_object.read(2**20)
if not buf:
break
read_size += len(buf)
md5_object.update(buf)
percent_done = 100 * read_size / total_size
if percent_done > last_percent_done:
sys.stdout.write('[%d%%]\r' % percent_done)
sys.stdout.flush()
last_percent_done = percent_done
md5_output = md5_object.hexdigest()
return md5_output + ' ' + os.path.abspath(filename) + '\n'
def test_write_capabilities(directory, log_name_source):
'''
Checks if drives have write access.
Also checks if source is a file or directory (no file support right now)
'''
if os.path.isdir(directory):
temp = tempfile.mkstemp(dir=directory, suffix='.tmp')
os.close(temp[0]) # Needed for windows.
os.remove(temp[1])
elif os.path.isfile(directory):
print('\nFile transfer is not currently supported, only directories.\n')
<|code_end|>
using the current file's imports:
import sys
import subprocess
import os
import filecmp
import tempfile
import time
import argparse
import hashlib
import shutil
import unicodedata
import ififuncs
from builtins import input
from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir, generate_log
and any relevant context from other files:
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
#
# def make_desktop_manifest_dir():
# desktop_manifest_dir = os.path.expanduser("~/Desktop/moveit_manifests")
# if not os.path.isdir(desktop_manifest_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_manifest_dir)
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# else:
# if not os.path.isdir(os.path.join(desktop_manifest_dir, 'old_manifests')):
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# return desktop_manifest_dir
#
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
. Output only the next line. | generate_log( |
Given the following code snippet before the placeholder: <|code_start|> objects_dir = os.path.join(sip_path, 'objects')
uuid = os.path.basename(sip_path)
old_basename, ext = os.path.splitext(item)
new_path = os.path.join(objects_dir, uuid + ext)
os.rename(os.path.join(objects_dir, os.path.basename(item)), new_path)
manifest = os.path.join(os.path.dirname(new_path), os.path.basename(item)) + '_manifest.md5'
updated_lines = []
ififuncs.generate_log(
log_name,
'EVENT = Filename change - eventDetail=original filename replaced with uuid, eventOutcomeDetailNote=%s replaced with %s, agentName=%s, agentName=sipcreator.py))' % (os.path.basename(item), uuid + ext, user))
with open(manifest, 'r') as file_object:
checksums = file_object.readlines()
for line in checksums:
if os.path.basename(item) in line:
line = line.replace(os.path.basename(item), os.path.basename(new_path))
updated_lines.append(line)
with open(manifest, 'w') as fo:
for lines in updated_lines:
fo.write(lines)
consolidate_logs(log_names, sip_path)
return log_names
def log_report(log_names):
'''
Analyzes all the moveit.py logs on the desktop and print a report.
'''
desktop_logs_dir = ififuncs.make_desktop_logs_dir()
for i in log_names:
if os.path.isfile(i):
<|code_end|>
, predict the next line using imports from the current file:
import os
import argparse
import sys
import shutil
import datetime
import time
import copyit
import ififuncs
import package_update
import accession
import manifest
import makezip
import accession
import dicttoxml
import clairmeta
from masscopy import analyze_log
from clairmeta.utils.xml import prettyprint_xml
from clairmeta import DCP
and context including class names, function names, and sometimes code from other files:
# Path: masscopy.py
# def analyze_log(logfile):
# '''
# Analyzes logfiles on the desktop and summarises the outcome.
# '''
# outcome = ''
# with open(logfile, 'r') as fo:
# log_lines = fo.readlines()
# for line in log_lines:
# if 'EVENT = File Transfer Judgement - Success' in line:
# outcome = 'success'
# if 'EVENT = File Transfer Outcome - Failure' in line:
# outcome = 'failure'
# if 'EVENT = Existing source manifest check - Failure' in line:
# outcome = 'failure - might be outdated manifests in use'
# return outcome
. Output only the next line. | print(("%-*s : copyit job was a %s" % (50, os.path.basename(i)[:-24], analyze_log(i)))) |
Given the following code snippet before the placeholder: <|code_start|> print('Exiting as you selected -dryrun')
sys.exit()
logs = []
if args.y:
proceed = 'Y'
else:
proceed = ififuncs.ask_yes_no(
'Do you want to proceed?'
)
if proceed == 'Y':
for sips in sorted(oe_dict):
print(oe_dict[sips])
sipcreator_cmd = ['-i',]
for sipcreator_inputs in oe_dict[sips][0]:
sipcreator_cmd.append(sipcreator_inputs)
sipcreator_cmd += ['-supplement']
for sipcreator_supplements in oe_dict[sips][1]:
sipcreator_cmd.append(sipcreator_supplements)
sipcreator_cmd += ['-user', user, '-oe', sips, '-o', args.o]
if args.rename_uuid:
sipcreator_cmd.append('-rename_uuid')
if args.zip:
sipcreator_cmd.append('-zip')
if args.l:
sipcreator_cmd.append('-l')
print(sipcreator_cmd)
sipcreator_log, _ = sipcreator.main(sipcreator_cmd)
logs.append(sipcreator_log)
for i in logs:
if os.path.isfile(i):
<|code_end|>
, predict the next line using imports from the current file:
import argparse
import os
import sys
import ififuncs
import sipcreator
from masscopy import analyze_log
and context including class names, function names, and sometimes code from other files:
# Path: masscopy.py
# def analyze_log(logfile):
# '''
# Analyzes logfiles on the desktop and summarises the outcome.
# '''
# outcome = ''
# with open(logfile, 'r') as fo:
# log_lines = fo.readlines()
# for line in log_lines:
# if 'EVENT = File Transfer Judgement - Success' in line:
# outcome = 'success'
# if 'EVENT = File Transfer Outcome - Failure' in line:
# outcome = 'failure'
# if 'EVENT = Existing source manifest check - Failure' in line:
# outcome = 'failure - might be outdated manifests in use'
# return outcome
. Output only the next line. | print(("%-*s : copyit job was a %s" % (50, os.path.basename(i), analyze_log(i)))) |
Continue the code snippet: <|code_start|> 'Accepts a parent folder as input and will generate manifest for each subfolder.'
' Designed for a specific IFI Irish Film Archive workflow. '
'Written by Kieran O\'Leary.'
)
parser.add_argument(
'input', help='file path of parent directory'
)
parser.add_argument(
'-v', action='store_true',
help='verbose mode - some extra information such as overall file count.'
)
return parser
def create_manifest(source):
'''
Generates a master log and creates checksum manifests for all subdirectories.
'''
master_log = os.path.expanduser('~/Desktop/batchfixity_errors.log')
os.chdir(source)
for dirname in os.walk('.').next()[1]:
full_path = os.path.join(source, dirname)
manifest_textfile = '%s/%s_manifest.md5' % (full_path, dirname)
if not os.path.isfile(manifest_textfile):
log_name = '%s/%s_fixity.log' % (
os.path.dirname(full_path), dirname
)
generate_log(log_name, 'batchfixity started')
generate_log(log_name, '%s created' % manifest_textfile)
try:
<|code_end|>
. Use current file imports:
import argparse
import os
import shutil
from ififuncs import hashlib_manifest
from ififuncs import generate_log
and context (classes, functions, or code) from other files:
# Path: ififuncs.py
# def hashlib_manifest(manifest_dir, manifest_textfile, path_to_remove):
# '''
# Creates an MD5 manifest with relative filepaths.
# '''
# file_count = 0
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if not f[0] == '.']
# directories[:] = [d for d in directories if not d[0] == '.']
# for files in filenames:
# #print(" - Calculating number of files to process in current directory - %s files \r "% file_count)
# print("- Calculating number of files to process in current directory - {0} files ".format(file_count), end="\r")
# file_count += 1
# manifest_generator = ''
# md5_counter = 1
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if f[0] != '.']
# directories[:] = [d for d in directories if d[0] != '.']
# for files in filenames:
# print(' - Generating MD5 for %s - file %d of %d' % (os.path.join(root, files), md5_counter, file_count))
# md5 = hashlib_md5(os.path.join(root, files))
# md5_counter += 1
# root2 = os.path.abspath(root).replace(path_to_remove, '')
# try:
# if root2[0] == '/':
# root2 = root2[1:]
# if root2[0] == '\\':
# root2 = root2[1:]
# except: IndexError
# manifest_generator += md5[:32] + ' ' + os.path.join(root2, files).replace("\\", "/") + '\n'
# manifest_list = manifest_generator.splitlines()
# files_in_manifest = len(manifest_list)
# # http://stackoverflow.com/a/31306961/2188572
# manifest_list = sorted(manifest_list, key=lambda x: (x[34:]))
# with open(manifest_textfile, "w", encoding='utf-8') as fo:
# for i in manifest_list:
# fo.write((unicodedata.normalize('NFC', i) + '\n'))
#
# Path: ififuncs.py
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
. Output only the next line. | hashlib_manifest(full_path, manifest_textfile, full_path) |
Given snippet: <|code_start|> '''
parser = argparse.ArgumentParser(
description='Batch MD5 checksum generator.'
'Accepts a parent folder as input and will generate manifest for each subfolder.'
' Designed for a specific IFI Irish Film Archive workflow. '
'Written by Kieran O\'Leary.'
)
parser.add_argument(
'input', help='file path of parent directory'
)
parser.add_argument(
'-v', action='store_true',
help='verbose mode - some extra information such as overall file count.'
)
return parser
def create_manifest(source):
'''
Generates a master log and creates checksum manifests for all subdirectories.
'''
master_log = os.path.expanduser('~/Desktop/batchfixity_errors.log')
os.chdir(source)
for dirname in os.walk('.').next()[1]:
full_path = os.path.join(source, dirname)
manifest_textfile = '%s/%s_manifest.md5' % (full_path, dirname)
if not os.path.isfile(manifest_textfile):
log_name = '%s/%s_fixity.log' % (
os.path.dirname(full_path), dirname
)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import argparse
import os
import shutil
from ififuncs import hashlib_manifest
from ififuncs import generate_log
and context:
# Path: ififuncs.py
# def hashlib_manifest(manifest_dir, manifest_textfile, path_to_remove):
# '''
# Creates an MD5 manifest with relative filepaths.
# '''
# file_count = 0
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if not f[0] == '.']
# directories[:] = [d for d in directories if not d[0] == '.']
# for files in filenames:
# #print(" - Calculating number of files to process in current directory - %s files \r "% file_count)
# print("- Calculating number of files to process in current directory - {0} files ".format(file_count), end="\r")
# file_count += 1
# manifest_generator = ''
# md5_counter = 1
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if f[0] != '.']
# directories[:] = [d for d in directories if d[0] != '.']
# for files in filenames:
# print(' - Generating MD5 for %s - file %d of %d' % (os.path.join(root, files), md5_counter, file_count))
# md5 = hashlib_md5(os.path.join(root, files))
# md5_counter += 1
# root2 = os.path.abspath(root).replace(path_to_remove, '')
# try:
# if root2[0] == '/':
# root2 = root2[1:]
# if root2[0] == '\\':
# root2 = root2[1:]
# except: IndexError
# manifest_generator += md5[:32] + ' ' + os.path.join(root2, files).replace("\\", "/") + '\n'
# manifest_list = manifest_generator.splitlines()
# files_in_manifest = len(manifest_list)
# # http://stackoverflow.com/a/31306961/2188572
# manifest_list = sorted(manifest_list, key=lambda x: (x[34:]))
# with open(manifest_textfile, "w", encoding='utf-8') as fo:
# for i in manifest_list:
# fo.write((unicodedata.normalize('NFC', i) + '\n'))
#
# Path: ififuncs.py
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
which might include code, classes, or functions. Output only the next line. | generate_log(log_name, 'batchfixity started') |
Given snippet: <|code_start|> codes = attr.ib(default=attr.Factory(list))
@attr.s
class Data(object):
soc_id = attr.ib()
sub_case = attr.ib()
year = attr.ib()
var_id = attr.ib()
code = attr.ib()
comment = attr.ib()
references = attr.ib(convert=semicolon_split)
source_coded_data = attr.ib()
admin_comment = attr.ib()
@attr.s
class ObjectWithSource(object):
id = attr.ib()
name = attr.ib()
year = attr.ib()
author = attr.ib()
reference = attr.ib()
base_dir = attr.ib()
@property
def dir(self):
return self.base_dir.joinpath(self.id)
def as_source(self):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sys
import re
import django
import attr
from itertools import groupby
from time import time
from functools import partial
from django.db import transaction
from clldutils.dsv import reader
from clldutils.text import split_text
from clldutils.path import Path
from clldutils import jsonlib
from dplace_app.models import Source
from loader.util import configure_logging, load_regions
from loader.society import society_locations, load_societies, load_society_relations
from loader.phylogenies import load_phylogenies
from loader.variables import load_vars
from loader.values import load_data
from loader.sources import load_references
from loader.glottocode import load_languages
and context:
# Path: dplace_app/models.py
# class Source(models.Model):
# """
# Stores references for Value, also for dataset sources.
# """
# # Not really sure if we should separate dataset sources from references (I
# # think we should), but since all the code has already been written with
# # this model, I won't change it yet.
#
# # text, because might be '1996', '1999-2001', or 'ND'
# year = models.CharField(max_length=30, db_index=True)
# author = models.TextField(db_index=True)
# reference = models.TextField()
# name = models.CharField(max_length=100, db_index=True, default="")
#
# def __unicode__(self):
# return "%s (%s)" % (self.author, self.year)
#
# class Meta(object):
# unique_together = ('year', 'author')
# ordering = ('name', )
which might include code, classes, or functions. Output only the next line. | return Source.objects.create( |
Predict the next line for this snippet: <|code_start|># coding: utf8
from __future__ import unicode_literals
class Test(APITestCase):
"""Tests rest-framework API"""
def _fixture_teardown(self):
try:
APITestCase._fixture_teardown(self)
except:
pass
def get_json(self, urlname, *args, **kw):
kw.setdefault('format', 'json')
reverse_args = kw.pop('reverse_args', [])
response = self.client.get(reverse(urlname, args=reverse_args), *args, **kw)
self.assertEqual(response.status_code, status.HTTP_200_OK)
try: # csv download doesn't return json
return json.loads(response.content)
except:
return response.content
def obj_in_results(self, obj, response):
return getattr(obj, 'id', obj) in [x['id'] for x in response['results']]
def setUp(self):
sources._SOURCE_CACHE = {}
<|code_end|>
with the help of current file imports:
import json
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from clldutils.path import Path
from dplace_app.models import *
from dplace_app.load import load
from dplace_app.loader import sources
and context from other files:
# Path: dplace_app/load.py
# def load(repos, test=True):
# configure_logging(test=test)
# repos = Repos(repos)
#
# for func in [
# load_societies,
# load_society_relations,
# load_regions,
# society_locations,
# load_vars,
# load_languages,
# load_references,
# load_data,
# load_phylogenies,
# ]:
# with transaction.atomic():
# if not test:
# print("%s..." % func.__name__) # pragma: no cover
# start = time()
# res = func(repos)
# if not test: # pragma: no cover
# print("{0} loaded in {1:.2f} secs".format(res, time() - start))
#
# Path: dplace_app/loader/sources.py
# _SOURCE_CACHE = {}
# def get_source(ds):
# def load_references(repos):
, which may contain function names, class names, or code. Output only the next line. | load(Path(__file__).parent.joinpath('data')) |
Using the snippet: <|code_start|># coding: utf8
from __future__ import unicode_literals
class Test(APITestCase):
"""Tests rest-framework API"""
def _fixture_teardown(self):
try:
APITestCase._fixture_teardown(self)
except:
pass
def get_json(self, urlname, *args, **kw):
kw.setdefault('format', 'json')
reverse_args = kw.pop('reverse_args', [])
response = self.client.get(reverse(urlname, args=reverse_args), *args, **kw)
self.assertEqual(response.status_code, status.HTTP_200_OK)
try: # csv download doesn't return json
return json.loads(response.content)
except:
return response.content
def obj_in_results(self, obj, response):
return getattr(obj, 'id', obj) in [x['id'] for x in response['results']]
def setUp(self):
<|code_end|>
, determine the next line of code. You have imports:
import json
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from clldutils.path import Path
from dplace_app.models import *
from dplace_app.load import load
from dplace_app.loader import sources
and context (class names, function names, or code) available:
# Path: dplace_app/load.py
# def load(repos, test=True):
# configure_logging(test=test)
# repos = Repos(repos)
#
# for func in [
# load_societies,
# load_society_relations,
# load_regions,
# society_locations,
# load_vars,
# load_languages,
# load_references,
# load_data,
# load_phylogenies,
# ]:
# with transaction.atomic():
# if not test:
# print("%s..." % func.__name__) # pragma: no cover
# start = time()
# res = func(repos)
# if not test: # pragma: no cover
# print("{0} loaded in {1:.2f} secs".format(res, time() - start))
#
# Path: dplace_app/loader/sources.py
# _SOURCE_CACHE = {}
# def get_source(ds):
# def load_references(repos):
. Output only the next line. | sources._SOURCE_CACHE = {} |
Predict the next line after this snippet: <|code_start|>
log = logging.getLogger('profile')
class VariableViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.VariableSerializer
filter_fields = ('label', 'name', 'index_categories', 'niche_categories', 'source')
<|code_end|>
using the current file's imports:
import json
import re
import datetime
import logging
from itertools import groupby
from django.db import connection
from django.db.models import Prefetch, Q, Count
from django.shortcuts import get_object_or_404
from django.http import Http404
from rest_framework import viewsets
from rest_framework.pagination import PageNumberPagination
from rest_framework.decorators import api_view, permission_classes, renderer_classes
from rest_framework.permissions import AllowAny
from rest_framework.views import Response
from rest_framework.renderers import JSONRenderer
from dplace_app.renderers import DPLACECSVRenderer
from dplace_app import serializers
from dplace_app import models
from dplace_app.tree import update_newick
from time import time
from time import time
from django.db import connection
and any relevant context from other files:
# Path: dplace_app/serializers.py
# SEARCH_LANGUAGE = 'l'
# SEARCH_ENVIRONMENTAL = 'e'
# SEARCH_VARIABLES = 'v'
# SEARCH_GEOGRAPHIC = 'g'
# class SourceSerializer(serializers.ModelSerializer):
# class Meta(object):
# class CodeDescriptionSerializer(serializers.ModelSerializer):
# class Meta(object):
# class VariableSerializer(serializers.ModelSerializer):
# class Meta(object):
# class CategorySerializer(serializers.ModelSerializer):
# class Meta(object):
# class VariableDetailSerializer(serializers.ModelSerializer):
# class Meta(object):
# class CategoryDetailSerializer(serializers.ModelSerializer):
# class Meta(object):
# class ValueSerializer(serializers.ModelSerializer):
# class Meta(object):
# class LanguageFamilySerializer(serializers.ModelSerializer):
# class Meta(object):
# class LanguageSerializer(serializers.ModelSerializer):
# class Meta(object):
# class LanguageSerializerWithSocieties(serializers.ModelSerializer):
# class Meta(object):
# class SocietySerializer(serializers.ModelSerializer):
# class Meta(object):
# class GeographicRegionSerializer(serializers.ModelSerializer):
# class Meta(object):
# class SocietyWithRegionSerializer(SocietySerializer):
# class TreeSocietySerializer(serializers.ModelSerializer):
# class Meta(object):
# class LanguageTreeLabelsSequenceSerializer(serializers.HyperlinkedModelSerializer):
# class Meta:
# class LanguageTreeLabelsSerializer(serializers.ModelSerializer):
# class Meta(object):
# class LanguageTreeSerializer(serializers.ModelSerializer):
# class Meta(object):
# class SocietyResult(object):
# class VariableCode(object):
# class SocietyResultSet(object):
# class VariableCodeSerializer(serializers.Serializer):
# class SocietyResultSerializer(serializers.Serializer):
# class SocietyResultSetSerializer(serializers.Serializer):
# class Legend(object):
# class LegendSerializer(serializers.Serializer):
# def __init__(self):
# def __init__(self, society):
# def __eq__(self, other):
# def __init__(self, codes, variable):
# def __eq__(self, other):
# def __init__(self):
# def __init__(self, name, svg):
#
# Path: dplace_app/models.py
# class Society(models.Model):
# class Meta(object):
# class SocietyRelation(models.Model):
# class Category(models.Model):
# class Meta(object):
# class Variable(models.Model):
# class Meta(object):
# class CodeDescription(models.Model):
# class Meta(object):
# class Value(models.Model):
# class Meta(object):
# class Source(models.Model):
# class Meta(object):
# class LanguageFamily(models.Model):
# class Meta(object):
# class Language(models.Model):
# class Meta(object):
# class GeographicRegion(models.Model):
# class Meta(object):
# class LanguageTree(models.Model):
# class Meta(object):
# class LanguageTreeLabels(models.Model):
# class Meta:
# class LanguageTreeLabelsSequence(models.Model):
# class Meta:
# def related(self):
# def location(self):
# def original_location(self):
# def get_environmental_data(self):
# def get_cultural_trait_data(self):
# def get_data_references(self):
# def __unicode__(self):
# def get_absolute_url(self):
# def __unicode__(self):
# def coded_societies(self):
# def __unicode__(self):
# def save(self, *args, **kwargs):
# def read_code_number(self):
# def coded_societies(self):
# def __unicode__(self):
# def get_description(self):
# def __unicode__(self):
# def __unicode__(self):
# def __unicode__(self):
# def get_absolute_url(self):
# def __unicode__(self):
. Output only the next line. | queryset = models.Variable.objects\ |
Continue the code snippet: <|code_start|>
class EATestCase(TestCase):
"""
Tests basic functionality of Ethnographic Atlas variable codings in model
"""
@classmethod
def setUpTestData(cls):
<|code_end|>
. Use current file imports:
from django.test import TestCase
from dplace_app import models
and context (classes, functions, or code) from other files:
# Path: dplace_app/models.py
# class Society(models.Model):
# class Meta(object):
# class SocietyRelation(models.Model):
# class Category(models.Model):
# class Meta(object):
# class Variable(models.Model):
# class Meta(object):
# class CodeDescription(models.Model):
# class Meta(object):
# class Value(models.Model):
# class Meta(object):
# class Source(models.Model):
# class Meta(object):
# class LanguageFamily(models.Model):
# class Meta(object):
# class Language(models.Model):
# class Meta(object):
# class GeographicRegion(models.Model):
# class Meta(object):
# class LanguageTree(models.Model):
# class Meta(object):
# class LanguageTreeLabels(models.Model):
# class Meta:
# class LanguageTreeLabelsSequence(models.Model):
# class Meta:
# def related(self):
# def location(self):
# def original_location(self):
# def get_environmental_data(self):
# def get_cultural_trait_data(self):
# def get_data_references(self):
# def __unicode__(self):
# def get_absolute_url(self):
# def __unicode__(self):
# def coded_societies(self):
# def __unicode__(self):
# def save(self, *args, **kwargs):
# def read_code_number(self):
# def coded_societies(self):
# def __unicode__(self):
# def get_description(self):
# def __unicode__(self):
# def __unicode__(self):
# def __unicode__(self):
# def get_absolute_url(self):
# def __unicode__(self):
. Output only the next line. | cls.source = models.Source.objects.create( |
Given snippet: <|code_start|>
# constants for SocietyResult
SEARCH_LANGUAGE = 'l'
SEARCH_ENVIRONMENTAL = 'e'
SEARCH_VARIABLES = 'v'
SEARCH_GEOGRAPHIC = 'g'
class SourceSerializer(serializers.ModelSerializer):
class Meta(object):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from rest_framework import serializers
from dplace_app import models
and context:
# Path: dplace_app/models.py
# class Society(models.Model):
# class Meta(object):
# class SocietyRelation(models.Model):
# class Category(models.Model):
# class Meta(object):
# class Variable(models.Model):
# class Meta(object):
# class CodeDescription(models.Model):
# class Meta(object):
# class Value(models.Model):
# class Meta(object):
# class Source(models.Model):
# class Meta(object):
# class LanguageFamily(models.Model):
# class Meta(object):
# class Language(models.Model):
# class Meta(object):
# class GeographicRegion(models.Model):
# class Meta(object):
# class LanguageTree(models.Model):
# class Meta(object):
# class LanguageTreeLabels(models.Model):
# class Meta:
# class LanguageTreeLabelsSequence(models.Model):
# class Meta:
# def related(self):
# def location(self):
# def original_location(self):
# def get_environmental_data(self):
# def get_cultural_trait_data(self):
# def get_data_references(self):
# def __unicode__(self):
# def get_absolute_url(self):
# def __unicode__(self):
# def coded_societies(self):
# def __unicode__(self):
# def save(self, *args, **kwargs):
# def read_code_number(self):
# def coded_societies(self):
# def __unicode__(self):
# def get_description(self):
# def __unicode__(self):
# def __unicode__(self):
# def __unicode__(self):
# def get_absolute_url(self):
# def __unicode__(self):
which might include code, classes, or functions. Output only the next line. | model = models.Source |
Continue the code snippet: <|code_start|># coding: utf8
from __future__ import unicode_literals
def society_locations(repos):
societies = {s.ext_id: s for s in Society.objects.all()}
<|code_end|>
. Use current file imports:
import logging
from dplace_app.models import Society, GeographicRegion, SocietyRelation
from sources import get_source
and context (classes, functions, or code) from other files:
# Path: dplace_app/models.py
# class Society(models.Model):
# ext_id = models.CharField('External ID', db_index=True, unique=True, max_length=20)
# xd_id = models.CharField(
# 'Cross ID', db_index=True, default=None, null=True, max_length=10
# )
# name = models.CharField('Name', db_index=True, max_length=200)
# latitude = models.FloatField('Latitude', null=True)
# longitude = models.FloatField('Longitude', null=True)
# focal_year = models.CharField('Focal Year', null=True, blank=True, max_length=100)
# alternate_names = models.TextField(default="")
# original_name = models.CharField('ORIG_name', max_length=200, default=None, null=True)
# original_latitude = models.FloatField('ORIG_latitude', null=True)
# original_longitude = models.FloatField('ORIG_longitude', null=True)
#
# region = models.ForeignKey('GeographicRegion', null=True)
# source = models.ForeignKey('Source', null=True, related_name="societies")
# language = models.ForeignKey('Language', null=True, related_name="societies")
#
# hraf_link = models.CharField('HRAF', null=True, default=None, max_length=200)
# chirila_link = models.CharField('CHIRILA', default=None, null=True, max_length=200)
# relationships = models.ManyToManyField(
# 'self', through='SocietyRelation', symmetrical=False)
#
# @property
# def related(self):
# return list(self.relationships.all())
#
# @property
# def location(self):
# return dict(coordinates=[self.longitude, self.latitude])
#
# @property
# def original_location(self):
# return dict(coordinates=[self.original_longitude, self.original_latitude])
#
# def get_environmental_data(self):
# """Returns environmental data for the given society"""
# valueDict = defaultdict(list)
# for value in self.value_set\
# .select_related('variable').order_by('variable__name').all():
# if value.variable.type == 'environmental':
# categories = value.variable.index_categories.all()
# valueDict[str(categories[0])].append({
# 'name': value.variable.name,
# 'value': '{0}'.format(value),
# 'units': value.variable.units,
# 'comment': value.comment
# })
# return valueDict
#
# def get_cultural_trait_data(self):
# """Returns the data for the given society"""
# valueDict = defaultdict(list)
# for value in self.value_set\
# .select_related('code').\
# select_related('variable').order_by('variable__label').all():
# if value.variable.type == 'cultural':
# categories = value.variable.index_categories.all()
# for c in categories:
# valueDict[str(c)].append({
# 'id': value.id,
# 'label': value.variable.label,
# 'name': value.variable.name,
# 'code': value.coded_value,
# 'description': value.get_description(),
# 'year': value.focal_year,
# 'comment': value.comment,
# 'sources': value.references.all(),
# })
# return valueDict
#
# def get_data_references(self):
# """Returns the references for the cultural trait data"""
# refs = []
# qset = self.value_set
# for value in qset.all():
# for r in value.references.all():
# if r not in refs:
# refs.append(r)
# return sorted(refs, key=lambda r: r.author)
#
# def __unicode__(self):
# return "%s - %s" % (self.ext_id, self.name)
#
# def get_absolute_url(self):
# return reverse("view_society", args=[self.ext_id])
#
# class Meta(object):
# verbose_name_plural = "Societies"
# ordering = ('name', )
#
# class GeographicRegion(models.Model):
# level_2_re = models.FloatField()
# count = models.FloatField()
# region_nam = models.CharField(max_length=254, db_index=True)
# continent = models.CharField(max_length=254, db_index=True)
# tdwg_code = models.IntegerField()
#
# def __unicode__(self):
# return "Region: %s, Continent %s" % (self.region_nam, self.continent)
#
# class Meta(object):
# ordering = ('region_nam', )
#
# class SocietyRelation(models.Model):
# from_society = models.ForeignKey(Society, related_name='from_societies')
# to_society = models.ForeignKey(Society, related_name='to_societies')
# type = models.CharField(max_length=100, default='similar')
. Output only the next line. | regions = {r.region_nam: r for r in GeographicRegion.objects.all()} |
Given snippet: <|code_start|>
def society_locations(repos):
societies = {s.ext_id: s for s in Society.objects.all()}
regions = {r.region_nam: r for r in GeographicRegion.objects.all()}
count = 0
for soc_id, spec in repos.read_json('geo', 'societies_tdwg.json').items():
society = societies.get(soc_id)
if not society:
logging.warn("No matching society found for %s" % soc_id)
continue
region = regions.get(spec['name'])
if not region:
logging.warn("No matching region found for %s" % spec['name'])
else:
society.region = region
society.save()
count += 1
return count
def load_society_relations(repos):
count = 0
societies = {s.ext_id: s for s in Society.objects.all()}
for ds in repos.datasets:
for item in ds.society_relations:
if item.id in societies:
for rel in item.related:
if rel.id in societies:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging
from dplace_app.models import Society, GeographicRegion, SocietyRelation
from sources import get_source
and context:
# Path: dplace_app/models.py
# class Society(models.Model):
# ext_id = models.CharField('External ID', db_index=True, unique=True, max_length=20)
# xd_id = models.CharField(
# 'Cross ID', db_index=True, default=None, null=True, max_length=10
# )
# name = models.CharField('Name', db_index=True, max_length=200)
# latitude = models.FloatField('Latitude', null=True)
# longitude = models.FloatField('Longitude', null=True)
# focal_year = models.CharField('Focal Year', null=True, blank=True, max_length=100)
# alternate_names = models.TextField(default="")
# original_name = models.CharField('ORIG_name', max_length=200, default=None, null=True)
# original_latitude = models.FloatField('ORIG_latitude', null=True)
# original_longitude = models.FloatField('ORIG_longitude', null=True)
#
# region = models.ForeignKey('GeographicRegion', null=True)
# source = models.ForeignKey('Source', null=True, related_name="societies")
# language = models.ForeignKey('Language', null=True, related_name="societies")
#
# hraf_link = models.CharField('HRAF', null=True, default=None, max_length=200)
# chirila_link = models.CharField('CHIRILA', default=None, null=True, max_length=200)
# relationships = models.ManyToManyField(
# 'self', through='SocietyRelation', symmetrical=False)
#
# @property
# def related(self):
# return list(self.relationships.all())
#
# @property
# def location(self):
# return dict(coordinates=[self.longitude, self.latitude])
#
# @property
# def original_location(self):
# return dict(coordinates=[self.original_longitude, self.original_latitude])
#
# def get_environmental_data(self):
# """Returns environmental data for the given society"""
# valueDict = defaultdict(list)
# for value in self.value_set\
# .select_related('variable').order_by('variable__name').all():
# if value.variable.type == 'environmental':
# categories = value.variable.index_categories.all()
# valueDict[str(categories[0])].append({
# 'name': value.variable.name,
# 'value': '{0}'.format(value),
# 'units': value.variable.units,
# 'comment': value.comment
# })
# return valueDict
#
# def get_cultural_trait_data(self):
# """Returns the data for the given society"""
# valueDict = defaultdict(list)
# for value in self.value_set\
# .select_related('code').\
# select_related('variable').order_by('variable__label').all():
# if value.variable.type == 'cultural':
# categories = value.variable.index_categories.all()
# for c in categories:
# valueDict[str(c)].append({
# 'id': value.id,
# 'label': value.variable.label,
# 'name': value.variable.name,
# 'code': value.coded_value,
# 'description': value.get_description(),
# 'year': value.focal_year,
# 'comment': value.comment,
# 'sources': value.references.all(),
# })
# return valueDict
#
# def get_data_references(self):
# """Returns the references for the cultural trait data"""
# refs = []
# qset = self.value_set
# for value in qset.all():
# for r in value.references.all():
# if r not in refs:
# refs.append(r)
# return sorted(refs, key=lambda r: r.author)
#
# def __unicode__(self):
# return "%s - %s" % (self.ext_id, self.name)
#
# def get_absolute_url(self):
# return reverse("view_society", args=[self.ext_id])
#
# class Meta(object):
# verbose_name_plural = "Societies"
# ordering = ('name', )
#
# class GeographicRegion(models.Model):
# level_2_re = models.FloatField()
# count = models.FloatField()
# region_nam = models.CharField(max_length=254, db_index=True)
# continent = models.CharField(max_length=254, db_index=True)
# tdwg_code = models.IntegerField()
#
# def __unicode__(self):
# return "Region: %s, Continent %s" % (self.region_nam, self.continent)
#
# class Meta(object):
# ordering = ('region_nam', )
#
# class SocietyRelation(models.Model):
# from_society = models.ForeignKey(Society, related_name='from_societies')
# to_society = models.ForeignKey(Society, related_name='to_societies')
# type = models.CharField(max_length=100, default='similar')
which might include code, classes, or functions. Output only the next line. | SocietyRelation.objects.create( |
Given snippet: <|code_start|> families, languages = {}, {}
societies = {s.ext_id: s for s in Society.objects.all()}
for ds in repos.datasets:
for soc in ds.societies:
ldata = languoids.get(soc.glottocode)
if not ldata: # pragma: no cover
logging.warning("No language found for %s, skipping" % soc.glottocode)
continue
soc = societies[soc.id]
soc.language = load_language(ldata, languages, families)
soc.save()
return len(languages)
def load_language(ldata, languages, families):
# get or create the language family:
# Note: If the related languoid is an isolate or a top-level family, we create a
# LanguageFamily object with the data of the languoid.
family_id = ldata.family_id or ldata.id
family = families.get(family_id)
if not family:
family_name = ldata.family_name or ldata.name
family = LanguageFamily.objects.create(name=family_name)
family.save()
families[family_id] = family
# get or create the language:
language = languages.get(ldata.id)
if not language:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging
from dplace_app.models import Language, LanguageFamily, Society
and context:
# Path: dplace_app/models.py
# class Language(models.Model):
# name = models.CharField(max_length=200, db_index=True)
# glotto_code = models.CharField(max_length=8, null=False, unique=True)
#
# # needs to be null=True because some glottolog languages do not have isocodes
# iso_code = models.CharField(max_length=3, null=True)
# family = models.ForeignKey('LanguageFamily', null=True)
#
# def __unicode__(self):
# return "Language: %s, ISO Code %s, Glotto Code %s" % (
# self.name, self.iso_code, self.glotto_code)
#
# def get_absolute_url(self):
# return reverse("view_language", args=[self.glotto_code])
#
# class Meta(object):
# verbose_name = "Language"
# ordering = ('name', )
#
# class LanguageFamily(models.Model):
# name = models.CharField(max_length=50, db_index=True)
#
# class Meta(object):
# ordering = ('name', )
#
# class Society(models.Model):
# ext_id = models.CharField('External ID', db_index=True, unique=True, max_length=20)
# xd_id = models.CharField(
# 'Cross ID', db_index=True, default=None, null=True, max_length=10
# )
# name = models.CharField('Name', db_index=True, max_length=200)
# latitude = models.FloatField('Latitude', null=True)
# longitude = models.FloatField('Longitude', null=True)
# focal_year = models.CharField('Focal Year', null=True, blank=True, max_length=100)
# alternate_names = models.TextField(default="")
# original_name = models.CharField('ORIG_name', max_length=200, default=None, null=True)
# original_latitude = models.FloatField('ORIG_latitude', null=True)
# original_longitude = models.FloatField('ORIG_longitude', null=True)
#
# region = models.ForeignKey('GeographicRegion', null=True)
# source = models.ForeignKey('Source', null=True, related_name="societies")
# language = models.ForeignKey('Language', null=True, related_name="societies")
#
# hraf_link = models.CharField('HRAF', null=True, default=None, max_length=200)
# chirila_link = models.CharField('CHIRILA', default=None, null=True, max_length=200)
# relationships = models.ManyToManyField(
# 'self', through='SocietyRelation', symmetrical=False)
#
# @property
# def related(self):
# return list(self.relationships.all())
#
# @property
# def location(self):
# return dict(coordinates=[self.longitude, self.latitude])
#
# @property
# def original_location(self):
# return dict(coordinates=[self.original_longitude, self.original_latitude])
#
# def get_environmental_data(self):
# """Returns environmental data for the given society"""
# valueDict = defaultdict(list)
# for value in self.value_set\
# .select_related('variable').order_by('variable__name').all():
# if value.variable.type == 'environmental':
# categories = value.variable.index_categories.all()
# valueDict[str(categories[0])].append({
# 'name': value.variable.name,
# 'value': '{0}'.format(value),
# 'units': value.variable.units,
# 'comment': value.comment
# })
# return valueDict
#
# def get_cultural_trait_data(self):
# """Returns the data for the given society"""
# valueDict = defaultdict(list)
# for value in self.value_set\
# .select_related('code').\
# select_related('variable').order_by('variable__label').all():
# if value.variable.type == 'cultural':
# categories = value.variable.index_categories.all()
# for c in categories:
# valueDict[str(c)].append({
# 'id': value.id,
# 'label': value.variable.label,
# 'name': value.variable.name,
# 'code': value.coded_value,
# 'description': value.get_description(),
# 'year': value.focal_year,
# 'comment': value.comment,
# 'sources': value.references.all(),
# })
# return valueDict
#
# def get_data_references(self):
# """Returns the references for the cultural trait data"""
# refs = []
# qset = self.value_set
# for value in qset.all():
# for r in value.references.all():
# if r not in refs:
# refs.append(r)
# return sorted(refs, key=lambda r: r.author)
#
# def __unicode__(self):
# return "%s - %s" % (self.ext_id, self.name)
#
# def get_absolute_url(self):
# return reverse("view_society", args=[self.ext_id])
#
# class Meta(object):
# verbose_name_plural = "Societies"
# ordering = ('name', )
which might include code, classes, or functions. Output only the next line. | language = Language.objects.create( |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
def load_languages(repos):
languoids = {
l.id: l for l in repos.read_csv('csv', 'glottolog.csv', namedtuples=True)}
families, languages = {}, {}
societies = {s.ext_id: s for s in Society.objects.all()}
for ds in repos.datasets:
for soc in ds.societies:
ldata = languoids.get(soc.glottocode)
if not ldata: # pragma: no cover
logging.warning("No language found for %s, skipping" % soc.glottocode)
continue
soc = societies[soc.id]
soc.language = load_language(ldata, languages, families)
soc.save()
return len(languages)
def load_language(ldata, languages, families):
# get or create the language family:
# Note: If the related languoid is an isolate or a top-level family, we create a
# LanguageFamily object with the data of the languoid.
family_id = ldata.family_id or ldata.id
family = families.get(family_id)
if not family:
family_name = ldata.family_name or ldata.name
<|code_end|>
, generate the next line using the imports in this file:
import logging
from dplace_app.models import Language, LanguageFamily, Society
and context (functions, classes, or occasionally code) from other files:
# Path: dplace_app/models.py
# class Language(models.Model):
# name = models.CharField(max_length=200, db_index=True)
# glotto_code = models.CharField(max_length=8, null=False, unique=True)
#
# # needs to be null=True because some glottolog languages do not have isocodes
# iso_code = models.CharField(max_length=3, null=True)
# family = models.ForeignKey('LanguageFamily', null=True)
#
# def __unicode__(self):
# return "Language: %s, ISO Code %s, Glotto Code %s" % (
# self.name, self.iso_code, self.glotto_code)
#
# def get_absolute_url(self):
# return reverse("view_language", args=[self.glotto_code])
#
# class Meta(object):
# verbose_name = "Language"
# ordering = ('name', )
#
# class LanguageFamily(models.Model):
# name = models.CharField(max_length=50, db_index=True)
#
# class Meta(object):
# ordering = ('name', )
#
# class Society(models.Model):
# ext_id = models.CharField('External ID', db_index=True, unique=True, max_length=20)
# xd_id = models.CharField(
# 'Cross ID', db_index=True, default=None, null=True, max_length=10
# )
# name = models.CharField('Name', db_index=True, max_length=200)
# latitude = models.FloatField('Latitude', null=True)
# longitude = models.FloatField('Longitude', null=True)
# focal_year = models.CharField('Focal Year', null=True, blank=True, max_length=100)
# alternate_names = models.TextField(default="")
# original_name = models.CharField('ORIG_name', max_length=200, default=None, null=True)
# original_latitude = models.FloatField('ORIG_latitude', null=True)
# original_longitude = models.FloatField('ORIG_longitude', null=True)
#
# region = models.ForeignKey('GeographicRegion', null=True)
# source = models.ForeignKey('Source', null=True, related_name="societies")
# language = models.ForeignKey('Language', null=True, related_name="societies")
#
# hraf_link = models.CharField('HRAF', null=True, default=None, max_length=200)
# chirila_link = models.CharField('CHIRILA', default=None, null=True, max_length=200)
# relationships = models.ManyToManyField(
# 'self', through='SocietyRelation', symmetrical=False)
#
# @property
# def related(self):
# return list(self.relationships.all())
#
# @property
# def location(self):
# return dict(coordinates=[self.longitude, self.latitude])
#
# @property
# def original_location(self):
# return dict(coordinates=[self.original_longitude, self.original_latitude])
#
# def get_environmental_data(self):
# """Returns environmental data for the given society"""
# valueDict = defaultdict(list)
# for value in self.value_set\
# .select_related('variable').order_by('variable__name').all():
# if value.variable.type == 'environmental':
# categories = value.variable.index_categories.all()
# valueDict[str(categories[0])].append({
# 'name': value.variable.name,
# 'value': '{0}'.format(value),
# 'units': value.variable.units,
# 'comment': value.comment
# })
# return valueDict
#
# def get_cultural_trait_data(self):
# """Returns the data for the given society"""
# valueDict = defaultdict(list)
# for value in self.value_set\
# .select_related('code').\
# select_related('variable').order_by('variable__label').all():
# if value.variable.type == 'cultural':
# categories = value.variable.index_categories.all()
# for c in categories:
# valueDict[str(c)].append({
# 'id': value.id,
# 'label': value.variable.label,
# 'name': value.variable.name,
# 'code': value.coded_value,
# 'description': value.get_description(),
# 'year': value.focal_year,
# 'comment': value.comment,
# 'sources': value.references.all(),
# })
# return valueDict
#
# def get_data_references(self):
# """Returns the references for the cultural trait data"""
# refs = []
# qset = self.value_set
# for value in qset.all():
# for r in value.references.all():
# if r not in refs:
# refs.append(r)
# return sorted(refs, key=lambda r: r.author)
#
# def __unicode__(self):
# return "%s - %s" % (self.ext_id, self.name)
#
# def get_absolute_url(self):
# return reverse("view_society", args=[self.ext_id])
#
# class Meta(object):
# verbose_name_plural = "Societies"
# ordering = ('name', )
. Output only the next line. | family = LanguageFamily.objects.create(name=family_name) |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
def load_languages(repos):
languoids = {
l.id: l for l in repos.read_csv('csv', 'glottolog.csv', namedtuples=True)}
families, languages = {}, {}
<|code_end|>
, determine the next line of code. You have imports:
import logging
from dplace_app.models import Language, LanguageFamily, Society
and context (class names, function names, or code) available:
# Path: dplace_app/models.py
# class Language(models.Model):
# name = models.CharField(max_length=200, db_index=True)
# glotto_code = models.CharField(max_length=8, null=False, unique=True)
#
# # needs to be null=True because some glottolog languages do not have isocodes
# iso_code = models.CharField(max_length=3, null=True)
# family = models.ForeignKey('LanguageFamily', null=True)
#
# def __unicode__(self):
# return "Language: %s, ISO Code %s, Glotto Code %s" % (
# self.name, self.iso_code, self.glotto_code)
#
# def get_absolute_url(self):
# return reverse("view_language", args=[self.glotto_code])
#
# class Meta(object):
# verbose_name = "Language"
# ordering = ('name', )
#
# class LanguageFamily(models.Model):
# name = models.CharField(max_length=50, db_index=True)
#
# class Meta(object):
# ordering = ('name', )
#
# class Society(models.Model):
# ext_id = models.CharField('External ID', db_index=True, unique=True, max_length=20)
# xd_id = models.CharField(
# 'Cross ID', db_index=True, default=None, null=True, max_length=10
# )
# name = models.CharField('Name', db_index=True, max_length=200)
# latitude = models.FloatField('Latitude', null=True)
# longitude = models.FloatField('Longitude', null=True)
# focal_year = models.CharField('Focal Year', null=True, blank=True, max_length=100)
# alternate_names = models.TextField(default="")
# original_name = models.CharField('ORIG_name', max_length=200, default=None, null=True)
# original_latitude = models.FloatField('ORIG_latitude', null=True)
# original_longitude = models.FloatField('ORIG_longitude', null=True)
#
# region = models.ForeignKey('GeographicRegion', null=True)
# source = models.ForeignKey('Source', null=True, related_name="societies")
# language = models.ForeignKey('Language', null=True, related_name="societies")
#
# hraf_link = models.CharField('HRAF', null=True, default=None, max_length=200)
# chirila_link = models.CharField('CHIRILA', default=None, null=True, max_length=200)
# relationships = models.ManyToManyField(
# 'self', through='SocietyRelation', symmetrical=False)
#
# @property
# def related(self):
# return list(self.relationships.all())
#
# @property
# def location(self):
# return dict(coordinates=[self.longitude, self.latitude])
#
# @property
# def original_location(self):
# return dict(coordinates=[self.original_longitude, self.original_latitude])
#
# def get_environmental_data(self):
# """Returns environmental data for the given society"""
# valueDict = defaultdict(list)
# for value in self.value_set\
# .select_related('variable').order_by('variable__name').all():
# if value.variable.type == 'environmental':
# categories = value.variable.index_categories.all()
# valueDict[str(categories[0])].append({
# 'name': value.variable.name,
# 'value': '{0}'.format(value),
# 'units': value.variable.units,
# 'comment': value.comment
# })
# return valueDict
#
# def get_cultural_trait_data(self):
# """Returns the data for the given society"""
# valueDict = defaultdict(list)
# for value in self.value_set\
# .select_related('code').\
# select_related('variable').order_by('variable__label').all():
# if value.variable.type == 'cultural':
# categories = value.variable.index_categories.all()
# for c in categories:
# valueDict[str(c)].append({
# 'id': value.id,
# 'label': value.variable.label,
# 'name': value.variable.name,
# 'code': value.coded_value,
# 'description': value.get_description(),
# 'year': value.focal_year,
# 'comment': value.comment,
# 'sources': value.references.all(),
# })
# return valueDict
#
# def get_data_references(self):
# """Returns the references for the cultural trait data"""
# refs = []
# qset = self.value_set
# for value in qset.all():
# for r in value.references.all():
# if r not in refs:
# refs.append(r)
# return sorted(refs, key=lambda r: r.author)
#
# def __unicode__(self):
# return "%s - %s" % (self.ext_id, self.name)
#
# def get_absolute_url(self):
# return reverse("view_society", args=[self.ext_id])
#
# class Meta(object):
# verbose_name_plural = "Societies"
# ordering = ('name', )
. Output only the next line. | societies = {s.ext_id: s for s in Society.objects.all()} |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
_SOURCE_CACHE = {}
def get_source(ds):
dsid = getattr(ds, 'id', ds)
if dsid not in _SOURCE_CACHE:
try:
<|code_end|>
, generate the next line using the imports in this file:
import logging
from clldutils.text import split_text
from dplace_app.models import Source
and context (functions, classes, or occasionally code) from other files:
# Path: dplace_app/models.py
# class Source(models.Model):
# """
# Stores references for Value, also for dataset sources.
# """
# # Not really sure if we should separate dataset sources from references (I
# # think we should), but since all the code has already been written with
# # this model, I won't change it yet.
#
# # text, because might be '1996', '1999-2001', or 'ND'
# year = models.CharField(max_length=30, db_index=True)
# author = models.TextField(db_index=True)
# reference = models.TextField()
# name = models.CharField(max_length=100, db_index=True, default="")
#
# def __unicode__(self):
# return "%s (%s)" % (self.author, self.year)
#
# class Meta(object):
# unique_together = ('year', 'author')
# ordering = ('name', )
. Output only the next line. | o = Source.objects.get(year=ds.year, author=ds.author) |
Predict the next line for this snippet: <|code_start|>router.register(r'codes', api_views.CodeDescriptionViewSet)
router.register(r'values', api_views.ValueViewSet)
router.register(r'societies', api_views.SocietyViewSet)
router.register(r'language_families', api_views.LanguageFamilyViewSet)
router.register(r'languages', api_views.LanguageViewSet)
router.register(r'language_trees', api_views.LanguageTreeViewSet)
router.register(r'geographic_regions', api_views.GeographicRegionViewSet)
router.register(r'sources', api_views.SourceViewSet)
router.register(r'language_tree_labels', api_views.LanguageTreeLabelsViewSet)
urlpatterns = [
# !!!!!! Important !!!!!!
# To reduce traffic caused by bots, spiders, etc
    # all URL paths with "...?param1=a&param2=b" etc. will return HTTP 404
# It's set in:
# - "views.py" => def angular(request): [for all pages passed to angular]
# - "api_views.py" => def detail(self, request, society_id): [for /society/...?...&]
url(r'^$', RedirectView.as_view(url='angular/', permanent=True), name='home'),
# Redirect all static icon images (different sizes) crawled by any sites and bots
url(r'^favicon\.ico/?$', RedirectView.as_view(url='/static/images/icons/favicon.ico')),
url(r'^apple\-touch\-icon(\-precomposed)?\.png/?$', RedirectView.as_view(url='/static/images/icons/D-PLACE_Favicon_57x57.png')),
url(r'^touch\-icon\iphone-\.png/?$', RedirectView.as_view(url='/static/images/icons/D-PLACE_Favicon_57x57.png')),
url(r'^(apple\-)?touch\-icon\-(?P<size>.+?)(\-precomposed)?\.png/?$', RedirectView.as_view(url='/static/images/icons/D-PLACE_Favicon_%(size)s.png')),
url(r'^browserconfig\.xml/?$', RedirectView.as_view(url='/static/images/icons/browserconfig.xml')),
# This is needed in order to auto-generate sitemaps links
# and to pass only valid paths to angular
<|code_end|>
with the help of current file imports:
from django.conf.urls import url, include
from django.views.generic.base import RedirectView
from rest_framework import routers
from rest_framework import renderers
from dplace_app import views, api_views
and context from other files:
# Path: dplace_app/views.py
# def view_language(request, glottocode):
# def download_file(request):
# def angular(request):
#
# Path: dplace_app/api_views.py
# class VariableViewSet(viewsets.ReadOnlyModelViewSet):
# class CategoryViewSet(viewsets.ReadOnlyModelViewSet):
# class CodeDescriptionViewSet(viewsets.ReadOnlyModelViewSet):
# class ValueViewSet(viewsets.ReadOnlyModelViewSet):
# class SocietyViewSet(viewsets.ReadOnlyModelViewSet):
# class LargeResultsSetPagination(PageNumberPagination):
# class VeryLargeResultsSetPagination(PageNumberPagination):
# class LanguageViewSet(viewsets.ReadOnlyModelViewSet):
# class LanguageFamilyViewSet(viewsets.ReadOnlyModelViewSet):
# class TreeResultsSetPagination(PageNumberPagination):
# class LanguageTreeViewSet(viewsets.ReadOnlyModelViewSet):
# class LanguageTreeLabelsViewSet(viewsets.ReadOnlyModelViewSet):
# class SourceViewSet(viewsets.ReadOnlyModelViewSet):
# class GeographicRegionViewSet(viewsets.ReadOnlyModelViewSet):
# def retrieve(self, request, *args, **kwargs):
# def retrieve(self, request, *args, **kwargs):
# def detail(self, request, society_id):
# def get_query_from_json(request):
# def result_set_from_query_dict(query_dict):
# def id_array(l):
# def trees_from_societies(request):
# def find_societies(request):
# def get_categories(request):
# def get_dataset_sources(request):
# def get_min_and_max(request):
# def bin_cont_data(request): # MAKE THIS GENERIC
# def csv_download(request):
, which may contain function names, class names, or code. Output only the next line. | url(r'^home/?$', views.angular), |
Predict the next line for this snippet: <|code_start|>
def load_phylogenies(repos):
sources, sequences = {}, []
for obj in repos.phylogenies:
_load(obj, sources, sequences)
LanguageTreeLabelsSequence.objects.bulk_create(sequences)
return len(repos.phylogenies)
def _load(obj, sources, sequences):
# now add languages to the tree
reader = NexusReader(obj.trees.as_posix())
<|code_end|>
with the help of current file imports:
import re
from clldutils.misc import nfilter
from nexus import NexusReader
from ete3 import Tree
from dplace_app.models import (
Society, LanguageTree, LanguageTreeLabels, LanguageTreeLabelsSequence,
)
and context from other files:
# Path: dplace_app/models.py
# class Society(models.Model):
# ext_id = models.CharField('External ID', db_index=True, unique=True, max_length=20)
# xd_id = models.CharField(
# 'Cross ID', db_index=True, default=None, null=True, max_length=10
# )
# name = models.CharField('Name', db_index=True, max_length=200)
# latitude = models.FloatField('Latitude', null=True)
# longitude = models.FloatField('Longitude', null=True)
# focal_year = models.CharField('Focal Year', null=True, blank=True, max_length=100)
# alternate_names = models.TextField(default="")
# original_name = models.CharField('ORIG_name', max_length=200, default=None, null=True)
# original_latitude = models.FloatField('ORIG_latitude', null=True)
# original_longitude = models.FloatField('ORIG_longitude', null=True)
#
# region = models.ForeignKey('GeographicRegion', null=True)
# source = models.ForeignKey('Source', null=True, related_name="societies")
# language = models.ForeignKey('Language', null=True, related_name="societies")
#
# hraf_link = models.CharField('HRAF', null=True, default=None, max_length=200)
# chirila_link = models.CharField('CHIRILA', default=None, null=True, max_length=200)
# relationships = models.ManyToManyField(
# 'self', through='SocietyRelation', symmetrical=False)
#
# @property
# def related(self):
# return list(self.relationships.all())
#
# @property
# def location(self):
# return dict(coordinates=[self.longitude, self.latitude])
#
# @property
# def original_location(self):
# return dict(coordinates=[self.original_longitude, self.original_latitude])
#
# def get_environmental_data(self):
# """Returns environmental data for the given society"""
# valueDict = defaultdict(list)
# for value in self.value_set\
# .select_related('variable').order_by('variable__name').all():
# if value.variable.type == 'environmental':
# categories = value.variable.index_categories.all()
# valueDict[str(categories[0])].append({
# 'name': value.variable.name,
# 'value': '{0}'.format(value),
# 'units': value.variable.units,
# 'comment': value.comment
# })
# return valueDict
#
# def get_cultural_trait_data(self):
# """Returns the data for the given society"""
# valueDict = defaultdict(list)
# for value in self.value_set\
# .select_related('code').\
# select_related('variable').order_by('variable__label').all():
# if value.variable.type == 'cultural':
# categories = value.variable.index_categories.all()
# for c in categories:
# valueDict[str(c)].append({
# 'id': value.id,
# 'label': value.variable.label,
# 'name': value.variable.name,
# 'code': value.coded_value,
# 'description': value.get_description(),
# 'year': value.focal_year,
# 'comment': value.comment,
# 'sources': value.references.all(),
# })
# return valueDict
#
# def get_data_references(self):
# """Returns the references for the cultural trait data"""
# refs = []
# qset = self.value_set
# for value in qset.all():
# for r in value.references.all():
# if r not in refs:
# refs.append(r)
# return sorted(refs, key=lambda r: r.author)
#
# def __unicode__(self):
# return "%s - %s" % (self.ext_id, self.name)
#
# def get_absolute_url(self):
# return reverse("view_society", args=[self.ext_id])
#
# class Meta(object):
# verbose_name_plural = "Societies"
# ordering = ('name', )
#
# class LanguageTree(models.Model):
# name = models.CharField(max_length=50, db_index=True)
# newick_string = models.TextField(default='')
# source = models.ForeignKey('Source', null=True)
# taxa = models.ManyToManyField('LanguageTreeLabels')
#
# class Meta(object):
# ordering = ('name', )
#
# class LanguageTreeLabels(models.Model):
# languageTree = models.ForeignKey('LanguageTree')
# label = models.CharField(max_length=255, db_index=True)
# language = models.ForeignKey('Language', null=True)
# societies = models.ManyToManyField('Society', through="LanguageTreeLabelsSequence")
#
# class Meta:
# ordering = ("-languagetreelabelssequence__fixed_order",)
#
# class LanguageTreeLabelsSequence(models.Model):
# society = models.ForeignKey('Society')
# labels = models.ForeignKey('LanguageTreeLabels')
# fixed_order = models.PositiveSmallIntegerField(db_index=True)
#
# class Meta:
# ordering = ("-fixed_order",)
, which may contain function names, class names, or code. Output only the next line. | tree = LanguageTree.objects.create(name=obj.id) |
Using the snippet: <|code_start|>
def _load(obj, sources, sequences):
# now add languages to the tree
reader = NexusReader(obj.trees.as_posix())
tree = LanguageTree.objects.create(name=obj.id)
source = sources.get((obj.author, obj.year))
if not source:
sources[(obj.author, obj.year)] = source = obj.as_source()
source.save()
tree.source = source
# Remove '[&R]' from newick string
reader.trees.detranslate()
newick = re.sub(r'\[.*?\]', '', reader.trees.trees[0])
try:
newick = newick[newick.index('=') + 1:]
except ValueError: # pragma: no cover
newick = newick
tree.newick_string = str(newick)
Tree(tree.newick_string, format=1)
for item in obj.taxa:
name_on_tip = item['taxon']
xd_ids = nfilter(i.strip() for i in item['xd_ids'].split(','))
society_ids = nfilter(i.strip() for i in item['soc_ids'].split(','))
if not xd_ids:
continue
<|code_end|>
, determine the next line of code. You have imports:
import re
from clldutils.misc import nfilter
from nexus import NexusReader
from ete3 import Tree
from dplace_app.models import (
Society, LanguageTree, LanguageTreeLabels, LanguageTreeLabelsSequence,
)
and context (class names, function names, or code) available:
# Path: dplace_app/models.py
# class Society(models.Model):
# ext_id = models.CharField('External ID', db_index=True, unique=True, max_length=20)
# xd_id = models.CharField(
# 'Cross ID', db_index=True, default=None, null=True, max_length=10
# )
# name = models.CharField('Name', db_index=True, max_length=200)
# latitude = models.FloatField('Latitude', null=True)
# longitude = models.FloatField('Longitude', null=True)
# focal_year = models.CharField('Focal Year', null=True, blank=True, max_length=100)
# alternate_names = models.TextField(default="")
# original_name = models.CharField('ORIG_name', max_length=200, default=None, null=True)
# original_latitude = models.FloatField('ORIG_latitude', null=True)
# original_longitude = models.FloatField('ORIG_longitude', null=True)
#
# region = models.ForeignKey('GeographicRegion', null=True)
# source = models.ForeignKey('Source', null=True, related_name="societies")
# language = models.ForeignKey('Language', null=True, related_name="societies")
#
# hraf_link = models.CharField('HRAF', null=True, default=None, max_length=200)
# chirila_link = models.CharField('CHIRILA', default=None, null=True, max_length=200)
# relationships = models.ManyToManyField(
# 'self', through='SocietyRelation', symmetrical=False)
#
# @property
# def related(self):
# return list(self.relationships.all())
#
# @property
# def location(self):
# return dict(coordinates=[self.longitude, self.latitude])
#
# @property
# def original_location(self):
# return dict(coordinates=[self.original_longitude, self.original_latitude])
#
# def get_environmental_data(self):
# """Returns environmental data for the given society"""
# valueDict = defaultdict(list)
# for value in self.value_set\
# .select_related('variable').order_by('variable__name').all():
# if value.variable.type == 'environmental':
# categories = value.variable.index_categories.all()
# valueDict[str(categories[0])].append({
# 'name': value.variable.name,
# 'value': '{0}'.format(value),
# 'units': value.variable.units,
# 'comment': value.comment
# })
# return valueDict
#
# def get_cultural_trait_data(self):
# """Returns the data for the given society"""
# valueDict = defaultdict(list)
# for value in self.value_set\
# .select_related('code').\
# select_related('variable').order_by('variable__label').all():
# if value.variable.type == 'cultural':
# categories = value.variable.index_categories.all()
# for c in categories:
# valueDict[str(c)].append({
# 'id': value.id,
# 'label': value.variable.label,
# 'name': value.variable.name,
# 'code': value.coded_value,
# 'description': value.get_description(),
# 'year': value.focal_year,
# 'comment': value.comment,
# 'sources': value.references.all(),
# })
# return valueDict
#
# def get_data_references(self):
# """Returns the references for the cultural trait data"""
# refs = []
# qset = self.value_set
# for value in qset.all():
# for r in value.references.all():
# if r not in refs:
# refs.append(r)
# return sorted(refs, key=lambda r: r.author)
#
# def __unicode__(self):
# return "%s - %s" % (self.ext_id, self.name)
#
# def get_absolute_url(self):
# return reverse("view_society", args=[self.ext_id])
#
# class Meta(object):
# verbose_name_plural = "Societies"
# ordering = ('name', )
#
# class LanguageTree(models.Model):
# name = models.CharField(max_length=50, db_index=True)
# newick_string = models.TextField(default='')
# source = models.ForeignKey('Source', null=True)
# taxa = models.ManyToManyField('LanguageTreeLabels')
#
# class Meta(object):
# ordering = ('name', )
#
# class LanguageTreeLabels(models.Model):
# languageTree = models.ForeignKey('LanguageTree')
# label = models.CharField(max_length=255, db_index=True)
# language = models.ForeignKey('Language', null=True)
# societies = models.ManyToManyField('Society', through="LanguageTreeLabelsSequence")
#
# class Meta:
# ordering = ("-languagetreelabelssequence__fixed_order",)
#
# class LanguageTreeLabelsSequence(models.Model):
# society = models.ForeignKey('Society')
# labels = models.ForeignKey('LanguageTreeLabels')
# fixed_order = models.PositiveSmallIntegerField(db_index=True)
#
# class Meta:
# ordering = ("-fixed_order",)
. Output only the next line. | label = LanguageTreeLabels.objects.create(languageTree=tree, label=name_on_tip) |
Given the following code snippet before the placeholder: <|code_start|>
def load_phylogenies(repos):
sources, sequences = {}, []
for obj in repos.phylogenies:
_load(obj, sources, sequences)
<|code_end|>
, predict the next line using imports from the current file:
import re
from clldutils.misc import nfilter
from nexus import NexusReader
from ete3 import Tree
from dplace_app.models import (
Society, LanguageTree, LanguageTreeLabels, LanguageTreeLabelsSequence,
)
and context including class names, function names, and sometimes code from other files:
# Path: dplace_app/models.py
# class Society(models.Model):
# ext_id = models.CharField('External ID', db_index=True, unique=True, max_length=20)
# xd_id = models.CharField(
# 'Cross ID', db_index=True, default=None, null=True, max_length=10
# )
# name = models.CharField('Name', db_index=True, max_length=200)
# latitude = models.FloatField('Latitude', null=True)
# longitude = models.FloatField('Longitude', null=True)
# focal_year = models.CharField('Focal Year', null=True, blank=True, max_length=100)
# alternate_names = models.TextField(default="")
# original_name = models.CharField('ORIG_name', max_length=200, default=None, null=True)
# original_latitude = models.FloatField('ORIG_latitude', null=True)
# original_longitude = models.FloatField('ORIG_longitude', null=True)
#
# region = models.ForeignKey('GeographicRegion', null=True)
# source = models.ForeignKey('Source', null=True, related_name="societies")
# language = models.ForeignKey('Language', null=True, related_name="societies")
#
# hraf_link = models.CharField('HRAF', null=True, default=None, max_length=200)
# chirila_link = models.CharField('CHIRILA', default=None, null=True, max_length=200)
# relationships = models.ManyToManyField(
# 'self', through='SocietyRelation', symmetrical=False)
#
# @property
# def related(self):
# return list(self.relationships.all())
#
# @property
# def location(self):
# return dict(coordinates=[self.longitude, self.latitude])
#
# @property
# def original_location(self):
# return dict(coordinates=[self.original_longitude, self.original_latitude])
#
# def get_environmental_data(self):
# """Returns environmental data for the given society"""
# valueDict = defaultdict(list)
# for value in self.value_set\
# .select_related('variable').order_by('variable__name').all():
# if value.variable.type == 'environmental':
# categories = value.variable.index_categories.all()
# valueDict[str(categories[0])].append({
# 'name': value.variable.name,
# 'value': '{0}'.format(value),
# 'units': value.variable.units,
# 'comment': value.comment
# })
# return valueDict
#
# def get_cultural_trait_data(self):
# """Returns the data for the given society"""
# valueDict = defaultdict(list)
# for value in self.value_set\
# .select_related('code').\
# select_related('variable').order_by('variable__label').all():
# if value.variable.type == 'cultural':
# categories = value.variable.index_categories.all()
# for c in categories:
# valueDict[str(c)].append({
# 'id': value.id,
# 'label': value.variable.label,
# 'name': value.variable.name,
# 'code': value.coded_value,
# 'description': value.get_description(),
# 'year': value.focal_year,
# 'comment': value.comment,
# 'sources': value.references.all(),
# })
# return valueDict
#
# def get_data_references(self):
# """Returns the references for the cultural trait data"""
# refs = []
# qset = self.value_set
# for value in qset.all():
# for r in value.references.all():
# if r not in refs:
# refs.append(r)
# return sorted(refs, key=lambda r: r.author)
#
# def __unicode__(self):
# return "%s - %s" % (self.ext_id, self.name)
#
# def get_absolute_url(self):
# return reverse("view_society", args=[self.ext_id])
#
# class Meta(object):
# verbose_name_plural = "Societies"
# ordering = ('name', )
#
# class LanguageTree(models.Model):
# name = models.CharField(max_length=50, db_index=True)
# newick_string = models.TextField(default='')
# source = models.ForeignKey('Source', null=True)
# taxa = models.ManyToManyField('LanguageTreeLabels')
#
# class Meta(object):
# ordering = ('name', )
#
# class LanguageTreeLabels(models.Model):
# languageTree = models.ForeignKey('LanguageTree')
# label = models.CharField(max_length=255, db_index=True)
# language = models.ForeignKey('Language', null=True)
# societies = models.ManyToManyField('Society', through="LanguageTreeLabelsSequence")
#
# class Meta:
# ordering = ("-languagetreelabelssequence__fixed_order",)
#
# class LanguageTreeLabelsSequence(models.Model):
# society = models.ForeignKey('Society')
# labels = models.ForeignKey('LanguageTreeLabels')
# fixed_order = models.PositiveSmallIntegerField(db_index=True)
#
# class Meta:
# ordering = ("-fixed_order",)
. Output only the next line. | LanguageTreeLabelsSequence.objects.bulk_create(sequences) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.