Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Given the code snippet: <|code_start|>
friendships: Friendships = {user.id: [] for user in users}
for i, j in friend_pairs:
friendships[i].append(j)
friendships[j].append(i)
assert friendships[4] == [3, 5]
assert friendships[8] == [6, 7, 9]
Path = List[int]
def shortest_paths_from(from_user_id: int,
friendships: Friendships) -> Dict[int, List[Path]]:
# A dictionary from "user_id" to *all* shortest paths to that user
shortest_paths_to: Dict[int, List[Path]] = {from_user_id: [[]]}
# A queue of (previous user, next user) that we need to check.
# Starts out with all pairs (from_user, friend_of_from_user)
frontier = deque((from_user_id, friend_id)
for friend_id in friendships[from_user_id])
# Keep going until we empty the queue.
while frontier:
# Remove the pair that's next in the queue.
prev_user_id, user_id = frontier.popleft()
# Because of the way we're adding to the queue,
# necessarily we already know some shortest paths to prev_user
<|code_end|>
, generate the next line using the imports in this file:
from typing import NamedTuple
from typing import Dict, List
from collections import deque
from scratch.linear_algebra import Matrix, make_matrix, shape
from scratch.linear_algebra import Vector, dot
from typing import Tuple
from scratch.linear_algebra import magnitude, distance
from collections import Counter
import random
import tqdm
and context (functions, classes, or occasionally code) from other files:
# Path: scratch/linear_algebra.py
# def add(v: Vector, w: Vector) -> Vector:
# def subtract(v: Vector, w: Vector) -> Vector:
# def vector_sum(vectors: List[Vector]) -> Vector:
# def scalar_multiply(c: float, v: Vector) -> Vector:
# def vector_mean(vectors: List[Vector]) -> Vector:
# def dot(v: Vector, w: Vector) -> float:
# def sum_of_squares(v: Vector) -> float:
# def magnitude(v: Vector) -> float:
# def squared_distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float: # type: ignore
# def shape(A: Matrix) -> Tuple[int, int]:
# def get_row(A: Matrix, i: int) -> Vector:
# def get_column(A: Matrix, j: int) -> Vector:
# def make_matrix(num_rows: int,
# num_cols: int,
# entry_fn: Callable[[int, int], float]) -> Matrix:
# def identity_matrix(n: int) -> Matrix:
# A = [[1, 2, 3], # A has 2 rows and 3 columns
# [4, 5, 6]]
# B = [[1, 2], # B has 3 rows and 2 columns
# [3, 4],
# [5, 6]]
#
# Path: scratch/linear_algebra.py
# def add(v: Vector, w: Vector) -> Vector:
# def subtract(v: Vector, w: Vector) -> Vector:
# def vector_sum(vectors: List[Vector]) -> Vector:
# def scalar_multiply(c: float, v: Vector) -> Vector:
# def vector_mean(vectors: List[Vector]) -> Vector:
# def dot(v: Vector, w: Vector) -> float:
# def sum_of_squares(v: Vector) -> float:
# def magnitude(v: Vector) -> float:
# def squared_distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float: # type: ignore
# def shape(A: Matrix) -> Tuple[int, int]:
# def get_row(A: Matrix, i: int) -> Vector:
# def get_column(A: Matrix, j: int) -> Vector:
# def make_matrix(num_rows: int,
# num_cols: int,
# entry_fn: Callable[[int, int], float]) -> Matrix:
# def identity_matrix(n: int) -> Matrix:
# A = [[1, 2, 3], # A has 2 rows and 3 columns
# [4, 5, 6]]
# B = [[1, 2], # B has 3 rows and 2 columns
# [3, 4],
# [5, 6]]
#
# Path: scratch/linear_algebra.py
# def magnitude(v: Vector) -> float:
# """Returns the magnitude (or length) of v"""
# return math.sqrt(sum_of_squares(v)) # math.sqrt is square root function
#
# def distance(v: Vector, w: Vector) -> float:
# """Computes the distance between v and w"""
# return math.sqrt(squared_distance(v, w))
. Output only the next line. | paths_to_prev_user = shortest_paths_to[prev_user_id] |
Based on the snippet: <|code_start|>
class User(NamedTuple):
id: int
name: str
users = [User(0, "Hero"), User(1, "Dunn"), User(2, "Sue"), User(3, "Chi"),
User(4, "Thor"), User(5, "Clive"), User(6, "Hicks"),
User(7, "Devin"), User(8, "Kate"), User(9, "Klein")]
friend_pairs = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# type alias for keeping track of Friendships
Friendships = Dict[int, List[int]]
friendships: Friendships = {user.id: [] for user in users}
for i, j in friend_pairs:
friendships[i].append(j)
friendships[j].append(i)
assert friendships[4] == [3, 5]
assert friendships[8] == [6, 7, 9]
<|code_end|>
, predict the immediate next line with the help of imports:
from typing import NamedTuple
from typing import Dict, List
from collections import deque
from scratch.linear_algebra import Matrix, make_matrix, shape
from scratch.linear_algebra import Vector, dot
from typing import Tuple
from scratch.linear_algebra import magnitude, distance
from collections import Counter
import random
import tqdm
and context (classes, functions, sometimes code) from other files:
# Path: scratch/linear_algebra.py
# def add(v: Vector, w: Vector) -> Vector:
# def subtract(v: Vector, w: Vector) -> Vector:
# def vector_sum(vectors: List[Vector]) -> Vector:
# def scalar_multiply(c: float, v: Vector) -> Vector:
# def vector_mean(vectors: List[Vector]) -> Vector:
# def dot(v: Vector, w: Vector) -> float:
# def sum_of_squares(v: Vector) -> float:
# def magnitude(v: Vector) -> float:
# def squared_distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float: # type: ignore
# def shape(A: Matrix) -> Tuple[int, int]:
# def get_row(A: Matrix, i: int) -> Vector:
# def get_column(A: Matrix, j: int) -> Vector:
# def make_matrix(num_rows: int,
# num_cols: int,
# entry_fn: Callable[[int, int], float]) -> Matrix:
# def identity_matrix(n: int) -> Matrix:
# A = [[1, 2, 3], # A has 2 rows and 3 columns
# [4, 5, 6]]
# B = [[1, 2], # B has 3 rows and 2 columns
# [3, 4],
# [5, 6]]
#
# Path: scratch/linear_algebra.py
# def add(v: Vector, w: Vector) -> Vector:
# def subtract(v: Vector, w: Vector) -> Vector:
# def vector_sum(vectors: List[Vector]) -> Vector:
# def scalar_multiply(c: float, v: Vector) -> Vector:
# def vector_mean(vectors: List[Vector]) -> Vector:
# def dot(v: Vector, w: Vector) -> float:
# def sum_of_squares(v: Vector) -> float:
# def magnitude(v: Vector) -> float:
# def squared_distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float: # type: ignore
# def shape(A: Matrix) -> Tuple[int, int]:
# def get_row(A: Matrix, i: int) -> Vector:
# def get_column(A: Matrix, j: int) -> Vector:
# def make_matrix(num_rows: int,
# num_cols: int,
# entry_fn: Callable[[int, int], float]) -> Matrix:
# def identity_matrix(n: int) -> Matrix:
# A = [[1, 2, 3], # A has 2 rows and 3 columns
# [4, 5, 6]]
# B = [[1, 2], # B has 3 rows and 2 columns
# [3, 4],
# [5, 6]]
#
# Path: scratch/linear_algebra.py
# def magnitude(v: Vector) -> float:
# """Returns the magnitude (or length) of v"""
# return math.sqrt(sum_of_squares(v)) # math.sqrt is square root function
#
# def distance(v: Vector, w: Vector) -> float:
# """Computes the distance between v and w"""
# return math.sqrt(squared_distance(v, w))
. Output only the next line. | Path = List[int] |
Predict the next line after this snippet: <|code_start|> new_paths_to_user = [path
for path in new_paths_to_user
if len(path) <= min_path_length
and path not in old_paths_to_user]
shortest_paths_to[user_id] = old_paths_to_user + new_paths_to_user
# Add never-seen neighbors to the frontier
frontier.extend((user_id, friend_id)
for friend_id in friendships[user_id]
if friend_id not in shortest_paths_to)
return shortest_paths_to
# For each from_user, for each to_user, a list of shortest paths.
shortest_paths = {user.id: shortest_paths_from(user.id, friendships)
for user in users}
betweenness_centrality = {user.id: 0.0 for user in users}
for source in users:
for target_id, paths in shortest_paths[source.id].items():
if source.id < target_id: # don't double count
num_paths = len(paths) # how many shortest paths?
contrib = 1 / num_paths # contribution to centrality
for path in paths:
for between_id in path:
if between_id not in [source.id, target_id]:
betweenness_centrality[between_id] += contrib
<|code_end|>
using the current file's imports:
from typing import NamedTuple
from typing import Dict, List
from collections import deque
from scratch.linear_algebra import Matrix, make_matrix, shape
from scratch.linear_algebra import Vector, dot
from typing import Tuple
from scratch.linear_algebra import magnitude, distance
from collections import Counter
import random
import tqdm
and any relevant context from other files:
# Path: scratch/linear_algebra.py
# def add(v: Vector, w: Vector) -> Vector:
# def subtract(v: Vector, w: Vector) -> Vector:
# def vector_sum(vectors: List[Vector]) -> Vector:
# def scalar_multiply(c: float, v: Vector) -> Vector:
# def vector_mean(vectors: List[Vector]) -> Vector:
# def dot(v: Vector, w: Vector) -> float:
# def sum_of_squares(v: Vector) -> float:
# def magnitude(v: Vector) -> float:
# def squared_distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float: # type: ignore
# def shape(A: Matrix) -> Tuple[int, int]:
# def get_row(A: Matrix, i: int) -> Vector:
# def get_column(A: Matrix, j: int) -> Vector:
# def make_matrix(num_rows: int,
# num_cols: int,
# entry_fn: Callable[[int, int], float]) -> Matrix:
# def identity_matrix(n: int) -> Matrix:
# A = [[1, 2, 3], # A has 2 rows and 3 columns
# [4, 5, 6]]
# B = [[1, 2], # B has 3 rows and 2 columns
# [3, 4],
# [5, 6]]
#
# Path: scratch/linear_algebra.py
# def add(v: Vector, w: Vector) -> Vector:
# def subtract(v: Vector, w: Vector) -> Vector:
# def vector_sum(vectors: List[Vector]) -> Vector:
# def scalar_multiply(c: float, v: Vector) -> Vector:
# def vector_mean(vectors: List[Vector]) -> Vector:
# def dot(v: Vector, w: Vector) -> float:
# def sum_of_squares(v: Vector) -> float:
# def magnitude(v: Vector) -> float:
# def squared_distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float: # type: ignore
# def shape(A: Matrix) -> Tuple[int, int]:
# def get_row(A: Matrix, i: int) -> Vector:
# def get_column(A: Matrix, j: int) -> Vector:
# def make_matrix(num_rows: int,
# num_cols: int,
# entry_fn: Callable[[int, int], float]) -> Matrix:
# def identity_matrix(n: int) -> Matrix:
# A = [[1, 2, 3], # A has 2 rows and 3 columns
# [4, 5, 6]]
# B = [[1, 2], # B has 3 rows and 2 columns
# [3, 4],
# [5, 6]]
#
# Path: scratch/linear_algebra.py
# def magnitude(v: Vector) -> float:
# """Returns the magnitude (or length) of v"""
# return math.sqrt(sum_of_squares(v)) # math.sqrt is square root function
#
# def distance(v: Vector, w: Vector) -> float:
# """Computes the distance between v and w"""
# return math.sqrt(squared_distance(v, w))
. Output only the next line. | def farness(user_id: int) -> float: |
Based on the snippet: <|code_start|> if old_paths_to_user:
min_path_length = len(old_paths_to_user[0])
else:
min_path_length = float('inf')
# Only keep paths that aren't too long and are actually new
new_paths_to_user = [path
for path in new_paths_to_user
if len(path) <= min_path_length
and path not in old_paths_to_user]
shortest_paths_to[user_id] = old_paths_to_user + new_paths_to_user
# Add never-seen neighbors to the frontier
frontier.extend((user_id, friend_id)
for friend_id in friendships[user_id]
if friend_id not in shortest_paths_to)
return shortest_paths_to
# For each from_user, for each to_user, a list of shortest paths.
shortest_paths = {user.id: shortest_paths_from(user.id, friendships)
for user in users}
betweenness_centrality = {user.id: 0.0 for user in users}
for source in users:
for target_id, paths in shortest_paths[source.id].items():
if source.id < target_id: # don't double count
num_paths = len(paths) # how many shortest paths?
<|code_end|>
, predict the immediate next line with the help of imports:
from typing import NamedTuple
from typing import Dict, List
from collections import deque
from scratch.linear_algebra import Matrix, make_matrix, shape
from scratch.linear_algebra import Vector, dot
from typing import Tuple
from scratch.linear_algebra import magnitude, distance
from collections import Counter
import random
import tqdm
and context (classes, functions, sometimes code) from other files:
# Path: scratch/linear_algebra.py
# def add(v: Vector, w: Vector) -> Vector:
# def subtract(v: Vector, w: Vector) -> Vector:
# def vector_sum(vectors: List[Vector]) -> Vector:
# def scalar_multiply(c: float, v: Vector) -> Vector:
# def vector_mean(vectors: List[Vector]) -> Vector:
# def dot(v: Vector, w: Vector) -> float:
# def sum_of_squares(v: Vector) -> float:
# def magnitude(v: Vector) -> float:
# def squared_distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float: # type: ignore
# def shape(A: Matrix) -> Tuple[int, int]:
# def get_row(A: Matrix, i: int) -> Vector:
# def get_column(A: Matrix, j: int) -> Vector:
# def make_matrix(num_rows: int,
# num_cols: int,
# entry_fn: Callable[[int, int], float]) -> Matrix:
# def identity_matrix(n: int) -> Matrix:
# A = [[1, 2, 3], # A has 2 rows and 3 columns
# [4, 5, 6]]
# B = [[1, 2], # B has 3 rows and 2 columns
# [3, 4],
# [5, 6]]
#
# Path: scratch/linear_algebra.py
# def add(v: Vector, w: Vector) -> Vector:
# def subtract(v: Vector, w: Vector) -> Vector:
# def vector_sum(vectors: List[Vector]) -> Vector:
# def scalar_multiply(c: float, v: Vector) -> Vector:
# def vector_mean(vectors: List[Vector]) -> Vector:
# def dot(v: Vector, w: Vector) -> float:
# def sum_of_squares(v: Vector) -> float:
# def magnitude(v: Vector) -> float:
# def squared_distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float: # type: ignore
# def shape(A: Matrix) -> Tuple[int, int]:
# def get_row(A: Matrix, i: int) -> Vector:
# def get_column(A: Matrix, j: int) -> Vector:
# def make_matrix(num_rows: int,
# num_cols: int,
# entry_fn: Callable[[int, int], float]) -> Matrix:
# def identity_matrix(n: int) -> Matrix:
# A = [[1, 2, 3], # A has 2 rows and 3 columns
# [4, 5, 6]]
# B = [[1, 2], # B has 3 rows and 2 columns
# [3, 4],
# [5, 6]]
#
# Path: scratch/linear_algebra.py
# def magnitude(v: Vector) -> float:
# """Returns the magnitude (or length) of v"""
# return math.sqrt(sum_of_squares(v)) # math.sqrt is square root function
#
# def distance(v: Vector, w: Vector) -> float:
# """Computes the distance between v and w"""
# return math.sqrt(squared_distance(v, w))
. Output only the next line. | contrib = 1 / num_paths # contribution to centrality |
Using the snippet: <|code_start|>
class User(NamedTuple):
id: int
name: str
users = [User(0, "Hero"), User(1, "Dunn"), User(2, "Sue"), User(3, "Chi"),
User(4, "Thor"), User(5, "Clive"), User(6, "Hicks"),
User(7, "Devin"), User(8, "Kate"), User(9, "Klein")]
friend_pairs = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# type alias for keeping track of Friendships
Friendships = Dict[int, List[int]]
friendships: Friendships = {user.id: [] for user in users}
for i, j in friend_pairs:
friendships[i].append(j)
friendships[j].append(i)
assert friendships[4] == [3, 5]
<|code_end|>
, determine the next line of code. You have imports:
from typing import NamedTuple
from typing import Dict, List
from collections import deque
from scratch.linear_algebra import Matrix, make_matrix, shape
from scratch.linear_algebra import Vector, dot
from typing import Tuple
from scratch.linear_algebra import magnitude, distance
from collections import Counter
import random
import tqdm
and context (class names, function names, or code) available:
# Path: scratch/linear_algebra.py
# def add(v: Vector, w: Vector) -> Vector:
# def subtract(v: Vector, w: Vector) -> Vector:
# def vector_sum(vectors: List[Vector]) -> Vector:
# def scalar_multiply(c: float, v: Vector) -> Vector:
# def vector_mean(vectors: List[Vector]) -> Vector:
# def dot(v: Vector, w: Vector) -> float:
# def sum_of_squares(v: Vector) -> float:
# def magnitude(v: Vector) -> float:
# def squared_distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float: # type: ignore
# def shape(A: Matrix) -> Tuple[int, int]:
# def get_row(A: Matrix, i: int) -> Vector:
# def get_column(A: Matrix, j: int) -> Vector:
# def make_matrix(num_rows: int,
# num_cols: int,
# entry_fn: Callable[[int, int], float]) -> Matrix:
# def identity_matrix(n: int) -> Matrix:
# A = [[1, 2, 3], # A has 2 rows and 3 columns
# [4, 5, 6]]
# B = [[1, 2], # B has 3 rows and 2 columns
# [3, 4],
# [5, 6]]
#
# Path: scratch/linear_algebra.py
# def add(v: Vector, w: Vector) -> Vector:
# def subtract(v: Vector, w: Vector) -> Vector:
# def vector_sum(vectors: List[Vector]) -> Vector:
# def scalar_multiply(c: float, v: Vector) -> Vector:
# def vector_mean(vectors: List[Vector]) -> Vector:
# def dot(v: Vector, w: Vector) -> float:
# def sum_of_squares(v: Vector) -> float:
# def magnitude(v: Vector) -> float:
# def squared_distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float: # type: ignore
# def shape(A: Matrix) -> Tuple[int, int]:
# def get_row(A: Matrix, i: int) -> Vector:
# def get_column(A: Matrix, j: int) -> Vector:
# def make_matrix(num_rows: int,
# num_cols: int,
# entry_fn: Callable[[int, int], float]) -> Matrix:
# def identity_matrix(n: int) -> Matrix:
# A = [[1, 2, 3], # A has 2 rows and 3 columns
# [4, 5, 6]]
# B = [[1, 2], # B has 3 rows and 2 columns
# [3, 4],
# [5, 6]]
#
# Path: scratch/linear_algebra.py
# def magnitude(v: Vector) -> float:
# """Returns the magnitude (or length) of v"""
# return math.sqrt(sum_of_squares(v)) # math.sqrt is square root function
#
# def distance(v: Vector, w: Vector) -> float:
# """Computes the distance between v and w"""
# return math.sqrt(squared_distance(v, w))
. Output only the next line. | assert friendships[8] == [6, 7, 9] |
Predict the next line after this snippet: <|code_start|>
# What's the shortest path to here that we've seen so far?
if old_paths_to_user:
min_path_length = len(old_paths_to_user[0])
else:
min_path_length = float('inf')
# Only keep paths that aren't too long and are actually new
new_paths_to_user = [path
for path in new_paths_to_user
if len(path) <= min_path_length
and path not in old_paths_to_user]
shortest_paths_to[user_id] = old_paths_to_user + new_paths_to_user
# Add never-seen neighbors to the frontier
frontier.extend((user_id, friend_id)
for friend_id in friendships[user_id]
if friend_id not in shortest_paths_to)
return shortest_paths_to
# For each from_user, for each to_user, a list of shortest paths.
shortest_paths = {user.id: shortest_paths_from(user.id, friendships)
for user in users}
betweenness_centrality = {user.id: 0.0 for user in users}
for source in users:
for target_id, paths in shortest_paths[source.id].items():
<|code_end|>
using the current file's imports:
from typing import NamedTuple
from typing import Dict, List
from collections import deque
from scratch.linear_algebra import Matrix, make_matrix, shape
from scratch.linear_algebra import Vector, dot
from typing import Tuple
from scratch.linear_algebra import magnitude, distance
from collections import Counter
import random
import tqdm
and any relevant context from other files:
# Path: scratch/linear_algebra.py
# def add(v: Vector, w: Vector) -> Vector:
# def subtract(v: Vector, w: Vector) -> Vector:
# def vector_sum(vectors: List[Vector]) -> Vector:
# def scalar_multiply(c: float, v: Vector) -> Vector:
# def vector_mean(vectors: List[Vector]) -> Vector:
# def dot(v: Vector, w: Vector) -> float:
# def sum_of_squares(v: Vector) -> float:
# def magnitude(v: Vector) -> float:
# def squared_distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float: # type: ignore
# def shape(A: Matrix) -> Tuple[int, int]:
# def get_row(A: Matrix, i: int) -> Vector:
# def get_column(A: Matrix, j: int) -> Vector:
# def make_matrix(num_rows: int,
# num_cols: int,
# entry_fn: Callable[[int, int], float]) -> Matrix:
# def identity_matrix(n: int) -> Matrix:
# A = [[1, 2, 3], # A has 2 rows and 3 columns
# [4, 5, 6]]
# B = [[1, 2], # B has 3 rows and 2 columns
# [3, 4],
# [5, 6]]
#
# Path: scratch/linear_algebra.py
# def add(v: Vector, w: Vector) -> Vector:
# def subtract(v: Vector, w: Vector) -> Vector:
# def vector_sum(vectors: List[Vector]) -> Vector:
# def scalar_multiply(c: float, v: Vector) -> Vector:
# def vector_mean(vectors: List[Vector]) -> Vector:
# def dot(v: Vector, w: Vector) -> float:
# def sum_of_squares(v: Vector) -> float:
# def magnitude(v: Vector) -> float:
# def squared_distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float: # type: ignore
# def shape(A: Matrix) -> Tuple[int, int]:
# def get_row(A: Matrix, i: int) -> Vector:
# def get_column(A: Matrix, j: int) -> Vector:
# def make_matrix(num_rows: int,
# num_cols: int,
# entry_fn: Callable[[int, int], float]) -> Matrix:
# def identity_matrix(n: int) -> Matrix:
# A = [[1, 2, 3], # A has 2 rows and 3 columns
# [4, 5, 6]]
# B = [[1, 2], # B has 3 rows and 2 columns
# [3, 4],
# [5, 6]]
#
# Path: scratch/linear_algebra.py
# def magnitude(v: Vector) -> float:
# """Returns the magnitude (or length) of v"""
# return math.sqrt(sum_of_squares(v)) # math.sqrt is square root function
#
# def distance(v: Vector, w: Vector) -> float:
# """Computes the distance between v and w"""
# return math.sqrt(squared_distance(v, w))
. Output only the next line. | if source.id < target_id: # don't double count |
Next line prediction: <|code_start|>
def raw_majority_vote(labels: List[str]) -> str:
votes = Counter(labels)
winner, _ = votes.most_common(1)[0]
<|code_end|>
. Use current file imports:
(from typing import List
from collections import Counter
from typing import NamedTuple
from scratch.linear_algebra import Vector, distance
from typing import Dict
from collections import defaultdict
from matplotlib import pyplot as plt
from scratch.machine_learning import split_data
from typing import Tuple
import random
import csv
import random
import tqdm)
and context including class names, function names, or small code snippets from other files:
# Path: scratch/linear_algebra.py
# def add(v: Vector, w: Vector) -> Vector:
# def subtract(v: Vector, w: Vector) -> Vector:
# def vector_sum(vectors: List[Vector]) -> Vector:
# def scalar_multiply(c: float, v: Vector) -> Vector:
# def vector_mean(vectors: List[Vector]) -> Vector:
# def dot(v: Vector, w: Vector) -> float:
# def sum_of_squares(v: Vector) -> float:
# def magnitude(v: Vector) -> float:
# def squared_distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float:
# def distance(v: Vector, w: Vector) -> float: # type: ignore
# def shape(A: Matrix) -> Tuple[int, int]:
# def get_row(A: Matrix, i: int) -> Vector:
# def get_column(A: Matrix, j: int) -> Vector:
# def make_matrix(num_rows: int,
# num_cols: int,
# entry_fn: Callable[[int, int], float]) -> Matrix:
# def identity_matrix(n: int) -> Matrix:
# A = [[1, 2, 3], # A has 2 rows and 3 columns
# [4, 5, 6]]
# B = [[1, 2], # B has 3 rows and 2 columns
# [3, 4],
# [5, 6]]
. Output only the next line. | return winner |
Predict the next line after this snippet: <|code_start|>Tensor = list
def shape(tensor: Tensor) -> List[int]:
sizes: List[int] = []
while isinstance(tensor, list):
sizes.append(len(tensor))
<|code_end|>
using the current file's imports:
from typing import List
from typing import Callable
from typing import Iterable, Tuple
from scratch.neural_networks import sigmoid
from scratch.probability import inverse_normal_cdf
from scratch.linear_algebra import dot
from typing import List
from scratch.linear_algebra import squared_distance
from scratch.neural_networks import binary_encode, fizz_buzz_encode, argmax
import operator
import random
import math
import json
import tqdm
import mnist
import matplotlib.pyplot as plt
import tqdm
and any relevant context from other files:
# Path: scratch/neural_networks.py
# def sigmoid(t: float) -> float:
# return 1 / (1 + math.exp(-t))
#
# Path: scratch/probability.py
# def inverse_normal_cdf(p: float,
# mu: float = 0,
# sigma: float = 1,
# tolerance: float = 0.00001) -> float:
# """Find approximate inverse using binary search"""
#
# # if not standard, compute standard and rescale
# if mu != 0 or sigma != 1:
# return mu + sigma * inverse_normal_cdf(p, tolerance=tolerance)
#
# low_z = -10.0 # normal_cdf(-10) is (very close to) 0
# hi_z = 10.0 # normal_cdf(10) is (very close to) 1
# while hi_z - low_z > tolerance:
# mid_z = (low_z + hi_z) / 2 # Consider the midpoint
# mid_p = normal_cdf(mid_z) # and the cdf's value there
# if mid_p < p:
# low_z = mid_z # Midpoint too low, search above it
# else:
# hi_z = mid_z # Midpoint too high, search below it
#
# return mid_z
#
# Path: scratch/linear_algebra.py
# def dot(v: Vector, w: Vector) -> float:
# """Computes v_1 * w_1 + ... + v_n * w_n"""
# assert len(v) == len(w), "vectors must be same length"
#
# return sum(v_i * w_i for v_i, w_i in zip(v, w))
#
# Path: scratch/linear_algebra.py
# def squared_distance(v: Vector, w: Vector) -> float:
# """Computes (v_1 - w_1) ** 2 + ... + (v_n - w_n) ** 2"""
# return sum_of_squares(subtract(v, w))
. Output only the next line. | tensor = tensor[0] |
Based on the snippet: <|code_start|>Tensor = list
def shape(tensor: Tensor) -> List[int]:
sizes: List[int] = []
while isinstance(tensor, list):
sizes.append(len(tensor))
tensor = tensor[0]
<|code_end|>
, predict the immediate next line with the help of imports:
from typing import List
from typing import Callable
from typing import Iterable, Tuple
from scratch.neural_networks import sigmoid
from scratch.probability import inverse_normal_cdf
from scratch.linear_algebra import dot
from typing import List
from scratch.linear_algebra import squared_distance
from scratch.neural_networks import binary_encode, fizz_buzz_encode, argmax
import operator
import random
import math
import json
import tqdm
import mnist
import matplotlib.pyplot as plt
import tqdm
and context (classes, functions, sometimes code) from other files:
# Path: scratch/neural_networks.py
# def sigmoid(t: float) -> float:
# return 1 / (1 + math.exp(-t))
#
# Path: scratch/probability.py
# def inverse_normal_cdf(p: float,
# mu: float = 0,
# sigma: float = 1,
# tolerance: float = 0.00001) -> float:
# """Find approximate inverse using binary search"""
#
# # if not standard, compute standard and rescale
# if mu != 0 or sigma != 1:
# return mu + sigma * inverse_normal_cdf(p, tolerance=tolerance)
#
# low_z = -10.0 # normal_cdf(-10) is (very close to) 0
# hi_z = 10.0 # normal_cdf(10) is (very close to) 1
# while hi_z - low_z > tolerance:
# mid_z = (low_z + hi_z) / 2 # Consider the midpoint
# mid_p = normal_cdf(mid_z) # and the cdf's value there
# if mid_p < p:
# low_z = mid_z # Midpoint too low, search above it
# else:
# hi_z = mid_z # Midpoint too high, search below it
#
# return mid_z
#
# Path: scratch/linear_algebra.py
# def dot(v: Vector, w: Vector) -> float:
# """Computes v_1 * w_1 + ... + v_n * w_n"""
# assert len(v) == len(w), "vectors must be same length"
#
# return sum(v_i * w_i for v_i, w_i in zip(v, w))
#
# Path: scratch/linear_algebra.py
# def squared_distance(v: Vector, w: Vector) -> float:
# """Computes (v_1 - w_1) ** 2 + ... + (v_n - w_n) ** 2"""
# return sum_of_squares(subtract(v, w))
. Output only the next line. | return sizes |
Using the snippet: <|code_start|># Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_LOCKED_FOR_READ = b"0-"
_LOCKED_FOR_WRITE = b"00"
_LOCK_TIME = 64
_PREFIX = b"NDB30"
warnings.filterwarnings("always", module=__name__)
log = logging.getLogger(__name__)
class ContextCache(dict):
"""A per-context in-memory entity cache.
This cache verifies the fetched entity has the correct key before
returning a result, in order to handle cases where the entity's key was
modified but the cache's key was not updated.
"""
def get_and_validate(self, key):
"""Verify that the entity's key has not changed since it was added
to the cache. If it has changed, consider this a cache miss.
See issue 13. http://goo.gl/jxjOP"""
<|code_end|>
, determine the next line of code. You have imports:
import functools
import itertools
import logging
import uuid
import warnings
from google.api_core import retry as core_retry
from google.cloud.ndb import _batch
from google.cloud.ndb import context as context_module
from google.cloud.ndb import tasklets
from google.cloud.ndb import utils
and context (class names, function names, or code) available:
# Path: google/cloud/ndb/_batch.py
# def get_batch(batch_cls, options=None):
# def idler(batch):
# def idle():
#
# Path: google/cloud/ndb/context.py
# @property
# def context(self):
# return self._context.get()
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
#
# Path: google/cloud/ndb/utils.py
# TRUTHY_STRINGS = {"t", "true", "y", "yes", "on", "1"}
# DEBUG = asbool(os.environ.get("NDB_DEBUG", False))
# def asbool(value):
# def code_info(*args, **kwargs):
# def decorator(*args, **kwargs):
# def frame_info(*args, **kwargs):
# def func_info(*args, **kwargs):
# def gen_info(*args, **kwargs):
# def get_stack(*args, **kwargs):
# def logging_debug(log, message, *args, **kwargs):
# def __init__(self, **kwargs):
# def __call__(self, wrapped):
# def wrapper(*args, **kwargs):
# def positional(max_pos_args):
# def positional_decorator(wrapped):
# def positional_wrapper(*args, **kwds):
# def tweak_logging(*args, **kwargs):
# def wrapping(*args, **kwargs):
# class keyword_only(object):
. Output only the next line. | entity = self[key] # May be None, meaning "doesn't exist". |
Continue the code snippet: <|code_start|>_LOCK_TIME = 64
_PREFIX = b"NDB30"
warnings.filterwarnings("always", module=__name__)
log = logging.getLogger(__name__)
class ContextCache(dict):
"""A per-context in-memory entity cache.
This cache verifies the fetched entity has the correct key before
returning a result, in order to handle cases where the entity's key was
modified but the cache's key was not updated.
"""
def get_and_validate(self, key):
"""Verify that the entity's key has not changed since it was added
to the cache. If it has changed, consider this a cache miss.
See issue 13. http://goo.gl/jxjOP"""
entity = self[key] # May be None, meaning "doesn't exist".
if entity is None or entity._key == key:
return entity
else:
del self[key]
raise KeyError(key)
def __repr__(self):
return "ContextCache()"
<|code_end|>
. Use current file imports:
import functools
import itertools
import logging
import uuid
import warnings
from google.api_core import retry as core_retry
from google.cloud.ndb import _batch
from google.cloud.ndb import context as context_module
from google.cloud.ndb import tasklets
from google.cloud.ndb import utils
and context (classes, functions, or code) from other files:
# Path: google/cloud/ndb/_batch.py
# def get_batch(batch_cls, options=None):
# def idler(batch):
# def idle():
#
# Path: google/cloud/ndb/context.py
# @property
# def context(self):
# return self._context.get()
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
#
# Path: google/cloud/ndb/utils.py
# TRUTHY_STRINGS = {"t", "true", "y", "yes", "on", "1"}
# DEBUG = asbool(os.environ.get("NDB_DEBUG", False))
# def asbool(value):
# def code_info(*args, **kwargs):
# def decorator(*args, **kwargs):
# def frame_info(*args, **kwargs):
# def func_info(*args, **kwargs):
# def gen_info(*args, **kwargs):
# def get_stack(*args, **kwargs):
# def logging_debug(log, message, *args, **kwargs):
# def __init__(self, **kwargs):
# def __call__(self, wrapped):
# def wrapper(*args, **kwargs):
# def positional(max_pos_args):
# def positional_decorator(wrapped):
# def positional_wrapper(*args, **kwds):
# def tweak_logging(*args, **kwargs):
# def wrapping(*args, **kwargs):
# class keyword_only(object):
. Output only the next line. | def _future_result(result): |
Predict the next line for this snippet: <|code_start|>
_LOCKED_FOR_READ = b"0-"
_LOCKED_FOR_WRITE = b"00"
_LOCK_TIME = 64
_PREFIX = b"NDB30"
warnings.filterwarnings("always", module=__name__)
log = logging.getLogger(__name__)
class ContextCache(dict):
"""A per-context in-memory entity cache.
This cache verifies the fetched entity has the correct key before
returning a result, in order to handle cases where the entity's key was
modified but the cache's key was not updated.
"""
def get_and_validate(self, key):
"""Verify that the entity's key has not changed since it was added
to the cache. If it has changed, consider this a cache miss.
See issue 13. http://goo.gl/jxjOP"""
entity = self[key] # May be None, meaning "doesn't exist".
if entity is None or entity._key == key:
return entity
else:
del self[key]
raise KeyError(key)
<|code_end|>
with the help of current file imports:
import functools
import itertools
import logging
import uuid
import warnings
from google.api_core import retry as core_retry
from google.cloud.ndb import _batch
from google.cloud.ndb import context as context_module
from google.cloud.ndb import tasklets
from google.cloud.ndb import utils
and context from other files:
# Path: google/cloud/ndb/_batch.py
# def get_batch(batch_cls, options=None):
# def idler(batch):
# def idle():
#
# Path: google/cloud/ndb/context.py
# @property
# def context(self):
# return self._context.get()
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
#
# Path: google/cloud/ndb/utils.py
# TRUTHY_STRINGS = {"t", "true", "y", "yes", "on", "1"}
# DEBUG = asbool(os.environ.get("NDB_DEBUG", False))
# def asbool(value):
# def code_info(*args, **kwargs):
# def decorator(*args, **kwargs):
# def frame_info(*args, **kwargs):
# def func_info(*args, **kwargs):
# def gen_info(*args, **kwargs):
# def get_stack(*args, **kwargs):
# def logging_debug(log, message, *args, **kwargs):
# def __init__(self, **kwargs):
# def __call__(self, wrapped):
# def wrapper(*args, **kwargs):
# def positional(max_pos_args):
# def positional_decorator(wrapped):
# def positional_wrapper(*args, **kwds):
# def tweak_logging(*args, **kwargs):
# def wrapping(*args, **kwargs):
# class keyword_only(object):
, which may contain function names, class names, or code. Output only the next line. | def __repr__(self): |
Continue the code snippet: <|code_start|># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_LOCKED_FOR_READ = b"0-"
_LOCKED_FOR_WRITE = b"00"
_LOCK_TIME = 64
_PREFIX = b"NDB30"
warnings.filterwarnings("always", module=__name__)
log = logging.getLogger(__name__)
class ContextCache(dict):
"""A per-context in-memory entity cache.
This cache verifies the fetched entity has the correct key before
returning a result, in order to handle cases where the entity's key was
modified but the cache's key was not updated.
"""
def get_and_validate(self, key):
"""Verify that the entity's key has not changed since it was added
to the cache. If it has changed, consider this a cache miss.
See issue 13. http://goo.gl/jxjOP"""
entity = self[key] # May be None, meaning "doesn't exist".
if entity is None or entity._key == key:
<|code_end|>
. Use current file imports:
import functools
import itertools
import logging
import uuid
import warnings
from google.api_core import retry as core_retry
from google.cloud.ndb import _batch
from google.cloud.ndb import context as context_module
from google.cloud.ndb import tasklets
from google.cloud.ndb import utils
and context (classes, functions, or code) from other files:
# Path: google/cloud/ndb/_batch.py
# def get_batch(batch_cls, options=None):
# def idler(batch):
# def idle():
#
# Path: google/cloud/ndb/context.py
# @property
# def context(self):
# return self._context.get()
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
#
# Path: google/cloud/ndb/utils.py
# TRUTHY_STRINGS = {"t", "true", "y", "yes", "on", "1"}
# DEBUG = asbool(os.environ.get("NDB_DEBUG", False))
# def asbool(value):
# def code_info(*args, **kwargs):
# def decorator(*args, **kwargs):
# def frame_info(*args, **kwargs):
# def func_info(*args, **kwargs):
# def gen_info(*args, **kwargs):
# def get_stack(*args, **kwargs):
# def logging_debug(log, message, *args, **kwargs):
# def __init__(self, **kwargs):
# def __call__(self, wrapped):
# def wrapper(*args, **kwargs):
# def positional(max_pos_args):
# def positional_decorator(wrapped):
# def positional_wrapper(*args, **kwds):
# def tweak_logging(*args, **kwargs):
# def wrapping(*args, **kwargs):
# class keyword_only(object):
. Output only the next line. | return entity |
Predict the next line for this snippet: <|code_start|>
return "foo"
retry = _retry.retry_async(callback)
assert retry().result() == "foo"
@staticmethod
@mock.patch("google.cloud.ndb.tasklets.sleep", mock_sleep)
@pytest.mark.usefixtures("in_context")
def test_nested_retry_with_exception():
error = Exception("Fail")
def callback():
def nested_callback():
raise error
nested = _retry.retry_async(nested_callback, retries=1)
return nested()
with pytest.raises(core_exceptions.RetryError):
retry = _retry.retry_async(callback, retries=1)
retry().result()
@staticmethod
@pytest.mark.usefixtures("in_context")
def test_success_callback_is_tasklet():
tasklet_future = tasklets.Future()
@tasklets.tasklet
def callback():
<|code_end|>
with the help of current file imports:
import itertools
import mock
import pytest
from unittest import mock
from google.api_core import exceptions as core_exceptions
from google.cloud.ndb import _retry
from google.cloud.ndb import tasklets
from . import utils
and context from other files:
# Path: google/cloud/ndb/_retry.py
# _DEFAULT_INITIAL_DELAY = 1.0 # seconds
# _DEFAULT_MAXIMUM_DELAY = 60.0 # seconds
# _DEFAULT_DELAY_MULTIPLIER = 2.0
# _DEFAULT_RETRIES = 3
# TRANSIENT_ERRORS = (
# core_exceptions.ServiceUnavailable,
# core_exceptions.InternalServerError,
# core_exceptions.Aborted,
# core_exceptions.Unknown,
# )
# def wraps_safely(obj, attr_names=functools.WRAPPER_ASSIGNMENTS):
# def retry_async(callback, retries=_DEFAULT_RETRIES):
# def retry_wrapper(*args, **kwargs):
# def is_transient_error(error):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
, which may contain function names, class names, or code. Output only the next line. | result = yield tasklet_future |
Predict the next line after this snippet: <|code_start|># Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def test___all__():
utils.verify___all__(msgprop)
class TestEnumProperty:
@staticmethod
<|code_end|>
using the current file's imports:
import pytest
from google.cloud.ndb import msgprop
from . import utils
and any relevant context from other files:
# Path: google/cloud/ndb/msgprop.py
# class EnumProperty(object):
# class MessageProperty(object):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
. Output only the next line. | def test_constructor(): |
Predict the next line for this snippet: <|code_start|> assert not batch.idle_called
different_options = {"food": "barn"}
assert _batch.get_batch(MockBatch, different_options) is not batch
assert _batch.get_batch(MockBatch) is not batch
assert _batch.get_batch(MockBatch, options) is batch
batch._full = True
batch2 = _batch.get_batch(MockBatch, options)
assert batch2 is not batch
assert not batch2.idle_called
_eventloop.run()
assert batch.idle_called
assert batch2.idle_called
class MockBatch:
_full = False
def __init__(self, options):
self.options = options
self.idle_called = False
def idle_callback(self):
self.idle_called = True
def full(self):
<|code_end|>
with the help of current file imports:
import pytest
from google.cloud.ndb import _batch
from google.cloud.ndb import _eventloop
and context from other files:
# Path: google/cloud/ndb/_batch.py
# def get_batch(batch_cls, options=None):
# def idler(batch):
# def idle():
#
# Path: google/cloud/ndb/_eventloop.py
# class EventLoop(object):
# def __init__(self):
# def clear(self):
# def insort_event_right(self, event):
# def call_soon(self, callback, *args, **kwargs):
# def queue_call(self, delay, callback, *args, **kwargs):
# def queue_rpc(self, rpc, callback):
# def rpc_callback(rpc):
# def add_idle(self, callback, *args, **kwargs):
# def run_idle(self):
# def _run_current(self):
# def run0(self):
# def run1(self):
# def run(self):
# def get_event_loop():
# def add_idle(callback, *args, **kwargs):
# def call_soon(callback, *args, **kwargs):
# def queue_call(delay, callback, *args, **kwargs):
# def queue_rpc(future, rpc):
# def run():
# def run1():
, which may contain function names, class names, or code. Output only the next line. | return self._full |
Based on the snippet: <|code_start|> def test_constructor():
with pytest.raises(NotImplementedError):
tasklets.QueueFuture()
class TestReducingFuture:
@staticmethod
def test_constructor():
with pytest.raises(NotImplementedError):
tasklets.ReducingFuture()
def test_Return():
assert not issubclass(tasklets.Return, StopIteration)
assert issubclass(tasklets.Return, Exception)
class TestSerialQueueFuture:
@staticmethod
def test_constructor():
with pytest.raises(NotImplementedError):
tasklets.SerialQueueFuture()
def test_set_context():
with pytest.raises(NotImplementedError):
tasklets.set_context()
@pytest.mark.usefixtures("in_context")
<|code_end|>
, predict the immediate next line with the help of imports:
from unittest import mock
from google.cloud.ndb import context as context_module
from google.cloud.ndb import _eventloop
from google.cloud.ndb import exceptions
from google.cloud.ndb import _remote
from google.cloud.ndb import tasklets
from . import utils
import mock
import pytest
and context (classes, functions, sometimes code) from other files:
# Path: google/cloud/ndb/context.py
# @property
# def context(self):
# return self._context.get()
#
# Path: google/cloud/ndb/_eventloop.py
# class EventLoop(object):
# def __init__(self):
# def clear(self):
# def insort_event_right(self, event):
# def call_soon(self, callback, *args, **kwargs):
# def queue_call(self, delay, callback, *args, **kwargs):
# def queue_rpc(self, rpc, callback):
# def rpc_callback(rpc):
# def add_idle(self, callback, *args, **kwargs):
# def run_idle(self):
# def _run_current(self):
# def run0(self):
# def run1(self):
# def run(self):
# def get_event_loop():
# def add_idle(callback, *args, **kwargs):
# def call_soon(callback, *args, **kwargs):
# def queue_call(delay, callback, *args, **kwargs):
# def queue_rpc(future, rpc):
# def run():
# def run1():
#
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
#
# Path: google/cloud/ndb/_remote.py
# class RemoteCall(object):
# def __init__(self, future, info):
# def record_time(future):
# def __repr__(self):
# def exception(self):
# def result(self):
# def add_done_callback(self, callback):
# def wrapper(rpc):
# def cancel(self):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
. Output only the next line. | def test_synctasklet(): |
Predict the next line after this snippet: <|code_start|> future.set_exception(error)
@staticmethod
@mock.patch("google.cloud.ndb.tasklets._eventloop")
def test_wait(_eventloop):
def side_effects(future):
yield True
yield True
future.set_result(42)
yield True
future = tasklets.Future()
_eventloop.run1.side_effect = side_effects(future)
future.wait()
assert future.result() == 42
assert _eventloop.run1.call_count == 3
@staticmethod
@mock.patch("google.cloud.ndb.tasklets._eventloop")
def test_wait_loop_exhausted(_eventloop):
future = tasklets.Future()
_eventloop.run1.return_value = False
with pytest.raises(RuntimeError):
future.wait()
@staticmethod
@mock.patch("google.cloud.ndb.tasklets._eventloop")
def test_check_success(_eventloop):
def side_effects(future):
yield True
<|code_end|>
using the current file's imports:
from unittest import mock
from google.cloud.ndb import context as context_module
from google.cloud.ndb import _eventloop
from google.cloud.ndb import exceptions
from google.cloud.ndb import _remote
from google.cloud.ndb import tasklets
from . import utils
import mock
import pytest
and any relevant context from other files:
# Path: google/cloud/ndb/context.py
# @property
# def context(self):
# return self._context.get()
#
# Path: google/cloud/ndb/_eventloop.py
# class EventLoop(object):
# def __init__(self):
# def clear(self):
# def insort_event_right(self, event):
# def call_soon(self, callback, *args, **kwargs):
# def queue_call(self, delay, callback, *args, **kwargs):
# def queue_rpc(self, rpc, callback):
# def rpc_callback(rpc):
# def add_idle(self, callback, *args, **kwargs):
# def run_idle(self):
# def _run_current(self):
# def run0(self):
# def run1(self):
# def run(self):
# def get_event_loop():
# def add_idle(callback, *args, **kwargs):
# def call_soon(callback, *args, **kwargs):
# def queue_call(delay, callback, *args, **kwargs):
# def queue_rpc(future, rpc):
# def run():
# def run1():
#
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
#
# Path: google/cloud/ndb/_remote.py
# class RemoteCall(object):
# def __init__(self, future, info):
# def record_time(future):
# def __repr__(self):
# def exception(self):
# def result(self):
# def add_done_callback(self, callback):
# def wrapper(rpc):
# def cancel(self):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
. Output only the next line. | yield True |
Given the code snippet: <|code_start|> assert future.result() == 42
@staticmethod
def test_cancelled():
future = tasklets.Future()
assert future.cancelled() is False
@staticmethod
@pytest.mark.usefixtures("in_context")
def test_wait_any():
futures = [tasklets.Future() for _ in range(3)]
def callback():
futures[1].set_result(42)
_eventloop.add_idle(callback)
future = tasklets.Future.wait_any(futures)
assert future is futures[1]
assert future.result() == 42
@staticmethod
@pytest.mark.usefixtures("in_context")
def test_wait_any_loop_exhausted():
futures = [tasklets.Future() for _ in range(3)]
with pytest.raises(RuntimeError):
tasklets.Future.wait_any(futures)
@staticmethod
<|code_end|>
, generate the next line using the imports in this file:
from unittest import mock
from google.cloud.ndb import context as context_module
from google.cloud.ndb import _eventloop
from google.cloud.ndb import exceptions
from google.cloud.ndb import _remote
from google.cloud.ndb import tasklets
from . import utils
import mock
import pytest
and context (functions, classes, or occasionally code) from other files:
# Path: google/cloud/ndb/context.py
# @property
# def context(self):
# return self._context.get()
#
# Path: google/cloud/ndb/_eventloop.py
# class EventLoop(object):
# def __init__(self):
# def clear(self):
# def insort_event_right(self, event):
# def call_soon(self, callback, *args, **kwargs):
# def queue_call(self, delay, callback, *args, **kwargs):
# def queue_rpc(self, rpc, callback):
# def rpc_callback(rpc):
# def add_idle(self, callback, *args, **kwargs):
# def run_idle(self):
# def _run_current(self):
# def run0(self):
# def run1(self):
# def run(self):
# def get_event_loop():
# def add_idle(callback, *args, **kwargs):
# def call_soon(callback, *args, **kwargs):
# def queue_call(delay, callback, *args, **kwargs):
# def queue_rpc(future, rpc):
# def run():
# def run1():
#
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
#
# Path: google/cloud/ndb/_remote.py
# class RemoteCall(object):
# def __init__(self, future, info):
# def record_time(future):
# def __repr__(self):
# def exception(self):
# def result(self):
# def add_done_callback(self, callback):
# def wrapper(rpc):
# def cancel(self):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
. Output only the next line. | def test_wait_any_no_futures(): |
Given the code snippet: <|code_start|> with pytest.raises(NotImplementedError):
tasklets.ReducingFuture()
def test_Return():
assert not issubclass(tasklets.Return, StopIteration)
assert issubclass(tasklets.Return, Exception)
class TestSerialQueueFuture:
@staticmethod
def test_constructor():
with pytest.raises(NotImplementedError):
tasklets.SerialQueueFuture()
def test_set_context():
with pytest.raises(NotImplementedError):
tasklets.set_context()
@pytest.mark.usefixtures("in_context")
def test_synctasklet():
@tasklets.synctasklet
def generator_function(value):
future = tasklets.Future(value)
future.set_result(value)
x = yield future
raise tasklets.Return(x + 3)
<|code_end|>
, generate the next line using the imports in this file:
from unittest import mock
from google.cloud.ndb import context as context_module
from google.cloud.ndb import _eventloop
from google.cloud.ndb import exceptions
from google.cloud.ndb import _remote
from google.cloud.ndb import tasklets
from . import utils
import mock
import pytest
and context (functions, classes, or occasionally code) from other files:
# Path: google/cloud/ndb/context.py
# @property
# def context(self):
# return self._context.get()
#
# Path: google/cloud/ndb/_eventloop.py
# class EventLoop(object):
# def __init__(self):
# def clear(self):
# def insort_event_right(self, event):
# def call_soon(self, callback, *args, **kwargs):
# def queue_call(self, delay, callback, *args, **kwargs):
# def queue_rpc(self, rpc, callback):
# def rpc_callback(rpc):
# def add_idle(self, callback, *args, **kwargs):
# def run_idle(self):
# def _run_current(self):
# def run0(self):
# def run1(self):
# def run(self):
# def get_event_loop():
# def add_idle(callback, *args, **kwargs):
# def call_soon(callback, *args, **kwargs):
# def queue_call(delay, callback, *args, **kwargs):
# def queue_rpc(future, rpc):
# def run():
# def run1():
#
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
#
# Path: google/cloud/ndb/_remote.py
# class RemoteCall(object):
# def __init__(self, future, info):
# def record_time(future):
# def __repr__(self):
# def exception(self):
# def result(self):
# def add_done_callback(self, callback):
# def wrapper(rpc):
# def cancel(self):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
. Output only the next line. | result = generator_function(8) |
Given the code snippet: <|code_start|> future.check_success()
assert error_context.value is error
@staticmethod
@mock.patch("google.cloud.ndb.tasklets._eventloop")
def test_result_block_for_result(_eventloop):
def side_effects(future):
yield True
yield True
future.set_result(42)
yield True
future = tasklets.Future()
_eventloop.run1.side_effect = side_effects(future)
assert future.result() == 42
assert _eventloop.run1.call_count == 3
@staticmethod
@pytest.mark.usefixtures("in_context")
def test_cancel():
# Integration test. Actually test that a cancel propagates properly.
rpc = tasklets.Future("Fake RPC")
wrapped_rpc = _remote.RemoteCall(rpc, "Wrapped Fake RPC")
@tasklets.tasklet
def inner_tasklet():
yield wrapped_rpc
@tasklets.tasklet
<|code_end|>
, generate the next line using the imports in this file:
from unittest import mock
from google.cloud.ndb import context as context_module
from google.cloud.ndb import _eventloop
from google.cloud.ndb import exceptions
from google.cloud.ndb import _remote
from google.cloud.ndb import tasklets
from . import utils
import mock
import pytest
and context (functions, classes, or occasionally code) from other files:
# Path: google/cloud/ndb/context.py
# @property
# def context(self):
# return self._context.get()
#
# Path: google/cloud/ndb/_eventloop.py
# class EventLoop(object):
# def __init__(self):
# def clear(self):
# def insort_event_right(self, event):
# def call_soon(self, callback, *args, **kwargs):
# def queue_call(self, delay, callback, *args, **kwargs):
# def queue_rpc(self, rpc, callback):
# def rpc_callback(rpc):
# def add_idle(self, callback, *args, **kwargs):
# def run_idle(self):
# def _run_current(self):
# def run0(self):
# def run1(self):
# def run(self):
# def get_event_loop():
# def add_idle(callback, *args, **kwargs):
# def call_soon(callback, *args, **kwargs):
# def queue_call(delay, callback, *args, **kwargs):
# def queue_rpc(future, rpc):
# def run():
# def run1():
#
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
#
# Path: google/cloud/ndb/_remote.py
# class RemoteCall(object):
# def __init__(self, future, info):
# def record_time(future):
# def __repr__(self):
# def exception(self):
# def result(self):
# def add_done_callback(self, callback):
# def wrapper(rpc):
# def cancel(self):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
. Output only the next line. | def outer_tasklet(): |
Continue the code snippet: <|code_start|> def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 11:
self.add_element().TryMerge(d)
continue
if tt == 0:
raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
class Reference(ProtocolBuffer.ProtocolMessage):
has_app_ = 0
app_ = ""
has_name_space_ = 0
name_space_ = ""
has_path_ = 0
has_database_id_ = 0
database_id_ = ""
def __init__(self):
self.path_ = Path()
@property
def app(self):
return self.app_
def set_app(self, x):
self.has_app_ = 1
<|code_end|>
. Use current file imports:
from google.cloud.ndb import _legacy_protocol_buffer as ProtocolBuffer
and context (classes, functions, or code) from other files:
# Path: google/cloud/ndb/_legacy_protocol_buffer.py
# class ProtocolBufferDecodeError(Exception):
# class ProtocolMessage:
# class Decoder:
# def MergePartialFromString(self, s):
# def __init__(self, buf, idx, limit):
# def avail(self):
# def buffer(self):
# def pos(self):
# def skip(self, n):
# def skipData(self, tag):
# def get8(self):
# def get16(self):
# def get32(self):
# def get64(self):
# def getVarInt32(self):
# def getVarInt64(self):
# def getVarUint64(self):
# def getDouble(self):
# def getBoolean(self):
# def getPrefixedString(self):
# NUMERIC = 0
# DOUBLE = 1
# STRING = 2
# STARTGROUP = 3
# ENDGROUP = 4
# FLOAT = 5
# MAX_TYPE = 6
. Output only the next line. | self.app_ = x |
Next line prediction: <|code_start|> with pytest.raises(TypeError):
test_func(1, 2, 3)
with pytest.raises(TypeError):
test_func2(1, 2)
assert test_func(4, 5, x=0) == (4, 5)
assert test_func(6) == (6, 2)
assert test_func2(6) == 6
def test_keyword_only():
@utils.keyword_only(foo=1, bar=2, baz=3)
def test_kwonly(**kwargs):
return kwargs["foo"], kwargs["bar"], kwargs["baz"]
with pytest.raises(TypeError):
test_kwonly(faz=4)
assert test_kwonly() == (1, 2, 3)
assert test_kwonly(foo=3, bar=5, baz=7) == (3, 5, 7)
assert test_kwonly(baz=7) == (1, 2, 7)
def test_threading_local():
assert utils.threading_local is threading.local
def test_tweak_logging():
<|code_end|>
. Use current file imports:
(import threading
import mock
import pytest
from unittest import mock
from google.cloud.ndb import utils)
and context including class names, function names, or small code snippets from other files:
# Path: google/cloud/ndb/utils.py
# TRUTHY_STRINGS = {"t", "true", "y", "yes", "on", "1"}
# DEBUG = asbool(os.environ.get("NDB_DEBUG", False))
# def asbool(value):
# def code_info(*args, **kwargs):
# def decorator(*args, **kwargs):
# def frame_info(*args, **kwargs):
# def func_info(*args, **kwargs):
# def gen_info(*args, **kwargs):
# def get_stack(*args, **kwargs):
# def logging_debug(log, message, *args, **kwargs):
# def __init__(self, **kwargs):
# def __call__(self, wrapped):
# def wrapper(*args, **kwargs):
# def positional(max_pos_args):
# def positional_decorator(wrapped):
# def positional_wrapper(*args, **kwds):
# def tweak_logging(*args, **kwargs):
# def wrapping(*args, **kwargs):
# class keyword_only(object):
. Output only the next line. | with pytest.raises(NotImplementedError): |
Predict the next line for this snippet: <|code_start|>
assert batch.expires == 5
with in_context.new(global_cache=cache).use():
batch.idle_callback()
cache.set.assert_called_once_with({b"foo": b"one", b"bar": b"two"}, expires=5)
assert future1.result() is None
assert future2.result() is None
@staticmethod
def test_add_and_idle_and_done_callbacks_w_error(in_context):
error = Exception("spurious error")
cache = mock.Mock(spec=("set",))
cache.set.return_value = tasklets.Future()
cache.set.return_value.set_exception(error)
batch = _cache._GlobalCacheSetBatch({})
future1 = batch.add(b"foo", b"one")
future2 = batch.add(b"bar", b"two")
with in_context.new(global_cache=cache).use():
batch.idle_callback()
cache.set.assert_called_once_with(
{b"foo": b"one", b"bar": b"two"}, expires=None
)
assert future1.exception() is error
assert future2.exception() is error
<|code_end|>
with the help of current file imports:
import warnings
import mock
import pytest
from unittest import mock
from google.cloud.ndb import _cache
from google.cloud.ndb import tasklets
and context from other files:
# Path: google/cloud/ndb/_cache.py
# _LOCKED_FOR_READ = b"0-"
# _LOCKED_FOR_WRITE = b"00"
# _LOCK_TIME = 64
# _PREFIX = b"NDB30"
# class ContextCache(dict):
# class _GlobalCacheBatch(object):
# class _GlobalCacheGetBatch(_GlobalCacheBatch):
# class _GlobalCacheSetBatch(_GlobalCacheBatch):
# class _GlobalCacheSetIfNotExistsBatch(_GlobalCacheSetBatch):
# class _GlobalCacheDeleteBatch(_GlobalCacheBatch):
# class _GlobalCacheWatchBatch(_GlobalCacheSetBatch):
# class _GlobalCacheUnwatchBatch(_GlobalCacheDeleteBatch):
# class _GlobalCacheCompareAndSwapBatch(_GlobalCacheSetBatch):
# def get_and_validate(self, key):
# def __repr__(self):
# def _future_result(result):
# def _future_exception(error):
# def _global_cache():
# def full(self):
# def idle_callback(self):
# def done_callback(self, cache_call):
# def make_call(self):
# def future_info(self, key):
# def _handle_transient_errors(read=False):
# def wrap(wrapped):
# def retry(wrapped, transient_errors):
# def retry_wrapper(key, *args, **kwargs):
# def wrapper(key, *args, **kwargs):
# def _global_get(key):
# def __init__(self, ignore_options):
# def add(self, key):
# def done_callback(self, cache_call):
# def make_call(self):
# def future_info(self, key):
# def global_set(key, value, expires=None, read=False):
# def __init__(self, options):
# def done_callback(self, cache_call):
# def add(self, key, value):
# def make_call(self):
# def future_info(self, key, value):
# def global_set_if_not_exists(key, value, expires=None):
# def add(self, key, value):
# def make_call(self):
# def future_info(self, key, value):
# def _global_delete(key):
# def __init__(self, ignore_options):
# def add(self, key):
# def make_call(self):
# def future_info(self, key):
# def _global_watch(key, value):
# def make_call(self):
# def future_info(self, key, value):
# def global_unwatch(key):
# def make_call(self):
# def future_info(self, key):
# def _global_compare_and_swap(key, value, expires=None):
# def make_call(self):
# def future_info(self, key, value):
# def global_lock_for_read(key, prev_value):
# def global_lock_for_write(key):
# def new_value(old_value):
# def global_unlock_for_write(key, lock):
# def new_value(old_value):
# def _update_key(key, new_value):
# def is_locked_value(value):
# def global_cache_key(key):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
, which may contain function names, class names, or code. Output only the next line. | @staticmethod |
Using the snippet: <|code_start|> batch.add.return_value = future
_global_cache.return_value = mock.Mock(
transient_errors=(),
strict_write=False,
spec=("transient_errors", "strict_write"),
)
future = _cache.global_set(b"key", b"value", expires=5)
assert future.result() == "hi mom!"
_batch.get_batch.assert_called_once_with(
_cache._GlobalCacheSetBatch, {"expires": 5}
)
batch.add.assert_called_once_with(b"key", b"value")
class Test_GlobalCacheSetBatch:
@staticmethod
def test_add_duplicate_key_and_value():
batch = _cache._GlobalCacheSetBatch({})
future1 = batch.add(b"foo", b"one")
future2 = batch.add(b"foo", b"one")
assert future1 is future2
@staticmethod
def test_add_and_idle_and_done_callbacks(in_context):
cache = mock.Mock(spec=("set",))
cache.set.return_value = []
batch = _cache._GlobalCacheSetBatch({})
future1 = batch.add(b"foo", b"one")
<|code_end|>
, determine the next line of code. You have imports:
import warnings
import mock
import pytest
from unittest import mock
from google.cloud.ndb import _cache
from google.cloud.ndb import tasklets
and context (class names, function names, or code) available:
# Path: google/cloud/ndb/_cache.py
# _LOCKED_FOR_READ = b"0-"
# _LOCKED_FOR_WRITE = b"00"
# _LOCK_TIME = 64
# _PREFIX = b"NDB30"
# class ContextCache(dict):
# class _GlobalCacheBatch(object):
# class _GlobalCacheGetBatch(_GlobalCacheBatch):
# class _GlobalCacheSetBatch(_GlobalCacheBatch):
# class _GlobalCacheSetIfNotExistsBatch(_GlobalCacheSetBatch):
# class _GlobalCacheDeleteBatch(_GlobalCacheBatch):
# class _GlobalCacheWatchBatch(_GlobalCacheSetBatch):
# class _GlobalCacheUnwatchBatch(_GlobalCacheDeleteBatch):
# class _GlobalCacheCompareAndSwapBatch(_GlobalCacheSetBatch):
# def get_and_validate(self, key):
# def __repr__(self):
# def _future_result(result):
# def _future_exception(error):
# def _global_cache():
# def full(self):
# def idle_callback(self):
# def done_callback(self, cache_call):
# def make_call(self):
# def future_info(self, key):
# def _handle_transient_errors(read=False):
# def wrap(wrapped):
# def retry(wrapped, transient_errors):
# def retry_wrapper(key, *args, **kwargs):
# def wrapper(key, *args, **kwargs):
# def _global_get(key):
# def __init__(self, ignore_options):
# def add(self, key):
# def done_callback(self, cache_call):
# def make_call(self):
# def future_info(self, key):
# def global_set(key, value, expires=None, read=False):
# def __init__(self, options):
# def done_callback(self, cache_call):
# def add(self, key, value):
# def make_call(self):
# def future_info(self, key, value):
# def global_set_if_not_exists(key, value, expires=None):
# def add(self, key, value):
# def make_call(self):
# def future_info(self, key, value):
# def _global_delete(key):
# def __init__(self, ignore_options):
# def add(self, key):
# def make_call(self):
# def future_info(self, key):
# def _global_watch(key, value):
# def make_call(self):
# def future_info(self, key, value):
# def global_unwatch(key):
# def make_call(self):
# def future_info(self, key):
# def _global_compare_and_swap(key, value, expires=None):
# def make_call(self):
# def future_info(self, key, value):
# def global_lock_for_read(key, prev_value):
# def global_lock_for_write(key):
# def new_value(old_value):
# def global_unlock_for_write(key, lock):
# def new_value(old_value):
# def _update_key(key, new_value):
# def is_locked_value(value):
# def global_cache_key(key):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
. Output only the next line. | future2 = batch.add(b"bar", b"two") |
Given snippet: <|code_start|># you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Retry functions."""
_DEFAULT_INITIAL_DELAY = 1.0 # seconds
_DEFAULT_MAXIMUM_DELAY = 60.0 # seconds
_DEFAULT_DELAY_MULTIPLIER = 2.0
_DEFAULT_RETRIES = 3
def wraps_safely(obj, attr_names=functools.WRAPPER_ASSIGNMENTS):
"""Python 2.7 functools.wraps has a bug where attributes like ``module``
are not copied to the wrappers and thus cause attribute errors. This
wrapper prevents that problem."""
return functools.wraps(
obj, assigned=(name for name in attr_names if hasattr(obj, name))
)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import functools
import itertools
from google.api_core import retry as core_retry
from google.api_core import exceptions as core_exceptions
from google.cloud.ndb import exceptions
from google.cloud.ndb import tasklets
from google.cloud.ndb import context as context_module
and context:
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
which might include code, classes, or functions. Output only the next line. | def retry_async(callback, retries=_DEFAULT_RETRIES): |
Continue the code snippet: <|code_start|># you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Retry functions."""
_DEFAULT_INITIAL_DELAY = 1.0 # seconds
_DEFAULT_MAXIMUM_DELAY = 60.0 # seconds
_DEFAULT_DELAY_MULTIPLIER = 2.0
_DEFAULT_RETRIES = 3
def wraps_safely(obj, attr_names=functools.WRAPPER_ASSIGNMENTS):
"""Python 2.7 functools.wraps has a bug where attributes like ``module``
are not copied to the wrappers and thus cause attribute errors. This
wrapper prevents that problem."""
return functools.wraps(
obj, assigned=(name for name in attr_names if hasattr(obj, name))
)
<|code_end|>
. Use current file imports:
import functools
import itertools
from google.api_core import retry as core_retry
from google.api_core import exceptions as core_exceptions
from google.cloud.ndb import exceptions
from google.cloud.ndb import tasklets
from google.cloud.ndb import context as context_module
and context (classes, functions, or code) from other files:
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
. Output only the next line. | def retry_async(callback, retries=_DEFAULT_RETRIES): |
Continue the code snippet: <|code_start|> loop = self._make_one()
assert loop.current == collections.deque()
assert loop.idlers == collections.deque()
assert loop.inactive == 0
assert loop.queue == []
assert loop.rpcs == {}
def test_clear_all(self):
loop = self._make_one()
loop.current.append("foo")
loop.idlers.append("bar")
loop.queue.append("baz")
loop.rpcs["qux"] = "quux"
loop.clear()
assert not loop.current
assert not loop.idlers
assert not loop.queue
assert not loop.rpcs
        # idempotence (branch coverage)
loop.clear()
assert not loop.current
assert not loop.idlers
assert not loop.queue
assert not loop.rpcs
def test_clear_current(self):
loop = self._make_one()
loop.current.append("foo")
loop.clear()
<|code_end|>
. Use current file imports:
import collections
import mock
import grpc
import pytest
from unittest import mock
from google.cloud.ndb import exceptions
from google.cloud.ndb import _eventloop
and context (classes, functions, or code) from other files:
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
#
# Path: google/cloud/ndb/_eventloop.py
# class EventLoop(object):
# def __init__(self):
# def clear(self):
# def insort_event_right(self, event):
# def call_soon(self, callback, *args, **kwargs):
# def queue_call(self, delay, callback, *args, **kwargs):
# def queue_rpc(self, rpc, callback):
# def rpc_callback(rpc):
# def add_idle(self, callback, *args, **kwargs):
# def run_idle(self):
# def _run_current(self):
# def run0(self):
# def run1(self):
# def run(self):
# def get_event_loop():
# def add_idle(callback, *args, **kwargs):
# def call_soon(callback, *args, **kwargs):
# def queue_call(delay, callback, *args, **kwargs):
# def queue_rpc(future, rpc):
# def run():
# def run1():
. Output only the next line. | assert not loop.current |
Here is a snippet: <|code_start|> loop.inactive = 88
assert loop.run0() == 5
callback.assert_not_called()
assert len(loop.queue) == 1
assert loop.inactive == 88
@mock.patch("google.cloud.ndb._eventloop.time")
def test_run0_next_now(self, time):
time.time.return_value = 0
callback = mock.Mock(__name__="callback")
loop = self._make_one()
loop.queue_call(6, "foo")
loop.queue_call(5, callback, "foo", bar="baz")
loop.inactive = 88
time.time.return_value = 10
assert loop.run0() == 0
callback.assert_called_once_with("foo", bar="baz")
assert len(loop.queue) == 1
assert loop.inactive == 0
@pytest.mark.usefixtures("in_context")
def test_run0_rpc(self):
rpc = mock.Mock(spec=grpc.Future)
callback = mock.Mock(spec=())
loop = self._make_one()
loop.rpcs["foo"] = callback
loop.rpc_results.put(("foo", rpc))
loop.run0()
<|code_end|>
. Write the next line using the current file imports:
import collections
import mock
import grpc
import pytest
from unittest import mock
from google.cloud.ndb import exceptions
from google.cloud.ndb import _eventloop
and context from other files:
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
#
# Path: google/cloud/ndb/_eventloop.py
# class EventLoop(object):
# def __init__(self):
# def clear(self):
# def insort_event_right(self, event):
# def call_soon(self, callback, *args, **kwargs):
# def queue_call(self, delay, callback, *args, **kwargs):
# def queue_rpc(self, rpc, callback):
# def rpc_callback(rpc):
# def add_idle(self, callback, *args, **kwargs):
# def run_idle(self):
# def _run_current(self):
# def run0(self):
# def run1(self):
# def run(self):
# def get_event_loop():
# def add_idle(callback, *args, **kwargs):
# def call_soon(callback, *args, **kwargs):
# def queue_call(delay, callback, *args, **kwargs):
# def queue_rpc(future, rpc):
# def run():
# def run1():
, which may include functions, classes, or code. Output only the next line. | assert len(loop.rpcs) == 0 |
Based on the snippet: <|code_start|># Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def test___all__():
utils.verify___all__(django_middleware)
class TestNdbDjangoMiddleware:
@staticmethod
<|code_end|>
, predict the immediate next line with the help of imports:
import pytest
from google.cloud.ndb import django_middleware
from . import utils
and context (classes, functions, sometimes code) from other files:
# Path: google/cloud/ndb/django_middleware.py
# class NdbDjangoMiddleware(object):
# def __init__(self, *args, **kwargs):
. Output only the next line. | def test_constructor(): |
Here is a snippet: <|code_start|> @staticmethod
def test_propagation_allowed_already_in_transaction(in_context):
def callback():
return "I tried, momma."
with mock.patch(
"google.cloud.ndb._transaction.transaction_async_",
side_effect=_transaction.transaction_async_,
) as transaction_async_:
with in_context.new(transaction=b"tx123").use():
future = _transaction.transaction_async(
callback,
join=False,
propagation=context_module.TransactionOptions.ALLOWED,
)
assert future.result() == "I tried, momma."
transaction_async_.assert_called_once_with(
callback,
3,
False,
True,
True,
None,
)
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
<|code_end|>
. Write the next line using the current file imports:
import itertools
import logging
import mock
import pytest
from unittest import mock
from google.api_core import exceptions as core_exceptions
from google.cloud.ndb import context as context_module
from google.cloud.ndb import exceptions
from google.cloud.ndb import tasklets
from google.cloud.ndb import _transaction
and context from other files:
# Path: google/cloud/ndb/context.py
# @property
# def context(self):
# return self._context.get()
#
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
#
# Path: google/cloud/ndb/_transaction.py
# class _Propagation(object):
# def __init__(self, propagation, join=None):
# def _handle_nested(self):
# def _handle_mandatory(self):
# def _handle_allowed(self):
# def _handle_independent(self):
# def _handle_join(self):
# def handle_propagation(self):
# def in_transaction():
# def transaction(
# callback,
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=False,
# xg=True,
# propagation=None,
# ):
# def transaction_async(
# callback,
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=False,
# xg=True,
# propagation=None,
# ):
# def transaction_async_(
# callback,
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=False,
# xg=True,
# propagation=None,
# ):
# def _transaction_async(context, callback, read_only=False):
# def run_inner_loop(inner_context):
# def transactional(
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=True,
# xg=True,
# propagation=None,
# ):
# def transactional_wrapper(wrapped):
# def transactional_inner_wrapper(*args, **kwargs):
# def callback():
# def transactional_async(
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=True,
# xg=True,
# propagation=None,
# ):
# def transactional_async_wrapper(wrapped):
# def transactional_async_inner_wrapper(*args, **kwargs):
# def callback():
# def transactional_tasklet(
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=True,
# xg=True,
# propagation=None,
# ):
# def transactional_tasklet_wrapper(wrapped):
# def transactional_tasklet_inner_wrapper(*args, **kwargs):
# def callback():
# def non_transactional(allow_existing=True):
# def non_transactional_wrapper(wrapped):
# def non_transactional_inner_wrapper(*args, **kwargs):
, which may include functions, classes, or code. Output only the next line. | def test_propagation_allowed_not_yet_in_transaction(_datastore_api): |
Next line prediction: <|code_start|> @pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_success_callback_is_tasklet(_datastore_api):
tasklet = tasklets.Future("tasklet")
def callback():
return tasklet
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
future = _transaction.transaction_async(callback)
_datastore_api.begin_transaction.assert_called_once_with(False, retries=0)
begin_future.set_result(b"tx123")
tasklet.set_result("I tried, momma.")
_datastore_api.commit.assert_called_once_with(b"tx123", retries=0)
commit_future.set_result(None)
assert future.result() == "I tried, momma."
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_run_inner_loop(_datastore_api):
<|code_end|>
. Use current file imports:
(import itertools
import logging
import mock
import pytest
from unittest import mock
from google.api_core import exceptions as core_exceptions
from google.cloud.ndb import context as context_module
from google.cloud.ndb import exceptions
from google.cloud.ndb import tasklets
from google.cloud.ndb import _transaction)
and context including class names, function names, or small code snippets from other files:
# Path: google/cloud/ndb/context.py
# @property
# def context(self):
# return self._context.get()
#
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
#
# Path: google/cloud/ndb/_transaction.py
# class _Propagation(object):
# def __init__(self, propagation, join=None):
# def _handle_nested(self):
# def _handle_mandatory(self):
# def _handle_allowed(self):
# def _handle_independent(self):
# def _handle_join(self):
# def handle_propagation(self):
# def in_transaction():
# def transaction(
# callback,
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=False,
# xg=True,
# propagation=None,
# ):
# def transaction_async(
# callback,
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=False,
# xg=True,
# propagation=None,
# ):
# def transaction_async_(
# callback,
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=False,
# xg=True,
# propagation=None,
# ):
# def _transaction_async(context, callback, read_only=False):
# def run_inner_loop(inner_context):
# def transactional(
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=True,
# xg=True,
# propagation=None,
# ):
# def transactional_wrapper(wrapped):
# def transactional_inner_wrapper(*args, **kwargs):
# def callback():
# def transactional_async(
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=True,
# xg=True,
# propagation=None,
# ):
# def transactional_async_wrapper(wrapped):
# def transactional_async_inner_wrapper(*args, **kwargs):
# def callback():
# def transactional_tasklet(
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=True,
# xg=True,
# propagation=None,
# ):
# def transactional_tasklet_wrapper(wrapped):
# def transactional_tasklet_inner_wrapper(*args, **kwargs):
# def callback():
# def non_transactional(allow_existing=True):
# def non_transactional_wrapper(wrapped):
# def non_transactional_inner_wrapper(*args, **kwargs):
. Output only the next line. | begin_futures = [ |
Predict the next line after this snippet: <|code_start|>
with pytest.raises(SpuriousError):
future.result()
on_commit_callback.assert_not_called()
transaction_complete_callback.assert_called_once_with()
@staticmethod
def test_success_join(in_context):
def callback():
return "I tried, momma."
with in_context.new(transaction=b"tx123").use():
future = _transaction.transaction_async(callback, join=True)
assert future.result() == "I tried, momma."
@staticmethod
def test_success_join_callback_returns_future(in_context):
future = tasklets.Future()
def callback():
return future
with in_context.new(transaction=b"tx123").use():
future = _transaction.transaction_async(callback, join=True)
future.set_result("I tried, momma.")
assert future.result() == "I tried, momma."
<|code_end|>
using the current file's imports:
import itertools
import logging
import mock
import pytest
from unittest import mock
from google.api_core import exceptions as core_exceptions
from google.cloud.ndb import context as context_module
from google.cloud.ndb import exceptions
from google.cloud.ndb import tasklets
from google.cloud.ndb import _transaction
and any relevant context from other files:
# Path: google/cloud/ndb/context.py
# @property
# def context(self):
# return self._context.get()
#
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
#
# Path: google/cloud/ndb/_transaction.py
# class _Propagation(object):
# def __init__(self, propagation, join=None):
# def _handle_nested(self):
# def _handle_mandatory(self):
# def _handle_allowed(self):
# def _handle_independent(self):
# def _handle_join(self):
# def handle_propagation(self):
# def in_transaction():
# def transaction(
# callback,
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=False,
# xg=True,
# propagation=None,
# ):
# def transaction_async(
# callback,
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=False,
# xg=True,
# propagation=None,
# ):
# def transaction_async_(
# callback,
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=False,
# xg=True,
# propagation=None,
# ):
# def _transaction_async(context, callback, read_only=False):
# def run_inner_loop(inner_context):
# def transactional(
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=True,
# xg=True,
# propagation=None,
# ):
# def transactional_wrapper(wrapped):
# def transactional_inner_wrapper(*args, **kwargs):
# def callback():
# def transactional_async(
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=True,
# xg=True,
# propagation=None,
# ):
# def transactional_async_wrapper(wrapped):
# def transactional_async_inner_wrapper(*args, **kwargs):
# def callback():
# def transactional_tasklet(
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=True,
# xg=True,
# propagation=None,
# ):
# def transactional_tasklet_wrapper(wrapped):
# def transactional_tasklet_inner_wrapper(*args, **kwargs):
# def callback():
# def non_transactional(allow_existing=True):
# def non_transactional_wrapper(wrapped):
# def non_transactional_inner_wrapper(*args, **kwargs):
. Output only the next line. | @staticmethod |
Given snippet: <|code_start|>
assert future.result() == "I tried, momma."
transaction_async_.assert_called_once_with(
callback,
3,
False,
False,
True,
None,
)
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_success_no_retries(_datastore_api):
def callback():
return "I tried, momma."
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
future = _transaction.transaction_async(callback, retries=0)
_datastore_api.begin_transaction.assert_called_once_with(False, retries=0)
begin_future.set_result(b"tx123")
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import itertools
import logging
import mock
import pytest
from unittest import mock
from google.api_core import exceptions as core_exceptions
from google.cloud.ndb import context as context_module
from google.cloud.ndb import exceptions
from google.cloud.ndb import tasklets
from google.cloud.ndb import _transaction
and context:
# Path: google/cloud/ndb/context.py
# @property
# def context(self):
# return self._context.get()
#
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
#
# Path: google/cloud/ndb/_transaction.py
# class _Propagation(object):
# def __init__(self, propagation, join=None):
# def _handle_nested(self):
# def _handle_mandatory(self):
# def _handle_allowed(self):
# def _handle_independent(self):
# def _handle_join(self):
# def handle_propagation(self):
# def in_transaction():
# def transaction(
# callback,
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=False,
# xg=True,
# propagation=None,
# ):
# def transaction_async(
# callback,
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=False,
# xg=True,
# propagation=None,
# ):
# def transaction_async_(
# callback,
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=False,
# xg=True,
# propagation=None,
# ):
# def _transaction_async(context, callback, read_only=False):
# def run_inner_loop(inner_context):
# def transactional(
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=True,
# xg=True,
# propagation=None,
# ):
# def transactional_wrapper(wrapped):
# def transactional_inner_wrapper(*args, **kwargs):
# def callback():
# def transactional_async(
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=True,
# xg=True,
# propagation=None,
# ):
# def transactional_async_wrapper(wrapped):
# def transactional_async_inner_wrapper(*args, **kwargs):
# def callback():
# def transactional_tasklet(
# retries=_retry._DEFAULT_RETRIES,
# read_only=False,
# join=True,
# xg=True,
# propagation=None,
# ):
# def transactional_tasklet_wrapper(wrapped):
# def transactional_tasklet_inner_wrapper(*args, **kwargs):
# def callback():
# def non_transactional(allow_existing=True):
# def non_transactional_wrapper(wrapped):
# def non_transactional_inner_wrapper(*args, **kwargs):
which might include code, classes, or functions. Output only the next line. | _datastore_api.commit.assert_called_once_with(b"tx123", retries=0) |
Continue the code snippet: <|code_start|> def test_get_kind():
kind = stats.KindRootEntityStat.STORED_KIND_NAME
assert stats.KindRootEntityStat._get_kind() == kind
@staticmethod
def test_constructor():
stat = stats.KindRootEntityStat(kind_name="test_stat", **DEFAULTS)
assert stat.bytes == 4
assert stat.count == 2
assert stat.kind_name == "test_stat"
assert stat.entity_bytes == 0
class TestKindNonRootEntityStat:
@staticmethod
def test_get_kind():
kind = stats.KindNonRootEntityStat.STORED_KIND_NAME
assert stats.KindNonRootEntityStat._get_kind() == kind
@staticmethod
def test_constructor():
stat = stats.KindNonRootEntityStat(kind_name="test_stat", **DEFAULTS)
assert stat.bytes == 4
assert stat.count == 2
assert stat.kind_name == "test_stat"
assert stat.entity_bytes == 0
class TestPropertyTypeStat:
@staticmethod
<|code_end|>
. Use current file imports:
import datetime
from google.cloud.ndb import stats
from . import utils
and context (classes, functions, or code) from other files:
# Path: google/cloud/ndb/stats.py
# class BaseStatistic(model.Model):
# class BaseKindStatistic(BaseStatistic):
# class GlobalStat(BaseStatistic):
# class NamespaceStat(BaseStatistic):
# class KindStat(BaseKindStatistic):
# class KindRootEntityStat(BaseKindStatistic):
# class KindNonRootEntityStat(BaseKindStatistic):
# class PropertyTypeStat(BaseStatistic):
# class KindPropertyTypeStat(BaseKindStatistic):
# class KindPropertyNameStat(BaseKindStatistic):
# class KindPropertyNamePropertyTypeStat(BaseKindStatistic):
# class KindCompositeIndexStat(BaseStatistic):
# class NamespaceGlobalStat(GlobalStat):
# class NamespaceKindStat(KindStat):
# class NamespaceKindRootEntityStat(KindRootEntityStat):
# class NamespaceKindNonRootEntityStat(KindNonRootEntityStat):
# class NamespacePropertyTypeStat(PropertyTypeStat):
# class NamespaceKindPropertyTypeStat(KindPropertyTypeStat):
# class NamespaceKindPropertyNameStat(KindPropertyNameStat):
# class NamespaceKindPropertyNamePropertyTypeStat(KindPropertyNamePropertyTypeStat):
# class NamespaceKindCompositeIndexStat(KindCompositeIndexStat):
# STORED_KIND_NAME = "__BaseStatistic__"
# STORED_KIND_NAME = "__BaseKindStatistic__"
# STORED_KIND_NAME = "__Stat_Total__"
# STORED_KIND_NAME = "__Stat_Namespace__"
# STORED_KIND_NAME = "__Stat_Kind__"
# STORED_KIND_NAME = "__Stat_Kind_IsRootEntity__"
# STORED_KIND_NAME = "__Stat_Kind_NotRootEntity__"
# STORED_KIND_NAME = "__Stat_PropertyType__"
# STORED_KIND_NAME = "__Stat_PropertyType_Kind__"
# STORED_KIND_NAME = "__Stat_PropertyName_Kind__"
# STORED_KIND_NAME = "__Stat_PropertyType_PropertyName_Kind__"
# STORED_KIND_NAME = "__Stat_Kind_CompositeIndex__"
# STORED_KIND_NAME = "__Stat_Ns_Total__"
# STORED_KIND_NAME = "__Stat_Ns_Kind__"
# STORED_KIND_NAME = "__Stat_Ns_Kind_IsRootEntity__"
# STORED_KIND_NAME = "__Stat_Ns_Kind_NotRootEntity__"
# STORED_KIND_NAME = "__Stat_Ns_PropertyType__"
# STORED_KIND_NAME = "__Stat_Ns_PropertyType_Kind__"
# STORED_KIND_NAME = "__Stat_Ns_PropertyName_Kind__"
# STORED_KIND_NAME = "__Stat_Ns_PropertyType_PropertyName_Kind__"
# STORED_KIND_NAME = "__Stat_Ns_Kind_CompositeIndex__"
# _DATASTORE_STATS_CLASSES_BY_KIND = {
# GlobalStat.STORED_KIND_NAME: GlobalStat,
# NamespaceStat.STORED_KIND_NAME: NamespaceStat,
# KindStat.STORED_KIND_NAME: KindStat,
# KindRootEntityStat.STORED_KIND_NAME: KindRootEntityStat,
# KindNonRootEntityStat.STORED_KIND_NAME: KindNonRootEntityStat,
# PropertyTypeStat.STORED_KIND_NAME: PropertyTypeStat,
# KindPropertyTypeStat.STORED_KIND_NAME: KindPropertyTypeStat,
# KindPropertyNameStat.STORED_KIND_NAME: KindPropertyNameStat,
# KindPropertyNamePropertyTypeStat.STORED_KIND_NAME: KindPropertyNamePropertyTypeStat, # noqa: E501
# KindCompositeIndexStat.STORED_KIND_NAME: KindCompositeIndexStat,
# NamespaceGlobalStat.STORED_KIND_NAME: NamespaceGlobalStat,
# NamespaceKindStat.STORED_KIND_NAME: NamespaceKindStat,
# NamespaceKindRootEntityStat.STORED_KIND_NAME: NamespaceKindRootEntityStat,
# NamespaceKindNonRootEntityStat.STORED_KIND_NAME: NamespaceKindNonRootEntityStat, # noqa: E501
# NamespacePropertyTypeStat.STORED_KIND_NAME: NamespacePropertyTypeStat,
# NamespaceKindPropertyTypeStat.STORED_KIND_NAME: NamespaceKindPropertyTypeStat, # noqa: E501
# NamespaceKindPropertyNameStat.STORED_KIND_NAME: NamespaceKindPropertyNameStat, # noqa: E501
# NamespaceKindPropertyNamePropertyTypeStat.STORED_KIND_NAME: NamespaceKindPropertyNamePropertyTypeStat, # noqa: E501
# NamespaceKindCompositeIndexStat.STORED_KIND_NAME: NamespaceKindCompositeIndexStat, # noqa: E501
# }
# def _get_kind(cls):
. Output only the next line. | def test_get_kind(): |
Based on the snippet: <|code_start|># Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MyOptions(_options.Options):
__slots__ = ["foo", "bar"]
class TestOptions:
@staticmethod
def test_constructor_w_bad_arg():
with pytest.raises(TypeError):
MyOptions(kind="test")
@staticmethod
def test_constructor_w_deadline():
options = MyOptions(deadline=20)
assert options.timeout == 20
<|code_end|>
, predict the immediate next line with the help of imports:
import pytest
from google.cloud.ndb import _datastore_api
from google.cloud.ndb import _options
from google.cloud.ndb import utils
and context (classes, functions, sometimes code) from other files:
# Path: google/cloud/ndb/_datastore_api.py
# EVENTUAL = datastore_pb2.ReadOptions.EVENTUAL
# EVENTUAL_CONSISTENCY = EVENTUAL # Legacy NDB
# STRONG = datastore_pb2.ReadOptions.STRONG
# _DEFAULT_TIMEOUT = None
# _NOT_FOUND = object()
# def stub():
# def make_call(rpc_name, request, retries=None, timeout=None):
# def rpc_call():
# def lookup(key, options):
# def __init__(self, options):
# def full(self):
# def add(self, key):
# def idle_callback(self):
# def lookup_callback(self, rpc):
# def _datastore_lookup(keys, read_options, retries=None, timeout=None):
# def get_read_options(options, default_read_consistency=None):
# def put(entity, options):
# def callback():
# def delete(key, options):
# def callback():
# def __init__(self, options):
# def full(self):
# def put(self, entity_pb):
# def delete(self, key):
# def idle_callback(self):
# def commit_callback(rpc):
# def prepare_to_commit(transaction):
# def commit(transaction, retries=None, timeout=None):
# def _get_commit_batch(transaction, options):
# def __init__(self, transaction, options):
# def put(self, entity_pb):
# def delete(self, key):
# def idle_callback(self):
# def callback(rpc):
# def allocate_ids_callback(self, rpc, mutations, futures):
# def commit(self, retries=None, timeout=None):
# def commit_callback(rpc):
# def _process_commit(rpc, futures):
# def _complete(key_pb):
# def _datastore_commit(mutations, transaction, retries=None, timeout=None):
# def allocate(keys, options):
# def __init__(self, options):
# def full(self):
# def room_left(self):
# def add(self, keys):
# def idle_callback(self):
# def allocate_ids_callback(self, rpc):
# def _datastore_allocate_ids(keys, retries=None, timeout=None):
# def begin_transaction(read_only, retries=None, timeout=None):
# def _datastore_begin_transaction(read_only, retries=None, timeout=None):
# def rollback(transaction, retries=None, timeout=None):
# def _datastore_rollback(transaction, retries=None, timeout=None):
# class _LookupBatch(object):
# class _NonTransactionalCommitBatch(object):
# class _TransactionalCommitBatch(_NonTransactionalCommitBatch):
# class _AllocateIdsBatch(object):
#
# Path: google/cloud/ndb/_options.py
# class Options(object):
# class ReadOptions(Options):
# def options_or_model_properties(cls, wrapped):
# def options(cls, wrapped, _disambiguate_from_model_properties=False):
# def wrapper(*args, **kwargs):
# def get_arg(kwargs, name):
# def slots(cls):
# def __init__(self, config=None, **kwargs):
# def __eq__(self, other):
# def __ne__(self, other):
# def __repr__(self):
# def copy(self, **kwargs):
# def items(self):
# def __init__(self, config=None, **kwargs):
#
# Path: google/cloud/ndb/utils.py
# TRUTHY_STRINGS = {"t", "true", "y", "yes", "on", "1"}
# DEBUG = asbool(os.environ.get("NDB_DEBUG", False))
# def asbool(value):
# def code_info(*args, **kwargs):
# def decorator(*args, **kwargs):
# def frame_info(*args, **kwargs):
# def func_info(*args, **kwargs):
# def gen_info(*args, **kwargs):
# def get_stack(*args, **kwargs):
# def logging_debug(log, message, *args, **kwargs):
# def __init__(self, **kwargs):
# def __call__(self, wrapped):
# def wrapper(*args, **kwargs):
# def positional(max_pos_args):
# def positional_decorator(wrapped):
# def positional_wrapper(*args, **kwds):
# def tweak_logging(*args, **kwargs):
# def wrapping(*args, **kwargs):
# class keyword_only(object):
. Output only the next line. | @staticmethod |
Given the following code snippet before the placeholder: <|code_start|> options = MyOptions(xg=True)
assert options == MyOptions()
@staticmethod
def test_constructor_with_config():
config = MyOptions(retries=5, foo="config_test")
options = MyOptions(config=config, retries=8, bar="app")
assert options.retries == 8
assert options.bar == "app"
assert options.foo == "config_test"
@staticmethod
def test_constructor_with_bad_config():
with pytest.raises(TypeError):
MyOptions(config="bad")
@staticmethod
def test___repr__():
representation = "MyOptions(foo='test', bar='app')"
options = MyOptions(foo="test", bar="app")
assert options.__repr__() == representation
@staticmethod
def test__eq__():
options = MyOptions(foo="test", bar="app")
other = MyOptions(foo="test", bar="app")
otherother = MyOptions(foo="nope", bar="noway")
assert options == other
assert options != otherother
<|code_end|>
, predict the next line using imports from the current file:
import pytest
from google.cloud.ndb import _datastore_api
from google.cloud.ndb import _options
from google.cloud.ndb import utils
and context including class names, function names, and sometimes code from other files:
# Path: google/cloud/ndb/_datastore_api.py
# EVENTUAL = datastore_pb2.ReadOptions.EVENTUAL
# EVENTUAL_CONSISTENCY = EVENTUAL # Legacy NDB
# STRONG = datastore_pb2.ReadOptions.STRONG
# _DEFAULT_TIMEOUT = None
# _NOT_FOUND = object()
# def stub():
# def make_call(rpc_name, request, retries=None, timeout=None):
# def rpc_call():
# def lookup(key, options):
# def __init__(self, options):
# def full(self):
# def add(self, key):
# def idle_callback(self):
# def lookup_callback(self, rpc):
# def _datastore_lookup(keys, read_options, retries=None, timeout=None):
# def get_read_options(options, default_read_consistency=None):
# def put(entity, options):
# def callback():
# def delete(key, options):
# def callback():
# def __init__(self, options):
# def full(self):
# def put(self, entity_pb):
# def delete(self, key):
# def idle_callback(self):
# def commit_callback(rpc):
# def prepare_to_commit(transaction):
# def commit(transaction, retries=None, timeout=None):
# def _get_commit_batch(transaction, options):
# def __init__(self, transaction, options):
# def put(self, entity_pb):
# def delete(self, key):
# def idle_callback(self):
# def callback(rpc):
# def allocate_ids_callback(self, rpc, mutations, futures):
# def commit(self, retries=None, timeout=None):
# def commit_callback(rpc):
# def _process_commit(rpc, futures):
# def _complete(key_pb):
# def _datastore_commit(mutations, transaction, retries=None, timeout=None):
# def allocate(keys, options):
# def __init__(self, options):
# def full(self):
# def room_left(self):
# def add(self, keys):
# def idle_callback(self):
# def allocate_ids_callback(self, rpc):
# def _datastore_allocate_ids(keys, retries=None, timeout=None):
# def begin_transaction(read_only, retries=None, timeout=None):
# def _datastore_begin_transaction(read_only, retries=None, timeout=None):
# def rollback(transaction, retries=None, timeout=None):
# def _datastore_rollback(transaction, retries=None, timeout=None):
# class _LookupBatch(object):
# class _NonTransactionalCommitBatch(object):
# class _TransactionalCommitBatch(_NonTransactionalCommitBatch):
# class _AllocateIdsBatch(object):
#
# Path: google/cloud/ndb/_options.py
# class Options(object):
# class ReadOptions(Options):
# def options_or_model_properties(cls, wrapped):
# def options(cls, wrapped, _disambiguate_from_model_properties=False):
# def wrapper(*args, **kwargs):
# def get_arg(kwargs, name):
# def slots(cls):
# def __init__(self, config=None, **kwargs):
# def __eq__(self, other):
# def __ne__(self, other):
# def __repr__(self):
# def copy(self, **kwargs):
# def items(self):
# def __init__(self, config=None, **kwargs):
#
# Path: google/cloud/ndb/utils.py
# TRUTHY_STRINGS = {"t", "true", "y", "yes", "on", "1"}
# DEBUG = asbool(os.environ.get("NDB_DEBUG", False))
# def asbool(value):
# def code_info(*args, **kwargs):
# def decorator(*args, **kwargs):
# def frame_info(*args, **kwargs):
# def func_info(*args, **kwargs):
# def gen_info(*args, **kwargs):
# def get_stack(*args, **kwargs):
# def logging_debug(log, message, *args, **kwargs):
# def __init__(self, **kwargs):
# def __call__(self, wrapped):
# def wrapper(*args, **kwargs):
# def positional(max_pos_args):
# def positional_decorator(wrapped):
# def positional_wrapper(*args, **kwds):
# def tweak_logging(*args, **kwargs):
# def wrapping(*args, **kwargs):
# class keyword_only(object):
. Output only the next line. | assert options != "foo" |
Predict the next line after this snippet: <|code_start|>
class MyOptions(_options.Options):
__slots__ = ["foo", "bar"]
class TestOptions:
@staticmethod
def test_constructor_w_bad_arg():
with pytest.raises(TypeError):
MyOptions(kind="test")
@staticmethod
def test_constructor_w_deadline():
options = MyOptions(deadline=20)
assert options.timeout == 20
@staticmethod
def test_constructor_w_deadline_and_timeout():
with pytest.raises(TypeError):
MyOptions(timeout=20, deadline=10)
@staticmethod
def test_constructor_w_use_memcache():
options = MyOptions(use_memcache=True)
assert options.use_global_cache is True
@staticmethod
def test_constructor_w_use_global_cache():
options = MyOptions(use_global_cache=True)
<|code_end|>
using the current file's imports:
import pytest
from google.cloud.ndb import _datastore_api
from google.cloud.ndb import _options
from google.cloud.ndb import utils
and any relevant context from other files:
# Path: google/cloud/ndb/_datastore_api.py
# EVENTUAL = datastore_pb2.ReadOptions.EVENTUAL
# EVENTUAL_CONSISTENCY = EVENTUAL # Legacy NDB
# STRONG = datastore_pb2.ReadOptions.STRONG
# _DEFAULT_TIMEOUT = None
# _NOT_FOUND = object()
# def stub():
# def make_call(rpc_name, request, retries=None, timeout=None):
# def rpc_call():
# def lookup(key, options):
# def __init__(self, options):
# def full(self):
# def add(self, key):
# def idle_callback(self):
# def lookup_callback(self, rpc):
# def _datastore_lookup(keys, read_options, retries=None, timeout=None):
# def get_read_options(options, default_read_consistency=None):
# def put(entity, options):
# def callback():
# def delete(key, options):
# def callback():
# def __init__(self, options):
# def full(self):
# def put(self, entity_pb):
# def delete(self, key):
# def idle_callback(self):
# def commit_callback(rpc):
# def prepare_to_commit(transaction):
# def commit(transaction, retries=None, timeout=None):
# def _get_commit_batch(transaction, options):
# def __init__(self, transaction, options):
# def put(self, entity_pb):
# def delete(self, key):
# def idle_callback(self):
# def callback(rpc):
# def allocate_ids_callback(self, rpc, mutations, futures):
# def commit(self, retries=None, timeout=None):
# def commit_callback(rpc):
# def _process_commit(rpc, futures):
# def _complete(key_pb):
# def _datastore_commit(mutations, transaction, retries=None, timeout=None):
# def allocate(keys, options):
# def __init__(self, options):
# def full(self):
# def room_left(self):
# def add(self, keys):
# def idle_callback(self):
# def allocate_ids_callback(self, rpc):
# def _datastore_allocate_ids(keys, retries=None, timeout=None):
# def begin_transaction(read_only, retries=None, timeout=None):
# def _datastore_begin_transaction(read_only, retries=None, timeout=None):
# def rollback(transaction, retries=None, timeout=None):
# def _datastore_rollback(transaction, retries=None, timeout=None):
# class _LookupBatch(object):
# class _NonTransactionalCommitBatch(object):
# class _TransactionalCommitBatch(_NonTransactionalCommitBatch):
# class _AllocateIdsBatch(object):
#
# Path: google/cloud/ndb/_options.py
# class Options(object):
# class ReadOptions(Options):
# def options_or_model_properties(cls, wrapped):
# def options(cls, wrapped, _disambiguate_from_model_properties=False):
# def wrapper(*args, **kwargs):
# def get_arg(kwargs, name):
# def slots(cls):
# def __init__(self, config=None, **kwargs):
# def __eq__(self, other):
# def __ne__(self, other):
# def __repr__(self):
# def copy(self, **kwargs):
# def items(self):
# def __init__(self, config=None, **kwargs):
#
# Path: google/cloud/ndb/utils.py
# TRUTHY_STRINGS = {"t", "true", "y", "yes", "on", "1"}
# DEBUG = asbool(os.environ.get("NDB_DEBUG", False))
# def asbool(value):
# def code_info(*args, **kwargs):
# def decorator(*args, **kwargs):
# def frame_info(*args, **kwargs):
# def func_info(*args, **kwargs):
# def gen_info(*args, **kwargs):
# def get_stack(*args, **kwargs):
# def logging_debug(log, message, *args, **kwargs):
# def __init__(self, **kwargs):
# def __call__(self, wrapped):
# def wrapper(*args, **kwargs):
# def positional(max_pos_args):
# def positional_decorator(wrapped):
# def positional_wrapper(*args, **kwds):
# def tweak_logging(*args, **kwargs):
# def wrapping(*args, **kwargs):
# class keyword_only(object):
. Output only the next line. | assert options.use_global_cache is True |
Given the snippet: <|code_start|> unset = global_cache.MemcacheCache.KeyNotSet(b"foo")
assert unset == global_cache.MemcacheCache.KeyNotSet(b"foo")
assert not unset == global_cache.MemcacheCache.KeyNotSet(b"goo")
assert not unset == "hamburger"
@staticmethod
def test_delete():
client = mock.Mock(spec=("delete_many",))
cache = global_cache.MemcacheCache(client)
key1 = cache._key(b"one")
key2 = cache._key(b"two")
cache.delete((b"one", b"two"))
client.delete_many.assert_called_once_with([key1, key2])
@staticmethod
def test_watch():
client = mock.Mock(spec=("gets_many",))
cache = global_cache.MemcacheCache(client)
key1 = cache._key(b"one")
key2 = cache._key(b"two")
client.gets_many.return_value = {
key1: ("bun", b"0"),
key2: ("shoe", b"1"),
}
cache.watch(
collections.OrderedDict(
(
(b"one", "bun"),
(b"two", "shot"),
)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import collections
import mock
import pytest
import redis as redis_module
from unittest import mock
from google.cloud.ndb import global_cache
and context:
# Path: google/cloud/ndb/global_cache.py
# class GlobalCache(object):
# class _InProcessGlobalCache(GlobalCache):
# class RedisCache(GlobalCache):
# class MemcacheCache(GlobalCache):
# class KeyNotSet(Exception):
# def get(self, keys):
# def set(self, items, expires=None):
# def set_if_not_exists(self, items, expires=None):
# def delete(self, keys):
# def watch(self, items):
# def unwatch(self, keys):
# def compare_and_swap(self, items, expires=None):
# def clear(self):
# def __init__(self):
# def get(self, keys):
# def set(self, items, expires=None):
# def set_if_not_exists(self, items, expires=None):
# def delete(self, keys):
# def watch(self, items):
# def unwatch(self, keys):
# def compare_and_swap(self, items, expires=None):
# def clear(self):
# def from_environment(cls, strict_read=False, strict_write=True):
# def __init__(self, redis, strict_read=False, strict_write=True):
# def pipes(self):
# def get(self, keys):
# def set(self, items, expires=None):
# def set_if_not_exists(self, items, expires=None):
# def delete(self, keys):
# def watch(self, items):
# def unwatch(self, keys):
# def compare_and_swap(self, items, expires=None):
# def clear(self):
# def __init__(self, key):
# def __eq__(self, other):
# def _parse_host_string(host_string):
# def _key(key):
# def from_environment(cls, max_pool_size=4, strict_read=False, strict_write=True):
# def __init__(self, client, strict_read=False, strict_write=True):
# def caskeys(self):
# def get(self, keys):
# def set(self, items, expires=None):
# def set_if_not_exists(self, items, expires=None):
# def delete(self, keys):
# def watch(self, items):
# def unwatch(self, keys):
# def compare_and_swap(self, items, expires=None):
# def clear(self):
which might include code, classes, or functions. Output only the next line. | ) |
Using the snippet: <|code_start|># Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
except ImportError: # pragma: NO PY3 COVER
class TestBlobKey:
@staticmethod
def test_constructor_bytes():
<|code_end|>
, determine the next line of code. You have imports:
from unittest import mock
from google.cloud.ndb import _datastore_types
from google.cloud.ndb import exceptions
import mock
import pytest
and context (class names, function names, or code) available:
# Path: google/cloud/ndb/_datastore_types.py
# _MAX_STRING_LENGTH = 1500
# class BlobKey(object):
# def __init__(self, blob_key):
# def __eq__(self, other):
# def __lt__(self, other):
# def __hash__(self):
#
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
. Output only the next line. | value = b"abc" |
Predict the next line after this snippet: <|code_start|> def test_constructor_bytes():
value = b"abc"
blob_key = _datastore_types.BlobKey(value)
assert blob_key._blob_key is value
@staticmethod
def test_constructor_none():
blob_key = _datastore_types.BlobKey(None)
assert blob_key._blob_key is None
@staticmethod
def test_constructor_too_long():
value = b"a" * 2000
with pytest.raises(exceptions.BadValueError):
_datastore_types.BlobKey(value)
@staticmethod
def test_constructor_bad_type():
value = {"a": "b"}
with pytest.raises(exceptions.BadValueError):
_datastore_types.BlobKey(value)
@staticmethod
def test___eq__():
blob_key1 = _datastore_types.BlobKey(b"abc")
blob_key2 = _datastore_types.BlobKey(b"def")
blob_key3 = _datastore_types.BlobKey(None)
blob_key4 = b"ghi"
blob_key5 = mock.sentinel.blob_key
assert blob_key1 == blob_key1
<|code_end|>
using the current file's imports:
from unittest import mock
from google.cloud.ndb import _datastore_types
from google.cloud.ndb import exceptions
import mock
import pytest
and any relevant context from other files:
# Path: google/cloud/ndb/_datastore_types.py
# _MAX_STRING_LENGTH = 1500
# class BlobKey(object):
# def __init__(self, blob_key):
# def __eq__(self, other):
# def __lt__(self, other):
# def __hash__(self):
#
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
. Output only the next line. | assert not blob_key1 == blob_key2 |
Predict the next line: <|code_start|># Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def verify___all__(module_obj):
expected = []
for name in dir(module_obj):
<|code_end|>
. Use current file imports:
(import types
from google.cloud.ndb import tasklets)
and context including class names, function names, or small code snippets from other files:
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
. Output only the next line. | if not name.startswith("_"): |
Given the following code snippet before the placeholder: <|code_start|> @staticmethod
def test_exception_FutureCancelledError():
error = grpc.FutureCancelledError()
future = tasklets.Future()
future.exception = mock.Mock(side_effect=error)
call = _remote.RemoteCall(future, "testing")
assert isinstance(call.exception(), exceptions.Cancelled)
@staticmethod
def test_result():
future = tasklets.Future()
future.set_result("positive")
call = _remote.RemoteCall(future, "testing")
assert call.result() == "positive"
@staticmethod
def test_add_done_callback():
future = tasklets.Future()
call = _remote.RemoteCall(future, "testing")
callback = mock.Mock(spec=())
call.add_done_callback(callback)
future.set_result(None)
callback.assert_called_once_with(call)
@staticmethod
def test_add_done_callback_already_done():
future = tasklets.Future()
future.set_result(None)
call = _remote.RemoteCall(future, "testing")
callback = mock.Mock(spec=())
<|code_end|>
, predict the next line using imports from the current file:
from unittest import mock
from google.cloud.ndb import exceptions
from google.cloud.ndb import _remote
from google.cloud.ndb import tasklets
import mock
import grpc
import pytest
and context including class names, function names, and sometimes code from other files:
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
#
# Path: google/cloud/ndb/_remote.py
# class RemoteCall(object):
# def __init__(self, future, info):
# def record_time(future):
# def __repr__(self):
# def exception(self):
# def result(self):
# def add_done_callback(self, callback):
# def wrapper(rpc):
# def cancel(self):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
. Output only the next line. | call.add_done_callback(callback) |
Based on the snippet: <|code_start|> assert repr(call) == "a remote call"
@staticmethod
def test_exception():
error = Exception("Spurious error")
future = tasklets.Future()
future.set_exception(error)
call = _remote.RemoteCall(future, "testing")
assert call.exception() is error
@staticmethod
def test_exception_FutureCancelledError():
error = grpc.FutureCancelledError()
future = tasklets.Future()
future.exception = mock.Mock(side_effect=error)
call = _remote.RemoteCall(future, "testing")
assert isinstance(call.exception(), exceptions.Cancelled)
@staticmethod
def test_result():
future = tasklets.Future()
future.set_result("positive")
call = _remote.RemoteCall(future, "testing")
assert call.result() == "positive"
@staticmethod
def test_add_done_callback():
future = tasklets.Future()
call = _remote.RemoteCall(future, "testing")
callback = mock.Mock(spec=())
<|code_end|>
, predict the immediate next line with the help of imports:
from unittest import mock
from google.cloud.ndb import exceptions
from google.cloud.ndb import _remote
from google.cloud.ndb import tasklets
import mock
import grpc
import pytest
and context (classes, functions, sometimes code) from other files:
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
#
# Path: google/cloud/ndb/_remote.py
# class RemoteCall(object):
# def __init__(self, future, info):
# def record_time(future):
# def __repr__(self):
# def exception(self):
# def result(self):
# def add_done_callback(self, callback):
# def wrapper(rpc):
# def cancel(self):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
. Output only the next line. | call.add_done_callback(callback) |
Here is a snippet: <|code_start|># Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
except ImportError: # pragma: NO PY3 COVER
class TestRemoteCall:
@staticmethod
def test_constructor():
future = tasklets.Future()
call = _remote.RemoteCall(future, "info")
assert call.future is future
assert call.info == "info"
<|code_end|>
. Write the next line using the current file imports:
from unittest import mock
from google.cloud.ndb import exceptions
from google.cloud.ndb import _remote
from google.cloud.ndb import tasklets
import mock
import grpc
import pytest
and context from other files:
# Path: google/cloud/ndb/exceptions.py
# class Error(Exception):
# class ContextError(Error):
# class BadValueError(Error):
# class BadArgumentError(Error):
# class BadRequestError(Error):
# class Rollback(Error):
# class BadQueryError(Error):
# class BadFilterError(Error):
# class NoLongerImplementedError(NotImplementedError):
# class Cancelled(Error):
# class NestedRetryException(Error):
# def __init__(self):
# def __init__(self, filter):
# def __init__(self):
#
# Path: google/cloud/ndb/_remote.py
# class RemoteCall(object):
# def __init__(self, future, info):
# def record_time(future):
# def __repr__(self):
# def exception(self):
# def result(self):
# def add_done_callback(self, callback):
# def wrapper(rpc):
# def cancel(self):
#
# Path: google/cloud/ndb/tasklets.py
# class Future(object):
# class _TaskletFuture(Future):
# class _MultiFuture(Future):
# class Return(Exception):
# class QueueFuture(object):
# class ReducingFuture(object):
# class SerialQueueFuture(object):
# def __init__(self, info="Unknown"):
# def __repr__(self):
# def done(self):
# def running(self):
# def wait(self):
# def check_success(self):
# def set_result(self, result):
# def set_exception(self, exception):
# def _finish(self):
# def result(self):
# def exception(self):
# def get_traceback(self):
# def add_done_callback(self, callback):
# def cancel(self):
# def cancelled(self):
# def wait_any(futures):
# def wait_all(futures):
# def __init__(self, generator, context, info="Unknown"):
# def _advance_tasklet(self, send_value=None, error=None):
# def done_callback(yielded):
# def cancel(self):
# def _get_return_value(stop):
# def __init__(self, dependencies):
# def __repr__(self):
# def _dependency_done(self, dependency):
# def cancel(self):
# def tasklet(wrapped):
# def tasklet_wrapper(*args, **kwargs):
# def wait_any(futures):
# def wait_all(futures):
# def sleep(seconds):
# def add_flow_exception(*args, **kwargs):
# def make_context(*args, **kwargs):
# def make_default_context(*args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def __init__(self, *args, **kwargs):
# def set_context(*args, **kwargs):
# def synctasklet(wrapped):
# def synctasklet_wrapper(*args, **kwargs):
# def toplevel(wrapped):
# def toplevel_wrapper(*args, **kwargs):
, which may include functions, classes, or code. Output only the next line. | @staticmethod |
Given the following code snippet before the placeholder: <|code_start|>erfinv = primitive(scipy.special.erfinv)
erfcinv = primitive(scipy.special.erfcinv)
defvjp(erfinv,lambda ans, x: lambda g: g * root_pi / 2 * np.exp(erfinv(x)**2))
defvjp(erfcinv,lambda ans, x: lambda g: -g * root_pi / 2 * np.exp(erfcinv(x)**2))
### Logit and Expit ###
logit = primitive(scipy.special.logit)
expit = primitive(scipy.special.expit)
defvjp(logit,lambda ans, x: lambda g: g / ( x * (1 - x)))
defvjp(expit,lambda ans, x: lambda g: g * ans * (1 - ans))
### logsumexp ###
logsumexp = primitive(scipy.special.logsumexp)
def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
shape, dtype = np.shape(x), np.result_type(x)
def vjp(g):
g_repeated, _ = repeat_to_match_shape(g, shape, dtype, axis, keepdims)
ans_repeated, _ = repeat_to_match_shape(ans, shape, dtype, axis, keepdims)
return g_repeated * b * np.exp(x - ans_repeated)
return vjp
defvjp(logsumexp, make_grad_logsumexp)
def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
if not keepdims:
if isinstance(axis, int):
ans = np.expand_dims(ans, axis)
<|code_end|>
, predict the next line using imports from the current file:
import scipy.special
import autograd.numpy as np
from autograd.extend import primitive, defvjp, defjvp
from autograd.numpy.numpy_vjps import unbroadcast_f, repeat_to_match_shape
and context including class names, function names, and sometimes code from other files:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# def repeat_to_match_shape(g, shape, dtype, axis, keepdims):
# """Returns the array g repeated along axis to fit vector space vs.
# Also returns the number of repetitions of the array."""
# if shape == ():
# return g, 1
# axis = list(axis) if isinstance(axis, tuple) else axis
# new_shape = onp.array(shape)
# new_shape[axis] = 1
# num_reps = onp.prod(onp.array(shape)[axis])
# # Can't use broadcast_to because of numpy bug: https://github.com/numpy/numpy/issues/9165
# # return anp.broadcast_to(anp.reshape(g, new_shape), shape), num_reps
# return anp.reshape(g, new_shape) + onp.zeros(shape, dtype=dtype), num_reps
. Output only the next line. | elif isinstance(axis, tuple): |
Using the snippet: <|code_start|>erfinv = primitive(scipy.special.erfinv)
erfcinv = primitive(scipy.special.erfcinv)
defvjp(erfinv,lambda ans, x: lambda g: g * root_pi / 2 * np.exp(erfinv(x)**2))
defvjp(erfcinv,lambda ans, x: lambda g: -g * root_pi / 2 * np.exp(erfcinv(x)**2))
### Logit and Expit ###
logit = primitive(scipy.special.logit)
expit = primitive(scipy.special.expit)
defvjp(logit,lambda ans, x: lambda g: g / ( x * (1 - x)))
defvjp(expit,lambda ans, x: lambda g: g * ans * (1 - ans))
### logsumexp ###
logsumexp = primitive(scipy.special.logsumexp)
def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
shape, dtype = np.shape(x), np.result_type(x)
def vjp(g):
g_repeated, _ = repeat_to_match_shape(g, shape, dtype, axis, keepdims)
ans_repeated, _ = repeat_to_match_shape(ans, shape, dtype, axis, keepdims)
return g_repeated * b * np.exp(x - ans_repeated)
return vjp
defvjp(logsumexp, make_grad_logsumexp)
def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
if not keepdims:
if isinstance(axis, int):
ans = np.expand_dims(ans, axis)
<|code_end|>
, determine the next line of code. You have imports:
import scipy.special
import autograd.numpy as np
from autograd.extend import primitive, defvjp, defjvp
from autograd.numpy.numpy_vjps import unbroadcast_f, repeat_to_match_shape
and context (class names, function names, or code) available:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# def repeat_to_match_shape(g, shape, dtype, axis, keepdims):
# """Returns the array g repeated along axis to fit vector space vs.
# Also returns the number of repetitions of the array."""
# if shape == ():
# return g, 1
# axis = list(axis) if isinstance(axis, tuple) else axis
# new_shape = onp.array(shape)
# new_shape[axis] = 1
# num_reps = onp.prod(onp.array(shape)[axis])
# # Can't use broadcast_to because of numpy bug: https://github.com/numpy/numpy/issues/9165
# # return anp.broadcast_to(anp.reshape(g, new_shape), shape), num_reps
# return anp.reshape(g, new_shape) + onp.zeros(shape, dtype=dtype), num_reps
. Output only the next line. | elif isinstance(axis, tuple): |
Based on the snippet: <|code_start|>defvjp(ive, None, lambda ans, n, x: lambda g: g * (ans * (n / x - np.sign(x)) + ive(n + 1, x)))
### Error Function ###
inv_root_pi = 0.56418958354775627928
erf = primitive(scipy.special.erf)
erfc = primitive(scipy.special.erfc)
defvjp(erf, lambda ans, x: lambda g: 2.*g*inv_root_pi*np.exp(-x**2))
defvjp(erfc,lambda ans, x: lambda g: -2.*g*inv_root_pi*np.exp(-x**2))
### Inverse error function ###
root_pi = 1.7724538509055159
erfinv = primitive(scipy.special.erfinv)
erfcinv = primitive(scipy.special.erfcinv)
defvjp(erfinv,lambda ans, x: lambda g: g * root_pi / 2 * np.exp(erfinv(x)**2))
defvjp(erfcinv,lambda ans, x: lambda g: -g * root_pi / 2 * np.exp(erfcinv(x)**2))
### Logit and Expit ###
logit = primitive(scipy.special.logit)
expit = primitive(scipy.special.expit)
defvjp(logit,lambda ans, x: lambda g: g / ( x * (1 - x)))
defvjp(expit,lambda ans, x: lambda g: g * ans * (1 - ans))
### logsumexp ###
logsumexp = primitive(scipy.special.logsumexp)
def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
<|code_end|>
, predict the immediate next line with the help of imports:
import scipy.special
import autograd.numpy as np
from autograd.extend import primitive, defvjp, defjvp
from autograd.numpy.numpy_vjps import unbroadcast_f, repeat_to_match_shape
and context (classes, functions, sometimes code) from other files:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# def repeat_to_match_shape(g, shape, dtype, axis, keepdims):
# """Returns the array g repeated along axis to fit vector space vs.
# Also returns the number of repetitions of the array."""
# if shape == ():
# return g, 1
# axis = list(axis) if isinstance(axis, tuple) else axis
# new_shape = onp.array(shape)
# new_shape[axis] = 1
# num_reps = onp.prod(onp.array(shape)[axis])
# # Can't use broadcast_to because of numpy bug: https://github.com/numpy/numpy/issues/9165
# # return anp.broadcast_to(anp.reshape(g, new_shape), shape), num_reps
# return anp.reshape(g, new_shape) + onp.zeros(shape, dtype=dtype), num_reps
. Output only the next line. | shape, dtype = np.shape(x), np.result_type(x) |
Continue the code snippet: <|code_start|>defvjp(beta,
lambda ans, a, b: unbroadcast_f(a, lambda g: g * ans * (psi(a) - psi(a + b))),
lambda ans, a, b: unbroadcast_f(b, lambda g: g * ans * (psi(b) - psi(a + b))))
defvjp(betainc,
lambda ans, a, b, x: unbroadcast_f(x, lambda g: g * np.power(x, a - 1) * np.power(1 - x, b - 1) / beta(a, b)),
argnums=[2])
defvjp(betaln,
lambda ans, a, b: unbroadcast_f(a, lambda g: g * (psi(a) - psi(a + b))),
lambda ans, a, b: unbroadcast_f(b, lambda g: g * (psi(b) - psi(a + b))))
### Gamma functions ###
polygamma = primitive(scipy.special.polygamma)
psi = primitive(scipy.special.psi) # psi(x) is just polygamma(0, x)
digamma = primitive(scipy.special.digamma) # digamma is another name for psi.
gamma = primitive(scipy.special.gamma)
gammaln = primitive(scipy.special.gammaln)
gammainc = primitive(scipy.special.gammainc)
gammaincc = primitive(scipy.special.gammaincc)
gammasgn = primitive(scipy.special.gammasgn)
rgamma = primitive(scipy.special.rgamma)
multigammaln = primitive(scipy.special.multigammaln)
defvjp(gammasgn, None)
defvjp(polygamma, None, lambda ans, n, x: lambda g: g * polygamma(n + 1, x))
defvjp(psi, lambda ans, x: lambda g: g * polygamma(1, x))
defvjp(digamma, lambda ans, x: lambda g: g * polygamma(1, x))
defvjp(gamma, lambda ans, x: lambda g: g * ans * psi(x))
defvjp(gammaln, lambda ans, x: lambda g: g * psi(x))
defvjp(rgamma, lambda ans, x: lambda g: g * psi(x) / -gamma(x))
defvjp(multigammaln,lambda ans, a, d: lambda g:
<|code_end|>
. Use current file imports:
import scipy.special
import autograd.numpy as np
from autograd.extend import primitive, defvjp, defjvp
from autograd.numpy.numpy_vjps import unbroadcast_f, repeat_to_match_shape
and context (classes, functions, or code) from other files:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# def repeat_to_match_shape(g, shape, dtype, axis, keepdims):
# """Returns the array g repeated along axis to fit vector space vs.
# Also returns the number of repetitions of the array."""
# if shape == ():
# return g, 1
# axis = list(axis) if isinstance(axis, tuple) else axis
# new_shape = onp.array(shape)
# new_shape[axis] = 1
# num_reps = onp.prod(onp.array(shape)[axis])
# # Can't use broadcast_to because of numpy bug: https://github.com/numpy/numpy/issues/9165
# # return anp.broadcast_to(anp.reshape(g, new_shape), shape), num_reps
# return anp.reshape(g, new_shape) + onp.zeros(shape, dtype=dtype), num_reps
. Output only the next line. | g * np.sum(digamma(np.expand_dims(a, -1) - np.arange(d)/2.), -1), |
Predict the next line after this snippet: <|code_start|>from __future__ import absolute_import, division
cdf = primitive(scipy.stats.chi2.cdf)
logpdf = primitive(scipy.stats.chi2.logpdf)
pdf = primitive(scipy.stats.chi2.pdf)
<|code_end|>
using the current file's imports:
import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import gamma
and any relevant context from other files:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# Path: autograd/scipy/special.py
# def make_gammainc_vjp_arg1(sign):
# def gammainc_vjp_arg1(ans, a, x):
# def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
# def vjp(g):
# def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
. Output only the next line. | def grad_chi2_logpdf(x, df): |
Using the snippet: <|code_start|>from __future__ import absolute_import, division
cdf = primitive(scipy.stats.chi2.cdf)
logpdf = primitive(scipy.stats.chi2.logpdf)
pdf = primitive(scipy.stats.chi2.pdf)
<|code_end|>
, determine the next line of code. You have imports:
import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import gamma
and context (class names, function names, or code) available:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# Path: autograd/scipy/special.py
# def make_gammainc_vjp_arg1(sign):
# def gammainc_vjp_arg1(ans, a, x):
# def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
# def vjp(g):
# def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
. Output only the next line. | def grad_chi2_logpdf(x, df): |
Predict the next line for this snippet: <|code_start|>from __future__ import absolute_import, division
cdf = primitive(scipy.stats.chi2.cdf)
logpdf = primitive(scipy.stats.chi2.logpdf)
pdf = primitive(scipy.stats.chi2.pdf)
def grad_chi2_logpdf(x, df):
<|code_end|>
with the help of current file imports:
import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import gamma
and context from other files:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# Path: autograd/scipy/special.py
# def make_gammainc_vjp_arg1(sign):
# def gammainc_vjp_arg1(ans, a, x):
# def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
# def vjp(g):
# def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
, which may contain function names, class names, or code. Output only the next line. | return np.where(df % 1 == 0, (df - x - 2) / (2 * x), 0) |
Next line prediction: <|code_start|>from __future__ import absolute_import
rvs = primitive(scipy.stats.dirichlet.rvs)
pdf = primitive(scipy.stats.dirichlet.pdf)
logpdf = primitive(scipy.stats.dirichlet.logpdf)
defvjp(logpdf,lambda ans, x, alpha: lambda g:
g * (alpha - 1) / x,
<|code_end|>
. Use current file imports:
(import scipy.stats
import autograd.numpy as np
from autograd.scipy.special import digamma
from autograd.extend import primitive, defvjp)
and context including class names, function names, or small code snippets from other files:
# Path: autograd/scipy/special.py
# def make_gammainc_vjp_arg1(sign):
# def gammainc_vjp_arg1(ans, a, x):
# def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
# def vjp(g):
# def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
#
# Path: autograd/extend.py
. Output only the next line. | lambda ans, x, alpha: lambda g: |
Predict the next line after this snippet: <|code_start|>from __future__ import absolute_import
rvs = primitive(scipy.stats.dirichlet.rvs)
pdf = primitive(scipy.stats.dirichlet.pdf)
logpdf = primitive(scipy.stats.dirichlet.logpdf)
defvjp(logpdf,lambda ans, x, alpha: lambda g:
g * (alpha - 1) / x,
lambda ans, x, alpha: lambda g:
g * (digamma(np.sum(alpha)) - digamma(alpha) + np.log(x)))
# Same as log pdf, but multiplied by the pdf (ans).
defvjp(pdf,lambda ans, x, alpha: lambda g:
<|code_end|>
using the current file's imports:
import scipy.stats
import autograd.numpy as np
from autograd.scipy.special import digamma
from autograd.extend import primitive, defvjp
and any relevant context from other files:
# Path: autograd/scipy/special.py
# def make_gammainc_vjp_arg1(sign):
# def gammainc_vjp_arg1(ans, a, x):
# def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
# def vjp(g):
# def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
#
# Path: autograd/extend.py
. Output only the next line. | g * ans * (alpha - 1) / x, |
Continue the code snippet: <|code_start|> shapes = {'A' : {s : (A_shape[i] for i in ax) for s, ax in iteritems(axes['A'])},
'B' : {s : (B_shape[i] for i in ax) for s, ax in iteritems(axes['B'])}}
shapes['out'] = {'ignore_A' : shapes['A']['ignore'],
'ignore_B' : shapes['B']['ignore'],
'conv' : conv_shape}
return axes, shapes
def compute_conv_size(A_size, B_size, mode):
if mode == 'full':
return A_size + B_size - 1
elif mode == 'same':
return A_size
elif mode == 'valid':
return abs(A_size - B_size) + 1
else:
raise Exception("Mode {0} not recognized".format(mode))
def flipped_idxs(ndim, axes):
new_idxs = [slice(None)] * ndim
for ax in axes:
new_idxs[ax] = slice(None, None, -1)
return tuple(new_idxs)
def grad_convolve(argnum, ans, A, B, axes=None, dot_axes=[(),()], mode='full'):
assert mode in ['valid', 'full'], "Grad for mode {0} not yet implemented".format(mode)
axes, shapes = parse_axes(A.shape, B.shape, axes, dot_axes, mode)
if argnum == 0:
X, Y = A, B
_X_, _Y_ = 'A', 'B'
ignore_Y = 'ignore_B'
<|code_end|>
. Use current file imports:
from builtins import range, zip
from functools import partial
from autograd.extend import primitive, defvjp
from numpy.lib.stride_tricks import as_strided
from future.utils import iteritems
import autograd.numpy as np
import numpy as npo # original numpy
and context (classes, functions, or code) from other files:
# Path: autograd/extend.py
. Output only the next line. | elif argnum == 1: |
Based on the snippet: <|code_start|> if mode == 'full':
return A_size + B_size - 1
elif mode == 'same':
return A_size
elif mode == 'valid':
return abs(A_size - B_size) + 1
else:
raise Exception("Mode {0} not recognized".format(mode))
def flipped_idxs(ndim, axes):
new_idxs = [slice(None)] * ndim
for ax in axes:
new_idxs[ax] = slice(None, None, -1)
return tuple(new_idxs)
def grad_convolve(argnum, ans, A, B, axes=None, dot_axes=[(),()], mode='full'):
assert mode in ['valid', 'full'], "Grad for mode {0} not yet implemented".format(mode)
axes, shapes = parse_axes(A.shape, B.shape, axes, dot_axes, mode)
if argnum == 0:
X, Y = A, B
_X_, _Y_ = 'A', 'B'
ignore_Y = 'ignore_B'
elif argnum == 1:
X, Y = B, A
_X_, _Y_ = 'B', 'A'
ignore_Y = 'ignore_A'
else:
raise NotImplementedError("Can't take grad of convolve w.r.t. arg {0}".format(argnum))
if mode == 'full':
<|code_end|>
, predict the immediate next line with the help of imports:
from builtins import range, zip
from functools import partial
from autograd.extend import primitive, defvjp
from numpy.lib.stride_tricks import as_strided
from future.utils import iteritems
import autograd.numpy as np
import numpy as npo # original numpy
and context (classes, functions, sometimes code) from other files:
# Path: autograd/extend.py
. Output only the next line. | new_mode = 'valid' |
Here is a snippet: <|code_start|>
def check_vspace(value):
vs = vspace(value)
# --- required attributes ---
size = vs.size
add = vs.add
scalar_mul = vs.scalar_mul
inner_prod = vs.inner_prod
randn = vs.randn
zeros = vs.zeros
ones = vs.ones
standard_basis = vs.standard_basis
# --- util ---
def randns(N=2):
return [randn() for i in range(N)]
def rand_scalar():
return float(np.random.randn())
def rand_scalars(N=2):
return [rand_scalar() for i in range(N)]
def vector_close(x, y):
z = randn()
return scalar_close(inner_prod(z, x), inner_prod(z, y))
# --- vector space axioms ---
def associativity_of_add(x, y, z):
return vector_close(add(x, add(y, z)),
add(add(x, y), z))
def commutativity_of_add(x, y):
<|code_end|>
. Write the next line using the current file imports:
from functools import reduce
from autograd.core import vspace
from autograd.numpy.numpy_vspaces import ArrayVSpace
from autograd.test_util import check_grads, scalar_close
import numpy as np
import itertools as it
and context from other files:
# Path: autograd/core.py
# def vspace(value):
# try:
# return VSpace.mappings[type(value)](value)
# except KeyError:
# if isbox(value):
# return vspace(getval(value))
# else:
# raise TypeError("Can't find vector space for value {} of type {}. "
# "Valid types are {}".format(
# value, type(value), VSpace.mappings.keys()))
#
# Path: autograd/numpy/numpy_vspaces.py
# class ArrayVSpace(VSpace):
# def __init__(self, value):
# value = np.array(value, copy=False)
# self.shape = value.shape
# self.dtype = value.dtype
#
# @property
# def size(self): return np.prod(self.shape)
# @property
# def ndim(self): return len(self.shape)
# def zeros(self): return np.zeros(self.shape, dtype=self.dtype)
# def ones(self): return np.ones( self.shape, dtype=self.dtype)
#
# def standard_basis(self):
# for idxs in np.ndindex(*self.shape):
# vect = np.zeros(self.shape, dtype=self.dtype)
# vect[idxs] = 1
# yield vect
#
# def randn(self):
# return np.array(np.random.randn(*self.shape)).astype(self.dtype)
#
# def _inner_prod(self, x, y):
# return np.dot(np.ravel(x), np.ravel(y))
, which may include functions, classes, or code. Output only the next line. | return vector_close(add(x, y), add(y, x)) |
Continue the code snippet: <|code_start|> yield self.top
self.top -= 1
trace_stack = TraceStack()
class Box(object):
type_mappings = {}
types = set()
__slots__ = ['_value', '_trace', '_node']
def __init__(self, value, trace, node):
self._value = value
self._node = node
self._trace = trace
def __bool__(self):
return bool(self._value)
__nonzero__ = __bool__
def __str__(self):
return "Autograd {0} with value {1}".format(
type(self).__name__, str(self._value))
@classmethod
def register(cls, value_type):
Box.types.add(cls)
Box.type_mappings[value_type] = cls
Box.type_mappings[cls] = cls
box_type_mappings = Box.type_mappings
<|code_end|>
. Use current file imports:
import warnings
from contextlib import contextmanager
from collections import defaultdict
from .util import subvals, toposort
from .wrap_util import wraps
and context (classes, functions, or code) from other files:
# Path: autograd/util.py
# def subvals(x, ivs):
# x_ = list(x)
# for i, v in ivs:
# x_[i] = v
# return tuple(x_)
#
# def toposort(end_node, parents=operator.attrgetter('parents')):
# child_counts = {}
# stack = [end_node]
# while stack:
# node = stack.pop()
# if node in child_counts:
# child_counts[node] += 1
# else:
# child_counts[node] = 1
# stack.extend(parents(node))
#
# childless_nodes = [end_node]
# while childless_nodes:
# node = childless_nodes.pop()
# yield node
# for parent in parents(node):
# if child_counts[parent] == 1:
# childless_nodes.append(parent)
# else:
# child_counts[parent] -= 1
. Output only the next line. | def new_box(value, trace, node): |
Next line prediction: <|code_start|>"""This file doesn't import the numpy wrapper, to check if core works
on basic operations even without numpy."""
from __future__ import absolute_import
@unary_to_nary
def grad(fun, x):
vjp, _ = make_vjp(fun, x)
return vjp(1.0)
# Non-numpy gradient checking functions.
def nd(f, x, eps=1e-4):
return (f(x + eps/2) - f(x - eps/2)) / eps
def check_close(a, b, atol=1e-4, rtol=1e-4):
assert abs(a - b) < atol + rtol*abs(b), "Diffs are: {0}".format(a - b)
def check_binary_func(fun, independent=False):
with warnings.catch_warnings(record=independent) as w:
x, y = 0.7, 1.8
a = grad(fun)(x, y)
b = nd(lambda x: fun(x, y), x)
check_close(a, b)
a = grad(fun, 1)(x, y)
b = nd(lambda y: fun(x, y), y)
check_close(a, b)
def test_add(): check_binary_func(lambda x, y: x + y)
def test_sub(): check_binary_func(lambda x, y: x - y)
<|code_end|>
. Use current file imports:
(import warnings
from autograd.core import make_vjp
from autograd.wrap_util import unary_to_nary)
and context including class names, function names, or small code snippets from other files:
# Path: autograd/core.py
# def make_vjp(fun, x):
# start_node = VJPNode.new_root()
# end_value, end_node = trace(start_node, fun, x)
# if end_node is None:
# def vjp(g): return vspace(x).zeros()
# else:
# def vjp(g): return backward_pass(g, end_node)
# return vjp, end_value
. Output only the next line. | def test_div(): check_binary_func(lambda x, y: x / y) |
Predict the next line for this snippet: <|code_start|>
class ArrayVSpace(VSpace):
def __init__(self, value):
value = np.array(value, copy=False)
self.shape = value.shape
self.dtype = value.dtype
@property
def size(self): return np.prod(self.shape)
@property
def ndim(self): return len(self.shape)
def zeros(self): return np.zeros(self.shape, dtype=self.dtype)
def ones(self): return np.ones( self.shape, dtype=self.dtype)
def standard_basis(self):
for idxs in np.ndindex(*self.shape):
vect = np.zeros(self.shape, dtype=self.dtype)
vect[idxs] = 1
yield vect
def randn(self):
return np.array(np.random.randn(*self.shape)).astype(self.dtype)
def _inner_prod(self, x, y):
<|code_end|>
with the help of current file imports:
import numpy as np
from autograd.extend import VSpace
and context from other files:
# Path: autograd/extend.py
, which may contain function names, class names, or code. Output only the next line. | return np.dot(np.ravel(x), np.ravel(y)) |
Based on the snippet: <|code_start|>
# Set up figure.
fig = plt.figure(figsize=(12, 8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)
def callback(params, t, g):
print("Iteration {} lower bound {}".format(t, -objective(params, t)))
# Sample functions from posterior.
rs = npr.RandomState(0)
mean, log_std = unpack_params(params)
#rs = npr.RandomState(0)
sample_weights = rs.randn(10, num_weights) * np.exp(log_std) + mean
plot_inputs = np.linspace(-8, 8, num=400)
outputs = predictions(sample_weights, np.expand_dims(plot_inputs, 1))
# Plot data and functions.
plt.cla()
ax.plot(inputs.ravel(), targets.ravel(), 'bx')
ax.plot(plot_inputs, outputs[:, :, 0].T)
ax.set_ylim([-2, 3])
plt.draw()
plt.pause(1.0/60.0)
# Initialize variational parameters
rs = npr.RandomState(0)
init_mean = rs.randn(num_weights)
<|code_end|>
, predict the immediate next line with the help of imports:
import matplotlib.pyplot as plt
import autograd.numpy as np
import autograd.numpy.random as npr
from black_box_svi import black_box_variational_inference
from autograd.misc.optimizers import adam
and context (classes, functions, sometimes code) from other files:
# Path: autograd/misc/optimizers.py
# @unflatten_optimizer
# def adam(grad, x, callback=None, num_iters=100,
# step_size=0.001, b1=0.9, b2=0.999, eps=10**-8):
# """Adam as described in http://arxiv.org/pdf/1412.6980.pdf.
# It's basically RMSprop with momentum and some correction terms."""
# m = np.zeros(len(x))
# v = np.zeros(len(x))
# for i in range(num_iters):
# g = grad(x, i)
# if callback: callback(x, i, g)
# m = (1 - b1) * g + b1 * m # First moment estimate.
# v = (1 - b2) * (g**2) + b2 * v # Second moment estimate.
# mhat = m / (1 - b1**(i + 1)) # Bias correction.
# vhat = v / (1 - b2**(i + 1))
# x = x - step_size*mhat/(np.sqrt(vhat) + eps)
# return x
. Output only the next line. | init_log_std = -5 * np.ones(num_weights) |
Here is a snippet: <|code_start|> return vjp
defvjp(sqrtm, _vjp_sqrtm)
def _flip(a, trans):
if anp.iscomplexobj(a):
return 'H' if trans in ('N', 0) else 'N'
else:
return 'T' if trans in ('N', 0) else 'N'
def grad_solve_triangular(ans, a, b, trans=0, lower=False, **kwargs):
tri = anp.tril if (lower ^ (_flip(a, trans) == 'N')) else anp.triu
transpose = lambda x: x if _flip(a, trans) != 'N' else x.T
al2d = lambda x: x if x.ndim > 1 else x[...,None]
def vjp(g):
v = al2d(solve_triangular(a, g, trans=_flip(a, trans), lower=lower))
return -transpose(tri(anp.dot(v, al2d(ans).T)))
return vjp
defvjp(solve_triangular,
grad_solve_triangular,
lambda ans, a, b, trans=0, lower=False, **kwargs:
lambda g: solve_triangular(a, g, trans=_flip(a, trans), lower=lower))
def _jvp_sqrtm(dA, ans, A, disp=True, blocksize=64):
assert disp, "sqrtm jvp not implemented for disp=False"
return solve_sylvester(ans, ans, dA)
defjvp(sqrtm, _jvp_sqrtm)
def _jvp_sylvester(argnums, dms, ans, args, _):
a, b, q = args
<|code_end|>
. Write the next line using the current file imports:
import scipy.linalg
import autograd.numpy as anp
from autograd.numpy.numpy_wrapper import wrap_namespace
from autograd.extend import defvjp, defvjp_argnums, defjvp, defjvp_argnums
and context from other files:
# Path: autograd/numpy/numpy_wrapper.py
# def wrap_namespace(old, new):
# unchanged_types = {float, int, type(None), type}
# int_types = {_np.int8, _np.int16, _np.int32, _np.int64, _np.integer}
# for name, obj in old.items():
# if obj in notrace_functions:
# new[name] = notrace_primitive(obj)
# elif callable(obj) and type(obj) is not type:
# new[name] = primitive(obj)
# elif type(obj) is type and obj in int_types:
# new[name] = wrap_intdtype(obj)
# elif type(obj) in unchanged_types:
# new[name] = obj
#
# Path: autograd/extend.py
, which may include functions, classes, or code. Output only the next line. | if 0 in argnums: |
Given the following code snippet before the placeholder: <|code_start|>from __future__ import division
wrap_namespace(scipy.linalg.__dict__, globals()) # populates module namespace
def _vjp_sqrtm(ans, A, disp=True, blocksize=64):
assert disp, "sqrtm vjp not implemented for disp=False"
ans_transp = anp.transpose(ans)
def vjp(g):
return anp.real(solve_sylvester(ans_transp, ans_transp, g))
return vjp
defvjp(sqrtm, _vjp_sqrtm)
def _flip(a, trans):
if anp.iscomplexobj(a):
return 'H' if trans in ('N', 0) else 'N'
else:
return 'T' if trans in ('N', 0) else 'N'
def grad_solve_triangular(ans, a, b, trans=0, lower=False, **kwargs):
tri = anp.tril if (lower ^ (_flip(a, trans) == 'N')) else anp.triu
transpose = lambda x: x if _flip(a, trans) != 'N' else x.T
al2d = lambda x: x if x.ndim > 1 else x[...,None]
def vjp(g):
v = al2d(solve_triangular(a, g, trans=_flip(a, trans), lower=lower))
return -transpose(tri(anp.dot(v, al2d(ans).T)))
return vjp
defvjp(solve_triangular,
grad_solve_triangular,
<|code_end|>
, predict the next line using imports from the current file:
import scipy.linalg
import autograd.numpy as anp
from autograd.numpy.numpy_wrapper import wrap_namespace
from autograd.extend import defvjp, defvjp_argnums, defjvp, defjvp_argnums
and context including class names, function names, and sometimes code from other files:
# Path: autograd/numpy/numpy_wrapper.py
# def wrap_namespace(old, new):
# unchanged_types = {float, int, type(None), type}
# int_types = {_np.int8, _np.int16, _np.int32, _np.int64, _np.integer}
# for name, obj in old.items():
# if obj in notrace_functions:
# new[name] = notrace_primitive(obj)
# elif callable(obj) and type(obj) is not type:
# new[name] = primitive(obj)
# elif type(obj) is type and obj in int_types:
# new[name] = wrap_intdtype(obj)
# elif type(obj) in unchanged_types:
# new[name] = obj
#
# Path: autograd/extend.py
. Output only the next line. | lambda ans, a, b, trans=0, lower=False, **kwargs: |
Predict the next line after this snippet: <|code_start|>from __future__ import division
wrap_namespace(scipy.linalg.__dict__, globals()) # populates module namespace
def _vjp_sqrtm(ans, A, disp=True, blocksize=64):
assert disp, "sqrtm vjp not implemented for disp=False"
ans_transp = anp.transpose(ans)
def vjp(g):
return anp.real(solve_sylvester(ans_transp, ans_transp, g))
return vjp
defvjp(sqrtm, _vjp_sqrtm)
<|code_end|>
using the current file's imports:
import scipy.linalg
import autograd.numpy as anp
from autograd.numpy.numpy_wrapper import wrap_namespace
from autograd.extend import defvjp, defvjp_argnums, defjvp, defjvp_argnums
and any relevant context from other files:
# Path: autograd/numpy/numpy_wrapper.py
# def wrap_namespace(old, new):
# unchanged_types = {float, int, type(None), type}
# int_types = {_np.int8, _np.int16, _np.int32, _np.int64, _np.integer}
# for name, obj in old.items():
# if obj in notrace_functions:
# new[name] = notrace_primitive(obj)
# elif callable(obj) and type(obj) is not type:
# new[name] = primitive(obj)
# elif type(obj) is type and obj in int_types:
# new[name] = wrap_intdtype(obj)
# elif type(obj) in unchanged_types:
# new[name] = obj
#
# Path: autograd/extend.py
. Output only the next line. | def _flip(a, trans): |
Using the snippet: <|code_start|>from __future__ import division
wrap_namespace(scipy.linalg.__dict__, globals()) # populates module namespace
def _vjp_sqrtm(ans, A, disp=True, blocksize=64):
assert disp, "sqrtm vjp not implemented for disp=False"
ans_transp = anp.transpose(ans)
def vjp(g):
return anp.real(solve_sylvester(ans_transp, ans_transp, g))
return vjp
defvjp(sqrtm, _vjp_sqrtm)
def _flip(a, trans):
if anp.iscomplexobj(a):
return 'H' if trans in ('N', 0) else 'N'
else:
return 'T' if trans in ('N', 0) else 'N'
<|code_end|>
, determine the next line of code. You have imports:
import scipy.linalg
import autograd.numpy as anp
from autograd.numpy.numpy_wrapper import wrap_namespace
from autograd.extend import defvjp, defvjp_argnums, defjvp, defjvp_argnums
and context (class names, function names, or code) available:
# Path: autograd/numpy/numpy_wrapper.py
# def wrap_namespace(old, new):
# unchanged_types = {float, int, type(None), type}
# int_types = {_np.int8, _np.int16, _np.int32, _np.int64, _np.integer}
# for name, obj in old.items():
# if obj in notrace_functions:
# new[name] = notrace_primitive(obj)
# elif callable(obj) and type(obj) is not type:
# new[name] = primitive(obj)
# elif type(obj) is type and obj in int_types:
# new[name] = wrap_intdtype(obj)
# elif type(obj) in unchanged_types:
# new[name] = obj
#
# Path: autograd/extend.py
. Output only the next line. | def grad_solve_triangular(ans, a, b, trans=0, lower=False, **kwargs): |
Next line prediction: <|code_start|>from __future__ import division
wrap_namespace(scipy.linalg.__dict__, globals()) # populates module namespace
def _vjp_sqrtm(ans, A, disp=True, blocksize=64):
assert disp, "sqrtm vjp not implemented for disp=False"
ans_transp = anp.transpose(ans)
def vjp(g):
return anp.real(solve_sylvester(ans_transp, ans_transp, g))
return vjp
defvjp(sqrtm, _vjp_sqrtm)
def _flip(a, trans):
if anp.iscomplexobj(a):
return 'H' if trans in ('N', 0) else 'N'
<|code_end|>
. Use current file imports:
(import scipy.linalg
import autograd.numpy as anp
from autograd.numpy.numpy_wrapper import wrap_namespace
from autograd.extend import defvjp, defvjp_argnums, defjvp, defjvp_argnums)
and context including class names, function names, or small code snippets from other files:
# Path: autograd/numpy/numpy_wrapper.py
# def wrap_namespace(old, new):
# unchanged_types = {float, int, type(None), type}
# int_types = {_np.int8, _np.int16, _np.int32, _np.int64, _np.integer}
# for name, obj in old.items():
# if obj in notrace_functions:
# new[name] = notrace_primitive(obj)
# elif callable(obj) and type(obj) is not type:
# new[name] = primitive(obj)
# elif type(obj) is type and obj in int_types:
# new[name] = wrap_intdtype(obj)
# elif type(obj) in unchanged_types:
# new[name] = obj
#
# Path: autograd/extend.py
. Output only the next line. | else: |
Given the following code snippet before the placeholder: <|code_start|>from __future__ import absolute_import
cdf = primitive(scipy.stats.poisson.cdf)
logpmf = primitive(scipy.stats.poisson.logpmf)
pmf = primitive(scipy.stats.poisson.pmf)
<|code_end|>
, predict the next line using imports from the current file:
import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
and context including class names, function names, and sometimes code from other files:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
. Output only the next line. | def grad_poisson_logpmf(k, mu): |
Given the code snippet: <|code_start|>from __future__ import absolute_import
cdf = primitive(scipy.stats.poisson.cdf)
logpmf = primitive(scipy.stats.poisson.logpmf)
pmf = primitive(scipy.stats.poisson.pmf)
def grad_poisson_logpmf(k, mu):
<|code_end|>
, generate the next line using the imports in this file:
import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
and context (functions, classes, or occasionally code) from other files:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
. Output only the next line. | return np.where(k % 1 == 0, k / mu - 1, 0) |
Continue the code snippet: <|code_start|>from __future__ import absolute_import
cdf = primitive(scipy.stats.gamma.cdf)
logpdf = primitive(scipy.stats.gamma.logpdf)
pdf = primitive(scipy.stats.gamma.pdf)
def grad_gamma_logpdf_arg0(x, a):
<|code_end|>
. Use current file imports:
import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import gamma, psi
and context (classes, functions, or code) from other files:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# Path: autograd/scipy/special.py
# def make_gammainc_vjp_arg1(sign):
# def gammainc_vjp_arg1(ans, a, x):
# def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
# def vjp(g):
# def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
. Output only the next line. | return (a - x - 1) / x |
Given the code snippet: <|code_start|>from __future__ import absolute_import
cdf = primitive(scipy.stats.gamma.cdf)
logpdf = primitive(scipy.stats.gamma.logpdf)
pdf = primitive(scipy.stats.gamma.pdf)
<|code_end|>
, generate the next line using the imports in this file:
import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import gamma, psi
and context (functions, classes, or occasionally code) from other files:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# Path: autograd/scipy/special.py
# def make_gammainc_vjp_arg1(sign):
# def gammainc_vjp_arg1(ans, a, x):
# def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
# def vjp(g):
# def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
. Output only the next line. | def grad_gamma_logpdf_arg0(x, a): |
Based on the snippet: <|code_start|>from __future__ import absolute_import
cdf = primitive(scipy.stats.gamma.cdf)
logpdf = primitive(scipy.stats.gamma.logpdf)
pdf = primitive(scipy.stats.gamma.pdf)
def grad_gamma_logpdf_arg0(x, a):
<|code_end|>
, predict the immediate next line with the help of imports:
import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import gamma, psi
and context (classes, functions, sometimes code) from other files:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# Path: autograd/scipy/special.py
# def make_gammainc_vjp_arg1(sign):
# def gammainc_vjp_arg1(ans, a, x):
# def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
# def vjp(g):
# def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
. Output only the next line. | return (a - x - 1) / x |
Next line prediction: <|code_start|>from __future__ import absolute_import
cdf = primitive(scipy.stats.gamma.cdf)
logpdf = primitive(scipy.stats.gamma.logpdf)
pdf = primitive(scipy.stats.gamma.pdf)
def grad_gamma_logpdf_arg0(x, a):
return (a - x - 1) / x
<|code_end|>
. Use current file imports:
(import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import gamma, psi)
and context including class names, function names, or small code snippets from other files:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# Path: autograd/scipy/special.py
# def make_gammainc_vjp_arg1(sign):
# def gammainc_vjp_arg1(ans, a, x):
# def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
# def vjp(g):
# def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
. Output only the next line. | def grad_gamma_logpdf_arg1(x, a): |
Here is a snippet: <|code_start|>from __future__ import absolute_import
cdf = primitive(scipy.stats.beta.cdf)
logpdf = primitive(scipy.stats.beta.logpdf)
pdf = primitive(scipy.stats.beta.pdf)
def grad_beta_logpdf_arg0(x, a, b):
<|code_end|>
. Write the next line using the current file imports:
import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import beta, psi
and context from other files:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# Path: autograd/scipy/special.py
# def make_gammainc_vjp_arg1(sign):
# def gammainc_vjp_arg1(ans, a, x):
# def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
# def vjp(g):
# def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
, which may include functions, classes, or code. Output only the next line. | return (1 + a * (x-1) + x * (b-2)) / (x * (x-1)) |
Given the code snippet: <|code_start|>from __future__ import absolute_import
cdf = primitive(scipy.stats.beta.cdf)
logpdf = primitive(scipy.stats.beta.logpdf)
pdf = primitive(scipy.stats.beta.pdf)
<|code_end|>
, generate the next line using the imports in this file:
import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import beta, psi
and context (functions, classes, or occasionally code) from other files:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# Path: autograd/scipy/special.py
# def make_gammainc_vjp_arg1(sign):
# def gammainc_vjp_arg1(ans, a, x):
# def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
# def vjp(g):
# def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
. Output only the next line. | def grad_beta_logpdf_arg0(x, a, b): |
Continue the code snippet: <|code_start|>from __future__ import absolute_import
cdf = primitive(scipy.stats.beta.cdf)
logpdf = primitive(scipy.stats.beta.logpdf)
pdf = primitive(scipy.stats.beta.pdf)
def grad_beta_logpdf_arg0(x, a, b):
return (1 + a * (x-1) + x * (b-2)) / (x * (x-1))
<|code_end|>
. Use current file imports:
import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import beta, psi
and context (classes, functions, or code) from other files:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# Path: autograd/scipy/special.py
# def make_gammainc_vjp_arg1(sign):
# def gammainc_vjp_arg1(ans, a, x):
# def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
# def vjp(g):
# def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
. Output only the next line. | def grad_beta_logpdf_arg1(x, a, b): |
Given the code snippet: <|code_start|>from __future__ import absolute_import
cdf = primitive(scipy.stats.beta.cdf)
logpdf = primitive(scipy.stats.beta.logpdf)
pdf = primitive(scipy.stats.beta.pdf)
def grad_beta_logpdf_arg0(x, a, b):
return (1 + a * (x-1) + x * (b-2)) / (x * (x-1))
def grad_beta_logpdf_arg1(x, a, b):
return np.log(x) - psi(a) + psi(a + b)
<|code_end|>
, generate the next line using the imports in this file:
import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import beta, psi
and context (functions, classes, or occasionally code) from other files:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# Path: autograd/scipy/special.py
# def make_gammainc_vjp_arg1(sign):
# def gammainc_vjp_arg1(ans, a, x):
# def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
# def vjp(g):
# def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
. Output only the next line. | def grad_beta_logpdf_arg2(x, a, b): |
Here is a snippet: <|code_start|>from __future__ import absolute_import
cdf = primitive(scipy.stats.beta.cdf)
logpdf = primitive(scipy.stats.beta.logpdf)
pdf = primitive(scipy.stats.beta.pdf)
def grad_beta_logpdf_arg0(x, a, b):
<|code_end|>
. Write the next line using the current file imports:
import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import beta, psi
and context from other files:
# Path: autograd/extend.py
#
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# Path: autograd/scipy/special.py
# def make_gammainc_vjp_arg1(sign):
# def gammainc_vjp_arg1(ans, a, x):
# def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
# def vjp(g):
# def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
, which may include functions, classes, or code. Output only the next line. | return (1 + a * (x-1) + x * (b-2)) / (x * (x-1)) |
Continue the code snippet: <|code_start|>
def generalized_outer_product(x):
if np.ndim(x) == 1:
return np.outer(x, x)
return np.matmul(x, np.swapaxes(x, -1, -2))
def covgrad(x, mean, cov, allow_singular=False):
if allow_singular:
raise NotImplementedError("The multivariate normal pdf is not "
"differentiable w.r.t. a singular covariance matix")
J = np.linalg.inv(cov)
solved = np.matmul(J, np.expand_dims(x - mean, -1))
return 1./2 * (generalized_outer_product(solved) - J)
def solve(allow_singular):
if allow_singular:
return lambda A, x: np.dot(np.linalg.pinv(A), x)
else:
return np.linalg.solve
defvjp(logpdf,
lambda ans, x, mean, cov, allow_singular=False:
unbroadcast_f(x, lambda g: -np.expand_dims(np.atleast_1d(g), 1) * solve(allow_singular)(cov, (x - mean).T).T),
lambda ans, x, mean, cov, allow_singular=False:
unbroadcast_f(mean, lambda g: np.expand_dims(np.atleast_1d(g), 1) * solve(allow_singular)(cov, (x - mean).T).T),
lambda ans, x, mean, cov, allow_singular=False:
unbroadcast_f(cov, lambda g: np.reshape(g, np.shape(g) + (1, 1)) * covgrad(x, mean, cov, allow_singular)))
# Same as log pdf, but multiplied by the pdf (ans).
defvjp(pdf,
<|code_end|>
. Use current file imports:
import scipy.stats
import autograd.numpy as np
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.extend import primitive, defvjp
and context (classes, functions, or code) from other files:
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# Path: autograd/extend.py
. Output only the next line. | lambda ans, x, mean, cov, allow_singular=False: |
Using the snippet: <|code_start|> raise NotImplementedError("The multivariate normal pdf is not "
"differentiable w.r.t. a singular covariance matix")
J = np.linalg.inv(cov)
solved = np.matmul(J, np.expand_dims(x - mean, -1))
return 1./2 * (generalized_outer_product(solved) - J)
def solve(allow_singular):
if allow_singular:
return lambda A, x: np.dot(np.linalg.pinv(A), x)
else:
return np.linalg.solve
defvjp(logpdf,
lambda ans, x, mean, cov, allow_singular=False:
unbroadcast_f(x, lambda g: -np.expand_dims(np.atleast_1d(g), 1) * solve(allow_singular)(cov, (x - mean).T).T),
lambda ans, x, mean, cov, allow_singular=False:
unbroadcast_f(mean, lambda g: np.expand_dims(np.atleast_1d(g), 1) * solve(allow_singular)(cov, (x - mean).T).T),
lambda ans, x, mean, cov, allow_singular=False:
unbroadcast_f(cov, lambda g: np.reshape(g, np.shape(g) + (1, 1)) * covgrad(x, mean, cov, allow_singular)))
# Same as log pdf, but multiplied by the pdf (ans).
defvjp(pdf,
lambda ans, x, mean, cov, allow_singular=False:
unbroadcast_f(x, lambda g: -np.expand_dims(np.atleast_1d(ans * g), 1) * solve(allow_singular)(cov, (x - mean).T).T),
lambda ans, x, mean, cov, allow_singular=False:
unbroadcast_f(mean, lambda g: np.expand_dims(np.atleast_1d(ans * g), 1) * solve(allow_singular)(cov, (x - mean).T).T),
lambda ans, x, mean, cov, allow_singular=False:
unbroadcast_f(cov, lambda g: np.reshape(ans * g, np.shape(g) + (1, 1)) * covgrad(x, mean, cov, allow_singular)))
defvjp(entropy, None,
<|code_end|>
, determine the next line of code. You have imports:
import scipy.stats
import autograd.numpy as np
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.extend import primitive, defvjp
and context (class names, function names, or code) available:
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# Path: autograd/extend.py
. Output only the next line. | lambda ans, mean, cov: |
Continue the code snippet: <|code_start|> raise NotImplementedError("The multivariate normal pdf is not "
"differentiable w.r.t. a singular covariance matix")
J = np.linalg.inv(cov)
solved = np.matmul(J, np.expand_dims(x - mean, -1))
return 1./2 * (generalized_outer_product(solved) - J)
def solve(allow_singular):
if allow_singular:
return lambda A, x: np.dot(np.linalg.pinv(A), x)
else:
return np.linalg.solve
defvjp(logpdf,
lambda ans, x, mean, cov, allow_singular=False:
unbroadcast_f(x, lambda g: -np.expand_dims(np.atleast_1d(g), 1) * solve(allow_singular)(cov, (x - mean).T).T),
lambda ans, x, mean, cov, allow_singular=False:
unbroadcast_f(mean, lambda g: np.expand_dims(np.atleast_1d(g), 1) * solve(allow_singular)(cov, (x - mean).T).T),
lambda ans, x, mean, cov, allow_singular=False:
unbroadcast_f(cov, lambda g: np.reshape(g, np.shape(g) + (1, 1)) * covgrad(x, mean, cov, allow_singular)))
# Same as log pdf, but multiplied by the pdf (ans).
defvjp(pdf,
lambda ans, x, mean, cov, allow_singular=False:
unbroadcast_f(x, lambda g: -np.expand_dims(np.atleast_1d(ans * g), 1) * solve(allow_singular)(cov, (x - mean).T).T),
lambda ans, x, mean, cov, allow_singular=False:
unbroadcast_f(mean, lambda g: np.expand_dims(np.atleast_1d(ans * g), 1) * solve(allow_singular)(cov, (x - mean).T).T),
lambda ans, x, mean, cov, allow_singular=False:
unbroadcast_f(cov, lambda g: np.reshape(ans * g, np.shape(g) + (1, 1)) * covgrad(x, mean, cov, allow_singular)))
defvjp(entropy, None,
<|code_end|>
. Use current file imports:
import scipy.stats
import autograd.numpy as np
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.extend import primitive, defvjp
and context (classes, functions, or code) from other files:
# Path: autograd/numpy/numpy_vjps.py
# def unbroadcast_f(target, f):
# target_meta = anp.metadata(target)
# return lambda g: unbroadcast(f(g), target_meta)
#
# Path: autograd/extend.py
. Output only the next line. | lambda ans, mean, cov: |
Using the snippet: <|code_start|>def concatenate_args(axis, *args):
return _np.concatenate(args, axis).view(ndarray)
concatenate = lambda arr_list, axis=0 : concatenate_args(axis, *arr_list)
vstack = row_stack = lambda tup: concatenate([atleast_2d(_m) for _m in tup], axis=0)
def hstack(tup):
arrs = [atleast_1d(_m) for _m in tup]
if arrs[0].ndim == 1:
return concatenate(arrs, 0)
return concatenate(arrs, 1)
def column_stack(tup):
arrays = []
for v in tup:
arr = array(v)
if arr.ndim < 2:
arr = array(arr, ndmin=2).T
arrays.append(arr)
return concatenate(arrays, 1)
def array(A, *args, **kwargs):
t = builtins.type(A)
if t in (list, tuple):
return array_from_args(args, kwargs, *map(array, A))
else:
return _array_from_scalar_or_array(args, kwargs, A)
def wrap_if_boxes_inside(raw_array, slow_op_name=None):
if raw_array.dtype is _np.dtype('O'):
if slow_op_name:
warnings.warn("{0} is slow for array inputs. "
<|code_end|>
, determine the next line of code. You have imports:
import types
import warnings
import numpy as _np
import autograd.builtins as builtins
from autograd.extend import primitive, notrace_primitive
from numpy.core.einsumfunc import _parse_einsum_input
and context (class names, function names, or code) available:
# Path: autograd/extend.py
. Output only the next line. | "np.concatenate() is faster.".format(slow_op_name)) |
Predict the next line for this snippet: <|code_start|>
def wrap_namespace(old, new):
unchanged_types = {float, int, type(None), type}
int_types = {_np.int8, _np.int16, _np.int32, _np.int64, _np.integer}
for name, obj in old.items():
if obj in notrace_functions:
new[name] = notrace_primitive(obj)
elif callable(obj) and type(obj) is not type:
new[name] = primitive(obj)
elif type(obj) is type and obj in int_types:
new[name] = wrap_intdtype(obj)
elif type(obj) in unchanged_types:
new[name] = obj
wrap_namespace(_np.__dict__, globals())
# ----- Special treatment of list-input functions -----
@primitive
def concatenate_args(axis, *args):
return _np.concatenate(args, axis).view(ndarray)
concatenate = lambda arr_list, axis=0 : concatenate_args(axis, *arr_list)
vstack = row_stack = lambda tup: concatenate([atleast_2d(_m) for _m in tup], axis=0)
def hstack(tup):
arrs = [atleast_1d(_m) for _m in tup]
if arrs[0].ndim == 1:
return concatenate(arrs, 0)
return concatenate(arrs, 1)
def column_stack(tup):
<|code_end|>
with the help of current file imports:
import types
import warnings
import numpy as _np
import autograd.builtins as builtins
from autograd.extend import primitive, notrace_primitive
from numpy.core.einsumfunc import _parse_einsum_input
and context from other files:
# Path: autograd/extend.py
, which may contain function names, class names, or code. Output only the next line. | arrays = [] |
Given the code snippet: <|code_start|>
def test_check_vjp_1st_order_fail():
@primitive
def foo(x):
return x * 2.0
defvjp(foo, lambda ans, x : lambda g: g * 2.001)
with raises(AssertionError, match="\\(VJP\\) check of foo failed"):
check_grads(foo, modes=['rev'])(1.0)
def test_check_vjp_2nd_order_fail():
@primitive
def foo(x):
return x * 2.0
defvjp(foo, lambda ans, x : lambda g: bar(g) * 2)
@primitive
def bar(x):
return x
defvjp(bar, lambda ans, x : lambda g: g * 1.001)
<|code_end|>
, generate the next line using the imports in this file:
from autograd.tracer import primitive, getval
from autograd.extend import defvjp
from autograd.test_util import check_grads
from pytest import raises
and context (functions, classes, or occasionally code) from other files:
# Path: autograd/tracer.py
# def trace(start_node, fun, x):
# def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
# def initialize_root(self, *args, **kwargs):
# def new_root(cls, *args, **kwargs):
# def primitive(f_raw):
# def f_wrapped(*args, **kwargs):
# def register_notrace(trace_type, primitive_fun):
# def notrace_primitive(f_raw):
# def f_wrapped(*args, **kwargs):
# def find_top_boxed_args(args):
# def __init__(self):
# def new_trace(self):
# def __init__(self, value, trace, node):
# def __bool__(self):
# def __str__(self):
# def register(cls, value_type):
# def new_box(value, trace, node):
# class Node(object):
# class TraceStack(object):
# class Box(object):
#
# Path: autograd/extend.py
. Output only the next line. | with raises(AssertionError, match="\\(VJP\\) check of vjp_foo failed"): |
Here is a snippet: <|code_start|>
def test_check_vjp_1st_order_fail():
@primitive
def foo(x):
return x * 2.0
defvjp(foo, lambda ans, x : lambda g: g * 2.001)
with raises(AssertionError, match="\\(VJP\\) check of foo failed"):
check_grads(foo, modes=['rev'])(1.0)
def test_check_vjp_2nd_order_fail():
@primitive
def foo(x):
return x * 2.0
defvjp(foo, lambda ans, x : lambda g: bar(g) * 2)
@primitive
def bar(x):
return x
defvjp(bar, lambda ans, x : lambda g: g * 1.001)
with raises(AssertionError, match="\\(VJP\\) check of vjp_foo failed"):
<|code_end|>
. Write the next line using the current file imports:
from autograd.tracer import primitive, getval
from autograd.extend import defvjp
from autograd.test_util import check_grads
from pytest import raises
and context from other files:
# Path: autograd/tracer.py
# def trace(start_node, fun, x):
# def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
# def initialize_root(self, *args, **kwargs):
# def new_root(cls, *args, **kwargs):
# def primitive(f_raw):
# def f_wrapped(*args, **kwargs):
# def register_notrace(trace_type, primitive_fun):
# def notrace_primitive(f_raw):
# def f_wrapped(*args, **kwargs):
# def find_top_boxed_args(args):
# def __init__(self):
# def new_trace(self):
# def __init__(self, value, trace, node):
# def __bool__(self):
# def __str__(self):
# def register(cls, value_type):
# def new_box(value, trace, node):
# class Node(object):
# class TraceStack(object):
# class Box(object):
#
# Path: autograd/extend.py
, which may include functions, classes, or code. Output only the next line. | check_grads(foo, modes=['rev'])(1.0) |
Given snippet: <|code_start|>
class ConstGraphNode(Node):
__slots__ = ['parents', 'partial_fun']
def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
args = subvals(args, zip(parent_argnums, repeat(None)))
def partial_fun(partial_args):
return fun(*subvals(args, zip(parent_argnums, partial_args)), **kwargs)
self.parents = parents
self.partial_fun = partial_fun
def initialize_root(self):
self.parents = []
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from itertools import repeat
from autograd.wrap_util import wraps
from autograd.util import subvals, toposort
from autograd.tracer import trace, Node
from functools import partial
and context:
# Path: autograd/util.py
# def subvals(x, ivs):
# x_ = list(x)
# for i, v in ivs:
# x_[i] = v
# return tuple(x_)
#
# def toposort(end_node, parents=operator.attrgetter('parents')):
# child_counts = {}
# stack = [end_node]
# while stack:
# node = stack.pop()
# if node in child_counts:
# child_counts[node] += 1
# else:
# child_counts[node] = 1
# stack.extend(parents(node))
#
# childless_nodes = [end_node]
# while childless_nodes:
# node = childless_nodes.pop()
# yield node
# for parent in parents(node):
# if child_counts[parent] == 1:
# childless_nodes.append(parent)
# else:
# child_counts[parent] -= 1
#
# Path: autograd/tracer.py
# def trace(start_node, fun, x):
# with trace_stack.new_trace() as t:
# start_box = new_box(x, t, start_node)
# end_box = fun(start_box)
# if isbox(end_box) and end_box._trace == start_box._trace:
# return end_box._value, end_box._node
# else:
# warnings.warn("Output seems independent of input.")
# return end_box, None
#
# class Node(object):
# __slots__ = []
# def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
# assert False
#
# def initialize_root(self, *args, **kwargs):
# assert False
#
# @classmethod
# def new_root(cls, *args, **kwargs):
# root = cls.__new__(cls)
# root.initialize_root(*args, **kwargs)
# return root
which might include code, classes, or functions. Output only the next line. | def const_graph_unary(fun): |
Here is a snippet: <|code_start|>
class ConstGraphNode(Node):
__slots__ = ['parents', 'partial_fun']
def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
args = subvals(args, zip(parent_argnums, repeat(None)))
def partial_fun(partial_args):
return fun(*subvals(args, zip(parent_argnums, partial_args)), **kwargs)
self.parents = parents
self.partial_fun = partial_fun
def initialize_root(self):
self.parents = []
def const_graph_unary(fun):
graph = []
<|code_end|>
. Write the next line using the current file imports:
from itertools import repeat
from autograd.wrap_util import wraps
from autograd.util import subvals, toposort
from autograd.tracer import trace, Node
from functools import partial
and context from other files:
# Path: autograd/util.py
# def subvals(x, ivs):
# x_ = list(x)
# for i, v in ivs:
# x_[i] = v
# return tuple(x_)
#
# def toposort(end_node, parents=operator.attrgetter('parents')):
# child_counts = {}
# stack = [end_node]
# while stack:
# node = stack.pop()
# if node in child_counts:
# child_counts[node] += 1
# else:
# child_counts[node] = 1
# stack.extend(parents(node))
#
# childless_nodes = [end_node]
# while childless_nodes:
# node = childless_nodes.pop()
# yield node
# for parent in parents(node):
# if child_counts[parent] == 1:
# childless_nodes.append(parent)
# else:
# child_counts[parent] -= 1
#
# Path: autograd/tracer.py
# def trace(start_node, fun, x):
# with trace_stack.new_trace() as t:
# start_box = new_box(x, t, start_node)
# end_box = fun(start_box)
# if isbox(end_box) and end_box._trace == start_box._trace:
# return end_box._value, end_box._node
# else:
# warnings.warn("Output seems independent of input.")
# return end_box, None
#
# class Node(object):
# __slots__ = []
# def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
# assert False
#
# def initialize_root(self, *args, **kwargs):
# assert False
#
# @classmethod
# def new_root(cls, *args, **kwargs):
# root = cls.__new__(cls)
# root.initialize_root(*args, **kwargs)
# return root
, which may include functions, classes, or code. Output only the next line. | _fun = [fun] # Allow fun to be freed, since it may have bound args |
Given the code snippet: <|code_start|>
self.parents = parents
self.partial_fun = partial_fun
def initialize_root(self):
self.parents = []
def const_graph_unary(fun):
graph = []
_fun = [fun] # Allow fun to be freed, since it may have bound args
def maybe_cached_fun(x):
if graph:
_graph = graph[0]
vals = {_graph[0] : x}
for node in _graph[1:]:
vals[node] = node.partial_fun([vals[p] for p in node.parents])
return vals[node]
else:
start_node = ConstGraphNode.new_root()
end_value, end_node = trace(start_node, _fun.pop(), x)
if end_node is None:
raise Exception("Output is independent of input")
graph.append(list(toposort(end_node))[::-1])
return end_value
return maybe_cached_fun
def const_graph(fun, *args, **kwargs):
partial_fun = partial(fun, *args, **kwargs)
unary_fun = lambda args: partial_fun(*args)
maybe_cached_unary_fun = const_graph_unary(unary_fun)
<|code_end|>
, generate the next line using the imports in this file:
from itertools import repeat
from autograd.wrap_util import wraps
from autograd.util import subvals, toposort
from autograd.tracer import trace, Node
from functools import partial
and context (functions, classes, or occasionally code) from other files:
# Path: autograd/util.py
# def subvals(x, ivs):
# x_ = list(x)
# for i, v in ivs:
# x_[i] = v
# return tuple(x_)
#
# def toposort(end_node, parents=operator.attrgetter('parents')):
# child_counts = {}
# stack = [end_node]
# while stack:
# node = stack.pop()
# if node in child_counts:
# child_counts[node] += 1
# else:
# child_counts[node] = 1
# stack.extend(parents(node))
#
# childless_nodes = [end_node]
# while childless_nodes:
# node = childless_nodes.pop()
# yield node
# for parent in parents(node):
# if child_counts[parent] == 1:
# childless_nodes.append(parent)
# else:
# child_counts[parent] -= 1
#
# Path: autograd/tracer.py
# def trace(start_node, fun, x):
# with trace_stack.new_trace() as t:
# start_box = new_box(x, t, start_node)
# end_box = fun(start_box)
# if isbox(end_box) and end_box._trace == start_box._trace:
# return end_box._value, end_box._node
# else:
# warnings.warn("Output seems independent of input.")
# return end_box, None
#
# class Node(object):
# __slots__ = []
# def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
# assert False
#
# def initialize_root(self, *args, **kwargs):
# assert False
#
# @classmethod
# def new_root(cls, *args, **kwargs):
# root = cls.__new__(cls)
# root.initialize_root(*args, **kwargs)
# return root
. Output only the next line. | @wraps(fun) |
Given the following code snippet before the placeholder: <|code_start|>
# -------------------- reverse mode --------------------
def make_vjp(fun, x):
start_node = VJPNode.new_root()
end_value, end_node = trace(start_node, fun, x)
if end_node is None:
def vjp(g): return vspace(x).zeros()
else:
def vjp(g): return backward_pass(g, end_node)
return vjp, end_value
<|code_end|>
, predict the next line using imports from the current file:
from itertools import count
from functools import reduce
from .tracer import trace, primitive, toposort, Node, Box, isbox, getval
from .util import func, subval
import warnings
and context including class names, function names, and sometimes code from other files:
# Path: autograd/tracer.py
# def trace(start_node, fun, x):
# def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
# def initialize_root(self, *args, **kwargs):
# def new_root(cls, *args, **kwargs):
# def primitive(f_raw):
# def f_wrapped(*args, **kwargs):
# def register_notrace(trace_type, primitive_fun):
# def notrace_primitive(f_raw):
# def f_wrapped(*args, **kwargs):
# def find_top_boxed_args(args):
# def __init__(self):
# def new_trace(self):
# def __init__(self, value, trace, node):
# def __bool__(self):
# def __str__(self):
# def register(cls, value_type):
# def new_box(value, trace, node):
# class Node(object):
# class TraceStack(object):
# class Box(object):
#
# Path: autograd/util.py
# def func(f): return f
#
# def subval(x, i, v):
# x_ = list(x)
# x_[i] = v
# return tuple(x_)
. Output only the next line. | def backward_pass(g, end_node): |
Based on the snippet: <|code_start|>
# -------------------- reverse mode --------------------
def make_vjp(fun, x):
start_node = VJPNode.new_root()
end_value, end_node = trace(start_node, fun, x)
if end_node is None:
def vjp(g): return vspace(x).zeros()
else:
def vjp(g): return backward_pass(g, end_node)
return vjp, end_value
<|code_end|>
, predict the immediate next line with the help of imports:
from itertools import count
from functools import reduce
from .tracer import trace, primitive, toposort, Node, Box, isbox, getval
from .util import func, subval
import warnings
and context (classes, functions, sometimes code) from other files:
# Path: autograd/tracer.py
# def trace(start_node, fun, x):
# def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
# def initialize_root(self, *args, **kwargs):
# def new_root(cls, *args, **kwargs):
# def primitive(f_raw):
# def f_wrapped(*args, **kwargs):
# def register_notrace(trace_type, primitive_fun):
# def notrace_primitive(f_raw):
# def f_wrapped(*args, **kwargs):
# def find_top_boxed_args(args):
# def __init__(self):
# def new_trace(self):
# def __init__(self, value, trace, node):
# def __bool__(self):
# def __str__(self):
# def register(cls, value_type):
# def new_box(value, trace, node):
# class Node(object):
# class TraceStack(object):
# class Box(object):
#
# Path: autograd/util.py
# def func(f): return f
#
# def subval(x, i, v):
# x_ = list(x)
# x_[i] = v
# return tuple(x_)
. Output only the next line. | def backward_pass(g, end_node): |
Given snippet: <|code_start|> vjp = vjpfun(ans, *args, **kwargs)
return lambda g: (vjp(g),)
elif L == 2:
argnum_0, argnum_1 = argnums
try:
vjp_0_fun = vjps_dict[argnum_0]
vjp_1_fun = vjps_dict[argnum_1]
except KeyError:
raise NotImplementedError(
"VJP of {} wrt argnums 0, 1 not defined".format(fun.__name__))
vjp_0 = vjp_0_fun(ans, *args, **kwargs)
vjp_1 = vjp_1_fun(ans, *args, **kwargs)
return lambda g: (vjp_0(g), vjp_1(g))
else:
vjps = [vjps_dict[argnum](ans, *args, **kwargs) for argnum in argnums]
return lambda g: (vjp(g) for vjp in vjps)
defvjp_argnums(fun, vjp_argnums)
def translate_vjp(vjpfun, fun, argnum):
if vjpfun is None:
return lambda ans, *args, **kwargs: lambda g: vspace(args[argnum]).zeros()
elif callable(vjpfun):
return vjpfun
else:
raise Exception("Bad VJP '{}' for '{}'".format(vjpfun, fun.__name__))
# -------------------- forward mode --------------------
def make_jvp(fun, x):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from itertools import count
from functools import reduce
from .tracer import trace, primitive, toposort, Node, Box, isbox, getval
from .util import func, subval
import warnings
and context:
# Path: autograd/tracer.py
# def trace(start_node, fun, x):
# def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
# def initialize_root(self, *args, **kwargs):
# def new_root(cls, *args, **kwargs):
# def primitive(f_raw):
# def f_wrapped(*args, **kwargs):
# def register_notrace(trace_type, primitive_fun):
# def notrace_primitive(f_raw):
# def f_wrapped(*args, **kwargs):
# def find_top_boxed_args(args):
# def __init__(self):
# def new_trace(self):
# def __init__(self, value, trace, node):
# def __bool__(self):
# def __str__(self):
# def register(cls, value_type):
# def new_box(value, trace, node):
# class Node(object):
# class TraceStack(object):
# class Box(object):
#
# Path: autograd/util.py
# def func(f): return f
#
# def subval(x, i, v):
# x_ = list(x)
# x_[i] = v
# return tuple(x_)
which might include code, classes, or functions. Output only the next line. | def jvp(g): |
Given the code snippet: <|code_start|>
def deprecated_defvjp(primitive_fun):
deprecation_msg = deprecated_defvjp_message.format('defvjp')
vjpfuns = {}
def defvjp_unstaged(vjpmaker, argnum=0):
warnings.warn(deprecation_msg)
def staged_vjpmaker(ans, *args, **kwargs):
def vjp(g):
vs, gvs = vspace(args[argnum]), vspace(g)
return vjpmaker(g, ans, vs, gvs, *args, **kwargs)
return vjp
vjpfuns[argnum] = staged_vjpmaker
argnums, vjpmakers = zip(*[(argnum, vjpfuns[argnum])
for argnum in sorted(vjpfuns.keys())])
defvjp(primitive_fun, *vjpmakers, argnums=argnums)
return defvjp_unstaged
def deprecated_defvjp_is_zero(primitive_fun):
deprecation_msg = deprecated_defvjp_message.format('defvjp_is_zero')
zero_vjps = [set()]
def defvjp_is_zero(argnums=(0,)):
warnings.warn(deprecation_msg)
zero_vjps[0] |= set(argnums)
nones = [None] * len(zero_vjps[0])
defvjp(primitive_fun, *nones, argnums=sorted(zero_vjps[0]))
return defvjp_is_zero
def deprecated_defgrad(primitive_fun):
deprecation_msg = deprecated_defvjp_message.format('defgrad')
gradfuns = {}
<|code_end|>
, generate the next line using the imports in this file:
from itertools import count
from functools import reduce
from .tracer import trace, primitive, toposort, Node, Box, isbox, getval
from .util import func, subval
import warnings
and context (functions, classes, or occasionally code) from other files:
# Path: autograd/tracer.py
# def trace(start_node, fun, x):
# def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
# def initialize_root(self, *args, **kwargs):
# def new_root(cls, *args, **kwargs):
# def primitive(f_raw):
# def f_wrapped(*args, **kwargs):
# def register_notrace(trace_type, primitive_fun):
# def notrace_primitive(f_raw):
# def f_wrapped(*args, **kwargs):
# def find_top_boxed_args(args):
# def __init__(self):
# def new_trace(self):
# def __init__(self, value, trace, node):
# def __bool__(self):
# def __str__(self):
# def register(cls, value_type):
# def new_box(value, trace, node):
# class Node(object):
# class TraceStack(object):
# class Box(object):
#
# Path: autograd/util.py
# def func(f): return f
#
# def subval(x, i, v):
# x_ = list(x)
# x_[i] = v
# return tuple(x_)
. Output only the next line. | def defgrad(gradfun, argnum=0): |
Predict the next line after this snippet: <|code_start|>defvjp(func(VSpace.mut_add), None, identity_vjp, identity_vjp)
defvjp(func(VSpace.inner_prod), None,
lambda ans, vs, x, y: lambda g: vs.covector(vs.scalar_mul(y, g)),
lambda ans, vs, x, y: lambda g: vs.covector(vs.scalar_mul(x, g)))
defvjp(func(VSpace.covector), None,
lambda ans, vs, x: lambda g: vs.covector(g))
defvjp(func(VSpace.scalar_mul), None,
lambda ans, vs, x, a: lambda g: vs.covector(vs.scalar_mul(vs.covector(g), a)),
lambda ans, vs, x, a: lambda g: vs.inner_prod(g, vs.covector(x)))
# -------------------- core forward mode grads --------------------
identity_jvp = lambda g, *args, **kwargs: g
defjvp(sparse_add, None, identity_jvp, identity_jvp)
defjvp(func(VSpace.mut_add), None, identity_jvp, identity_jvp)
defjvp(func(VSpace.add), None, identity_jvp, identity_jvp)
defjvp(func(VSpace.scalar_mul), None, 'same', 'same')
defjvp(func(VSpace.inner_prod), None, 'same', 'same')
defjvp(func(VSpace.covector), None, 'same')
# -------------------- deprecation warnings -----------------------
deprecated_defvjp_message = '''
The {} method is deprecated. See the update guide and tutorial:
https://github.com/HIPS/autograd/blob/master/docs/updateguide.md
https://github.com/HIPS/autograd/blob/master/docs/tutorial.md'''
def deprecated_defvjp(primitive_fun):
deprecation_msg = deprecated_defvjp_message.format('defvjp')
vjpfuns = {}
<|code_end|>
using the current file's imports:
from itertools import count
from functools import reduce
from .tracer import trace, primitive, toposort, Node, Box, isbox, getval
from .util import func, subval
import warnings
and any relevant context from other files:
# Path: autograd/tracer.py
# def trace(start_node, fun, x):
# def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
# def initialize_root(self, *args, **kwargs):
# def new_root(cls, *args, **kwargs):
# def primitive(f_raw):
# def f_wrapped(*args, **kwargs):
# def register_notrace(trace_type, primitive_fun):
# def notrace_primitive(f_raw):
# def f_wrapped(*args, **kwargs):
# def find_top_boxed_args(args):
# def __init__(self):
# def new_trace(self):
# def __init__(self, value, trace, node):
# def __bool__(self):
# def __str__(self):
# def register(cls, value_type):
# def new_box(value, trace, node):
# class Node(object):
# class TraceStack(object):
# class Box(object):
#
# Path: autograd/util.py
# def func(f): return f
#
# def subval(x, i, v):
# x_ = list(x)
# x_[i] = v
# return tuple(x_)
. Output only the next line. | def defvjp_unstaged(vjpmaker, argnum=0): |
Continue the code snippet: <|code_start|> return vjp
vjpfuns[argnum] = staged_vjpmaker
argnums, vjpmakers = zip(*[(argnum, vjpfuns[argnum])
for argnum in sorted(vjpfuns.keys())])
defvjp(primitive_fun, *vjpmakers, argnums=argnums)
return defvjp_unstaged
def deprecated_defvjp_is_zero(primitive_fun):
deprecation_msg = deprecated_defvjp_message.format('defvjp_is_zero')
zero_vjps = [set()]
def defvjp_is_zero(argnums=(0,)):
warnings.warn(deprecation_msg)
zero_vjps[0] |= set(argnums)
nones = [None] * len(zero_vjps[0])
defvjp(primitive_fun, *nones, argnums=sorted(zero_vjps[0]))
return defvjp_is_zero
def deprecated_defgrad(primitive_fun):
deprecation_msg = deprecated_defvjp_message.format('defgrad')
gradfuns = {}
def defgrad(gradfun, argnum=0):
warnings.warn(deprecation_msg)
gradfuns[argnum] = gradfun
argnums, vjpmakers = zip(*[(argnum, gradfuns[argnum])
for argnum in sorted(gradfuns.keys())])
defvjp(primitive_fun, *vjpmakers, argnums=argnums)
return defgrad
primitive_ = primitive
<|code_end|>
. Use current file imports:
from itertools import count
from functools import reduce
from .tracer import trace, primitive, toposort, Node, Box, isbox, getval
from .util import func, subval
import warnings
and context (classes, functions, or code) from other files:
# Path: autograd/tracer.py
# def trace(start_node, fun, x):
# def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
# def initialize_root(self, *args, **kwargs):
# def new_root(cls, *args, **kwargs):
# def primitive(f_raw):
# def f_wrapped(*args, **kwargs):
# def register_notrace(trace_type, primitive_fun):
# def notrace_primitive(f_raw):
# def f_wrapped(*args, **kwargs):
# def find_top_boxed_args(args):
# def __init__(self):
# def new_trace(self):
# def __init__(self, value, trace, node):
# def __bool__(self):
# def __str__(self):
# def register(cls, value_type):
# def new_box(value, trace, node):
# class Node(object):
# class TraceStack(object):
# class Box(object):
#
# Path: autograd/util.py
# def func(f): return f
#
# def subval(x, i, v):
# x_ = list(x)
# x_[i] = v
# return tuple(x_)
. Output only the next line. | def primitive_with_deprecation_warnings(f_raw): |
Using the snippet: <|code_start|>
@login_required
def chooseorg(request, template='threebot/chooseorg.html'):
orgs = Organization.objects.filter(users=request.user)
return render(request, template, {'orgs': orgs})
def user_login(request, template='threebot/login.html'):
next = request.GET.get("next", getattr(settings, "LOGIN_REDIRECT_URL", "/"))
auth_form = AuthenticationForm(None, request.POST or None)
if request.user.is_authenticated():
# no need to login again, just redirect
return redirect(next)
if auth_form.is_valid():
# The form itself handles authentication and checking to make sure passowrd and such are supplied.
next = request.POST.get("next", getattr(settings, "LOGIN_REDIRECT_URL", "/"))
login(request, auth_form.get_user())
return HttpResponseRedirect(next)
return render(request, template, {'auth_form': auth_form})
def user_logout(request):
logout(request)
return redirect("/")
<|code_end|>
, determine the next line of code. You have imports:
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import logout
from django.conf import settings
from organizations.models import Organization
from threebot.utils import get_my_orgs, filter_workflow_log_history
and context (class names, function names, or code) available:
# Path: threebot/utils.py
# @login_required
# def get_my_orgs(request, an_user=None):
# if request is not None and an_user is None:
# an_user = request.user
#
# default_org, created = Organization.objects.get_or_create(slug='3bot', name="3bot")
#
# if not OrganizationUser.objects.filter(organization=default_org, user=an_user).exists():
# # we create an OrganizationUser, so each user is member of our default_org
# # if default_org was just created, we also mark this user as admin and create an OrganizationOwner
# org_user = OrganizationUser(organization=default_org, user=an_user, is_admin=created)
# org_user.save()
# if created:
# org_owner = OrganizationOwner(organization=default_org, organization_user=org_user)
# org_owner.save()
#
# orgs = Organization.objects.filter(users=an_user)
#
# return orgs
#
# def filter_workflow_log_history(workflow=None, teams=None, exit_code=None, user=None, worker=None, quantity=None):
# """returns a queryset of workflow-logs filtered by given parameters"""
# _filter = {}
# if workflow:
# _filter['workflow'] = workflow
# if teams:
# _filter['workflow__owner__in'] = teams
# if user:
# _filter['performed_by'] = user
# if worker:
# _filter['performed_on'] = worker
#
# logs = WorkflowLog.objects.filter(**_filter).select_related('workflow', 'performed_by', 'performed_on')[:quantity]
# return logs
. Output only the next line. | @login_required |
Continue the code snippet: <|code_start|>
@login_required
def chooseorg(request, template='threebot/chooseorg.html'):
orgs = Organization.objects.filter(users=request.user)
return render(request, template, {'orgs': orgs})
def user_login(request, template='threebot/login.html'):
next = request.GET.get("next", getattr(settings, "LOGIN_REDIRECT_URL", "/"))
auth_form = AuthenticationForm(None, request.POST or None)
if request.user.is_authenticated():
# no need to login again, just redirect
return redirect(next)
if auth_form.is_valid():
# The form itself handles authentication and checking to make sure passowrd and such are supplied.
next = request.POST.get("next", getattr(settings, "LOGIN_REDIRECT_URL", "/"))
login(request, auth_form.get_user())
return HttpResponseRedirect(next)
return render(request, template, {'auth_form': auth_form})
def user_logout(request):
logout(request)
return redirect("/")
<|code_end|>
. Use current file imports:
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import logout
from django.conf import settings
from organizations.models import Organization
from threebot.utils import get_my_orgs, filter_workflow_log_history
and context (classes, functions, or code) from other files:
# Path: threebot/utils.py
# @login_required
# def get_my_orgs(request, an_user=None):
# if request is not None and an_user is None:
# an_user = request.user
#
# default_org, created = Organization.objects.get_or_create(slug='3bot', name="3bot")
#
# if not OrganizationUser.objects.filter(organization=default_org, user=an_user).exists():
# # we create an OrganizationUser, so each user is member of our default_org
# # if default_org was just created, we also mark this user as admin and create an OrganizationOwner
# org_user = OrganizationUser(organization=default_org, user=an_user, is_admin=created)
# org_user.save()
# if created:
# org_owner = OrganizationOwner(organization=default_org, organization_user=org_user)
# org_owner.save()
#
# orgs = Organization.objects.filter(users=an_user)
#
# return orgs
#
# def filter_workflow_log_history(workflow=None, teams=None, exit_code=None, user=None, worker=None, quantity=None):
# """returns a queryset of workflow-logs filtered by given parameters"""
# _filter = {}
# if workflow:
# _filter['workflow'] = workflow
# if teams:
# _filter['workflow__owner__in'] = teams
# if user:
# _filter['performed_by'] = user
# if worker:
# _filter['performed_on'] = worker
#
# logs = WorkflowLog.objects.filter(**_filter).select_related('workflow', 'performed_by', 'performed_on')[:quantity]
# return logs
. Output only the next line. | @login_required |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
logger = logging.getLogger('3bot')
FLAGS = 0
@background(schedule=1)
<|code_end|>
, generate the next line using the imports in this file:
import logging
import zmq
import threebot_crypto
from copy import deepcopy
from django.template.defaultfilters import slugify
from django.utils import timezone
from background_task import background
from threebot.utils import render_template, order_workflow_tasks, importCode
from threebot.botconnection import BotConnection
from threebot.models import WorkflowLog
from threebot.utils import send_failiure_notification
and context (functions, classes, or occasionally code) from other files:
# Path: threebot/utils.py
# def render_template(workflow_log, workflow_task, mask=False):
# # mask = True -> replace sensitive data like passwords
# inputs = workflow_log.inputs[str(workflow_task.id)]
#
# if mask and 'password' in inputs:
# # replace sensitive data with '***'
# for key, value in inputs['password'].iteritems():
# inputs['password'][key] = '***'
#
# # Update reserved identifiers /keywords
# inputs['payload'] = workflow_log.inputs.get('payload', {})
# inputs['log'] = {}
# inputs['log']['url'] = workflow_log.get_absolute_url()
#
# # Script/tempate rendering
# wf_context = Context(inputs)
# unrendered = workflow_task.task.template
# template = Template(unrendered)
# rendered = template.render(wf_context)
# return rendered
#
# def order_workflow_tasks(workflow):
# try:
# curr = workflow.workflowtask_set.get(prev_workflow_task=None)
# workflow_tasks = [curr]
# while curr.next_workflow_task:
# curr = curr.next_workflow_task
# workflow_tasks.append(curr)
# except WorkflowTask.DoesNotExist:
# workflow_tasks = []
#
# return workflow_tasks
#
# def importCode(code, name, add_to_sys_modules=0):
# """
# Import dynamically generated code as a module. code is the
# object containing the code (a string, a file handle or an
# actual compiled code object, same types as accepted by an
# exec statement). The name is the name to give to the module,
# and the final argument says wheter to add it to sys.modules
# or not. If it is added, a subsequent import statement using
# name will return this module. If it is not added to sys.modules
# import will try to load it in the normal fashion.
#
# import foo
#
# is equivalent to
#
# foofile = open("/path/to/foo.py")
# foo = importCode(foofile,"foo",1)
#
# Returns a newly generated module.
# """
# import sys
# import imp
#
# module = imp.new_module(name)
#
# exec(code)
# if add_to_sys_modules:
# sys.modules[name] = module
#
# return module
#
# Path: threebot/botconnection.py
# class BotConnection:
# """
# A generic BotConnection class.
# Usage: Create a new BotConnection before sending messages make sure you
# run the connect() method.
# """
# def __init__(self, endpoint, secret_key):
# self.endpoint = endpoint
# self.secret_key = str(secret_key)
# self.context = zmq.Context(1)
#
# def connect(self):
# self.client = self.context.socket(zmq.REQ)
# self.client.connect(self.endpoint)
#
# self.poll = zmq.Poller()
# self.poll.register(self.client, zmq.POLLIN)
#
# def disconnect(self):
# self.client.setsockopt(zmq.LINGER, 0)
# self.client.close()
# self.poll.unregister(self.client)
#
# def reconnect(self):
# self.disconnect()
# self.connect()
#
# def close(self):
# self.disconnect()
# self.context.term()
#
# Path: threebot/models.py
# class WorkflowLog(models.Model):
# SUCCESS = 0
# ERROR = 1
# PENDING = 2
#
# EXIT_CODE_CHOICES = (
# (SUCCESS, 'Success'),
# (ERROR, 'Error'),
# (PENDING, 'Pending'),
# )
#
# workflow = models.ForeignKey(Workflow, verbose_name=_("Workflow"))
# date_created = models.DateTimeField(auto_now_add=True, help_text='Date the workflow was performed')
# date_started = models.DateTimeField(help_text='Date the workflow was performed', blank=True, null=True)
# date_finished = models.DateTimeField(help_text='Date the workflow was performed', blank=True, null=True)
# exit_code = models.PositiveIntegerField(choices=EXIT_CODE_CHOICES, default=PENDING)
# performed_by = models.ForeignKey(settings.AUTH_USER_MODEL, help_text="The User who performed the Worfkflow")
# performed_on = models.ForeignKey(Worker, help_text="The Worker Worfkflow was performed on")
#
# inputs = JSONField()
# outputs = JSONField(null=True, blank=True)
#
# class Meta():
# ordering = ['-date_created', ]
# verbose_name = _("Workflow Log")
# verbose_name_plural = _("Workflow Logs")
#
# @models.permalink
# def get_absolute_url(self):
# return ('core_workflow_log_detail', (), {
# 'slug': self.workflow.slug,
# 'id': self.id})
#
# def permalink(self):
# return reverse('core_workflow_log_permalink', kwargs={'id': self.id})
#
# def __str__(self):
# return "%s - %s logged %s" % (self.date_created.strftime('%d.%m.%y %H:%M'), str(self.performed_by), self.workflow.title, )
#
# Path: threebot/utils.py
# def send_failiure_notification(workflow_log):
# """Notify user in case of failure."""
# subject = "[3BOT] Workflow '%s' has failed" % (workflow_log.workflow.title)
# message = "Your workflow %s%s has failed.\n -- 3bot" % (Site.objects.get_current(), workflow_log.get_absolute_url())
# workflow_log.performed_by.email_user(subject, message)
. Output only the next line. | def run_workflow(workflow_log_id): |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
logger = logging.getLogger('3bot')
FLAGS = 0
@background(schedule=1)
<|code_end|>
, determine the next line of code. You have imports:
import logging
import zmq
import threebot_crypto
from copy import deepcopy
from django.template.defaultfilters import slugify
from django.utils import timezone
from background_task import background
from threebot.utils import render_template, order_workflow_tasks, importCode
from threebot.botconnection import BotConnection
from threebot.models import WorkflowLog
from threebot.utils import send_failiure_notification
and context (class names, function names, or code) available:
# Path: threebot/utils.py
# def render_template(workflow_log, workflow_task, mask=False):
# # mask = True -> replace sensitive data like passwords
# inputs = workflow_log.inputs[str(workflow_task.id)]
#
# if mask and 'password' in inputs:
# # replace sensitive data with '***'
# for key, value in inputs['password'].iteritems():
# inputs['password'][key] = '***'
#
# # Update reserved identifiers /keywords
# inputs['payload'] = workflow_log.inputs.get('payload', {})
# inputs['log'] = {}
# inputs['log']['url'] = workflow_log.get_absolute_url()
#
# # Script/tempate rendering
# wf_context = Context(inputs)
# unrendered = workflow_task.task.template
# template = Template(unrendered)
# rendered = template.render(wf_context)
# return rendered
#
# def order_workflow_tasks(workflow):
# try:
# curr = workflow.workflowtask_set.get(prev_workflow_task=None)
# workflow_tasks = [curr]
# while curr.next_workflow_task:
# curr = curr.next_workflow_task
# workflow_tasks.append(curr)
# except WorkflowTask.DoesNotExist:
# workflow_tasks = []
#
# return workflow_tasks
#
# def importCode(code, name, add_to_sys_modules=0):
# """
# Import dynamically generated code as a module. code is the
# object containing the code (a string, a file handle or an
# actual compiled code object, same types as accepted by an
# exec statement). The name is the name to give to the module,
# and the final argument says wheter to add it to sys.modules
# or not. If it is added, a subsequent import statement using
# name will return this module. If it is not added to sys.modules
# import will try to load it in the normal fashion.
#
# import foo
#
# is equivalent to
#
# foofile = open("/path/to/foo.py")
# foo = importCode(foofile,"foo",1)
#
# Returns a newly generated module.
# """
# import sys
# import imp
#
# module = imp.new_module(name)
#
# exec(code)
# if add_to_sys_modules:
# sys.modules[name] = module
#
# return module
#
# Path: threebot/botconnection.py
# class BotConnection:
# """
# A generic BotConnection class.
# Usage: Create a new BotConnection before sending messages make sure you
# run the connect() method.
# """
# def __init__(self, endpoint, secret_key):
# self.endpoint = endpoint
# self.secret_key = str(secret_key)
# self.context = zmq.Context(1)
#
# def connect(self):
# self.client = self.context.socket(zmq.REQ)
# self.client.connect(self.endpoint)
#
# self.poll = zmq.Poller()
# self.poll.register(self.client, zmq.POLLIN)
#
# def disconnect(self):
# self.client.setsockopt(zmq.LINGER, 0)
# self.client.close()
# self.poll.unregister(self.client)
#
# def reconnect(self):
# self.disconnect()
# self.connect()
#
# def close(self):
# self.disconnect()
# self.context.term()
#
# Path: threebot/models.py
# class WorkflowLog(models.Model):
# SUCCESS = 0
# ERROR = 1
# PENDING = 2
#
# EXIT_CODE_CHOICES = (
# (SUCCESS, 'Success'),
# (ERROR, 'Error'),
# (PENDING, 'Pending'),
# )
#
# workflow = models.ForeignKey(Workflow, verbose_name=_("Workflow"))
# date_created = models.DateTimeField(auto_now_add=True, help_text='Date the workflow was performed')
# date_started = models.DateTimeField(help_text='Date the workflow was performed', blank=True, null=True)
# date_finished = models.DateTimeField(help_text='Date the workflow was performed', blank=True, null=True)
# exit_code = models.PositiveIntegerField(choices=EXIT_CODE_CHOICES, default=PENDING)
# performed_by = models.ForeignKey(settings.AUTH_USER_MODEL, help_text="The User who performed the Worfkflow")
# performed_on = models.ForeignKey(Worker, help_text="The Worker Worfkflow was performed on")
#
# inputs = JSONField()
# outputs = JSONField(null=True, blank=True)
#
# class Meta():
# ordering = ['-date_created', ]
# verbose_name = _("Workflow Log")
# verbose_name_plural = _("Workflow Logs")
#
# @models.permalink
# def get_absolute_url(self):
# return ('core_workflow_log_detail', (), {
# 'slug': self.workflow.slug,
# 'id': self.id})
#
# def permalink(self):
# return reverse('core_workflow_log_permalink', kwargs={'id': self.id})
#
# def __str__(self):
# return "%s - %s logged %s" % (self.date_created.strftime('%d.%m.%y %H:%M'), str(self.performed_by), self.workflow.title, )
#
# Path: threebot/utils.py
# def send_failiure_notification(workflow_log):
# """Notify user in case of failure."""
# subject = "[3BOT] Workflow '%s' has failed" % (workflow_log.workflow.title)
# message = "Your workflow %s%s has failed.\n -- 3bot" % (Site.objects.get_current(), workflow_log.get_absolute_url())
# workflow_log.performed_by.email_user(subject, message)
. Output only the next line. | def run_workflow(workflow_log_id): |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
logger = logging.getLogger('3bot')
FLAGS = 0
@background(schedule=1)
<|code_end|>
, predict the immediate next line with the help of imports:
import logging
import zmq
import threebot_crypto
from copy import deepcopy
from django.template.defaultfilters import slugify
from django.utils import timezone
from background_task import background
from threebot.utils import render_template, order_workflow_tasks, importCode
from threebot.botconnection import BotConnection
from threebot.models import WorkflowLog
from threebot.utils import send_failiure_notification
and context (classes, functions, sometimes code) from other files:
# Path: threebot/utils.py
# def render_template(workflow_log, workflow_task, mask=False):
# # mask = True -> replace sensitive data like passwords
# inputs = workflow_log.inputs[str(workflow_task.id)]
#
# if mask and 'password' in inputs:
# # replace sensitive data with '***'
# for key, value in inputs['password'].iteritems():
# inputs['password'][key] = '***'
#
# # Update reserved identifiers /keywords
# inputs['payload'] = workflow_log.inputs.get('payload', {})
# inputs['log'] = {}
# inputs['log']['url'] = workflow_log.get_absolute_url()
#
# # Script/tempate rendering
# wf_context = Context(inputs)
# unrendered = workflow_task.task.template
# template = Template(unrendered)
# rendered = template.render(wf_context)
# return rendered
#
# def order_workflow_tasks(workflow):
# try:
# curr = workflow.workflowtask_set.get(prev_workflow_task=None)
# workflow_tasks = [curr]
# while curr.next_workflow_task:
# curr = curr.next_workflow_task
# workflow_tasks.append(curr)
# except WorkflowTask.DoesNotExist:
# workflow_tasks = []
#
# return workflow_tasks
#
# def importCode(code, name, add_to_sys_modules=0):
# """
# Import dynamically generated code as a module. code is the
# object containing the code (a string, a file handle or an
# actual compiled code object, same types as accepted by an
# exec statement). The name is the name to give to the module,
# and the final argument says wheter to add it to sys.modules
# or not. If it is added, a subsequent import statement using
# name will return this module. If it is not added to sys.modules
# import will try to load it in the normal fashion.
#
# import foo
#
# is equivalent to
#
# foofile = open("/path/to/foo.py")
# foo = importCode(foofile,"foo",1)
#
# Returns a newly generated module.
# """
# import sys
# import imp
#
# module = imp.new_module(name)
#
# exec(code)
# if add_to_sys_modules:
# sys.modules[name] = module
#
# return module
#
# Path: threebot/botconnection.py
# class BotConnection:
# """
# A generic BotConnection class.
# Usage: Create a new BotConnection before sending messages make sure you
# run the connect() method.
# """
# def __init__(self, endpoint, secret_key):
# self.endpoint = endpoint
# self.secret_key = str(secret_key)
# self.context = zmq.Context(1)
#
# def connect(self):
# self.client = self.context.socket(zmq.REQ)
# self.client.connect(self.endpoint)
#
# self.poll = zmq.Poller()
# self.poll.register(self.client, zmq.POLLIN)
#
# def disconnect(self):
# self.client.setsockopt(zmq.LINGER, 0)
# self.client.close()
# self.poll.unregister(self.client)
#
# def reconnect(self):
# self.disconnect()
# self.connect()
#
# def close(self):
# self.disconnect()
# self.context.term()
#
# Path: threebot/models.py
# class WorkflowLog(models.Model):
# SUCCESS = 0
# ERROR = 1
# PENDING = 2
#
# EXIT_CODE_CHOICES = (
# (SUCCESS, 'Success'),
# (ERROR, 'Error'),
# (PENDING, 'Pending'),
# )
#
# workflow = models.ForeignKey(Workflow, verbose_name=_("Workflow"))
# date_created = models.DateTimeField(auto_now_add=True, help_text='Date the workflow was performed')
# date_started = models.DateTimeField(help_text='Date the workflow was performed', blank=True, null=True)
# date_finished = models.DateTimeField(help_text='Date the workflow was performed', blank=True, null=True)
# exit_code = models.PositiveIntegerField(choices=EXIT_CODE_CHOICES, default=PENDING)
# performed_by = models.ForeignKey(settings.AUTH_USER_MODEL, help_text="The User who performed the Worfkflow")
# performed_on = models.ForeignKey(Worker, help_text="The Worker Worfkflow was performed on")
#
# inputs = JSONField()
# outputs = JSONField(null=True, blank=True)
#
# class Meta():
# ordering = ['-date_created', ]
# verbose_name = _("Workflow Log")
# verbose_name_plural = _("Workflow Logs")
#
# @models.permalink
# def get_absolute_url(self):
# return ('core_workflow_log_detail', (), {
# 'slug': self.workflow.slug,
# 'id': self.id})
#
# def permalink(self):
# return reverse('core_workflow_log_permalink', kwargs={'id': self.id})
#
# def __str__(self):
# return "%s - %s logged %s" % (self.date_created.strftime('%d.%m.%y %H:%M'), str(self.performed_by), self.workflow.title, )
#
# Path: threebot/utils.py
# def send_failiure_notification(workflow_log):
# """Notify user in case of failure."""
# subject = "[3BOT] Workflow '%s' has failed" % (workflow_log.workflow.title)
# message = "Your workflow %s%s has failed.\n -- 3bot" % (Site.objects.get_current(), workflow_log.get_absolute_url())
# workflow_log.performed_by.email_user(subject, message)
. Output only the next line. | def run_workflow(workflow_log_id): |
Predict the next line after this snippet: <|code_start|>
@login_required
def detail(request, id):
orgs = get_my_orgs(request)
log = get_object_or_404(WorkflowLog, id=id)
return JsonResponse({
<|code_end|>
using the current file's imports:
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from threebot.models import WorkflowLog
from threebot.utils import get_my_orgs
and any relevant context from other files:
# Path: threebot/models.py
# class WorkflowLog(models.Model):
# SUCCESS = 0
# ERROR = 1
# PENDING = 2
#
# EXIT_CODE_CHOICES = (
# (SUCCESS, 'Success'),
# (ERROR, 'Error'),
# (PENDING, 'Pending'),
# )
#
# workflow = models.ForeignKey(Workflow, verbose_name=_("Workflow"))
# date_created = models.DateTimeField(auto_now_add=True, help_text='Date the workflow was performed')
# date_started = models.DateTimeField(help_text='Date the workflow was performed', blank=True, null=True)
# date_finished = models.DateTimeField(help_text='Date the workflow was performed', blank=True, null=True)
# exit_code = models.PositiveIntegerField(choices=EXIT_CODE_CHOICES, default=PENDING)
# performed_by = models.ForeignKey(settings.AUTH_USER_MODEL, help_text="The User who performed the Worfkflow")
# performed_on = models.ForeignKey(Worker, help_text="The Worker Worfkflow was performed on")
#
# inputs = JSONField()
# outputs = JSONField(null=True, blank=True)
#
# class Meta():
# ordering = ['-date_created', ]
# verbose_name = _("Workflow Log")
# verbose_name_plural = _("Workflow Logs")
#
# @models.permalink
# def get_absolute_url(self):
# return ('core_workflow_log_detail', (), {
# 'slug': self.workflow.slug,
# 'id': self.id})
#
# def permalink(self):
# return reverse('core_workflow_log_permalink', kwargs={'id': self.id})
#
# def __str__(self):
# return "%s - %s logged %s" % (self.date_created.strftime('%d.%m.%y %H:%M'), str(self.performed_by), self.workflow.title, )
#
# Path: threebot/utils.py
# @login_required
# def get_my_orgs(request, an_user=None):
# if request is not None and an_user is None:
# an_user = request.user
#
# default_org, created = Organization.objects.get_or_create(slug='3bot', name="3bot")
#
# if not OrganizationUser.objects.filter(organization=default_org, user=an_user).exists():
# # we create an OrganizationUser, so each user is member of our default_org
# # if default_org was just created, we also mark this user as admin and create an OrganizationOwner
# org_user = OrganizationUser(organization=default_org, user=an_user, is_admin=created)
# org_user.save()
# if created:
# org_owner = OrganizationOwner(organization=default_org, organization_user=org_user)
# org_owner.save()
#
# orgs = Organization.objects.filter(users=an_user)
#
# return orgs
. Output only the next line. | 'id': log.id, |
Continue the code snippet: <|code_start|>
@login_required
def detail(request, id):
orgs = get_my_orgs(request)
log = get_object_or_404(WorkflowLog, id=id)
return JsonResponse({
<|code_end|>
. Use current file imports:
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from threebot.models import WorkflowLog
from threebot.utils import get_my_orgs
and context (classes, functions, or code) from other files:
# Path: threebot/models.py
# class WorkflowLog(models.Model):
# SUCCESS = 0
# ERROR = 1
# PENDING = 2
#
# EXIT_CODE_CHOICES = (
# (SUCCESS, 'Success'),
# (ERROR, 'Error'),
# (PENDING, 'Pending'),
# )
#
# workflow = models.ForeignKey(Workflow, verbose_name=_("Workflow"))
# date_created = models.DateTimeField(auto_now_add=True, help_text='Date the workflow was performed')
# date_started = models.DateTimeField(help_text='Date the workflow was performed', blank=True, null=True)
# date_finished = models.DateTimeField(help_text='Date the workflow was performed', blank=True, null=True)
# exit_code = models.PositiveIntegerField(choices=EXIT_CODE_CHOICES, default=PENDING)
# performed_by = models.ForeignKey(settings.AUTH_USER_MODEL, help_text="The User who performed the Worfkflow")
# performed_on = models.ForeignKey(Worker, help_text="The Worker Worfkflow was performed on")
#
# inputs = JSONField()
# outputs = JSONField(null=True, blank=True)
#
# class Meta():
# ordering = ['-date_created', ]
# verbose_name = _("Workflow Log")
# verbose_name_plural = _("Workflow Logs")
#
# @models.permalink
# def get_absolute_url(self):
# return ('core_workflow_log_detail', (), {
# 'slug': self.workflow.slug,
# 'id': self.id})
#
# def permalink(self):
# return reverse('core_workflow_log_permalink', kwargs={'id': self.id})
#
# def __str__(self):
# return "%s - %s logged %s" % (self.date_created.strftime('%d.%m.%y %H:%M'), str(self.performed_by), self.workflow.title, )
#
# Path: threebot/utils.py
# @login_required
# def get_my_orgs(request, an_user=None):
# if request is not None and an_user is None:
# an_user = request.user
#
# default_org, created = Organization.objects.get_or_create(slug='3bot', name="3bot")
#
# if not OrganizationUser.objects.filter(organization=default_org, user=an_user).exists():
# # we create an OrganizationUser, so each user is member of our default_org
# # if default_org was just created, we also mark this user as admin and create an OrganizationOwner
# org_user = OrganizationUser(organization=default_org, user=an_user, is_admin=created)
# org_user.save()
# if created:
# org_owner = OrganizationOwner(organization=default_org, organization_user=org_user)
# org_owner.save()
#
# orgs = Organization.objects.filter(users=an_user)
#
# return orgs
. Output only the next line. | 'id': log.id, |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.