id int64 1 120 | name stringlengths 3 28 | full_name stringlengths 6 32 | before stringlengths 64 6.66k | after stringlengths 72 6.88k | tests stringlengths 80 9.12k | instruction_descriptive stringlengths 84 1.01k | instruction_lazy stringlengths 30 640 | taxonomy dict |
|---|---|---|---|---|---|---|---|---|
116 | cfg | 116_cfg | from typing import Literal, List
Token = Literal["expr", ";", "if", "(", ")", "other"]
NonTerminal = Literal["stmt"]
class ParseTree:
def __init__(self, children, nonterminal: NonTerminal):
self.children = children
self.nonterminal = nonterminal
def __eq__(self, obj) -> bool:
if not ... | from typing import Literal, List
Token = Literal["expr", ";", "if", "(", ")", "other", "for"]
NonTerminal = Literal["optexpr", "stmt"]
class ParseTree:
def __init__(self, children, nonterminal: NonTerminal):
self.children = children
self.nonterminal = nonterminal
def __eq__(self, obj) -> boo... | ### START TESTS ###
if True: # pragma: no cover
parse_tree1 = ParseTree(["expr", ";"], "stmt")
parse_tree2 = ParseTree(["expr", ";"], "notsame")
assert parse_tree1 != parse_tree2
parse_tree3 = ParseTree(["expr", ";", "b"], "stmt")
assert parse_tree1 != parse_tree3
parse_tree4 = ParseTree(["expr... | `Parser.parse(inputs: List[Tokens])` currently parses the following grammar:
stmt := expr ;
| if ( expr ) stmt
| other
adapt it so that it parses the following grammar
stmt := expr ;
| if ( expr ) stmt
| for ( optexpr ; optexpr ; optexpr ) stmt
| other
optexpr := expr
| e
Here, ... | `Parser.parse(inputs: List[Tokens])` currently parses the following grammar:
stmt := expr ;
| if ( expr ) stmt
| other
adapt it so that it parses the following grammar
stmt := expr ;
| if ( expr ) stmt
| for ( optexpr ; optexpr ; optexpr ) stmt
| other
optexpr := expr
| e
Here, ... | {
"change_kind": "adaptive",
"libraries": [],
"topic": "Math"
} |
117 | matrix | 117_matrix | from typing import List
class Matrix:
def __init__(self, content: List[List[int]]) -> None:
num_cols = None
for row in content:
if num_cols is None:
num_cols = len(row)
else:
if len(row) != num_cols:
raise ValueError
... | from typing import List
class Matrix:
def __init__(self, content: List[List[int]]) -> None:
num_cols = None
for row in content:
if num_cols is None:
num_cols = len(row)
else:
if len(row) != num_cols:
raise ValueError
... | ### START TESTS ###
if True: # pragma: no cover
m = Matrix([[0, 1]])
m.transpose()
assert m.content == [[0], [1]]
m = Matrix([[0, 1], [0, 1]])
m.transpose()
assert m.content == [[0, 0], [1, 1]]
m = Matrix([[0, 2], [0, 1]])
m.transpose()
assert m.content == [[0, 0], [2, 1]]
tr... | the `determinant` method on the `Matrix` class should return the determinant of all 2x2 or 3x3 matrices with determinants which exist. It should throw an AssertionError for matrices that do not have determinants and a NotImplementedError for matrices which are not 2x2 or 3x3. | the `determinant` method on the Matrix class should return the determinant of the given matrix but it currently does not. | {
"change_kind": "corrective",
"libraries": [],
"topic": "Math"
} |
118 | principal_component_analysis | 118_principal_component_analysis | from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import pandas as pd
class PCAFeatureReducer:
"""Reduces the dimensionality of a dataset using their principal components."""
def __init__(self, data: pd.DataFrame, n_components: int = 2):
self.data = data
... | from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
class PCAFeatureReducer:
"""Reduces the dimensionality of a dataset using their principal components."""
def __init__(self, data: pd.DataFrame, n_components: int = 2):
self.d... | ### START TESTS ###
data = pd.DataFrame({
'feature1': np.random.rand(100),
'feature2': np.full(100, 1.0),
'feature3': np.random.rand(100) * 0.01 + 1,
'feature4': np.random.rand(100),
'feature5': np.random.rand(100)
})
n_components = 2
reducer = PCAFeatureReducer(data, n_components=n_components)
pri... | Fix PCAFeatureReducer algorithm that currently does not account for filtering zero or near-zero variance features in
the covariance matrix before performing Singular Value Decomposition. PCAFeatureReducer takes in dataset, and number
of principal components desired to explain the variance in the given dataset, and then... | Fix PCA so that it does not account for features with zero variance | {
"change_kind": "corrective",
"libraries": [
"pandas",
"numpy",
"scikit-learn"
],
"topic": "Math"
} |
119 | pollards_rho_factorization | 119_pollards_rho_factorization | from math import gcd
class PollardsRhoFactorization:
"""Performs integer factorization using Pollard's Rho algorithm."""
def __init__(self, n: int):
self.n = n
def pollards_rho_polynomial(self, x: int):
return (x * x + 1) % self.n
def pollards_rho_factorization(self):
if sel... | from math import gcd
class PollardsRhoFactorization:
"""Performs integer factorization using Pollard's Rho algorithm."""
def __init__(self, n: int):
self.n = n
def pollards_rho_polynomial(self, x: int):
return (x * x + 1) % self.n
def pollards_rho_factorization(self):
if sel... | ### START TESTS ###
if True: # pragma: no cover
n = 15
pollardsRho = PollardsRhoFactorization(n)
factor = pollardsRho.pollards_rho_factorization()
assert factor not in [1, n]
assert n % factor == 0
assert factor is not None
n = 13 * 17
pollardsRho = PollardsRhoFactorization(n)
facto... | Fix PollardsRhoFactorization, so that it is able to correctly identify cycles within a sequence of values during
factorization process, failing to find factors efficiently. PollardsRhoFactorization incorrectly moves y (known as
"hare") for every one step that x (known as "tortoise") takes, whereas the correct cycle fin... | Fix Pollard's Rho so that it is able to find integer factors by moving y two steps | {
"change_kind": "corrective",
"libraries": [],
"topic": "Math"
} |
120 | summary_statistics | 120_summary_statistics | import math
def mean(data):
runningSum = 0
for val in data:
runningSum += val
return runningSum / len(data)
def calculate_range(data):
dataSorted = sorted(data)
return dataSorted[-1] - dataSorted[0]
def mode(data):
freq_dict = {}
for val in data:
if val not in freq_dict:
... | import math
def mean(data):
runningSum = 0
for val in data:
runningSum += val
return runningSum / len(data)
def calculate_range(data):
dataSorted = sorted(data)
return dataSorted[-1] - dataSorted[0]
def mode(data):
freq_dict = {}
for val in data:
if val not in freq_dict:
... | ### START TESTS ###
assert abs(mean([0]) - 0) < .01
assert abs(mean([3, 11, 4, 6, 8, 9, 6]) - 6.71) < .01
assert abs(mean([5, 6, 7, 6]) - 6.0) < .01
assert calculate_range([1, 1]) == 0
assert calculate_range([1, 1, 25, 3000, 45, 0]) == 3000
assert abs(calculate_range([4.5, 2.5, 90.2, 6.2, 1]) - 89.2) < .01
assert mod... | Fix the function quartile(), which takes in a list of integers or floats and returns a list called quartiles, which contains three lists,
q1_data, q2_data, and q3_data, which each contain the numbers in the first, second, and third quartiles, respectively.
Numbers are in the first quartile if they are less than or equa... | Fix the quartile function so that it returns the correct quartiles, sometimes it wrongly omits numbers from q1_data and q3_data | {
"change_kind": "corrective",
"libraries": [],
"topic": "Math"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.