| function | intent_category |
|---|---|
def euclidean_distance_sqr(point1, point2):
    """Return the squared Euclidean distance between two 2-D points."""
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return dx * dx + dy * dy
def column_based_sort(array, column=0):
    """Return a new list of the rows in *array*, ordered by the given column."""
    return sorted(array, key=lambda row: row[column])
def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
for i in range(points_counts - 1):
for j in range(i + 1, points_counts):
current_dis = euclidean_distance_sqr(points[i], points[j])
if current_dis < min_dis:
min_dis = current_dis
return m... | divide_and_conquer |
def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
for i in range(min(6, points_counts - 1), points_counts):
for j in range(max(0, i - 6), i):
current_dis = euclidean_distance_sqr(points[i], points[j])
if current_dis < min_dis:
min_dis = c... | divide_and_conquer |
def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
# base case
if points_counts <= 3:
return dis_between_closest_pair(points_sorted_on_x, points_counts)
# recursion
mid = points_counts // 2
closest_in_left = closest_pair_of_points_sqr(
points_sor... | divide_and_conquer |
def closest_pair_of_points(points, points_counts):
    """Return the (non-squared) distance between the closest pair of points.

    Sorts the points by x and by y once, then delegates to the
    divide-and-conquer helper, which works in squared distances.
    """
    by_x = column_based_sort(points, column=0)
    by_y = column_based_sort(points, column=1)
    squared = closest_pair_of_points_sqr(by_x, by_y, points_counts)
    return squared**0.5
def default_matrix_multiplication(a: list, b: list) -> list:
if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
raise Exception("Matrices are not 2x2")
new_matrix = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1]... | divide_and_conquer |
def matrix_addition(matrix_a: list, matrix_b: list):
    """Return the element-wise sum of two same-shaped list-of-rows matrices."""
    result = []
    for row_index, row in enumerate(matrix_a):
        summed_row = [value + matrix_b[row_index][col] for col, value in enumerate(row)]
        result.append(summed_row)
    return result
def matrix_subtraction(matrix_a: list, matrix_b: list):
    """Return the element-wise difference of two same-shaped matrices."""
    result = []
    for row_index, row in enumerate(matrix_a):
        diff_row = [value - matrix_b[row_index][col] for col, value in enumerate(row)]
        result.append(diff_row)
    return result
def split_matrix(a: list) -> tuple[list, list, list, list]:
if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
raise Exception("Odd matrices are not supported!")
matrix_length = len(a)
mid = matrix_length // 2
top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
bot_right ... | divide_and_conquer |
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    """Return (row count, column count) of a list-of-rows matrix."""
    row_count = len(matrix)
    col_count = len(matrix[0])
    return row_count, col_count
def print_matrix(matrix: list) -> None:
    """Print the matrix with one row per output line."""
    rows = [str(row) for row in matrix]
    print("\n".join(rows))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
if matrix_dimensions(matrix_a) == (2, 2):
return default_matrix_multiplication(matrix_a, matrix_b)
a, b, c, d = split_matrix(matrix_a)
e, f, g, h = split_matrix(matrix_b)
t1 = actual_strassen(a, matrix_subtraction(f, h))
t2 = act... | divide_and_conquer |
def strassen(matrix1: list, matrix2: list) -> list:
if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
raise Exception(
"Unable to multiply these matrices, please check the dimensions. \n"
f"Matrix A:{matrix1} \nMatrix B:{matrix2}"
)
dimension1 = matrix_di... | divide_and_conquer |
def random_pivot(lst):
    """Return a uniformly random element of *lst* to use as a pivot."""
    pivot = choice(lst)
    return pivot
def kth_number(lst: list[int], k: int) -> int:
# pick a pivot and separate into list based on pivot.
pivot = random_pivot(lst)
# partition based on pivot
# linear time
small = [e for e in lst if e < pivot]
big = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we... | divide_and_conquer |
def merge(left_half: list, right_half: list) -> list:
sorted_array = [None] * (len(right_half) + len(left_half))
pointer1 = 0 # pointer to current index for left Half
pointer2 = 0 # pointer to current index for the right Half
index = 0 # pointer to current index for the sorted array Half
while ... | divide_and_conquer |
def merge_sort(array: list) -> list:
if len(array) <= 1:
return array
# the actual formula to calculate the middle element = left + (right - left) // 2
# this avoids integer overflow in case of large N
middle = 0 + (len(array) - 0) // 2
# Split the array into halves till the array length be... | divide_and_conquer |
def actual_power(a: int, b: int):
    """Return ``a`` raised to ``|b|`` by recursive squaring.

    The exponent is halved with ``int(b / 2)`` (truncation toward zero)
    so that negative exponents also terminate; ``power()`` passes b < 0
    through and divides by the result.

    The half power is computed once and squared, so only O(log |b|)
    multiplications are performed — the original recursed twice per
    level, costing O(|b|) multiplications.
    """
    if b == 0:
        return 1  # a**0 == 1, including a == 0
    half = actual_power(a, int(b / 2))  # truncation toward zero, on purpose
    if b % 2 == 0:
        return half * half
    return a * half * half
def power(a: int, b: int) -> float:
    """Return a ** b for integer b; negative exponents yield a float.

    Raises ZeroDivisionError when a == 0 and b < 0.
    """
    if b < 0:
        # Negate the exponent so actual_power only ever sees b >= 0.
        # The original passed the negative b straight through and relied
        # on int-truncation inside actual_power to terminate.
        return 1 / actual_power(a, -b)
    return actual_power(a, b)
def electric_power(voltage: float, current: float, power: float) -> tuple:
result = namedtuple("result", "name value")
if (voltage, current, power).count(0) != 1:
raise ValueError("Only one argument must be 0")
elif power < 0:
raise ValueError(
"Power cannot be negative in any el... | electronics |
def electrical_impedance(
resistance: float, reactance: float, impedance: float
) -> dict[str, float]:
if (resistance, reactance, impedance).count(0) != 1:
raise ValueError("One and only one argument must be 0")
if resistance == 0:
return {"resistance": sqrt(pow(impedance, 2) - pow(reactance... | electronics |
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0) != 1:
raise ValueError("One and only one argument must be 0")
if resistance < 0:
raise ValueError("Resistance cannot be negative")
if voltage == 0:
return {... | electronics |
def electric_conductivity(
conductivity: float,
electron_conc: float,
mobility: float,
) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0) != 1:
raise ValueError("You cannot supply more or less than 2 values")
elif conductivity < 0:
raise ValueError("Conductiv... | electronics |
def couloumbs_law(
force: float, charge1: float, charge2: float, distance: float
) -> dict[str, float]:
charge_product = abs(charge1 * charge2)
if (force, charge1, charge2, distance).count(0) != 1:
raise ValueError("One and only one argument must be 0")
if distance < 0:
raise ValueErro... | electronics |
def builtin_voltage(
donor_conc: float, # donor concentration
acceptor_conc: float, # acceptor concentration
intrinsic_conc: float, # intrinsic concentration
) -> float:
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive")
elif acceptor_conc <= 0:
raise ... | electronics |
def carrier_concentration(
electron_conc: float,
hole_conc: float,
intrinsic_conc: float,
) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
raise ValueError("You cannot supply more or less than 2 values")
elif electron_conc < 0:
raise ValueError("Electron conce... | electronics |
def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors wired in parallel.

    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)

    Raises:
        ValueError: if any resistor is zero or negative; checked in
            order, so the first offending index is reported.
    """
    reciprocal_sum = 0.00
    # enumerate replaces the original's hand-maintained index counter
    for index, resistor in enumerate(resistors):
        if resistor <= 0:
            raise ValueError(f"Resistor at index {index} has a negative or zero value!")
        reciprocal_sum += 1 / float(resistor)
    return 1 / reciprocal_sum
def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors wired in series (the sum).

    Raises:
        ValueError: if any resistor value is negative; checked in order,
            so the first offending index is reported.
    """
    total = 0.00
    # validate BEFORE accumulating (the original added the value first,
    # which worked only because the raise aborted anyway), and use
    # enumerate instead of a hand-maintained index counter
    for index, resistor in enumerate(resistors):
        if resistor < 0:
            raise ValueError(f"Resistor at index {index} has a negative value!")
        total += resistor
    return total
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative")
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative")
else:
return (
"Resonant frequency",
... | electronics |
def ind_reactance(
inductance: float, frequency: float, reactance: float
) -> dict[str, float]:
if (inductance, frequency, reactance).count(0) != 1:
raise ValueError("One and only one argument must be 0")
if inductance < 0:
raise ValueError("Inductance cannot be negative")
if frequency ... | electronics |
def __init__(self) -> None:
    """Initialise with the two fixed demo signals."""
    self.first_signal, self.second_signal = [2, 1, 2, -1], [1, 2, 3, 4]
def circular_convolution(self) -> list[float]:
length_first_signal = len(self.first_signal)
length_second_signal = len(self.second_signal)
max_length = max(length_first_signal, length_second_signal)
# create a zero matrix of max_length x max_length
matrix = [[0] * max_length f... | electronics |
def mae(predict, actual):
    """Mean absolute error between predictions and ground truth."""
    absolute_error = np.abs(np.array(predict) - np.array(actual))
    return absolute_error.mean()
def mse(predict, actual):
    """Mean squared error between predictions and ground truth."""
    difference = np.array(predict) - np.array(actual)
    return np.square(difference).mean()
def rmse(predict, actual):
    """Root mean squared error between predictions and ground truth."""
    squared_error = np.square(np.array(predict) - np.array(actual))
    return np.sqrt(squared_error.mean())
def rmsle(predict, actual):
predict = np.array(predict)
actual = np.array(actual)
log_predict = np.log(predict + 1)
log_actual = np.log(actual + 1)
difference = log_predict - log_actual
square_diff = np.square(difference)
mean_square_diff = square_diff.mean()
score = np.sqrt(mean_squa... | machine_learning |
def mbd(predict, actual):
predict = np.array(predict)
actual = np.array(actual)
difference = predict - actual
numerator = np.sum(difference) / len(predict)
denumerator = np.sum(actual) / len(predict)
# print(numerator, denumerator)
score = float(numerator) / denumerator * 100
return sc... | machine_learning |
def data_handling(data: dict) -> tuple:
    """Split a scikit-learn style dataset dict into (features, target)."""
    features = data["data"]
    target = data["target"]
    return features, target
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    """Fit an XGBoost classifier on the given features/target and return it."""
    model = XGBClassifier()
    model.fit(features, target)
    return model
def main() -> None:
# Load Iris dataset
iris = load_iris()
features, targets = data_handling(iris)
x_train, x_test, y_train, y_test = train_test_split(
features, targets, test_size=0.25
)
names = iris["target_names"]
# Create an XGBoost Classifier from the training data
xgboos... | machine_learning |
def gaussian_distribution(mean: float, std_dev: float, instance_count: int) -> list:
    """Return *instance_count* samples from N(mean, std_dev**2).

    The RNG is re-seeded with a fixed seed on every call, so repeated
    calls with the same arguments produce identical samples.
    """
    seed(1)  # fixed seed -> reproducible output
    samples = [gauss(mean, std_dev) for _ in range(instance_count)]
    return samples
def y_generator(class_count: int, instance_count: list) -> list:
    """Expand per-class instance counts into a flat list of class labels."""
    labels = []
    for class_index in range(class_count):
        labels.extend([class_index] * instance_count[class_index])
    return labels
def calculate_mean(instance_count: int, items: list) -> float:
    """Mean of *items*, dividing by an externally supplied element count."""
    total = sum(items)
    return total / instance_count
def calculate_probabilities(instance_count: int, total_count: int) -> float:
    """Prior probability of a class: its instance count over the total count."""
    probability = instance_count / total_count
    return probability
def calculate_variance(items: list, means: list, total_count: int) -> float:
squared_diff = [] # An empty list to store all squared differences
# iterate over number of elements in items
for i in range(len(items)):
# for loop iterates over number of elements in inner layer of items
for j in... | machine_learning |
def predict_y_values(
x_items: list, means: list, variance: float, probabilities: list
) -> list:
# An empty list to store generated discriminant values of all items in dataset for
# each class
results = []
# for loop iterates over number of elements in list
for i in range(len(x_items)):
... | machine_learning |
def accuracy(actual_y: list, predicted_y: list) -> float:
# iterate over one element of each list at a time (zip mode)
# prediction is correct if actual Y value equals to predicted Y value
correct = sum(1 for i, j in zip(actual_y, predicted_y) if i == j)
# percentage of accuracy equals to number of corr... | machine_learning |
def valid_input(
input_type: Callable[[object], num], # Usually float or int
input_msg: str,
err_msg: str,
condition: Callable[[num], bool] = lambda x: True,
default: str | None = None,
) -> num:
while True:
try:
user_input = input_type(input(input_msg).strip() or default)
... | machine_learning |
def _error(example_no, data_set="train"):
    """Signed difference between the hypothesis value and the true output."""
    predicted = calculate_hypothesis_value(example_no, data_set)
    expected = output(example_no, data_set)
    return predicted - expected
def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis: bias + sum(weight_i * x_i).

    Uses the module-level parameter_vector; element 0 is the bias term.
    """
    hyp_val = parameter_vector[0]
    for i, weight in enumerate(parameter_vector[1:]):
        hyp_val += data_input_tuple[i] * weight
    return hyp_val
def output(example_no, data_set):
    """True output of the requested example; None for an unknown data set."""
    if data_set == "train":
        return train_data[example_no][1]
    if data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for one example; None for an unknown data set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    if data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """Sum of error terms over the first *end* training examples.

    index == -1 selects the bias term (plain error sum); otherwise each
    error is weighted by the example's feature at *index*.
    """
    total = 0
    for i in range(end):
        error = _error(i)
        if index == -1:
            total += error
        else:
            total += error * train_data[i][0][index]
    return total
def get_cost_derivative(index):
    """Batch-averaged partial derivative of the cost w.r.t. parameter *index*."""
    return summation_of_cost_derivative(index, m) / m
def run_gradient_descent():
global parameter_vector
# Tune these values to set a tolerance value for predicted output
absolute_error_limit = 0.000002
relative_error_limit = 0
j = 0
while True:
j += 1
temp_parameter_vector = [0, 0, 0, 0]
for i in range(0, len(parameter_vec... | machine_learning |
def test_gradient_descent():
    """Print actual vs. hypothesis outputs for every test example."""
    for example_index in range(len(test_data)):
        print(("Actual output value:", output(example_index, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(example_index, "test")))
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two equal-length vectors.

    Uses math.dist (C-implemented, hypot-style accumulation) instead of
    the hand-rolled sum of squares. Unlike the original zip-based loop,
    math.dist raises ValueError on unequal lengths rather than silently
    truncating to the shorter vector.
    """
    return math.dist(input_a, input_b)
def similarity_search(
dataset: np.ndarray, value_array: np.ndarray
) -> list[list[list[float] | float]]:
if dataset.ndim != value_array.ndim:
raise ValueError(
f"Wrong input data's dimensions... dataset : {dataset.ndim}, "
f"value_array : {value_array.ndim}"
)
try:... | machine_learning |
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine of the angle between two vectors: a.b / (|a| * |b|)."""
    dot_product = np.dot(input_a, input_b)
    magnitude_product = norm(input_a) * norm(input_b)
    return dot_product / magnitude_product
def euclidean_distance(a, b):
    """Euclidean (L2) distance between two points given as sequences."""
    difference = np.array(a) - np.array(b)
    return np.linalg.norm(difference)
def classifier(train_data, train_target, classes, point, k=5):
data = zip(train_data, train_target)
# List of distances of all points from the point to be classified
distances = []
for data_point in data:
distance = euclidean_distance(data_point[0], point)
distances.append((distance, dat... | machine_learning |
def __init__(self, depth=5, min_leaf_size=5):
    """Create an untrained tree node.

    decision_boundary / left / right / prediction are placeholders that
    training fills in later.
    """
    self.depth = depth
    self.min_leaf_size = min_leaf_size
    self.decision_boundary = 0
    self.left = None
    self.right = None
    self.prediction = None
def mean_squared_error(self, labels, prediction):
    """Mean squared error of a constant *prediction* against *labels*.

    Only warns via print (does not raise) when labels is not 1-D.
    """
    if labels.ndim != 1:
        print("Error: Input labels must be one dimensional")
    squared_error = (labels - prediction) ** 2
    return np.mean(squared_error)
def train(self, x, y):
if x.ndim != 1:
print("Error: Input data set must be one dimensional")
return
if len(x) != len(y):
print("Error: X and y have different lengths")
return
if y.ndim != 1:
print("Error: Data set labels must be one d... | machine_learning |
def predict(self, x):
if self.prediction is not None:
return self.prediction
elif self.left or self.right is not None:
if x >= self.decision_boundary:
return self.right.predict(x)
else:
return self.left.predict(x)
else:
... | machine_learning |
def __init__(self):
    """A* search cell: grid position, parent link and the g/h/f costs."""
    self.position = (0, 0)
    self.parent = None
    # path cost so far (g), heuristic (h), total estimate (f = g + h)
    self.g = self.h = self.f = 0
def __eq__(self, cell):
    """Cells compare equal when they occupy the same grid position."""
    same_position = self.position == cell.position
    return same_position
def showcell(self):
    """Print this cell's grid position tuple."""
    position = self.position
    print(position)
def __init__(self, world_size=(5, 5)):
    """Create a world_size[0] x world_size[1] grid of zeros."""
    self.world_x_limit, self.world_y_limit = world_size[0], world_size[1]
    self.w = np.zeros(world_size)
def show(self):
    """Print the raw grid array."""
    grid = self.w
    print(grid)
def get_neigbours(self, cell):
neughbour_cord = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
current_x = cell.position[0]
current_y = cell.position[1]
neig... | machine_learning |
def astar(world, start, goal):
_open = []
_closed = []
_open.append(start)
while _open:
min_f = np.argmin([n.f for n in _open])
current = _open[min_f]
_closed.append(_open.pop(min_f))
if current == goal:
break
for n in world.get_neigbours(current):
... | machine_learning |
def sigmoid_function(z):
    """Logistic sigmoid 1 / (1 + e^-z); maps the reals into (0, 1)."""
    exp_neg_z = np.exp(-z)
    return 1 / (1 + exp_neg_z)
def cost_function(h, y):
    """Mean binary cross-entropy of predictions h against labels y."""
    per_sample_loss = -y * np.log(h) - (1 - y) * np.log(1 - h)
    return per_sample_loss.mean()
def log_likelihood(x, y, weights):
    """Bernoulli log-likelihood of labels y under a logistic model."""
    scores = np.dot(x, weights)
    log_partition = np.log(1 + np.exp(scores))
    return np.sum(y * scores - log_partition)
def logistic_reg(alpha, x, y, max_iterations=70000):
theta = np.zeros(x.shape[1])
for iterations in range(max_iterations):
z = np.dot(x, theta)
h = sigmoid_function(z)
gradient = np.dot(x.T, h - y) / y.size
theta = theta - alpha * gradient # updating the weights
z = np.... | machine_learning |
def predict_prob(x):
    """Predicted probabilities from the fitted logistic model (global theta)."""
    scores = np.dot(x, theta)
    return sigmoid_function(scores)
def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
    """Compare the sample's squared distance to both weight rows and
    return a cluster index.

    Removes the unreachable ``return 0`` that followed the original's
    return statement.

    NOTE(review): this returns 0 when d0 > d1, i.e. the row *farther*
    from the sample — kept as-is to preserve existing behavior, but it
    looks inverted for a classic SOM winner selection; confirm intent
    before changing.
    """
    d0 = 0.0
    d1 = 0.0
    for i in range(len(sample)):
        d0 += math.pow(sample[i] - weights[0][i], 2)
        d1 += math.pow(sample[i] - weights[1][i], 2)
    return 0 if d0 > d1 else 1
def update(
self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
) -> list[list[int | float]]:
for i in range(len(weights)):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights | machine_learning |
def main() -> None:
# Training Examples ( m, n )
training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
self_organizing_map = SelfOrganizingMap()
epochs = 3
alpha = 0... | machine_learning |
def term_frequency(term: str, document: str) -> int:
# strip all punctuation and newlines and replace it with ''
document_without_punctuation = document.translate(
str.maketrans("", "", string.punctuation)
).replace("\n", "")
tokenize_document = document_without_punctuation.split(" ") # word to... | machine_learning |
def document_frequency(term: str, corpus: str) -> tuple[int, int]:
corpus_without_punctuation = corpus.lower().translate(
str.maketrans("", "", string.punctuation)
) # strip all punctuation and replace it with ''
docs = corpus_without_punctuation.split("\n")
term = term.lower()
return (len(... | machine_learning |
def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
if smoothing:
if n == 0:
raise ValueError("log10(0) is undefined.")
return round(1 + log10(n / (1 + df)), 3)
if df == 0:
raise ZeroDivisionError("df must be > 0")
elif n == 0:
raise ValueE... | machine_learning |
def norm_squared(vector: ndarray) -> float:
    """Squared L2 norm: the dot product of the vector with itself."""
    squared_norm = np.dot(vector, vector)
    return squared_norm
def __init__(
self,
*,
regularization: float = np.inf,
kernel: str = "linear",
gamma: float = 0,
) -> None:
self.regularization = regularization
self.gamma = gamma
if kernel == "linear":
self.kernel = self.__linear
elif kernel == "r... | machine_learning |
def normalization(data: list, ndigits: int = 3) -> list:
    """Min-max rescale *data* into [0, 1], rounded to *ndigits* decimals."""
    lowest, highest = min(data), max(data)
    span = highest - lowest
    return [round((value - lowest) / span, ndigits) for value in data]
def __init__(
self,
train,
kernel_func,
alpha_list=None,
cost=0.4,
b=0.0,
tolerance=0.001,
auto_norm=True,
):
self._init = True
self._auto_norm = auto_norm
self._c = np.float64(cost)
self._b = np.float64(b)
self.... | machine_learning |
def fit(self):
k = self._k
state = None
while True:
# 1: Find alpha1, alpha2
try:
i1, i2 = self.choose_alpha.send(state)
state = None
except StopIteration:
print("Optimization done!\nEvery sample satisfy the KKT ... | machine_learning |
def predict(self, test_samples, classify=True):
if test_samples.shape[1] > self.samples.shape[1]:
raise ValueError(
"Test samples' feature length does not equal to that of train samples"
)
if self._auto_norm:
test_samples = self._norm(test_samples)
... | machine_learning |
def _check_obey_kkt(self, index):
    """Return True when sample *index* violates its KKT condition
    (used to select alphas that still need optimizing)."""
    r = self._e(index) * self.tags[index]
    alpha = self.alphas[index]
    tol = self._tol
    return (r < -tol and alpha < self._c) or (r > tol and alpha > 0.0)
def _k(self, i1, i2):
    """Kernel value between train sample i1 and i2.

    i2 may be a raw sample vector (ndarray: compute the kernel directly,
    used for test samples) or another train-sample index (read the
    precomputed kernel matrix).
    """
    if isinstance(i2, np.ndarray):
        return self.Kernel(self.samples[i1], i2)
    return self._K_matrix[i1, i2]
def _e(self, index):
    """Prediction error E_i = g(x_i) - y_i for sample *index*.

    Unbound samples read the cached error; bound samples recompute
    g(x_i) from the alphas, tags and kernel matrix.
    """
    if self._is_unbound(index):
        return self._error[index]
    gx = np.dot(self.alphas * self.tags, self._K_matrix[:, index]) + self._b
    return gx - self.tags[index]
def _calculate_k_matrix(self):
k_matrix = np.zeros([self.length, self.length])
for i in self._all_samples:
for j in self._all_samples:
k_matrix[i, j] = np.float64(
self.Kernel(self.samples[i, :], self.samples[j, :])
)
return k_matri... | machine_learning |
def _predict(self, sample):
k = self._k
predicted_value = (
np.sum(
[
self.alphas[i1] * self.tags[i1] * k(i1, sample)
for i1 in self._all_samples
]
)
+ self._b
)
return predicted_v... | machine_learning |
def _choose_alphas(self):
    """Generator wrapper over _choose_a1; its return value (the chosen
    index pair) is forwarded, with falsy results normalized to None."""
    locis = yield from self._choose_a1()
    return locis if locis else None
def _choose_a1(self):
while True:
all_not_obey = True
# all sample
print("scanning all sample!")
for i1 in [i for i in self._all_samples if self._check_obey_kkt(i)]:
all_not_obey = False
yield from self._choose_a2(i1)
#... | machine_learning |
def _choose_a2(self, i1):
self._unbound = [i for i in self._all_samples if self._is_unbound(i)]
if len(self.unbound) > 0:
tmp_error = self._error.copy().tolist()
tmp_error_dict = {
index: value
for index, value in enumerate(tmp_error)
... | machine_learning |
def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2):
k = self._k
if i1 == i2:
return None, None
# calculate L and H which bound the new alpha2
s = y1 * y2
if s == -1:
l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1)
else:
... | machine_learning |
def _norm(self, data):
    """Min-max normalize *data* column-wise.

    On the first call (self._init is True) the per-column min/max are
    captured from *data* and reused for every later call, so test data
    is scaled exactly like the training data.

    Both branches of the original returned the identical expression;
    the duplication is folded into a single return.
    """
    if self._init:
        self._min = np.min(data, axis=0)
        self._max = np.max(data, axis=0)
        self._init = False
    return (data - self._min) / (self._max - self._min)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.