| """ | |
| Curated task bank for the Research → Interactive Explainer environment. | |
| Tasks are organized by difficulty (easy/medium/hard) and tier (beginner/intermediate/ | |
| advanced). Each task optionally specifies a preferred format (marimo or manim); when | |
| None, the SLM must infer the best format and gets full format_match reward either way. | |
| """ | |
| from dataclasses import dataclass | |
| from typing import Literal | |

@dataclass
class Task:
    topic: str
    content: str
    tier: Literal["beginner", "intermediate", "advanced"]
    keywords: str
    data_available: bool
    difficulty: Literal["easy", "medium", "hard"]
    preferred_format: Literal["marimo", "manim"] | None = None
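
# Illustrative sketch, not part of the original bank: one plausible reading of
# the format_match rule stated in the module docstring ("when None, the SLM ...
# gets full format_match reward either way"). The function name, signature, and
# 0/1 reward scale are assumptions; the actual reward lives in the environment.
def format_match_reward(task: Task, chosen_format: str) -> float:
    """Full reward when the task states no preference or the choice matches it."""
    if task.preferred_format is None:
        # No preference recorded: full reward regardless of the chosen format.
        return 1.0
    return 1.0 if chosen_format == task.preferred_format else 0.0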

# ---------- ML Concepts (Marimo-biased) ----------
ML_CONCEPTS: list[Task] = [
    Task(
        topic="Linear Regression",
        content="Linear regression fits a line to data by minimizing squared errors. Given input features X and target y, it finds weights w such that y ≈ Xw. The loss function is MSE = (1/n) Σ(yi - ŷi)².",
        tier="beginner",
        keywords="linear regression,least squares,MSE,gradient descent,weights,bias",
        data_available=True,
        preferred_format="marimo",
        difficulty="easy",
    ),
    Task(
        topic="Gradient Descent",
        content="Gradient descent iteratively updates parameters by moving in the direction of steepest decrease of the loss function. Update rule: θ = θ - α∇L(θ), where α is the learning rate. Variants include SGD, mini-batch, and Adam.",
        tier="beginner",
        keywords="gradient descent,learning rate,loss function,SGD,convergence,optimization",
        data_available=True,
        preferred_format="marimo",
        difficulty="easy",
    ),
    Task(
        topic="Decision Trees",
        content="Decision trees split data recursively based on feature thresholds that maximize information gain (or minimize Gini impurity). Each leaf node represents a class label or regression value.",
        tier="beginner",
        keywords="decision tree,information gain,Gini impurity,splitting,leaf node,classification",
        data_available=True,
        preferred_format="marimo",
        difficulty="easy",
    ),
    Task(
        topic="K-Means Clustering",
        content="K-means partitions n observations into k clusters by iteratively assigning points to the nearest centroid and updating centroids to cluster means. Converges to a local optimum. Sensitive to initialization — use k-means++ for better starts.",
        tier="intermediate",
        keywords="k-means,clustering,centroid,Euclidean distance,convergence,k-means++",
        data_available=True,
        preferred_format="marimo",
        difficulty="easy",
    ),
    Task(
        topic="Attention Mechanism",
        content="The attention mechanism computes a weighted sum of values (V) where weights come from compatibility of queries (Q) and keys (K): Attention(Q,K,V) = softmax(QK^T/√dk)V. Self-attention allows each position to attend to all positions in the input.",
        tier="intermediate",
        keywords="attention,self-attention,query,key,value,softmax,transformer,scaled dot-product",
        data_available=False,
        preferred_format="marimo",
        difficulty="medium",
    ),
    Task(
        topic="Backpropagation",
        content="Backpropagation computes gradients of the loss with respect to each weight by applying the chain rule layer by layer from output to input. It enables efficient training of deep networks by reusing intermediate computations.",
        tier="intermediate",
        keywords="backpropagation,chain rule,gradient,computational graph,forward pass,backward pass",
        data_available=False,
        preferred_format="marimo",
        difficulty="medium",
    ),
    Task(
        topic="Convolutional Neural Networks",
        content="CNNs use learnable filters that slide over input (convolution) to detect local patterns like edges, textures, and shapes. Key operations: convolution, pooling, and fully-connected layers. Translation equivariance is a key inductive bias.",
        tier="intermediate",
        keywords="CNN,convolution,pooling,filter,feature map,stride,padding,translation equivariance",
        data_available=False,
        preferred_format="marimo",
        difficulty="medium",
    ),
    Task(
        topic="Batch Normalization",
        content="Batch normalization normalizes activations within a mini-batch: x̂ = (x - μ_B) / √(σ²_B + ε), then scales and shifts: y = γx̂ + β. Reduces internal covariate shift, enables higher learning rates, and acts as a regularizer.",
        tier="advanced",
        keywords="batch normalization,internal covariate shift,running mean,running variance,gamma,beta",
        data_available=False,
        preferred_format="marimo",
        difficulty="hard",
    ),
    Task(
        topic="Variational Autoencoders",
        content="VAEs learn a probabilistic latent space by encoding inputs to distributions q(z|x) and decoding samples p(x|z). The ELBO loss = reconstruction + KL divergence. Reparameterization trick enables backprop through sampling: z = μ + σ⊙ε.",
        tier="advanced",
        keywords="VAE,ELBO,KL divergence,reparameterization,latent space,encoder,decoder,generative",
        data_available=False,
        preferred_format="marimo",
        difficulty="hard",
    ),
    Task(
        topic="Reinforcement Learning Basics",
        content="An agent interacts with an environment, observing states, taking actions, and receiving rewards. The goal is to learn a policy π(a|s) that maximizes cumulative discounted reward. Key concepts: value function V(s), Q-function Q(s,a), Bellman equation.",
        tier="beginner",
        keywords="reinforcement learning,agent,environment,reward,policy,value function,Q-function,Bellman",
        data_available=False,
        difficulty="easy",
    ),
]

# ---------- Math Topics (Manim-biased) ----------
MATH_TOPICS: list[Task] = [
    Task(
        topic="Fourier Transform",
        content="The Fourier transform decomposes a function into its constituent frequencies: F(ω) = ∫f(t)e^(-iωt)dt. Any periodic signal can be represented as a sum of sines and cosines. The DFT computes this for discrete samples.",
        tier="intermediate",
        keywords="Fourier transform,frequency,sine,cosine,DFT,spectrum,decomposition,harmonics",
        data_available=True,
        preferred_format="manim",
        difficulty="medium",
    ),
    Task(
        topic="Eigenvalues and Eigenvectors",
        content="For a matrix A, eigenvector v satisfies Av = λv where λ is the eigenvalue. Eigenvectors represent directions unchanged by the transformation (only scaled). PCA uses eigenvectors of the covariance matrix.",
        tier="intermediate",
        keywords="eigenvalue,eigenvector,matrix,linear transformation,PCA,covariance,diagonalization",
        data_available=False,
        preferred_format="manim",
        difficulty="medium",
    ),
    Task(
        topic="Taylor Series",
        content="The Taylor series expands a function as an infinite sum of terms: f(x) = Σ f^(n)(a)/n! · (x-a)^n. Provides polynomial approximations to functions. Convergence depends on the radius of convergence.",
        tier="beginner",
        keywords="Taylor series,polynomial approximation,derivative,convergence,Maclaurin,expansion",
        data_available=False,
        preferred_format="manim",
        difficulty="easy",
    ),
    Task(
        topic="Bayes' Theorem",
        content="Bayes' theorem relates conditional probabilities: P(A|B) = P(B|A)P(A)/P(B). It enables updating beliefs given new evidence. Foundation of Bayesian inference, spam filters, and medical diagnosis.",
        tier="beginner",
        keywords="Bayes theorem,conditional probability,prior,posterior,likelihood,evidence,Bayesian",
        data_available=True,
        difficulty="easy",
    ),
    Task(
        topic="Gradient and Directional Derivatives",
        content="The gradient ∇f points in the direction of steepest ascent. The directional derivative Duf = ∇f · u gives the rate of change in direction u. Gradient descent follows -∇f to minimize functions.",
        tier="intermediate",
        keywords="gradient,directional derivative,steepest ascent,contour,level set,multivariable calculus",
        data_available=False,
        preferred_format="manim",
        difficulty="medium",
    ),
    Task(
        topic="Matrix Multiplication as Linear Transformation",
        content="Multiplying a vector by a matrix transforms it: the columns of A define where basis vectors land. Composition of transformations = matrix multiplication. Determinant measures area/volume scaling.",
        tier="beginner",
        keywords="matrix multiplication,linear transformation,basis vectors,determinant,composition",
        data_available=False,
        preferred_format="manim",
        difficulty="easy",
    ),
    Task(
        topic="Central Limit Theorem",
        content="The CLT states that the distribution of sample means approaches a normal distribution as sample size increases, regardless of the population distribution. Requires finite variance. Rate: O(1/√n).",
        tier="intermediate",
        keywords="central limit theorem,normal distribution,sample mean,variance,convergence,CLT",
        data_available=True,
        difficulty="medium",
    ),
    Task(
        topic="Singular Value Decomposition",
        content="SVD decomposes any matrix A = UΣV^T where U,V are orthogonal and Σ is diagonal with singular values. Used in dimensionality reduction, matrix completion, and computing pseudoinverse. Truncated SVD approximates with k largest singular values.",
        tier="advanced",
        keywords="SVD,singular value,orthogonal,dimensionality reduction,low-rank approximation,pseudoinverse",
        data_available=False,
        preferred_format="manim",
        difficulty="hard",
    ),
]

# ---------- Algorithms (Manim-biased) ----------
ALGORITHMS: list[Task] = [
    Task(
        topic="Merge Sort",
        content="Merge sort divides the array in half, recursively sorts each half, then merges the sorted halves. Time complexity O(n log n), space O(n). Stable sort. Divide-and-conquer paradigm.",
        tier="beginner",
        keywords="merge sort,divide and conquer,recursion,O(n log n),stable sort,merging",
        data_available=True,
        preferred_format="manim",
        difficulty="easy",
    ),
    Task(
        topic="Binary Search",
        content="Binary search finds a target in a sorted array by repeatedly halving the search space. Compare target with middle element; eliminate half. Time O(log n). Requires sorted input.",
        tier="beginner",
        keywords="binary search,sorted array,O(log n),divide and conquer,search space,comparison",
        data_available=True,
        preferred_format="manim",
        difficulty="easy",
    ),
    Task(
        topic="Dijkstra's Algorithm",
        content="Dijkstra's algorithm finds shortest paths from a source vertex to all others in a weighted graph with non-negative edges. Uses a priority queue. Greedily selects the nearest unvisited vertex. Time O((V+E) log V) with binary heap.",
        tier="intermediate",
        keywords="Dijkstra,shortest path,graph,priority queue,greedy,weighted edges,relaxation",
        data_available=False,
        preferred_format="manim",
        difficulty="medium",
    ),
    Task(
        topic="A* Search Algorithm",
        content="A* combines Dijkstra's algorithm with heuristics: f(n) = g(n) + h(n), where g is cost-so-far and h is estimated cost-to-goal. Optimal if h is admissible (never overestimates). Used in pathfinding and game AI.",
        tier="intermediate",
        keywords="A-star,heuristic,admissible,pathfinding,f-score,g-score,h-score,optimal",
        data_available=False,
        preferred_format="manim",
        difficulty="medium",
    ),
    Task(
        topic="Quick Sort",
        content="Quick sort selects a pivot, partitions elements into less-than and greater-than groups, then recursively sorts each. Average O(n log n), worst O(n²). In-place. Pivot selection strategy matters (median-of-three).",
        tier="beginner",
        keywords="quick sort,pivot,partition,in-place,O(n log n),recursion,divide and conquer",
        data_available=True,
        preferred_format="manim",
        difficulty="easy",
    ),
]

# ---------- Statistics / Data (Marimo-biased) ----------
STATISTICS_TASKS: list[Task] = [
    Task(
        topic="Exploratory Data Analysis",
        content="EDA uses summary statistics and visualizations to understand data distributions, correlations, and anomalies before modeling. Key tools: histograms, box plots, scatter matrices, correlation heatmaps.",
        tier="beginner",
        keywords="EDA,histogram,box plot,correlation,scatter plot,distribution,outliers,summary statistics",
        data_available=True,
        preferred_format="marimo",
        difficulty="easy",
    ),
    Task(
        topic="Hypothesis Testing",
        content="Hypothesis testing determines if observed data provides sufficient evidence against a null hypothesis H0. Steps: formulate H0/H1, choose significance level α, compute test statistic, compare with critical value or p-value.",
        tier="intermediate",
        keywords="hypothesis testing,null hypothesis,p-value,significance level,t-test,type I error,type II error",
        data_available=True,
        preferred_format="marimo",
        difficulty="medium",
    ),
    Task(
        topic="Principal Component Analysis",
        content="PCA reduces dimensionality by projecting data onto directions of maximum variance. Steps: center data, compute covariance matrix, find eigenvectors (principal components), project. Explained variance ratio guides k selection.",
        tier="intermediate",
        keywords="PCA,principal components,variance,dimensionality reduction,eigenvector,covariance,projection",
        data_available=True,
        preferred_format="marimo",
        difficulty="medium",
    ),
]

ALL_TASKS: list[Task] = ML_CONCEPTS + MATH_TOPICS + ALGORITHMS + STATISTICS_TASKS

EASY_TASKS = [t for t in ALL_TASKS if t.difficulty == "easy"]
MEDIUM_TASKS = [t for t in ALL_TASKS if t.difficulty == "medium"]
HARD_TASKS = [t for t in ALL_TASKS if t.difficulty == "hard"]
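
# Minimal usage sketch (assumed consumer code, not part of the bank itself):
# sample one task from each difficulty bucket, e.g. as a quick smoke test that
# the buckets are non-empty and the fields read back as expected.
if __name__ == "__main__":
    import random

    for name, bucket in [("easy", EASY_TASKS), ("medium", MEDIUM_TASKS), ("hard", HARD_TASKS)]:
        task = random.choice(bucket)
        print(f"{name}: {task.topic} ({task.tier}, preferred={task.preferred_format})")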