|
|
|
|
|
|
|
|
|
|
|
|
|
|
* Mata compile-time settings: require explicit type declarations (matastrict)
* and enable the bytecode optimizer; both apply to all Mata code compiled below
mata: mata set matastrict on
mata: mata set mataoptimize on

* This code builds on the -ftools- Factor class; locate its Mata source on the
* adopath and abort with installation instructions if the package is missing
cap findfile "ftools.mata"
if (_rc) {
	di as error "reghdfe requires the {bf:ftools} package, which is not installed"
	di as error `" - install from {stata ssc install ftools:SSC}"'
	di as error `" - install from {stata `"net install ftools, from("https://github.com/sergiocorreia/ftools/raw/master/src/")"':Github}"'
	exit 9
}
* Compile ftools' Mata library into this session
include "`r(fn)'"
|
|
|
|
|
* Pseudo-typedefs: local macros used as Mata type declarations below,
* so that e.g. `Factor' expands to -class Factor scalar-
loc Factor				class Factor scalar
loc OptionalFactor		class Factor
loc Factors				class Factor rowvector
loc FactorPointer		pointer(`Factor') scalar
loc FactorPointers		pointer(`Factor') rowvector

* Equivalent aliases for the FE_Factor subclass defined in this file
loc FE_Factor			class FE_Factor scalar
loc Optional_FE_Factor	class FE_Factor
loc FE_Factors			class FE_Factor rowvector
loc FE_FactorPointer	pointer(`FE_Factor') scalar
loc FE_FactorPointers	pointer(`FE_Factor') rowvector

* Alias for the Individual_Factor subclass (group/individual fixed effects)
loc Individual_Factor	class Individual_Factor scalar

* Aliases for the other classes used by the solver
loc FixedEffects		class FixedEffects scalar
loc BipartiteGraph		class BipartiteGraph scalar
loc Solution			class Solution scalar

* Empty by default; presumably set to a nonempty value to enable debug paths
* -- TODO confirm usage elsewhere in the project
loc DEBUG ""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| mata:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// FE_Factor: extends ftools' Factor with the state and operations needed to
// absorb one fixed effect (intercept and/or slopes) inside the HDFE solver.
class FE_Factor extends Factor
{
	// --- Identity of the absorbed term ---
	`String'				absvar					// label of this absorbed FE (used in messages)
	`Varlist'				ivars					// categorical variables defining the levels
	`Varlist'				cvars					// continuous variables (slopes), if any
	`Boolean'				has_intercept
	`Integer'				num_slopes
	`String'				target					// assumes: name under which alphas are saved -- TODO confirm
	`Boolean'				save_fe
	`Integer'				num_singletons

	// --- Weights (stored sorted in panel order; see set_weights()) ---
	`Boolean'				has_weights
	`Vector'				weights					// 1 when has_weights==0
	`Vector'				weighted_counts			// per-level sum of weights; . when unset

	// --- Slope data and cross-product caches ---
	`Matrix'				unsorted_x				// slope variables in dataset (unsorted) order
	`RowVector'				x_means
	`RowVector'				x_stdevs
	`Matrix'				inv_xx					// stacked per-level blocks (see init_block_diag_preconditioner)
	`Matrix'				xx_info

	// --- Preconditioner state used by mult()/mult_transpose() ---
	`String'				preconditioner			// "none", "diagonal", or "block_diagonal"
	`Matrix'				preconditioner_intercept, preconditioner_slopes

	// --- Fixed-effect estimates ---
	`Vector'				tmp_alphas
	`Vector'				alphas

	`Boolean'				is_individual_fe		// 0 here; set to 1 by Individual_Factor
	`String'				individual_fe_function

	// Permutations used by the group/individual machinery
	`Vector'				group_p, group_inv_p

	// --- Methods (virtual so Individual_Factor can override them) ---
	`Void'					new()
	virtual `Vector'		drop_singletons()
	virtual `Void'			init_diagonal_preconditioner()
	virtual `Void'			init_block_diag_preconditioner()
	virtual `Void'			mult()
	virtual `Vector'		mult_transpose()
	virtual `Void'			undo_preconditioning()
	virtual `Matrix'		panelsum()
	virtual `Matrix'		panelmean()
	virtual `Void'			set_weights()

	virtual `Void'			drop_obs()
	virtual `Void'			panelsetup()

	virtual `Void'			cleanup_before_saving()
}
|
|
|
|
|
// Constructor: initialize the members added on top of Factor.
// weights==1 and weighted_counts==. are the documented "no weights yet"
// sentinels that set_weights() asserts on.
`Void' FE_Factor::new()
{
	is_individual_fe = 0
	num_singletons = 0
	has_weights = 0
	weights = 1
	weighted_counts = .
}
|
|
|
|
|
// Drop observations that belong to singleton levels of this factor.
// Optional args:
//   fweight        - frequency weights; when present, a level is a singleton
//                    based on its weighted count (and fweight is itself
//                    trimmed in place to the surviving observations)
//   zero_threshold - when nonzero, additionally treat levels whose weighted
//                    count is zero as droppable (used with fweights)
// Returns the (sorted-order) indices of dropped observations, or a 0x1
// vector when there was nothing to drop. Updates -counts- and the factor's
// internal structures via __inner_drop().
`Vector' FE_Factor::drop_singletons(| `Vector' fweight,
                                      `Boolean' zero_threshold)
{
	// NOTE(review): this local shadows the class member of the same name,
	// so the member num_singletons is NOT updated here -- confirm intended
	`Integer'				num_singletons
	`Vector'				mask, idx
	`Boolean'				has_fweight
	`Vector'				w_counts

	if (zero_threshold == .) zero_threshold = 0

	// Factor must have been created with count_levels enabled
	if (counts == J(0, 1, .)) {
		_error(123, "drop_singletons() requires the -counts- vector")
	}

	has_fweight = (args()>=1) & (fweight!=. & rows(fweight)>0)

	if (has_fweight) {
		assert(rows(fweight)==num_obs)
		this.panelsetup()
		// Weighted count of each level (ignoring any stored weights)
		w_counts = this.panelsum(fweight, 1, 1)
		if (zero_threshold) {
			// Flag levels with zero weighted count or a raw count of 1;
			// NOTE(review): mask holds counts (not 0/1) on this branch -- verify
			mask = (!w_counts :| (counts :== 1)) :* counts
		}
		else {
			mask = w_counts :== 1
		}
	}
	else {
		mask = (counts :== 1)
	}

	num_singletons = sum(mask)
	if (num_singletons == 0) return(J(0, 1, .))
	counts = counts - mask
	// Expand the per-level mask to per-observation indices
	idx = selectindex(mask[levels, .])

	// Trim fweight in place so it matches the surviving sample
	if (has_fweight) {
		fweight = num_singletons == num_obs ? J(0, 1, .) : select(fweight, (!mask)[levels])
	}

	__inner_drop(idx)
	return(idx)
}
|
|
|
|
|
// Build the panel -info- matrix (delegates to Factor) and verify it succeeded.
`Void' FE_Factor::panelsetup()
{
	super.panelsetup()
	assert(this.panel_is_setup==1)
}
|
|
|
|
|
// Sum -x- within each level of the factor.
//   sort_input     - sort rows into panel order first (default 0)
//   ignore_weights - sum raw rows even when weights are attached (default 0)
// Requires panelsetup() to have run.
`Matrix' FE_Factor::panelsum(`Matrix' x, | `Boolean' sort_input, `Boolean' ignore_weights)
{
	`Matrix'				data

	// Default both optional flags to "off"
	if (args()<2 | sort_input == .) sort_input = 0
	if (args()<3 | ignore_weights == .) ignore_weights = 0

	assert_boolean(sort_input)
	assert_boolean(ignore_weights)
	assert_boolean(this.has_weights)
	assert_msg(this.panel_is_setup == 1, "must call .panelsetup() before .panelsum()")
	assert_msg(this.has_weights==1 | this.weights==1)

	// Bring the data into panel order if the caller passed it unsorted
	data = sort_input ? this.sort(x) : x

	// Either a plain sum or a weighted sum using the cached sorted weights
	if (ignore_weights) return(::panelsum(data, this.info))
	return(::panelsum(data, this.weights, this.info))
}
|
|
|
|
|
// Mean of -y- within each level of the factor (weighted when weights are set).
//   sort_input - sort rows into panel order first (default 0)
// With weights, cells whose weighted count is zero yield 0 instead of missing.
`Variables' FE_Factor::panelmean(`Variables' y, | `Boolean' sort_input)
{
	`Variables'				sums

	if (args()<2 | sort_input == .) sort_input = 0
	assert_boolean(this.has_weights)
	assert_msg(this.panel_is_setup == 1, "this.panel_setup() must be run beforehand")
	assert_msg(this.has_weights==1 | this.weights==1)

	// Per-level (weighted) sums; this.weights is 1 when no weights are set
	sums = ::panelsum(sort_input ? this.sort(y) : y, this.weights, this.info)

	// Divide by the appropriate denominator for each level
	if (this.has_weights) return(editmissing(sums :/ this.weighted_counts, 0))
	return(sums :/ this.counts)
}
|
|
|
|
|
// Attach observation weights to this factor (may only be done once).
// Stores the weights sorted in panel order and caches per-level weighted
// counts for use by panelmean() and the preconditioners.
`Void' FE_Factor::set_weights(`Vector' input_weights, | `Boolean' verbose)
{
	if (args()<2 | verbose == .) verbose = 0
	if (verbose > 0) printf("{txt}   - sorting weights for factor {res}%s{txt}\n", this.absvar)

	// Guard against double-initialization: all three sentinels must still
	// hold their constructor defaults
	assert_msg(this.has_weights == 0)
	assert_msg(this.weights == 1)
	assert_msg(this.weighted_counts == .)
	assert_msg(this.num_obs == rows(input_weights))

	// Order matters: has_weights/weights must be set before calling
	// panelsum(), whose asserts inspect them
	this.has_weights = 1
	this.weights = this.sort(input_weights)
	this.weighted_counts = this.panelsum(this.weights, 0, 1)
}
|
|
|
|
|
// Remove the observations indexed by -idx- (delegates to Factor).
`Void' FE_Factor::drop_obs(`Vector' idx)
{
	super.drop_obs(idx)
}
|
|
|
|
|
// Build the diagonal (Jacobi) preconditioner: per-level scaling factors for
// the intercept and slope coefficients used by mult()/mult_transpose().
`Void' FE_Factor::init_diagonal_preconditioner()
{
	`Matrix'				denom

	// Intercept block: scale each level by 1/sqrt of its (weighted) count
	if (this.has_intercept) {
		denom = this.has_weights ? this.weighted_counts : this.counts
		this.preconditioner_intercept = sqrt(1 :/ denom)
	}

	// Slope block: scale by 1/sqrt of the within-level sum of squared x
	if (this.num_slopes) {
		denom = this.panelsum(this.unsorted_x :^ 2, 1)
		this.preconditioner_slopes = 1 :/ sqrt(denom)
	}
}
|
|
|
|
|
// Build the block-diagonal preconditioner: for each level, the symmetric
// square root of the inverse cross-product of (intercept, slopes), stacked
// vertically into this.inv_xx (L blocks of FullK x FullK).
// Falls back to the diagonal preconditioner when there is only one
// coefficient per level (FullK == 1).
`Void' FE_Factor::init_block_diag_preconditioner(`String' weight_type, `Real' weights_scale, `Vector' true_weights)
{
	`Matrix'				precond
	`Matrix'				sorted_x
	`Matrix'				x_means, inv_xx
	`Matrix'				tmp_inv_xx, tmp_x, tmp_xx
	`RowVector'				tmp_x_means
	`Vector'				tmp_w
	`Integer'				i, offset, L, K, FullK, N

	// NOTE(review): B11 holds 1/N + quadratic form, so `Integer' looks like
	// it should be `Real' -- harmless under Mata's typing, but confirm
	`Integer'				B11
	`RowVector'				B12

	`Matrix'				V
	`Vector'				d
	`Vector'				sorted_true_weights

	L = this.num_levels
	K = this.num_slopes
	FullK = this.has_intercept + this.num_slopes

	// Single coefficient per level: diagonal preconditioner is equivalent
	if (FullK == 1) {
		this.init_diagonal_preconditioner()
		return
	}

	sorted_x = this.sort(this.unsorted_x)
	if (this.has_intercept) this.x_means = this.panelmean(sorted_x, 0)

	inv_xx = J(L*FullK, FullK, .)
	if (rows(true_weights)) sorted_true_weights = this.sort(true_weights)

	// One FullK x FullK block per level
	for (i=1; i<=L; i++) {

		tmp_x = panelsubmatrix(sorted_x, i, this.info)
		tmp_w = has_weights ? panelsubmatrix(this.weights, i, this.info) : 1
		offset = FullK * (i - 1)

		// Effective number of observations N for this level depends on the
		// weight type
		if (rows(true_weights)) {
			// -true_weights- is only passed for aweights
			assert_msg(weight_type == "aweight")
			N = quadsum(panelsubmatrix(sorted_true_weights, i, this.info))
		}
		else if (weight_type=="fweight") {
			N = quadsum(panelsubmatrix(this.weights, i, this.info))
		}
		else if (weight_type=="" | weight_type=="aweight" | weight_type=="pweight") {
			N = rows(tmp_x)
		}
		else {
			assert_msg(0, "preconditioner missing one case - iweight???")
		}

		// NOTE(review): the else-branch below is dead code (condition is a
		// constant 1); it keeps an alternative eigendecomposition variant
		if (1) {
			if (this.has_intercept) {
				// Invert the demeaned slope cross-product, then extend it to
				// the full (intercept, slopes) inverse via block inversion
				tmp_x_means = K > 1 ? this.x_means[i, .] : this.x_means[i]
				tmp_inv_xx = invsym(quadcrossdev(tmp_x, tmp_x_means, tmp_w, tmp_x, tmp_x_means))

				B11 = 1 / N + tmp_x_means * tmp_inv_xx * tmp_x_means'
				B12 = -tmp_x_means * tmp_inv_xx
				tmp_inv_xx = (B11, B12 \ B12', tmp_inv_xx)
			}
			else {
				tmp_inv_xx = invsym(quadcross(tmp_x, tmp_w, tmp_x))
			}

			// Symmetric square root of the inverse cross-product
			eigensystem(tmp_inv_xx, V=., d=.)
			tmp_inv_xx = makesymmetric(Re(V * diag(sqrt(d)) * V'))

			inv_xx[|offset + 1, 1 \ offset + FullK , . |] = tmp_inv_xx
		}
		else {
			// Alternative: build the cross-product directly and take the
			// inverse square root in one eigendecomposition
			tmp_xx = quadcross(tmp_x, tmp_w, tmp_x)
			if (this.has_intercept) {
				tmp_x_means = K > 1 ? this.x_means[i, .] : this.x_means[i]
				tmp_xx = N, N*tmp_x_means \ N*tmp_x_means' , tmp_xx
			}

			eigensystem(tmp_xx, V=., d=.)
			inv_xx[|offset + 1, 1 \ offset + FullK , . |] = makesymmetric(Re(V * diag(1:/sqrt(d)) * V'))
		}
	}

	// Move results into the class; note the first swap exchanges the member
	// with a never-assigned local, effectively clearing this.x_means --
	// presumably intentional to release memory; TODO confirm
	if (this.has_intercept) ::swap(x_means, this.x_means)
	::swap(inv_xx, this.inv_xx)
}
|
|
|
|
|
// Expand per-level coefficients to per-observation predicted values and
// accumulate them into -ans- (in place): ans += D * coefs, where D is this
// factor's (preconditioned) design matrix.
// -coefs- is the stacked vector of intercept (NI) and slope (NS) coefficients;
// the preconditioner actually used is stored in this.preconditioner.
`Void' FE_Factor::mult(`Vector' coefs, `String' suggested_preconditioner, `Vector' ans)
{
	`Integer'				NI, NS, NN, FullK
	`Boolean'				intercept_only
	`Matrix'				tmp

	NI = num_levels * has_intercept
	NS = num_levels * num_slopes
	NN = NI + NS
	FullK = has_intercept + num_slopes
	assert(num_levels * FullK == NN)

	intercept_only = this.has_intercept& !this.num_slopes
	// block_diagonal degenerates to diagonal when there is one coef per level
	this.preconditioner = (FullK == 1 & suggested_preconditioner == "block_diagonal") ? "diagonal" : suggested_preconditioner

	if (this.preconditioner == "none") {
		if (intercept_only) {
			// Map each obs to its level's intercept
			ans = ans + (this.num_levels > 1 ? coefs[this.levels] : coefs[this.levels, .])
		}
		else if (!this.has_intercept) {
			// Slopes only: per-level coef row times the obs' x values
			ans = ans + rowsum( inv_vec(coefs, FullK)[this.levels, .] :* this.unsorted_x )
		}
		else {
			// Intercept + slopes: prepend a column of ones to x
			ans = ans + rowsum(inv_vec(coefs, FullK)[this.levels, .] :* (J(this.num_obs, 1, 1), this.unsorted_x))
		}
	}
	else if (this.preconditioner == "diagonal") {
		// Same as above but coefficients are rescaled by the diagonal factors
		if (this.has_intercept) {
			ans = ans + (coefs[|1\NI|] :* this.preconditioner_intercept)[this.levels, .]
		}
		if (this.num_slopes) {
			ans = ans + rowsum( (inv_vec(coefs[|NI+1\NN|], this.num_slopes) :* this.preconditioner_slopes)[this.levels, .] :* this.unsorted_x )
		}
	}
	else if (this.preconditioner == "block_diagonal") {
		assert_msg(this.has_intercept + this.num_slopes > 1)
		FullK = has_intercept + num_slopes

		// Apply the stacked per-level blocks in inv_xx to the coefficients:
		// reshape coefs to (L x FullK), replicate rows to align with the
		// stacked blocks, multiply and row-sum to get block-matrix products
		tmp = inv_vec(coefs, FullK)

		tmp = tmp # J(FullK, 1, 1)
		tmp = this.inv_xx :* tmp
		tmp = rowsum(tmp)
		assert(rows(tmp) == num_levels * FullK)
		assert(cols(tmp) == 1)

		// Back to (L x FullK), then expand to observations and apply x
		tmp = inv_vec(tmp, FullK)
		assert(rows(tmp) == num_levels)
		assert(cols(tmp) == FullK)
		tmp = tmp[this.levels, .]
		assert(rows(tmp) == this.num_obs)
		if (this.has_intercept) {
			tmp = tmp :* (J(this.num_obs, 1, 1) , this.unsorted_x)
		}
		else {
			tmp = tmp :* this.unsorted_x
		}

		tmp = rowsum(tmp)

		ans = ans + tmp
	}
	else {
		_error(3498, sprintf("invalid preconditioner %s", this.preconditioner))
	}
}
|
|
|
|
|
// Transpose of mult(): aggregate the per-observation vector -y- into the
// stacked per-level coefficient space, i.e. return D' * y under the chosen
// preconditioner (stored in this.preconditioner).
`Vector' FE_Factor::mult_transpose(`Vector' y, `String' suggested_preconditioner)
{
	`Integer'				NI, NS, N, FullK
	`Matrix'				alphas
	`Boolean'				intercept_only

	NI = num_levels * has_intercept
	NS = num_levels * num_slopes
	N = NI + NS
	FullK = has_intercept + num_slopes

	intercept_only = this.has_intercept& !this.num_slopes
	// block_diagonal degenerates to diagonal when there is one coef per level
	this.preconditioner = (FullK == 1 & suggested_preconditioner == "block_diagonal") ? "diagonal" : suggested_preconditioner

	assert(num_levels * FullK == N)

	if (this.preconditioner == "none") {
		// Per-level (weighted) sums of y, and of y interacted with x
		if (intercept_only) {
			alphas = this.panelsum(y, 1)
		}
		else if (!this.has_intercept) {
			alphas = fast_vec( this.panelsum(y :* this.unsorted_x, 1) )
		}
		else {
			alphas = fast_vec( this.panelsum(y :* (J(this.num_obs, 1, 1), this.unsorted_x), 1) )
		}

	}
	else if (this.preconditioner == "diagonal") {
		// Same sums, rescaled by the diagonal factors
		alphas = J(N, 1, .)
		if (this.has_intercept) {
			alphas[|1\NI|] = this.panelsum(y, 1) :* this.preconditioner_intercept
		}
		if (this.num_slopes) {
			alphas[|NI+1\N|] = fast_vec( this.panelsum(y :* this.unsorted_x, 1) :* this.preconditioner_slopes )
		}
	}
	else if (this.preconditioner == "block_diagonal") {
		assert_msg(this.has_intercept + this.num_slopes > 1)

		// Per-level sums for (intercept, slopes), L x FullK
		if (this.has_intercept) {
			alphas = this.panelsum( (y, y :* this.unsorted_x) , 1)
		}
		else {
			alphas = this.panelsum(y :* this.unsorted_x, 1)
		}

		assert(rows(alphas) == num_levels)
		assert(cols(alphas) == FullK)

		// Apply the stacked per-level blocks (same trick as in mult()):
		// replicate rows, elementwise-multiply by inv_xx, then row-sum
		alphas = alphas # J(FullK, 1, 1)
		assert(rows(alphas) == FullK * num_levels)
		assert(cols(alphas) == FullK)
		alphas = this.inv_xx :* alphas
		alphas = rowsum(alphas)
		assert(rows(alphas) == num_levels * FullK)
		assert(cols(alphas) == 1)
	}
	else {
		_error(3498, sprintf("invalid preconditioner %s", this.preconditioner))
	}
	return(alphas)
}
|
|
|
|
|
|
|
// Map the preconditioned coefficient estimates in -alphas- back to the
// original coefficient space (in place, via the by-reference argument),
// reversing the scaling applied by mult()/mult_transpose().
// Must be called after the preconditioner has been selected (this.preconditioner set).
`Void' FE_Factor::undo_preconditioning(`Matrix' alphas)
{
	`Integer'				FullK, num_rows, num_cols

	assert_in(this.preconditioner, ("none", "diagonal", "block_diagonal"))

	FullK = has_intercept + num_slopes

	if (this.preconditioner == "none") {
		// Nothing to undo
	}
	else if (this.preconditioner == "diagonal") {
		// Rescale columns by the same diagonal factors used going in
		if (this.has_intercept & !this.num_slopes) {
			alphas = alphas :* this.preconditioner_intercept
		}
		else if (!this.has_intercept & this.num_slopes) {
			alphas = alphas :* this.preconditioner_slopes
		}
		else {
			alphas = alphas :* (this.preconditioner_intercept , this.preconditioner_slopes )
		}
	}
	else if (this.preconditioner == "block_diagonal") {
		// Apply the stacked per-level blocks in inv_xx (replicate rows,
		// elementwise-multiply, row-sum, reshape); shape is preserved
		num_rows = rows(alphas)
		num_cols = cols(alphas)
		alphas = alphas # J(FullK, 1, 1)
		alphas = alphas :* this.inv_xx
		alphas = rowsum(alphas)
		alphas = inv_vec(alphas, num_cols)

		assert(num_rows == rows(alphas))
		assert(num_cols == cols(alphas))
	}
}
|
|
|
|
|
// Release memory-heavy internals before the object is saved (delegates to
// Factor; no FE-specific members are cleared here).
`Void' FE_Factor::cleanup_before_saving()
{
	super.cleanup_before_saving()
}
|
|
|
|
|
// Construct an FE_Factor from variable names: build a plain ftools Factor
// and then swap its contents into a fresh FE_Factor instance.
// All optional arguments are passed through to ftools' factor().
`FE_Factor' fe_factor(`Varlist' varnames,
    | `DataCol' touse,
      `Boolean' verbose,
      `String' method,
      `Boolean' sort_levels,
      `Boolean' count_levels,
      `Integer' hash_ratio,
      `Boolean' save_keys)
{
	`Factor'				F
	`FE_Factor'				FE_F

	if (args()<2) touse = ""
	F = factor(varnames, touse, verbose, method, sort_levels, count_levels, hash_ratio, save_keys)
	// Move (not copy) the Factor internals into the FE_Factor subclass
	F.swap(FE_F)
	return(FE_F)
}
|
|
|
|
|
// Construct an FE_Factor directly from an in-memory data matrix/view
// (wraps ftools' _factor(), then swaps into an FE_Factor instance).
`FE_Factor' _fe_factor(`DataFrame' data,
    | `Boolean' integers_only,
      `Boolean' verbose,
      `String' method,
      `Boolean' sort_levels,
      `Boolean' count_levels,
      `Integer' hash_ratio,
      `Boolean' save_keys,
      `Varlist' vars,
      `DataCol' touse)
{
	`Factor'				F
	`FE_Factor'				FE_F
	F = _factor(data, integers_only, verbose, method, sort_levels, count_levels, hash_ratio, save_keys, vars, touse)
	// Move (not copy) the Factor internals into the FE_Factor subclass
	F.swap(FE_F)
	return(FE_F)
}
|
|
|
// Join two FE_Factors into the factor of their interaction
// (thin wrapper around ftools' join_factors()).
// NOTE(review): join_factors() is declared to return a base Factor while this
// function is declared to return an FE_Factor -- relies on Mata's permissive
// class handling; confirm callers only use Factor-level members of the result.
`FE_Factor' fe_join_factors(`FE_Factor' F1,
                            `FE_Factor' F2,
    | `Boolean' count_levels,
      `Boolean' save_keys,
      `Boolean' levels_as_keys)
{
	return(join_factors(F1, F2, count_levels, save_keys, levels_as_keys))
}
|
|
|
| end
|
|
|
|
|
|
|
|
|
| mata:
|
|
|
// BipartiteGraph: the graph between the levels of two fixed effects, where
// an edge exists for each (level1, level2) pair observed in the data.
// Used to count disjoint subgraphs (mobility groups), compute vertex core
// numbers, and prune/expand 1-core vertices to speed up partialling out.
class BipartiteGraph
{
	// --- Sizes and underlying factors ---
	`Boolean'				verbose
	`Integer'				N						// number of observations
	`Integer'				N1						// levels of the first factor
	`Integer'				N2						// levels of the second factor
	`Integer'				N12						// N1 + N2 (total vertices)
	`FE_FactorPointer'		PF1						// pointer to the first factor
	`FE_FactorPointer'		PF2						// pointer to the second factor
	`Factor'				F12						// factor of the (F1, F2) edges
	`Factor'				F12_1					// edges grouped by the first key
	`Factor'				F12_2					// edges grouped by the second key

	// --- Zigzag traversal state (init_zigzag) ---
	`Vector'				queue
	`Vector'				stack
	`Vector'				keys1_by_2				// edge keys of F1, sorted by F2
	`Vector'				keys2_by_1				// edge keys of F2, sorted by F1
	`Integer'				num_subgraphs
	`Variable'				subgraph_id

	// --- Core decomposition (compute_cores) ---
	`Vector'				cores					// core number of each vertex
	`Vector'				drop_order				// vertices in removal order

	// --- 1-core pruning state (prune_1core / expand_1core) ---
	`Integer'				N_drop
	`Variable'				mask
	`Boolean'				prune					// 1 if pruning is active
	`Vector'				drop2idx
	`Matrix'				drop2info
	`Variable'				sorted_w
	`Boolean'				has_weights
	`Variable'				sorted_true_weight

	// --- Methods ---
	`Void'					init()
	`Real'					init_zigzag()
	`Void'					compute_cores()
	`Void'					prune_1core()
	`Variables'				expand_1core()
	`Variables'				partial_out()
	`Variables'				__partial_out_map()
	`Variables'				__partial_out_laplacian()

	`Void'					cleanup_before_saving()
}
|
|
|
|
|
// Initialize the graph from two factors (by pointer): record sizes, ensure
// panel setup, and build the edge factors F12 (unique pairs), F12_1 (edges
// grouped by the first key) and F12_2 (edges grouped by the second key).
`Void' BipartiteGraph::init(`FE_FactorPointer' PF1,
                            `FE_FactorPointer' PF2,
                            `Boolean' verbose)
{
	if (verbose>0) {
		printf("\n{txt}# Initializing bipartite graph\n")
		printf("   - FE #1: {res}%s{txt}\n", invtokens((*PF1).varlist))
		printf("   - FE #2: {res}%s{txt}\n", invtokens((*PF2).varlist))
	}
	this.verbose = verbose
	this.PF1 = PF1
	this.PF2 = PF2

	// Both factors must cover the same sample
	assert( (*PF1).num_obs == (*PF2).num_obs )

	N = (*PF1).num_obs
	N1 = (*PF1).num_levels
	N2 = (*PF2).num_levels

	N12 = N1 + N2
	(*PF1).panelsetup()
	(*PF2).panelsetup()

	// F12: one level per distinct (level1, level2) edge; levels_as_keys=1 so
	// its keys are the numeric level pairs
	if (verbose>0) printf("{txt}   - computing F12:  ")

	F12 = fe_join_factors((*PF1), (*PF2), ., ., 1)
	if (verbose>0) printf("{txt} edges found: {res}%-10.0gc{txt}\n", F12.num_levels)
	F12.panelsetup()

	if (verbose>0) printf("{txt}   - computing F12_1:")

	// Group the edges by their first key (and, below, by the second key)
	F12_1 = _fe_factor(F12.keys[., 1], 1, 0, "", 1, 1, ., 0)
	if (verbose>0) printf("{txt} edges found: {res}%-10.0gc{txt}\n", F12_1.num_levels)
	F12_1.panelsetup()

	if (verbose>0) printf("{txt}   - computing F12_2:")
	F12_2 = _fe_factor(F12.keys[., 2], 1, 0, "", 1, 1, ., 0)
	if (verbose>0) printf("{txt} edges found: {res}%-10.0gc{txt}\n", F12_2.num_levels)
	F12_2.panelsetup()
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Breadth-first "zigzag" traversal of the bipartite graph, alternating
// between the two vertex sets. Fills:
//   queue - vertices in visit order (positive ids = F1 levels, negative = F2)
//   stack - for each visited vertex, its newly-seen neighbors, 0-terminated
// Counts the number of disjoint subgraphs (mobility groups) and optionally
// stores each observation's subgraph id (via its F2 level) in subgraph_id.
// Returns the number of disjoint subgraphs.
`Real' BipartiteGraph::init_zigzag(| `Boolean' save_subgraphs)
{
	`Vector'				counter1
	`Vector'				counter2
	`Vector'				done1
	`Vector'				done2

	`Integer'				i_stack
	`Integer'				last_i
	`Integer'				start_j
	`Integer'				i_queue
	`Integer'				id
	`Integer'				j
	`Integer'				k
	`Integer'				c
	`Integer'				i

	`Matrix'				matches

	if (verbose>0) printf("\n{txt}# Initializing zigzag iterator for bipartite graph\n")
	assert(F12_1.panel_is_setup == 1)
	assert(F12_2.panel_is_setup == 1)

	if (args()==0 | save_subgraphs==.) save_subgraphs = 0
	if (save_subgraphs) {
		// One id per F2 level; expanded to observations at the end
		subgraph_id = J(N2, 1, .)
	}

	queue = J(N12, 1, 0)
	// Stack holds each edge once plus one 0-terminator per vertex
	stack = J(F12.num_levels + N12, 1, .)
	counter1 = J(N1, 1, 0)
	counter2 = J(N2, 1, 0)

	// Adjacency lists: F1-neighbors of each F2 level and vice versa
	keys1_by_2 = F12_2.sort(F12.keys[., 1])
	keys2_by_1 = F12_1.sort(F12.keys[., 2])
	done1 = J(N1, 1, 0)
	done2 = J(N2, 1, 0)

	i_stack = 0
	last_i = 0
	start_j = 1
	num_subgraphs = 0

	for (i_queue=1; i_queue<=N12; i_queue++) {
		id = queue[i_queue]
		j = k = .

		// Queue exhausted: current component is done; seed a new one with
		// the first unvisited F1 vertex
		if (id == 0) {
			assert(last_i + 1 == i_queue)
			for (j=start_j; j<=N1; j++) {
				if (!done1[j]) {
					queue[i_queue] = id = j
					start_j = j + 1
					++last_i
					break
				}
			}

			++num_subgraphs
			assert(id != 0)
		}

		if (id > 0) {
			// Visit F1 vertex j: enqueue unseen F2 neighbors, push them on
			// the stack, and 0-terminate this vertex's stack segment
			j = id
			done1[j] = 1
			matches = panelsubmatrix(keys2_by_1, j, F12_1.info)
			for (i=1; i<=rows(matches); i++) {
				k = matches[i]
				c = counter2[k]
				counter2[k] = c + 1
				if (!done2[k]) {
					if (!c) {
						queue[++last_i] = -k
					}
					stack[++i_stack] = k
				}
			}
			stack[++i_stack] = 0
		}
		else {
			// Visit F2 vertex k (stored negated); symmetric to the branch above
			k = -id
			done2[k] = 1
			matches = panelsubmatrix(keys1_by_2, k, F12_2.info)
			for (i=1; i<=rows(matches); i++) {
				j = matches[i]
				c = counter1[j]
				counter1[j] = c + 1
				if (!done1[j]) {
					if (!c) {
						queue[++last_i] = j
					}
					stack[++i_stack] = j
				}
			}
			stack[++i_stack] = 0
			if (save_subgraphs) subgraph_id[k] = num_subgraphs
		}
	}

	// Sanity checks: every vertex visited, every edge counted
	assert(counter1 == F12_1.counts)
	assert(counter2 == F12_2.counts)
	assert(!anyof(queue, 0))
	assert(allof(done1, 1))
	assert(allof(done2, 1))
	assert(!missing(queue))
	assert(!missing(stack))

	// Expand subgraph ids from F2 levels to observations
	if (save_subgraphs) subgraph_id = subgraph_id[(*PF2).levels]

	if (verbose>0) printf("{txt}   - disjoint subgraphs found: {res}%g{txt}\n", num_subgraphs)
	return(num_subgraphs)
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Compute the core number of every vertex (k-core decomposition) using a
// bin-sort peeling algorithm over vertex degrees. On exit:
//   this.cores      - core number per vertex (F1 levels first, then F2)
//   this.drop_order - vertices sorted by removal order (ascending core)
// Requires init() and init_zigzag() to have run.
`Void' BipartiteGraph::compute_cores()
{
	`Factor'				Fbin
	`Boolean'				is_firm
	`Integer'				M, ND, j, jj
	`Integer'				i_v, i_u, i_w
	`Integer'				pv, pu, pw
	`Integer'				v, u, w
	`Integer'				dv, du
	`Vector'				bin, deg, pos, invpos, vert, neighbors

	if (verbose>0) printf("{txt}# Computing vertex core numbers\n\n")

	// Preconditions established by init()/init_zigzag()
	assert_msg(F12_1.panel_is_setup==1, "F12_1 not set up")
	assert_msg(F12_2.panel_is_setup==1, "F12_2 not set up")
	assert_msg(rows(queue)==N12, "Wrong number of rows in queue")
	assert_msg(rows(keys1_by_2)==F12.num_levels, "Wrong number of rows in keys1")
	assert_msg(rows(keys2_by_1)==F12.num_levels, "Wrong number of rows in keys2")

	// Degrees of all vertices, F1 levels stacked above F2 levels
	deg = F12_1.counts \ F12_2.counts
	ND = max(deg)

	// Bucket vertices by degree (bin-sort)
	Fbin = _factor(deg, 1, 0)
	Fbin.panelsetup()

	// bin[d] = position in -vert- of the first vertex with degree d
	bin = J(ND, 1, 0)
	bin[Fbin.keys] = Fbin.counts
	bin = rows(bin) > 1 ? runningsum(1 \ bin[1..ND-1]) : 1

	// pos/invpos track the current position of each vertex within -vert-
	pos = Fbin.p
	invpos = invorder(Fbin.p)

	// Vertices sorted by degree; F1 levels positive, F2 levels negative
	vert = Fbin.sort(( (1::N1) \ (-1::-N2) ))

	// Peel vertices in order of (current) degree; for each neighbor with a
	// larger degree, decrement it and swap it into the lower-degree bucket
	for (i_v=1; i_v<=N12; i_v++) {
		v = vert[i_v]
		is_firm = (v > 0)

		neighbors = is_firm ? panelsubmatrix(keys2_by_1, v, F12_1.info) : panelsubmatrix(keys1_by_2, -v, F12_2.info)
		M = rows(neighbors)

		for (j=1; j<=M; j++) {
			pv = pos[i_v]
			jj = neighbors[j]
			pu = is_firm ? N1 + jj : jj
			dv = deg[pv]
			du = deg[pu]

			if (dv < du) {
				// Swap neighbor u with the first vertex w of its bucket,
				// then shrink the bucket boundary and decrement u's degree
				i_w = bin[du]
				w = vert[i_w]
				u = is_firm ? -jj : jj
				if (u != w) {
					pw = pos[i_w]
					i_u = invpos[pu]
					pos[i_u] = pw
					pos[i_w] = pu
					vert[i_u] = w
					vert[i_w] = u
					invpos[pu] = i_w
					invpos[pw] = i_u
				}
				bin[du] = bin[du] + 1
				deg[pu] = deg[pu] - 1
			}
		}
	}

	if (verbose>0) {
		// Frequency table of the resulting core numbers
		Fbin = _factor(deg, 1, 0)

		mm_matlist(Fbin.counts, "%-8.0gc", 2, strofreal(Fbin.keys), "Freq.", "Core #")
		printf("\n")
	}

	// Move results into the class (deg now holds core numbers; vert holds
	// the peeling order)
	swap(this.drop_order, vert)
	swap(this.cores, deg)
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Temporarily remove all 1-core vertices (those whose FE coefficients can be
// solved triangularly after the main system is solved). Zeroes out the
// weights of the affected observations in both factors and builds the
// bookkeeping (mask, drop2idx/drop2info, sorted_w) that expand_1core() uses
// to recover them. Sets this.prune to 1 when pruning applies, else 0.
`Void' BipartiteGraph::prune_1core(| `Variable' weight)
{
	`Integer'				N_drop2, i, j, i1, i2, j1, j2, K_drop2
	`Vector'				drop1, drop2
	`Vector'				tmp_mask
	`Vector'				proj1, proj2
	`Variable'				w, tmp_weight

	has_weights = (args()>0 & rows(weight) > 1)
	if (has_weights) sorted_true_weight = (*PF1).sort(weight)
	tmp_weight = has_weights ? weight : J(N, 1, 1)

	// Count vertices with core number 1
	N_drop = sum(cores :== 1)
	if (!N_drop) {
		if (verbose>0) printf("{txt}   - no 1-core vertices found\n")
		prune = 0
		return
	}
	if (verbose>0) printf("{txt}   - 1-core vertices found: {res}%g{txt}\n", N_drop)

	// Split the dropped vertices into F1 levels (drop1) and F2 levels (drop2);
	// drop_order keeps the removal order, negatives are F2 levels
	drop_order = drop_order[1..N_drop]
	drop1 = selectindex(cores[1..N1] :== 1)
	cores = .
	drop1 = (1::N1)[drop1]
	drop2 = -select(drop_order, drop_order:<0)

	K_drop2 = rows(drop2)
	// Number of observations belonging to the dropped F2 levels
	N_drop2 = K_drop2 ? sum((*PF2).info[drop2, 2] :- (*PF2).info[drop2, 1] :+ 1) : 0

	// mask: observation belongs to a dropped level of either factor
	tmp_mask = J(N1, 1, 0)
	if (rows(drop1)) tmp_mask[drop1] = J(rows(drop1), 1, 1)
	mask = tmp_mask[(*PF1).levels, 1]
	tmp_mask = J(N2, 1, 0)
	if (K_drop2) tmp_mask[drop2] = J(K_drop2, 1, 1)
	mask = mask :| tmp_mask[(*PF2).levels, 1]
	tmp_mask = .

	// drop2idx: observation indices of each dropped F2 level, contiguous;
	// drop2info: (start, stop) rows into drop2idx for each dropped level
	drop2idx = J(N_drop2, 1, .)
	drop2info = J(N2, 2, .)

	j1 = 1
	for (i=1; i<=K_drop2; i++) {
		j = drop2[i]
		i1 = (*PF2).info[j, 1]
		i2 = (*PF2).info[j, 2]

		j2 = j1 + i2 - i1
		drop2idx[j1::j2] = i1::i2
		drop2info[j, .] = (j1, j2)
		j1 = j2 + 1
	}

	// Map the F2-panel positions back to dataset order, then into F1-sorted
	// order (the order used by expand_1core)
	if (!(*PF2).is_sorted) {
		assert(((*PF2).p != J(0, 1, .)))
		drop2idx = (*PF2).p[drop2idx, .]
	}

	if (!(*PF1).is_sorted) {
		assert(((*PF1).inv_p != J(0, 1, .)))
		drop2idx = invorder((*PF1).p)[drop2idx, .]
	}

	// Zero out the weights of pruned observations so they do not contribute
	// to the partialling-out of the remaining system
	tmp_weight[selectindex(mask)] = J(sum(mask), 1, 0)

	PF1->set_weights(tmp_weight)
	PF2->set_weights(tmp_weight)
	tmp_weight = .

	// sorted_w flags (in F1-sorted order) the observations that still matter;
	// an obs projected entirely onto a pruned level drops out below
	sorted_w = J(N, 1, 1)

	proj1 = PF1->panelmean(sorted_w)[PF1->levels, .]
	proj2 = PF2->panelmean(sorted_w)[PF2->levels, .]

	sorted_w = ((sorted_w - proj1) :!= 1) :| ((sorted_w - proj2) :!= 1)
	proj1 = proj2 = .
	sorted_w = (*PF1).sort(sorted_w)

	prune = 1
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Undo prune_1core(): walk the dropped vertices in reverse removal order and
// solve their coefficients triangularly, demeaning -y- within each recovered
// level. Returns the adjusted -y- in original (dataset) order; a no-op when
// pruning was not active.
`Variables' BipartiteGraph::expand_1core(`Variables' y)
{
	`Boolean'				zero_weights
	`Variable'				sorted_y
	`Integer'				i, j, j1, j2, i2, k1, k2, nk
	`Matrix'				tmp_y
	`Vector'				tmp_w, tmp_idx, new_w
	`RowVector'				tmp_mean

	if (prune==0) return(y)
	if (verbose>0) printf("{txt}# Expanding 2-core into original dataset\n\n")
	assert(N_drop == rows(drop_order))

	// Work in F1-sorted order (the order sorted_w was stored in)
	sorted_y = (*PF1).sort(y)

	i2 = 0
	for (i=N_drop; i>=1; i--) {
		j = drop_order[i]
		if (j > 0) {
			// Positive id: recovered F1 level -> its obs form one contiguous
			// panel in sorted order
			j1 = (*PF1).info[j, 1]
			j2 = (*PF1).info[j, 2]

			tmp_y = sorted_y[| j1 , 1 \ j2 , . |]
			tmp_w = sorted_w[|j1, 1 \ j2, .|]
			new_w = has_weights ? sorted_true_weight[|j1, 1 \ j2, .|] : J(j2-j1+1, 1, 1)
			zero_weights = !sum(tmp_w)
			if (!zero_weights) {
				// Demean within the level using the current working weights
				tmp_mean = mean(tmp_y, tmp_w)
				assert(!missing(tmp_mean))
				sorted_y[| j1 , 1 \ j2 , . |] = tmp_y :- tmp_mean
			}
			// Reactivate these obs with their true weights
			sorted_w[| j1 , 1 \ j2 , 1 |] = new_w
		}
		else {
			// Negative id: recovered F2 level -> obs indices come from the
			// drop2idx/drop2info bookkeeping built by prune_1core()
			++i2
			j1 = drop2info[-j, 1]
			j2 = drop2info[-j, 2]
			tmp_idx = drop2idx[| j1 , 1 \ j2 , 1 |]
			tmp_y = sorted_y[tmp_idx, .]
			tmp_w = sorted_w[tmp_idx]
			zero_weights = !sum(tmp_w)
			if (zero_weights) {
				// All obs still inactive: fall back to the true weights
				tmp_w = has_weights ? sorted_true_weight[tmp_idx] : J(j2-j1+1, 1, 1)
			}
			tmp_mean = mean(tmp_y, tmp_w)
			assert(!missing(tmp_mean))
			nk = rows(tmp_idx)
			for (k1=1; k1<=nk; k1++) {
				k2 = tmp_idx[k1]
				sorted_y[k2, .] = sorted_y[k2, .] - tmp_mean
				sorted_w[k2] = has_weights ? sorted_true_weight[k2] : 1
			}
		}
	}

	if (verbose>0) printf("{txt}   - number of coefficients solved triangularly: {res}%s{txt}\n", strofreal(rows(drop_order)))
	// Return to original dataset order
	return((*PF1).invsort(sorted_y))
}
|
|
|
|
|
// NOTE(review): empty stub despite the declared `Variables' return --
// presumably unimplemented or implemented elsewhere; TODO confirm before use.
`Variables' BipartiteGraph::partial_out(`Variables' y)
{

}
|
|
|
|
|
// NOTE(review): empty stub despite the declared `Variables' return --
// presumably unimplemented or implemented elsewhere; TODO confirm before use.
`Variables' BipartiteGraph::__partial_out_map(`Variables' y)
{

}
|
|
|
|
|
// NOTE(review): empty stub despite the declared `Variables' return --
// presumably unimplemented or implemented elsewhere; TODO confirm before use.
`Variables' BipartiteGraph::__partial_out_laplacian(`Variables' y)
{

}
|
|
|
|
|
// Release memory before saving: clean the internal edge factors and null the
// factor pointers (the pointed-to factors are owned elsewhere).
`Void' BipartiteGraph::cleanup_before_saving()
{
	this.F12.cleanup_before_saving()
	this.F12_1.cleanup_before_saving()
	this.F12_2.cleanup_before_saving()

	PF1 = NULL
	PF2 = NULL
}
|
|
|
| end
|
|
|
|
|
|
|
|
|
| mata:
|
|
|
| class Individual_Factor extends FE_Factor
|
| {
|
|
|
| `String' aggregate_function
|
| `FE_Factor' FG, FI
|
| `BipartiteGraph' bg
|
| `Integer' verbose
|
| `Vector' group_p, group_inv_p
|
|
|
|
|
| `Void' new()
|
| virtual `Vector' drop_singletons()
|
| virtual `Void' init_diagonal_preconditioner()
|
| virtual `Void' init_block_diag_preconditioner()
|
| virtual `Void' mult()
|
| virtual `Vector' mult_transpose()
|
| virtual `Void' undo_preconditioning()
|
| virtual `Matrix' panelsum()
|
| virtual `Void' set_weights()
|
|
|
| virtual `Void' drop_obs()
|
| virtual `Void' panelsetup()
|
| `DataFrame' sort()
|
|
|
| virtual `Void' cleanup_before_saving()
|
| }
|
|
|
|
|
// Constructor: flag this factor as an individual FE (FE_Factor::new() has
// already initialized the inherited members to their defaults).
`Void' Individual_Factor::new()
{
	is_individual_fe = 1
}
|
|
|
|
|
// Drop "singleton individuals": groups that are 1-core vertices of the
// group-individual graph (their FE is determined exactly). Uses the
// BipartiteGraph core decomposition, after drop_obs() has rigged F12_1's
// counts so that only true singletons end up in the 1-core.
//   sample       - obs indices (into the dataset) of the group-level sample
//   indiv_sample - obs indices of the individual-level sample
// Returns the positions (within -sample-) of the dropped observations, or a
// 0x1 vector when none. Exits with error 2001 if all groups would be dropped.
`Vector' Individual_Factor::drop_singletons(`Vector' sample, `Vector' indiv_sample)
{
	// NOTE(review): this executable printf precedes the declarations below;
	// with -mata set matastrict on- declaration placement is restricted --
	// verify this compiles as intended
	if (verbose>2) printf("{txt}   - running individual_factor.drop_singletons()\n")
	`Vector'				mask, drop_mask, idx, indiv_idx
	`Integer'				N_drop

	// drop_obs() must have set F12_1's counts to the constant 2 beforehand
	assert(bg.F12_1.panel_is_setup==1)
	assert(allof(bg.F12_1.counts, 2))
	bg.compute_cores()
	assert(rows(bg.cores) == FG.num_levels + FI.num_levels)

	// Groups (first FG.num_levels vertices) in the 1-core are singletons;
	// expand the per-level flag to individual-level observations
	drop_mask = bg.cores[1..FG.num_levels] :== 1
	drop_mask = drop_mask[FG.levels]
	assert_msg(rows(drop_mask) == rows(indiv_sample))
	N_drop = sum(drop_mask)
	if (verbose>2) printf("{txt}   - 1-core vertices found (i.e. singleton individuals): {res}%g{txt}\n", N_drop)

	if (N_drop) {
		// Translate dataset-level positions of the dropped individual obs
		// into positions within the group-level -sample- vector
		mask = J(st_nobs(), 1, 0)
		mask[sample] = 1::rows(sample)
		mask = mask :* create_mask(st_nobs(), 0, select(indiv_sample, drop_mask), 1)
		idx = select(mask, mask)

		// Positions within the individual-level sample, used for drop_obs()
		indiv_idx = selectindex(drop_mask)

		// Abort if dropping the singletons would empty the sample
		if (this.FG.num_levels == rows(idx)) {
			printf("{err}insufficient observations (dropped %f singletons due to individual fixed effects; check input to indiv() option)\n", rows(idx))
			exit(2001)
		}
		drop_obs(indiv_idx)
	}
	else {
		idx = J(0, 1, .)
	}

	return(idx)
}
|
|
|
|
|
// Sort data into the individual factor's panel order (delegates to FI).
`DataFrame' Individual_Factor::sort(`DataFrame' data)
{
	return(this.FI.sort(data))
}
|
|
|
|
|
// Drop observations from both underlying factors and rebuild the bipartite
// graph between groups and individuals. Afterwards F12_1's counts are
// overwritten with the constant 2 so that a later compute_cores() places
// exactly the singleton groups in the 1-core (instead of doing a full
// 1-core pruning).
`Void' Individual_Factor::drop_obs(`Vector' idx)
{
	this.FG.drop_obs(idx)
	this.FI.drop_obs(idx)

	// Rebuild the graph on the reduced sample
	bg = BipartiteGraph()
	bg.init(&FG, &FI, verbose-2)
	(void) bg.init_zigzag(0)
	assert(rows(bg.F12_1.counts) == bg.F12_1.num_levels)
	if (verbose>2) printf("\n{txt}# Tricking Bipartite() into dropping singletons instead of doing 1-core prunning\n\n")
	bg.F12_1.counts = J(bg.F12_1.num_levels, 1, 2)
	// From the solver's perspective: levels = individuals, obs = groups
	this.num_levels = this.FI.num_levels
	this.num_obs = this.FG.num_levels

	// Invalidate the cached group permutations
	this.group_p = this.group_inv_p = .
}
|
|
|
|
|
// Set up panels on both underlying factors; the graph's edge factors must
// already be set up (done inside drop_obs() via bg.init()).
`Void' Individual_Factor::panelsetup()
{
	this.FG.panelsetup()
	this.FI.panelsetup()
	assert(this.bg.F12_1.panel_is_setup==1)
	assert(this.bg.F12_2.panel_is_setup==1)
}
|
|
|
|
|
// Sum within individual levels (delegates to FI's panelsum).
`Matrix' Individual_Factor::panelsum(`Matrix' x, | `Boolean' sort_input, `Boolean' ignore_weights)
{
	return(this.FI.panelsum(x, sort_input, ignore_weights))
}
|
|
|
|
|
// Attach weights (may only be done once): store them, sorted in FI's panel
// order, on this object and on both underlying factors, and cache the
// per-individual weighted counts.
`Void' Individual_Factor::set_weights(`Vector' input_weights, | `Boolean' verbose)
{
	if (args()<2 | verbose == .) verbose = 0
	if (verbose > 0) printf("{txt}   - sorting weights for indiv. factor {res}%s{txt}\n", this.absvar)

	// Guard against double-initialization (same sentinels as FE_Factor)
	assert_msg(this.has_weights == 0)
	assert_msg(this.weights == 1)
	assert_msg(this.weighted_counts == .)
	assert_msg(this.FG.weights == 1)
	assert_msg(this.FI.weights == 1)
	assert_msg(this.FI.num_obs == rows(input_weights))

	// Order matters: flags must be set before panelsum() runs its asserts
	this.has_weights = this.FG.has_weights = this.FI.has_weights = 1
	this.weights = this.FG.weights = this.FI.weights = this.FI.sort(input_weights)
	this.weighted_counts = this.FI.panelsum(this.weights, 0, 1)
}
|
|
|
|
|
// Build the diagonal preconditioner for the individual FE. Mirrors
// FE_Factor::init_diagonal_preconditioner() but counts come from the
// individual-level factor FI (this.panelsum already delegates to FI).
`Void' Individual_Factor::init_diagonal_preconditioner()
{
	`Matrix'				precond

	assert(is_individual_fe == 1)

	if (this.has_weights) {
		// weighted_counts was cached per individual by set_weights()
		assert(rows(this.weighted_counts) == this.FI.num_levels)
	}

	// Intercept block: 1/sqrt of each individual's (weighted) count
	if (this.has_intercept) {
		precond = this.has_weights ? this.weighted_counts : this.FI.counts
		this.preconditioner_intercept = sqrt(1 :/ precond)
	}

	// Slope block: 1/sqrt of the within-individual sum of squared x
	if (this.num_slopes) {
		precond = this.panelsum(this.unsorted_x :^ 2, 1)
		this.preconditioner_slopes = 1 :/ sqrt(precond)
	}
}
|
|
|
|
|
// Not supported for individual FEs: mult()/mult_transpose() only accept the
// "none" and "diagonal" preconditioners, so reaching this is a programming
// error and aborts immediately.
`Void' Individual_Factor::init_block_diag_preconditioner(`String' weight_type, `Real' weights_scale, `Vector' true_weights)
{
	assert(0)
}
|
|
|
|
|
// Individual-FE version of mult(): expand per-individual coefficients to the
// individual-level observations, aggregate them to groups via FG (sum or
// mean, per aggregate_function), and accumulate into -ans- (one entry per
// group). Only the "none" and "diagonal" preconditioners are supported.
`Void' Individual_Factor::mult(`Vector' coefs, `String' suggested_preconditioner, `Vector' ans)
{
	`Integer'				NI, NS, NN, FullK
	`Boolean'				intercept_only
	`Matrix'				tmp
	`Vector'				expanded_coefs, sum_coefs

	NI = num_levels * has_intercept
	NS = num_levels * num_slopes
	NN = NI + NS
	FullK = has_intercept + num_slopes

	// num_levels tracks individuals; ans has one row per group
	assert(num_levels == FI.num_levels)
	assert(num_levels * FullK == NN)
	assert(aggregate_function == "sum" | aggregate_function == "mean")
	assert(rows(coefs) == NN)
	assert(rows(ans) == FG.num_levels)

	intercept_only = this.has_intercept & !this.num_slopes
	// block_diagonal degenerates to diagonal when there is one coef per level
	this.preconditioner = (FullK == 1 & suggested_preconditioner == "block_diagonal") ? "diagonal" : suggested_preconditioner

	// block_diagonal is not implemented for individual FEs
	assert(this.preconditioner == "none" | preconditioner == "diagonal")

	if (this.preconditioner == "none") {

		if (intercept_only) {
			// Expand each individual's intercept, then aggregate by group
			expanded_coefs = this.FI.num_levels > 1 ? coefs[this.FI.levels] : coefs[this.FI.levels, .]
			sum_coefs = this.FG.panelsum(expanded_coefs, 1)
			if (aggregate_function == "mean") {
				sum_coefs = sum_coefs :/ this.FG.counts
			}
		}
		else {
			// Intercept and/or slopes: per-individual coef rows times the
			// design columns, then aggregated by group
			expanded_coefs = inv_vec(coefs, FullK)[this.FI.levels, .]
			sum_coefs = rowsum(expanded_coefs :* (J(this.FG.num_obs, this.has_intercept, 1), this.unsorted_x))
			sum_coefs = this.FG.panelsum(sum_coefs, 1)
			if (aggregate_function == "mean") {
				sum_coefs = sum_coefs :/ this.FG.counts
			}
		}

	}
	else if (this.preconditioner == "diagonal") {
		sum_coefs = J(this.num_obs, 1, 0)

		// Same as above but coefficients carry the diagonal scaling
		if (this.has_intercept) {
			expanded_coefs = (coefs[|1\NI|] :* this.preconditioner_intercept)[this.FI.levels, .]
			sum_coefs = sum_coefs + this.FG.panelsum(expanded_coefs, 1)
		}

		if (this.num_slopes) {
			expanded_coefs = inv_vec(coefs[|NI+1\NN|], this.num_slopes)
			expanded_coefs = (expanded_coefs :* this.preconditioner_slopes)[this.FI.levels, .] :* this.unsorted_x
			if (this.num_slopes > 1) expanded_coefs = rowsum(expanded_coefs)
			sum_coefs = sum_coefs + this.FG.panelsum(expanded_coefs, 1)
		}

		if (aggregate_function == "mean") {
			sum_coefs = sum_coefs :/ this.FG.counts
		}
	}
	else if (this.preconditioner == "block_diagonal") {
		// Unreachable (guarded by the assert above); kept as a placeholder
		assert(0)
		assert_msg(this.has_intercept + this.num_slopes > 1)
		FullK = has_intercept + num_slopes
	}
	else {
		_error(3498, sprintf("invalid preconditioner %s", this.preconditioner))
	}

	// FG.panelsum() returned results in FG-sorted order; map back to the
	// caller's group order when FG is not already sorted
	ans = ans + (this.FG.is_sorted ? sum_coefs : sum_coefs[group_inv_p])
}
|
|
|
|
|
`Vector' Individual_Factor::mult_transpose(`Vector' y, `String' suggested_preconditioner)
{
	// Return alphas = A' * y: project a group-level vector y back onto the
	// stacked FE coefficients (intercepts first, then vec'd slopes).
	// This is the adjoint of mult(); both must agree on the preconditioner.

	`Integer'               NI, NS, N, FullK
	`Matrix'                alphas
	`Boolean'               intercept_only
	`Vector'                sorted_y, expanded_y

	NI = num_levels * has_intercept		// # of intercept coefficients
	NS = num_levels * num_slopes		// # of slope coefficients
	N = NI + NS							// total # of coefficients
	FullK = has_intercept + num_slopes	// # of coefficients per level

	intercept_only = this.has_intercept & !this.num_slopes
	this.preconditioner = (FullK == 1 & suggested_preconditioner == "block_diagonal") ? "diagonal" : suggested_preconditioner

	assert_msg(num_levels * FullK == N, "error1")
	assert_msg(num_levels == this.FI.num_levels, sprintf("assertion failed: num_levels (%f) ≠ FI.num_levels (%f)", num_levels, FI.num_levels))
	assert_msg(FG.num_levels == rows(y), "error3", 3456)
	assert_msg(aggregate_function == "sum" | aggregate_function == "mean", "error4")

	// block_diagonal is not implemented for individual FEs
	assert_msg(this.preconditioner == "none" | this.preconditioner == "diagonal", "error5")

	// Sort y by group id so FG.levels indexes into it correctly
	sorted_y = this.FG.is_sorted ? y : y[this.group_p]

	// Expand y from groups to individual-level obs; with "mean" each obs
	// receives its group's value divided by the group size
	if (aggregate_function == "sum") {
		expanded_y = sorted_y[this.FG.levels, .]
	}
	else if (aggregate_function == "mean") {

		expanded_y = (sorted_y :/ this.FG.counts)[this.FG.levels, .]
	}
	else {
		_error(3333, sprintf("aggregation function not implemented: %s\n", aggregate_function))
	}

	if (this.preconditioner == "none") {
		if (intercept_only) {
			// Sum expanded y within each individual level
			alphas = this.FI.panelsum(expanded_y, 1)
		}
		else if (!this.has_intercept) {
			// Slopes only: X'y per level, stacked into a column vector
			alphas = fast_vec(this.panelsum(expanded_y :* this.unsorted_x, 1))
		}
		else {
			// Intercept + slopes: (1, X)'y per level
			alphas = fast_vec( this.panelsum(expanded_y :* (J(this.num_obs, 1, 1), this.unsorted_x), 1) )
		}
	}
	else if (this.preconditioner == "diagonal") {
		alphas = J(N, 1, .)

		// Rescale each part of the projection by the diagonal preconditioner
		if (this.has_intercept) {
			alphas[|1\NI|] = this.FI.panelsum(expanded_y, 1) :* this.preconditioner_intercept
		}
		if (this.num_slopes) {
			alphas[|NI+1\N|] = fast_vec( this.panelsum(expanded_y :* this.unsorted_x, 1) :* this.preconditioner_slopes )
		}
	}
	else if (this.preconditioner == "block_diagonal") {
		// Unreachable (asserted above); placeholder for a future implementation
		assert(0)

	}
	else {
		_error(3498, sprintf("invalid preconditioner %s", this.preconditioner))
	}
	return(alphas)
}
|
|
|
|
|
`Void' Individual_Factor::undo_preconditioning(`Matrix' alphas)
{
	// Not implemented for individual fixed effects; reaching this is a programming error
	assert(0)
}
|
|
|
|
|
|
|
`Void' Individual_Factor::cleanup_before_saving()
{
	// Free memory before the object is stored: clean up the parent Factor
	// plus the inner group/individual factors
	super.cleanup_before_saving()
	FG.cleanup_before_saving()
	FI.cleanup_before_saving()

	// The bipartite graph is only needed during setup; replace it with an
	// empty object so its (possibly large) contents can be garbage-collected
	bg = BipartiteGraph()
	bg.cleanup_before_saving()
}
|
|
|
|
|
`Individual_Factor' indiv_factor(`Varname' group_id,
                                 `Varname' individual_id,
                                 `Vector' sample,
                                 `Vector' indiv_sample,
                                 `String' aggregate_function,
                                 `Boolean' verbose)
{
	// Construct an Individual_Factor from a group id (e.g. team/paper) and an
	// individual id (e.g. worker/author), both defined over the individual-level
	// sample. The factor aggregates individual FEs up to the group level using
	// aggregate_function ("sum" or "mean").
	`Factor'                FG, FI
	`Individual_Factor'     F
	`BipartiteGraph'        bg

	assert(aggregate_function == "sum" | aggregate_function == "mean")

	// Factor of the group id over the individual-level sample
	if (verbose>2) printf("\n{txt}# Constructing factor for group id: {res}%s{txt}\n", group_id)
	FG = fe_factor(group_id, indiv_sample, ., "", ., 1, ., 0)

	// Factor of the individual id over the individual-level sample
	// (BUGFIX: this log line previously printed group_id instead of individual_id)
	if (verbose>2) printf("\n{txt}# Constructing factor for individual id: {res}%s{txt}\n", individual_id)
	FI = fe_factor(individual_id, indiv_sample, ., "", ., 1, ., 0)

	// Bipartite graph between groups and individuals, used later to drop singletons
	bg = BipartiteGraph()
	bg.init(&FG, &FI, verbose-2)
	(void) bg.init_zigzag(0)

	assert(rows(bg.F12_1.counts) == bg.F12_1.num_levels)

	// Pretend every level has count 2 so the pruning step only removes
	// true singletons instead of doing a full 1-core prune
	if (verbose>2) printf("\n{txt}# Tricking Bipartite() into dropping singletons instead of doing 1-core prunning\n\n")
	bg.F12_1.counts = J(bg.F12_1.num_levels, 1, 2)

	F = Individual_Factor()
	F.num_levels = FI.num_levels
	F.num_obs = FG.num_levels		// one "obs" per group after aggregation
	F.varlist = F.absvar = FG.varlist
	F.aggregate_function = aggregate_function
	F.verbose = verbose
	// Permutation sorting the group-level sample by group id, and its inverse
	F.group_p = order(st_data(sample, group_id), 1)
	F.group_inv_p = invorder(F.group_p)
	// swap() transfers ownership without copying the (large) objects
	swap(F.FG, FG)
	swap(F.FI, FI)
	swap(F.bg, bg)

	return(F)
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| end
|
|
|
|
|
|
|
|
|
| mata:
|
|
|
`Matrix' inv_vec(`Vector' x, `Integer' num_cols)
{
	// Inverse of vec(): reshape a stacked column vector back into a matrix
	// with num_cols columns; a single requested column is returned as-is
	if (num_cols == 1) return(x)
	return(colshape(x, num_cols))
}
|
|
|
|
|
`Vector' fast_vec(`Matrix' x)
{
	// Row-major vec() that skips the reshape when x is already a column vector
	if (cols(x) == 1) return(x)
	return(colshape(x, 1))
}
|
|
|
|
|
`Matrix' givens_rotation(`Real' a, `Real' b, `Real' r)
{
	// Return the 2x2 Givens rotation G = (c, -s \ s, c) that zeroes out b,
	// i.e. G' * (a \ b) = (r \ 0). The magnitude r = hypot(a, b) is returned
	// through the third argument (Mata passes scalars by reference).
	`Real' c, s
	if (b) {
		r = hypot(a, b)
		c = a / r
		s = b / r
	}
	else {
		// b == 0: identity rotation; also avoids 0/0 when a == b == 0
		r = a
		c = 1
		s = 0
	}
	return(c, -s \ s, c)
}
|
|
|
|
|
`Real' hypot(`Real' x, `Real' y)
{
	// Numerically robust sqrt(x^2 + y^2):
	// - rescales the operands away from overflow/underflow boundaries
	// - applies a first-order correction to reduce the rounding error of
	//   the naive squaring-then-sqrt
	// (same overall structure as the widely-used scaled-hypot algorithms;
	//  NOTE(review): correction formula appears to follow Borges-style
	//  improved hypot — confirm against the reference if modifying)

	`Real' ax, ay, scale, h, delta
	ax = abs(x)
	ay = abs(y)

	// Ensure ax >= ay
	if (ax < ay) swap(ax, ay)

	// If ay is negligible relative to ax, the answer is just ax
	if (ay <= ax * sqrt(epsilon(0.5))) return(ax)

	// Rescale: shrink very large operands, grow very small ones;
	// `scale` ends up holding the factor that undoes the rescaling at return
	scale = epsilon(sqrt(smallestdouble()))
	if (ax > sqrt(maxdouble()/2)) {
		ax = ax * scale
		ay = ay * scale
		scale = 1 / scale
	}
	else if (ay < sqrt(smallestdouble())) {
		ax = ax / scale
		ay = ay / scale
	}
	else {
		scale = 1
	}
	h = sqrt(ax^2+ay^2)

	// One-step correction for the rounding error of sqrt(ax^2+ay^2)
	delta = h - ax
	h = h - (delta*(2*(ax-ay)) + (2*delta-ay)*ay + delta^2) / (2*h)
	return(h*scale)
}
|
|
|
|
|
`Void' assign(`Vector' x, `Real' x1, `Real' x2)
{
	// Unpack a 2-element vector into two scalars.
	// x1 and x2 are output arguments (Mata passes them by reference).
	assert(rows(x) == 2)
	assert_msg(!hasmissing(x), "input x has missing values")
	x1 = x[1]
	x2 = x[2]
}
|
|
|
|
|
`Vector' normalize(`Vector' x, `Vector' weights, `Real' norm, `String' msg)
{
	// Return x divided by its weighted L2 norm; the norm itself is returned
	// through the by-reference argument `norm'.
	// If x is numerically zero, set norm = 0 and return x unchanged so callers
	// (e.g. bidiagonalization steps) can detect the breakdown.
	// `msg' is forwarded to vector_norm() for its missing-value assertion.
	// (Fix: removed the unused local `normalized_x` declaration.)
	assert(weights!=.)
	norm = vector_norm(x, weights, msg)

	if (norm < epsilon(1)) {
		norm = 0
		return(x)
	}
	else {
		return(x / norm)
	}
}
|
|
|
|
|
`Real' vector_norm(`Vector' x, `Vector' weights, `String' msg)
{
	// Weighted L2 norm sqrt(x' W x), computed in quad precision.
	// `weights' is either the scalar 1 (unweighted) or a conformable vector.
	// Asserts (with message `msg') that the result is not missing.
	// (Fix: first parameter was declared with the undefined lowercase macro
	//  `vector' instead of `Vector', unlike every other declaration in the file.)
	`Real' ans
	if (weights != 1) assert_msg(rows(x) == rows(weights), "weights non-conforming size")
	ans = sqrt(quadcross(x, weights, x))
	assert_msg(!missing(ans), msg)
	return(ans)
}
|
|
|
|
|
`Variables' panelmean(`Variables' y, `FE_Factor' f)
{
	// Group means of y by factor f (y must already be sorted by f).
	// NOTE: currently disabled — the leading assert(0) makes the body
	// unreachable; kept for reference/debugging only.
	assert(0)
	assert_boolean(f.has_weights)
	assert_msg(f.panel_is_setup, "F.panel_setup() must be run beforehand")
	if (f.has_weights) {
		// editmissing() guards against 0/0 for levels with zero total weight
		return(editmissing(f.panelsum(y) :/ f.weighted_counts, 0))
	}
	else {
		return(f.panelsum(y) :/ f.counts)
	}
}
|
|
|
|
|
`Matrix' precompute_inv_xx(`FE_Factor' f)
{
	// For each level of f, precompute inv(X'WX) of the level's slope variables
	// (demeaned within the level if the FE has an intercept).
	// The L per-level (K x K) inverses are stacked vertically into an
	// (L*K x K) matrix, later consumed by panelsolve_invsym().
	`Integer'               i, L, K, offset
	`Vector'                tmp_x, tmp_w
	`Matrix'                inv_xx, tmp_inv_xx, sorted_x
	`RowVector'             tmp_xmeans

	assert_boolean(f.has_weights)
	assert_boolean(f.has_intercept)

	L = f.num_levels
	K = cols(f.unsorted_x)
	inv_xx = J(L * K, K, .)
	sorted_x = f.sort(f.unsorted_x)

	for (i=1; i<=L; i++) {
		tmp_x = panelsubmatrix(sorted_x, i, f.info)
		tmp_w = f.has_weights ? panelsubmatrix(f.weights, i, f.info) : 1
		if (f.has_intercept) {
			// Demean X around the level's precomputed means before forming X'WX
			tmp_xmeans = K > 1 ? f.x_means[i, .] : f.x_means[i]
			tmp_inv_xx = invsym(quadcrossdev(tmp_x, tmp_xmeans, tmp_w, tmp_x, tmp_xmeans))
		}
		else {
			tmp_inv_xx = invsym(quadcross(tmp_x, tmp_w, tmp_x))
		}
		// Store this level's inverse in its K-row slot
		offset = K * (i - 1)
		inv_xx[|offset + 1, 1 \ offset + K , . |] = tmp_inv_xx
	}
	return(inv_xx)
}
|
|
|
|
|
`Variables' panelsolve_invsym(`Variables' y, `FE_Factor' f, | `Matrix' alphas)
{
	// For each level of f, regress y on the level's slope variables (plus an
	// implicit intercept when the FE has one), using the precomputed per-level
	// inv(X'WX) stored in f.inv_xx. Returns the fitted values (Xb + intercept),
	// unsorted back to the original observation order via f.invsort().
	// If a nonempty `alphas' matrix is supplied, the estimated coefficients of
	// each level are stored into its rows (requires a single y column).
	`Integer'               i, L, K, offset
	`Boolean'               save_alphas
	`Matrix'                xbd, tmp_xbd, tmp_x, tmp_y, tmp_xy, tmp_inv_xx, sorted_x
	`Vector'                tmp_w, b
	`RowVector'             tmp_xmeans, tmp_ymeans

	assert_boolean(f.has_weights)
	assert_boolean(f.has_intercept)
	assert_msg(rows(f.unsorted_x) == rows(y))

	save_alphas = args()>=3 & alphas!=J(0,0,.)
	if (save_alphas) assert(cols(y)==1)

	L = f.num_levels
	K = cols(f.unsorted_x)
	xbd = J(rows(y), cols(y), .)
	sorted_x = f.sort(f.unsorted_x)

	for (i=1; i<=L; i++) {
		tmp_y = panelsubmatrix(y, i, f.info)
		tmp_x = panelsubmatrix(sorted_x, i, f.info)
		tmp_w = f.has_weights ? panelsubmatrix(f.weights, i, f.info) : 1
		// Slice this level's precomputed inv(X'WX) out of the stacked matrix
		offset = K * (i - 1)
		tmp_inv_xx = f.inv_xx[|offset + 1, 1 \ offset + K , . |]

		if (f.has_intercept) {
			// Demeaned regression; means are added back so the fitted values
			// include the level's intercept
			tmp_ymeans = mean(tmp_y, tmp_w)
			tmp_xmeans = K > 1 ? f.x_means[i, .] : f.x_means[i]
			tmp_xy = quadcrossdev(tmp_x, tmp_xmeans, tmp_w, tmp_y, tmp_ymeans)
			b = tmp_inv_xx * tmp_xy
			tmp_xbd = (tmp_x :- tmp_xmeans) * b :+ tmp_ymeans
			// Stored row = (intercept, slopes)
			if (save_alphas) alphas[i, .] = tmp_ymeans - tmp_xmeans * b, b'
		}
		else {
			tmp_xy = quadcross(tmp_x, tmp_w, tmp_y)
			b = tmp_inv_xx * tmp_xy
			tmp_xbd = tmp_x * b
			if (save_alphas) alphas[i, .] = b'
		}
		xbd[|f.info[i,1], 1 \ f.info[i,2], .|] = tmp_xbd
	}
	return(f.invsort(xbd))
}
|
|
|
|
|
`RowVector' function compute_stdevs(`Matrix' A)
{
	// Column standard deviations of A, computed in one pass without
	// materializing a demeaned copy of A:
	//   Var = (sum(A^2) - sum(A)^2 / N) / (N - 1)
	// (slightly less numerically stable than a two-pass formula, but avoids
	// duplicating A in memory). Requires N >= 2.
	`RowVector' stdevs
	`Integer' K, N

	N = rows(A)
	K = cols(A)
	stdevs = J(1, K, .)

	stdevs = sqrt( (diagonal(cross(A, A))' - (cross(1, A) :^ 2 / N)) / (N-1) )
	assert_msg(!missing(stdevs), "stdevs are missing; is N==1?")
	// Floor at 1e-3 so later divisions can't blow up on (near-)constant columns
	stdevs = colmax(( stdevs \ J(1, K, 1e-3) ))

	return(stdevs)
}
|
|
|
|
|
`RowVector' function standardize_data(`Matrix' A)
{
	// Scale each column of A by its standard deviation, modifying A in place,
	// and return the scaling factors so callers can undo the transformation.
	// A must not be a fleeting (temporary) argument, since it is updated in place.
	`RowVector' scaling
	assert_msg(!isfleeting(A), "input cannot be fleeting")
	scaling = compute_stdevs(A)
	A = A :/ scaling
	return(scaling)
}
|
|
|
|
|
`Variables' st_data_pool(`Vector' sample,
                         `Varlist' vars,
                         `Integer' poolsize,
                         | `Boolean' compact,
                         `Varlist' keepvars,
                         `Boolean' verbose)
{
	// Load `vars' into Mata in chunks of at most `poolsize' columns, to bound
	// peak memory usage. With compact=1, after each chunk is copied the Stata
	// dataset is trimmed down to just the variables still needed
	// (keepvars + the not-yet-loaded vars).
	`Integer'               i, j, k
	`Varlist'               temp_keepvars
	`Variables'             data

	if (args()<4 | compact == .) compact = 0
	if (args()<6 | verbose == .) verbose = 0

	k = cols(vars)
	assert_msg(poolsize > 0, "poolsize must be a positive integer")

	if (k <= poolsize) {
		// Everything fits in a single chunk
		data = st_data(sample, vars)
		if (compact) compact_dataset(keepvars)
	}
	else {
		data = J(rows(sample), 0, .)
		for (i=1; i<=k; i=i+poolsize) {
			// Chunk covers columns i..j
			j = i + poolsize - 1
			if (j>k) j = k
			data = data, st_data(sample, vars[i..j])
			if (compact) {
				// Note: the join "," binds tighter than "?:" in Mata, so the
				// else-branch is (keepvars, vars[j+1..k]) — i.e. keep the
				// caller's vars plus whatever hasn't been loaded yet
				temp_keepvars = j == k ? keepvars : keepvars , vars[j+1..k]
				compact_dataset(temp_keepvars)
			}
		}
	}

	assert_msg(k == cols(data), "could not load data into Mata correctly; please contact author")
	return(data)
}
|
|
|
|
|
`Void' compact_dataset(`Varlist' keepvars)
{
	// Drop every variable except `keepvars' (factor-variable notation is
	// expanded to base variables via -fvrevar, list-); an empty list clears
	// the entire dataset.
	keepvars = tokens(invtokens(keepvars))	// flatten/normalize the list
	if (cols(keepvars)) {
		stata(sprintf("fvrevar %s, list", invtokens(keepvars)))
		// novarabbrev prevents accidental matches through abbreviation
		stata(sprintf("novarabbrev keep %s", st_global("r(varlist)")))
	}
	else {
		stata("clear")
	}
}
|
|
|
|
|
`Void' add_undocumented_options(`String' object, `String' options, `Integer' verbose)
{
	// Parse a string of the form "key1(val1) key2(val2) ..." and assign each
	// value to <object>.<key> via a guarded -mata- statement. Lets advanced
	// users set undocumented class members from the command line.
	// Raises error 198 on malformed input or unknown keys.
	`StringRowVector'       tokens
	`Integer'               num_tokens, i
	`String'                key, val, cmd, msg

	if (options == "") return
	// Each well-formed option yields exactly 4 tokens: key ( val )
	tokens = tokens(options, " ()")

	msg = sprintf("options {bf:%s} not allowed", options)
	assert_msg(!mod(cols(tokens), 4), msg, 198, 0)

	num_tokens = trunc(cols(tokens) / 4)
	for (i=0; i<num_tokens; i++) {
		key = tokens[4*i+1]
		assert_msg(tokens[4*i+2] == "(")
		val = tokens[4*i+3]
		assert_msg(tokens[4*i+4] == ")")
		if (verbose > 1) printf("{txt} %s.%s = %s\n", object, key, val)
		// -cap- swallows any assignment error; surface it via c(rc) below
		cmd = sprintf("cap mata: %s.%s = %s", object, key, val)
		stata(cmd)
		assert_msg(!st_numscalar("c(rc)"), sprintf("option {bf:%s} not allowed", key), 198, 0)
	}
}
|
|
|
|
|
|
|
|
|
`Vector' get_indiv_idx(`Vector' sample, `Vector' indiv_sample, `Varname' group_id, `Vector' idx)
{
	// Translate group-level observations slated for dropping (sample[idx])
	// into the matching *individual-level* observation indices of indiv_sample,
	// by matching through the group id.
	`Factor'                F
	`Vector'                mask, dropped_levels, indiv_idx

	F = factor(group_id, indiv_sample, ., "", 1, 1, ., 0)

	// Map each dataset obs to its group level (0 = not in indiv_sample)
	mask = J(st_nobs(), 1, 0)
	mask[indiv_sample] = F.levels

	// Group levels corresponding to the dropped group-level obs
	dropped_levels = mask[sample[idx]]

	// Flag those levels...
	mask = create_mask(F.num_levels, 0, dropped_levels, 1)

	// ...and select the individual-level obs belonging to a flagged level
	indiv_idx = selectindex(mask[F.levels])

	return(indiv_idx)
}
|
|
|
|
|
|
|
|
|
|
|
`RowVector' safe_divide(`RowVector' numerator, `RowVector' denominator, | `Real' epsi) {
	// Elementwise division with the denominator floored at epsi
	// (default: machine epsilon), so zero or tiny denominators cannot
	// produce missing or exploding values
	`RowVector' floored
	if (args()<3 | epsi==.) epsi = epsilon(1)
	floored = colmax(denominator \ J(1, cols(denominator), epsi))
	return(numerator :/ floored)
}
|
|
|
| end
|
|
|
|
|
|
|
|
|
| mata:
|
|
|
class Solution
{
	// Container for the partialled-out data and every estimation result that
	// Solution::post() later copies into e().

	// Per-variable statistics of the (partialled-out) data columns
	`RowVector'             tss
	`RowVector'             means
	`RowVector'             stdevs
	`RowVector'             norm2
	`Boolean'               is_standardized
	`Boolean'               has_intercept

	// Solver output
	`Integer'               stop_code
	`Matrix'                alphas
	`Matrix'                data

	// Regression results
	`Vector'                b
	`Matrix'                V
	`Integer'               N
	`Integer'               K
	`Integer'               rank
	`Integer'               df_r
	`Integer'               df_a
	`Vector'                resid
	`RowVector'             kept

	// Command metadata shown in the output header / stored in e()
	`String'                cmd
	`String'                cmdline
	`String'                model
	`String'                title

	// Convergence and fit statistics
	`Boolean'               converged
	`Integer'               iteration_count
	`Varlist'               extended_absvars
	`Integer'               df_m
	`Integer'               N_clust
	`Integer'               N_clust_list
	`Real'                  rss
	`Real'                  rmse
	`Real'                  F
	`Real'                  tss_within
	`Real'                  sumweights
	`Real'                  r2
	`Real'                  r2_within
	`Real'                  r2_a
	`Real'                  r2_a_within
	`Real'                  ll
	`Real'                  ll_0
	`Real'                  accuracy

	// Weight and VCE settings
	`String'                weight_var
	`String'                weight_type
	`String'                vcetype
	`Integer'               num_clusters
	`Varlist'               clustervars

	// Estimation options
	`Real'                  collinear_tol
	`Boolean'               fast_regression
	`Boolean'               report_constant

	// Variable bookkeeping (full lists vs. variables kept after collinearity checks;
	// indepvar_status flags: 0=ok, nonzero=dropped/omitted)
	`Varname'               depvar
	`Varlist'               fullindepvars
	`Varlist'               fullindepvars_bn
	`Varlist'               indepvars
	`RowVector'             indepvar_status
	`Varlist'               varlist
	`Varname'               residuals_varname

	// Methods
	`Void'                  new()
	`Void'                  check_collinear_with_fe()
	`Void'                  expand_results()
	`Void'                  post()
}
|
|
|
|
|
`Void' Solution::new()
{
	// Set defaults; the remaining members are filled in during
	// partialling-out and estimation
	collinear_tol = .
	converged = 0
	stop_code = 0
	fast_regression = 0
	iteration_count = 0
	cmd = "reghdfe"
	model = "ols"
	title = "Linear regression"
	residuals_varname = ""
	report_constant = .
}
|
|
|
|
|
|
|
`Void' Solution::check_collinear_with_fe(`Integer' verbose)
{
	// After partialling out, variables with (numerically) no remaining variation
	// were collinear with the fixed effects: drop them from `data' and the
	// per-column statistics, flag them in indepvar_status (code 2), and warn.
	// A collinear *depvar* is kept (its column zeroed) so estimation still runs.
	`RowVector'             is_collinear, ok_index, collinear_index
	`Boolean'               first_is_depvar
	`Integer'               num_collinear, i

	// Sanity checks
	// (BUGFIX: this assertion validates collinear_tol, but previously carried the
	//  unrelated message "error partialling out; missing values found")
	assert_msg(inrange(collinear_tol, 0, 1e-1), "collinear_tol outside valid range [0, 0.1]")
	assert_msg(K == cols(data))

	// Nothing to do with zero or one column
	if (K<=1) return

	// A column is collinear with the FEs when its post-partialling sum of
	// squares is negligible relative to its original squared norm
	is_collinear = (diagonal(cross(data, data))' :/ norm2) :<= (collinear_tol)

	// Special-case the depvar (first column): warn but never drop it
	first_is_depvar = (depvar != "") & (depvar == varlist[1])
	if (first_is_depvar & is_collinear[1]) {
		assert_msg(indepvar_status[1] == 0)
		if (verbose > -1) printf("{txt}warning: dependent variable {res}%s{txt} is likely perfectly explained by the fixed effects (tol =%3.1e)\n", depvar, collinear_tol)
		is_collinear[1] = 0

		// Zero it out so downstream computations treat it as fully explained
		data[., 1] = J(rows(data), 1, 0)
	}

	num_collinear = sum(is_collinear)

	// Remove collinear columns from the data and its per-column statistics
	if (num_collinear) {
		data = select(data, !is_collinear)
		tss = select(tss, !is_collinear)
		means = select(means, !is_collinear)
		stdevs = select(stdevs, !is_collinear)
		norm2 = select(norm2, !is_collinear)
	}

	// Flag the dropped variables with status code 2 (= collinear with the FEs)
	ok_index = selectindex(indepvar_status:==0)
	collinear_index = select(ok_index, is_collinear)
	indepvar_status[collinear_index] = J(1, num_collinear, 2)

	// Report each dropped variable
	for (i=1; i<=K; i++) {
		if (is_collinear[i] & verbose>-1) {
			printf("{txt}note: {res}%s{txt} is probably collinear with the fixed effects (all partialled-out values are close to zero; tol =%3.1e)\n", varlist[i], collinear_tol)
		}
	}

	K = cols(data)
}
|
|
|
|
|
`Void' Solution::expand_results(`String' bname,
                                `String' Vname,
                                `Integer' verbose)
{
	// Rebuild full-size b and V — with zeros in the slots of base, omitted,
	// and collinear variables — and store them into the Stata matrices
	// named `bname' and `Vname'.
	`RowVector'             ok_index
	`Integer'               full_K
	`Vector'                full_b
	`Matrix'                full_V

	if (report_constant) {
		// Append _cons to every variable list so the expanded b/V include it
		if (verbose > 0) printf("{txt}# Adding _cons to varlist\n")
		indepvar_status = indepvar_status, 0
		fullindepvars = fullindepvars, "_cons"
		fullindepvars_bn = fullindepvars_bn, "_cons"
		indepvars = indepvars, "_cons"
	}

	if (verbose > 1) printf("{txt}# Expanding 'b' and 'V' to include base and omitted variables\n")
	// The first entry of indepvar_status corresponds to the depvar, hence the -1
	full_K = cols(indepvar_status) - 1
	assert(full_K >= K)
	full_b = J(full_K, 1, 0)
	full_V = J(full_K, full_K, 0)

	if (K | report_constant) {
		// Scatter the estimated coefficients into the kept variables' slots
		ok_index = selectindex(indepvar_status[2..cols(indepvar_status)]:==0)
		full_b[ok_index] = b
		full_V[ok_index, ok_index] = V
	}

	st_matrix(bname, full_b')
	st_matrix(Vname, full_V)
}
|
|
|
|
|
`Void' Solution::post()
{
	// Copy all stored estimation results into e() — scalars first, then macros
	`String'                text
	`Integer'               i

	// Scalars
	st_numscalar("e(N)", N)
	st_numscalar("e(rank)", rank)
	st_numscalar("e(df_r)", df_r)
	st_numscalar("e(df_m)", df_m)
	st_numscalar("e(tss)", tss)
	st_numscalar("e(tss_within)", tss_within)
	st_numscalar("e(rss)", rss)
	st_numscalar("e(mss)", tss - rss)
	st_numscalar("e(rmse)", rmse)
	st_numscalar("e(F)", F)
	st_numscalar("e(ll)", ll)
	st_numscalar("e(ll_0)", ll_0)
	st_numscalar("e(r2)", r2)
	st_numscalar("e(r2_within)", r2_within)
	st_numscalar("e(r2_a)", r2_a)
	st_numscalar("e(r2_a_within)", r2_a_within)
	if (!missing(sumweights)) st_numscalar("e(sumweights)", sumweights)
	st_numscalar("e(ic)", iteration_count)
	st_numscalar("e(converged)", converged)
	st_numscalar("e(report_constant)", report_constant)

	// Cluster information (only when clustering was used)
	if (!missing(N_clust)) {
		st_numscalar("e(N_clust)", N_clust)
		st_numscalar("e(N_clustervars)", num_clusters)
		for (i=1; i<=num_clusters; i++) {
			text = sprintf("e(N_clust%g)", i)
			st_numscalar(text, N_clust_list[i])
			text = sprintf("e(clustvar%g)", i)
			st_global(text, clustervars[i])
		}
		text = "Statistics robust to heteroskedasticity"
		st_global("e(title3)", text)
		st_global("e(clustvar)", invtokens(clustervars))
	}

	// Command metadata and hooks for predict/estat/footnote
	st_global("e(cmd)", cmd)
	st_global("e(cmdline)", cmdline)
	st_global("e(model)", model)
	st_global("e(predict)", "reghdfe_p")
	st_global("e(estat_cmd)", "reghdfe_estat")
	st_global("e(footnote)", "reghdfe_footnote")

	st_global("e(marginsnotok)", "Residuals SCore")
	st_global("e(depvar)", depvar)
	st_global("e(indepvars)", invtokens(indepvars))

	assert(title != "")
	text = sprintf("HDFE %s", title)
	st_global("e(title)", text)

	if (residuals_varname != "") {
		st_global("e(resid)", residuals_varname)
	}

	// Map the internal vcetype onto Stata's display conventions:
	// e(vcetype) gets the capitalized label, e(vce) the lowercase option name
	text = strproper(vcetype)
	if (text=="Cluster") text = "Robust"
	if (text=="Unadjusted") text = ""
	assert(anyof( ("", "Robust", "Jackknife", "Bootstrap") , text))
	if (text!="") st_global("e(vcetype)", text)

	text = vcetype
	if (text=="unadjusted") text = "ols"
	st_global("e(vce)", text)

	// Weights
	if (weight_type != "") {
		st_global("e(wexp)", "= " + weight_var)
		st_global("e(wtype)", weight_type)
	}

}
|
|
|
| end
|
|
|
|
|
|
|
|
|
| mata:
|
|
|
class FixedEffects
{
	// Main HDFE object: holds the absorbed factors, the estimation sample,
	// the solver configuration, and the resulting Solution.

	// Factor/sample information
	`Integer'               G                   // # of absorbed fixed effects
	`Integer'               N                   // # of obs (after dropping singletons)
	`Integer'               M                   // total # of FE coefficients (A is N x M)
	`FE_Factors'            factors
	`Vector'                sample
	`String'                tousevar
	`Integer'               num_singletons
	`Varlist'               absvars
	`Varlist'               extended_absvars

	// Individual-level fixed effects (group id + individual id)
	`Varname'               group_id
	`Varname'               individual_id
	`Varname'               indiv_tousevar
	`String'                function_individual // aggregation: "sum" or "mean"
	`Vector'                indiv_sample
	`Variable'              indiv_weights
	`Boolean'               has_indiv_fe

	// Weights (scalar 1 means unweighted)
	`Boolean'               has_weights
	`Variable'              weights
	`String'                weight_var
	`String'                weight_type
	`Variable'              true_weights
	`Real'                  weights_scale

	// Saving fixed-effect estimates (alphas)
	`Boolean'               save_any_fe
	`Boolean'               save_all_fe
	`Boolean'               storing_alphas

	`Boolean'               has_intercept

	// Parallel execution options
	`Integer'               parallel_maxproc
	`Boolean'               parallel_force
	`String'                parallel_opts
	`String'                parallel_dir

	`Boolean'               initialized
	`Integer'               verbose
	`Boolean'               timeit

	`Integer'               parallel_numproc
	`RowVector'             parallel_poolsizes

	// Solver settings
	`String'                technique
	`Integer'               maxiter
	`Integer'               miniter
	`Real'                  tolerance
	`String'                transform
	`String'                acceleration
	`String'                preconditioner
	`Boolean'               abort

	// Acceleration tuning
	`Integer'               min_ok
	`Integer'               accel_start
	`Integer'               accel_freq

	// Memory usage
	`Integer'               poolsize
	`Boolean'               compact

	`Solution'              solution

	`String'                panelvar
	`String'                timevar

	// Degrees-of-freedom bookkeeping
	`StringRowVector'       dofadjustments
	`Integer'               G_extended
	`Vector'                doflist_K
	`Vector'                doflist_M
	`Vector'                doflist_M_is_exact
	`Vector'                doflist_M_is_nested

	`Integer'               df_a_nested
	`Integer'               df_a_redundant
	`Integer'               df_a_initial
	`Integer'               df_a

	// VCE settings
	`String'                vcetype
	`Integer'               num_clusters
	`Varlist'               clustervars
	`Varlist'               base_clustervars

	`Boolean'               drop_singletons
	`String'                absorb

	// Lifecycle
	`Void'                  new()
	`Void'                  init()

	`Void'                  partial_out()
	`Void'                  save_touse()
	`Void'                  save_variable()
	`Void'                  post_footnote()
	`Void'                  store_alphas()
	`Void'                  store_alphas_map()
	`Void'                  store_alphas_lsmr()

	// Linear-operator interface over the implicit FE design matrix A
	`Integer'               num_rows()
	`Integer'               num_cols()
	`Vector'                mult()
	`Vector'                mult_transpose()

	`Void'                  load_weights()
	`Void'                  update_preconditioner()

	`Variables'             project_one_fe()
}
|
|
|
|
|
`Void' FixedEffects::new()
{
	// Set defaults; the caller overrides selected members before init()

	initialized = 0
	absvars = tousevar = ""
	num_singletons = 0
	sample = indiv_sample = J(0, 1, .)

	// Solver defaults
	technique = "lsmr"
	preconditioner = "block_diagonal"
	drop_singletons = 1
	weight_type = weight_var = ""
	weights = indiv_weights = 1		// scalar 1 = unweighted
	true_weights = J(0, 1, .)
	weights_scale = .
	abort = 1
	verbose = 0
	timeit = 0
	has_indiv_fe = 0

	// Acceleration defaults
	min_ok = 1
	accel_start = 6
	accel_freq = 3

	// Individual-FE defaults
	group_id = ""
	individual_id = ""
	indiv_tousevar = ""

	// Parallel defaults (0/"" = disabled)
	parallel_maxproc = 0
	parallel_force = 0
	parallel_opts = ""
	parallel_dir = ""

	storing_alphas = 0
}
|
|
|
|
|
`Void' FixedEffects::init()
{
	// One-time setup: parse absorb(), build each FE factor, iteratively drop
	// singleton observations, load slope variables and weights, and precompute
	// the sort-by-group permutations needed when an individual FE is present.
	`StringRowVector'       ivars, cvars, targets
	`RowVector'             intercepts, num_slopes
	`Vector'                idx, indiv_idx
	`Integer'               num_singletons_i
	`Integer'               i, g, gg, j, remaining, rc
	`Boolean'               already_found_individual
	`Boolean'               is_fw_or_iw, zero_threshold

	assert_msg(initialized == 0, "HDFE already initialized")

	has_indiv_fe = this.individual_id != ""

	// Default sample: every observation
	if (this.tousevar == "") {
		this.tousevar = st_tempname()
		st_store(., st_addvar("byte", this.tousevar, 1), J(st_nobs(), 1, 1))
	}

	// Parse absorb() on the Stata side; ms_parse_absvars fills the s() macros
	rc = _stata(`"ms_parse_absvars "' + this.absvars)
	assert_msg(!rc, "error in option absorb()", rc, 0)
	assert_msg(st_global("s(options)") == "", sprintf("invalid suboptions in absorb(): %s", st_global("s(options)")), 198, 0)
	if (verbose > 0) stata(`"sreturn list"')

	// Exclude obs with missing values in any absorbed variable
	stata(sprintf("markout %s %s, strok", this.tousevar, st_global("s(basevars)")))
	if (has_indiv_fe) {
		stata(sprintf("markout %s %s, strok", this.indiv_tousevar, st_global("s(basevars)")))
	}

	// Unpack the parsing results
	this.absvars = tokens(st_global("s(absvars)"))
	this.has_intercept = strtoreal(st_global("s(has_intercept)"))
	ivars = tokens(st_global("s(ivars)"))
	cvars = tokens(st_global("s(cvars)"))
	targets = strtrim(tokens(st_global("s(targets)")))
	intercepts = strtoreal(tokens(st_global("s(intercepts)")))
	num_slopes = strtoreal(tokens(st_global("s(num_slopes)")))

	this.G = strtoreal(st_global("s(G)"))
	assert(this.G == cols(this.absvars))

	this.factors = FE_Factor(G)
	this.extended_absvars = tokens(st_global("s(extended_absvars)"))
	this.save_any_fe = strtoreal(st_global("s(save_any_fe)"))
	this.save_all_fe = strtoreal(st_global("s(save_all_fe)"))

	// Load the sample and (optionally) the weights
	this.sample = selectindex(st_data(., tousevar))
	if (this.weight_var != "") this.weights = st_data(this.sample, this.weight_var)

	if (has_indiv_fe) {
		assert(this.weight_type != "fweight")
		this.indiv_sample = selectindex(st_data(., indiv_tousevar))
		if (this.weight_var != "") this.indiv_weights = st_data(this.indiv_sample, this.weight_var)
		this.group_id = group_id
		this.individual_id = individual_id
	}

	if (verbose > 0) printf("\n{txt}{title:Loading fixed effects information:}\n")

	if (G == 1 & ivars[1] == "_cons") {
		// Special case: absorbing only a constant — a single level covering all obs
		this.N = rows(this.sample)
		this.factors[1].num_obs = this.N
		this.factors[1].counts = this.N
		this.factors[1].num_levels = 1
		this.factors[1].levels = J(this.N, 1, 1)
		this.factors[1].is_sorted = 1
		this.factors[1].method = "none"
		this.factors[1].is_individual_fe = 0
		if (verbose > 0) {
			logging_singletons("start", absvars)
			// NOTE(review): `i' is never assigned in this branch, so a missing
			// value is passed to the logger — confirm this is intentional
			logging_singletons("iter1", absvars, this.factors[1], intercepts[1], num_slopes[1], rows(this.sample), i, 1)
			logging_singletons("iter2", absvars, this.factors[1])
			printf(" %10.0g {c |}", 0)
			printf("\n")
			logging_singletons("end", absvars)
		}
	}
	else {
		// Build the factors, iterating over the FEs (possibly multiple passes)
		// until a full round finds no more singletons
		if (verbose > 0) logging_singletons("start", absvars)
		remaining = this.G
		i = 0
		already_found_individual = 0
		while (remaining) {
			++i
			g = 1 + mod(i-1, this.G)	// cycle through the FEs

			if (verbose > 0) logging_singletons("iter1", absvars, this.factors[g], intercepts[g], num_slopes[g], rows(this.sample), i, g)

			if (rows(this.sample) < 2) {
				if (verbose > 0) printf("\n")
				exit(error(2001))	// insufficient observations
			}

			// On the first pass over each FE, construct its factor object
			if (i <= this.G) {
				if (ivars[g] == this.individual_id) {
					// This absvar is the individual FE
					already_found_individual = 1

					this.factors[g] = indiv_factor(group_id, individual_id, this.sample, this.indiv_sample, function_individual, verbose)
					assert_msg(!missing(factors[g].num_levels), "Stata/Mata bug: Individual_Factor.num_levels is missing")
					assert(this.factors[g].is_individual_fe == 1)
				}
				else {
					// Standard FE
					this.factors[g] = fe_factor(ivars[g], this.sample, ., "", ., 1, ., 0)
					assert(this.factors[g].is_individual_fe == 0)
				}
			}

			if (verbose > 0) logging_singletons("iter2", absvars, this.factors[g])

			if (drop_singletons) {
				// fweights/iweights need weight-aware singleton detection;
				// iweights additionally treat zero-weight obs as droppable
				is_fw_or_iw = (this.weight_type == "fweight") | (this.weight_type == "iweight")
				zero_threshold = (this.weight_type == "iweight") ? 1 : 0
				if (this.factors[g].is_individual_fe) {
					idx = factors[g].drop_singletons(this.sample, this.indiv_sample)
				}
				else {
					idx = this.factors[g].drop_singletons(is_fw_or_iw ? this.weights : J(0, 1, .), zero_threshold)
				}
				num_singletons_i = rows(idx)
				this.factors[g].num_singletons = this.factors[g].num_singletons + num_singletons_i
				this.num_singletons = this.num_singletons + num_singletons_i
				if (verbose > 0) {
					printf(" %10.0g {c |}", num_singletons_i)
					displayflush()
				}

				if (num_singletons_i == 0) {
					--remaining
				}
				else {
					// Dropping obs may create new singletons in the other FEs,
					// so they all need to be revisited
					remaining = this.G - 1

					// Keep the individual-level sample in sync with the drop
					if (has_indiv_fe) {
						indiv_idx = get_indiv_idx(this.sample, this.indiv_sample, this.group_id, idx)
						this.indiv_sample[indiv_idx] = J(rows(indiv_idx), 1, 0)
						this.indiv_sample = select(this.indiv_sample, this.indiv_sample)
					}

					// Zero out, then compress, the dropped group-level obs
					this.sample[idx] = J(rows(idx), 1, 0)

					if (this.weight_type == "aweight" | this.weight_type == "pweight") this.weights = select(this.weights, this.sample)

					this.sample = select(this.sample, this.sample)

					// Propagate the drop to the factors already constructed
					for (j=i-1; j>=max((1, i-remaining)); j--) {
						gg = 1 + mod(j-1, this.G)
						this.factors[gg].drop_obs(this.factors[gg].is_individual_fe ? indiv_idx : idx)
						if (verbose > 0) printf("{res} .")
					}
				}
			}
			else {
				if (verbose > 0) printf(" n/a {c |}")
				--remaining
			}
			if (verbose > 0) printf("\n")
		}
		if (verbose > 0) logging_singletons("end", absvars)
	}

	// If any obs were dropped, write the shrunken sample back to the touse variable
	if (drop_singletons & num_singletons) this.save_touse()

	if (has_indiv_fe) {
		assert_msg(already_found_individual, "individual id not found on absorb()", 100, 0)
	}

	// Report dropped singletons (iweights can also drop "separated" obs)
	if (drop_singletons & this.num_singletons>0 & verbose>-1 | this.factors[1].num_obs<2) {
		if (this.weight_type=="iweight") {

			printf(`"{txt}(dropped %s observations that are either {browse "http://scorreia.com/research/singletons.pdf":singletons} or {browse "http://scorreia.com/research/separation.pdf":separated} by a fixed effect)\n"', strofreal(this.num_singletons))
		}
		else {
			printf(`"{txt}(dropped %s {browse "http://scorreia.com/research/singletons.pdf":singleton observations})\n"', strofreal(this.num_singletons))
		}
	}

	if (this.factors[1].num_obs < 2) {
		exit(error(2001))
	}

	this.N = factors[1].num_obs

	assert_msg(this.N > 1, "N check #2")
	assert_msg(!missing(this.N), "N check #3")

	// Per-FE setup: panel info, slope variables, and save-FE targets
	if (verbose > 0) printf("\n{txt}# Initializing panelsetup() and loading slope variables for each FE\n\n")
	for (g=1; g<=this.G; g++) {
		assert_in(this.factors[g].is_individual_fe, (0, 1))

		this.factors[g].absvar = absvars[g]
		this.factors[g].ivars = tokens(ivars[g])
		this.factors[g].cvars = tokens(cvars[g])
		this.factors[g].has_intercept = intercepts[g]
		this.factors[g].num_slopes = num_slopes[g]
		this.factors[g].target = targets[g]
		this.factors[g].save_fe = (targets[g] != "")

		if (verbose > 0) printf("{txt} - Fixed effects: {res}%s{txt}\n", this.factors[g].absvar)
		if (verbose > 0) printf("{txt} panelsetup()\n")

		this.factors[g].panelsetup()

		if (this.factors[g].num_slopes) {
			// Load the slope variables; standardizing them improves the
			// conditioning of the solver (stdevs are kept so effects can be unscaled)
			if (verbose > 0) printf("{txt} loading cvars({res}%s{txt})\n", strtrim(invtokens(cvars)))

			if (this.factors[g].is_individual_fe) {
				this.factors[g].unsorted_x = st_data(this.indiv_sample, this.factors[g].cvars)
			}
			else {
				this.factors[g].unsorted_x = st_data(this.sample, this.factors[g].cvars)
			}
			this.factors[g].x_stdevs = standardize_data(this.factors[g].unsorted_x)
		}
	}

	// Load weights into the factors (updates counts, preconditioners, etc.)
	this.load_weights(1)

	// With an individual FE, the standard FEs also need the permutation that
	// sorts the sample by group id (used by Individual_Factor::mult*)
	if (has_indiv_fe) {
		for (g=1; g<=this.G; g++) {
			if (this.factors[g].is_individual_fe==0) {
				this.factors[g].group_p = order(st_data(this.sample, this.group_id), 1)
				this.factors[g].group_inv_p = invorder(this.factors[g].group_p)
			}
		}
	}

	this.initialized = 1
}
|
|
|
|
|
// Number of rows of the implicit fixed-effects design matrix A
// (i.e. the number of in-sample observations); N must already be set by init()
`Integer' FixedEffects::num_rows()
{
	assert(!missing(this.N))
	return(this.N)
}
|
|
|
|
|
// Number of columns of the implicit fixed-effects design matrix A
// (total count of FE coefficients: sum over factors of levels * coefs);
// M is computed by load_weights()
`Integer' FixedEffects::num_cols()
{
	// FIX: assert_msg() was called without a message, so a failure was
	// unidentifiable; add one, paralleling the "N check" messages in num_rows()
	assert_msg(!missing(M), "M check")
	return(M)
}
|
|
|
|
|
|
|
// Computes ans = A * x, where A is the implicit N x M design matrix of all
// fixed effects and x stacks one coefficient slice per FE factor.
// Used by the LSMR/LSQR solvers; `DEBUG'-prefixed asserts are compiled in
// only when the DEBUG local macro is empty.
`Vector' FixedEffects::mult(`Vector' x)

{

`Integer' g, i, n

`Vector' ans

// x must stack exactly M coefficients in a column vector
assert_msg(M == rows(x), "Matrix A and vector x not conformable")

`DEBUG' assert_msg(!hasmissing(x), ".mult() received missing values")

assert_msg(cols(x) == 1)

ans = J(this.N, 1, 0)

// Each factor consumes its own slice of x (num_levels * num_coefs entries)
// and accumulates its contribution into ans in place
for (g=i=1; g<=G; g++) {

n = factors[g].num_levels * (factors[g].has_intercept + factors[g].num_slopes)

factors[g].mult(x[|i \ i + n -1|], preconditioner, ans)

i = i + n

}

`DEBUG' assert_msg(!hasmissing(ans), ".mult() returned missing values")

`DEBUG' assert_msg(cols(ans) == 1)

return(ans)

}
|
|
|
|
|
// Computes alphas = A' * y, the transpose product of the implicit N x M
// design matrix with an N-vector; the result stacks one slice per FE factor,
// mirroring the layout consumed by mult(). Used by the LSMR/LSQR solvers.
`Vector' FixedEffects::mult_transpose(`Vector' y)

{

`Integer' g, i, n

`Vector' alphas

assert_msg(this.N == rows(y), "Matrix A and vector y not conformable")

assert_msg(cols(y) == 1)

alphas = J(this.M, 1, .)

// Each factor fills its own slice of alphas (num_levels * num_coefs entries)
for (g=i=1; g<=this.G; g++) {

n = factors[g].num_levels * (factors[g].has_intercept + factors[g].num_slopes)

alphas[| i \ i+n-1 |] = factors[g].mult_transpose(y, preconditioner)

i = i + n

}

`DEBUG' assert_msg(cols(alphas) == 1)

return(alphas)

}
|
|
|
|
|
|
|
// Load the weight variable (if any) from the dataset, rescale very small
// weights for numerical stability, compute M (total number of FE
// coefficients), push the weights down to each factor, and refresh the
// solver preconditioner. The <verbose> argument only gates log output.
`Void' FixedEffects::load_weights(`Boolean' verbose)

{

`Integer' g

this.has_weights = (this.weight_type != "" & this.weight_var != "")

if (this.verbose > 0 & verbose > 0 & this.has_weights) printf("\n{txt}# Loading weights [{res}%s{txt}={res}%s{txt}]\n\n", this.weight_type, this.weight_var)

// weights==1 is the sentinel for "not loaded yet"; only then read from Stata
if (this.has_weights) {

if (this.weights == 1) this.weights = st_data(this.sample, this.weight_var)

assert_msg(rows(this.weights) == rows(this.sample))

assert_msg(!hasmissing(this.weights), "weights can't be missing")

}

else {

this.weights = 1

}

// Rescale so the smallest weight is clipped into [2^-23, 1]; fweights are
// left untouched because their integer counts are meaningful as-is
weights_scale = 1

if (this.weight_type != "fweight") {

weights_scale = clip(colmin(this.weights), 1.0X-017, 1)

if (weights_scale != 1) this.weights = this.weights :/ weights_scale

}

// M = total number of FE coefficients across all factors
M = 0

for (g=1; g<=this.G; g++) {

M = M + factors[g].num_levels * (factors[g].has_intercept + factors[g].num_slopes)

// Individual FEs receive the individual-level weights; others the obs-level ones
if (this.has_weights) {

factors[g].set_weights(factors[g].is_individual_fe ? this.indiv_weights : this.weights, this.verbose > 0 & verbose > 0)

}

}

// Preconditioner depends on the weights, so it must be rebuilt here
this.update_preconditioner()

}
|
|
|
|
|
// Precompute per-factor data used by the solvers:
// - MAP technique: group means and inv(x'x) for the projections
// - LSMR/LSQR: diagonal or block-diagonal preconditioner matrices
// iweights get no precomputation here.
`Void' FixedEffects::update_preconditioner()

{

// NOTE(review): F appears to be unused in this function
`FE_Factor' F

`Integer' g

// No preconditioning/precomputation under iweights
if (this.weight_type == "iweight") {

}

// MAP: precompute the pieces needed by each within-projection
else if (this.technique == "map") {

for (g=1; g<=G; g++) {

assert(factors[g].panel_is_setup==1)

if (factors[g].num_slopes) {

if (verbose > 0) printf("{txt} - preprocessing cvars of factor {res}%s{txt}\n", factors[g].absvar)

// Slope+intercept factors demean the slope vars within each group
if (factors[g].has_intercept) {

factors[g].x_means = factors[g].panelmean(factors[g].unsorted_x, 1)

}

factors[g].inv_xx = precompute_inv_xx(factors[g])

}

}

}

else if (preconditioner == "none") {

}

else if (preconditioner == "diagonal") {

for (g=1; g<=G; g++) {

factors[g].init_diagonal_preconditioner()

}

}

else if (preconditioner == "block_diagonal") {

for (g=1; g<=this.G; g++) {

// Block-diagonal only pays off with more than one coefficient per level;
// otherwise fall back to the diagonal preconditioner
if (factors[g].has_intercept + factors[g].num_slopes > 1) {

factors[g].init_block_diag_preconditioner(this.weight_type, this.weights_scale, true_weights)

}

else {

factors[g].init_diagonal_preconditioner()

}

}

}

else {

// Unreachable: preconditioner was validated upstream
assert(0)

}

}
|
|
|
|
|
// Save (or update) the e(sample) indicator variable(s) in the dataset:
// a byte variable set to 1 for observations in .sample (and, when individual
// FEs are present, a second indicator for .indiv_sample).
// @param touse    optional variable name; defaults to this.tousevar
// @param replace  overwrite an existing variable instead of creating one;
//                 defaults to 1 when called with no args, else 0
`Void' FixedEffects::save_touse(| `Varname' touse, `Boolean' replace)
{
	`Integer'		idx
	`Vector'		mask

	// Fall back to the stored tousevar when no variable name was given
	if (args()<1 | touse=="") {
		assert(tousevar != "")
		touse = tousevar
	}

	// Default replace: 1 for no-arg calls, 0 when only <touse> was given
	if (args()==0) replace = 1
	if (args()==1 | replace==.) replace = 0

	if (verbose > 0) printf("\n{txt}# %s e(sample)\n", replace ? "Updating" : "Saving")

	// 1 for observations in the sample, 0 elsewhere
	mask = create_mask(st_nobs(), 0, sample, 1)

	if (replace) {
		st_store(., touse, mask)
	}
	else {
		idx = st_addvar("byte", touse, 1)
		st_store(., idx, mask)
	}

	// Same treatment for the individual-level sample, if applicable
	if (has_indiv_fe) {

		mask = create_mask(st_nobs(), 0, indiv_sample, 1)

		if (replace) {
			st_store(., indiv_tousevar, mask)
		}
		else {
			// BUGFIX: st_addvar() expects the variable *name*; previously the
			// observation-index vector <indiv_sample> was passed instead of
			// <indiv_tousevar> (compare the replace branch above)
			idx = st_addvar("byte", indiv_tousevar, 1)
			st_store(., idx, mask)
		}
	}
}
|
|
|
|
|
// Load the variables in <varnames>, partial out all fixed effects from them,
// and store the residualized data plus ancillary statistics in this.solution.
// @param varnames            variables to load and residualize
// @param save_tss            compute & store total sum of squares (default 1)
// @param standardize_inputs  standardize columns before solving (default 1)
// Dispatches on this.technique: "map" (alternating projections) or
// "lsmr"/"lsqr" (Krylov solvers); aborts for unimplemented techniques.
`Void' FixedEffects::partial_out(`Varlist' varnames,
                                |`Boolean' save_tss,
                                 `Boolean' standardize_inputs)
{
	if (this.timeit) timer_on(40)

	`Boolean'				input_is_varlist
	`Boolean'				solver_failed
	`Integer'				i, g
	`Solution'				temp_sol
	`Matrix'				data
	`String'				msg
	`Matrix'				tss
	`Varlist'				keepvars

	`FunctionP'				fun_transform
	`FunctionP'				fun_accel

	assert_msg(initialized == 1, "must run HDFE.init() first")

	// Defaults for the optional arguments
	if (args()<2 | save_tss==.) save_tss = 1
	if (args()<3 | standardize_inputs==.) standardize_inputs = 1

	solution = Solution()
	solution.varlist = tokens(invtokens(varnames))
	solution.K = cols(solution.varlist)

	if (verbose > 0) {
		msg = poolsize > solution.K ? " in a single block" : sprintf(" in blocks of %g", poolsize)
		printf("\n{txt}# Loading and partialling out %g variables%s\n\n", solution.K, msg)
	}

	// Validate solver options before doing any heavy work
	assert_in(technique, ("map", "lsmr", "lsqr", "gt"), sprintf(`"invalid algorithm "%s""', technique))
	assert_in(preconditioner, ("none", "diagonal", "block_diagonal"), sprintf(`"invalid LSMR/LSQR preconditioner "%s""', preconditioner))
	if (G==1) acceleration = "none"
	if (transform=="kaczmarz" & acceleration=="conjugate_gradient") printf("{err}(WARNING: convergence is {bf:unlikely} with transform=kaczmarz and accel=CG)\n")

	// Free per-factor temporaries before allocating the data matrix
	for (g=1; g<=this.G; g++) {
		this.factors[g].cleanup_before_saving()
	}

	// Load the data (optionally compacting the dataset to save memory)
	if (verbose > 0) printf("{txt} - Loading data into Mata: {res}%s{txt}\n", invtokens(varnames))
	if (compact) keepvars = this.base_clustervars , this.timevar, this.panelvar
	if (this.timeit) timer_on(41)
	data = st_data_pool(sample, solution.varlist, poolsize, compact, keepvars, verbose)
	if (this.timeit) timer_off(41)

	assert(weights == 1 | rows(weights) == rows(data))

	if (this.timeit) timer_on(42)

	// Unweighted sum of squares of each column (used by convergence checks)
	solution.norm2 = diagonal(cross(data, data))'

	// Weighted column means (used to report the constant later)
	solution.means = mean(data, weights)

	// Total sum of squares, centered only when the model has an intercept
	if (save_tss) {
		if (verbose > 0) printf("{txt} - Computing total sum of squares\n")
		if (has_intercept) {
			tss = crossdev(data, solution.means, weights, data, solution.means)
		}
		else {
			tss = cross(data, weights, data)
		}
		tss = diagonal(tss)'
		if (weight_type=="aweight" | weight_type=="pweight") tss = tss :* (rows(data) / sum(weights))
		solution.tss = tss
	}

	// Standardize columns for numerical stability; keep stats consistent
	if (standardize_inputs) {
		if (verbose > 0) printf("{txt} - Standardizing variables\n")
		solution.stdevs = standardize_data(data)
		solution.norm2 = solution.norm2 :/ solution.stdevs :^ 2
		solution.means = solution.means :/ solution.stdevs
	}

	solution.is_standardized = standardize_inputs
	solution.has_intercept = has_intercept

	if (this.timeit) timer_off(42)

	if (verbose > 1) printf("\n")

	// Parallel mode: serialize state & data to disk and stop here; worker
	// processes will do the actual partialling-out
	if (parallel_maxproc > 0) {
		cleanup_for_parallel(this)
		if (this.timeit) timer_on(49)
		save_before_parallel(parallel_dir, this, data)
		if (this.timeit) timer_off(49)
		if (this.timeit) timer_off(40)
		exit()
	}

	if (technique == "map") {

		// Select the within-transform and the acceleration scheme
		if (transform=="cimmino") fun_transform = &transform_cimmino()
		if (transform=="kaczmarz") fun_transform = &transform_kaczmarz()
		if (transform=="symmetric_kaczmarz") fun_transform = &transform_sym_kaczmarz()
		if (transform=="random_kaczmarz") fun_transform = &transform_rand_kaczmarz()
		if (transform=="unrolled_sym_kaczmarz") fun_transform = &transform_unrolled_sym_kaczmarz()
		if (transform=="commutative_kaczmarz") fun_transform = &transform_commutative_kaczmarz()
		assert_msg(fun_transform != NULL, "NULL transform function")

		if (acceleration=="test") fun_accel = &accelerate_test()
		if (acceleration=="none") fun_accel = &accelerate_none()
		if (acceleration=="conjugate_gradient") fun_accel = &accelerate_cg()
		if (acceleration=="steepest_descent") fun_accel = &accelerate_sd()
		if (acceleration=="aitken") fun_accel = &accelerate_aitken()
		if (acceleration=="hybrid") fun_accel = &accelerate_hybrid()
		assert_msg(fun_accel != NULL, "NULL accelerate function")

		if (verbose>0) printf("{txt} - Running solver (acceleration={res}%s{txt}, transform={res}%s{txt} tol={res}%-1.0e{txt})\n", acceleration, transform, tolerance)
		if (verbose==1) printf("{txt} - Iterating:")
		if (verbose>1) printf("{txt} ")

		if (this.timeit) timer_on(46)
		assert(this.solution.K == cols(data))
		map_solver(this, data, poolsize, fun_accel, fun_transform)
		if (this.timeit) timer_off(46)

	}
	else if (technique == "lsmr" | technique == "lsqr") {
		solution.stop_code = 0
		solution.iteration_count = 0
		solution.alphas = J(this.num_cols(), 0, .)

		// BUGFIX: log message read "in a pools up to size"
		if (verbose > 0) printf("{txt} - Partialling out (%s) in pools of up to size %g\n", strupper(technique), poolsize)

		// Solve one column at a time; keep the worst stop code / max iterations
		for (i=1; i<=cols(data); i++) {
			if (this.timeit) timer_on(46)
			if (technique == "lsmr") {
				temp_sol = lsmr(this, data[., i], miniter, maxiter, tolerance, tolerance, ., verbose)
			}
			else {
				temp_sol = lsqr(this, data[., i], miniter, maxiter, tolerance, tolerance, ., verbose)
			}
			if (this.timeit) timer_off(46)
			assert(temp_sol.stop_code > 0)
			solver_failed = (temp_sol.stop_code >= 10)
			if (solver_failed) printf("{err}convergence not achieved in %s iterations (stop code=%g); try increasing maxiter() or decreasing tol().\n", strtrim(sprintf("%8.0fc", temp_sol.iteration_count)), temp_sol.stop_code)
			// Stop code 13 (maxiter reached) is tolerated unless abort was requested
			if (solver_failed & temp_sol.stop_code == 13 & this.abort==0) solver_failed = 0
			if (solver_failed) exit(430)

			solution.stop_code = max(( solution.stop_code , temp_sol.stop_code ))
			solution.converged = solution.stop_code < 10
			solution.iteration_count = max(( solution.iteration_count , temp_sol.iteration_count ))
			if (this.timeit) timer_on(47)
			solution.alphas = solution.alphas , temp_sol.alphas
			data[., i] = temp_sol.data
			if (this.timeit) timer_off(47)
		}
		swap(solution.data, data)
	}
	else {
		_assert_abort(90, "ALGORITHM NOT CURRENTLY IMPLEMENTED", 1)
	}
	data = .

	// BUGFIX: the plural ternary was "s" in both branches, so a single
	// iteration printed "1 iterations"; the singular case now gets ""
	if (verbose == 0) printf(`"{txt}({browse "http://scorreia.com/research/hdfe.pdf":MWFE estimator} converged in %s iteration%s)\n"', strofreal(solution.iteration_count), solution.iteration_count > 1 ? "s" : "")
	if (verbose > 0) printf("\n")

	assert_msg(!hasmissing(solution.data), "error partialling out; missing values found")
	if (this.timeit) timer_off(40)
}
|
|
|
|
|
// Create new double variable(s) named <varname>, fill them with <data> over
// this.sample, and optionally attach one variable label per column.
`Void' FixedEffects::save_variable(`Varname' varname,
                                   `Variables' data,
                                 | `Varlist' varlabel)
{
	`RowVector'				var_idx
	`Integer'				col

	var_idx = st_addvar("double", tokens(varname))
	st_store(this.sample, var_idx, data)

	// Labels are applied only when given and nonempty
	if (args() >= 3 & varlabel != "") {
		for (col=1; col<=cols(data); col++) st_varlabel(var_idx[col], varlabel[col])
	}
}
|
|
|
|
|
// Post the fixed-effects footnote results to e(): counts, degrees-of-freedom
// scalars, absvar names, and the per-absvar DoF table shown after estimation.
`Void' FixedEffects::post_footnote()
{
	`Matrix'				table
	`StringVector'			rowstripe
	`StringRowVector'		colstripe
	`String'				text

	assert(!missing(G))
	st_numscalar("e(N_hdfe)", G)
	st_numscalar("e(N_hdfe_extended)", G_extended)
	st_numscalar("e(df_a)", df_a)
	st_numscalar("e(df_a_initial)", df_a_initial)
	st_numscalar("e(df_a_redundant)", df_a_redundant)
	st_numscalar("e(df_a_nested)", df_a_nested)
	st_global("e(dofmethod)", invtokens(dofadjustments))

	// Degenerate case: no absvars means only a constant was absorbed
	if (absvars == "") {
		absvars = extended_absvars = "_cons"
	}

	text = sprintf("Absorbing %g HDFE %s", G, plural(G, "group"))
	st_global("e(title2)", text)

	st_global("e(absvars)", invtokens(absvars))
	text = invtokens(extended_absvars)
	text = subinstr(text, "1.", "")
	st_global("e(extended_absvars)", text)

	// DoF table: one row per extended absvar
	// (categories, redundant, num coefs, inexact flag, nested flag)
	table = (doflist_K \ doflist_M \ (doflist_K-doflist_M) \ !doflist_M_is_exact \ doflist_M_is_nested)'
	// FIX: removed a dead store — rowstripe was first assigned
	// extended_absvars' and then immediately overwritten below
	rowstripe = J(rows(table), 1, "") , extended_absvars'
	colstripe = "Categories" \ "Redundant" \ "Num Coefs" \ "Inexact?" \ "Nested?"
	colstripe = J(cols(table), 1, "") , colstripe
	st_matrix("e(dof_table)", table)
	st_matrixrowstripe("e(dof_table)", rowstripe)
	st_matrixcolstripe("e(dof_table)", colstripe)

	st_numscalar("e(drop_singletons)", drop_singletons)
	st_numscalar("e(num_singletons)", num_singletons)

	// Full sample size = estimation sample plus dropped singletons
	if (length(st_numscalar("e(N)"))) st_numscalar("e(N_full)", st_numscalar("e(N)") + num_singletons)
}
|
|
|
|
|
// Project the columns of y onto the span of fixed effect g (i.e. return the
// within-group fitted values). Used by the MAP transforms. While
// storing_alphas is on, also caches the estimated coefficients of factor g
// in factors[g].tmp_alphas (only valid for single-column y).
`Variables' FixedEffects::project_one_fe(`Variables' y, `Integer' g)

{

`Boolean' store_these_alphas

`Matrix' alphas, proj_y

// This code path is not used when individual FEs are present
assert(individual_id == "")

// Alphas are only cached when requested AND this factor saves its FEs
store_these_alphas = storing_alphas & factors[g].save_fe

if (store_these_alphas) assert(cols(y)==1)

// Intercept-only factor: projection is just the group mean
if (factors[g].num_slopes==0) {

if (store_these_alphas) {

factors[g].tmp_alphas = alphas = factors[g].panelmean(y, 1)

return(alphas[factors[g].levels, .])

}

else {

// Single-column case uses vector indexing (faster than matrix indexing)
if (cols(y)==1 & factors[g].num_levels > 1) {

return(factors[g].panelmean(y, 1)[factors[g].levels])

}

else {

return(factors[g].panelmean(y, 1)[factors[g].levels, .])

}

}

}

// Factor with slopes: solve the within-group least-squares problem
else {

if (store_these_alphas) {

alphas = J(factors[g].num_levels, factors[g].has_intercept + factors[g].num_slopes, .)

proj_y = panelsolve_invsym(factors[g].sort(y), factors[g], alphas)

factors[g].tmp_alphas = alphas

return(proj_y)

}

else {

return(panelsolve_invsym(factors[g].sort(y), factors[g]))

}

}

}
|
|
|
|
|
// Recover the estimated fixed effects and save them as dataset variables.
// @param d_varname  either a variable name holding d = depvar - xb - resid,
//                   or the numeric vector d itself
// Dispatches to the technique-specific routine (MAP vs LSMR/LSQR).
`Void' FixedEffects::store_alphas(`Anything' d_varname)

{

`Variable' d

if (verbose > 0) printf("\n{txt}## Storing estimated fixed effects (technique=%s)\n\n", technique)

// Accept either a varname (string) or the data vector directly
if (eltype(d_varname) == "string") {

if (verbose > 0) printf("{txt} - Loading d = e(depvar) - xb - e(resid)\n")

d = st_data(sample, d_varname)

}

else {

d = d_varname

}

assert(!missing(d))

if (technique == "map") {

store_alphas_map(d)

}

else {

// LSMR and LSQR share the same recovery code
store_alphas_lsmr(d)

}

}
|
|
|
|
|
// Recover fixed-effect estimates via LSMR/LSQR: solve for the stacked alpha
// vector, then for each factor unstack its slice, undo the preconditioning
// and standardization, optionally demean the intercept, and save to the
// dataset. The unstacking layout depends on the preconditioner used.
`Void' FixedEffects::store_alphas_lsmr(`Variable' d)

{

`Integer' g, row_index, col_index, i

`Integer' k, n

`Boolean' intercept_only

`Solution' sol

`Matrix' alphas

`StringRowVector' varlabel

`RowVector' tmp_stdev

`Real' intercept_mean

// Solve A'A alpha = A'd with the same solver used for partialling out
if (technique == "lsmr") {

sol = lsmr(this, d, miniter, maxiter, tolerance, tolerance, ., verbose)

}

else {

sol = lsqr(this, d, miniter, maxiter, tolerance, tolerance, ., verbose)

}

if (verbose > 0) printf("{txt} - SSR of d wrt FEs: %g\n", quadcross(sol.data, sol.data))

// row_index walks sol.alphas (coef slices); col_index walks extended_absvars
for (g=row_index=col_index=1; g<=G; g++) {

// k = coefs per level; n = total coefs for this factor
k = factors[g].has_intercept + factors[g].num_slopes

n = factors[g].num_levels * k

intercept_only = factors[g].has_intercept& !factors[g].num_slopes

// Extract this factor's slice of the stacked alphas
alphas = sol.alphas[|row_index \ row_index + n -1|]

row_index = row_index + n

// Reshape the slice from a vector into (num_levels x k); the stacking
// order differs by preconditioner
if (factors[g].preconditioner == "none" | factors[g].preconditioner == "block_diagonal") {

alphas = inv_vec(alphas, k)

}

else {

if (!intercept_only) {

// Diagonal preconditioner stores the intercept block first,
// then the slope blocks
if (factors[g].has_intercept) {

alphas = alphas[|1\factors[g].num_levels|] , inv_vec(alphas[|factors[g].num_levels+1\.|] , k-1)

}

else {

alphas = inv_vec(alphas , k)

}

}

}

// Map preconditioned coefficients back to the original parameterization
factors[g].undo_preconditioning(alphas)

if (verbose > 0) printf("{txt} - [FE %g] Creating varlabels\n", g)

varlabel = J(1, k, "")

for (i=1; i<=k; i++) {

varlabel[i] = sprintf("[FE] %s", extended_absvars[col_index])

col_index++

}

// Undo the standardization applied to the slope variables
if (factors[g].num_slopes) {

if (verbose > 0) printf("{txt} - [FE %g] Recovering unstandardized variables\n", g)

tmp_stdev = factors[g].x_stdevs

if (factors[g].has_intercept) tmp_stdev = 1, tmp_stdev

alphas = alphas :/ tmp_stdev

}

// Intercept-only factors under no preconditioning: demean the intercepts
// (weighted by counts) so the FE levels are identified
if (factors[g].preconditioner=="none" & factors[g].has_intercept & !factors[g].num_slopes) {

assert_boolean(factors[g].has_weights)

if (factors[g].has_weights) {

intercept_mean = mean(alphas[., 1], factors[g].weighted_counts)

}

else {

intercept_mean = mean(alphas[., 1], factors[g].counts)

}

alphas[., 1] = alphas[., 1] :- intercept_mean

}

// Expand from level-space to observation-space and store in the dataset
if (factors[g].save_fe) {

if (verbose > 0) printf("{txt} - [FE %g] Storing alphas in dataset\n", g)

save_variable(factors[g].target, alphas[factors[g].levels, .], varlabel)

}

}

}
|
|
|
|
|
// Recover fixed-effect estimates under the MAP technique: rerun the
// Kaczmarz iterations with storing_alphas on (so project_one_fe caches each
// factor's coefficients), then unstandardize and save them to the dataset.
`Void' FixedEffects::store_alphas_map(`Variable' d)

{

`Integer' g, i, j

`StringRowVector' varlabel

`RowVector' tmp_stdev

// Allocate the alpha matrices for factors that save their FEs
if (verbose > 0) printf("{txt} - Initializing alphas\n")

for (g=j=1; g<=G; g++) {

if (!factors[g].save_fe) continue

factors[g].alphas = factors[g].tmp_alphas = J(factors[g].num_levels, factors[g].has_intercept + factors[g].num_slopes, 0)

}

// Iterate with alpha-caching enabled; the transform fills factors[g].alphas
if (verbose > 0) printf("{txt} - Computing alphas\n")

this.storing_alphas = 1

this.solution.converged = 0

d = accelerate_sd(this, d, &transform_kaczmarz())

this.storing_alphas = 0

if (verbose > 0) printf("{txt} - SSR of d wrt FEs: %g\n", quadcross(d,d))

// j walks extended_absvars so labels line up with each factor's coefs
if (verbose > 0) printf("{txt} - Creating varlabels\n")

for (g=j=1; g<=G; g++) {

if (!factors[g].save_fe) {

j = j + factors[g].has_intercept + factors[g].num_slopes

continue

}

varlabel = J(1, factors[g].has_intercept + factors[g].num_slopes, "")

for (i=1; i<=cols(varlabel); i++) {

varlabel[i] = sprintf("[FE] %s", extended_absvars[j])

j++

}

// Undo the standardization applied to the slope variables
if (factors[g].num_slopes) {

if (verbose > 0) printf("{txt} - Recovering unstandardized variables\n")

tmp_stdev = factors[g].x_stdevs

if (factors[g].has_intercept) tmp_stdev = 1, tmp_stdev

factors[g].alphas = factors[g].alphas :/ tmp_stdev

}

// Expand to observation-space, save, and release the cached alphas
if (verbose > 0) printf("{txt} - Storing alphas in dataset\n")

save_variable(factors[g].target, factors[g].alphas[factors[g].levels, .], varlabel)

factors[g].alphas = .

assert(factors[g].tmp_alphas == J(0, 0, .))

}

}
|
|
|
|
|
// Render the SMCL table logged while the FE factors are being initialized.
// @param step  "start"  -> table header
//              "iter1"  -> left half of a factor's row (name, intercept, ...)
//              "iter2"  -> right half of the row (levels, sorted, indiv flags)
//              "end"    -> table footer
// Remaining (optional) arguments are only read by the step that needs them.
`Void' logging_singletons(`String' step, | `StringRowVector' absvars, `Optional_FE_Factor' f, `Boolean' has_intercept, `Integer' num_slopes, `Integer' num_obs, `Integer' i, `Integer' g)

{

`Integer' spaces, G

`String' name

// Pad the Name column to the longest absvar
spaces = max((0, max(strlen(absvars))-4))

G = cols(absvars)

if (step == "start") {

printf("\n{txt}# Initializing Mata object for %g fixed %s\n\n", G, plural(G, "effect"))

printf("{txt} {c TLC}{hline 4}{c TT}{hline 3}{c TT}{hline 1}%s{hline 6}{c TT}{hline 6}{c TT}{hline 9}{c TT}{hline 11}{c TT}{hline 12}{c TT}{hline 9}{c TT}{hline 8}{c TT}{hline 14}{c TRC}\n", "{hline 1}" * spaces)

printf("{txt} {c |} i {c |} g {c |} %s Name {c |} Int? {c |} #Slopes {c |} Obs. {c |} Levels {c |} Sorted? {c |} Indiv? {c |} #Drop Singl. {c |}\n", " " * spaces)

printf("{txt} {c LT}{hline 4}{c +}{hline 3}{c +}{hline 1}%s{hline 6}{c +}{hline 6}{c +}{hline 9}{c +}{hline 11}{c +}{hline 12}{c +}{hline 9}{c +}{hline 8}{c +}{hline 14}{c RT}\n", "{hline 1}" * spaces)

}

else if (step == "iter1"){

name = absvars[g]

if (name == "") name = "_cons"

printf("{txt} {c |} %2.0f {c |} %1.0f {c |} {res}%s{txt} {c |} ", i, g, (spaces+5-strlen(name)) * " " + name)

printf("{txt}{%s}%3s{txt} {c |} %1.0f {c |}", has_intercept ? "txt" : "err", has_intercept ? "Yes" : "No", num_slopes)

printf("{res}%10.0g{txt} {c |}", num_obs)

}

else if (step == "iter2") {

printf(" {res}%10.0g{txt} {c |} %7s {c |} %6s {c |}", f.num_levels, f.is_sorted ? "Yes" : "No", f.is_individual_fe ? "Yes" : "No")

}

else if (step == "end") {

printf("{txt} {c BLC}{hline 4}{c BT}{hline 3}{c BT}{hline 1}%s{hline 6}{c BT}{hline 6}{c BT}{hline 9}{c BT}{hline 11}{c BT}{hline 12}{c BT}{hline 9}{c BT}{hline 8}{c BT}{hline 14}{c BRC}\n", "{hline 1}" * spaces)

}

else {

// Unknown step name is a programming error
assert(0)

}

displayflush()

}
|
|
|
| end
|
|
|
|
|
|
|
|
|
| mata:
|
|
|
|
|
// Solve the OLS regression on the partialled-out data stored in <sol>,
// filling in coefficients, VCE, and goodness-of-fit statistics.
// @param S         the FixedEffects object (weights, DoF, VCE settings)
// @param sol       Solution with .data = (y, X) already residualized
// @param vce_mode  "vce_none" (coefs only), "vce_small", or "vce_asymptotic"
`Void' function reghdfe_solve_ols(`FixedEffects' S, `Solution' sol, `String' vce_mode)

{

`Matrix' xx, inv_xx, W, inv_V, X

`Vector' xy, w, y

`Integer' tmp_N, used_df_r

`Real' stdev_y

`RowVector' stdev_x, is_collinear, idx

if (S.verbose > 0) printf("{txt}# Solving least-squares regression of partialled-out variables\n\n")

// pweights use the robust VCE formula
if (S.vcetype == "unadjusted" & S.weight_type=="pweight") S.vcetype = "robust"

assert_in(vce_mode, ("vce_none", "vce_small", "vce_asymptotic"))

assert_msg(rows(sol.means) == 1, "nonconforming row(means)")

assert_msg(cols(sol.means) == cols(sol.data), "nonconforming cols(means)")

// First column of sol.data is the depvar; the rest are regressors
sol.K = cols(sol.data) - 1

sol.N = rows(sol.data)

// Normalize weights so sum(w) matches the effective sample size N
if (rows(S.true_weights)) {

assert_msg(S.weight_type == "aweight")

sol.N = sum(S.true_weights)

w = S.weights * (sum(S.true_weights) / sum(S.weights))

}

else if (S.weight_type=="fweight") {

sol.N = quadsum(S.weights)

w = S.weights

}

else if (S.weight_type=="aweight" | S.weight_type=="pweight") {

w = S.weights * (sol.N / quadsum(S.weights))

}

else {

w = 1

}

assert_msg(!missing(S.weights_scale), "weights_scale must not be missing; did you ran .load_weights()?")

sol.sumweights = S.weight_type != "" ? quadsum(S.weights) * S.weights_scale : sol.N

// Build the cross-products; carve out x'y and x'x from the full (y,X)'(y,X)
sol.K = cols(sol.data) - 1

xx = quadcross(sol.data, w, sol.data)

sol.tss = sol.tss[1]

sol.tss_within = xx[1,1]

xy = sol.K ? xx[2..sol.K+1, 1] : J(0, 1, .)

xx = sol.K ? xx[2..sol.K+1, 2..sol.K+1] : J(0, 0, .)

assert_msg( cols(sol.norm2) == sol.K+1 , "partial_out() was run with a different set of vars")

assert_msg( cols(xx) == sol.K , "x'x has the wrong number of columns")

// Detect collinear regressors via invsym(); then drop them everywhere
inv_xx = reghdfe_rmcoll(xx, sol, is_collinear=., S.verbose)

reghdfe_trim_collinear(is_collinear, sol, xx, inv_xx, xy, y=., X=.)

sol.df_m = sol.rank = sol.K

sol.df_r = sol.N - S.df_a - sol.df_m

// Solve for b; qrsolve on X is the accurate default, xx/xy is the fast path
if (!sol.K) {

sol.b = J(0, 1, .)

}

else if (sol.fast_regression) {

if (S.verbose > 0) printf("{txt}note: solving least-squares regression with a faster but less accurate method\n")

sol.b = qrsolve(xx, xy)

}

else if (S.has_weights) {

sol.b = qrsolve(X :* sqrt(S.weights), y :* sqrt(S.weights))

}

else {

sol.b = qrsolve(X, y)

}

if (vce_mode != "vce_none") sol.resid = y - X * sol.b

// Optionally recover the absorbed constant and extend b/xx/inv_xx
assert_boolean(sol.report_constant)

if (sol.report_constant) {

tmp_N = (S.weight_type=="aweight" | S.weight_type=="pweight") ? sol.N : sol.sumweights

if (rows(S.true_weights)) tmp_N = sol.N

reghdfe_extend_b_and_xx(sol, xx, inv_xx, tmp_N)

}

// Coefficients-only mode stops here (no VCE, no fit stats)
if (vce_mode == "vce_none") {

assert(!sol.is_standardized)

return

}

sol.rss = quadcross(sol.resid, w, sol.resid)

// Re-add the means so the VCE of the reported constant is correct
if (sol.report_constant) {

X = sol.K ? X :+ sol.means[2..cols(sol.means)] : J(rows(X), 0, .)

}

// Dispatch to the requested VCE estimator
assert_msg(anyof( ("unadjusted", "robust", "cluster") , S.vcetype), "invalid vcetype: " + S.vcetype)

if (S.vcetype == "unadjusted") {

reghdfe_vce_unadjusted(S, sol, inv_xx, vce_mode)

}

else if (S.vcetype == "robust") {

reghdfe_vce_robust(S, sol, inv_xx, X, w, vce_mode)

}

else {

reghdfe_vce_cluster(S, sol, inv_xx, X, w, vce_mode)

}

// Wald F statistic (excluding the reported constant)
W = .

if (sol.K) {

idx = 1..sol.K

inv_V = invsym(sol.V[idx, idx])

if (diag0cnt(inv_V)) {

if (S.verbose > -1) printf("{txt}warning: missing F statistic; dropped variables due to collinearity or too few clusters\n")

W = .

}

else {

W = sol.b[idx]' * inv_V * sol.b[idx] / sol.df_m

if (missing(W) & S.verbose > -1) printf("{txt}warning: missing F statistic\n")

}

}

// A fully-collinear model yields a missing VCE; replace with zeros
if (missing(sol.V)) {

if (S.verbose > 0) printf("{txt} - VCE has missing values, setting it to zeroes (are your regressors all collinear?)\n")

sol.V = J(rows(sol.V), rows(sol.V), 0)

}

// Undo the standardization applied by partial_out()
if (sol.is_standardized) {

assert(rows(sol.stdevs)==1)

assert(sol.K == rows(sol.b) - sol.report_constant)

assert(cols(sol.stdevs) - 1 == rows(sol.b) - sol.report_constant)

// stdevs = (sd_y, sd_x...); the constant (if any) has an implicit sd of 1
stdev_y = sol.stdevs[1]

stdev_x = sol.K ? sol.stdevs[2..cols(sol.stdevs)] : J(1, 0, .)

if (sol.report_constant) stdev_x = stdev_x, 1

stdev_x = stdev_x :/ stdev_y

sol.rss = sol.rss * stdev_y ^ 2

sol.tss_within = sol.tss_within * stdev_y ^ 2

sol.resid = sol.resid * stdev_y

sol.V = sol.V :/ (stdev_x' * stdev_x)

sol.b = sol.b :/ stdev_x'

}

// Goodness-of-fit statistics
used_df_r = sol.N - S.df_a - sol.df_m - S.df_a_nested

sol.r2 = 1 - sol.rss / sol.tss

sol.r2_a = 1 - (sol.rss / used_df_r) / (sol.tss / (sol.N - sol.has_intercept ) )

sol.r2_within = 1 - sol.rss / sol.tss_within

sol.r2_a_within = 1 - (sol.rss / used_df_r) / (sol.tss_within / (used_df_r + sol.rank))

// Gaussian log-likelihoods (full and FE-only models)
sol.ll = - 0.5 * sol.N * (1 + ln(2 * pi()) + ln(sol.rss / sol.N))

sol.ll_0 = - 0.5 * sol.N * (1 + ln(2 * pi()) + ln(sol.tss_within / sol.N))

sol.rmse = used_df_r ? sqrt(sol.rss / used_df_r) : sqrt(sol.rss)

sol.F = W

sol.vcetype = S.vcetype

sol.weight_type = S.weight_type

sol.weight_var = S.weight_var

if (S.verbose > 0) printf("\n")

}
|
|
|
|
|
|
|
|
|
|
|
|
|
// Detect collinear regressors via invsym() on x'x.
// @param xx            K x K cross-product of the regressors
// @param sol           Solution (provides K, indepvar names for warnings)
// @param is_collinear  (output) 1 x K indicator of dropped regressors
// @param verbose       warnings are printed when verbose > -1
// @returns inv(x'x) with zeroed rows/cols for the collinear regressors
`Matrix' reghdfe_rmcoll(`Matrix' xx, `Solution' sol, `RowVector' is_collinear, `Integer' verbose)

{

`Integer' num_dropped, i

`Matrix' inv_xx, alt_inv_xx

`RowVector' ok_index

// No regressors: nothing to drop
if (!sol.K) {

is_collinear = J(1, 0, .)

return(J(0, 0, .))

}

// First pass prefers keeping earlier regressors (explicit order 1..K)
inv_xx = invsym(xx, 1..sol.K)

num_dropped = diag0cnt(inv_xx)

// If the ordered inversion dropped extra variables, fall back to the
// default invsym() ordering
if (num_dropped) {

assert_msg(sol.K > 0 & sol.K == cols(inv_xx))

alt_inv_xx = invsym(xx)

if (num_dropped != diag0cnt(alt_inv_xx)) {

inv_xx = alt_inv_xx

num_dropped = diag0cnt(alt_inv_xx)

}

}

// invsym() zeroes the diagonal entry of each dropped regressor
is_collinear = !diagonal(inv_xx)'

assert(cols(is_collinear) == sol.K)

// Warn about each omitted regressor by its display name
ok_index = selectindex(sol.indepvar_status:==0)

for (i=1; i<=sol.K; i++) {

if (is_collinear[i] & verbose>-1) {

printf("{txt}note: {res}%s{txt} omitted because of collinearity\n", sol.fullindepvars[ok_index[i]])

}

}

return(inv_xx)

}
|
|
|
|
|
// Remove the collinear regressors flagged by reghdfe_rmcoll() from every
// related object: the data (split into y and X), the cross-products, the
// column stats, and the indepvar bookkeeping in <sol>.
// On exit: y = depvar column, X = surviving regressors, sol.K updated,
// and dropped regressors get indepvar_status code 3.
`Void' reghdfe_trim_collinear(`RowVector' is_collinear, `Solution' sol,

`Matrix' xx, `Matrix' inv_xx, `Vector' xy,

`Vector' y, `Matrix' X)

{

`RowVector' ok_index, collinear_index, extended_is_collinear

`Integer' num_collinear

// Split sol.data into y (col 1) and the non-collinear X columns;
// swap() releases sol.data without copying
y = sol.data[., 1]

swap(X=., sol.data)

X = select(X, (0, !is_collinear))

sol.K = cols(X)

// Trim the cross-products to the surviving regressors
ok_index = selectindex(!is_collinear)

xx = xx[ok_index, ok_index]

inv_xx = inv_xx[ok_index, ok_index]

xy = xy[ok_index]

assert_msg(cols(xx) == sol.K)

assert_msg(cols(inv_xx) == sol.K)

// Column stats include the depvar, so prepend a 0 (never collinear)
extended_is_collinear = 0, is_collinear

sol.norm2 = select(sol.norm2, !extended_is_collinear)

sol.stdevs = select(sol.stdevs, !extended_is_collinear)

sol.means = select(sol.means, !extended_is_collinear)

// Mark the dropped variables with status code 3 (omitted: collinear)
num_collinear = sum(extended_is_collinear)

ok_index = selectindex(sol.indepvar_status:==0)

collinear_index = select(ok_index, extended_is_collinear)

if (num_collinear) sol.indepvar_status[collinear_index] = J(1, num_collinear, 3)

// The depvar itself must never be flagged
assert_msg(sol.indepvar_status[1] == 0)

}
|
|
|
|
|
|
|
|
|
|
|
// Extend the solution with the recovered constant term: append the implied
// intercept b0 to sol.b, and grow xx and inv_xx by one row/column so the
// VCE of the constant can be computed.
// @param sol     Solution (reads .means, updates .b)
// @param xx      K x K cross-product; becomes (K+1) x (K+1)
// @param inv_xx  inverse of xx; becomes (K+1) x (K+1) via the standard
//                bordered/block-inverse formulas
// @param N       effective number of observations (or sum of weights)
`Void' reghdfe_extend_b_and_xx(`Solution' sol, `Matrix' xx, `Matrix' inv_xx, `Integer' N)

{

`RowVector' means_x, side

`Real' b0, corner

// Means of the regressors (sol.means = (mean_y, mean_x...))
means_x = sol.K ? sol.means[2..cols(sol.means)] : J(1, 0, .)

// Implied intercept: b0 = mean(y) - mean(x) * b
b0 = sol.means[1] - means_x * sol.b

sol.b = sol.b \ b0

// Border xx with the constant's row/col: x'1 = N*mean(x), 1'1 = N
corner = N

side = N * means_x

xx = (xx , side' \ side , corner)

// Border inv_xx using the block-inverse formula for the same bordering
corner = (1 / N) + means_x * inv_xx * means_x'

side = - means_x * inv_xx

inv_xx = (inv_xx , side' \ side , corner)

}
|
|
|
|
|
// Homoskedastic (unadjusted) VCE: V = (rss/N) * q * inv(x'x), where q is
// N/df_r for the small-sample adjustment or N/(N-1) in asymptotic mode.
`Void' reghdfe_vce_unadjusted(`FixedEffects' S, `Solution' sol, `Matrix' inv_xx, `String' vce_mode)
{
	`Integer'				dof_adj

	if (S.verbose > 0) {
		printf("{txt} - Small-sample-adjustment: q = N / (N-df_m-df_a) = %g / (%g - %g - %g) = %g\n", sol.N, sol.N, sol.rank, S.df_a, sol.N / sol.df_r )
	}

	// Pick the adjustment factor in a single conditional expression
	dof_adj = (vce_mode == "vce_asymptotic") ? sol.N / (sol.N-1) : sol.N / sol.df_r
	sol.V = (sol.rss / sol.N) * dof_adj * inv_xx
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Heteroskedasticity-robust (sandwich) VCE: V = D * (X' diag(w*) X) * D * q,
// where D = inv(x'x), w* are squared (weighted) residuals, and q is the
// small-sample (or asymptotic) degrees-of-freedom adjustment.
// NOTE: <w> is repurposed as the squared-residual weights and cleared on exit.
`Void' reghdfe_vce_robust(`FixedEffects' S,

`Solution' sol,

`Matrix' D,

`Matrix' X,

`Variable' w,

`String' vce_mode)

{

`Matrix' M

`Integer' dof_adj

if (S.verbose > 0) printf("{txt}# Estimating Robust Variance-Covariance Matrix of the Estimators (VCE)\n\n")

if (S.verbose > 0) printf("{txt} - VCE type: {res}%s{txt}\n", S.vcetype)

if (S.verbose > 0) printf("{txt} - Weight type: {res}%s{txt}\n", S.weight_type=="" ? "<none>" : S.weight_type)

// Build the squared-residual weights; the formula depends on weight type
if (rows(S.true_weights)) {

assert(S.weight_type=="aweight")

w = (sol.resid :* w) :^ 2 :/ S.true_weights

}

else if (S.weight_type=="") {

w = sol.resid :^ 2

}

else if (S.weight_type=="fweight") {

w = sol.resid :^ 2 :* w

}

else if (S.weight_type=="aweight" | S.weight_type=="pweight") {

w = (sol.resid :* w) :^ 2

}

// Small-sample q = N/(N-df_m-df_a); asymptotic mode uses N/(N-1)
dof_adj = sol.N / (sol.N - S.df_a - sol.df_m)

if (vce_mode == "vce_asymptotic") dof_adj = sol.N / (sol.N - 1)

if (S.verbose > 0 & vce_mode != "vce_asymptotic") {

printf("{txt} - Small-sample-adjustment: q = N / (N-df_m-df_a) = %g / (%g - %g - %g) = %g\n", sol.N, sol.N, sol.df_m, S.df_a, dof_adj )

}

// Meat of the sandwich; the extra "1" column covers the reported constant
M = sol.report_constant ? quadcross(X, 1, w, X, 1) : quadcross(X, w, X)

sol.V = D * M * D * dof_adj

// Release the (large) weight vector
w = J(0, 0, .)

}
|
|
|
|
|
// Computes the cluster-robust VCE (one-way up to 10-way clustering) and stores
// it in sol.V, together with e(df_r) and the cluster-count metadata.
// Multi-way clustering follows Cameron, Gelbach & Miller: the "meat" is the
// signed (inclusion-exclusion) sum of one-way cluster meats over every
// nonempty subset of the cluster variables.
//
// Arguments:
//   S        : model object (cluster variables, absorbed factors, verbosity)
//   sol      : solution object; receives V, df_r, N_clust, clustervars, etc.
//   D        : "bread" matrix of the sandwich estimator
//   X        : regressor matrix
//   w        : weight variable; clobbered in place (becomes the score weights)
//   vce_mode : "vce_asymptotic" disables the small-sample adjustment
`Void' reghdfe_vce_cluster(`FixedEffects' S,
                           `Solution' sol,
                           `Matrix' D,
                           `Matrix' X,
                           `Variable' w,
                           `String' vce_mode)
{
	`Matrix' M
	`Integer' dof_adj, N_clust, df_r, nested_adj
	`Integer' Q, q, g, sign, i, j
	`FactorPointers' FPlist
	`FactorPointer' FP
	`Varlist' vars
	`String' var, var_with_spaces
	`Boolean' clustervar_is_absvar, required_fix
	`Matrix' tuples
	`RowVector' tuple
	`RowVector' N_clust_list
	`Matrix' joined_levels
	`Integer' Msize

	// Score weights e_i * w_i; not squared here because the cluster meat
	// squares the within-cluster score sums
	w = sol.resid :* w
	Msize = sol.K + sol.report_constant

	vars = S.clustervars
	Q = cols(vars)
	if (S.verbose > 0) {
		printf("{txt}# Estimating Cluster Robust Variance-Covariance Matrix of the Estimators (VCE)\n\n")
		printf("{txt} - VCE type: {res}%s{txt} (%g-way clustering)\n", S.vcetype, Q)
		printf("{txt} - Cluster variables: {res}%s{txt}\n", invtokens(vars))
		printf("{txt} - Weight type: {res}%s{txt}\n", S.weight_type=="" ? "<none>" : S.weight_type)
	}

	assert_msg(1 <= Q & Q <= 10)

	// Build (or reuse) a Factor for each cluster variable; a cluster variable
	// that is also an absorbed variable reuses the existing Factor object
	FPlist = J(1, Q, NULL)
	N_clust_list = J(1, Q, .)
	for (q=1; q<=Q; q++) {
		var = vars[q]
		clustervar_is_absvar = 0
		for (g=1; g<=S.G; g++) {
			if (invtokens(S.factors[g].varlist, "#") == var) {
				clustervar_is_absvar = 1
				FP = &(S.factors[g])
				break
			}
		}
		var_with_spaces = subinstr(var, "#", " ")
		if (!clustervar_is_absvar) FP = &(factor(var_with_spaces, S.sample, ., "", ., ., ., 0))
		N_clust_list[q] = (*FP).num_levels
		if (S.verbose > 0) printf("{txt} - {res}%s{txt} has {res}%g{txt} levels\n", var, N_clust_list[q])
		FPlist[q] = FP
	}

	// Inclusion-exclusion over all nonempty subsets of the Q cluster vars:
	// odd-sized subsets add their meat, even-sized subsets subtract it
	if (S.verbose > 0) printf("{txt} - Computing the 'meat' of the VCE\n")
	M = J(Msize, Msize, 0)
	tuples = .
	for (q=1; q<=Q; q++) {
		tuples = reghdfe_choose_n_k(Q, q, tuples)
		sign = mod(q, 2) ? 1 : -1
		for (j=1; j<=rows(tuples); j++) {
			tuple = tuples[j, .]
			if (S.verbose > 0) printf("{txt}    - Level %g/%g; sublevel %g/%g; M = M %s ClusterVCE(%s)\n", q, Q, j, rows(tuples), sign > 0 ? "+" : "-" , invtokens(strofreal(tuple)))
			if (q==1) {
				// Single cluster variable: use its Factor directly
				assert(tuple==j)
				FP = FPlist[j]
			}
			else if (q==2) {
				// Pairwise intersection: join_factors() is the fast path
				FP = &join_factors( *FPlist[tuple[1]] , *FPlist[tuple[2]] , ., ., 1)
			}
			else {
				// Higher-order intersections: refactor the joined level ids
				joined_levels = (*FPlist[tuple[1]]).levels
				for (i=2; i<=cols(tuple); i++) {
					joined_levels = joined_levels, (*FPlist[tuple[i]]).levels
				}
				FP = &_factor(joined_levels, ., ., "", ., ., ., 0)
			}
			M = M + sign * reghdfe_vce_cluster_meat(FP, X, w, Msize, sol.report_constant)
		}
	}

	// The small-sample adjustment is based on the smallest cluster count
	N_clust = min(N_clust_list)

	// One extra df adjustment when an absorbed FE is nested in a cluster var
	nested_adj = S.df_a_nested > 0

	dof_adj = (sol.N - 1) / (sol.N - nested_adj - sol.df_m-S.df_a) * N_clust / (N_clust - 1)
	if (vce_mode == "vce_asymptotic") dof_adj = N_clust / (N_clust - 1)
	if (S.verbose > 0) {
		printf("{txt} - Small-sample-adjustment: q = (%g - 1) / (%g - %g) * %g / (%g - 1) = %g\n", sol.N, sol.N, sol.df_m+S.df_a+nested_adj, N_clust, N_clust, dof_adj)
	}
	sol.V = D * M * D * dof_adj
	if (Q > 1) {
		// Multi-way VCEs need not be positive semi-definite; truncate negative
		// eigenvalues if needed.
		// BUGFIX: use sol.report_constant (consistent with Msize and the meat
		// computed above) instead of S.solution.report_constant, which may
		// refer to a different Solution object than the -sol- argument.
		required_fix = reghdfe_fix_psd(sol.V, sol.report_constant)
		if (required_fix) printf("{txt}Warning: VCV matrix was non-positive semi-definite; adjustment from Cameron, Gelbach & Miller applied.\n")
	}

	// e(df_r) = min(N_clust - 1, unclustered df_r)
	assert(!missing(sol.df_r))
	df_r = N_clust - 1
	if (sol.df_r > df_r) {
		sol.df_r = df_r
	}
	else if (S.verbose > 0) {
		printf("{txt} - Unclustered df_r (N - df_m - df_a = %g) are {it:lower} than clustered df_r (N_clust-1 = %g)\n", sol.df_r, df_r)
		printf("{txt} Thus, we set e(df_r) as the former.\n")
		printf("{txt} This breaks consistency with areg but ensures internal consistency\n")
		printf("{txt} between vce(robust) and vce(cluster _n)\n")
	}

	// Store cluster metadata for e()
	sol.N_clust = N_clust
	sol.N_clust_list = N_clust_list
	sol.num_clusters = Q
	sol.clustervars = S.clustervars
	if (S.verbose > 0) printf("\n")
}
|
|
|
|
|
|
|
// Builds the "meat" of a one-way cluster-robust VCE: the sum over clusters of
// (X_c' e_c)(X_c' e_c)', where e holds the weighted residual scores.
//
//   FP              : pointer to the Factor that defines the cluster groups
//   X               : regressors (may be empty when only a constant is reported)
//   resid           : weighted residual scores
//   Msize           : dimension of the meat (K + report_constant)
//   report_constant : whether a constant column is appended to X
`Matrix' reghdfe_vce_cluster_meat(`FactorPointer' FP,
                                  `Variables' X,
                                  `Variable' resid,
                                  `Integer' Msize,
                                  `Boolean' report_constant)
{
	`Integer' cluster, num_clusters
	`Variables' sorted_X
	`Variable' sorted_e
	`Matrix' Xc
	`Vector' ec
	`RowVector' score
	`Matrix' meat

	// Nothing to accumulate without regressors or a constant
	if (cols(X)==0 & !report_constant) return(J(0,0,0))

	num_clusters = (*FP).num_levels

	// Sort by cluster so that each cluster occupies a contiguous panel
	(*FP).panelsetup()
	sorted_X = (*FP).sort(X)
	sorted_e = (*FP).sort(resid)

	meat = J(Msize, Msize, 0)

	if (!cols(X)) {
		// Constant-only case: each cluster's score is just its residual sum
		assert(report_constant)
		for (cluster=1; cluster<=num_clusters; cluster++) {
			ec = panelsubmatrix(sorted_e, cluster, (*FP).info)
			meat = meat + quadsum(ec) ^ 2
		}
	}
	else {
		for (cluster=1; cluster<=num_clusters; cluster++) {
			Xc = panelsubmatrix(sorted_X, cluster, (*FP).info)
			ec = panelsubmatrix(sorted_e, cluster, (*FP).info)
			// score = e_c' [X_c, cons?]; quadcross appends the constant if asked
			score = quadcross(1, 0, ec, Xc, report_constant)
			meat = meat + quadcross(score, score)
		}
	}

	return(meat)
}
|
|
|
|
|
|
|
|
|
// Returns all C(n, k) combinations of {1..n} taken k at a time (one per row,
// first column strictly increasing), built incrementally from the C(n, k-1)
// combinations produced by the previous call (prev_ans).
// For k==1 it simply returns the column 1::n.
`Matrix' reghdfe_choose_n_k(`Integer' n, `Integer' k, `Matrix' prev_ans)
{
	`RowVector' base
	`Integer' num_prev
	`Matrix' expanded
	`Matrix' kept

	base = 1::n
	if (k==1) return(base)

	// Sanity-check the shape of the previous answer
	num_prev = rows(prev_ans)
	assert(num_prev==comb(n, k-1))
	assert(cols(prev_ans)==k-1)

	// Prefix every (k-1)-combination with each possible first element...
	expanded = (base # J(num_prev, 1, 1)) , J(n, 1, prev_ans)
	// ...then keep only the rows where the new prefix is strictly smaller
	kept = select(expanded, expanded[., 1] :< expanded[., 2])
	return(kept)
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Forces V to be positive semi-definite by zeroing out negative eigenvalues
// (the Cameron, Gelbach & Miller adjustment used with multi-way clustering).
// Mutates V in place and returns 1 if an adjustment was required, else 0.
// When report_constant==1, the last row/column is _cons and the remaining
// coefficient block is fixed separately so it stays internally consistent.
`Boolean' function reghdfe_fix_psd(`Matrix' V, `Boolean' report_constant) {
	`Matrix' eigvecs
	`Vector' eigvals
	`Boolean' needed_fix
	`Matrix' V_nocons
	`RowVector' noncons_idx

	// The eigendecomposition requires symmetry; force it, then verify
	if (!issymmetric(V)) _makesymmetric(V)
	if (!issymmetric(V)) exit(error(505))

	if (report_constant) {
		// Save the coefficient block (everything except _cons)
		noncons_idx = 1..cols(V)-1
		V_nocons = V[noncons_idx, noncons_idx]
	}

	symeigensystem(V, eigvecs=., eigvals=.)
	needed_fix = min(eigvals) < 0

	if (needed_fix) {
		// Truncate negative eigenvalues at zero and rebuild V
		eigvals = eigvals :* (eigvals :>= 0)
		V = quadcross(eigvecs', eigvals, eigvecs')

		if (report_constant) {
			// Fix the no-constant block on its own, then splice it back in
			(void) reghdfe_fix_psd(V_nocons, 0)
			V[noncons_idx, noncons_idx] = V_nocons
		}
	}

	return(needed_fix)
}
|
|
|
| end
|
|
|
|
|
|
|
|
|
|
|
| mata:
|
|
|
// Estimates the degrees of freedom absorbed by the fixed effects.
// Builds the "extended" coefficient list (one entry per FE intercept and per
// FE slope), then applies the requested DoF adjustments:
//   "clusters"             : FEs nested in a cluster variable are fully redundant
//   (always)               : one redundant intercept per extra set of FE intercepts
//   "firstpair"/"pairwise" : mobility-group (connected components) counts
//   "continuous"           : redundant slope coefficients
// Results are stored in S.doflist_K / S.doflist_M (and the *_is_exact /
// *_is_nested flags), and summarized in S.df_a_initial / df_a_redundant / df_a.
//
//   dofadjustments : list of adjustment keywords to apply
//   groupvar       : if nonempty, variable name used to save the first
//                    mobility-group identifier back to the dataset
`Void' estimate_dof(`FixedEffects' S, `StringRowVector' dofadjustments, `Varname' groupvar)
{
	`Integer' g, n, i
	`Boolean' skip_next
	`RowVector' intercept_index
	`Integer' indiv_fe_i, indiv_fe_g

	if (S.verbose > 0) printf("\n{txt}# Estimating degrees-of-freedom absorbed by the fixed effects\n\n")

	S.df_a_nested = 0

	// BUGFIX: initialize to missing so the uniqueness assert below is
	// meaningful (an unassigned Mata variable is a 0x0 matrix, not missing)
	indiv_fe_i = indiv_fe_g = .

	// Build doflist_K: one entry (= number of levels) per intercept/slope
	S.doflist_K = J(1, 0, .)
	S.G_extended = 0
	for (g=1; g<=S.G; g++) {
		n = S.factors[g].has_intercept + S.factors[g].num_slopes
		S.G_extended = S.G_extended + n
		S.doflist_K = S.doflist_K , J(1, n, S.factors[g].num_levels)
	}

	// Redundant-coefficient counts start at zero
	S.doflist_M = S.doflist_M_is_exact = S.doflist_M_is_nested = J(1, S.G_extended, 0)

	// intercept_index[i] = g if extended entry i is the intercept of factor g
	// (individual FEs excluded); 0 otherwise. Also locate the individual FE.
	intercept_index = J(1, S.G_extended, 0)
	for (g=i=1; g<=S.G; g++) {
		if (S.factors[g].has_intercept & !S.factors[g].is_individual_fe) intercept_index[i] = g
		if (S.factors[g].is_individual_fe) {
			// At most one individual FE is supported
			assert(indiv_fe_i == .)
			indiv_fe_i = i
			indiv_fe_g = g
		}
		n = S.factors[g].has_intercept + S.factors[g].num_slopes
		i = i + n
	}

	// (1) FEs nested within cluster variables
	assert(0 <= S.num_clusters & S.num_clusters <= 10)
	if (S.num_clusters > 0 & anyof(dofadjustments, "clusters")) {
		dof_update_nested(S, intercept_index)
	}

	// (2) After the first set of FE intercepts, each additional set has at
	// least one redundant coefficient (and the first set too, if any FE was
	// found nested within a cluster)
	n = cols(selectindex(intercept_index))
	if ((n > 1) | (n > 0 & S.df_a_nested > 0)) {
		if (S.verbose > 0) printf("{txt} - there is at least one redundant coef. for every set of FE intercepts after the first one\n")
		skip_next = S.df_a_nested == 0
		for (i=1; i<=S.G_extended; i++) {
			g = intercept_index[i]
			if (!g) continue
			if (skip_next) {
				// The first surviving FE intercept keeps all its coefficients
				skip_next = 0
				continue
			}
			S.doflist_M[i] = 1
		}
	}

	// The redundancy count of the first remaining FE intercept is exact
	for (i=1; i<=S.G_extended; i++) {
		g = intercept_index[i]
		if (!g) continue
		S.doflist_M_is_exact[i] = 1
		break
	}

	// (3) Mobility groups between pairs of FE intercepts
	if (anyof(dofadjustments, "firstpair") | anyof(dofadjustments, "pairwise")) {
		dof_update_mobility_group(S, intercept_index, dofadjustments, groupvar)
	}

	// (4) Redundant slope coefficients
	if (anyof(dofadjustments, "continuous")) {
		dof_update_cvars(S)
	}

	// (5) Individual FE (group-level regressions)
	if (S.individual_id != "") {
		dof_update_individual_fe(S, indiv_fe_i, indiv_fe_g)
	}

	// Summaries used downstream for small-sample adjustments
	S.df_a_initial = sum(S.doflist_K)
	S.df_a_redundant = sum(S.doflist_M)
	S.df_a = S.df_a_initial - S.df_a_redundant
	S.dofadjustments = dofadjustments
	if (S.verbose > 0) printf("\n")
}
|
|
|
|
|
// Flags FE intercepts that are equal to, or nested within, a cluster variable:
// such FEs do not reduce the degrees of freedom, so their full level count is
// marked redundant. Updates S.doflist_M, S.doflist_M_is_exact,
// S.doflist_M_is_nested and S.df_a_nested, and zeroes the corresponding
// entries of -intercept_index- so later adjustments skip them.
`Void' dof_update_nested(`FixedEffects' S,
                         `RowVector' intercept_index)
{
	`Integer' i, g, i_cluster
	`String' absvar, clustervar, vars
	`DataCol' cluster_data
	`Factor' F

	// Case 1: the absorbed variable IS one of the cluster variables
	for (i=1; i<=S.G_extended; i++) {
		g = intercept_index[i]
		if (!g) continue
		absvar = invtokens(S.factors[g].ivars, "#")
		if (anyof(S.clustervars, absvar)) {
			// All of its levels are redundant
			S.doflist_M[i] = S.doflist_K[i]
			S.df_a_nested = S.df_a_nested + S.doflist_M[i]
			S.doflist_M_is_exact[i] = S.doflist_M_is_nested[i] = 1
			intercept_index[i] = 0
			if (S.verbose > 0) printf("{txt}   - categorical variable {res}%s{txt} is also a cluster variable, so it doesn't reduce DoF\n", absvar)
		}
	}

	// Case 2: the absorbed variable is nested within a cluster variable
	for (i_cluster=1; i_cluster<= S.num_clusters; i_cluster++) {
		cluster_data = .
		// Stop early once no candidate FE intercepts remain
		if (!any(intercept_index)) break
		for (i=1; i<=S.G_extended; i++) {
			g = intercept_index[i]
			if (!g | S.doflist_M_is_exact[i]) continue
			absvar = invtokens(S.factors[g].ivars, "#")
			clustervar = S.clustervars[i_cluster]

			// Lazily load this cluster variable's data, at most once per cluster
			if (cluster_data == .) {
				if (strpos(clustervar, "#")) {
					// Interacted cluster variable: build joint group ids via factor()
					vars = subinstr(clustervar, "#", " ", .)
					F = factor(vars, S.sample, ., "", 0, 0, ., 0)
					cluster_data = F.levels
					F = Factor() // release the Factor's memory; only levels are needed
				}
				else {
					cluster_data = __fload_data(clustervar, S.sample, 0)
				}
			}

			if (S.factors[g].nested_within(cluster_data)) {
				// Nested: the FE's full level count is redundant
				S.doflist_M[i] = S.doflist_K[i]
				S.doflist_M_is_exact[i] = S.doflist_M_is_nested[i] = 1
				S.df_a_nested = S.df_a_nested + S.doflist_M[i]
				intercept_index[i] = 0
				if (S.verbose > 0) printf("{txt}   - categorical variable {res}%s{txt} is nested within cluster variable {res}%s{txt}, so it doesn't reduce DoF\n", absvar, clustervar)
			}
		}
	}
	cluster_data = . // free memory
}
|
|
|
|
|
// Counts redundant FE intercepts via mobility groups (connected components of
// the bipartite graph between two FE intercepts): the number of components is
// the number of redundant coefficients attributed to the second FE of the
// pair. With "firstpair" only the first pair is examined; otherwise all pairs
// ("pairwise"). Optionally saves the first pair's component identifier to the
// dataset under -groupvar-.
`Void' dof_update_mobility_group(`FixedEffects' S,
                                 `RowVector' intercept_index,
                                 `StringRowVector' dofadjustments,
                                 `Varname' groupvar)
{
	`BipartiteGraph' bg
	`Integer' bg_verbose
	`Integer' pair_count
	`Integer' i, j
	`Integer' g, h
	`Boolean' save_subgraph
	`String' grouplabel
	`Integer' m

	bg = BipartiteGraph()
	bg_verbose = max(( S.verbose - 1 , 0 )) // quieter verbosity for the helper
	pair_count = 0

	// Loop over ordered pairs (i, j), i < j, of surviving FE intercepts
	for (i=1; i<=S.G_extended; i++) {
		g = intercept_index[i]
		if (!g) continue

		for (j=i+1; j<=S.G_extended; j++) {
			h = intercept_index[j]
			if (!h) continue

			bg.init(&(S.factors[g]), &(S.factors[h]), bg_verbose)
			++pair_count
			// Only the first pair's component ids can be saved to the dataset
			save_subgraph = (pair_count == 1) & (groupvar != "")
			m = bg.init_zigzag(save_subgraph) // m = number of mobility groups
			if (S.verbose > 0) printf("{txt}   - mobility groups between FE intercepts #%f and #%f: {res}%f\n", g, h, m)
			if (save_subgraph) {
				if (S.verbose > 0) printf("{txt}   - Saving identifier for the first mobility group: {res}%s\n", groupvar)
				st_store(S.sample, st_addvar("long", groupvar), bg.subgraph_id)
				grouplabel = sprintf("Mobility group between %s and %s", invtokens(S.factors[g].ivars, "#"), invtokens(S.factors[h].ivars, "#"))
				st_varlabel(groupvar, grouplabel)
			}
			// Keep the largest redundancy count found so far for FE j
			S.doflist_M[j] = max(( S.doflist_M[j] , m ))
			// Only the first pair's count is exact
			if (pair_count==1) S.doflist_M_is_exact[j] = 1
			if (pair_count & anyof(dofadjustments, "firstpair")) break
		}
		if (pair_count & anyof(dofadjustments, "firstpair")) break
	}
}
|
|
|
|
|
// Counts redundant slope coefficients for FEs interacted with continuous
// variables: within an FE level, a slope is redundant when its regressor is
// constant in that level (if the FE also has an intercept) or identically
// zero (if it does not). Writes the per-slope counts into S.doflist_M.
`Void' dof_update_cvars(`FixedEffects' S)
{
	`Integer' i, j, g, k
	`Boolean' has_int
	`Matrix' tmp, sorted_x
	`RowVector' zeros, results

	// -i- walks the extended (intercept+slope) coefficient list and must stay
	// aligned with the layout built in estimate_dof()
	for (i=g=1; g<=S.G; g++) {
		has_int = S.factors[g].has_intercept
		if (has_int) i++
		k = S.factors[g].num_slopes
		if (!k) continue
		if (S.factors[g].is_individual_fe) {
			// BUGFIX: still advance -i- past this factor's slope entries;
			// skipping the advance would misalign the index for all
			// subsequent factors whenever an individual FE has slopes
			i = i + k
			continue
		}
		assert(k == cols(S.factors[g].unsorted_x))
		results = J(1, k, 0)

		// Tolerance for treating a slope regressor as constant/zero
		zeros = J(1, k, 1e-6)

		sorted_x = S.factors[g].sort(S.factors[g].unsorted_x)

		// Test each slope variable for redundancy, level by level
		for (j = 1; j <= S.factors[g].num_levels; j++) {
			tmp = colminmax(panelsubmatrix(sorted_x, j, S.factors[g].info))
			if (has_int) {
				// With an intercept, a constant slope var (max==min) is collinear
				results = results + ((tmp[2, .] - tmp[1, .]) :<= zeros)
			}
			else {
				// Without an intercept, only an all-zero slope var is redundant
				results = results + (colsum(abs(tmp)) :<= zeros)
			}
		}

		if (sum(results)) {
			if (has_int & (S.verbose > 0)) printf("{txt}   - the slopes in the FE #%f are constant for {res}%f{txt} levels, which don't reduce DoF\n", g, sum(results))
			if (!has_int & (S.verbose > 0)) printf("{txt}   - the slopes in the FE #%f are zero for {res}%f{txt} levels, which don't reduce DoF\n", g, sum(results))
			S.doflist_M[i..i+S.factors[g].num_slopes-1] = results
		}
		i = i + S.factors[g].num_slopes
	}
}
|
|
|
// Placeholder for DoF adjustments specific to the individual fixed effect
// (group-level regressions). Currently it only verifies that no redundancy
// has been recorded for that FE's entry; no actual adjustment is applied here.
//   i : position of the individual FE in the extended coefficient list
//   g : position of the individual FE within S.factors (currently unused)
`Void' dof_update_individual_fe(`FixedEffects' S,
                                `Integer' i,
                                `Integer' g)
{
	// The individual FE must not have been flagged as redundant elsewhere
	assert(S.doflist_M[i] == 0)
}
|
|
|
| end
|
|
|
| mata:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Iterative least-squares solver: minimizes ||Ax - b|| where the (weighted)
// design matrix A is only accessed through A.mult() and A.mult_transpose().
// The structure follows the LSMR algorithm (Fong & Saunders): Golub-Kahan
// bidiagonalization plus two layers of Givens rotations, with cheap running
// estimates of ||r||, ||A'r||, ||A||, ||x|| and cond(A).
//
// Optional arguments default to: maxiter=1e5, atol=btol=1e-6, conlim=1e8,
// verbose=0. Returns a Solution with:
//   sol.data            = b - A x   (residuals)
//   sol.alphas          = x         (estimated fixed effects)
//   sol.iteration_count
//   sol.stop_code: 1 trivial (b==0), 2 rel. residual, 3 normal equation,
//                  11 ill-conditioned, 12 missing values, 13 maxiter reached
`Solution' lsmr(`FixedEffects' A,
                `Matrix' b,
                `Integer' miniter,
              | `Integer' maxiter,
                `Real' atol,
                `Real' btol,
                `Real' conlim,
                `Integer' verbose)
{
	`Integer' m, n, iter
	`Vector' x
	`Solution' sol

	// Bidiagonalization and rotation scalars (Greek names mirror the paper)
	`Real' α, β, ρ, θ, ζ, α_bar, ρ_bar, θ_bar, ζ_bar
	`Real' last_ρ, last_ρ_bar
	`Vector' u, v, h, h_bar
	`Matrix' P, P_bar, P_tilde

	// Quantities used to estimate norms/residuals without forming them
	`Real' β_hat, β_umlaut, ρ_dot, θ_tilde, β_dot, β_tilde, τ_tilde, τ_dot, ρ_tilde
	`Real' last_ζ, last_θ_tilde
	`Real' norm_A
	`Real' norm_r
	`Real' norm_At_r
	`Real' norm_x
	`Real' cond_A
	`Real' norm_b
	`Real' max_ρ_bar, min_ρ_bar, norm_A2
	`Real' ρ_temp
	`Real' rel_res, rtol, rel_neq

	`String' msg

	assert(A.weights != .)

	m = A.num_rows()
	n = A.num_cols()
	sol = Solution()

	// Defaults for optional arguments
	if (miniter == .) miniter = 0
	if (maxiter == .) maxiter = 1e5
	if (atol == .) atol = 1e-6
	if (btol == .) btol = 1e-6
	if (conlim == .) conlim = 1e8
	if (verbose == .) verbose = 0

	// Clamp tolerances at machine precision
	if (atol < epsilon(1)) atol = epsilon(1)
	if (btol < epsilon(1)) btol = epsilon(1)
	if (conlim == 0 | conlim > 1 / epsilon(1)) conlim = 1 / epsilon(1)

	// Sanity checks on inputs
	assert_msg(m == rows(b), "Matrix A and vector b not conformable")
	assert_msg(inrange(miniter, 0, 1e10), "miniter outside valid range: [0, 1e+10]")
	assert_msg(inrange(maxiter, 1, 1e10), "maxiter outside valid range: [1, 1e+10]")
	assert_msg(miniter <= maxiter, "miniter should be below maxiter")
	assert_msg(inrange(atol, epsilon(1), 1e-1), "atol outside valid range: [1e-16, 0.1]")
	assert_msg(inrange(btol, epsilon(1), 1e-1), "btol outside valid range: [1e-16, 0.1]")
	assert_msg(inrange(conlim, 1, 1e+16), "conlim outside valid range: [1, 1e+16]")
	assert_in(verbose, (-1, 0, 1, 2, 3, 4), "-verbose- must be an integer between -1 and 4")

	if (verbose>1) logging_lsmr("start")

	// Initialize the bidiagonalization: βu = b ; αv = A'u
	// (note: v is normalized with unit weights here, unlike in lsqr())
	u = normalize(b, A.weights, β=., "Failed βu = b")
	v = normalize(A.mult_transpose(u), 1, α=., "Failed αv = A'u")

	// Initialize solution and auxiliary vectors
	h = v
	h_bar = x = J(n, 1, 0)
	α_bar = α
	ζ_bar = α * β
	ρ = ρ_bar = 1
	P_bar = I(2)

	// Initialize the scalars used to estimate ||r||
	β_umlaut = β
	β_dot = τ_tilde = θ_tilde = ζ = 0
	ρ_dot = 1
	ρ_tilde = 1
	norm_b = vector_norm(b, A.weights, "Failed ||b||")

	// Trivial case: b is identically zero => x = 0, residual = b
	if (norm_b == 0) {
		if (verbose>1) logging_lsmr("end")
		if (verbose>0) printf("{txt} - LSMR converged in 0 iterations (trivial solution: 'b' is zero)\n")
		sol.stop_code = 1
		sol.alphas = J(n, 1, 0)
		sol.data = b
		sol.iteration_count = 0
		return(sol)
	}

	// Initialize the running estimates of ||A|| and cond(A)
	norm_A = cond_A = norm_x = -1
	norm_A2 = α ^ 2
	max_ρ_bar = 0
	min_ρ_bar= 1e100

	for (iter=1; iter<=maxiter; iter++) {

		// Save scalars that get overwritten but are still needed below
		last_ρ = ρ
		last_ρ_bar = ρ_bar

		last_ζ = ζ
		last_θ_tilde = θ_tilde

		// One Golub-Kahan step: βu = Av - αu ; αv = A'u - βv
		assert_msg(!hasmissing(v), sprintf("iter !%g", iter))
		u = normalize(A.mult(v) - α * u, A.weights, β=., "Failed βu = Av - αu")

		v = normalize(A.mult_transpose(u) - β * v, 1, α=., "Failed αv = A'u - βv")

		// First rotation P: eliminates β from the bidiagonal system
		P = givens_rotation(α_bar, β, ρ=.)

		assign(P * (α \ 0), α_bar=., θ=.)

		// Second rotation P_bar
		assign(P_bar * (ρ \ 0), ρ_temp=., θ_bar=.)

		P_bar = givens_rotation(ρ_temp, θ, ρ_bar=.)

		assign(P_bar * (0 \ ζ_bar), ζ_bar=., ζ=.)

		// Update the direction vectors and the solution estimate
		h_bar = h - (θ_bar * ρ) / (last_ρ * last_ρ_bar) * h_bar
		x = x + ζ / (ρ * ρ_bar) * h_bar
		h = v - (θ / ρ) * h

		// Recurrences for the cheap ||r|| estimate
		assign(P * (0 \ β_umlaut), β_umlaut=., β_hat=.)

		if (iter > 1) {
			// Third rotation P_tilde, only defined from the second iteration on
			P_tilde = givens_rotation(ρ_dot, θ_bar, ρ_tilde=.)

			assign(P_tilde * (ρ_bar \ 0), ρ_dot=., θ_tilde=.)

			assign(P_tilde * (β_hat \ β_dot), β_dot=., β_tilde=.)
		}

		// Forward substitution for τ_tilde and τ_dot
		τ_tilde = (last_ζ - last_θ_tilde * τ_tilde) / ρ_tilde
		τ_dot = (ζ - θ_tilde * τ_tilde) / ρ_dot

		// Estimated residual norm ||r||
		norm_r = hypot(β_dot - τ_dot, β_umlaut)

		// Running estimate of ||A|| (accumulated from the α and β scalars)
		norm_A2 = norm_A2 + β ^ 2
		norm_A = sqrt(norm_A2)
		norm_A2 = norm_A2 + α ^ 2

		// Estimate of cond(A) from the extreme ρ_bar values seen so far
		max_ρ_bar = max((max_ρ_bar, last_ρ_bar))
		if (iter > 1) min_ρ_bar = min((min_ρ_bar, last_ρ_bar))
		cond_A = max((max_ρ_bar, ρ_temp)) / min((min_ρ_bar, ρ_temp))

		// Estimate of ||A'r||
		norm_At_r = abs(ζ_bar)

		// ||x|| (computed exactly; unit weights)
		norm_x = vector_norm(x, 1, "Failed ||x||")

		// Convergence diagnostics
		rel_res = norm_r / norm_b
		rtol = btol + atol * norm_A * norm_x / norm_b
		rel_neq = norm_At_r / norm_A / norm_x
		if (verbose>1) logging_lsmr("iter", iter, rel_res, rtol, rel_neq, atol, 1/cond_A, 1/conlim, norm_A)

		// Don't evaluate stopping rules before -miniter- iterations
		if (iter < miniter) {
			continue
		}

		// Stopping rules (see stop codes in the header comment)
		if (rel_res == . | rel_neq == .) {
			msg = sprintf("{err}@ LSMR stopped; missing values in rel_res or rel_neq\n")
			sol.stop_code = 12
			break
		}
		else if (rel_res <= rtol) {
			msg = sprintf("{txt} - LSMR converged in %g iterations (criterion: relative residual)\n", iter)
			sol.stop_code = 2
			break
		}
		else if (rel_neq <= atol) {
			msg = sprintf("{txt} - LSMR converged in %g iterations (criterion: normal equation)\n", iter)
			sol.stop_code = 3
			break
		}
		else if (cond_A >= conlim) {
			msg = sprintf("{err}@ LSMR stopped; A is ill-conditioned\n")
			sol.stop_code = 11
			break
		}
	}

	// Loop exhausted without triggering a stopping rule
	if (sol.stop_code == 0) {
		assert_msg(iter == maxiter+1)
		iter = maxiter
		msg = sprintf("{err}@ LSMR stopped; maximum number of iterations reached\n")
		sol.stop_code = 13
	}

	if (verbose>1) logging_lsmr("end")
	if (verbose>0 | (verbose>-1 & sol.stop_code>=10)) printf(msg)

	// Residuals, fixed-effect estimates, and bookkeeping
	sol.data = b - A.mult(x)
	swap(sol.alphas, x)
	sol.iteration_count = iter
	return(sol)
}
|
|
|
|
|
// Prints the LSMR iteration log as an SMCL table.
//   step = "start" : prints the table header
//   step = "end"   : prints the bottom border
//   otherwise      : prints one row with the three stopping-rule diagnostics
//                    (each colored {res} once its threshold is met) and ||A||
// Note: -col- accumulates the {col} targets; the assignments inside the
// sprintf() argument lists are intentional and order-dependent.
`Void' logging_lsmr(`String' step, | `Integer' iter, `Real' rel_res, `Real' rtol, `Real' rel_neq, `Real' atol, `Real' invcond, `Real' ctol, `Real' norm_A)
{
	`Integer' col
	`String' table_row, color1, color2, color3

	if (step == "start") {
		printf("\n{txt}## Solving linear system via LSMR:\n")
		printf("   {txt}{c TLC}{hline 5}{c TT}{hline 22}{c TT}{hline 22}{c TT}{hline 22}{c TT}{hline 10}{c TRC}\n")
		printf("   {txt}{c |}{space 5}{c |}        Rule 1        {c |}        Rule 2        {c |}        Rule 3        {c |}{space 10}{c |}\n")
		printf("   {txt}{c |}  i  {c LT}{hline 12}{c TT}{hline 9}{c +}{hline 12}{c TT}{hline 9}{c +}{hline 12}{c TT}{hline 9}{c RT}   ||A||  {c |}\n")
		printf("   {txt}{c |}{space 5}{c |}  rel.res.  {c |}  rtol   {c |}  rel.neq.  {c |}  atol   {c |} 1/cond(A)  {c |}  ctol   {c |}{space 10}{c |}\n")
		printf("   {txt}{c LT}{hline 5}{c +}{hline 12}{c +}{hline 9}{c +}{hline 12}{c +}{hline 9}{c +}{hline 12}{c +}{hline 9}{c +}{hline 10}{c RT}\n")
	}
	else if (step == "end") {
		printf("   {txt}{c BLC}{hline 5}{c BT}{hline 12}{c BT}{hline 9}{c BT}{hline 12}{c BT}{hline 9}{c BT}{hline 12}{c BT}{hline 9}{c BT}{hline 10}{c BRC}\n")
	}
	else {
		col = 0
		// Highlight each diagnostic once it satisfies its stopping rule
		color1 = (rel_res <= rtol) ? "{res}" : "{txt}"
		color2 = (rel_neq <= atol) ? "{res}" : "{txt}"
		color3 = (invcond <= ctol) ? "{res}" : "{txt}"
		table_row = sprintf("{txt}   {c |} %3.0f {col %3.0f}{c |}", iter, col = col + 8)

		// Rule 1: relative residual vs rtol
		table_row = table_row + sprintf("%s%11.0e{txt}{col %3.0f}{c |}", color1, rel_res, col = col + 13)
		table_row = table_row + sprintf("%6.0e{col %3.0f}{c |}", rtol, col = col + 10)

		// Rule 2: normal-equation residual vs atol
		table_row = table_row + sprintf("%s%11.0e{txt}{col %3.0f}{c |}", color2, rel_neq, col = col + 13)
		table_row = table_row + sprintf("%6.0e{col %3.0f}{c |}", atol, col = col + 10)

		// Rule 3: inverse condition number vs ctol
		table_row = table_row + sprintf("%s%11.0e{txt}{col %3.0f}{c |}", color3, invcond, col = col + 13)
		table_row = table_row + sprintf("%6.0e{col %3.0f}{c |}", ctol, col = col + 10)

		// Estimated ||A||
		table_row = table_row + sprintf("%9.1f {col %3.0f}{c |}", norm_A, col = col + 11)
		printf(table_row + "\n")
	}

	displayflush()
}
|
|
|
| end
|
| mata:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Iterative least-squares solver: minimizes ||Ax - b|| with A accessed only
// through A.mult() and A.mult_transpose(). The structure follows the LSQR
// algorithm (Paige & Saunders): Golub-Kahan bidiagonalization plus Givens
// rotations, with running estimates of ||r||, ||A'r||, ||A||, ||x||, cond(A).
//
// Optional arguments default to: maxiter=1e5, atol=btol=ctol=1e-6, verbose=0.
// Returns a Solution with:
//   sol.data            = b - A x   (residuals)
//   sol.alphas          = x         (estimated fixed effects)
//   sol.iteration_count
//   sol.stop_code: 1 trivial (b==0), 2 rel. residual, 3 normal equation,
//                  11 ill-conditioned, 12 missing values, 13 maxiter reached
`Solution' lsqr(`FixedEffects' A,
                `Matrix' b,
                `Integer' miniter,
              | `Integer' maxiter,
                `Real' atol,
                `Real' btol,
                `Real' ctol,
                `Integer' verbose)
{
	`Integer' m, n, iter
	`Vector' x
	`Solution' sol

	// Bidiagonalization and rotation scalars (Greek names follow the paper;
	// several declared scalars are currently unused placeholders)
	`Real' α, β, ρ, ρ1, θ, ρ_bar, φ, φ_bar, φ_bar1, ζ, ζ_bar
	`Real' rhs, δ, γ_bar, γ, cs2, sn2, extra_var
	`Vector' u, v, w
	`Matrix' P, P_bar

	// Running norm/condition estimates
	`Real' norm_r, norm_A, norm_b, norm_x, norm_r1, norm_r2, norm_dd, norm_xx, cond_A
	`Real' norm_Ar, res1, res2, r1sq, test1, test2
	`Real' ρ_temp, w_ρ, cs1, sn1, ψ, τ, t1, t2, test3, rtol

	`String' msg

	assert(A.weights != .)

	m = A.num_rows()
	n = A.num_cols()
	sol = Solution()

	// Defaults for optional arguments
	if (miniter == .) miniter = 0
	if (maxiter == .) maxiter = 1e5
	if (atol == .) atol = 1e-6
	if (btol == .) btol = 1e-6
	if (ctol == .) ctol = 1e-6
	if (verbose == .) verbose = 0

	// Clamp tolerances at machine precision
	if (atol < epsilon(1)) atol = epsilon(1)
	if (btol < epsilon(1)) btol = epsilon(1)
	if (ctol < epsilon(1)) ctol = epsilon(1)

	// Sanity checks on inputs
	assert_msg(m == rows(b), "Matrix A and vector b not conformable")
	assert_msg(inrange(miniter, 0, 1e10), "miniter outside valid range: [0, 1e+10]")
	assert_msg(inrange(maxiter, 1, 1e10), "maxiter outside valid range: [1, 1e+10]")
	assert_msg(miniter <= maxiter, "miniter should be below maxiter")
	assert_msg(inrange(atol, 1e-16, 1e-1), "atol outside valid range: [1e-16, 0.1]")
	assert_msg(inrange(btol, 1e-16, 1e-1), "btol outside valid range: [1e-16, 0.1]")
	assert_msg(inrange(ctol, 1e-16, 1e-1), "ctol outside valid range: [1e-16, 0.1]")
	assert_in(verbose, (-1, 0, 1, 2, 3, 4), "-verbose- must be an integer between -1 and 4")
	if (verbose>1) logging_lsqr("start")

	// Initialize the bidiagonalization: βu = b ; αv = A'u
	// (note: unlike lsmr(), v is normalized with A.weights here)
	u = normalize(b, A.weights, β=., "Failed βu = b")
	v = normalize(A.mult_transpose(u), A.weights, α=., "Failed αv = A'u")

	// Trivial case: b is identically zero => x = 0, residual = b
	norm_b = vector_norm(b, A.weights, "Failed ||b||")
	if (norm_b == 0) {
		if (verbose>1) logging_lsqr("end")
		if (verbose>0) printf("{txt} - LSQR converged in 0 iterations (trivial solution: 'b' is zero)\n")
		sol.stop_code = 1
		sol.alphas = J(n, 1, 0)
		sol.data = b
		sol.iteration_count = 0
		return(sol)
	}

	// Initialize solution, search direction, and norm estimates
	x = J(n, 1, 0)
	w = v
	norm_A = cond_A = norm_dd = res2 = norm_x = norm_xx = sn2 = ζ = 0
	cs2 = -1
	norm_Ar = α * β
	ρ_bar = α
	φ_bar = norm_b = norm_r = norm_r1 = norm_r2 = β

	for (iter=1; iter<=maxiter; iter++) {

		// One Golub-Kahan step: βu = Av - αu ; αv = A'u - βv
		u = normalize(A.mult(v) - α * u, A.weights, β=., "Failed βu = Av - αu")
		v = normalize(A.mult_transpose(u) - β * v, A.weights, α=., "Failed αv = A'u - βv")

		// Givens rotation P eliminating β from the bidiagonal system
		// (ρ computed first, then overwritten identically by givens_rotation)
		φ_bar1 = φ_bar
		ρ = hypot(ρ_bar, β)

		P = givens_rotation(ρ_bar, β, ρ=.)

		assign(P * (0 \ -α), θ = ., ρ_bar = .)

		assign(P * (φ_bar1 \ 0), φ = ., φ_bar = .)
		assign(P * (φ \ 0) , τ = ., extra_var = .)

		// Step sizes for updating x and the search direction w
		t1 = φ / ρ
		t2 =-θ / ρ

		x = t1 * w + x
		w = t2 * w + v

		// Second rotation used to estimate ||x||
		γ_bar = -ρ
		P_bar = givens_rotation(γ_bar, θ, γ = .)

		assign(P_bar * (0 \ -ρ), δ = ., γ_bar = .)

		rhs = φ - (δ * ζ)
		ζ_bar = rhs / γ_bar

		// Estimated ||x||
		norm_x = sqrt(norm_xx + (ζ_bar^2))
		γ = hypot(γ_bar, θ)

		ζ = rhs / γ
		norm_xx = norm_xx + ζ^2

		// Accumulate ||D|| used in the cond(A) estimate
		// NOTE(review): LSQR's reference implementations accumulate the
		// SQUARED norm of w/ρ here — confirm norm() vs norm()^2 is intended
		w_ρ = w * (1/ρ)
		norm_dd = norm_dd + norm(w_ρ)

		// Running estimate of ||A||
		norm_A = sqrt(norm_A^2 + α^2 + β^2)

		// Condition number and residual estimates
		cond_A = norm_A * sqrt(norm_dd)
		res1 = φ_bar ^ 2
		norm_Ar = α * abs(τ)
		norm_r = sqrt(res1)

		r1sq = norm_r^2
		norm_r1 = sqrt(r1sq)
		norm_r2 = norm_r

		// Convergence diagnostics: relative residual, normal equation, 1/cond
		test1 = norm_r / norm_b
		test2 = norm_Ar / (norm_A * norm_r)
		test3 = 1 / cond_A
		t1 = test1 / (1 + norm_A*norm_x/norm_b)
		rtol = btol + atol*norm_A*norm_x/norm_b
		if (verbose>1) logging_lsqr("iter", iter, test1, rtol, test2, atol, test3, ctol, norm_A)

		// Don't evaluate stopping rules before -miniter- iterations
		if (iter < miniter) {
			continue
		}

		// Stopping rules (see stop codes in the header comment)
		if (test1 == . | test2 == .) {
			msg = sprintf("{err}@ LSQR stopped; missing values in rel_res or rel_neq\n")
			sol.stop_code = 12
			break
		}
		else if (test1 <= rtol) {
			msg = sprintf("{txt} - LSQR converged in %g iterations (criterion: relative residual)\n", iter)
			sol.stop_code = 2
			break
		}
		else if (test2 <= atol) {
			msg = sprintf("{txt} - LSQR converged in %g iterations (criterion: normal equation)\n", iter)
			sol.stop_code = 3
			break
		}
		else if (test3 <= ctol) {
			msg = sprintf("{err}@ LSQR stopped; A is ill-conditioned\n")
			sol.stop_code = 11
			break
		}
	}
	// Loop exhausted without triggering a stopping rule
	if (sol.stop_code == 0) {
		assert_msg(iter == maxiter+1)
		iter = maxiter
		msg = sprintf("{err}@ LSQR stopped; maximum number of iterations reached\n")
		sol.stop_code = 13
	}
	if (verbose>1) logging_lsqr("end")
	if (verbose>0 | (verbose>-1 & sol.stop_code>=10)) printf(msg)

	// Residuals, fixed-effect estimates, and bookkeeping
	sol.data = b - A.mult(x)
	swap(sol.alphas, x)
	sol.iteration_count = iter
	return(sol)
}
|
|
|
|
|
// Prints the LSQR iteration log as an SMCL table (same layout as logging_lsmr).
//   step = "start" : header;  step = "end" : bottom border;  otherwise one row
//   with the three stopping-rule diagnostics (colored {res} once met) and ||A||.
// Note: -col- accumulates the {col} targets; the assignments inside the
// sprintf() argument lists are intentional and order-dependent.
`Void' logging_lsqr(`String' step, | `Integer' iter, `Real' test1, `Real' rtol, `Real' test2, `Real' atol, `Real' test3, `Real' ctol, `Real' norm_A){
	`Integer' col
	`String' table_row, color1, color2, color3

	if (step == "start") {
		printf("\n{txt}## Solving linear system via LSQR:\n")
		printf("   {txt}{c TLC}{hline 5}{c TT}{hline 22}{c TT}{hline 22}{c TT}{hline 22}{c TT}{hline 10}{c TRC}\n")
		printf("   {txt}{c |}{space 5}{c |}        Rule 1        {c |}        Rule 2        {c |}        Rule 3        {c |}{space 10}{c |}\n")
		printf("   {txt}{c |}  i  {c LT}{hline 12}{c TT}{hline 9}{c +}{hline 12}{c TT}{hline 9}{c +}{hline 12}{c TT}{hline 9}{c RT}   ||A||  {c |}\n")
		printf("   {txt}{c |}{space 5}{c |}  rel.res.  {c |}  rtol   {c |}  rel.neq.  {c |}  atol   {c |} 1/cond(A)  {c |}  ctol   {c |}{space 10}{c |}\n")
		printf("   {txt}{c LT}{hline 5}{c +}{hline 12}{c +}{hline 9}{c +}{hline 12}{c +}{hline 9}{c +}{hline 12}{c +}{hline 9}{c +}{hline 10}{c RT}\n")
	}
	else if (step == "end") {
		printf("   {txt}{c BLC}{hline 5}{c BT}{hline 12}{c BT}{hline 9}{c BT}{hline 12}{c BT}{hline 9}{c BT}{hline 12}{c BT}{hline 9}{c BT}{hline 10}{c BRC}\n")
	}
	else {
		col = 0
		// Highlight each diagnostic once it satisfies its stopping rule
		color1 = (test1 <= rtol) ? "{res}" : "{txt}"
		color2 = (test2 <= atol) ? "{res}" : "{txt}"
		color3 = (test3 <= ctol) ? "{res}" : "{txt}"
		table_row = sprintf("{txt}   {c |} %3.0f {col %3.0f}{c |}", iter, col = col + 8)

		// Rule 1: relative residual vs rtol
		table_row = table_row + sprintf("%s%11.0e{txt}{col %3.0f}{c |}", color1, test1, col = col + 13)
		table_row = table_row + sprintf("%6.0e{col %3.0f}{c |}", rtol, col = col + 10)

		// Rule 2: normal-equation residual vs atol
		table_row = table_row + sprintf("%s%11.0e{txt}{col %3.0f}{c |}", color2, test2, col = col + 13)
		table_row = table_row + sprintf("%6.0e{col %3.0f}{c |}", atol, col = col + 10)

		// Rule 3: inverse condition number vs ctol
		table_row = table_row + sprintf("%s%11.0e{txt}{col %3.0f}{c |}", color3, test3, col = col + 13)
		table_row = table_row + sprintf("%6.0e{col %3.0f}{c |}", ctol, col = col + 10)

		// Estimated ||A||
		table_row = table_row + sprintf("%9.1f {col %3.0f}{c |}", norm_A, col = col + 11)
		printf(table_row + "\n")
	}
	displayflush()
}
|
|
|
| end
|
|
|
|
|
|
|
|
|
|
|
|
|
| mata:
|
|
|
// Dispatches the within transformation through the selected acceleration
// scheme (fun_accel) and transform (fun_transform), demeaning the columns of
// -data- either all at once or one at a time depending on -poolsize-.
// The demeaned data ends up in S.solution.data (via swap, to save memory).
`Void' map_solver(`FixedEffects' S,
                  `Matrix' data,
                  `Integer' poolsize,
                  `FunctionP' fun_accel,
                  `FunctionP' fun_transform)
{
	`Integer' col

	S.solution.converged = 0

	// Shortcut: a single intercept-only FE with one level (e.g. -noabsorb-)
	// reduces to subtracting the weighted mean; no iteration required
	if (S.G==1 & S.factors[1].method=="none" & !S.factors[1].num_slopes & !(S.storing_alphas & S.factors[1].save_fe)) {
		assert(S.factors[1].num_levels == 1)
		data = data :- mean(data, S.factors[1].weights)
		S.solution.converged = 1
		S.solution.iteration_count = 1
	}
	else if (poolsize >= cols(data)) {
		// All columns fit in one pool: demean them jointly
		data = (*fun_accel)(S, data, fun_transform)
	}
	else {
		// Demean column by column, resetting the convergence flag each pass
		assert(S.solution.converged == 0)
		for (col=1; col<=S.solution.K; col++) {
			S.solution.converged = 0
			data[., col] = (*fun_accel)(S, data[., col], fun_transform)
		}
	}

	swap(S.solution.data, data)
}
|
|
|
| end
|
|
|
|
|
|
|
|
|
| mata:
|
|
|
// Testing-only accelerator: plain alternating projections implemented
// directly through panelmean(), bypassing the transform callback T.
// Each sweep subtracts, for every FE in turn, the group means of the
// current residuals; iterates until check_convergence() is satisfied.
`Variables' function accelerate_test(`FixedEffects' S, `Variables' y, `FunctionP' T) {
	`Integer' it, fe
	`Variables' resid
	`FE_Factor' fac
	pragma unset resid

	assert_msg(S.solution.converged == 0, "solution had already converged")

	for (it=1; it<=S.maxiter; it++) {
		// One full sweep over the fixed effects
		resid = y
		for (fe=1; fe<=S.G; fe++) {
			fac = S.factors[fe]
			resid = resid - fac.panelmean(resid, 1)[fac.levels, .]
		}
		if (check_convergence(S, it, resid, y)) break
		y = resid
	}
	return(resid)
}
|
|
|
|
|
|
|
// No acceleration: repeatedly apply the projection transform T to the data
// until check_convergence() is satisfied or S.maxiter is reached.
`Variables' function accelerate_none(`FixedEffects' S, `Variables' y, `FunctionP' T) {
	`Integer' it
	`Variables' projected
	pragma unset projected

	assert_msg(S.solution.converged == 0, "solution had already converged")

	it = 1
	while (it <= S.maxiter) {
		(*T)(S, y, projected)
		if (check_convergence(S, it, projected, y)) break
		y = projected
		it++
	}
	return(projected)
}
|
|
|
|
|
|
|
// Hybrid scheme: a handful of plain (unaccelerated) warm-up sweeps with the
// given transform, then hand over to conjugate gradient using the symmetric
// Kaczmarz transform.
`Variables' function accelerate_hybrid(`FixedEffects' S, `Variables' y, `FunctionP' T) {
	`Integer' it, num_warmup
	`Variables' r
	pragma unset r

	num_warmup = 6
	assert_msg(S.solution.converged == 0, "solution had already converged")

	// Warm-up: simple fixed-point iterations without acceleration
	for (it=1; it<=num_warmup; it++) {
		(*T)(S, y, r)
		if (check_convergence(S, it, r, y)) break
		y = r
	}

	// Switch the transform and let CG finish the job
	T = &transform_sym_kaczmarz()

	return(accelerate_cg(S, y, T))
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Conjugate-gradient acceleration of the projection transform T, applied
// column-wise to y (the Hestenes-Stiefel stopping rule tracks a rolling
// window of recent SSR improvements against the remaining improvement
// potential). Returns the demeaned data.
`Variables' function accelerate_cg(`FixedEffects' S, `Variables' y, `FunctionP' T) {

	`Integer' iter, d, Q
	`Variables' r, u, v
	`RowVector' alpha, beta, ssr, ssr_old, improvement_potential
	`Matrix' recent_ssr
	pragma unset r
	pragma unset v

	assert_msg(S.solution.converged == 0, "solution had already converged")
	Q = cols(y)

	// Width of the rolling window of recent SSR improvements used by the
	// "hestenes" convergence check
	d = 1

	// Total (weighted) sum of squares available to be removed
	improvement_potential = weighted_quadcolsum(S, y, y)
	recent_ssr = J(d, Q, .)

	// Initial residual r and its (weighted) SSR; u is the first search direction
	(*T)(S, y, r, 1)
	ssr = weighted_quadcolsum(S, r, r)
	u = r

	for (iter=1; iter<=S.maxiter; iter++) {
		// Apply the transform to the search direction and compute the step size
		(*T)(S, u, v, 1)
		alpha = safe_divide( ssr , weighted_quadcolsum(S, u, v) )
		recent_ssr[1 + mod(iter-1, d), .] = alpha :* ssr
		improvement_potential = improvement_potential - alpha :* ssr
		y = y - alpha :* u

		// Update the residual and compute the CG correction factor beta
		r = r - alpha :* v
		ssr_old = ssr
		if (S.verbose>=5) r // debug dump of the residual
		ssr = weighted_quadcolsum(S, r, r)
		beta = safe_divide( ssr , ssr_old)
		u = r + beta :* u

		// Hestenes-Stiefel rule: compare recent improvements to the potential
		if ( check_convergence(S, iter, colsum(recent_ssr), improvement_potential, "hestenes") ) {
			break
		}
	}
	return(y)
}
|
|
|
|
|
|
|
// Steepest-descent acceleration: each iteration projects y, then moves along
// the projection direction with a line-search step t. When storing fixed
// effects, the same step t is applied to the accumulated alphas.
`Variables' function accelerate_sd(`FixedEffects' S, `Variables' y, `FunctionP' T) {
	`Integer' iter, g
	`Variables' proj
	`RowVector' t
	pragma unset proj

	assert_msg(S.solution.converged == 0, "solution had already converged")

	for (iter=1; iter<=S.maxiter; iter++) {
		(*T)(S, y, proj, 1)
		if (check_convergence(S, iter, y-proj, y)) break
		// Line-search step along the projection direction
		t = safe_divide( weighted_quadcolsum(S, y, proj) , weighted_quadcolsum(S, proj, proj) )
		// Occasionally (10% of draws) fall back to a plain unaccelerated step;
		// NOTE(review): presumably this randomization avoids stalling/cycling
		// of the line search — confirm the original intent
		if (uniform(1,1)<0.1) t = 1

		y = y - t :* proj

		// Accumulate the fixed-effect estimates with the same step size
		if (S.storing_alphas) {
			for (g=1; g<=S.G; g++) {
				if (S.factors[g].save_fe) {
					S.factors[g].alphas = S.factors[g].alphas + t :* S.factors[g].tmp_alphas
				}
			}
		}
	}

	// Free the temporary alpha buffers
	if (S.storing_alphas) {
		for (g=1; g<=S.G; g++) {
			if (S.factors[g].save_fe) {
				S.factors[g].tmp_alphas = J(0, 0, .)
			}
		}
	}

	return(y-proj)
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Aitken (delta-squared) acceleration of the fixed-point transform T.
// Runs plain fixed-point iterations, and every S.accel_freq iterations (after a
// warm-up of S.accel_start) extrapolates using the last three iterates.
//   S : HDFE solver object (maxiter, accel_start, accel_freq, weights)
//   y : data matrix to demean (running iterate)
//   T : pointer to a transform function
`Variables' function accelerate_aitken(`FixedEffects' S, `Variables' y, `FunctionP' T) {
	`Integer'	iter
	`Variables'	resid, y_old, delta_sq
	`Boolean'	accelerate
	`RowVector'	t
	pragma unset resid	// filled in by (*T)()

	assert_msg(S.solution.converged == 0, "solution had already converged")
	y_old = J(rows(y), cols(y), .)	// previous iterate; missing on iteration 1

	for (iter=1; iter<=S.maxiter; iter++) {

		(*T)(S, y, resid)
		// Only extrapolate after the warm-up period and on every accel_freq-th iter
		accelerate = iter>=S.accel_start & !mod(iter,S.accel_freq)

		if (accelerate) {
			// Second difference of the last three iterates (y_old, y, resid)
			delta_sq = resid - 2 * y + y_old

			// Aitken extrapolation coefficient per column
			t = safe_divide( weighted_quadcolsum(S, (resid - y) , delta_sq) , weighted_quadcolsum(S, delta_sq , delta_sq) )
			resid = resid - t :* (resid - y)
		}

		if (check_convergence(S, iter, resid, y)) break
		y_old = y	// keep the pre-update iterate for the next delta-squared
		y = resid
	}
	return(resid)
}
|
|
|
|
|
|
|
// Shared convergence test for all acceleration schemes.
// Computes an update error, accumulates a streak counter in S.solution.converged,
// and returns 1 once the error has been at/below S.tolerance for S.min_ok checks.
// Also handles all progress/verbose output and aborts after S.maxiter (if S.abort).
//   iter          : current iteration number
//   y_new, y_old  : meaning depends on -method-:
//                     "vectors"  -> new and old iterates (compared via reldif)
//                     "hestenes" -> recent SSR gains and remaining improvement potential
//   method        : optional; defaults to "vectors"
`Boolean' check_convergence(`FixedEffects' S, `Integer' iter, `Variables' y_new, `Variables' y_old,| `String' method) {
	`Boolean'	is_last_iter
	`Real'		update_error
	`Real'		eps_threshold

	if (args()<5) method = "vectors"

	// With a single FE and no alpha recovery, one projection is exact:
	// declare convergence immediately
	if (S.G==1 & !S.storing_alphas) {
		update_error = 0
	}
	else if (method=="vectors") {
		// Weighted mean relative difference between iterates; max over columns
		update_error = max(mean(reldif(y_new, y_old), S.weights))
	}
	else if (method=="hestenes") {
		// Ratio of recent SSR gains to remaining improvement potential.
		// Tiny values are clamped to zero to dodge numerical-precision noise.
		eps_threshold = 1e-15
		if (S.verbose > 0 & all(y_new :< eps_threshold)) {
			printf("{txt} note: eps. is very close to zero (%g), so hestenes assumed convergence to avoid numerical precision errors\n", min(y_new))
		}
		update_error = safe_divide(edittozerotol(y_new, eps_threshold ),
		                           editmissing(y_old, epsilon(1)),
		                           epsilon(1) )
		update_error = sqrt(max(update_error))
	}
	else {
		exit(error(100))	// unknown method
	}

	assert_msg(!missing(update_error), "update error is missing")

	// Count consecutive(ish) passes below tolerance; requiring S.min_ok of them
	// protects against a single lucky iteration
	S.solution.converged = S.solution.converged + (update_error <= S.tolerance)
	is_last_iter = iter==S.maxiter

	if (S.solution.converged >= S.min_ok) {
		S.solution.iteration_count = max((iter, S.solution.iteration_count))
		S.solution.accuracy = max((S.solution.accuracy, update_error))
		if (S.verbose==1) printf("{txt} converged in %g iterations (last error =%3.1e)\n", iter, update_error)
		if (S.verbose>1) printf("\n{txt} - Converged in %g iterations (last error =%3.1e)\n", iter, update_error)
	}
	else if (is_last_iter & S.abort) {
		printf("\n{err}convergence not achieved in %g iterations (last error=%e); try increasing maxiter() or decreasing tol().\n", S.maxiter, update_error)
		exit(430)
	}
	else {
		// Progress output: a dot every iteration (mod(iter,1) is always 0,
		// kept as-is to preserve the throttling structure), plus the scaled
		// error every 100 iterations
		if ((S.verbose>=2 & S.verbose<=3 & mod(iter,1)==0) | (S.verbose==1 & mod(iter,1)==0)) {
			printf("{res}.{txt}")
			displayflush()
		}
		if ( (S.verbose>=2 & S.verbose<=3 & mod(iter,100)==0) | (S.verbose==1 & mod(iter,100)==0) ) {
			printf("{txt}%9.1f\n      ", update_error/S.tolerance)
		}

		if (S.verbose==4 & method!="hestenes") printf("{txt}    iter={res}%4.0f{txt}\tupdate_error={res}%-9.6e\n", iter, update_error)
		if (S.verbose==4 & method=="hestenes") printf("{txt}    iter={res}%4.0f{txt}\tupdate_error={res}%-9.6e  {txt}norm(ssr)={res}%g\n", iter, update_error, norm(y_new))

		// Highest verbosity: dump both iterates
		if (S.verbose>=5) {
			printf("\n{txt}    iter={res}%4.0f{txt}\tupdate_error={res}%-9.6e{txt}\tmethod={res}%s\n", iter, update_error, method)
			"old:"
			y_old
			"new:"
			y_new
		}
	}
	return(S.solution.converged >= S.min_ok)
}
|
|
|
|
|
|
|
// Column-wise (optionally weighted) sum of x :* y, i.e. the diagonal of the
// weighted cross-product, returned as a rowvector.
// Two equivalent formulas are available per case; the cheaper one is picked
// based on the number of columns (quadcross of the full matrices is O(k^2)
// in columns, so it only wins for narrow inputs).
`Matrix' weighted_quadcolsum(`FixedEffects' S, `Matrix' x, `Matrix' y) {
	`Integer'	num_cols

	num_cols = cols(x)

	if (S.has_weights) {
		// Weighted: either a single quad-precision cross with the weight
		// vector, or the diagonal of the full weighted cross-product
		if (num_cols < 14) return(quadcross(x :* y, S.weights)')
		return(diagonal(quadcross(x, S.weights, y))')
	}

	// Unweighted: diagonal of the quad cross-product for narrow inputs,
	// plain (double-precision) colsum otherwise
	if (num_cols < 25) return(diagonal(quadcross(x, y))')
	return(colsum(x :* y))
}
|
|
|
|
|
|
|
|
|
// Relative residual error: ||resid - true_resid|| / ||y||
// Used to benchmark solver accuracy against a known true residual.
`Real' reghdfe_rre_benchmark(`Vector' resid, `Vector' true_resid, `Real' norm_y) {
	return(norm(resid - true_resid) / norm_y)
}
|
|
|
| end
|
|
|
|
|
|
|
|
|
| mata:
|
|
|
// Cimmino transform: averages the projections onto each fixed effect
// (in contrast to Kaczmarz, which applies them sequentially).
//   S        : HDFE object (S.G = number of FEs; S.project_one_fe() projects onto one)
//   y        : data columns to transform
//   ans      : output (overwritten): mean projection if get_proj, else residual
//   get_proj : optional, default 0 -> return y minus the mean projection
`Void' function transform_cimmino(`FixedEffects' S, `Variables' y, `Variables' ans,| `Boolean' get_proj) {
	`Integer' g
	if (args()<4 | get_proj==.) get_proj = 0
	assert_boolean(get_proj)

	// Unrolled fast paths for the common projection cases G=2 and G=3
	if (get_proj & S.G == 2) {
		ans = (S.project_one_fe(y, 1) + S.project_one_fe(y, 2)) / S.G
	}
	else if (get_proj & S.G == 3) {
		ans = (S.project_one_fe(y, 1) + S.project_one_fe(y, 2) + S.project_one_fe(y, 3)) / S.G
	}
	else {
		// General case: accumulate all G projections, then average
		ans = S.project_one_fe(y, 1)
		for (g=2; g<=S.G; g++) {
			ans = ans + S.project_one_fe(y, g)
		}
		ans = get_proj ? ans / S.G : y - ans / S.G
	}
}
|
|
|
|
|
|
|
// Kaczmarz transform: sequentially projects out each fixed effect in a single
// forward sweep (g = 1, ..., G), each projection applied to the running residual.
//   S        : HDFE object (S.G fixed effects; S.project_one_fe() projects onto one)
//   y        : data columns to transform
//   ans      : output (overwritten): residual after the sweep, or the
//              projection y - residual when get_proj is set
//   get_proj : optional, default 0
`Void' function transform_kaczmarz(`FixedEffects' S, `Variables' y, `Variables' ans,| `Boolean' get_proj) {
	`Integer' fe

	if (args()<4 | get_proj==.) get_proj = 0

	// Forward sweep; starting from ans=y, the first pass is y - P_1(y)
	ans = y
	for (fe=1; fe<=S.G; fe++) {
		ans = ans - S.project_one_fe(ans, fe)
	}

	if (get_proj) ans = y - ans
}
|
|
|
|
|
|
|
// Symmetric Kaczmarz with a freshly drawn random ordering of the fixed effects:
// sweep forward through the random permutation, then backward (the last FE is
// not repeated, since it would be projected twice in a row).
// NOTE: consumes RNG draws, so the iteration path depends on the seed.
//   get_proj : optional, default 0 -> return the residual; else the projection
`Void' function transform_rand_kaczmarz(`FixedEffects' S, `Variables' y, `Variables' ans,| `Boolean' get_proj) {
	`Integer' g
	`Vector' rand
	if (args()<4 | get_proj==.) get_proj = 0
	// Random permutation of 1..G: sort indices by a uniform draw
	rand = sort( ( (1::S.G) , uniform(S.G,1) ) , 2 )[.,1]
	// Forward sweep in the random order
	ans = y - S.project_one_fe(y, rand[1])
	for (g=2; g<=S.G; g++) {
		ans = ans - S.project_one_fe(ans, rand[g])
	}
	// Backward sweep (skipping the last FE) makes the transform symmetric
	for (g=S.G-1; g>=1; g--) {
		ans = ans - S.project_one_fe(ans, rand[g])
	}
	if (get_proj) ans = y - ans
}
|
|
|
|
|
|
|
// Symmetric Kaczmarz transform: a forward sweep over the fixed effects
// (g = 1..G) followed by a backward sweep (g = G-1..1; the last FE is not
// repeated since projecting twice in a row is a no-op). The symmetry makes
// this transform suitable for conjugate-gradient acceleration.
//   get_proj : optional, default 0 -> return the residual; else the projection
`Void' function transform_sym_kaczmarz(`FixedEffects' S, `Variables' y, `Variables' ans,| `Boolean' get_proj) {
	`Integer' fe
	if (args()<4 | get_proj==.) get_proj = 0

	// Forward sweep; starting from ans=y, the first pass is y - P_1(y)
	ans = y
	for (fe=1; fe<=S.G; fe++) {
		ans = ans - S.project_one_fe(ans, fe)
	}

	// Backward sweep, excluding the FE just projected
	for (fe=S.G-1; fe>=1; fe--) {
		ans = ans - S.project_one_fe(ans, fe)
	}

	if (get_proj) ans = y - ans
}
|
|
|
| end
|
|
|
|
|
|
|
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
|
|
| mata:
|
|
|
|
|
|
|
|
|
// Strip every factor of transient/non-serializable members (via each factor's
// cleanup_before_saving() method) so the HDFE object can be written to disk
// and reloaded by the parallel worker processes.
`Void' cleanup_for_parallel(`FixedEffects' HDFE)
{
	`Integer' idx

	if (HDFE.verbose>0) printf("\n{txt}# Cleaning up the HDFE object so it can be saved/loaded from disk\n\n")

	for (idx=1; idx<=HDFE.G; idx++) {
		HDFE.factors[idx].cleanup_before_saving()
	}
}
|
|
|
|
|
|
|
|
|
|
|
// Serialize the HDFE object and split the data matrix into per-worker blocks
// on disk, ahead of launching the parallel worker processes.
//   parallel_dir : directory where the temporary .tmp files are written
//   HDFE         : solver object; saved as data0.tmp, and updated in place with
//                  the number of workers used (parallel_numproc) and the block
//                  sizes (parallel_poolsizes)
//   data         : full data matrix; column blocks saved as data1.tmp, data2.tmp, ...
`Void' save_before_parallel(`String' parallel_dir,
                            `FixedEffects' HDFE,
                            `Matrix' data)
{
	`Integer'	verbose
	`Integer'	parallel_poolsize, parallel_numproc
	`Integer'	num_cols, left_index, right_index, remaining_cols, remaining_workers
	`String'	fn
	`Integer'	fh

	verbose = HDFE.verbose
	num_cols = cols(data)
	// Only meaningful when parallelism was actually requested (or forced)
	assert(HDFE.parallel_maxproc > 1 | HDFE.parallel_force == 1)

	// Preliminary even split, used only for the informational message below
	parallel_poolsize = ceil(num_cols / HDFE.parallel_maxproc)
	if (verbose > 0) printf("\n{txt}## [Parallel] Loading and partialling %g variables using up to %g worker processes\n", num_cols, HDFE.parallel_maxproc)
	if (verbose > 0) printf("{txt}    Each process will work in blocks of %g-%g variables\n", parallel_poolsize-1, parallel_poolsize)
	if (verbose > 0) printf("{txt}    Temporary files will be saved in %s\n", parallel_dir)

	// Save the (already cleaned-up) HDFE object itself as block #0
	mkdir(parallel_dir, 1)
	fn = pathjoin(parallel_dir, "data0.tmp")
	fh = fopen(fn, "w")
	fputmatrix(fh, HDFE)
	fclose(fh)
	if (verbose > 0) printf("{txt}    - HDFE object saved in %s\n", fn)

	HDFE.parallel_poolsizes = J(1, 0, .)	// reset; appended to below

	// Split columns across workers; the pool size is recomputed each pass so
	// the remaining columns stay evenly spread over the remaining workers
	for (left_index=parallel_numproc=1; left_index<=num_cols; parallel_numproc++) {
		remaining_cols = num_cols - left_index + 1
		remaining_workers = HDFE.parallel_maxproc - parallel_numproc + 1
		parallel_poolsize = ceil(remaining_cols / remaining_workers)
		right_index = min((left_index+parallel_poolsize-1, num_cols))

		// Save the column block for this worker (read back by parallel_combine)
		fn = pathjoin(parallel_dir, sprintf("data%f.tmp", parallel_numproc))
		fh = fopen(fn, "w")
		fputmatrix(fh, data[., left_index..right_index])
		fclose(fh)

		if (verbose > 0) printf("{txt}    - Data block #%f with %f cols saved in %s\n", parallel_numproc, right_index-left_index+1, fn)
		left_index = right_index + 1
		HDFE.parallel_numproc = parallel_numproc
		HDFE.parallel_poolsizes = HDFE.parallel_poolsizes , parallel_poolsize
	}
	if (verbose > 0) printf("\n")
	assert(HDFE.parallel_numproc <= HDFE.parallel_maxproc)
}
|
|
|
|
|
|
|
|
|
|
|
// Entry point for a parallel worker process: load the serialized HDFE object
// and one data block from disk, partial out the fixed effects, and overwrite
// the data file with the partialled-out result (read back by parallel_combine).
//   hdfe_fn : path of the saved HDFE object (data0.tmp)
//   data_fn : path of this worker's data block (dataN.tmp); replaced in place
`Void' worker_partial_out(`String' hdfe_fn, `String' data_fn)
{
	`FixedEffects'	HDFE
	`Matrix'		data
	`Integer'		fh

	// Load the solver object
	fh = fopen(hdfe_fn, "r")
	HDFE = fgetmatrix(fh, 1)
	fclose(fh)

	// Load this worker's block of columns
	fh = fopen(data_fn, "r")
	data = fgetmatrix(fh, 1)
	fclose(fh)

	// Run the actual solver; result ends up in HDFE.solution.data
	inner_worker_partial_out(HDFE, data)

	// Replace the input block with the partialled-out data
	unlink(data_fn)
	fh = fopen(data_fn, "w")
	fputmatrix(fh, HDFE.solution.data)
	fclose(fh)
}
|
|
|
|
|
// Core of the worker process: partial out the fixed effects from -data- using
// the technique configured in HDFE ("map", "lsmr", or "lsqr"), storing the
// result in HDFE.solution.data.
//   HDFE : solver object (technique, transform, acceleration, tolerances, ...)
//   data : block of columns to partial out; consumed/overwritten in the process
`Void' inner_worker_partial_out(`FixedEffects' HDFE,
                                `Matrix' data)
{
	`FunctionP'	fun_transform
	`FunctionP'	fun_accel
	`String'	technique
	`String'	transform
	`String'	acceleration
	`Integer'	verbose
	`Integer'	poolsize
	`Solution'	temp_sol
	`Integer'	i
	`Boolean'	solver_failed

	technique = HDFE.technique
	transform = HDFE.transform
	acceleration = HDFE.acceleration
	verbose = HDFE.verbose
	poolsize = HDFE.poolsize

	if (technique == "map") {

		// Dispatch: pick the transform (projection scheme) ...
		if (transform=="cimmino") fun_transform = &transform_cimmino()
		if (transform=="kaczmarz") fun_transform = &transform_kaczmarz()
		if (transform=="symmetric_kaczmarz") fun_transform = &transform_sym_kaczmarz()
		if (transform=="random_kaczmarz") fun_transform = &transform_rand_kaczmarz()

		// ... and the acceleration scheme wrapped around it
		if (acceleration=="test") fun_accel = &accelerate_test()
		if (acceleration=="none") fun_accel = &accelerate_none()
		if (acceleration=="conjugate_gradient") fun_accel = &accelerate_cg()
		if (acceleration=="steepest_descent") fun_accel = &accelerate_sd()
		if (acceleration=="aitken") fun_accel = &accelerate_aitken()
		if (acceleration=="hybrid") fun_accel = &accelerate_hybrid()

		if (verbose>0) printf("{txt}   - Running solver (acceleration={res}%s{txt}, transform={res}%s{txt} tol={res}%-1.0e{txt})\n", acceleration, transform, HDFE.tolerance)
		if (verbose==1) printf("{txt}   - Iterating:")
		if (verbose>1) printf("{txt}      ")

		// Runs the MAP solver; stores results into HDFE.solution
		map_solver(HDFE, data, poolsize, fun_accel, fun_transform)

	}
	else if (technique == "lsmr" | technique == "lsqr") {

		// Krylov-type solvers handle one column at a time
		if (verbose > 0) printf("{txt}   - Partialling out (%s) in a pools up to size %g\n", strupper(technique), poolsize)
		for (i=1; i<=cols(data); i++) {
			if (technique == "lsmr") {
				temp_sol = lsmr(HDFE, data[., i], HDFE.miniter, HDFE.maxiter, HDFE.tolerance, HDFE.tolerance, ., verbose)
			}
			else {
				temp_sol = lsqr(HDFE, data[., i], HDFE.miniter, HDFE.maxiter, HDFE.tolerance, HDFE.tolerance, ., verbose)
			}
			// Stop codes >= 10 signal failure; code 13 is tolerated when
			// abort is disabled (best-effort mode)
			assert(temp_sol.stop_code > 0)
			solver_failed = (temp_sol.stop_code >= 10)
			if (solver_failed) printf("{err}convergence not achieved in %s iterations (stop code=%g); try increasing maxiter() or decreasing tol().\n", strtrim(sprintf("%8.0fc", temp_sol.iteration_count)), temp_sol.stop_code)
			if (solver_failed & temp_sol.stop_code == 13 & HDFE.abort==0) solver_failed = 0
			if (solver_failed) {
				// Blank out the data so no partial results leak, then abort
				data = J(rows(data), cols(data), .)
				exit(430)
			}

			// Store the partialled-out column back in place
			data[., i] = temp_sol.data
		}
		// Hand the result over to the solution without copying
		swap(HDFE.solution.data, data)
	}
	else {
		_assert_abort(90, "ALGORITHM NOT CURRENTLY IMPLEMENTED", 1)
	}

}
|
|
|
|
|
// Reassemble the full partialled-out data matrix from the per-worker blocks
// written to disk (dataN.tmp), filling HDFE.solution.data column block by
// column block, then validate the result.
//   HDFE : solver object holding parallel bookkeeping (parallel_dir,
//          parallel_numproc, parallel_poolsizes) and the target solution
`Void' parallel_combine(`FixedEffects' HDFE)
{
	`Integer'	num_cols, proc, left_index, right_index
	`String'	fn
	`Integer'	fh
	`Matrix'	data_slice

	// Sanity checks against what save_before_parallel() recorded
	num_cols = cols(HDFE.solution.means)
	assert(num_cols == cols(HDFE.solution.norm2))

	assert(cols(HDFE.parallel_poolsizes) == HDFE.parallel_numproc)

	HDFE.solution.data = J(HDFE.N, num_cols, .)

	left_index = 1
	for (proc=1; proc<=HDFE.parallel_numproc; proc++)
	{
		right_index = left_index + HDFE.parallel_poolsizes[proc] - 1

		// BUGFIX: build the filename *before* logging it; previously -fn- was
		// printed while still unset on the first iteration (and stale afterwards)
		fn = pathjoin(HDFE.parallel_dir, sprintf("data%f.tmp", proc))
		if (HDFE.verbose > 0) printf("{txt}   - Loading data block #%f with %f cols from %s\n", proc, right_index-left_index+1, fn)

		fh = fopen(fn, "r")
		data_slice = fgetmatrix(fh, 1)
		fclose(fh)

		// Each block must contain exactly the advertised number of columns
		assert_msg(cols(data_slice) == right_index - left_index + 1)

		HDFE.solution.data[., left_index..right_index] = data_slice
		left_index = right_index + 1
	}

	// Note: per-worker iteration counts are not aggregated, hence the "??" placeholder
	if (HDFE.verbose == 0) printf(`"{txt}({browse "http://scorreia.com/research/hdfe.pdf":MWFE estimator} converged in %s iteration%s)\n"', "??", "s")
	if (HDFE.verbose > 0) printf("\n")

	assert_msg(!hasmissing(HDFE.solution.data), "error partialling out; missing values found")
}
|
|
|
| end
|
|
|
| exit
|
|
|