| repo (string, length 2-99) | file (string, length 13-225) | code (string, length 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 value) |
|---|---|---|---|---|---|---|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/dccp-master/build/lib/dccp/problem.py
|
__author__ = "Xinyue"
import numpy as np
import cvxpy as cvx
import logging
from dccp.objective import convexify_obj
from dccp.objective import convexify_para_obj
from dccp.constraint import convexify_para_constr
from dccp.constraint import convexify_constr
logger = logging.getLogger("dccp")
logger.addHandler(logging.FileHandler(filename="dccp.log", mode="w", delay=True))
logger.setLevel(logging.INFO)
logger.propagate = False
def dccp(
self,
max_iter=100,
tau=0.005,
mu=1.2,
tau_max=1e8,
solver=None,
ccp_times=1,
max_slack=1e-3,
ep=1e-5,
**kwargs
):
"""
main algorithm ccp
:param max_iter: maximum number of iterations in ccp
:param tau: initial weight on slack variables
:param mu: increment of weight on slack variables
:param tau_max: maximum weight on slack variables
:param solver: specify the solver for the transformed problem
:param ccp_times: times of running ccp to solve a problem with random initial values on variables
:return
if the transformed problem is infeasible, return None;
"""
if not is_dccp(self):
raise Exception("Problem is not DCCP.")
result = None
if self.objective.NAME == "minimize":
cost_value = float("inf") # record on the best cost value
else:
cost_value = -float("inf")
for t in range(ccp_times): # for each time of running ccp
dccp_ini(
self, random=(ccp_times > 1), solver=solver, **kwargs
) # initialization; random initial value is mandatory if ccp_times>1
# iterations
result_temp = iter_dccp(
self, max_iter, tau, mu, tau_max, solver, ep, max_slack, **kwargs
)
# first iteration
if t == 0:
self._status = result_temp[-1]
result = result_temp
cost_value = result_temp[0]
result_record = {}
for var in self.variables():
result_record[var] = var.value
else:
if result_temp[-1] == "Converged":
self._status = result_temp[-1]
if result_temp[0] is not None:
if (
(cost_value is None)
or (
self.objective.NAME == "minimize"
and result_temp[0] < cost_value
)
or (
self.objective.NAME == "maximize"
and result_temp[0] > cost_value
)
): # find a better cost value
# no slack; slack small enough
if len(result_temp) < 4 or result_temp[1] < max_slack:
result = result_temp # update the result
cost_value = result_temp[
0
] # update the record on the best cost value
for var in self.variables():
result_record[var] = var.value
else:
for var in self.variables():
var.value = result_record[var]
return result
def dccp_ini(self, times=1, random=0, solver=None, **kwargs):
"""
set initial values
:param times: number of random projections for each variable
:param random: mandatory random initial values
"""
dom_constr = self.objective.args[0].domain # domain of the objective function
for arg in self.constraints:
for l in range(2):
for dom in arg.args[l].domain:
dom_constr.append(dom) # domain on each side of constraints
var_store = [] # store initial values for each variable
init_flag = [] # indicate if any variable is initialized by the user
var_user_ini = []
for var in self.variables():
var_store.append(np.zeros(var.shape)) # to be averaged
init_flag.append(var.value is None)
if var.value is None:
var_user_ini.append(np.zeros(var.shape))
else:
var_user_ini.append(var.value)
# setup the problem
ini_cost = 0
var_ind = 0
value_para = []
for var in self.variables():
if (
init_flag[var_ind] or random
): # if the variable is not initialized by the user, or random initialization is mandatory
value_para.append(cvx.Parameter(var.shape))
ini_cost += cvx.pnorm(var - value_para[-1], 2)
var_ind += 1
ini_obj = cvx.Minimize(ini_cost)
ini_prob = cvx.Problem(ini_obj, dom_constr)
# solve it several times with random points
for t in range(times): # for each time of random projection
count_para = 0
var_ind = 0
for var in self.variables():
# if the variable is not initialized by the user, or random
# initialization is mandatory
if init_flag[var_ind] or random:
# set a random point
if len(var.shape) > 1:
value_para[count_para].value = (
np.random.randn(var.shape[0], var.shape[1]) * 10
)
else:
value_para[count_para].value = np.random.randn(var.size) * 10
count_para += 1
var_ind += 1
if solver is None:
ini_prob.solve(**kwargs)
else:
ini_prob.solve(solver=solver, **kwargs)
var_ind = 0
for var in self.variables():
var_store[var_ind] = var_store[var_ind] + var.value / float(
times
) # average
var_ind += 1
# set initial values
var_ind = 0
for var in self.variables():
if init_flag[var_ind] or random:
var.value = var_store[var_ind]
else:
var.value = var_user_ini[var_ind]
var_ind += 1
def is_dccp(problem):
"""
:param
a problem
:return
a boolean indicating if the problem is dccp
"""
if problem.objective.expr.curvature == "UNKNOWN":
return False
for constr in problem.constraints:
for arg in constr.args:
if arg.curvature == "UNKNOWN":
return False
return True
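# Illustrative sketch (not part of the original module): is_dccp only requires every
# expression to have known curvature, so a problem can violate the DCP rules and
# still be DCCP-compliant. The helper name below is hypothetical.
def _example_is_dccp():
    x = cvx.Variable(2)
    # maximizing a convex function is not DCP, but every curvature is known
    prob = cvx.Problem(cvx.Maximize(cvx.norm(x, 2)), [x <= 1, x >= -1])
    return prob.is_dcp(), is_dccp(prob)  # expected: (False, True)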
def iter_dccp(self, max_iter, tau, mu, tau_max, solver, ep, max_slack_tol, **kwargs):
"""
ccp iterations
:param max_iter: maximum number of iterations in ccp
:param tau: initial weight on slack variables
:param mu: increment of weight on slack variables
:param tau_max: maximum weight on slack variables
:param solver: specify the solver for the transformed problem
:return
value of the objective function, maximum value of slack variables, value of variables
"""
# split non-affine equality constraints
constr = []
for constraint in self.constraints:
if (
str(type(constraint)) == "<class 'cvxpy.constraints.zero.Equality'>"
and not constraint.is_dcp()
):
constr.append(constraint.args[0] <= constraint.args[1])
constr.append(constraint.args[0] >= constraint.args[1])
else:
constr.append(constraint)
obj = self.objective
self = cvx.Problem(obj, constr)
it = 1
converge = False
# keep the values from the previous iteration or initialization
previous_cost = float("inf")
previous_org_cost = self.objective.value
variable_pres_value = []
for var in self.variables():
variable_pres_value.append(var.value)
# each non-dcp constraint needs a slack variable
var_slack = []
for constr in self.constraints:
if not constr.is_dcp():
var_slack.append(cvx.Variable(constr.shape))
while it <= max_iter and all(var.value is not None for var in self.variables()):
constr_new = []
# objective
convexified_obj = convexify_obj(self.objective)
if not self.objective.is_dcp():
# non-sub/super-diff
while convexified_obj is None:
# damping
var_index = 0
for var in self.variables():
var.value = 0.8 * var.value + 0.2 * variable_pres_value[var_index]
var_index += 1
convexified_obj = convexify_obj(self.objective)
# domain constraints
for dom in self.objective.expr.domain:
constr_new.append(dom)
# new cost function
cost_new = convexified_obj.expr
# constraints
count_slack = 0
for arg in self.constraints:
temp = convexify_constr(arg)
if not arg.is_dcp():
while temp is None:
# damping
var_index = 0
for var in self.variables():
var.value = (
0.8 * var.value + 0.2 * variable_pres_value[var_index]
)
var_index += 1
temp = convexify_constr(arg)
newcon = temp[0] # new constraint without slack variable
for dom in temp[1]: # domain
constr_new.append(dom)
constr_new.append(newcon.expr <= var_slack[count_slack])
constr_new.append(var_slack[count_slack] >= 0)
count_slack = count_slack + 1
else:
constr_new.append(arg)
# objective
if self.objective.NAME == "minimize":
for var in var_slack:
cost_new += tau * cvx.sum(var)
obj_new = cvx.Minimize(cost_new)
else:
for var in var_slack:
cost_new -= tau * cvx.sum(var)
obj_new = cvx.Maximize(cost_new)
# new problem
prob_new = cvx.Problem(obj_new, constr_new)
# keep previous value of variables
variable_pres_value = []
for var in self.variables():
variable_pres_value.append(var.value)
# solve
if solver is None:
prob_new_cost_value = prob_new.solve(**kwargs)
else:
prob_new_cost_value = prob_new.solve(solver=solver, **kwargs)
if prob_new_cost_value is not None:
logger.info(
"iteration=%d, cost value=%.5f, tau=%.5f, solver status=%s",
it,
prob_new_cost_value,
tau,
prob_new.status,
)
else:
logger.info(
"iteration=%d, cost value=%.5f, tau=%.5f, solver status=%s",
it,
np.nan,
tau,
prob_new.status,
)
max_slack = None
# print slack
if (
prob_new._status == "optimal" or prob_new._status == "optimal_inaccurate"
) and not var_slack == []:
slack_values = [v.value for v in var_slack if v.value is not None]
max_slack = max([np.max(v) for v in slack_values] + [-np.inf])
logger.info("max slack = %.5f", max_slack)
# terminate
if (
prob_new.value is not None
and np.abs(previous_cost - prob_new.value) <= ep
and np.abs(self.objective.value - previous_org_cost) <= ep
and (max_slack is None or max_slack <= max_slack_tol)
):
it = max_iter + 1
converge = True
else:
previous_cost = prob_new.value
previous_org_cost = self.objective.value
tau = min([tau * mu, tau_max])
it += 1
# return
if converge:
self._status = "Converged"
else:
self._status = "Not_converged"
var_value = []
for var in self.variables():
var_value.append(var.value)
if not var_slack == []:
return (self.objective.value, max_slack, var_value, self._status)
else:
return (self.objective.value, var_value, self._status)
cvx.Problem.register_solve("dccp", dccp)
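# Minimal usage sketch (illustrative, not part of the original module; it mirrors the
# standard DCCP example): once the "dccp" method is registered above, a problem whose
# expressions all have known curvature can be solved with prob.solve(method="dccp").
def _example_dccp_solve():
    x = cvx.Variable(2)
    y = cvx.Variable(2)
    prob = cvx.Problem(
        cvx.Maximize(cvx.norm(x - y, 2)),      # non-DCP objective with known curvature
        [0 <= x, x <= 1, 0 <= y, y <= 1],      # box constraints
    )
    prob.solve(method="dccp")                  # runs the CCP heuristic defined above
    return x.value, y.value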
| 12,115
| 35.059524
| 101
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/dccp-master/build/lib/dccp/constraint.py
|
__author__ = "Xinyue"
from dccp.linearize import linearize, linearize_para
import cvxpy as cvx
# from dccp.linearize import linearize_para
def convexify_para_constr(self):
"""
input:
self: a constraint of a problem
return:
if the constraint is dcp, return itself;
otherwise, return
a convexified constraint
para: [left side, right side]
if the left/right-hand side of the constraint is linearized,
left/right side = [zero order parameter, {variable: [value parameter, [gradient parameter]]}]
else,
left/right side = []
dom: domain
"""
if not self.is_dcp():
dom = [] # domain
para = [] # a list for parameters
if self.expr.args[0].curvature == "CONCAVE": # left-hand concave
lin = linearize_para(self.expr.args[0]) # linearize the expression
left = lin[0]
para.append(
[lin[1], lin[2]]
) # [zero order parameter, {variable: [value parameter, [gradient parameter]]}]
for con in lin[3]:
dom.append(con)
else:
left = self.expr.args[0]
para.append(
[]
) # appending an empty list indicates the expression has the right curvature
if (
self.expr.args[1].curvature == "CONCAVE"
): # negative right-hand must be concave (right-hand is convex)
lin = linearize_para(self.expr.args[1]) # linearize the expression
neg_right = lin[0]
para.append([lin[1], lin[2]])
for con in lin[3]:
dom.append(con)
else:
neg_right = self.expr.args[1]
para.append([])
return left + neg_right <= 0, para, dom
else:
return self
def convexify_constr(constr):
"""
:param constr: a constraint of a problem
:return:
for a dcp constraint, return itself;
for a non-dcp constraint, return a convexified constraint and domain constraints;
return None if non-sub/super-diff
"""
if not constr.is_dcp():
dom = []
# left hand concave
if constr.args[0].curvature == "CONCAVE":
left = linearize(constr.args[0])
if left is None:
return None
else:
for con in constr.args[0].domain:
dom.append(con)
else:
left = constr.args[0]
# right hand convex
if constr.args[1].curvature == "CONVEX":
right = linearize(constr.args[1])
if right is None:
return None
else:
for con in constr.args[1].domain:
dom.append(con)
else:
right = constr.args[1]
return left - right <= 0, dom
else:
return constr
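# Illustrative sketch (not part of the original module): convexifying a non-DCP
# constraint around the current variable value. The variable value must be set first,
# otherwise there is no point to linearize around.
def _example_convexify_constr():
    import numpy as np
    x = cvx.Variable(2)
    x.value = np.array([1.0, 2.0])
    con = cvx.norm(x, 2) >= 1                  # convex >= constant is not DCP
    new_con, dom = convexify_constr(con)       # affine surrogate plus domain constraints
    return new_con, dom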
| 2,925
| 32.632184
| 109
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/dccp-master/build/lib/dccp/linearize.py
|
__author__ = "Xinyue"
import numpy as np
import cvxpy as cvx
def linearize_para(expr):
"""
input:
expr: an expression
return:
linear_expr: linearized expression
zero_order: zero order parameter
linear_dictionary: {variable: [value parameter, [gradient parameter]]}
dom: domain
"""
zero_order = cvx.Parameter(expr.shape) # zero order
linear_expr = zero_order
linear_dictionary = {}
for var in expr.variables():
value_para = cvx.Parameter(var.shape)
if var.ndim > 1: # matrix to vector
gr = []
for d in range(var.shape[1]):
g = cvx.Parameter((var.shape[0], expr.shape[0]))
# g = g.T
linear_expr += g.T @ (var[:, d] - value_para[:, d]) # first order
gr.append(g)
linear_dictionary[var] = [value_para, gr]
else: # vector variable
g = cvx.Parameter((var.shape[0], expr.shape[0]))
linear_expr += g.T @ (var - value_para) # first order
linear_dictionary[var] = [value_para, [g]]
dom = expr.domain
return linear_expr, zero_order, linear_dictionary, dom
def linearize(expr):
"""Returns the tangent approximation to the expression.
Gives an elementwise lower (upper) bound for convex (concave)
expressions. No guarantees for non-DCP expressions.
Args:
expr: An expression.
Returns:
An affine expression.
"""
if expr.is_affine():
return expr
else:
if np.any(np.iscomplex(expr.value)):
tangent = np.real(expr.value) + np.imag(expr.value)
else:
tangent = expr.value
if tangent is None:
raise ValueError(
"Cannot linearize non-affine expression with missing variable values."
)
grad_map = expr.grad
for var in expr.variables():
if grad_map[var] is None:
return None
complex_flag = False
if var.is_complex() or np.any(np.iscomplex(grad_map[var])):
complex_flag = True
if var.ndim > 1:
temp = cvx.reshape(
cvx.vec(var - var.value), (var.shape[0] * var.shape[1], 1)
)
if complex_flag:
flattened = np.transpose(np.real(grad_map[var])) @ cvx.real(temp) + \
np.transpose(np.imag(grad_map[var])) @ cvx.imag(temp)
else:
flattened = np.transpose(grad_map[var]) @ temp
tangent = tangent + cvx.reshape(flattened, expr.shape)
elif var.size > 1:
if complex_flag:
tangent = tangent + np.transpose(np.real(grad_map[var])) @ (cvx.real(var) - np.real(var.value)) \
+ np.transpose(np.imag(grad_map[var])) @ (cvx.imag(var) - np.imag(var.value))
else:
tangent = tangent + np.transpose(grad_map[var]) @ (var - var.value)
else:
if complex_flag:
tangent = tangent + np.real(grad_map[var]) * (cvx.real(var) - np.real(var.value)) \
+ np.imag(grad_map[var]) * (cvx.imag(var) - np.imag(var.value))
else:
tangent = tangent + grad_map[var] * (var - var.value)
return tangent
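# Illustrative sketch (not part of the original module): the tangent approximation of
# a convex expression at the current variable value is an affine global lower bound,
# and it matches the expression value at the expansion point.
def _example_linearize():
    x = cvx.Variable(2)
    x.value = np.array([1.0, 2.0])
    expr = cvx.sum_squares(x)                  # convex; value 5.0 at the current point
    tangent = linearize(expr)                  # f(x0) + grad f(x0)^T (x - x0)
    return tangent.value                       # equals expr.value at x0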
| 3,432
| 36.725275
| 117
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/dccp-master/build/lib/dccp/__init__.py
|
from dccp.problem import is_dccp
from dccp.linearize import linearize
from dccp.objective import convexify_obj
from dccp.constraint import convexify_constr
__author__ = "Xinyue Shen"
__version__ = "1.0.3"
| 206
| 24.875
| 44
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/Fair_KDE/fairness_metrics.py
|
import torch
import cvxpy as cp
import numpy as np
# +------------------------------------------+
# | Metric 1: Energy Distance |
# +------------------------------------------+
def energy_distance(y1, y2):
'''
Compute the energy distance between the empirical distributions of y1 and y2, each one-dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
return (2*torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1)).mean()
-torch.abs(y1.unsqueeze(0)-y1.unsqueeze(1)).mean()
-torch.abs(y2.unsqueeze(0)-y2.unsqueeze(1)).mean())
def energy_distance_forloop(y1, y2):
'''
Compute the energy distance between the empirical distributions of y1 and y2, each one-dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
d11 = torch.tensor(0.)
d12 = torch.tensor(0.)
d22 = torch.tensor(0.)
for y_ in y1:
d11 += (y_-y1).abs().mean()
d12 += (y_-y2).abs().mean()
d11 = d11/(y1.shape[0])
d12 = d12/(y1.shape[0])
for y_ in y2:
d22 += (y_-y2).abs().mean()
d22 = d22/(y2.shape[0])
return 2*d12-d11-d22
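# Illustrative sketch (not part of the original module): the vectorized and the
# loop-based implementations compute the same energy distance; the helper name is
# hypothetical.
def _example_energy_distance_check():
    torch.manual_seed(0)
    y1 = torch.randn(200)
    y2 = torch.randn(150) + 0.5
    fast = energy_distance(y1, y2)
    slow = energy_distance_forloop(y1, y2)
    return torch.allclose(fast, slow, atol=1e-5)  # expected: True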
# +------------------------------------------+
# | Metric 2: Wasserstein Distance |
# +------------------------------------------+
def W1dist(y1,y2):
'''
Compute the type-1 Wasserstein distance between the empirical distributions of y1 and y2, each one-dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Wasserstein distance
'''
# compute cost matrix
C = torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1))
C_np = C.data.numpy()
# solve OT problem
T = cp.Variable(C_np.shape)
ones_1 = np.ones((C_np.shape[0], 1))
ones_2 = np.ones((C_np.shape[1], 1))
objective = cp.Minimize(cp.sum(cp.multiply(C_np,T)))
constraints = [
T >=0,
T@ones_2==ones_1/len(ones_1),
T.T@ones_1==ones_2/len(ones_2)
]
problem = cp.Problem(objective, constraints)
problem.solve(solver=cp.GUROBI)
# objective value for gradient computation
return (torch.Tensor(T.value)*C).sum()
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity(y1_hat, y2_hat, y1, y2):
'''
Compute the maximum statistical disparity between the two groups, i.e., the maximum
absolute difference between their prediction CDFs.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
diff = torch.tensor(0)
for y_test in torch.hstack((y1_hat,y2_hat)).flatten():
cdf1_y = (y1_hat<=y_test).float().mean()
cdf2_y = (y2_hat<=y_test).float().mean()
if (cdf1_y-cdf2_y).abs()>diff:
diff = (cdf1_y-cdf2_y).abs()
return diff
# +------------------------------------------+
# | Evaluation Metric 2: Bounded Group Loss |
# +------------------------------------------+
def bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L2'):
'''
Compute the ratio of group losses (bounded group loss) between the predictions for
the two protected classes.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): ratio of the smaller group loss to the larger one (at most 1)
'''
r1 = y1_hat-y1
r2 = y2_hat-y2
if loss=='L2':
lossf = lambda ra,rb: (ra**2).mean() / (rb**2).mean()
if loss=='L1':
lossf = lambda ra,rb: ra.abs().mean() / rb.abs().mean()
l = lossf(r1,r2)
return l if l<1 else 1/l
# +------------------------------------------+
# | Evaluation Metric 3: |
# | Group Fairness in Expectation |
# +------------------------------------------+
def group_fair_expect(y1_hat, y2_hat, y1, y2):
'''
Compute Group Fairness in Expectation between prediction for different
classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between means
'''
return (y1_hat.mean()-y2_hat.mean()).abs()
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity_classification(y1_hat, y2_hat, y1, y2):
'''
Compute the statistical (demographic) parity gap for binary predictions, i.e., the
absolute difference in positive prediction rates between the two groups.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
return ((y1_hat).sum() / y1_hat.shape[0] - (y2_hat).sum() / y2_hat.shape[0]).abs()
# +------------------------------------------+
# | Evaluation Metric 4: lp distance |
# +------------------------------------------+
def lp_dist(y1_hat, y2_hat, y1, y2, p=1):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): lp distance
'''
dist = torch.tensor(0.)
ys, idx = torch.hstack((y1_hat,y2_hat)).flatten().sort()
for i in range(ys.shape[0]-1):
cdf1_y = (y1_hat <= ys[i]).float().mean()
cdf2_y = (y2_hat <= ys[i]).float().mean()
dist += ((cdf1_y - cdf2_y).abs() ** p) * (ys[i+1] - ys[i])
return dist**(1/p)
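# Illustrative sketch (not part of the original module): the maximum CDF gap
# (statistical parity) and the l1 distance between the two groups' prediction CDFs
# on random scores; only the predictions are used by these metrics.
def _example_cdf_gap_metrics():
    torch.manual_seed(0)
    y1_hat = torch.rand(100)
    y2_hat = torch.rand(100) * 0.8             # group 2 skewed toward smaller scores
    sp = statistical_parity(y1_hat, y2_hat, None, None)
    l1 = lp_dist(y1_hat, y2_hat, None, None, p=1)
    return sp, l1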
# +------------------------------------------+
# | Regression Metric 1: MSE |
# +------------------------------------------+
def MSE(y1_hat, y2_hat, y1, y2):
'''
Compute the mean squared error over both groups.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MSE (torch.Tensor): mean squared error
'''
yhats = torch.hstack((y1_hat,y2_hat)).flatten()
ys = torch.hstack((y1,y2)).flatten()
return ((ys-yhats)**2).mean()
def MAE(y1_hat, y2_hat, y1, y2):
'''
Compute the mean absolute error over both groups.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MAE (torch.Tensor): mean absolute error
'''
yhats = torch.hstack((y1_hat,y2_hat)).flatten()
ys = torch.hstack((y1,y2)).flatten()
return (ys-yhats).abs().mean()
def accuracy(y1_hat, y2_hat, y1, y2):
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
total = ys.size(0)
correct = (yhats == ys).sum().item()
# print('Accuracy of the network on the 10000 test images: %d %%' % (
# 100 * correct / total))
return torch.tensor(correct / total * 100)
| 8,196
| 30.771318
| 100
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/Fair_KDE/load_data_.py
|
import math
import numpy as np
import pandas as pd
import sklearn.preprocessing as preprocessing
from scipy.stats import multivariate_normal # used by generate_synthetic_data_zafar
from collections import namedtuple
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt # for plotting stuff
import os
import collections
def load_compas_data(COMPAS_INPUT_FILE):
FEATURES_CLASSIFICATION = ["age_cat", "race", "sex", "priors_count",
"c_charge_degree"] # features to be used for classification
CONT_VARIABLES = [
"priors_count"] # continuous features, will need to be handled separately from categorical features, categorical features will be encoded using one-hot
CLASS_FEATURE = "two_year_recid" # the decision variable
SENSITIVE_ATTRS = ["race"]
# COMPAS_INPUT_FILE = DIR_DATA + "compas/compas-scores-two-years.csv"
print('Loading COMPAS dataset...')
# load the data and get some stats
df = pd.read_csv(COMPAS_INPUT_FILE)
df = df.dropna(subset=["days_b_screening_arrest"]) # dropping missing vals
# convert to np array
data = df.to_dict('list')
for k in data.keys():
data[k] = np.array(data[k])
""" Filtering the data """
# These filters are the same as propublica (refer to https://github.com/propublica/compas-analysis)
# If the charge date of a defendants Compas scored crime was not within 30 days from when the person was arrested, we assume that because of data quality reasons, that we do not have the right offense.
idx = np.logical_and(data["days_b_screening_arrest"] <= 30, data["days_b_screening_arrest"] >= -30)
# We coded the recidivist flag -- is_recid -- to be -1 if we could not find a compas case at all.
idx = np.logical_and(idx, data["is_recid"] != -1)
# In a similar vein, ordinary traffic offenses -- those with a c_charge_degree of 'O', which do not result in jail time -- are removed (only two of them).
idx = np.logical_and(idx, data["c_charge_degree"] != "O") # F: felony, M: misdemeanor
# We filtered the underlying data from Broward county to include only those rows representing people who had either recidivated in two years, or had at least two years outside of a correctional facility.
idx = np.logical_and(idx, data["score_text"] != "NA")
# we will only consider blacks and whites for this analysis
idx = np.logical_and(idx, np.logical_or(data["race"] == "African-American", data["race"] == "Caucasian"))
# select the examples that satisfy this criteria
for k in data.keys():
data[k] = data[k][idx]
""" Feature normalization and one hot encoding """
# convert class label 0 to -1
y = data[CLASS_FEATURE]
# y[y == 0] = -1
print("\nNumber of people recidivating within two years")
print(pd.Series(y).value_counts())
print("\n")
X = np.array([]).reshape(len(y),
0) # empty array with num rows same as num examples, will hstack the features to it
x_control = collections.defaultdict(list)
feature_names = []
for attr in FEATURES_CLASSIFICATION:
vals = data[attr]
if attr in SENSITIVE_ATTRS:
lb = preprocessing.LabelBinarizer()
lb.fit(vals)
vals = lb.transform(vals)
x_control[attr] = vals
pass
else:
if attr in CONT_VARIABLES:
vals = [float(v) for v in vals]
vals = preprocessing.scale(vals) # 0 mean and 1 variance
vals = np.reshape(vals, (len(y), -1)) # convert from 1-d arr to a 2-d arr with one col
else: # for binary categorical variables, the label binarizer uses just one var instead of two
lb = preprocessing.LabelBinarizer()
lb.fit(vals)
vals = lb.transform(vals)
# add to sensitive features dict
# add to learnable features
X = np.hstack((X, vals))
if attr in CONT_VARIABLES: # continuous feature, just append the name
feature_names.append(attr)
else: # categorical features
if vals.shape[1] == 1: # binary features that passed through lib binarizer
feature_names.append(attr)
else:
for k in lb.classes_: # non-binary categorical features, need to add the names for each cat
feature_names.append(attr + "_" + str(k))
# convert the sensitive feature to 1-d array
x_control = dict(x_control)
for k in x_control.keys():
assert (x_control[k].shape[1] == 1) # make sure that the sensitive feature is binary after one hot encoding
x_control[k] = np.array(x_control[k]).flatten()
# sys.exit(1)
# """permute the date randomly"""
# perm = range(0, X.shape[0])
# shuffle(perm)
# X = X[perm]
# y = y[perm]
for k in x_control.keys():
x_control[k] = x_control[k][:]
# intercept = np.ones(X.shape[0]).reshape(X.shape[0], 1)
# X = np.concatenate((intercept, X), axis=1)
assert (len(feature_names) == X.shape[1])
print("Features we will be using for classification are:", feature_names, "\n")
x_control = x_control['race']
return X, y, x_control
def load_drug_data(DIR_DATA):
g = pd.read_csv(DIR_DATA, header=None, sep=',')
# g = pd.read_csv("drug_consumption.data.txt", header=None, sep=',')
g = np.array(g)
data = np.array(g[:, 1:13]) # Remove the ID and labels
labels = g[:, 13:]
yfalse_value = 'CL0'
y = np.array([1.0 if yy == yfalse_value else 0.0 for yy in labels[:, 5]])
dataset = namedtuple('_', 'data, target')(data, y)
print('Loading Drug (black vs others) dataset...')
# dataset_train = load_drug()
sensible_feature = 4 # ethnicity
a = np.array([1.0 if el == -0.31685 else 0 for el in data[:, sensible_feature]])
X = np.delete(data, sensible_feature, axis=1).astype(float)
return X, y, a
def load_arrhythmia(DIR_DATA):
from scipy.stats import mode
arrhythmia = pd.read_csv(DIR_DATA, header=None)
arrhythmia = np.where(np.isnan(arrhythmia), mode(arrhythmia, axis=0), arrhythmia)[1]
y = np.array([1.0 if yy == 1 else 0 for yy in arrhythmia[:, -1]])
data = arrhythmia[:, :-1]
sensible_feature = 1 # gender
print('Loading Arrhythmia dataset...')
print('Different values of the sensible feature', sensible_feature, ':',
set(data[:, sensible_feature]))
X = np.delete(data, sensible_feature, axis=1).astype(float)
a = data[:, sensible_feature]
data_red = X[:, :12]
return data_red, y, a
def generate_synthetic_data_zafar(plot_data=True, n_samples = 1200):
"""
Code for generating the synthetic data.
We will have two non-sensitive features and one sensitive feature.
A sensitive feature value of 0.0 means the example is considered to be in protected group (e.g., female) and 1.0 means it's in non-protected group (e.g., male).
"""
# generate these many data points per class
disc_factor = math.pi / 4.0 # this variable determines the initial discrimination in the data -- decrease it to generate more discrimination
def gen_gaussian(mean_in, cov_in, class_label):
nv = multivariate_normal(mean=mean_in, cov=cov_in)
X = nv.rvs(n_samples)
y = np.ones(n_samples, dtype=float) * class_label
return nv, X, y
""" Generate the non-sensitive features randomly """
# We will generate one gaussian cluster for each class
mu1, sigma1 = [2, 2], [[5, 1], [1, 5]]
mu2, sigma2 = [-2, -2], [[10, 1], [1, 3]]
nv1, X1, y1 = gen_gaussian(mu1, sigma1, 1) # positive class
nv2, X2, y2 = gen_gaussian(mu2, sigma2, -1) # negative class
# join the positive and negative class clusters
X = np.vstack((X1, X2))
y = np.hstack((y1, y2))
# shuffle the data
perm = np.random.randint(0, X.shape[0], n_samples * 2)
X = X[perm]
y = y[perm]
rotation_mult = np.array(
[[math.cos(disc_factor), -math.sin(disc_factor)], [math.sin(disc_factor), math.cos(disc_factor)]])
X_aux = np.dot(X, rotation_mult)
""" Generate the sensitive feature here """
x_control = [] # this array holds the sensitive feature value
for i in range(len(X)):
x = X_aux[i]
# probability for each cluster that the point belongs to it
p1 = nv1.pdf(x)
p2 = nv2.pdf(x)
# normalize the probabilities from 0 to 1
s = p1 + p2
p1 = p1 / s
p2 = p2 / s
r = np.random.uniform() # generate a random number from 0 to 1
if r < p1: # the first cluster is the positive class
x_control.append(1.0) # 1.0 means its male
else:
x_control.append(0.0) # 0.0 -> female
x_control = np.array(x_control)
""" Show the data """
if plot_data:
num_to_draw = 200 # we will only draw a small number of points to avoid clutter
x_draw = X[:num_to_draw]
y_draw = y[:num_to_draw]
x_control_draw = x_control[:num_to_draw]
X_s_0 = x_draw[x_control_draw == 0.0]
X_s_1 = x_draw[x_control_draw == 1.0]
y_s_0 = y_draw[x_control_draw == 0.0]
y_s_1 = y_draw[x_control_draw == 1.0]
plt.scatter(X_s_0[y_s_0 == 1.0][:, 0], X_s_0[y_s_0 == 1.0][:, 1], color='green', marker='x', s=30,
linewidth=1.5, label="Prot. +ve")
plt.scatter(X_s_0[y_s_0 == -1.0][:, 0], X_s_0[y_s_0 == -1.0][:, 1], color='red', marker='x', s=30,
linewidth=1.5, label="Prot. -ve")
plt.scatter(X_s_1[y_s_1 == 1.0][:, 0], X_s_1[y_s_1 == 1.0][:, 1], color='green', marker='o', facecolors='none',
s=30, label="Non-prot. +ve")
plt.scatter(X_s_1[y_s_1 == -1.0][:, 0], X_s_1[y_s_1 == -1.0][:, 1], color='red', marker='o', facecolors='none',
s=30, label="Non-prot. -ve")
plt.tick_params(axis='x', which='both', bottom='off', top='off',
labelbottom='off') # dont need the ticks to see the data distribution
plt.tick_params(axis='y', which='both', left='off', right='off', labelleft='off')
plt.legend(loc=2, fontsize=15)
plt.xlim((-15, 10))
plt.ylim((-10, 15))
plt.show()
y[y==-1] = 0
return X, y, x_control
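# Illustrative sketch (not part of the original module; assumes math and
# scipy.stats.multivariate_normal are imported at the top of this file): generate a
# small synthetic dataset without plotting.
def _example_zafar_synthetic():
    np.random.seed(0)
    X, y, x_control = generate_synthetic_data_zafar(plot_data=False, n_samples=100)
    return X.shape, y.shape, x_control.shape   # (200, 2), (200,), (200,)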
def load_adult(DIR_DATA, smaller=False, scaler=True):
'''
:param smaller: selecting this flag it is possible to generate a smaller version of the training and test sets.
:param scaler: if True it applies a StandardScaler() (from sklearn.preprocessing) to the data.
:return: train and test data.
Features of the Adult dataset:
0. age: continuous.
1. workclass: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
2. fnlwgt: continuous.
3. education: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th,
Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
4. education-num: continuous.
5. marital-status: Married-civ-spouse, Divorced, Never-married, Separated, Widowed,
Married-spouse-absent, Married-AF-spouse.
6. occupation: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty,
Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv,
Protective-serv, Armed-Forces.
7. relationship: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
8. race: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
9. sex: Female, Male.
10. capital-gain: continuous.
11. capital-loss: continuous.
12. hours-per-week: continuous.
13. native-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc),
India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico,
Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala,
Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
(14. label: <=50K, >50K)
'''
data = pd.read_csv(
DIR_DATA,
names=[
"Age", "workclass", "fnlwgt", "education", "education-num", "marital-status",
"occupation", "relationship", "race", "gender", "capital gain", "capital loss",
"hours per week", "native-country", "income"]
)
len_train = len(data.values[:, -1])
data_test = pd.read_csv(
DIR_DATA + "adult/adult.test",
names=[
"Age", "workclass", "fnlwgt", "education", "education-num", "marital-status",
"occupation", "relationship", "race", "gender", "capital gain", "capital loss",
"hours per week", "native-country", "income"]
)
data = pd.concat([data, data_test])
# Considering the relative low portion of missing data, we discard rows with missing data
domanda = data["workclass"][4].values[1]
data = data[data["workclass"] != domanda]
data = data[data["occupation"] != domanda]
data = data[data["native-country"] != domanda]
# Here we apply discretisation on column marital_status
data.replace(['Divorced', 'Married-AF-spouse',
'Married-civ-spouse', 'Married-spouse-absent',
'Never-married', 'Separated', 'Widowed'],
['not married', 'married', 'married', 'married',
'not married', 'not married', 'not married'], inplace=True)
# categorical fields
category_col = ['workclass', 'race', 'education', 'marital-status', 'occupation',
'relationship', 'gender', 'native-country', 'income']
for col in category_col:
b, c = np.unique(data[col], return_inverse=True)
data[col] = c
datamat = data.values
target = np.array([-1.0 if val == 0 else 1.0 for val in np.array(datamat)[:, -1]])
datamat = datamat[:, :-1]
if scaler:
scaler = StandardScaler()
scaler.fit(datamat)
datamat = scaler.transform(datamat)
if smaller:
print('A smaller version of the dataset is loaded...')
data = namedtuple('_', 'data, target')(datamat[:len_train // 20, :-1], target[:len_train // 20])
data_test = namedtuple('_', 'data, target')(datamat[len_train:, :-1], target[len_train:])
else:
print('The dataset is loaded...')
data = namedtuple('_', 'data, target')(datamat[:len_train, :-1], target[:len_train])
data_test = namedtuple('_', 'data, target')(datamat[len_train:, :-1], target[len_train:])
return data, data_test
# def load_toy_test():
# # Load toy test
# n_samples = 100 * 2
# n_samples_low = 20 * 2
# n_dimensions = 10
# X, y, sensible_feature_id, _, _ = generate_toy_data(n_samples=n_samples,
# n_samples_low=n_samples_low,
# n_dimensions=n_dimensions)
# data = namedtuple('_', 'data, target')(X, y)
# return data, data
| 15,165
| 43.737463
| 207
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/Fair_KDE/algorithm.py
|
import random
import IPython
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from dataloader import CustomDataset
from utils import measures_from_Yhat
tau = 0.5
# Approximation of Q-function given by López-Benítez & Casadevall (2011) based on a second-order exponential function & Q(x) = 1- Q(-x):
a = 0.4920
b = 0.2887
c = 1.1893
Q_function = lambda x: torch.exp(-a*x**2 - b*x - c)
def CDF_tau(Yhat, h=0.01, tau=0.5):
m = len(Yhat)
Y_tilde = (tau-Yhat)/h
sum_ = torch.sum(Q_function(Y_tilde[Y_tilde>0])) \
+ torch.sum(1-Q_function(torch.abs(Y_tilde[Y_tilde<0]))) \
+ 0.5*(len(Y_tilde[Y_tilde==0]))
return sum_/m
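# Illustrative sketch (not part of the original module): CDF_tau is a kernel-smoothed,
# differentiable surrogate for the hard rate of predictions crossing the threshold tau;
# for a small bandwidth h it stays close to the thresholded fraction.
def _example_cdf_tau():
    torch.manual_seed(0)
    Yhat = torch.rand(1000)
    smoothed = CDF_tau(Yhat, h=0.01, tau=0.5)
    hard = (Yhat >= 0.5).float().mean()
    return smoothed.item(), hard.item()        # the two rates should roughly agree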
def Huber_loss(x, delta):
if x.abs() < delta:
return (x ** 2) / 2
return delta * (x.abs() - delta / 2)
def train_fair_classifier(dataset, net, optimizer, lr_scheduler, fairness, lambda_, h, delta, device, n_epochs=200, batch_size=2048, seed=0):
# Retrieve train/test splitted pytorch tensors for index=split
train_tensors, test_tensors = dataset.get_dataset_in_tensor()
X_train, Y_train, Z_train, XZ_train = train_tensors
X_test, Y_test, Z_test, XZ_test = test_tensors
# Retrieve train/test split numpy arrays for index=split (needed below by measures_from_Yhat)
train_arrays, test_arrays = dataset.get_dataset_in_ndarray()
X_train_np, Y_train_np, Z_train_np, XZ_train_np = train_arrays
X_test_np, Y_test_np, Z_test_np, XZ_test_np = test_arrays
custom_dataset = CustomDataset(XZ_train, Y_train, Z_train)
if batch_size == 'full':
batch_size_ = XZ_train.shape[0]
elif isinstance(batch_size, int):
batch_size_ = batch_size
data_loader = DataLoader(custom_dataset, batch_size=batch_size_, shuffle=True)
pi = torch.tensor(np.pi).to(device)
phi = lambda x: torch.exp(-0.5*x**2)/torch.sqrt(2*pi) #normal distribution
# An empty dataframe for logging experimental results
df = pd.DataFrame()
df_ckpt = pd.DataFrame()
loss_function = nn.BCELoss()
costs = []
for epoch in range(n_epochs):
for i, (xz_batch, y_batch, z_batch) in enumerate(data_loader):
xz_batch, y_batch, z_batch = xz_batch.to(device), y_batch.to(device), z_batch.to(device)
Yhat = net(xz_batch)
Ytilde = torch.round(Yhat.detach().reshape(-1))
cost = 0
dtheta = 0
m = z_batch.shape[0]
# prediction loss
p_loss = loss_function(Yhat.squeeze(), y_batch)
cost += (1 - lambda_) * p_loss
# DP_Constraint
if fairness == 'DP':
Pr_Ytilde1 = CDF_tau(Yhat.detach(),h,tau)
for z in range(1):
Pr_Ytilde1_Z = CDF_tau(Yhat.detach()[z_batch==z],h,tau)
m_z = z_batch[z_batch==z].shape[0]
Delta_z = Pr_Ytilde1_Z-Pr_Ytilde1
Delta_z_grad = torch.dot(phi((tau-Yhat.detach()[z_batch==z])/h).view(-1),
Yhat[z_batch==z].view(-1))/h/m_z
Delta_z_grad -= torch.dot(phi((tau-Yhat.detach())/h).view(-1),
Yhat.view(-1))/h/m
if Delta_z.abs() >= delta:
if Delta_z > 0:
Delta_z_grad *= lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= -lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= lambda_*Delta_z
cost += Delta_z_grad
# EO_Constraint
elif fairness == 'EO':
for y in [0,1]:
Pr_Ytilde1_Y = CDF_tau(Yhat[y_batch==y].detach(),h,tau)
m_y = y_batch[y_batch==y].shape[0]
for z in range(1):
Pr_Ytilde1_ZY = CDF_tau(Yhat[(y_batch==y) & (z_batch==z)].detach(),h,tau)
m_zy = z_batch[(y_batch==y) & (z_batch==z)].shape[0]
Delta_zy = Pr_Ytilde1_ZY-Pr_Ytilde1_Y
Delta_zy_grad = torch.dot(
phi((tau-Yhat[(y_batch==y) & (z_batch==z)].detach())/h).view(-1),
Yhat[(y_batch==y) & (z_batch==z)].view(-1)
)/h/m_zy
Delta_zy_grad -= torch.dot(
phi((tau-Yhat[y_batch==y].detach())/h).view(-1),
Yhat[y_batch==y].view(-1)
)/h/m_y
if Delta_zy.abs() >= delta:
if Delta_zy > 0:
Delta_zy_grad *= lambda_*delta
cost += Delta_zy_grad
else:
Delta_zy_grad *= -lambda_*delta
cost += Delta_zy_grad
else:
Delta_zy_grad *= lambda_*Delta_zy
cost += Delta_zy_grad
optimizer.zero_grad()
if (torch.isnan(cost)).any():
continue
cost.backward()
optimizer.step()
costs.append(cost.item())
# Print the cost per 10 batches
if (i + 1) % 10 == 0 or (i + 1) == len(data_loader):
print('Epoch [{}/{}], Batch [{}/{}], Cost: {:.4f}'.format(epoch+1, n_epochs,
i+1, len(data_loader),
cost.item()), end='\r')
if lr_scheduler is not None:
lr_scheduler.step()
Yhat_train = net(XZ_train).squeeze().detach().cpu().numpy()
df_temp = measures_from_Yhat(Y_train_np, Z_train_np, Yhat=Yhat_train, threshold=tau)
df_temp['epoch'] = epoch * len(data_loader) + i + 1
df_ckpt = df_ckpt.append(df_temp)
# Plot (cost, train accuracies, fairness measures) curves per 50 epochs
if (epoch + 1) % 50 == 0:
IPython.display.clear_output()
print('Currently working on - seed: {}'.format(seed))
plt.figure(figsize=(15,5), dpi=100)
plt.subplot(1,3,1)
plt.plot(costs)
plt.xlabel('x10 iterations')
plt.title('cost')
plt.subplot(1,3,2)
plt.plot(df_ckpt['acc'].to_numpy())
plt.xlabel('epoch')
plt.title('Accuracy')
plt.subplot(1,3,3)
if fairness == 'DP':
plt.plot(df_ckpt['DDP'].to_numpy())
plt.title('DDP')
elif fairness == 'EO':
plt.plot(df_ckpt['DEO'].to_numpy())
plt.title('DEO')
plt.xlabel('epoch')
plt.show()
Yhat_test = net(XZ_test).squeeze().detach().cpu().numpy()
df_test = measures_from_Yhat(Y_test_np, Z_test_np, Yhat=Yhat_test, threshold=tau)
return df_test
| 7,375
| 41.390805
| 141
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/Fair_KDE/dataloader.py
|
import os
import copy
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import data_loader
from tempeh.configurations import datasets
from sklearn.datasets import make_moons
from sklearn.preprocessing import LabelEncoder, StandardScaler
def arrays_to_tensor(X, Y, Z, XZ, device):
return torch.FloatTensor(X).to(device), torch.FloatTensor(Y).to(device), torch.FloatTensor(Z).to(device), torch.FloatTensor(XZ).to(device)
def adult(data_root, display=False):
""" Return the Adult census data in a nice package. """
dtypes = [
("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"),
("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"),
("Occupation", "category"), ("Relationship", "category"), ("Race", "category"),
("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"),
("Hours per week", "float32"), ("Country", "category"), ("Target", "category")
]
raw_train_data = pd.read_csv(
data_root+'adult.data',
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
raw_test_data = pd.read_csv(
data_root+'adult.test',
skiprows=1,
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
train_data = raw_train_data.drop(["Education"], axis=1) # redundant with Education-Num
test_data = raw_test_data.drop(["Education"], axis=1) # redundant with Education-Num
filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes))
train_data["Target"] = train_data["Target"] == " >50K"
test_data["Target"] = test_data["Target"] == " >50K."
rcode = {
"Not-in-family": 0,
"Unmarried": 1,
"Other-relative": 2,
"Own-child": 3,
"Husband": 4,
"Wife": 5
}
for k, dtype in filt_dtypes:
if dtype == "category":
if k == "Relationship":
train_data[k] = np.array([rcode[v.strip()] for v in train_data[k]])
test_data[k] = np.array([rcode[v.strip()] for v in test_data[k]])
else:
train_data[k] = train_data[k].cat.codes
test_data[k] = test_data[k].cat.codes
return train_data.drop(["Target", "fnlwgt"], axis=1), train_data["Target"].values, test_data.drop(["Target", "fnlwgt"], axis=1), test_data["Target"].values
def compas_data_loader():
""" Downloads COMPAS data from the propublica GitHub repository.
:return: pandas.DataFrame with columns 'sex', 'age', 'juv_fel_count', 'juv_misd_count',
'juv_other_count', 'priors_count', 'two_year_recid', 'age_cat_25 - 45',
'age_cat_Greater than 45', 'age_cat_Less than 25', 'race_African-American',
'race_Caucasian', 'c_charge_degree_F', 'c_charge_degree_M'
"""
data = pd.read_csv("./data/compas/compas-scores-two-years.csv") # noqa: E501
# filter similar to
# https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
data = data[(data['days_b_screening_arrest'] <= 30) &
(data['days_b_screening_arrest'] >= -30) &
(data['is_recid'] != -1) &
(data['c_charge_degree'] != "O") &
(data['score_text'] != "N/A")]
# filter out all records except the ones with the most common two races
data = data[(data['race'] == 'African-American') | (data['race'] == 'Caucasian')]
# Select relevant columns for machine learning.
# We explicitly leave in age_cat to allow linear classifiers to be non-linear in age
data = data[["sex", "age", "age_cat", "race", "juv_fel_count", "juv_misd_count",
"juv_other_count", "priors_count", "c_charge_degree", "two_year_recid"]]
# map string representation of feature "sex" to 0 for Female and 1 for Male
data = data.assign(sex=(data["sex"] == "Male") * 1)
data = pd.get_dummies(data)
return data
class CustomDataset():
def __init__(self, X, Y, Z):
self.X = X
self.Y = Y
self.Z = Z
def __len__(self):
return len(self.Y)
def __getitem__(self, index):
x, y, z = self.X[index], self.Y[index], self.Z[index]
return x, y, z
class FairnessDataset():
def __init__(self, dataset, device=torch.device('cuda')):
self.dataset = dataset
self.device = device
np.random.seed(12345678)
if self.dataset == 'AdultCensus':
self.get_adult_data()
elif self.dataset == 'COMPAS':
self.get_compas_data()
elif self.dataset == 'CreditDefault':
self.get_credit_default_data()
elif self.dataset == 'Lawschool':
self.get_lawschool_data()
elif self.dataset == 'Moon':
self.get_moon_data()
else:
raise ValueError('Your argument {} for dataset name is invalid.'.format(self.dataset))
self.prepare_ndarray()
def get_adult_data(self):
X_train, Y_train, X_test, Y_test = adult('./data/adult/')
self.Z_train_ = X_train['Sex']
self.Z_test_ = X_test['Sex']
self.X_train_ = X_train.drop(labels=['Sex'], axis=1)
self.X_train_ = pd.get_dummies(self.X_train_)
self.X_test_ = X_test.drop(labels=['Sex'], axis=1)
self.X_test_ = pd.get_dummies(self.X_test_)
le = LabelEncoder()
self.Y_train_ = le.fit_transform(Y_train)
self.Y_train_ = pd.Series(self.Y_train_, name='>50k')
self.Y_test_ = le.fit_transform(Y_test)
self.Y_test_ = pd.Series(self.Y_test_, name='>50k')
# def get_compas_data(self):
# dataset = datasets['compas']()
# # dataset = compas_data_loader()
# X_train, X_test = dataset.get_X(format=pd.DataFrame)
# Y_train, Y_test = dataset.get_y(format=pd.Series)
# Z_train, Z_test = dataset.get_sensitive_features('race', format=pd.Series)
# self.X_train_ = X_train
# self.Y_train_ = Y_train
# self.Z_train_ = (Z_train != 'African-American').astype(float)
# self.X_test_ = X_test
# self.Y_test_ = Y_test
# self.Z_test_ = (Z_test != 'African-American').astype(float)
def get_compas_data(self):
dataset = datasets['compas']()
ds = data_loader.Compas()
ds.split_test()
X, Y, A = ds.get_log_data()
X_test, Y_test, A_test = ds.get_test_data()
self.X_train_ = X
self.Y_train_ = Y
self.Z_train_ = A
self.X_test_ = X_test
self.Y_test_ = Y_test
self.Z_test_ = A_test
def get_credit_default_data(self):
rawdata = pd.read_excel('./data/credit_card/default_clients.xls', header=1)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
columns = list(rawdata.columns)
categ_cols = []
for column in columns:
if 2 < len(set(rawdata[column])) < 10:
categ_cols.append((column, len(set(rawdata[column]))))
preproc_data = copy.deepcopy(rawdata)
for categ_col, n_items in categ_cols:
for i in range(n_items):
preproc_data[categ_col + str(i)] = (preproc_data[categ_col] == i).astype(float)
preproc_data = preproc_data.drop(['EDUCATION', 'MARRIAGE'], axis=1)
X = preproc_data.drop(['ID', 'SEX', 'default payment next month'], axis=1)
Y = preproc_data['default payment next month']
Z = 2 - preproc_data['SEX']
self.X_train_ = X.loc[list(range(24000)), :]
self.Y_train_ = Y.loc[list(range(24000))]
self.Z_train_ = Z.loc[list(range(24000))]
self.X_test_ = X.loc[list(range(24000,30000)), :]
self.Y_test_ = Y.loc[list(range(24000,30000))]
self.Z_test_ = Z.loc[list(range(24000,30000))]
def get_lawschool_data(self):
rawdata = pd.read_sas('./data/lawschool/lawschs1_1.sas7bdat')
rawdata = rawdata.drop(['college', 'Year', 'URM', 'enroll'], axis=1)
rawdata = rawdata.dropna(axis=0)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
X = rawdata[['LSAT', 'GPA', 'Gender', 'resident']]
Y = rawdata['admit']
Z = rawdata['White']
self.X_train_ = X.loc[list(range(77267)), :]
self.Y_train_ = Y.loc[list(range(77267))]
self.Z_train_ = Z.loc[list(range(77267))]
self.X_test_ = X.loc[list(range(77267,96584)), :]
self.Y_test_ = Y.loc[list(range(77267,96584))]
self.Z_test_ = Z.loc[list(range(77267,96584))]
def get_moon_data(self):
n_train = 10000
n_test = 5000
X, Y = make_moons(n_samples=n_train+n_test, noise=0.2, random_state=0)
Z = np.zeros_like(Y)
np.random.seed(0)
for i in range(n_train + n_test):
if Y[i] == 0:
if -0.734 < X[i][0] < 0.734:
Z[i] = np.random.binomial(1, 0.90)
else:
Z[i] = np.random.binomial(1, 0.35)
elif Y[i] == 1:
if 0.262 < X[i][0] < 1.734:
Z[i] = np.random.binomial(1, 0.55)
else:
Z[i] = np.random.binomial(1, 0.10)
X = pd.DataFrame(X, columns=['x_1', 'x_2'])
Y = pd.Series(Y, name='label')
Z = pd.Series(Z, name='sensitive attribute')
self.X_train_ = X.loc[list(range(10000)), :]
self.Y_train_ = Y.loc[list(range(10000))]
self.Z_train_ = Z.loc[list(range(10000))]
self.X_test_ = X.loc[list(range(10000,15000)), :]
self.Y_test_ = Y.loc[list(range(10000,15000))]
self.Z_test_ = Z.loc[list(range(10000,15000))]
def prepare_ndarray(self):
self.normalized = False
self.X_train = self.X_train_.to_numpy(dtype=np.float64)
self.Y_train = self.Y_train_.to_numpy(dtype=np.float64)
self.Z_train = self.Z_train_.to_numpy(dtype=np.float64)
self.XZ_train = np.concatenate([self.X_train, self.Z_train.reshape(-1,1)], axis=1)
self.X_test = self.X_test_.to_numpy(dtype=np.float64)
self.Y_test = self.Y_test_.to_numpy(dtype=np.float64)
self.Z_test = self.Z_test_.to_numpy(dtype=np.float64)
self.XZ_test = np.concatenate([self.X_test, self.Z_test.reshape(-1,1)], axis=1)
self.sensitive_attrs = sorted(list(set(self.Z_train)))
return None
def normalize(self):
self.normalized = True
scaler_XZ = StandardScaler()
self.XZ_train = scaler_XZ.fit_transform(self.XZ_train)
self.XZ_test = scaler_XZ.transform(self.XZ_test)
scaler_X = StandardScaler()
self.X_train = scaler_X.fit_transform(self.X_train)
self.X_test = scaler_X.transform(self.X_test)
return None
def get_dataset_in_ndarray(self):
return (self.X_train, self.Y_train, self.Z_train, self.XZ_train),\
(self.X_test, self.Y_test, self.Z_test, self.XZ_test)
def get_dataset_in_tensor(self, validation=False, val_portion=.0):
X_train_, Y_train_, Z_train_, XZ_train_ = arrays_to_tensor(
self.X_train, self.Y_train, self.Z_train, self.XZ_train, self.device)
X_test_, Y_test_, Z_test_, XZ_test_ = arrays_to_tensor(
self.X_test, self.Y_test, self.Z_test, self.XZ_test, self.device)
return (X_train_, Y_train_, Z_train_, XZ_train_),\
(X_test_, Y_test_, Z_test_, XZ_test_)
| 11,649
| 40.459075
| 159
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/Fair_KDE/data_loader_or.py
|
# data_loader.py
# utilities for loading data
import torch
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from load_data import *
# TODO: possibly some form of (cross) validation
def to_tensor(data, device):
D = data
if type(data) == pd.core.frame.DataFrame:
D = data.to_numpy()
if type(D) == np.ndarray:
return torch.tensor(D, device=device).float()
elif type(D) == torch.Tensor:
return D.to(device).float()
else:
raise NotImplementedError('Currently only Torch Tensors, Numpy NDArrays and Pandas Dataframes are supported')
class DataLoader:
def __init__(self, X, Y, A, use_tensor=True, device='cpu', info='No Info Available'):
self.device = device
self.use_tensor = use_tensor
self.X = to_tensor(X, device) if use_tensor else X
self.A = to_tensor(A, device) if use_tensor else A
self.Y = to_tensor(Y, device) if use_tensor else Y
self.X_test = None
self.A_test = None
self.Y_test = None
self.info = info
def get_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_data_for_A(self, a):
# get dataset but only for samples with attribute a
X_a = self.X[(self.A==a).squeeze()]
Y_a = self.Y[(self.A==a).squeeze()]
return (X_a, Y_a)
# def stratified_batch_generator(self, n_samples, n_iterates):
# # get proportions of protected attribute
# p_A1 = self.A.mean()
# p_A0 = 1-p_A1
# # build index set of protected and unprotected attribute
# ind_A1 = (self.A==1).nonzero()[:,0]
# ind_A0 = (self.A==0).nonzero()[:,0]
# # number of samples to sample from each distribution
# n_batch_1 = int(p_A1*n_samples)
# n_batch_0 = int(p_A0*n_samples)
# replacement = False
# for _ in range(n_iterates):
# # sample indexes for protected and unprotected class
# batch_idx1 = ind_A1[(torch.ones(ind_A1.shape[0]) / (ind_A1.shape[0])).multinomial(
# num_samples=n_batch_1,
# replacement=replacement)]
# batch_idx0 = ind_A0[(torch.ones(ind_A0.shape[0]) / (ind_A0.shape[0])).multinomial(
# num_samples=n_batch_0,
# replacement=replacement)]
# yield (torch.vstack((self.X[batch_idx0], self.X[batch_idx1])),
# torch.vstack((self.Y[batch_idx0], self.Y[batch_idx1])),
# torch.vstack((self.A[batch_idx0], self.A[batch_idx1])))
def stratified_batch_generator_worep(self, batch_size=32, n_epochs=100):
# get proportions of protected attribute
# n_epochs = 100
p_A1 = self.A.mean()
p_A0 = 1 - p_A1
# print(p_A0)
total_samples = self.A.shape[0]
# batch_size = 32
# build index set of protected and unprotected attribute
# number of samples to sample from each distribution
n_batch_1 = int(p_A1*batch_size)
n_batch_0 = int(p_A0*batch_size)
for epoch in tqdm(range(n_epochs)):
# print(epoch)
ind_A1 = (self.A==1).nonzero()[:,0]
ind_A0 = (self.A==0).nonzero()[:,0]
for _ in range(0, total_samples - batch_size + 1, batch_size):
# sample indexes for protected and unprotected class
sampled_indices_A1 = (torch.ones(ind_A1.shape[0]) / (ind_A1.shape[0])).multinomial(
num_samples=n_batch_1,
replacement=False)
batch_idx1 = ind_A1[sampled_indices_A1]
mask = torch.ones(ind_A1.numel(), dtype=torch.bool)
mask[sampled_indices_A1] = False
ind_A1 = ind_A1[mask]
# print(ind_A1.shape)
sampled_indices_A0 = (torch.ones(ind_A0.shape[0]) / (ind_A0.shape[0])).multinomial(
num_samples=n_batch_0,
replacement=False)
batch_idx0 = ind_A0[sampled_indices_A0]
mask = torch.ones(ind_A0.numel(), dtype=torch.bool)
mask[sampled_indices_A0] = False
ind_A0 = ind_A0[mask]
yield (torch.vstack((self.X[batch_idx0], self.X[batch_idx1])),
torch.vstack((self.Y[batch_idx0], self.Y[batch_idx1])),
torch.vstack((self.A[batch_idx0], self.A[batch_idx1])))
def get_info(self):
return self.info
def split_test(self, **kwargs):
# perform train test split, kwargs for sklearn train-test-split
X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(self.X, self.Y, self.A, **kwargs)
self.X = X_train
self.X_test = X_test
self.Y = Y_train
self.Y_test = Y_test
self.A = A_train
self.A_test = A_test
def get_test_data(self):
# get the test dataset
if self.X_test is None:
raise ValueError('Train-Test split has not yet been performed')
return (self.X_test, self.Y_test, self.A_test)
def get_log_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_k(self):
return self.X.shape[1]
class CommunitiesCrime(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class CommunitiesCrimeClassification(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
bin_thr = Y.mean()
Y = (Y>= bin_thr).astype(int)
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class BarPass(DataLoader):
# http://www.seaphe.org/databases.php
def __init__(self, **kwargs):
df = pd.read_sas('data/lawschs1_1.sas7bdat')
drop_cols = ['enroll', 'college', 'Year', 'Race']
df = df[[col for col in df.columns if col not in drop_cols]]
df = df.dropna()
Y = df[['GPA']]
A = df[['White']]
X = df.drop('GPA', axis=1)
info = '''Law School Admissions Data collected by Project SEAPHE, predict GPA,
don\'t discriminate White vs. Non-White\nhttp://www.seaphe.org/databases.php'''
self.first_call = True
super().__init__(X, Y, A, info=info, **kwargs)
def get_data(self):
if self.first_call:
self.Xs, self.Ys, self.As = next(self.stratified_batch_generator_worep(10000, 1))
self.first_call = False
return (self.Xs, self.Ys, self.As)
class StudentPerformance(DataLoader):
# https://archive.ics.uci.edu/ml/datasets/student+performance
def __init__(self, subject = 'Math', **kwargs):
# load data
df = pd.read_csv('data/student/student-{}.csv'.format(subject.lower()[:3]), sep=';')
# convert the categorical values
categoricals = df.dtypes[df.dtypes==object].index
for attribute in categoricals:
options = df[attribute].unique()
options.sort()
options = options[:-1]
for option in options:
df['{}_{}'.format(attribute, option)] = (df[attribute]==option).astype(int)
df = df.drop(attribute, axis=1)
# extract X A Y
A = df[['sex_F']]
Y = df[['G3']]
X = df.drop(['sex_F', 'G3'], axis=1)
info = '''
Student Performance dataset. Predict Final Grade based on Attributes, don't discriminate against female students.
https://archive.ics.uci.edu/ml/datasets/student+performance
'''
super().__init__(X, Y, A, info=info, **kwargs)
class Compas(DataLoader):
def __init__(self):
X, Y, A = load_compas_data('data/compas/compas-scores-two-years.csv')
info = '''
https://www.kaggle.com/danofer/compass
'''
super().__init__(X, Y[:, None], A[:, None], info=info)
class Synthetic1(DataLoader):
# synthetic data: bias offset
def __init__(self, N, k, delta_intercept = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = delta_intercept+ X_0@theta
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
class Synthetic2(DataLoader):
# synthetic data: bias slope
def __init__(self, N, k, delta_slope = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = X_0@(theta+delta_slope)
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
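# Illustrative sketch (added for clarity, not part of the original module): the two synthetic
# generators above differ only in whether the group bias enters through the intercept
# (Synthetic1) or through the slope (Synthetic2). N, k and the bias magnitudes below are
# assumptions for demonstration only.
def _demo_synthetic_datasets():
    ds_offset = Synthetic1(N=1000, k=5, delta_intercept=0.5)
    ds_slope = Synthetic2(N=1000, k=5, delta_slope=0.5)
    X, Y, A = ds_offset.get_log_data()
    print(X.shape, Y.shape, A.shape)  # X carries the sensitive attribute as its last column
    return ds_offset, ds_slope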
def set_seed(seed=0):
torch.manual_seed(seed)
np.random.seed(seed)
| 11,586
| 38.546075
| 121
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/Fair_KDE/utils.py
|
import numpy as np
import pandas as pd
def measures_from_Yhat(Y, Z, Yhat=None, threshold=0.5):
assert isinstance(Y, np.ndarray)
assert isinstance(Z, np.ndarray)
assert Yhat is not None
assert isinstance(Yhat, np.ndarray)
if Yhat is not None:
Ytilde = (Yhat >= threshold).astype(np.float32)
assert Ytilde.shape == Y.shape and Y.shape == Z.shape
# Accuracy
acc = (Ytilde == Y).astype(np.float32).mean()
# DP
DDP = abs(np.mean(Ytilde[Z==0])-np.mean(Ytilde[Z==1]))
# EO
Y_Z0, Y_Z1 = Y[Z==0], Y[Z==1]
Y1_Z0 = Y_Z0[Y_Z0==1]
Y0_Z0 = Y_Z0[Y_Z0==0]
Y1_Z1 = Y_Z1[Y_Z1==1]
Y0_Z1 = Y_Z1[Y_Z1==0]
FPR, FNR = {}, {}
FPR[0] = np.sum(Ytilde[np.logical_and(Z==0, Y==0)])/len(Y0_Z0)
FPR[1] = np.sum(Ytilde[np.logical_and(Z==1, Y==0)])/len(Y0_Z1)
FNR[0] = np.sum(1 - Ytilde[np.logical_and(Z==0, Y==1)])/len(Y1_Z0)
FNR[1] = np.sum(1 - Ytilde[np.logical_and(Z==1, Y==1)])/len(Y1_Z1)
TPR_diff = abs((1-FNR[0]) - (1-FNR[1]))
FPR_diff = abs(FPR[0] - FPR[1])
DEO = TPR_diff + FPR_diff
data = [acc, DDP, DEO]
columns = ['acc', 'DDP', 'DEO']
return pd.DataFrame([data], columns=columns)
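# Illustrative sketch (added for clarity, not part of the original file): measures_from_Yhat
# expects hard labels Y, group indicators Z and scores Yhat as equally shaped 1-D numpy
# arrays. The toy arrays below are assumptions for demonstration only.
if __name__ == '__main__':
    Y_toy = np.array([1., 0., 1., 0., 1., 0.])
    Z_toy = np.array([0., 0., 0., 1., 1., 1.])
    Yhat_toy = np.array([0.9, 0.2, 0.4, 0.7, 0.8, 0.1])
    # returns a one-row frame with columns acc, DDP (demographic parity gap) and DEO (equalized odds gap)
    print(measures_from_Yhat(Y_toy, Z_toy, Yhat=Yhat_toy, threshold=0.5))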
| 1,205
| 29.923077
| 70
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/Fair_KDE/models.py
|
import torch
import torch.nn as nn
class Classifier(nn.Module):
def __init__(self, n_layers, n_inputs, n_hidden_units):
super(Classifier, self).__init__()
layers = []
if n_layers == 1: # Logistic Regression
layers.append(nn.Linear(n_inputs, 1))
layers.append(nn.Sigmoid())
else:
layers.append(nn.Linear(n_inputs, n_hidden_units))
layers.append(nn.ReLU())
for i in range(n_layers-2):
layers.append(nn.Linear(n_hidden_units, n_hidden_units))
layers.append(nn.ReLU())
layers.append(nn.Linear(n_hidden_units,1))
layers.append(nn.Sigmoid())
self.layers = nn.Sequential(*layers)
def forward(self, x):
x = self.layers(x)
return x
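# Illustrative sketch (added for clarity, not part of the original file): constructing the
# classifier above and running a forward pass on random inputs; the layer sizes and batch
# size are assumptions for demonstration only.
if __name__ == '__main__':
    net = Classifier(n_layers=2, n_inputs=10, n_hidden_units=20)
    x = torch.randn(4, 10)
    p = net(x)  # sigmoid outputs in (0, 1), shape (4, 1)
    print(p.shape, float(p.min()), float(p.max()))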
| 829
| 33.583333
| 72
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/Fair_KDE/fair_KDE_.py
|
# Baseline Fair KDE : https://proceedings.neurips.cc//paper/2020/file/ac3870fcad1cfc367825cda0101eee62-Paper.pdf
import cvxpy as cp
import numpy as np
import argparse
import pandas as pd
import torch
import fairness_metrics
import data_loader
from tqdm import tqdm
from collections import namedtuple
from sklearn.metrics import log_loss
from copy import deepcopy
import os, sys
import time
import random
import matplotlib.pyplot as plt
import torch.optim as optim
from models import Classifier
from dataloader import FairnessDataset
from algorithm import train_fair_classifier
import matplotlib.pyplot as plt
import torch.nn as nn
from torch.utils.data import DataLoader
from dataloader import CustomDataset
from utils import measures_from_Yhat
tau = 0.5
# Approximation of Q-function given by López-Benítez & Casadevall (2011) based on a second-order exponential function & Q(x) = 1- Q(-x):
a = 0.4920
b = 0.2887
c = 1.1893
Q_function = lambda x: torch.exp(-a*x**2 - b*x - c)
def CDF_tau(Yhat, h=0.01, tau=0.5):
m = len(Yhat)
Y_tilde = (tau-Yhat)/h
sum_ = torch.sum(Q_function(Y_tilde[Y_tilde>0])) \
+ torch.sum(1-Q_function(torch.abs(Y_tilde[Y_tilde<0]))) \
+ 0.5*(len(Y_tilde[Y_tilde==0]))
return sum_/m
def Huber_loss(x, delta):
if x.abs() < delta:
return (x ** 2) / 2
return delta * (x.abs() - delta / 2)
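# Illustrative check (added for clarity, not part of the original script): the exponential
# Q-function approximation above can be compared against the exact Gaussian tail
# Q(x) = 0.5 * erfc(x / sqrt(2)); the evaluation grid below is an assumption.
def _check_q_function_approximation():
    xs = torch.linspace(0.0, 3.0, 7)
    exact = 0.5 * torch.erfc(xs / torch.sqrt(torch.tensor(2.0)))
    approx = Q_function(xs)
    print(torch.stack((xs, exact, approx), dim=1))  # columns: x, exact Q(x), approximation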
# act on experiment parameters:
data_loader.set_seed(0)
gamma_candidates = np.logspace(-2, 2, num=10)
ds = data_loader.Compas()
ds.split_test()
k = ds.get_k()
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# storage of results
results_train = []
results_test = []
dataset_name = 'COMPAS' # ['Moon', 'Lawschool', 'AdultCensus', 'CreditDefault', 'COMPAS']
##### Which fairness notion to consider (Demographic Parity / Equalized Odds) #####
fairness = 'DP' # ['DP', 'EO']
##### Model specifications #####
n_layers = 2 # [positive integers]
n_hidden_units = 20 # [positive integers]
##### Our algorithm hyperparameters #####
h = 0.1 # Bandwidth hyperparameter in KDE [positive real numbers]
delta = 1.0 # Delta parameter in Huber loss [positive real numbers]
lambda_ = 0.05 # regularization factor of DDP/DEO; Positive real numbers \in [0.0, 1.0]
##### Other training hyperparameters #####
batch_size = 2048
lr = 2e-4
lr_decay = 1.0 # Exponential decay factor of LR scheduler
n_seeds = 5 # Number of random seeds to try
n_epochs = 200
seed = 5
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
##### Whether to enable GPU training or not
device = torch.device('cpu') # or torch.device('cuda') to train on GPU
# Import dataset
# dataset = FairnessDataset(dataset=dataset_name, device=device)
# dataset.normalize()
input_dim = k + 1
net = Classifier(n_layers=n_layers, n_inputs=input_dim, n_hidden_units=n_hidden_units)
net = net.to(device)
# Set an optimizer
optimizer = optim.Adam(net.parameters(), lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay) # None
# X, Y, A = ds.get_data()
# X_test, Y_test, A_test = ds.get_test_data()
# x_train = X.cpu().detach().numpy()
# Y_train = Y.cpu().detach().numpy().flatten()
# a_train = A.cpu().detach().numpy().flatten()
# x_test = X_test.cpu().detach().numpy()
# y_test = Y_test.cpu().detach().numpy().flatten()
# a_test = A_test.cpu().detach().numpy().flatten()
# train_tensors, test_tensors = dataset.get_dataset_in_tensor()
# X_train, Y_train, Z_train, XZ_train = train_tensors
# X_test, Y_test, Z_test, XZ_test = test_tensors
# Retrieve train/test splitted numpy arrays for index=split
# train_arrays, test_arrays = dataset.get_dataset_in_ndarray()
# X_train_np, Y_train_np, Z_train_np, XZ_train_np = train_arrays
# X_test_np, Y_test_np, Z_test_np, XZ_test_np = test_arrays
X_train, Y_train, Z_train = ds.get_data()
X_test, Y_test, Z_test = ds.get_test_data()
XZ_test = torch.cat([X_test, Z_test], 1)
XZ_train = torch.cat([X_train, Z_train], 1)
custom_dataset = CustomDataset(XZ_train, Y_train, Z_train)
if batch_size == 'full':
batch_size_ = XZ_train.shape[0]
elif isinstance(batch_size, int):
batch_size_ = batch_size
generator = DataLoader(custom_dataset, batch_size=batch_size_, shuffle=True)
pi = torch.tensor(np.pi).to(device)
phi = lambda x: torch.exp(-0.5*x**2)/torch.sqrt(2*pi) #normal distribution
# # An empty dataframe for logging experimental results
# df = pd.DataFrame()
# df_ckpt = pd.DataFrame()
loss_function = nn.BCELoss()
costs = []
results_test = []
results_train = []
for epoch in range(n_epochs):
for i, (xz_batch, y_batch, z_batch) in enumerate(generator):
xz_batch, y_batch, z_batch = xz_batch.to(device), y_batch.to(device), z_batch.to(device)
Yhat = net(xz_batch)
Ytilde = torch.round(Yhat.squeeze())
cost = 0
dtheta = 0
m = z_batch.shape[0]
# prediction loss
p_loss = loss_function(Yhat.squeeze(), y_batch.squeeze())
cost += (1 - lambda_) * p_loss
# DP_Constraint
if fairness == 'DP':
Pr_Ytilde1 = CDF_tau(Yhat.detach(), h, tau)
for z in range(1):
Pr_Ytilde1_Z = CDF_tau(Yhat.detach()[z_batch==z],h,tau)
m_z = z_batch[z_batch==z].shape[0]
Delta_z = Pr_Ytilde1_Z-Pr_Ytilde1
Delta_z_grad = torch.dot(phi((tau-Yhat.detach()[z_batch==z])/h).view(-1),
Yhat[z_batch==z].view(-1))/h/m_z
Delta_z_grad -= torch.dot(phi((tau-Yhat.detach())/h).view(-1),
Yhat.view(-1))/h/m
if Delta_z.abs() >= delta:
if Delta_z > 0:
Delta_z_grad *= lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= -lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= lambda_*Delta_z
cost += Delta_z_grad
# EO_Constraint
elif fairness == 'EO':
for y in [0,1]:
Pr_Ytilde1_Y = CDF_tau(Yhat[y_batch==y].detach(),h,tau)
m_y = y_batch[y_batch==y].shape[0]
for z in range(1):
Pr_Ytilde1_ZY = CDF_tau(Yhat[(y_batch==y) & (z_batch==z)].detach(),h,tau)
m_zy = z_batch[(y_batch==y) & (z_batch==z)].shape[0]
Delta_zy = Pr_Ytilde1_ZY-Pr_Ytilde1_Y
Delta_zy_grad = torch.dot(
phi((tau-Yhat[(y_batch==y) & (z_batch==z)].detach())/h).view(-1),
Yhat[(y_batch==y) & (z_batch==z)].view(-1)
)/h/m_zy
Delta_zy_grad -= torch.dot(
phi((tau-Yhat[y_batch==y].detach())/h).view(-1),
Yhat[y_batch==y].view(-1)
)/h/m_y
if Delta_zy.abs() >= delta:
if Delta_zy > 0:
Delta_zy_grad *= lambda_*delta
cost += Delta_zy_grad
else:
                            Delta_zy_grad *= -lambda_*delta
                            cost += Delta_zy_grad
else:
Delta_zy_grad *= lambda_*Delta_zy
cost += Delta_zy_grad
optimizer.zero_grad()
if (torch.isnan(cost)).any():
continue
cost.backward()
optimizer.step()
costs.append(cost.item())
# Print the cost per 10 batches
if (i + 1) % 10 == 0 or (i + 1) == len(generator):
print('Epoch [{}/{}], Batch [{}/{}], Cost: {:.4f}'.format(epoch+1, n_epochs,
i+1, len(generator),
cost.item()), end='\r')
if lr_scheduler is not None:
lr_scheduler.step()
def predict(XZ):
Y_hat_ = net(XZ)
Y_hat_[Y_hat_>=0.5] = 1
Y_hat_[Y_hat_ < 0.5] = 0
return Y_hat_
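# Illustrative sketch (added for clarity, not part of the original script): the same
# kernel-smoothed CDF used inside the DP constraint can be reused to report a soft
# demographic parity gap |Pr(Ytilde=1|Z=1) - Pr(Ytilde=1|Z=0)| for the trained network.
# Only this inspection step is an addition; CDF_tau, net, XZ_train and Z_train are the
# module's own objects.
with torch.no_grad():
    Yhat_all = net(XZ_train)
    soft_rate_1 = CDF_tau(Yhat_all[Z_train == 1], h, tau)
    soft_rate_0 = CDF_tau(Yhat_all[Z_train == 0], h, tau)
    print('kernel-smoothed DDP on the train set: {:.4f}'.format(abs(soft_rate_1 - soft_rate_0).item()))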
# metrics on train set
y_hat = predict(XZ_train).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[Z_train==1]
y_hat_0 = y_hat[Z_train==0]
y_1 = Y_train[Z_train==1]
y_0 = Y_train[Z_train==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = predict(XZ_test).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[Z_test==1]
y_hat_0 = y_hat[Z_test==0]
y_1 = Y_test[Z_test==1]
y_0 = Y_test[Z_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['lambda_'] = lambda_
test_results['lambda_'] = lambda_
results_train.append(train_results)
results_test.append(test_results)
# df_train = pd.DataFrame(data=results_train)
# df_test = pd.DataFrame(data=results_test)
# df_train.to_csv('results/{}_zafar_{}_train.csv'.format(args.dataset, 0))
# df_test.to_csv('results/{}_zafar_{}_test.csv'.format(args.dataset, 0))
| 9,960
| 34.830935
| 136
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/zafar_method/funcs_disp_mist.py
|
from __future__ import division
import os, sys
import traceback
import numpy as np
from random import seed, shuffle
from collections import defaultdict
from copy import deepcopy
import cvxpy
import dccp
from dccp.problem import is_dccp
from zafar_method import utils as ut
def train_model_disp_mist(x, y, x_control, loss_function, EPS, cons_params=None):
# cons_type, sensitive_attrs_to_cov_thresh, take_initial_sol, gamma, tau, mu, EPS, cons_type
"""
Function that trains the model subject to various fairness constraints.
If no constraints are given, then simply trains an unaltered classifier.
Example usage in: "disparate_mistreatment/synthetic_data_demo/decision_boundary_demo.py"
----
Inputs:
X: (n) x (d+1) numpy array -- n = number of examples, d = number of features, one feature is the intercept
y: 1-d numpy array (n entries)
x_control: dictionary of the type {"s": [...]}, key "s" is the sensitive feature name, and the value is a 1-d list with n elements holding the sensitive feature values
loss_function: the loss function that we want to optimize -- for now we have implementation of logistic loss, but other functions like hinge loss can also be added
EPS: stopping criteria for the convex solver. check the CVXPY documentation for details. default for CVXPY is 1e-6
cons_params: is None when we do not want to apply any constraints
otherwise: cons_params is a dict with keys as follows:
- cons_type:
- 0 for all misclassifications
- 1 for FPR
- 2 for FNR
- 4 for both FPR and FNR
- tau: DCCP parameter, controls how much weight to put on the constraints, if the constraints are not satisfied, then increase tau -- default is DCCP val 0.005
- mu: DCCP parameter, controls the multiplicative factor by which the tau increases in each DCCP iteration -- default is the DCCP val 1.2
- take_initial_sol: whether the starting point for DCCP should be the solution for the original (unconstrained) classifier -- default value is True
- sensitive_attrs_to_cov_thresh: covariance threshold for each cons_type, eg, key 1 contains the FPR covariance
----
Outputs:
w: the learned weight vector for the classifier
"""
max_iters = 100 # for the convex program
max_iter_dccp = 50 # for the dccp algo
num_points, num_features = x.shape
w = cvxpy.Variable(num_features) # this is the weight vector
# initialize a random value of w
w.value = np.random.rand(x.shape[1])
if cons_params is None: # just train a simple classifier, no fairness constraints
constraints = []
else:
constraints = get_constraint_list_cov(x, y, x_control, cons_params["sensitive_attrs_to_cov_thresh"],
cons_params["cons_type"], w)
if loss_function == "logreg":
# constructing the logistic loss problem
loss = cvxpy.sum(
            cvxpy.logistic(cvxpy.multiply(-y, x * w))) / num_points  # average logistic loss over all training points
# sometimes, its a good idea to give a starting point to the constrained solver
# this starting point for us is the solution to the unconstrained optimization problem
# another option of starting point could be any feasible solution
if cons_params is not None:
if cons_params.get("take_initial_sol") is None: # true by default
take_initial_sol = True
elif cons_params["take_initial_sol"] == False:
take_initial_sol = False
if take_initial_sol == True: # get the initial solution
p = cvxpy.Problem(cvxpy.Minimize(loss), [])
p.solve()
# construct the cvxpy problem
prob = cvxpy.Problem(cvxpy.Minimize(loss), constraints)
# print "\n\n"
# print "Problem is DCP (disciplined convex program):", prob.is_dcp()
# print "Problem is DCCP (disciplined convex-concave program):", is_dccp(prob)
try:
tau, mu = 0.005, 1.2 # default dccp parameters, need to be varied per dataset
if cons_params is not None: # in case we passed these parameters as a part of dccp constraints
if cons_params.get("tau") is not None: tau = cons_params["tau"]
if cons_params.get("mu") is not None: mu = cons_params["mu"]
if cons_params.get("gamma") is not None: gamma = cons_params["gamma"]
prob.solve(method='dccp', tau=tau, mu=mu, tau_max=1e10,
solver=cvxpy.ECOS, verbose=False,
feastol=EPS, abstol=EPS, reltol=EPS, feastol_inacc=EPS, abstol_inacc=EPS, reltol_inacc=EPS,
max_iters=max_iters, max_iter=max_iter_dccp)
assert (prob.status == "Converged" or prob.status == "optimal")
# print "Optimization done, problem status:", prob.status
except:
traceback.print_exc()
sys.stdout.flush()
sys.exit(1)
# check that the fairness constraint is satisfied
# for f_c in constraints:
# assert (
# f_c.value == True) # can comment this out if the solver fails too often, but make sure that the constraints are satisfied empirically. alternatively, consider increasing tau parameter
# pass
w = np.array(w.value).flatten() # flatten converts it to a 1d array
return w
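# Illustrative usage sketch (added for clarity, not part of the original module): how the
# cons_params dictionary for train_model_disp_mist might be assembled. The toy data, the
# zero covariance thresholds and EPS are assumptions for demonstration only; actually
# running this requires a working cvxpy + dccp installation and may be solver dependent.
def _demo_train_disp_mist():
    rng = np.random.RandomState(0)
    n, d = 200, 3
    x = np.hstack((np.ones((n, 1)), rng.randn(n, d)))      # intercept column + d features
    y = np.sign(rng.randn(n))                               # labels in {-1, +1}
    x_control = {"s1": (rng.rand(n) > 0.5).astype(float)}   # binary sensitive feature
    cons_params = {
        "cons_type": 1,                                      # 1 = constrain the FPR disparity
        "tau": 0.005,
        "mu": 1.2,
        "sensitive_attrs_to_cov_thresh": {"s1": {ct: {0: 0, 1: 0} for ct in [0, 1, 2]}},
    }
    return train_model_disp_mist(x, y, x_control, "logreg", EPS=1e-6, cons_params=cons_params)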
def get_clf_stats(w, x_train, y_train, x_control_train, x_test, y_test, x_control_test, sensitive_attrs):
assert (len(sensitive_attrs) == 1) # ensure that we have just one sensitive attribute
s_attr = "s1" # for now, lets compute the accuracy for just one sensitive attr
# compute distance from boundary
distances_boundary_train = get_distance_boundary(w, x_train, x_control_train[s_attr])
distances_boundary_test = get_distance_boundary(w, x_test, x_control_test[s_attr])
# compute the class labels
all_class_labels_assigned_train = np.sign(distances_boundary_train)
all_class_labels_assigned_test = np.sign(distances_boundary_test)
train_score, test_score, correct_answers_train, correct_answers_test = ut.check_accuracy(None, x_train, y_train,
x_test, y_test,
all_class_labels_assigned_train,
all_class_labels_assigned_test)
cov_all_train = {}
cov_all_test = {}
for s_attr in sensitive_attrs:
print_stats = False # we arent printing the stats for the train set to avoid clutter
# uncomment these lines to print stats for the train fold
# print "*** Train ***"
# print "Accuracy: %0.3f" % (train_score)
# print_stats = True
s_attr_to_fp_fn_train = get_fpr_fnr_sensitive_features(y_train, all_class_labels_assigned_train,
x_control_train, sensitive_attrs, print_stats)
cov_all_train[s_attr] = get_sensitive_attr_constraint_fpr_fnr_cov(None, x_train, y_train,
distances_boundary_train,
x_control_train[s_attr])
print_stats = True # only print stats for the test fold
s_attr_to_fp_fn_test = get_fpr_fnr_sensitive_features(y_test, all_class_labels_assigned_test, x_control_test,
sensitive_attrs, print_stats)
cov_all_test[s_attr] = get_sensitive_attr_constraint_fpr_fnr_cov(None, x_test, y_test, distances_boundary_test,
x_control_test[s_attr])
return train_score, test_score, cov_all_train, cov_all_test, s_attr_to_fp_fn_train, s_attr_to_fp_fn_test,\
all_class_labels_assigned_train, all_class_labels_assigned_test
def get_distance_boundary(w, x, s_attr_arr):
"""
if we have boundaries per group, then use those separate boundaries for each sensitive group
else, use the same weight vector for everything
"""
distances_boundary = np.zeros(x.shape[0])
if isinstance(w, dict): # if we have separate weight vectors per group
for k in w.keys(): # for each w corresponding to each sensitive group
d = np.dot(x, w[k])
distances_boundary[s_attr_arr == k] = d[
s_attr_arr == k] # set this distance only for people with this sensitive attr val
else: # we just learn one w for everyone else
distances_boundary = np.dot(x, w)
return distances_boundary
def get_constraint_list_cov(x_train, y_train, x_control_train, sensitive_attrs_to_cov_thresh, cons_type, w):
"""
get the list of constraints to be fed to the minimizer
cons_type == -1: means the whole combined misclassification constraint (without FNR or FPR)
cons_type == 1: FPR constraint
cons_type == 2: FNR constraint
cons_type == 4: both FPR as well as FNR constraints
sensitive_attrs_to_cov_thresh: is a dict like {s: {cov_type: val}}
s is the sensitive attr
cov_type is the covariance type. contains the covariance for all misclassifications, FPR and for FNR etc
"""
constraints = []
# print(sensitive_attrs_to_cov_thresh.keys())
for attr in ["s1"]:
attr_arr = x_control_train[attr]
attr_arr_transformed = attr_arr
s_val_to_total = {ct: {} for ct in [0, 1, 2]} # constrain type -> sens_attr_val -> total number
s_val_to_avg = {ct: {} for ct in [0, 1, 2]}
cons_sum_dict = {ct: {} for ct in
[0, 1, 2]} # sum of entities (females and males) in constraints are stored here
for v in set(attr_arr):
s_val_to_total[0][v] = sum(x_control_train[attr] == v)
s_val_to_total[1][v] = sum(np.logical_and(x_control_train[attr] == v,
y_train == -1)) # FPR constraint so we only consider the ground truth negative dataset for computing the covariance
s_val_to_total[2][v] = sum(np.logical_and(x_control_train[attr] == v, y_train == +1))
for ct in [0, 1, 2]:
s_val_to_avg[ct][0] = s_val_to_total[ct][1] / float(s_val_to_total[ct][0] + s_val_to_total[ct][
1]) # N1/N in our formulation, differs from one constraint type to another
s_val_to_avg[ct][1] = 1.0 - s_val_to_avg[ct][0] # N0/N
for v in set(attr_arr):
idx = x_control_train[attr] == v
#################################################################
# #DCCP constraints
dist_bound_prod = cvxpy.multiply(y_train[idx], x_train[idx] * w) # y.f(x)
cons_sum_dict[0][v] = cvxpy.sum(cvxpy.minimum(0, dist_bound_prod)) * (
s_val_to_avg[0][v] / len(x_train)) # avg misclassification distance from boundary
cons_sum_dict[1][v] = cvxpy.sum(
cvxpy.minimum(0, cvxpy.multiply((1 - y_train[idx]) / 2.0, dist_bound_prod))) * (
s_val_to_avg[1][v] / sum(
y_train == -1)) # avg false positive distance from boundary (only operates on the ground truth neg dataset)
cons_sum_dict[2][v] = cvxpy.sum(
cvxpy.minimum(0, cvxpy.multiply((1 + y_train[idx]) / 2.0, dist_bound_prod))) * (
s_val_to_avg[2][v] / sum(
y_train == +1)) # avg false negative distance from boundary
#################################################################
if cons_type == 4:
cts = [1, 2]
elif cons_type in [0, 1, 2]:
cts = [cons_type]
else:
raise Exception("Invalid constraint type")
#################################################################
# DCCP constraints
for ct in cts:
thresh = cvxpy.abs(sensitive_attrs_to_cov_thresh[attr][ct][1] - sensitive_attrs_to_cov_thresh[attr][ct][0])
constraints.append(cons_sum_dict[ct][1] <= cons_sum_dict[ct][0] + thresh)
constraints.append(cons_sum_dict[ct][1] >= cons_sum_dict[ct][0] - thresh)
#################################################################
return constraints
def get_fpr_fnr_sensitive_features(y_true, y_pred, x_control, sensitive_attrs, verbose=False):
# we will make some changes to x_control in this function, so make a copy in order to preserve the origianl referenced object
x_control_internal = deepcopy(x_control)
s_attr_to_fp_fn = {}
for s in sensitive_attrs:
s_attr_to_fp_fn[s] = {}
s_attr_vals = x_control_internal[s]
for s_val in sorted(list(set(s_attr_vals))):
s_attr_to_fp_fn[s][s_val] = {}
y_true_local = y_true[s_attr_vals == s_val]
y_pred_local = y_pred[s_attr_vals == s_val]
acc = float(sum(y_true_local == y_pred_local)) / len(y_true_local)
fp = sum(np.logical_and(y_true_local == -1,
y_pred_local == +1.0)) # something which is -ve but is misclassified as +ve
fn = sum(np.logical_and(y_true_local == +1.0,
y_pred_local == -1.0)) # something which is +ve but is misclassified as -ve
tp = sum(np.logical_and(y_true_local == +1.0,
y_pred_local == +1.0)) # something which is +ve AND is correctly classified as +ve
tn = sum(np.logical_and(y_true_local == -1,
y_pred_local == -1.0)) # something which is -ve AND is correctly classified as -ve
all_neg = sum(y_true_local == -1)
all_pos = sum(y_true_local == +1.0)
# fpr = float(fp) / (float(fp + tn))
# fnr = float(fn) / (float(fn + tp))
# tpr = float(tp) / (float(tp + fn) )
# tnr = float(tn) / (float(tn + fp))
fpr = 0.0 if float(fp + tn) == 0 else float(fp) / float(fp + tn)
fnr = 0.0 if float(fn + tp) == 0 else float(fn) / float(fn + tp)
tpr = 0.0 if float(tp + fn) == 0 else float(tp) / float(tp + fn)
tnr = 0.0 if float(tn + fp) == 0 else float(tn) / float(tn + fp)
s_attr_to_fp_fn[s][s_val]["fp"] = fp
s_attr_to_fp_fn[s][s_val]["fn"] = fn
s_attr_to_fp_fn[s][s_val]["fpr"] = fpr
s_attr_to_fp_fn[s][s_val]["fnr"] = fnr
s_attr_to_fp_fn[s][s_val]["acc"] = (tp + tn) / (tp + tn + fp + fn)
            if verbose == True:
                if isinstance(s_val, float): # print the int value of the sensitive attr val
                    s_val = int(s_val)
                print("s_attr: %s, s_val: %s, acc: %0.3f, fpr: %0.3f, fnr: %0.3f" % (s, s_val, acc, fpr, fnr))
return s_attr_to_fp_fn
def get_sensitive_attr_constraint_fpr_fnr_cov(model, x_arr, y_arr_true, y_arr_dist_boundary, x_control_arr,
verbose=False):
"""
Here we compute the covariance between sensitive attr val and ONLY misclassification distances from boundary for False-positives
(-N_1 / N) sum_0(min(0, y.f(x))) + (N_0 / N) sum_1(min(0, y.f(x))) for all misclassifications
(-N_1 / N) sum_0(min(0, (1-y)/2 . y.f(x))) + (N_0 / N) sum_1(min(0, (1-y)/2. y.f(x))) for FPR
y_arr_true are the true class labels
y_arr_dist_boundary are the predicted distances from the decision boundary
    If the model is None, we assume that the y_arr_dist_boundary contains the distance from the decision boundary
    If the model is not None, we just compute the dot product of model and x_arr
    for the case of SVM, we pass the distance from the boundary because the intercept is internalized for the class
    and we have to compute the distance using the project function
    this function will return -1 if the constraint specified by the thresh parameter is not satisfied
    otherwise it will return +1
if the return value is >=0, then the constraint is satisfied
"""
assert (x_arr.shape[0] == x_control_arr.shape[0])
if len(x_control_arr.shape) > 1: # make sure we just have one column in the array
assert (x_control_arr.shape[1] == 1)
if len(set(x_control_arr)) != 2: # non binary attr
raise Exception("Non binary attr, fix to handle non bin attrs")
arr = []
if model is None:
        arr = y_arr_dist_boundary * y_arr_true # margin y * f(x), computed from the provided signed distances
else:
arr = np.dot(model,
x_arr.T) * y_arr_true # the product with the weight vector -- the sign of this is the output label
arr = np.array(arr)
s_val_to_total = {ct: {} for ct in [0, 1, 2]}
s_val_to_avg = {ct: {} for ct in [0, 1, 2]}
cons_sum_dict = {ct: {} for ct in [0, 1, 2]} # sum of entities (females and males) in constraints are stored here
for v in set(x_control_arr):
s_val_to_total[0][v] = cvxpy.sum(x_control_arr == v)
s_val_to_total[1][v] = cvxpy.sum(np.logical_and(x_control_arr == v, y_arr_true == -1))
s_val_to_total[2][v] = cvxpy.sum(np.logical_and(x_control_arr == v, y_arr_true == +1))
for ct in [0, 1, 2]:
s_val_to_avg[ct][0] = s_val_to_total[ct][1] / (s_val_to_total[ct][0] + s_val_to_total[ct][1]) # N1 / N
s_val_to_avg[ct][1] = 1.0 - s_val_to_avg[ct][0] # N0 / N
for v in set(x_control_arr):
idx = x_control_arr == v
dist_bound_prod = arr[idx]
cons_sum_dict[0][v] = cvxpy.sum(np.minimum(0, dist_bound_prod)) * (s_val_to_avg[0][v] / len(x_arr))
cons_sum_dict[1][v] = cvxpy.sum(np.minimum(0, ((1 - y_arr_true[idx]) / 2) * dist_bound_prod)) * (
s_val_to_avg[1][v] / cvxpy.sum(y_arr_true == -1))
cons_sum_dict[2][v] = cvxpy.sum(np.minimum(0, ((1 + y_arr_true[idx]) / 2) * dist_bound_prod)) * (
s_val_to_avg[2][v] / cvxpy.sum(y_arr_true == + 1))
cons_type_to_name = {0: "ALL", 1: "FPR", 2: "FNR"}
for cons_type in [0, 1, 2]:
cov_type_name = cons_type_to_name[cons_type]
cov = cons_sum_dict[cons_type][1] - cons_sum_dict[cons_type][0]
return cons_sum_dict
| 18,367
| 49.185792
| 206
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/zafar_method/loss_funcs.py
|
import sys
import os
import numpy as np
import scipy.special
from collections import defaultdict
import traceback
from copy import deepcopy
def _hinge_loss(w, X, y):
yz = y * np.dot(X,w) # y * (x.w)
yz = np.maximum(np.zeros_like(yz), (1-yz)) # hinge function
return sum(yz)
def _logistic_loss(w, X, y, return_arr=None):
"""Computes the logistic loss.
This function is used from scikit-learn source code
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
yz = y * np.dot(X,w)
# Logistic loss is the negative of the log of the logistic function.
if return_arr == True:
out = -(log_logistic(yz))
else:
out = -np.sum(log_logistic(yz))
return out
def _logistic_loss_l2_reg(w, X, y, lam=None):
if lam is None:
lam = 1.0
yz = y * np.dot(X,w)
# Logistic loss is the negative of the log of the logistic function.
logistic_loss = -np.sum(log_logistic(yz))
l2_reg = (float(lam)/2.0) * np.sum([elem*elem for elem in w])
out = logistic_loss + l2_reg
return out
def log_logistic(X):
""" This function is used from scikit-learn source code. Source link below """
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
Source code at:
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
if X.ndim > 1: raise Exception("Array of samples cannot be more than 1-D!")
out = np.empty_like(X) # same dimensions and data types
idx = X>0
out[idx] = -np.log(1.0 + np.exp(-X[idx]))
out[~idx] = X[~idx] - np.log(1.0 + np.exp(X[~idx]))
return out
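# Illustrative check (added for clarity, not part of the original file): log_logistic is the
# numerically stable form of log(sigmoid(x)); the naive formula below overflows for large
# negative inputs while the split implementation above stays finite. The test points are
# assumptions for demonstration only.
if __name__ == '__main__':
    x_toy = np.array([-1000.0, -1.0, 0.0, 1.0, 1000.0])
    print(log_logistic(x_toy))                  # finite everywhere, e.g. -1000 at x = -1000
    with np.errstate(over='ignore'):
        print(-np.log(1.0 + np.exp(-x_toy)))    # naive version: -inf at x = -1000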
| 2,268
| 22.884211
| 82
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/zafar_method/loss_funcs_after.py
|
import sys
import os
import numpy as np
import scipy.special
from collections import defaultdict
import traceback
from copy import deepcopy
def _hinge_loss(w, X, y):
yz = y * np.dot(X, w) # y * (x.w)
yz = np.maximum(np.zeros_like(yz), (1 - yz)) # hinge function
return sum(yz)
def _logistic_loss(w, X, y, return_arr=None):
"""Computes the logistic loss.
This function is used from scikit-learn source code
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
yz = y * np.dot(X, w)
# Logistic loss is the negative of the log of the logistic function.
if return_arr == True:
out = -(log_logistic(yz))
else:
out = -np.sum(log_logistic(yz))
return out
def _logistic_loss_l2_reg(w, X, y, lam=None):
if lam is None:
lam = 1.0
yz = y * np.dot(X, w)
# Logistic loss is the negative of the log of the logistic function.
logistic_loss = -np.sum(log_logistic(yz))
l2_reg = (float(lam) / 2.0) * np.sum([elem * elem for elem in w])
out = logistic_loss + l2_reg
return out
def log_logistic(X):
""" This function is used from scikit-learn source code. Source link below """
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
Source code at:
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
if X.ndim > 1: raise Exception("Array of samples cannot be more than 1-D!")
out = np.empty_like(X) # same dimensions and data types
    idx = X > 0
out[idx] = -np.log(1.0 + np.exp(-X[idx]))
out[~idx] = X[~idx] - np.log(1.0 + np.exp(X[~idx]))
return out
| 2,356
| 27.743902
| 82
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/zafar_method/utils.py
|
import numpy as np
from random import seed, shuffle
from zafar_method import loss_funcs as lf # our implementation of loss funcs
from scipy.optimize import minimize # for loss func minimization
from multiprocessing import Pool, Process, Queue
from collections import defaultdict
from copy import deepcopy
import matplotlib.pyplot as plt # for plotting stuff
import sys
def train_model(x, y, x_control, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma=None):
"""
Function that trains the model subject to various fairness constraints.
If no constraints are given, then simply trains an unaltered classifier.
Example usage in: "synthetic_data_demo/decision_boundary_demo.py"
----
Inputs:
X: (n) x (d+1) numpy array -- n = number of examples, d = number of features, one feature is the intercept
y: 1-d numpy array (n entries)
x_control: dictionary of the type {"s": [...]}, key "s" is the sensitive feature name, and the value is a 1-d list with n elements holding the sensitive feature values
loss_function: the loss function that we want to optimize -- for now we have implementation of logistic loss, but other functions like hinge loss can also be added
apply_fairness_constraints: optimize accuracy subject to fairness constraint (0/1 values)
apply_accuracy_constraint: optimize fairness subject to accuracy constraint (0/1 values)
sep_constraint: apply the fine grained accuracy constraint
for details, see Section 3.3 of arxiv.org/abs/1507.05259v3
For examples on how to apply these constraints, see "synthetic_data_demo/decision_boundary_demo.py"
Note: both apply_fairness_constraints and apply_accuracy_constraint cannot be 1 at the same time
sensitive_attrs: ["s1", "s2", ...], list of sensitive features for which to apply fairness constraint, all of these sensitive features should have a corresponding array in x_control
sensitive_attrs_to_cov_thresh: the covariance threshold that the classifier should achieve (this is only needed when apply_fairness_constraints=1, not needed for the other two constraints)
gamma: controls the loss in accuracy we are willing to incur when using apply_accuracy_constraint and sep_constraint
----
Outputs:
w: the learned weight vector for the classifier
"""
assert((apply_accuracy_constraint == 1 and apply_fairness_constraints == 1) == False) # both constraints cannot be applied at the same time
max_iter = 100000 # maximum number of iterations for the minimization algorithm
if apply_fairness_constraints == 0:
constraints = []
else:
constraints = get_constraint_list_cov(x, y, x_control, sensitive_attrs, sensitive_attrs_to_cov_thresh)
if apply_accuracy_constraint == 0: #its not the reverse problem, just train w with cross cov constraints
f_args=(x, y)
w = minimize(fun = loss_function,
x0 = np.random.rand(x.shape[1],),
args = f_args,
method = 'SLSQP',
options = {"maxiter":max_iter},
constraints = constraints
)
else:
# train on just the loss function
w = minimize(fun = loss_function,
x0 = np.random.rand(x.shape[1],),
args = (x, y),
method = 'SLSQP',
options = {"maxiter":max_iter},
constraints = []
)
old_w = deepcopy(w.x)
def constraint_gamma_all(w, x, y, initial_loss_arr):
gamma_arr = np.ones_like(y) * gamma # set gamma for everyone
new_loss = loss_function(w, x, y)
old_loss = sum(initial_loss_arr)
return ((1.0 + gamma) * old_loss) - new_loss
def constraint_protected_people(w,x,y): # dont confuse the protected here with the sensitive feature protected/non-protected values -- protected here means that these points should not be misclassified to negative class
return np.dot(w, x.T) # if this is positive, the constraint is satisfied
def constraint_unprotected_people(w,ind,old_loss,x,y):
new_loss = loss_function(w, np.array([x]), np.array(y))
return ((1.0 + gamma) * old_loss) - new_loss
constraints = []
predicted_labels = np.sign(np.dot(w.x, x.T))
unconstrained_loss_arr = loss_function(w.x, x, y, return_arr=True)
if sep_constraint == True: # separate gemma for different people
for i in range(0, len(predicted_labels)):
if predicted_labels[i] == 1.0 and x_control[sensitive_attrs[0]][i] == 1.0: # for now we are assuming just one sensitive attr for reverse constraint, later, extend the code to take into account multiple sensitive attrs
c = ({'type': 'ineq', 'fun': constraint_protected_people, 'args':(x[i], y[i])}) # this constraint makes sure that these people stay in the positive class even in the modified classifier
constraints.append(c)
else:
c = ({'type': 'ineq', 'fun': constraint_unprotected_people, 'args':(i, unconstrained_loss_arr[i], x[i], y[i])})
constraints.append(c)
else: # same gamma for everyone
c = ({'type': 'ineq', 'fun': constraint_gamma_all, 'args':(x,y,unconstrained_loss_arr)})
constraints.append(c)
def cross_cov_abs_optm_func(weight_vec, x_in, x_control_in_arr):
cross_cov = (x_control_in_arr - np.mean(x_control_in_arr)) * np.dot(weight_vec, x_in.T)
return float(abs(sum(cross_cov))) / float(x_in.shape[0])
w = minimize(fun = cross_cov_abs_optm_func,
x0 = old_w,
args = (x, x_control[sensitive_attrs[0]]),
method = 'SLSQP',
options = {"maxiter":100000},
constraints = constraints
)
try:
assert(w.success == True)
except:
print("Optimization problem did not converge.. Check the solution returned by the optimizer.")
print("Returned solution is:")
print(w)
return w.x
def compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh_arr, gamma):
"""
Computes the cross validation error for the classifier subject to various fairness constraints
This function is just a wrapper of "train_model(...)", all inputs (except for num_folds) are the same. See the specifications of train_model(...) for more info.
Returns lists of train/test accuracy (with each list holding values for all folds), the fractions of various sensitive groups in positive class (for train and test sets), and covariance between sensitive feature and distance from decision boundary (again, for both train and test folds).
"""
train_folds = []
test_folds = []
n_samples = len(y_all)
train_fold_size = 0.7 # the rest of 0.3 is for testing
# split the data into folds for cross-validation
for i in range(0,num_folds):
        perm = list(range(0, n_samples)) # shuffle the data before creating each fold (a list is needed so shuffle can modify it in place)
shuffle(perm)
x_all_perm = x_all[perm]
y_all_perm = y_all[perm]
x_control_all_perm = {}
for k in x_control_all.keys():
x_control_all_perm[k] = np.array(x_control_all[k])[perm]
x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test = split_into_train_test(x_all_perm, y_all_perm, x_control_all_perm, train_fold_size)
train_folds.append([x_all_train, y_all_train, x_control_all_train])
test_folds.append([x_all_test, y_all_test, x_control_all_test])
def train_test_single_fold(train_data, test_data, fold_num, output_folds, sensitive_attrs_to_cov_thresh):
x_train, y_train, x_control_train = train_data
x_test, y_test, x_control_test = test_data
w = train_model(x_train, y_train, x_control_train, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
train_score, test_score, correct_answers_train, correct_answers_test = check_accuracy(w, x_train, y_train, x_test, y_test, None, None)
distances_boundary_test = (np.dot(x_test, w)).tolist()
all_class_labels_assigned_test = np.sign(distances_boundary_test)
correlation_dict_test = get_correlations(None, None, all_class_labels_assigned_test, x_control_test, sensitive_attrs)
cov_dict_test = print_covariance_sensitive_attrs(None, x_test, distances_boundary_test, x_control_test, sensitive_attrs)
distances_boundary_train = (np.dot(x_train, w)).tolist()
all_class_labels_assigned_train = np.sign(distances_boundary_train)
correlation_dict_train = get_correlations(None, None, all_class_labels_assigned_train, x_control_train, sensitive_attrs)
cov_dict_train = print_covariance_sensitive_attrs(None, x_train, distances_boundary_train, x_control_train, sensitive_attrs)
output_folds.put([fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test, cov_dict_train])
return
output_folds = Queue()
processes = [Process(target=train_test_single_fold, args=(train_folds[x], test_folds[x], x, output_folds, sensitive_attrs_to_cov_thresh_arr[x])) for x in range(num_folds)]
# Run processes
for p in processes:
p.start()
    # Get the results
results = [output_folds.get() for p in processes]
for p in processes:
p.join()
test_acc_arr = []
train_acc_arr = []
correlation_dict_test_arr = []
correlation_dict_train_arr = []
cov_dict_test_arr = []
cov_dict_train_arr = []
results = sorted(results, key = lambda x : x[0]) # sort w.r.t fold num
for res in results:
fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test, cov_dict_train = res
test_acc_arr.append(test_score)
train_acc_arr.append(train_score)
correlation_dict_test_arr.append(correlation_dict_test)
correlation_dict_train_arr.append(correlation_dict_train)
cov_dict_test_arr.append(cov_dict_test)
cov_dict_train_arr.append(cov_dict_train)
return test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr
def print_classifier_fairness_stats(acc_arr, correlation_dict_arr, cov_dict_arr, s_attr_name):
correlation_dict = get_avg_correlation_dict(correlation_dict_arr)
non_prot_pos = correlation_dict[s_attr_name][1][1]
prot_pos = correlation_dict[s_attr_name][0][1]
p_rule = (prot_pos / non_prot_pos) * 100.0
print("Accuracy: %0.2f" % (np.mean(acc_arr)))
print("Protected/non-protected in +ve class: %0.0f%% / %0.0f%%" % (prot_pos, non_prot_pos))
print("P-rule achieved: %0.0f%%" % (p_rule))
print("Covariance between sensitive feature and decision from distance boundary : %0.3f" % (np.mean([v[s_attr_name] for v in cov_dict_arr])))
return p_rule
def compute_p_rule(x_control, class_labels):
""" Compute the p-rule based on Doctrine of disparate impact """
non_prot_all = sum(x_control == 1.0) # non-protected group
prot_all = sum(x_control == 0.0) # protected group
non_prot_pos = sum(class_labels[x_control == 1.0] == 1.0) # non_protected in positive class
prot_pos = sum(class_labels[x_control == 0.0] == 1.0) # protected in positive class
frac_non_prot_pos = float(non_prot_pos) / float(non_prot_all)
frac_prot_pos = float(prot_pos) / float(prot_all)
p_rule = (frac_prot_pos / frac_non_prot_pos) * 100.0
print("Total data points: %d" % (len(x_control)))
print("# non-protected examples: %d" % (non_prot_all))
print("# protected examples: %d" % (prot_all))
print("Non-protected in positive class: %d (%0.0f%%)" % (non_prot_pos, non_prot_pos * 100.0 / non_prot_all))
print("Protected in positive class: %d (%0.0f%%)" % (prot_pos, prot_pos * 100.0 / prot_all))
print("P-rule is: %0.0f%%" % ( p_rule ))
    return p_rule
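# Illustrative sketch (added for clarity, not part of the original module): compute_p_rule on
# a toy example in which the protected group (x_control == 0) lands in the positive class half
# as often as the non-protected group, giving a p-rule of 50%. The arrays are assumptions.
def _demo_compute_p_rule():
    x_control_toy = np.array([0., 0., 0., 0., 1., 1., 1., 1.])
    class_labels_toy = np.array([1., -1., -1., -1., 1., 1., -1., -1.])
    return compute_p_rule(x_control_toy, class_labels_toy)  # (1/4) / (2/4) * 100 = 50%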
def add_intercept(x):
""" Add intercept to the data before linear classification """
m,n = x.shape
intercept = np.ones(m).reshape(m, 1) # the constant b
return np.concatenate((intercept, x), axis = 1)
def check_binary(arr):
"give an array of values, see if the values are only 0 and 1"
s = sorted(set(arr))
if s[0] == 0 and s[1] == 1:
return True
else:
return False
def get_one_hot_encoding(in_arr):
"""
input: 1-D arr with int vals -- if not int vals, will raise an error
output: m (ndarray): one-hot encoded matrix
d (dict): also returns a dictionary original_val -> column in encoded matrix
"""
for k in in_arr:
        if not isinstance(k, (int, np.integer, np.floating)): # accept python ints and numpy integer/float types
print(str(type(k)))
print("************* ERROR: Input arr does not have integer types")
return None
in_arr = np.array(in_arr, dtype=int)
assert(len(in_arr.shape)==1) # no column, means it was a 1-D arr
attr_vals_uniq_sorted = sorted(list(set(in_arr)))
num_uniq_vals = len(attr_vals_uniq_sorted)
if (num_uniq_vals == 2) and (attr_vals_uniq_sorted[0] == 0 and attr_vals_uniq_sorted[1] == 1):
return in_arr, None
index_dict = {} # value to the column number
for i in range(0,len(attr_vals_uniq_sorted)):
val = attr_vals_uniq_sorted[i]
index_dict[val] = i
out_arr = []
for i in range(0,len(in_arr)):
tup = np.zeros(num_uniq_vals)
val = in_arr[i]
ind = index_dict[val]
tup[ind] = 1 # set that value of tuple to 1
out_arr.append(tup)
return np.array(out_arr), index_dict
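# Illustrative sketch (added for clarity, not part of the original module): a 3-valued
# attribute is expanded into an (n x 3) indicator matrix plus a value-to-column mapping,
# while a 0/1 attribute is passed through unchanged. The toy arrays are assumptions.
def _demo_one_hot_encoding():
    m, d = get_one_hot_encoding(np.array([0, 2, 1, 2]))
    print(m)   # rows are indicator vectors, columns ordered by the sorted unique values
    print(d)   # {0: 0, 1: 1, 2: 2}
    m_bin, d_bin = get_one_hot_encoding(np.array([0, 1, 1, 0]))
    print(m_bin, d_bin)  # binary attribute is returned as-is, with d_bin None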
def check_accuracy(model, x_train, y_train, x_test, y_test, y_train_predicted, y_test_predicted):
"""
returns the train/test accuracy of the model
we either pass the model (w)
else we pass y_predicted
"""
if model is not None and y_test_predicted is not None:
print("Either the model (w) or the predicted labels should be None")
raise Exception("Either the model (w) or the predicted labels should be None")
if model is not None:
y_test_predicted = np.sign(np.dot(x_test, model))
y_train_predicted = np.sign(np.dot(x_train, model))
def get_accuracy(y, Y_predicted):
correct_answers = (Y_predicted == y).astype(int) # will have 1 when the prediction and the actual label match
accuracy = float(sum(correct_answers)) / float(len(correct_answers))
return accuracy, sum(correct_answers)
train_score, correct_answers_train = get_accuracy(y_train, y_train_predicted)
test_score, correct_answers_test = get_accuracy(y_test, y_test_predicted)
return train_score, test_score, correct_answers_train, correct_answers_test
def test_sensitive_attr_constraint_cov(model, x_arr, y_arr_dist_boundary, x_control, thresh, verbose):
"""
The covariance is computed b/w the sensitive attr val and the distance from the boundary
    If the model is None, we assume that the y_arr_dist_boundary contains the distance from the decision boundary
    If the model is not None, we just compute the dot product of model and x_arr
    for the case of SVM, we pass the distance from the boundary because the intercept is internalized for the class
    and we have to compute the distance using the project function
    this function will return a negative value if the constraint specified by the thresh parameter is not satisfied
    otherwise it will return a non-negative value
if the return value is >=0, then the constraint is satisfied
"""
assert(x_arr.shape[0] == x_control.shape[0])
if len(x_control.shape) > 1: # make sure we just have one column in the array
assert(x_control.shape[1] == 1)
arr = []
if model is None:
arr = y_arr_dist_boundary # simply the output labels
else:
arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label
arr = np.array(arr, dtype=np.float64)
cov = np.dot(x_control - np.mean(x_control), arr ) / float(len(x_control))
ans = thresh - abs(cov) # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied
# ans = thresh - cov # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied
if verbose is True:
print("Covariance is", cov)
print("Diff is:", ans)
        print()
return ans
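# Illustrative sketch (added for clarity, not part of the original module): the covariance
# constraint above on toy data. With a weight vector whose decision values are perfectly
# aligned with the sensitive attribute the covariance is large, so thresh - |cov| goes
# negative (constraint violated). The numbers below are assumptions for demonstration only.
def _demo_cov_constraint():
    x_arr = np.array([[1., 2.], [1., -2.], [1., 2.], [1., -2.]])
    x_control_toy = np.array([1., 0., 1., 0.])  # sensitive attribute follows the second feature
    w_toy = np.array([0., 1.])                  # decision value equals the second feature
    return test_sensitive_attr_constraint_cov(w_toy, x_arr, None, x_control_toy, 0.1, True)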
def print_covariance_sensitive_attrs(model, x_arr, y_arr_dist_boundary, x_control, sensitive_attrs):
"""
reutrns the covariance between sensitive features and distance from decision boundary
"""
arr = []
if model is None:
        arr = y_arr_dist_boundary # simply the distances from the decision boundary
else:
arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label
sensitive_attrs_to_cov_original = {}
for attr in sensitive_attrs:
attr_arr = x_control[attr]
bin_attr = check_binary(attr_arr) # check if the attribute is binary (0/1), or has more than 2 vals
if bin_attr == False: # if its a non-binary sensitive feature, then perform one-hot-encoding
attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)
thresh = 0
if bin_attr:
cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, np.array(attr_arr), thresh, False)
sensitive_attrs_to_cov_original[attr] = cov
else: # sensitive feature has more than 2 categorical values
cov_arr = []
sensitive_attrs_to_cov_original[attr] = {}
for attr_val, ind in index_dict.items():
t = attr_arr_transformed[:,ind]
cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, t, thresh, False)
sensitive_attrs_to_cov_original[attr][attr_val] = cov
cov_arr.append(abs(cov))
cov = max(cov_arr)
return sensitive_attrs_to_cov_original
def get_correlations(model, x_test, y_predicted, x_control_test, sensitive_attrs):
"""
returns the fraction in positive class for sensitive feature values
"""
if model is not None:
y_predicted = np.sign(np.dot(x_test, model))
y_predicted = np.array(y_predicted)
out_dict = {}
for attr in sensitive_attrs:
attr_val = []
for v in x_control_test[attr]: attr_val.append(v)
assert(len(attr_val) == len(y_predicted))
total_per_val = defaultdict(int)
attr_to_class_labels_dict = defaultdict(lambda: defaultdict(int))
for i in range(0, len(y_predicted)):
val = attr_val[i]
label = y_predicted[i]
# val = attr_val_int_mapping_dict_reversed[val] # change values from intgers to actual names
total_per_val[val] += 1
attr_to_class_labels_dict[val][label] += 1
class_labels = set(y_predicted.tolist())
local_dict_1 = {}
for k1,v1 in attr_to_class_labels_dict.items():
total_this_val = total_per_val[k1]
local_dict_2 = {}
for k2 in class_labels: # the order should be the same for printing
v2 = v1[k2]
f = float(v2) * 100.0 / float(total_this_val)
local_dict_2[k2] = f
local_dict_1[k1] = local_dict_2
out_dict[attr] = local_dict_1
return out_dict
def get_constraint_list_cov(x_train, y_train, x_control_train, sensitive_attrs, sensitive_attrs_to_cov_thresh):
"""
get the list of constraints to be fed to the minimizer
"""
constraints = []
for attr in sensitive_attrs:
attr_arr = x_control_train[attr]
attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)
if index_dict is None: # binary attribute
thresh = sensitive_attrs_to_cov_thresh[attr]
c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov, 'args':(x_train, y_train, attr_arr_transformed,thresh, False)})
constraints.append(c)
else: # otherwise, its a categorical attribute, so we need to set the cov thresh for each value separately
for attr_val, ind in index_dict.items():
attr_name = attr_val
thresh = sensitive_attrs_to_cov_thresh[attr][attr_name]
t = attr_arr_transformed[:,ind]
c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov, 'args':(x_train, y_train, t ,thresh, False)})
constraints.append(c)
return constraints
def split_into_train_test(x_all, y_all, x_control_all, train_fold_size):
split_point = int(round(float(x_all.shape[0]) * train_fold_size))
x_all_train = x_all[:split_point]
x_all_test = x_all[split_point:]
y_all_train = y_all[:split_point]
y_all_test = y_all[split_point:]
x_control_all_train = {}
x_control_all_test = {}
for k in x_control_all.keys():
x_control_all_train[k] = x_control_all[k][:split_point]
x_control_all_test[k] = x_control_all[k][split_point:]
return x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test
def get_avg_correlation_dict(correlation_dict_arr):
# make the structure for the correlation dict
correlation_dict_avg = {}
# print correlation_dict_arr
for k,v in correlation_dict_arr[0].items():
correlation_dict_avg[k] = {}
for feature_val, feature_dict in v.items():
correlation_dict_avg[k][feature_val] = {}
for class_label, frac_class in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label] = []
# populate the correlation dict
for correlation_dict in correlation_dict_arr:
for k,v in correlation_dict.items():
for feature_val, feature_dict in v.items():
for class_label, frac_class in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label].append(frac_class)
# now take the averages
for k,v in correlation_dict_avg.items():
for feature_val, feature_dict in v.items():
for class_label, frac_class_arr in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label] = np.mean(frac_class_arr)
return correlation_dict_avg
def plot_cov_thresh_vs_acc_pos_ratio(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs):
    # vary the covariance threshold using a range of decreasing multiplicative factors and see the tradeoffs between accuracy and fairness
it = 0.05
cov_range = np.arange(1.0, 0.0-it, -it).tolist()
if apply_accuracy_constraint == True:
if sep_constraint == False:
it = 0.1
cov_range = np.arange(0.0, 1.0 + it, it).tolist()
if sep_constraint == True:
cov_range = [0,1,5,10,20,50,100,500,1000]
positive_class_label = 1 # positive class is +1
train_acc = []
test_acc = []
positive_per_category = defaultdict(list) # for each category (male / female), the frac of positive
# first get the original values of covariance in the unconstrained classifier -- these original values are not needed for reverse constraint
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, 0, apply_accuracy_constraint, sep_constraint, sensitive_attrs, [{} for i in range(0,num_folds)], 0)
for c in cov_range:
print("LOG: testing for multiplicative factor: %0.2f" % c)
sensitive_attrs_to_cov_original_arr_multiplied = []
for sensitive_attrs_to_cov_original in cov_dict_train_arr:
sensitive_attrs_to_cov_thresh = deepcopy(sensitive_attrs_to_cov_original)
for k in sensitive_attrs_to_cov_thresh.keys():
v = sensitive_attrs_to_cov_thresh[k]
if type(v) == type({}):
for k1 in v.keys():
v[k1] = v[k1] * c
else:
sensitive_attrs_to_cov_thresh[k] = v * c
sensitive_attrs_to_cov_original_arr_multiplied.append(sensitive_attrs_to_cov_thresh)
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_original_arr_multiplied, c)
test_acc.append(np.mean(test_acc_arr))
correlation_dict_train = get_avg_correlation_dict(correlation_dict_train_arr)
correlation_dict_test = get_avg_correlation_dict(correlation_dict_test_arr)
        # just plot the correlations for the first sensitive attr; the plotting can be extended to the other values, but as a proof of concept, we will just show it for one
s = sensitive_attrs[0]
for k,v in correlation_dict_test[s].items():
if v.get(positive_class_label) is None:
positive_per_category[k].append(0.0)
else:
positive_per_category[k].append(v[positive_class_label])
positive_per_category = dict(positive_per_category)
p_rule_arr = (np.array(positive_per_category[0]) / np.array(positive_per_category[1])) * 100.0
ax = plt.subplot(2,1,1)
plt.plot(cov_range, positive_per_category[0], "-o" , color="green", label = "Protected")
plt.plot(cov_range, positive_per_category[1], "-o", color="blue", label = "Non-protected")
ax.set_xlim([min(cov_range), max(cov_range)])
plt.xlabel('Multiplicative loss factor')
plt.ylabel('Perc. in positive class')
if apply_accuracy_constraint == False:
plt.gca().invert_xaxis()
plt.xlabel('Multiplicative covariance factor (c)')
ax.legend()
ax = plt.subplot(2,1,2)
plt.scatter(p_rule_arr, test_acc, color="red")
ax.set_xlim([min(p_rule_arr), max(max(p_rule_arr), 100)])
plt.xlabel('P% rule')
plt.ylabel('Accuracy')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
plt.show()
def get_line_coordinates(w, x1, x2):
y1 = (-w[0] - (w[1] * x1)) / w[2]
y2 = (-w[0] - (w[1] * x2)) / w[2]
return y1,y2
| 27,182
| 41.606583
| 357
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/zafar_method/funcs_disp_mist_after.py
|
from __future__ import division
import os, sys
import traceback
import numpy as np
from random import seed, shuffle
from collections import defaultdict
from copy import deepcopy
import cvxpy
import dccp
from dccp.problem import is_dccp
import utils as ut
SEED = 1122334455
seed(SEED) # set the random seed so that the random permutations can be reproduced again
np.random.seed(SEED)
def train_model_disp_mist(x, y, x_control, loss_function, EPS, cons_params=None):
# cons_type, sensitive_attrs_to_cov_thresh, take_initial_sol, gamma, tau, mu, EPS, cons_type
"""
Function that trains the model subject to various fairness constraints.
If no constraints are given, then simply trains an unaltered classifier.
Example usage in: "disparate_mistreatment/synthetic_data_demo/decision_boundary_demo.py"
----
Inputs:
X: (n) x (d+1) numpy array -- n = number of examples, d = number of features, one feature is the intercept
y: 1-d numpy array (n entries)
x_control: dictionary of the type {"s": [...]}, key "s" is the sensitive feature name, and the value is a 1-d list with n elements holding the sensitive feature values
loss_function: the loss function that we want to optimize -- for now we have implementation of logistic loss, but other functions like hinge loss can also be added
EPS: stopping criteria for the convex solver. check the CVXPY documentation for details. default for CVXPY is 1e-6
cons_params: is None when we do not want to apply any constraints
otherwise: cons_params is a dict with keys as follows:
- cons_type:
- 0 for all misclassifications
- 1 for FPR
- 2 for FNR
- 4 for both FPR and FNR
- tau: DCCP parameter, controls how much weight to put on the constraints, if the constraints are not satisfied, then increase tau -- default is DCCP val 0.005
- mu: DCCP parameter, controls the multiplicative factor by which the tau increases in each DCCP iteration -- default is the DCCP val 1.2
- take_initial_sol: whether the starting point for DCCP should be the solution for the original (unconstrained) classifier -- default value is True
- sensitive_attrs_to_cov_thresh: covariance threshold for each cons_type, eg, key 1 contains the FPR covariance
----
Outputs:
w: the learned weight vector for the classifier
"""
max_iters = 100 # for the convex program
max_iter_dccp = 50 # for the dccp algo
num_points, num_features = x.shape
w = cvxpy.Variable(num_features) # this is the weight vector
# initialize a random value of w
np.random.seed(112233)
w.value = np.random.rand(x.shape[1])
if cons_params is None: # just train a simple classifier, no fairness constraints
constraints = []
else:
constraints = get_constraint_list_cov(x, y, x_control, cons_params["sensitive_attrs_to_cov_thresh"],
cons_params["cons_type"], w)
if loss_function == "logreg":
# constructing the logistic loss problem
loss = cvxpy.sum(
cvxpy.logistic(cvxpy.multiply(-y, x * w))) / num_points # average logistic loss; multiply(-y, x * w) gives each sample's negated margin
# sometimes, its a good idea to give a starting point to the constrained solver
# this starting point for us is the solution to the unconstrained optimization problem
# another option of starting point could be any feasible solution
if cons_params is not None:
if cons_params.get("take_initial_sol") is None: # true by default
take_initial_sol = True
elif cons_params["take_initial_sol"] == False:
take_initial_sol = False
if take_initial_sol == True: # get the initial solution
p = cvxpy.Problem(cvxpy.Minimize(loss), [])
p.solve()
# construct the cvxpy problem
prob = cvxpy.Problem(cvxpy.Minimize(loss), constraints)
try:
tau, mu = 0.005, 1.2 # default dccp parameters, need to be varied per dataset
if cons_params is not None: # in case we passed these parameters as a part of dccp constraints
if cons_params.get("tau") is not None: tau = cons_params["tau"]
if cons_params.get("mu") is not None: mu = cons_params["mu"]
prob.solve(method='dccp', tau=tau, mu=mu, tau_max=1e10,
solver=cvxpy.ECOS, verbose=False,
feastol=EPS, abstol=EPS, reltol=EPS, feastol_inacc=EPS, abstol_inacc=EPS, reltol_inacc=EPS,
max_iters=max_iters, max_iter=max_iter_dccp)
assert (prob.status == "Converged" or prob.status == "optimal")
# print "Optimization done, problem status:", prob.status
except:
traceback.print_exc()
sys.stdout.flush()
sys.exit(1)
# # check that the fairness constraint is satisfied
# for f_c in constraints:
# assert (f_c.value == True) # can comment this out if the solver fails too often, but make sure that the constraints are satisfied empirically. alternatively, consider increasing tau parameter
# pass
w = np.array(w.value).flatten() # flatten converts it to a 1d array
return w
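# Usage sketch (hypothetical threshold values; in the experiments the covariance thresholds are
# typically derived from an unconstrained classifier, e.g. via get_clf_stats below):
# thresh = {"s1": {0: {0: 0.0, 1: 0.0}, 1: {0: 0.0, 1: 0.0}, 2: {0: 0.0, 1: 0.0}}}
# cons_params = {"cons_type": 1, "tau": 0.005, "mu": 1.2, "sensitive_attrs_to_cov_thresh": thresh}
# w = train_model_disp_mist(x, y, x_control, "logreg", 1e-6, cons_params=cons_params)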
def get_clf_stats(w, x_train, y_train, x_control_train, x_test, y_test, x_control_test, sensitive_attrs):
assert (len(sensitive_attrs) == 1) # ensure that we have just one sensitive attribute
s_attr = "s1" # for now, lets compute the accuracy for just one sensitive attr
# compute distance from boundary
distances_boundary_train = get_distance_boundary(w, x_train, x_control_train[s_attr])
distances_boundary_test = get_distance_boundary(w, x_test, x_control_test[s_attr])
# compute the class labels
all_class_labels_assigned_train = np.sign(distances_boundary_train)
all_class_labels_assigned_test = np.sign(distances_boundary_test)
train_score, test_score, correct_answers_train, correct_answers_test = ut.check_accuracy(None, x_train, y_train,
x_test, y_test,
all_class_labels_assigned_train,
all_class_labels_assigned_test)
cov_all_train = {}
cov_all_test = {}
for s_attr in sensitive_attrs:
print_stats = False # we aren't printing the stats for the train set to avoid clutter
# uncomment these lines to print stats for the train fold
# print "*** Train ***"
# print "Accuracy: %0.3f" % (train_score)
# print_stats = True
s_attr_to_fp_fn_train = get_fpr_fnr_sensitive_features(y_train, all_class_labels_assigned_train,
x_control_train, sensitive_attrs, print_stats)
cov_all_train[s_attr] = get_sensitive_attr_constraint_fpr_fnr_cov(None, x_train, y_train,
distances_boundary_train,
x_control_train[s_attr])
print_stats = True # only print stats for the test fold
s_attr_to_fp_fn_test = get_fpr_fnr_sensitive_features(y_test, all_class_labels_assigned_test, x_control_test,
sensitive_attrs, print_stats)
cov_all_test[s_attr] = get_sensitive_attr_constraint_fpr_fnr_cov(None, x_test, y_test, distances_boundary_test,
x_control_test[s_attr])
return train_score, test_score, cov_all_train, cov_all_test, s_attr_to_fp_fn_train, s_attr_to_fp_fn_test, \
all_class_labels_assigned_train, all_class_labels_assigned_test
def get_distance_boundary(w, x, s_attr_arr):
"""
if we have boundaries per group, then use those separate boundaries for each sensitive group
else, use the same weight vector for everything
"""
distances_boundary = np.zeros(x.shape[0])
if isinstance(w, dict): # if we have separate weight vectors per group
for k in w.keys(): # for each w corresponding to each sensitive group
d = np.dot(x, w[k])
distances_boundary[s_attr_arr == k] = d[
s_attr_arr == k] # set this distance only for people with this sensitive attr val
else: # we just learn one w for everyone else
distances_boundary = np.dot(x, w)
return distances_boundary
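# Usage sketch (illustrative shapes only):
# w_single = np.zeros(d) # one shared boundary: distances = x.dot(w_single)
# w_per_group = {0.0: np.zeros(d), 1.0: np.ones(d)} # separate boundary per sensitive value
# d_all = get_distance_boundary(w_per_group, x, x_control["s1"])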
def get_constraint_list_cov(x_train, y_train, x_control_train, sensitive_attrs_to_cov_thresh, cons_type, w):
"""
get the list of constraints to be fed to the minimizer
cons_type == 0: means the whole combined misclassification constraint (without FNR or FPR)
cons_type == 1: FPR constraint
cons_type == 2: FNR constraint
cons_type == 4: both FPR as well as FNR constraints
sensitive_attrs_to_cov_thresh: is a dict like {s: {cov_type: val}}
s is the sensitive attr
cov_type is the covariance type. contains the covariance for all misclassifications, FPR and for FNR etc
"""
constraints = []
for attr in ["s1"]:
attr_arr = x_control_train[attr]
# attr_arr = x_control_train[attr]
# print(attr_arr)
attr_arr_transformed = attr_arr
# if index_dict is None: # binary attribute, in this case, the attr_arr_transformed is the same as the attr_arr
s_val_to_total = {ct: {} for ct in [0, 1, 2]} # constraint type -> sens_attr_val -> total number
s_val_to_avg = {ct: {} for ct in [0, 1, 2]}
cons_sum_dict = {ct: {} for ct in
[0, 1, 2]} # sum of entities (females and males) in constraints are stored here
for v in set(attr_arr):
s_val_to_total[0][v] = sum(x_control_train[attr] == v)
s_val_to_total[1][v] = sum(np.logical_and(x_control_train[attr] == v,
y_train == -1)) # FPR constraint so we only consider the ground truth negative dataset for computing the covariance
s_val_to_total[2][v] = sum(np.logical_and(x_control_train[attr] == v, y_train == +1))
for ct in [0, 1, 2]:
s_val_to_avg[ct][0] = s_val_to_total[ct][1] / float(s_val_to_total[ct][0] + s_val_to_total[ct][
1]) # N1/N in our formulation, differs from one constraint type to another
s_val_to_avg[ct][1] = 1.0 - s_val_to_avg[ct][0] # N0/N
for v in set(attr_arr):
idx = x_control_train[attr] == v
#################################################################
# #DCCP constraints
dist_bound_prod = cvxpy.multiply(y_train[idx], x_train[idx] * w) # y.f(x)
cons_sum_dict[0][v] = cvxpy.sum(cvxpy.minimum(0, dist_bound_prod)) * (
s_val_to_avg[0][v] / len(x_train)) # avg misclassification distance from boundary
cons_sum_dict[1][v] = cvxpy.sum(
cvxpy.minimum(0, cvxpy.multiply((1 - y_train[idx]) / 2.0, dist_bound_prod))) * (
s_val_to_avg[1][v] / sum(
y_train == -1)) # avg false positive distance from boundary (only operates on the ground truth neg dataset)
cons_sum_dict[2][v] = cvxpy.sum(
cvxpy.minimum(0, cvxpy.multiply((1 + y_train[idx]) / 2.0, dist_bound_prod))) * (
s_val_to_avg[2][v] / sum(
y_train == +1)) # avg false negative distance from boundary
#################################################################
if cons_type == 4:
cts = [1, 2]
elif cons_type in [0, 1, 2]:
cts = [cons_type]
else:
raise Exception("Invalid constraint type")
#################################################################
# DCCP constraints
for ct in cts:
print(ct)
thresh = abs(sensitive_attrs_to_cov_thresh[attr][ct][1] - sensitive_attrs_to_cov_thresh[attr][ct][0])
constraints.append(cons_sum_dict[ct][1] <= cons_sum_dict[ct][0] + thresh)
constraints.append(cons_sum_dict[ct][1] >= cons_sum_dict[ct][0] - thresh)
#################################################################
return constraints
def get_fpr_fnr_sensitive_features(y_true, y_pred, x_control, sensitive_attrs, verbose=False):
# we will make some changes to x_control in this function, so make a copy in order to preserve the original referenced object
x_control_internal = deepcopy(x_control)
s_attr_to_fp_fn = {}
for s in sensitive_attrs:
s_attr_to_fp_fn[s] = {}
s_attr_vals = x_control_internal[s]
for s_val in sorted(list(set(s_attr_vals))):
s_attr_to_fp_fn[s][s_val] = {}
y_true_local = y_true[s_attr_vals == s_val]
y_pred_local = y_pred[s_attr_vals == s_val]
acc = float(sum(y_true_local == y_pred_local)) / len(y_true_local)
fp = sum(np.logical_and(y_true_local == -1.0,
y_pred_local == +1.0)) # something which is -ve but is misclassified as +ve
fn = sum(np.logical_and(y_true_local == +1.0,
y_pred_local == -1.0)) # something which is +ve but is misclassified as -ve
tp = sum(np.logical_and(y_true_local == +1.0,
y_pred_local == +1.0)) # something which is +ve AND is correctly classified as +ve
tn = sum(np.logical_and(y_true_local == -1.0,
y_pred_local == -1.0)) # something which is -ve AND is correctly classified as -ve
all_neg = sum(y_true_local == -1.0)
all_pos = sum(y_true_local == +1.0)
# fpr = float(fp) / float(fp + tn)
# fnr = float(fn) / float(fn + tp)
# tpr = float(tp) / float(tp + fn)
# tnr = float(tn) / float(tn + fp)
fpr = 0.0 if float(fp + tn) == 0 else float(fp) / float(fp + tn)
fnr = 0.0 if float(fn + tp) == 0 else float(fn) / float(fn + tp)
tpr = 0.0 if float(tp + fn) == 0 else float(tp) / float(tp + fn)
tnr = 0.0 if float(tn + fp) == 0 else float(tn) / float(tn + fp)
s_attr_to_fp_fn[s][s_val]["fp"] = fp
s_attr_to_fp_fn[s][s_val]["fn"] = fn
s_attr_to_fp_fn[s][s_val]["fpr"] = fpr
s_attr_to_fp_fn[s][s_val]["fnr"] = fnr
s_attr_to_fp_fn[s][s_val]["acc"] = (tp + tn) / (tp + tn + fp + fn)
if verbose == True:
if isinstance(s_val, float): # print the int value of the sensitive attr val
s_val = int(s_val)
return s_attr_to_fp_fn
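# The returned dict has the form (illustrative):
# {"s1": {0.0: {"fp": ..., "fn": ..., "fpr": ..., "fnr": ..., "acc": ...}, 1.0: {...}}}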
def get_sensitive_attr_constraint_fpr_fnr_cov(model, x_arr, y_arr_true, y_arr_dist_boundary, x_control_arr,
verbose=False):
"""
Here we compute the covariance between the sensitive attr value and the misclassification distances from the boundary,
for all misclassifications as well as separately for false positives (FPR) and false negatives (FNR):
(-N_1 / N) sum_0(min(0, y.f(x))) + (N_0 / N) sum_1(min(0, y.f(x))) for all misclassifications
(-N_1 / N) sum_0(min(0, (1-y)/2 . y.f(x))) + (N_0 / N) sum_1(min(0, (1-y)/2 . y.f(x))) for FPR
y_arr_true are the true class labels
y_arr_dist_boundary are the predicted distances from the decision boundary
If the model is None, we assume that y_arr_dist_boundary contains the distance from the decision boundary
If the model is not None, we just compute the dot product of model and x_arr
for the case of SVM, we pass the distance from the boundary because the intercept is internalized for the class
and we have to compute the distance using the project function
returns a dict mapping each constraint type (0: ALL, 1: FPR, 2: FNR) to the per-group covariance sums defined above
"""
assert (x_arr.shape[0] == x_control_arr.shape[0])
if len(x_control_arr.shape) > 1: # make sure we just have one column in the array
assert (x_control_arr.shape[1] == 1)
if len(set(x_control_arr)) != 2: # non binary attr
raise Exception("Non binary attr, fix to handle non bin attrs")
arr = []
if model is None:
arr = y_arr_dist_boundary * y_arr_true # simply the output labels
else:
arr = np.dot(model,
x_arr.T) * y_arr_true # the product with the weight vector -- the sign of this is the output label
arr = np.array(arr)
s_val_to_total = {ct: {} for ct in [0, 1, 2]}
s_val_to_avg = {ct: {} for ct in [0, 1, 2]}
cons_sum_dict = {ct: {} for ct in [0, 1, 2]} # sum of entities (females and males) in constraints are stored here
for v in set(x_control_arr):
s_val_to_total[0][v] = sum(x_control_arr == v)
s_val_to_total[1][v] = sum(np.logical_and(x_control_arr == v, y_arr_true == -1))
s_val_to_total[2][v] = sum(np.logical_and(x_control_arr == v, y_arr_true == +1))
for ct in [0, 1, 2]:
s_val_to_avg[ct][0] = s_val_to_total[ct][1] / float(s_val_to_total[ct][0] + s_val_to_total[ct][1]) # N1 / N
s_val_to_avg[ct][1] = 1.0 - s_val_to_avg[ct][0] # N0 / N
for v in set(x_control_arr):
idx = x_control_arr == v
dist_bound_prod = arr[idx]
cons_sum_dict[0][v] = sum(np.minimum(0, dist_bound_prod)) * (s_val_to_avg[0][v] / len(x_arr))
cons_sum_dict[1][v] = sum(np.minimum(0, ((1 - y_arr_true[idx]) / 2) * dist_bound_prod)) * (
s_val_to_avg[1][v] / sum(y_arr_true == -1))
cons_sum_dict[2][v] = sum(np.minimum(0, ((1 + y_arr_true[idx]) / 2) * dist_bound_prod)) * (
s_val_to_avg[2][v] / sum(y_arr_true == +1))
cons_type_to_name = {0: "ALL", 1: "FPR", 2: "FNR"}
for cons_type in [0, 1, 2]:
cov_type_name = cons_type_to_name[cons_type]
cov = cons_sum_dict[cons_type][1] - cons_sum_dict[cons_type][0]
return cons_sum_dict
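# A minimal synthetic sanity check of the covariance computation above (illustrative only,
# not part of the original experiments; the toy arrays below are arbitrary).
if __name__ == "__main__":
    x_toy = np.random.RandomState(0).randn(20, 3)
    y_toy = np.array([-1.0, 1.0] * 10)             # alternating true labels
    s_toy = np.array([0.0, 1.0, 1.0, 0.0] * 5)     # binary sensitive attribute
    dist_toy = np.random.RandomState(1).randn(20)  # signed distances from a boundary
    print(get_sensitive_attr_constraint_fpr_fnr_cov(None, x_toy, y_toy, dist_toy, s_toy))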
| 18,388
| 49.798343
| 202
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/zafar_method/utils_after.py
|
import numpy as np
from random import seed, shuffle
import loss_funcs as lf # our implementation of loss funcs
from scipy.optimize import minimize # for loss func minimization
from multiprocessing import Pool, Process, Queue
from collections import defaultdict
from copy import deepcopy
import matplotlib.pyplot as plt # for plotting stuff
import sys
SEED = 1122334455
seed(SEED) # set the random seed so that the random permutations can be reproduced again
np.random.seed(SEED)
def train_model(x, y, x_control, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint,
sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma):
"""
Function that trains the model subject to various fairness constraints.
If no constraints are given, then simply trains an unaltered classifier.
Example usage in: "synthetic_data_demo/decision_boundary_demo.py"
----
Inputs:
X: (n) x (d+1) numpy array -- n = number of examples, d = number of features, one feature is the intercept
y: 1-d numpy array (n entries)
x_control: dictionary of the type {"s": [...]}, key "s" is the sensitive feature name, and the value is a 1-d list with n elements holding the sensitive feature values
loss_function: the loss function that we want to optimize -- for now we have implementation of logistic loss, but other functions like hinge loss can also be added
apply_fairness_constraints: optimize accuracy subject to fairness constraint (0/1 values)
apply_accuracy_constraint: optimize fairness subject to accuracy constraint (0/1 values)
sep_constraint: apply the fine grained accuracy constraint
for details, see Section 3.3 of arxiv.org/abs/1507.05259v3
For examples on how to apply these constraints, see "synthetic_data_demo/decision_boundary_demo.py"
Note: both apply_fairness_constraints and apply_accuracy_constraint cannot be 1 at the same time
sensitive_attrs: ["s1", "s2", ...], list of sensitive features for which to apply fairness constraint, all of these sensitive features should have a corresponding array in x_control
sensitive_attrs_to_cov_thresh: the covariance threshold that the classifier should achieve (this is only needed when apply_fairness_constraints=1, not needed for the other two constraints)
gamma: controls the loss in accuracy we are willing to incur when using apply_accuracy_constraint and sep_constraint
----
Outputs:
w: the learned weight vector for the classifier
"""
assert ((apply_accuracy_constraint == 1 and apply_fairness_constraints == 1) == False) # both constraints cannot be applied at the same time
max_iter = 100000 # maximum number of iterations for the minimization algorithm
if apply_fairness_constraints == 0:
constraints = []
else:
constraints = get_constraint_list_cov(x, y, x_control, sensitive_attrs, sensitive_attrs_to_cov_thresh)
if apply_accuracy_constraint == 0: # its not the reverse problem, just train w with cross cov constraints
f_args = (x, y)
w = minimize(fun=loss_function,
x0=np.random.rand(x.shape[1], ),
args=f_args,
method='SLSQP',
options={"maxiter": max_iter},
constraints=constraints
)
else:
# train on just the loss function
w = minimize(fun=loss_function,
x0=np.random.rand(x.shape[1], ),
args=(x, y),
method='SLSQP',
options={"maxiter": max_iter},
constraints=[]
)
old_w = deepcopy(w.x)
def constraint_gamma_all(w, x, y, initial_loss_arr):
gamma_arr = np.ones_like(y) * gamma # set gamma for everyone
new_loss = loss_function(w, x, y)
old_loss = sum(initial_loss_arr)
return ((1.0 + gamma) * old_loss) - new_loss
def constraint_protected_people(w, x,
y): # don't confuse the "protected" here with the sensitive feature's protected/non-protected values -- protected here means that these points should not be misclassified to the negative class
return np.dot(w, x.T) # if this is positive, the constraint is satisfied
def constraint_unprotected_people(w, ind, old_loss, x, y):
new_loss = loss_function(w, np.array([x]), np.array(y))
return ((1.0 + gamma) * old_loss) - new_loss
constraints = []
predicted_labels = np.sign(np.dot(w.x, x.T))
unconstrained_loss_arr = loss_function(w.x, x, y, return_arr=True)
if sep_constraint == True: # separate gamma for different people
for i in range(0, len(predicted_labels)):
if predicted_labels[i] == 1.0 and x_control[sensitive_attrs[0]][
i] == 1.0: # for now we are assuming just one sensitive attr for reverse constraint, later, extend the code to take into account multiple sensitive attrs
c = ({'type': 'ineq', 'fun': constraint_protected_people, 'args': (x[i], y[
i])}) # this constraint makes sure that these people stay in the positive class even in the modified classifier
constraints.append(c)
else:
c = ({'type': 'ineq', 'fun': constraint_unprotected_people,
'args': (i, unconstrained_loss_arr[i], x[i], y[i])})
constraints.append(c)
else: # same gamma for everyone
c = ({'type': 'ineq', 'fun': constraint_gamma_all, 'args': (x, y, unconstrained_loss_arr)})
constraints.append(c)
def cross_cov_abs_optm_func(weight_vec, x_in, x_control_in_arr):
cross_cov = (x_control_in_arr - np.mean(x_control_in_arr)) * np.dot(weight_vec, x_in.T)
return float(abs(sum(cross_cov))) / float(x_in.shape[0])
w = minimize(fun=cross_cov_abs_optm_func,
x0=old_w,
args=(x, x_control[sensitive_attrs[0]]),
method='SLSQP',
options={"maxiter": 100000},
constraints=constraints
)
return w.x
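# Usage sketch (illustrative; the loss handle comes from loss_funcs, e.g. its logistic loss):
# w = train_model(x, y, x_control, <loss handle>, apply_fairness_constraints=1,
#                 apply_accuracy_constraint=0, sep_constraint=0, sensitive_attrs=["s1"],
#                 sensitive_attrs_to_cov_thresh={"s1": 0.0}, gamma=None)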
def compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints,
apply_accuracy_constraint, sep_constraint, sensitive_attrs,
sensitive_attrs_to_cov_thresh_arr, gamma):
"""
Computes the cross validation error for the classifier subject to various fairness constraints
This function is just a wrapper of "train_model(...)", all inputs (except for num_folds) are the same. See the specifications of train_model(...) for more info.
Returns lists of train/test accuracy (with each list holding values for all folds), the fractions of various sensitive groups in positive class (for train and test sets), and covariance between sensitive feature and distance from decision boundary (again, for both train and test folds).
"""
train_folds = []
test_folds = []
n_samples = len(y_all)
train_fold_size = 0.7 # the rest of 0.3 is for testing
# split the data into folds for cross-validation
for i in range(0, num_folds):
perm = list(range(0, n_samples)) # shuffle the data before creating each fold (a list is needed so random.shuffle can permute it in place under Python 3)
shuffle(perm)
x_all_perm = x_all[perm]
y_all_perm = y_all[perm]
x_control_all_perm = {}
for k in x_control_all.keys():
x_control_all_perm[k] = np.array(x_control_all[k])[perm]
x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test = split_into_train_test(
x_all_perm, y_all_perm, x_control_all_perm, train_fold_size)
train_folds.append([x_all_train, y_all_train, x_control_all_train])
test_folds.append([x_all_test, y_all_test, x_control_all_test])
def train_test_single_fold(train_data, test_data, fold_num, output_folds, sensitive_attrs_to_cov_thresh):
x_train, y_train, x_control_train = train_data
x_test, y_test, x_control_test = test_data
w = train_model(x_train, y_train, x_control_train, loss_function, apply_fairness_constraints,
apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh,
gamma)
train_score, test_score, correct_answers_train, correct_answers_test = check_accuracy(w, x_train, y_train,
x_test, y_test, None,
None)
distances_boundary_test = (np.dot(x_test, w)).tolist()
all_class_labels_assigned_test = np.sign(distances_boundary_test)
correlation_dict_test = get_correlations(None, None, all_class_labels_assigned_test, x_control_test,
sensitive_attrs)
cov_dict_test = print_covariance_sensitive_attrs(None, x_test, distances_boundary_test, x_control_test,
sensitive_attrs)
distances_boundary_train = (np.dot(x_train, w)).tolist()
all_class_labels_assigned_train = np.sign(distances_boundary_train)
correlation_dict_train = get_correlations(None, None, all_class_labels_assigned_train, x_control_train,
sensitive_attrs)
cov_dict_train = print_covariance_sensitive_attrs(None, x_train, distances_boundary_train, x_control_train,
sensitive_attrs)
output_folds.put(
[fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test,
cov_dict_train])
return
output_folds = Queue()
processes = [Process(target=train_test_single_fold,
args=(train_folds[x], test_folds[x], x, output_folds, sensitive_attrs_to_cov_thresh_arr[x]))
for x in range(num_folds)]
# Run processes
for p in processes:
p.start()
# Get the results
results = [output_folds.get() for p in processes]
for p in processes:
p.join()
test_acc_arr = []
train_acc_arr = []
correlation_dict_test_arr = []
correlation_dict_train_arr = []
cov_dict_test_arr = []
cov_dict_train_arr = []
results = sorted(results, key=lambda x: x[0]) # sort w.r.t fold num
for res in results:
fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test, cov_dict_train = res
test_acc_arr.append(test_score)
train_acc_arr.append(train_score)
correlation_dict_test_arr.append(correlation_dict_test)
correlation_dict_train_arr.append(correlation_dict_train)
cov_dict_test_arr.append(cov_dict_test)
cov_dict_train_arr.append(cov_dict_train)
return test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr
def print_classifier_fairness_stats(acc_arr, correlation_dict_arr, cov_dict_arr, s_attr_name):
correlation_dict = get_avg_correlation_dict(correlation_dict_arr)
non_prot_pos = correlation_dict[s_attr_name][1][1]
prot_pos = correlation_dict[s_attr_name][0][1]
p_rule = (prot_pos / non_prot_pos) * 100.0
return p_rule
def compute_p_rule(x_control, class_labels):
""" Compute the p-rule based on Doctrine of disparate impact """
non_prot_all = sum(x_control == 1.0) # non-protected group
prot_all = sum(x_control == 0.0) # protected group
non_prot_pos = sum(class_labels[x_control == 1.0] == 1.0) # non_protected in positive class
prot_pos = sum(class_labels[x_control == 0.0] == 1.0) # protected in positive class
frac_non_prot_pos = float(non_prot_pos) / float(non_prot_all)
frac_prot_pos = float(prot_pos) / float(prot_all)
p_rule = (frac_prot_pos / frac_non_prot_pos) * 100.0
return p_rule
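# Worked micro-example (hypothetical numbers): if 45% of the protected group and 60% of the
# non-protected group receive the positive class, the p-rule is (0.45 / 0.60) * 100 = 75.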
def add_intercept(x):
""" Add intercept to the data before linear classification """
m, n = x.shape
intercept = np.ones(m).reshape(m, 1) # the constant b
return np.concatenate((intercept, x), axis=1)
def check_binary(arr):
"give an array of values, see if the values are only 0 and 1"
s = sorted(set(arr))
if s[0] == 0 and s[1] == 1:
return True
else:
return False
def get_one_hot_encoding(in_arr):
"""
input: 1-D arr with int vals -- if the values are not ints (or numpy float64), the function returns None
output: m (ndarray): one-hot encoded matrix
d (dict): also returns a dictionary original_val -> column in encoded matrix
"""
for k in in_arr:
if not isinstance(k, (np.float64, int, np.int64)): # the old Python-2 type-string comparison never matches under Python 3
return None
in_arr = np.array(in_arr, dtype=int)
assert (len(in_arr.shape) == 1) # no column, means it was a 1-D arr
attr_vals_uniq_sorted = sorted(list(set(in_arr)))
num_uniq_vals = len(attr_vals_uniq_sorted)
if (num_uniq_vals == 2) and (attr_vals_uniq_sorted[0] == 0 and attr_vals_uniq_sorted[1] == 1):
return in_arr, None
index_dict = {} # value to the column number
for i in range(0, len(attr_vals_uniq_sorted)):
val = attr_vals_uniq_sorted[i]
index_dict[val] = i
out_arr = []
for i in range(0, len(in_arr)):
tup = np.zeros(num_uniq_vals)
val = in_arr[i]
ind = index_dict[val]
tup[ind] = 1 # set that value of tuple to 1
out_arr.append(tup)
return np.array(out_arr), index_dict
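# Example (illustrative): get_one_hot_encoding(np.array([2, 5, 5, 9]))
# -> (array([[1, 0, 0], [0, 1, 0], [0, 1, 0], [0, 0, 1]]), {2: 0, 5: 1, 9: 2})
# A binary 0/1 input array is returned unchanged together with index_dict = None.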
def check_accuracy(model, x_train, y_train, x_test, y_test, y_train_predicted, y_test_predicted):
"""
returns the train/test accuracy of the model
we either pass the model (w)
else we pass y_predicted
"""
if model is not None and y_test_predicted is not None:
raise Exception("Either the model (w) or the predicted labels should be None")
if model is not None:
y_test_predicted = np.sign(np.dot(x_test, model))
y_train_predicted = np.sign(np.dot(x_train, model))
def get_accuracy(y, Y_predicted):
correct_answers = (Y_predicted == y).astype(int) # will have 1 when the prediction and the actual label match
accuracy = float(sum(correct_answers)) / float(len(correct_answers))
return accuracy, sum(correct_answers)
train_score, correct_answers_train = get_accuracy(y_train, y_train_predicted)
test_score, correct_answers_test = get_accuracy(y_test, y_test_predicted)
return train_score, test_score, correct_answers_train, correct_answers_test
def test_sensitive_attr_constraint_cov(model, x_arr, y_arr_dist_boundary, x_control, thresh, verbose):
"""
The covariance is computed b/w the sensitive attr val and the distance from the boundary
If the model is None, we assume that y_arr_dist_boundary contains the distance from the decision boundary
If the model is not None, we just compute the dot product of model and x_arr
for the case of SVM, we pass the distance from the boundary because the intercept is internalized for the class
and we have to compute the distance using the project function
this function returns thresh - |cov|: a negative value if the constraint specified by the thresh parameter
is not satisfied, and a value >= 0 if the constraint is satisfied
"""
assert (x_arr.shape[0] == x_control.shape[0])
if len(x_control.shape) > 1: # make sure we just have one column in the array
assert (x_control.shape[1] == 1)
arr = []
if model is None:
arr = y_arr_dist_boundary # simply the output labels
else:
arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label
arr = np.array(arr, dtype=np.float64)
cov = np.dot(x_control - np.mean(x_control), arr) / float(len(x_control))
ans = thresh - abs(
cov) # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied
# ans = thresh - cov # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied
return ans
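# Worked micro-example (hypothetical numbers): with x_control = [0, 1, 0, 1] and signed distances
# arr = [-1.0, 2.0, -0.5, 1.5], cov = mean((x_control - 0.5) * arr) = 0.625, so for thresh = 0.1
# the function returns 0.1 - |0.625| = -0.525 < 0, i.e. the covariance constraint is violated.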
def print_covariance_sensitive_attrs(model, x_arr, y_arr_dist_boundary, x_control, sensitive_attrs):
"""
returns the covariance between sensitive features and distance from decision boundary
"""
arr = []
if model is None:
arr = y_arr_dist_boundary # simply the distances from the decision boundary
else:
arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label
sensitive_attrs_to_cov_original = {}
for attr in sensitive_attrs:
attr_arr = x_control[attr]
bin_attr = check_binary(attr_arr) # check if the attribute is binary (0/1), or has more than 2 vals
if bin_attr == False: # if its a non-binary sensitive feature, then perform one-hot-encoding
attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)
thresh = 0
if bin_attr:
cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, np.array(attr_arr), thresh, False)
sensitive_attrs_to_cov_original[attr] = cov
else: # sensitive feature has more than 2 categorical values
cov_arr = []
sensitive_attrs_to_cov_original[attr] = {}
for attr_val, ind in index_dict.items():
t = attr_arr_transformed[:, ind]
cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, t, thresh, False)
sensitive_attrs_to_cov_original[attr][attr_val] = cov
cov_arr.append(abs(cov))
cov = max(cov_arr)
return sensitive_attrs_to_cov_original
def get_correlations(model, x_test, y_predicted, x_control_test, sensitive_attrs):
"""
returns the fraction in positive class for sensitive feature values
"""
if model is not None:
y_predicted = np.sign(np.dot(x_test, model))
y_predicted = np.array(y_predicted)
out_dict = {}
for attr in sensitive_attrs:
attr_val = []
for v in x_control_test[attr]: attr_val.append(v)
assert (len(attr_val) == len(y_predicted))
total_per_val = defaultdict(int)
attr_to_class_labels_dict = defaultdict(lambda: defaultdict(int))
for i in range(0, len(y_predicted)):
val = attr_val[i]
label = y_predicted[i]
# val = attr_val_int_mapping_dict_reversed[val] # change values from integers to actual names
total_per_val[val] += 1
attr_to_class_labels_dict[val][label] += 1
class_labels = set(y_predicted.tolist())
local_dict_1 = {}
for k1, v1 in attr_to_class_labels_dict.items():
total_this_val = total_per_val[k1]
local_dict_2 = {}
for k2 in class_labels: # the order should be the same for printing
v2 = v1[k2]
f = float(v2) * 100.0 / float(total_this_val)
local_dict_2[k2] = f
local_dict_1[k1] = local_dict_2
out_dict[attr] = local_dict_1
return out_dict
def get_constraint_list_cov(x_train, y_train, x_control_train, sensitive_attrs, sensitive_attrs_to_cov_thresh):
"""
get the list of constraints to be fed to the minimizer
"""
constraints = []
for attr in sensitive_attrs:
attr_arr = x_control_train[attr]
attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)
if index_dict is None: # binary attribute
thresh = sensitive_attrs_to_cov_thresh[attr]
c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov,
'args': (x_train, y_train, attr_arr_transformed, thresh, False)})
constraints.append(c)
else: # otherwise, its a categorical attribute, so we need to set the cov thresh for each value separately
for attr_val, ind in index_dict.items():
attr_name = attr_val
thresh = sensitive_attrs_to_cov_thresh[attr][attr_name]
t = attr_arr_transformed[:, ind]
c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov,
'args': (x_train, y_train, t, thresh, False)})
constraints.append(c)
return constraints
def split_into_train_test(x_all, y_all, x_control_all, train_fold_size):
split_point = int(round(float(x_all.shape[0]) * train_fold_size))
x_all_train = x_all[:split_point]
x_all_test = x_all[split_point:]
y_all_train = y_all[:split_point]
y_all_test = y_all[split_point:]
x_control_all_train = {}
x_control_all_test = {}
for k in x_control_all.keys():
x_control_all_train[k] = x_control_all[k][:split_point]
x_control_all_test[k] = x_control_all[k][split_point:]
return x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test
def get_avg_correlation_dict(correlation_dict_arr):
# make the structure for the correlation dict
correlation_dict_avg = {}
# print correlation_dict_arr
for k, v in correlation_dict_arr[0].items():
correlation_dict_avg[k] = {}
for feature_val, feature_dict in v.items():
correlation_dict_avg[k][feature_val] = {}
for class_label, frac_class in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label] = []
# populate the correlation dict
for correlation_dict in correlation_dict_arr:
for k, v in correlation_dict.items():
for feature_val, feature_dict in v.items():
for class_label, frac_class in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label].append(frac_class)
# now take the averages
for k, v in correlation_dict_avg.items():
for feature_val, feature_dict in v.items():
for class_label, frac_class_arr in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label] = np.mean(frac_class_arr)
return correlation_dict_avg
def plot_cov_thresh_vs_acc_pos_ratio(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints,
apply_accuracy_constraint, sep_constraint, sensitive_attrs):
# vary the covariance threshold using a range of decreasing multiplicative factors and see the trade-offs between accuracy and fairness
it = 0.05
cov_range = np.arange(1.0, 0.0 - it, -it).tolist()
if apply_accuracy_constraint == True:
if sep_constraint == False:
it = 0.1
cov_range = np.arange(0.0, 1.0 + it, it).tolist()
if sep_constraint == True:
cov_range = [0, 1, 5, 10, 20, 50, 100, 500, 1000]
positive_class_label = 1 # positive class is +1
train_acc = []
test_acc = []
positive_per_category = defaultdict(list) # for each category (male / female), the frac of positive
# first get the original values of covariance in the unconstrained classifier -- these original values are not needed for reverse constraint
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(
x_all, y_all, x_control_all, num_folds, loss_function, 0, apply_accuracy_constraint, sep_constraint,
sensitive_attrs, [{} for i in range(0, num_folds)], 0)
for c in cov_range:
sensitive_attrs_to_cov_original_arr_multiplied = []
for sensitive_attrs_to_cov_original in cov_dict_train_arr:
sensitive_attrs_to_cov_thresh = deepcopy(sensitive_attrs_to_cov_original)
for k in sensitive_attrs_to_cov_thresh.keys():
v = sensitive_attrs_to_cov_thresh[k]
if type(v) == type({}):
for k1 in v.keys():
v[k1] = v[k1] * c
else:
sensitive_attrs_to_cov_thresh[k] = v * c
sensitive_attrs_to_cov_original_arr_multiplied.append(sensitive_attrs_to_cov_thresh)
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(
x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints,
apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_original_arr_multiplied,
c)
test_acc.append(np.mean(test_acc_arr))
correlation_dict_train = get_avg_correlation_dict(correlation_dict_train_arr)
correlation_dict_test = get_avg_correlation_dict(correlation_dict_test_arr)
# just plot the correlations for the first sensitive attr; the plotting can be extended to the other values, but as a proof of concept we will just show one
s = sensitive_attrs[0]
for k, v in correlation_dict_test[s].items():
if v.get(positive_class_label) is None:
positive_per_category[k].append(0.0)
else:
positive_per_category[k].append(v[positive_class_label])
positive_per_category = dict(positive_per_category)
p_rule_arr = (np.array(positive_per_category[0]) / np.array(positive_per_category[1])) * 100.0
ax = plt.subplot(2, 1, 1)
plt.plot(cov_range, positive_per_category[0], "-o", color="green", label="Protected")
plt.plot(cov_range, positive_per_category[1], "-o", color="blue", label="Non-protected")
ax.set_xlim([min(cov_range), max(cov_range)])
plt.xlabel('Multiplicative loss factor')
plt.ylabel('Perc. in positive class')
if apply_accuracy_constraint == False:
plt.gca().invert_xaxis()
plt.xlabel('Multiplicative covariance factor (c)')
ax.legend()
ax = plt.subplot(2, 1, 2)
plt.scatter(p_rule_arr, test_acc, color="red")
ax.set_xlim([min(p_rule_arr), max(max(p_rule_arr), 100)])
plt.xlabel('P% rule')
plt.ylabel('Accuracy')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
plt.show()
def get_line_coordinates(w, x1, x2):
y1 = (-w[0] - (w[1] * x1)) / w[2]
y2 = (-w[0] - (w[1] * x2)) / w[2]
return y1, y2
| 26,695
| 44.094595
| 291
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/online_classification/fair_training.py
|
# fair_training.py
# training methods for fair regression
import torch
from torch.autograd import Variable
import torch.optim as optim
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides an implementation of stochastic gradient descent.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
# +--------------------------------------------------+
# | Algorithm: Stratified SGD - Classification |
# +--------------------------------------------------+
def fair_learning(generator, predict, fair_loss, params, lambda_, psi=None, verbose = False, logdata=None, gamma_scheduler=None, lr_decay=1, lr=1e-3, logfairloss=None, **kwargs):
'''
Train model using Algorithm 2, which uses mini-batch SGD to train accuracy_loss + lambda_ * fairness_loss.
Args:
generator (generator): Generator which yields (X,Y,A)
predict (fct handle): Prediction function handle, maps X-->Y_hat
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
params (list of params): List of learnable parameters, such as returned by "nn.parameters()" or list of torch Variables
lambda_ (numeric): Hyperparameter controlling influence of L_fair
verbose (bool, optional): Verbosity
logdata (None or tuple of X,Y,A, all torch.Tensor, optional): data for keeping track of training process
gamma_scheduler (optional): Learning Rate Scheduler
logfairloss (optional): Fairness log function to use for logging instead of fairloss
Returns:
Training loss over training (accuracy and fairness terms) if logdata is provided; params are updated in place
'''
if logfairloss==None:
logfairloss = fair_loss
optimizer = optim.Adam(params, lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
criterion = torch.nn.BCEWithLogitsLoss()
batch_reg_loss = []
batch_fair_loss = []
for iterate, (X,Y,A) in enumerate(generator):
if logdata:
with torch.no_grad():
y_hat_log = predict(logdata[0])
batch_reg_loss.append(criterion(y_hat_log, logdata[1]))
y_hat_1_log = y_hat_log[logdata[2]==1]
y_hat_0_log = y_hat_log[logdata[2]==0]
batch_fair_loss.append(logfairloss(y_hat_1_log, y_hat_0_log))
optimizer.zero_grad()
# predict
y_hat = predict(X)
# compute regression and fairness loss
L = criterion(y_hat, Y)
y_after_sig = torch.sigmoid(y_hat)
y_after_sig = y_after_sig[:, None]
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
L_fair = fair_loss(y_hat_1, y_hat_0)
# overall loss
loss = L + lambda_ * L_fair
# logging
if verbose:
print('Iterate {}: L_reg={}, L_fair={}'.format(iterate, L.data.item(), L_fair.data.item()))
# gradient computation and optimizer step
loss.backward()
optimizer.step()
return batch_reg_loss, batch_fair_loss
# +--------------------------------------------------+
# | Algorithm: Stratified SGD - Regression |
# +--------------------------------------------------+
def fair_learning_regression(generator, predict, fair_loss, params, lambda_, psi=None, verbose = False, logdata=None, gamma_scheduler=None, lr_decay=1, lr=1e-3, logfairloss=None, **kwargs):
'''
Train model using the same mini-batch SGD algorithm, minimizing accuracy_loss + lambda_ * fairness_loss.
Args:
generator (generator): Generator which yields (X,Y,A)
predict (fct handle): Prediction function handle, maps X-->Y_hat
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
params (list of params): List of learnable parameters, such as returned by "nn.parameters()" or list of torch Variables
lambda_ (numeric): Hyperparameter controlling influence of L_fair
verbose (bool, optional): Verbosity
logdata (None or tuple of X,Y,A, all torch.Tensor, optional): data for keeping track of training process
gamma_scheduler (optional): Learning Rate Scheduler
logfairloss (optional): Fairness log function to use for logging instead of fairloss
Returns:
Training loss over training (regression and fairness terms) if logdata is provided; params are updated in place
'''
if logfairloss==None:
logfairloss = fair_loss
optimizer = optim.Adam(params, lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
criterion = torch.nn.MSELoss()
batch_reg_loss = []
batch_fair_loss = []
for iterate, (X,Y,A) in enumerate(generator):
if logdata:
with torch.no_grad():
y_hat_log = predict(logdata[0])
batch_reg_loss.append(criterion(y_hat_log, logdata[1]))
y_hat_1_log = y_hat_log[logdata[2]==1]
y_hat_0_log = y_hat_log[logdata[2]==0]
batch_fair_loss.append(logfairloss(y_hat_1_log, y_hat_0_log))
optimizer.zero_grad()
# predict
y_hat = predict(X)
# compute regression and fairness loss
L = criterion(y_hat, Y)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
L_fair = fair_loss(y_hat_1, y_hat_0)
# overall loss
loss = L + lambda_ * L_fair
# logging
if verbose:
print('Iterate {}: L_reg={}, L_fair={}'.format(iterate, L.data.item(), L_fair.data.item()))
# gradient computation and optimizer step
loss.backward()
optimizer.step()
return batch_reg_loss, batch_fair_loss
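# A minimal smoke test of fair_learning on synthetic data (illustrative only; the real
# experiments drive this through the DataLoader generators and the fairness metrics module).
if __name__ == "__main__":
    torch.manual_seed(0)
    X = torch.randn(64, 5)
    A = (torch.arange(64) % 2).float().reshape(-1, 1)        # alternating group membership
    Y = (torch.rand(64, 1) > 0.5).float()                    # random binary labels
    w = torch.zeros(5, 1, requires_grad=True)                # linear model parameters
    gen = ((X, Y, A) for _ in range(10))                     # 10 full-batch iterations
    mean_gap = lambda y1, y0: (y1.mean() - y0.mean()).abs()  # simple fairness penalty
    fair_learning(gen, lambda x: x @ w, mean_gap, [w], lambda_=1.0, verbose=True)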
| 5,881
| 45.314961
| 189
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/online_classification/fairness_metrics.py
|
import torch
# import ot  # optional: the POT package is required only by sinkhorn_diver below
import cvxpy as cp
import numpy as np
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementations of the fairness metrics (e.g. energy distance, Sinkhorn divergence, statistical parity)
as well as performance metrics (e.g. MSE, accuracy) of a model mentioned in the paper.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
# +------------------------------------------+
# | Metric 1: Energy Distance |
# +------------------------------------------+
def energy_distance(y1, y2):
'''
Compute the energy distance between the empirical distributions of y1 and y2, each 1-dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
n1 = torch.numel(y1)
n2 = torch.numel(y2)
return (2*torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1)).mean()
-torch.abs(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n1*(n1-1))
-torch.abs(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(n2*(n2-1)))
def energy_distance_biased(y1, y2):
'''
Compute the energy distance between the empirical distributions of y1 and y2, each 1-dimensional
(biased estimator: the within-group sums are divided by n^2 rather than n*(n-1))
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
n1 = torch.numel(y1)
n2 = torch.numel(y2)
return (2*torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1)).mean()
-torch.abs(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n1*n1)
-torch.abs(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(n2*n2))
# +------------------------------------------+
# | Metric 2: Sinkhorn Divergence |
# +------------------------------------------+
def sinkhorn_diver(y1, y2):
'''
Compute the Sinkhorn divergence between the empirical distributions y1 and y2, each 1-dimensional (requires the POT package, see the "import ot" line above)
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Sinkhorn divergence
'''
# compute cost matrix
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink12 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y1.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink11 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
C = torch.sqrt(torch.norm(y2.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink22 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
return (sink12 - 1/2 * sink11 - 1/2 * sink22).sum()
# +------------------------------------------+
# | Metric 3: MMD with RBF Kernel |
# +------------------------------------------+
def MMD_RBF(y1, y2):
n = y1.flatten().shape[0]
m = y2.flatten().shape[0]
def rbf(diff, diagzero=True):
# Gaussian (RBF) kernel exp(-diff^2 / (2*sigma^2)); the exponent must be negative
sigma = 0.1
k = torch.exp(-(diff**2)/(2*sigma**2))
if diagzero:
# zero out the diagonal so the within-group sums below exclude k(y_i, y_i), matching the n*(n-1) normalization
k = k*(1-torch.eye(k.shape[0]))
return k
return (-2*rbf(y1.unsqueeze(0)-y2.unsqueeze(1), diagzero=False).sum()/(n*m)
+rbf(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n*(n-1))
+rbf(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(m*(m-1)))
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity(y1_hat, y2_hat, y1, y2):
'''
Compute the maximum statistical disparity. This is equivalent to the maximum
difference between the two empirical CDFs (a Kolmogorov-Smirnov-type statistic)
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): maximum statistical disparity
'''
diff = torch.tensor(0)
for y_test in torch.hstack((y1_hat,y2_hat)).flatten():
cdf1_y = (y1_hat<=y_test).float().mean()
cdf2_y = (y2_hat<=y_test).float().mean()
if (cdf1_y-cdf2_y).abs()>diff:
diff = (cdf1_y-cdf2_y).abs()
return diff
# +------------------------------------------+
# | Evaluation Metric 2: Bounded Group Loss |
# +------------------------------------------+
def bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L2'):
'''
Compute fraction in group loss between prediction for different
classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between group loss
'''
r1 = y1_hat-y1
r2 = y2_hat-y2
if loss=='L2':
lossf = lambda ra,rb: (ra**2).mean() / (rb**2).mean()
if loss=='L1':
lossf = lambda ra,rb: ra.abs().mean() / rb.abs().mean()
l = lossf(r1,r2)
return l if l<1 else 1/l
# +------------------------------------------+
# | Evaluation Metric 3: |
# | Group Fairness in Expectation |
# +------------------------------------------+
def group_fair_expect(y1_hat, y2_hat, y1, y2):
'''
Compute Group Fairness in Expectation between prediction for different
classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between means
'''
return (y1_hat.mean()-y2_hat.mean()).abs()
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity_classification(y1_hat, y2_hat, y1, y2):
'''
Compute the statistical (demographic) parity gap for binary predictions:
the absolute difference in positive-classification rates between the two groups
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): statistical parity gap
'''
return ((y1_hat).sum() / y1_hat.shape[0] - (y2_hat).sum() / y2_hat.shape[0]).abs()
# +------------------------------------------+
# | Evaluation Metric 4: lp distance |
# +------------------------------------------+
def lp_dist(y1_hat, y2_hat, y1, y2, p=1):
'''
Compute the L_p distance between the two empirical CDFs of the predictions.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): lp distance
'''
dist = torch.tensor(0.)
ys, idx = torch.hstack((y1_hat,y2_hat)).flatten().sort()
for i in range(ys.shape[0]-1):
cdf1_y = (y1_hat <= ys[i]).float().mean()
cdf2_y = (y2_hat <= ys[i]).float().mean()
dist += ((cdf1_y - cdf2_y).abs() ** p) * (ys[i+1] - ys[i])
return dist**(1/p)
# +------------------------------------------+
# | Reg/Clf Metrics: MSE, MAE, Accuracy |
# +------------------------------------------+
def MSE(y1_hat, y2_hat, y1, y2):
'''
Compute the mean squared error over both groups combined.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MSE (torch.Tensor): mean squared error
'''
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
ys = torch.hstack((y1, y2)).flatten()
return ((ys-yhats)**2).mean()
def MAE(y1_hat, y2_hat, y1, y2):
'''
Compute the mean absolute error over both groups combined.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MAE (torch.Tensor): mean absolute error
'''
yhats = torch.hstack((y1_hat,y2_hat)).flatten()
ys = torch.hstack((y1,y2)).flatten()
return (ys-yhats).abs().mean()
def accuracy(y1_hat, y2_hat, y1, y2):
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
total = ys.size(0)
correct = (yhats == ys).sum().item()
return torch.tensor(correct / total * 100)
def R2(y1_hat, y2_hat, y1, y2):
'''
Compute regression R2.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
R2 (torch.Tensor): coefficient of determination
'''
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
var_y = torch.var(ys, unbiased=False)
return 1.0 - torch.nn.MSELoss(reduction="mean")(yhats, ys) / var_y
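# A quick illustrative check of two of the metrics on synthetic predictions (not part of
# the original module; run with `python fairness_metrics.py`).
if __name__ == "__main__":
    torch.manual_seed(0)
    y1 = torch.randn(50)        # predictions for group 1
    y2 = torch.randn(60) + 0.5  # predictions for group 2, shifted by 0.5
    print("energy distance:", energy_distance(y1, y2).item())
    print("max CDF gap    :", statistical_parity(y1, y2, None, None).item())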
| 10,438
| 34.266892
| 133
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/online_classification/data_loader.py
|
# data_loader.py
# utilities for loading data
import torch
import numpy as np
import pandas as pd
import copy
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from load_data import *
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn import preprocessing
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides data loading functionality for MFL.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def to_tensor(data, device):
D = data
if type(data) == pd.core.frame.DataFrame:
D = data.to_numpy()
if type(D) == np.ndarray:
return torch.tensor(D, device=device).float()
elif type(D) == torch.Tensor:
return D.to(device).float()
else:
raise NotImplementedError('Currently only Torch Tensors, Numpy NDArrays and Pandas Dataframes are supported')
class DataLoader:
def __init__(self, X, Y, A, X_test=None, Y_test=None, A_test=None, use_tensor=True, device='cpu', info='No Info Available', min_max_scaler=None):
self.device = device
self.use_tensor = use_tensor
self.X = to_tensor(X, device) if use_tensor else X
self.A = to_tensor(A, device) if use_tensor else A
self.Y = to_tensor(Y, device) if use_tensor else Y
if X_test is not None:
self.X_test = to_tensor(X_test, device) if use_tensor else X_test
self.A_test = to_tensor(A_test, device) if use_tensor else A_test
self.Y_test = to_tensor(Y_test, device) if use_tensor else Y_test
self.info = info
self.min_max_scaler = min_max_scaler if min_max_scaler is not None else getattr(self, 'min_max_scaler', None) # keep a scaler already set by a subclass (e.g. Credit)
def get_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_adult_data(self):
return (self.X, self.Y, self.A, self.X_test, self.Y_test, self.A_test)
def get_data_for_A(self, a):
# get dataset but only for samples with attribute a
X_a = self.X[(self.A==a).squeeze()]
Y_a = self.Y[(self.A==a).squeeze()]
return (X_a, Y_a)
def stratified_batch_generator_worep(self, batch_size=32, n_epochs=100):
# get proportions of protected attribute
p_A1 = self.A.mean()
p_A0 = 1 - p_A1
total_samples = self.A.shape[0]
# build index set of protected and unprotected attribute
# number of samples to sample from each distribution
n_batch_1 = int(p_A1*batch_size)
n_batch_0 = int(p_A0*batch_size)
for epoch in tqdm(range(n_epochs)):
ind_A1 = (self.A==1).nonzero()[:,0]
ind_A0 = (self.A==0).nonzero()[:,0]
for _ in range(0, total_samples - batch_size + 1, batch_size):
# sample indexes for protected and unprotected class
sampled_indices_A1 = (torch.ones(ind_A1.shape[0]) / (ind_A1.shape[0])).multinomial(
num_samples=n_batch_1,
replacement=False)
batch_idx1 = ind_A1[sampled_indices_A1]
mask = torch.ones(ind_A1.numel(), dtype=torch.bool)
mask[sampled_indices_A1] = False
ind_A1 = ind_A1[mask]
sampled_indices_A0 = (torch.ones(ind_A0.shape[0]) / (ind_A0.shape[0])).multinomial(
num_samples=n_batch_0,
replacement=False)
batch_idx0 = ind_A0[sampled_indices_A0]
mask = torch.ones(ind_A0.numel(), dtype=torch.bool)
mask[sampled_indices_A0] = False
ind_A0 = ind_A0[mask]
yield (torch.vstack((self.X[batch_idx0], self.X[batch_idx1])),
torch.vstack((self.Y[batch_idx0], self.Y[batch_idx1])),
torch.vstack((self.A[batch_idx0], self.A[batch_idx1])))
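# Usage sketch (illustrative): iterate over stratified mini-batches drawn without replacement
# for X_b, Y_b, A_b in loader.stratified_batch_generator_worep(batch_size=64, n_epochs=10):
#     ...  # each batch keeps roughly the dataset's proportion of A == 1 vs A == 0 samples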
def get_info(self):
return self.info
def split_test(self, **kwargs):
# perform train test split, kwargs for sklearn train-test-split
X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(self.X, self.Y, self.A, **kwargs)
self.X = X_train
self.X_test = X_test
self.Y = Y_train
self.Y_test = Y_test
self.A = A_train
self.A_test = A_test
def get_test_data(self):
# get the test dataset
if self.X_test is None:
raise ValueError('Train-Test split has not yet been performed')
if self.min_max_scaler is not None:
x_vals = self.X_test.values #returns a numpy array
x_scaled = self.min_max_scaler.fit_transform(x_vals)
self.X_test = pd.DataFrame(x_scaled)
return (self.X_test, self.Y_test, self.A_test)
def get_log_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_k(self):
return self.X.shape[1]
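# Typical usage of the dataset loaders below (illustrative parameter values):
# loader = LawSchool(a_inside_x=True)
# loader.split_test(test_size=0.3, random_state=0)
# X, Y, A = loader.get_data()
# X_te, Y_te, A_te = loader.get_test_data()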
class LawSchool(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x, **kwargs):
rawdata = pd.read_sas('./data/classification/lawschool/lawschs1_1.sas7bdat')
rawdata = rawdata.drop(['college', 'Year', 'URM', 'enroll'], axis=1)
rawdata = rawdata.dropna(axis=0)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
X = rawdata[['LSAT', 'GPA', 'Gender', 'resident']]
Y = rawdata['admit']
A = rawdata['White']
if a_inside_x:
X = pd.concat((X, A), axis=1)
info = '''Law School Admissions Data collected by Project SEAPHE, predict admission,
don\'t discriminate White vs. Non-White\nhttp://www.seaphe.org/databases.php'''
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], info=info, **kwargs)
class Drug(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x):
X, Y, A = load_drug_data('data/classification/drug/drug_consumption.data.txt')
info = '''
https://www.kaggle.com/danofer/compass
'''
if a_inside_x:
X = np.concatenate((X, A[:, None]), axis=1)
super().__init__(X, Y[:, None], A[:, None], info=info)
class Credit(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x, **kwargs):
rawdata = pd.read_excel('./data/classification/credit_card/default_clients.xls', header=1)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
columns = list(rawdata.columns)
categ_cols = []
for column in columns:
if 2 < len(set(rawdata[column])) < 10:
categ_cols.append((column, len(set(rawdata[column]))))
preproc_data = copy.deepcopy(rawdata)
for categ_col, n_items in categ_cols:
for i in range(n_items):
preproc_data[categ_col + str(i)] = (preproc_data[categ_col] == i).astype(float)
preproc_data = preproc_data.drop(['EDUCATION', 'MARRIAGE'], axis=1)
X = preproc_data.drop(['ID', 'SEX', 'default payment next month'], axis=1)
Y = preproc_data['default payment next month']
A = 2 - preproc_data['SEX']
if a_inside_x:
X = pd.concat((X, A), axis=1)
info = '''Credit data'''
self.min_max_scaler = preprocessing.MinMaxScaler()
x_vals = X.values #returns a numpy array
x_scaled = self.min_max_scaler.fit_transform(x_vals)
X = pd.DataFrame(x_scaled)
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], info=info, **kwargs)
class Adult(DataLoader):
def __init__(self, a_inside_x, **kwargs):
X_train_, Y_train_, X_test_, Y_test_ = adult_data_read('./data/classification/adult/')
A = X_train_['Sex']
A_test = X_test_['Sex']
le = LabelEncoder()
Y = le.fit_transform(Y_train_)
Y = pd.Series(Y, name='>50k')
Y_test = le.fit_transform(Y_test_)
Y_test = pd.Series(Y_test, name='>50k')
if not a_inside_x:
X = X_train_.drop(labels=['Sex'], axis=1)
X = pd.get_dummies(X)
X_test = X_test_.drop(labels=['Sex'], axis=1)
X_test = pd.get_dummies(X_test)
else:
X = pd.get_dummies(X_train_)
X_test = pd.get_dummies(X_test_)
info = """Adult dataset for classification. Train Test split is already provided"""
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], X_test, np.array(Y_test)[:, None], np.array(A_test)[:, None], info=info, **kwargs)
class CommunitiesCrime(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class CommunitiesCrimeClassification(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, a_inside_x, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
bin_thr = Y.mean()
Y = (Y>= bin_thr).astype(int)
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
if a_inside_x:
X = np.concatenate((np.array(X), np.array(A)), axis=1)
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class BarPass(DataLoader):
# http://www.seaphe.org/databases.php
def __init__(self, **kwargs):
df = pd.read_sas('data/lawschs1_1.sas7bdat')
drop_cols = ['enroll', 'college', 'Year', 'Race']
df = df[[col for col in df.columns if col not in drop_cols]]
df = df.dropna()
Y = df[['GPA']]
A = df[['White']]
X = df.drop('GPA', axis=1)
info = '''Law School Admissions Data collected by Project SEAPHE, predict GPA,
don\'t discriminate White vs. Non-White\nhttp://www.seaphe.org/databases.php'''
self.first_call = True
super().__init__(X, Y, A, info=info, **kwargs)
def get_data(self):
if self.first_call:
self.Xs, self.Ys, self.As = next(self.stratified_batch_generator_worep(500, 1))
self.first_call = False
return (self.Xs, self.Ys, self.As)
def get_log_data(self):
return self.get_data()
class StudentPerformance(DataLoader):
# https://archive.ics.uci.edu/ml/datasets/student+performance
def __init__(self, subject = 'Math', **kwargs):
# load data
        df = pd.read_csv('data/student/student-{}.csv'.format(subject.lower()[:3]), sep=';')
# convert the categorical values
categoricals = df.dtypes[df.dtypes==object].index
for attribute in categoricals:
options = df[attribute].unique()
options.sort()
options = options[:-1]
for option in options:
df['{}_{}'.format(attribute, option)] = (df[attribute]==option).astype(int)
df = df.drop(attribute, axis=1)
# extract X A Y
A = df[['sex_F']]
Y = df[['G3']]
X = df.drop(['sex_F', 'G3'], axis=1)
info = '''
Student Performance dataset. Predict Final Grade based on Attributes, don't discriminate against female students.
https://archive.ics.uci.edu/ml/datasets/student+performance
'''
super().__init__(X, Y, A, info=info, **kwargs)
class Compas(DataLoader):
def __init__(self, a_inside_x):
X, Y, A = load_compas_data('data/classification/compas/compas-scores-two-years.csv')
info = '''
https://www.kaggle.com/danofer/compass
'''
if a_inside_x:
X = np.concatenate((X, A[:, None]), axis=1)
super().__init__(X, Y[:, None], A[:, None], info=info)
class Synthetic1(DataLoader):
# synthetic data: bias offset
def __init__(self, N, k, delta_intercept = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = delta_intercept+ X_0@theta
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
class Synthetic2(DataLoader):
# synthetic data: bias slope
def __init__(self, N, k, delta_slope = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = X_0@(theta+delta_slope)
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
def set_seed(seed=0):
torch.manual_seed(seed)
np.random.seed(seed)
def adult_data_read(data_root, display=False):
""" Return the Adult census data in a nice package. """
dtypes = [
("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"),
("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"),
("Occupation", "category"), ("Relationship", "category"), ("Race", "category"),
("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"),
("Hours per week", "float32"), ("Country", "category"), ("Target", "category")
]
raw_train_data = pd.read_csv(
data_root+'adult.data',
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
raw_test_data = pd.read_csv(
data_root+'adult.test',
skiprows=1,
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
train_data = raw_train_data.drop(["Education"], axis=1) # redundant with Education-Num
test_data = raw_test_data.drop(["Education"], axis=1) # redundant with Education-Num
filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes))
train_data["Target"] = train_data["Target"] == " >50K"
test_data["Target"] = test_data["Target"] == " >50K."
rcode = {
"Not-in-family": 0,
"Unmarried": 1,
"Other-relative": 2,
"Own-child": 3,
"Husband": 4,
"Wife": 5
}
for k, dtype in filt_dtypes:
if dtype == "category":
if k == "Relationship":
train_data[k] = np.array([rcode[v.strip()] for v in train_data[k]])
test_data[k] = np.array([rcode[v.strip()] for v in test_data[k]])
else:
train_data[k] = train_data[k].cat.codes
test_data[k] = test_data[k].cat.codes
return train_data.drop(["Target", "fnlwgt"], axis=1), train_data["Target"].values, test_data.drop(["Target", "fnlwgt"], axis=1), test_data["Target"].values
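# ----------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): how a loader and its
# stratified batch generator are typically consumed.  It only uses the
# Synthetic1 loader defined above, so no data files are required.
if __name__ == "__main__":
    set_seed(0)
    ds = Synthetic1(N=200, k=5)            # synthetic regression data; A is appended as the last feature
    ds.split_test(test_size=0.3)           # kwargs are forwarded to sklearn's train_test_split
    for X_b, Y_b, A_b in ds.stratified_batch_generator_worep(batch_size=32, n_epochs=1):
        # every batch keeps (roughly) the same protected/unprotected proportions as the full data
        print(X_b.shape, Y_b.shape, float(A_b.mean()))
        break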
| 16,842
| 39.585542
| 159
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/online_classification/run.py
|
import bias_eval
from pickle import dump
from tqdm import tqdm
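# Note (added comment): the Windows-style 'results\\...' paths below assume that a
# 'results' directory already exists next to this script; create it, or adapt the
# paths, before running.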
for seed in tqdm(range(20)):
for target_batchsize in [4, 8, 16, 32, 64, 128, 256, 512]:
results, path,_ = bias_eval.train_metrics_debiased(target_batchsize, seed=seed)
with open(f'results\\results_debiased_seed{seed}_bs{target_batchsize}.pickle', 'wb') as handle:
dump(results, handle)
with open(f'results\\path_debiased_seed{seed}_bs{target_batchsize}.pickle', 'wb') as handle:
dump(path, handle)
results, path,_ = bias_eval.train_metrics_fullbias(target_batchsize, seed=seed)
with open(f'results\\results_fullbias_seed{seed}_bs{target_batchsize}.pickle', 'wb') as handle:
dump(results, handle)
with open(f'results\\path_fullbias_seed{seed}_bs{target_batchsize}.pickle', 'wb') as handle:
dump(path, handle)
results, path,_ = bias_eval.train_metrics_noreg(target_batchsize, seed=seed)
with open(f'results\\results_seed{seed}_bs{target_batchsize}_noreg.pickle', 'wb') as handle:
dump(results, handle)
with open(f'results\\path_seed{seed}_bs{target_batchsize}_noreg.pickle', 'wb') as handle:
dump(path, handle)
| 1,246
| 53.217391
| 103
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/online_classification/bias_eval.py
|
import torch
import data_loader
import models
import fairness_metrics
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
def find_batchsize(N_target, A):
    # smallest prefix length of A that contains at least two samples of each protected group
    candidate_1 = torch.argmax((A.flatten().cumsum(0)==2).int()).item() + 1
    candidate_0 = torch.argmax(((1-A).flatten().cumsum(0)==2).int()).item() + 1
    # argmax returns 0 when the cumulative sum never reaches 2, so a candidate < 2 signals failure
    if candidate_0 < 2 or candidate_1 < 2:  # fixed: the original checked candidate_1 < 0, which can never trigger
        return -1
    return max(N_target, candidate_1, candidate_0)
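# Illustrative sketch of the intended behaviour (assuming the corrected check above):
# for A = [0, 1, 0, 0, 1, 0] the cumulative sum of A first reaches 2 at index 4, so
# candidate_1 = 5; the complement 1 - A first reaches 2 at index 2, so candidate_0 = 3.
# find_batchsize(4, A) then returns max(4, 5, 3) = 5, i.e. the smallest batch length
# >= N_target that still contains at least two samples from each protected group.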
def accuracy_1(y_hat_1, y_hat_0, y_1, y_0):
    # accuracy on the protected group (A == 1)
    return (y_hat_1 == y_1).float().mean()
def accuracy_0(y_hat_1, y_hat_0, y_1, y_0):
    # accuracy on the unprotected group (A == 0)
    return (y_hat_0 == y_0).float().mean()
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy,
'accuracy1' : accuracy_1,
'accuracy0' : accuracy_0
}
def train_metrics_fullbias(target_batchsize, seed=0, plot=False, lambda_=1):
lr = 5e-4
drug = data_loader.Drug(True)
model = models.NeuralNetworkClassification(drug.get_k())
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=5e-3)
criterion = torch.nn.BCEWithLogitsLoss()
fairloss = fairness_metrics.energy_distance_biased
data_loader.set_seed(seed)
drug.split_test()
X, Y, A = drug.get_data()
X_test, Y_test, A_test = drug.get_test_data()
N_epochs = 500
losses = []
test_losses = []
for epoch in range(N_epochs):
X,Y,A = shuffle(X,Y,A)
sumloss = 0
batchstart = 0
while batchstart<len(A):
optimizer.zero_grad()
batchsize = find_batchsize(target_batchsize, A[batchstart:])
if batchsize>0:
X_batch, Y_batch, A_batch = X[batchstart:batchstart+batchsize], Y[batchstart:batchstart+batchsize], A[batchstart:batchstart+batchsize]
batchstart = batchstart+batchsize
pred = model(X_batch)
L = criterion(pred, Y_batch)
y_after_sig = torch.sigmoid(pred)
y_after_sig = y_after_sig[:, None]
y_hat_1 = pred[A_batch.flatten()==1]
y_hat_0 = pred[A_batch.flatten()==0]
L_fair = fairloss(y_hat_1, y_hat_0)
# overall loss
loss = L + lambda_ * L_fair
loss.backward()
sumloss += loss.detach().item()
optimizer.step()
else:
batchstart = len(A)+1
losses.append(sumloss)
with torch.no_grad():
pred = model(X_test)
y_hat_1 = pred[A_test.flatten()==1]
y_hat_0 = pred[A_test.flatten()==0]
testloss = criterion(model(X_test), Y_test) + fairloss(y_hat_1, y_hat_0)
test_losses.append(testloss)
if plot:
plt.plot(losses)
y_hat = torch.round(torch.sigmoid(model(X_test)))
y_hat_1 = y_hat[A_test.flatten()==1].flatten()
y_hat_0 = y_hat[A_test.flatten()==0].flatten()
y_1 = Y_test[A_test.flatten()==1].flatten()
y_0 = Y_test[A_test.flatten()==0].flatten()
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
return test_results, losses, test_losses
def train_metrics_debiased(target_batchsize, seed=0, plot=False,lambda_ = 1):
lr = 5e-4
drug = data_loader.Drug(True)
model = models.NeuralNetworkClassification(drug.get_k())
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=5e-3)
criterion = torch.nn.BCEWithLogitsLoss()
fairloss = fairness_metrics.energy_distance
data_loader.set_seed(seed)
drug.split_test()
X, Y, A = drug.get_data()
X_test, Y_test, A_test = drug.get_test_data()
N_epochs = 500
losses = []
test_losses = []
for epoch in range(N_epochs):
X,Y,A = shuffle(X,Y,A)
sumloss = 0
batchstart = 0
while batchstart<len(A):
optimizer.zero_grad()
batchsize = find_batchsize(target_batchsize, A[batchstart:])
if batchsize>0:
X_batch, Y_batch, A_batch = X[batchstart:batchstart+batchsize], Y[batchstart:batchstart+batchsize], A[batchstart:batchstart+batchsize]
batchstart = batchstart+batchsize
pred = model(X_batch)
#L = criterion(pred, Y_batch)
y_after_sig = torch.sigmoid(pred)
y_after_sig = y_after_sig[:, None]
y_hat_1 = pred[A_batch.flatten()==1]
y_hat_0 = pred[A_batch.flatten()==0]
L_fair = fairloss(y_hat_1, y_hat_0)
# overall loss
y_1 = Y_batch[A_batch.flatten()==1]
y_0 = Y_batch[A_batch.flatten()==0]
delta_1, delta_0 = 1, 1
N = len(A_batch)
N_1 = A_batch.sum()
N_0 = N-N_1
if N >= target_batchsize:
if N_1 == 2:
delta_1 = N/(2*(N-1))
delta_0 = N/((N-1))
else:
delta_1 = N/((N-1))
delta_0 = N/(2*(N-1))
weight_1 = (delta_1) * N_1/N
weight_0 = (delta_0) * N_0/N
accloss1 = criterion(y_hat_1, y_1)
accloss0 = criterion(y_hat_0, y_0)
L = (weight_0 * accloss0 + weight_1 * accloss1)
loss = L + lambda_ * L_fair
loss.backward()
sumloss += loss.detach().item()
optimizer.step()
else:
batchstart = len(A)+1
losses.append(sumloss)
with torch.no_grad():
pred = model(X_test)
y_hat_1 = pred[A_test.flatten()==1]
y_hat_0 = pred[A_test.flatten()==0]
testloss = criterion(model(X_test), Y_test) + fairloss(y_hat_1, y_hat_0)
test_losses.append(testloss)
if plot:
plt.plot(losses)
y_hat = torch.round(torch.sigmoid(model(X_test)))
y_hat_1 = y_hat[A_test.flatten()==1].flatten()
y_hat_0 = y_hat[A_test.flatten()==0].flatten()
y_1 = Y_test[A_test.flatten()==1].flatten()
y_0 = Y_test[A_test.flatten()==0].flatten()
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
return test_results, losses, test_losses
def train_metrics_noreg(target_batchsize, seed=0, plot=False):
lr = 5e-4
drug = data_loader.Drug(True)
model = models.NeuralNetworkClassification(drug.get_k())
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=5e-3)
criterion = torch.nn.BCEWithLogitsLoss()
fairloss = fairness_metrics.energy_distance_biased
data_loader.set_seed(seed)
drug.split_test()
X, Y, A = drug.get_data()
X_test, Y_test, A_test = drug.get_test_data()
N_epochs = 500
losses = []
test_losses = []
for epoch in range(N_epochs):
X,Y,A = shuffle(X,Y,A)
sumloss = 0
batchstart = 0
while batchstart<len(A):
optimizer.zero_grad()
batchsize = min(target_batchsize, len(A[batchstart:]))
if batchsize>0:
X_batch, Y_batch, A_batch = X[batchstart:batchstart+batchsize], Y[batchstart:batchstart+batchsize], A[batchstart:batchstart+batchsize]
batchstart = batchstart+batchsize
pred = model(X_batch)
L = criterion(pred, Y_batch)
# overall loss
loss = L
loss.backward()
sumloss += loss.detach().item()
optimizer.step()
else:
batchstart = len(A)+1
losses.append(sumloss)
with torch.no_grad():
testloss = criterion(model(X_test), Y_test)
test_losses.append(testloss)
if plot:
plt.plot(losses)
y_hat = torch.round(torch.sigmoid(model(X_test)))
y_hat_1 = y_hat[A_test.flatten()==1].flatten()
y_hat_0 = y_hat[A_test.flatten()==0].flatten()
y_1 = Y_test[A_test.flatten()==1].flatten()
y_0 = Y_test[A_test.flatten()==0].flatten()
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
return test_results, losses, test_losses
| 9,003
| 36.991561
| 150
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/online_classification/models.py
|
# models.py
# models for regression
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides models for MFL and Oneto et al.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
class LinearRegression(nn.Module):
def __init__(self, k):
super(LinearRegression, self).__init__()
self.linear = torch.nn.Linear(k, 1, bias=True)
def forward(self, x):
return self.linear(x)
class NeuralNetwork(nn.Module):
def __init__(self, k):
super(NeuralNetwork, self).__init__()
self.linear1 = torch.nn.Linear(k, 20, bias=True)
self.linear2 = torch.nn.Linear(20, 1, bias=True)
def forward(self, x):
x = F.relu(self.linear1(x))
self.output = self.linear2(x)
return self.output
"""Model of MFL"""
class NeuralNetworkClassification(nn.Module):
def __init__(self, k):
super(NeuralNetworkClassification, self).__init__()
self.linear1 = torch.nn.Linear(k, 16, bias=True)
self.linear2 = torch.nn.Linear(16, 1, bias=True)
def forward(self, x):
x = F.relu(self.linear1(x))
self.output = self.linear2(x)
return self.output
"""Model of Oneta et al."""
class NeuralNetwork_MMD(nn.Module):
def __init__(self, k):
super(NeuralNetwork_MMD, self).__init__()
self.linear1 = torch.nn.Linear(k, 16, bias=True)
        self.sigmoid_ = torch.nn.ReLU()  # note: despite the attribute name, this is a ReLU activation
self.linear2 = torch.nn.Linear(16, 1, bias=True)
def first_layer(self, x):
return self.sigmoid_((self.linear1(x)))
def forward(self, x):
self.output = self.linear2(self.sigmoid_((self.linear1(x))))
return self.output
# loss_functions: MAE and MSE
def MSE(y_pred, y):
return ((y_pred - y) ** 2).mean()
def MAE(y_pred, y):
return (y_pred - y).abs().mean()
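# ----------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): forward passes through
# the classification models on random inputs, just to show the expected shapes.
# The input dimension 12 and batch size 8 are arbitrary illustration values.
if __name__ == "__main__":
    x = torch.randn(8, 12)
    clf = NeuralNetworkClassification(k=12)
    mmd_net = NeuralNetwork_MMD(k=12)
    print(clf(x).shape)                            # torch.Size([8, 1]) raw logits
    print(mmd_net.first_layer(x).shape)            # torch.Size([8, 16]) hidden representation
    print(MSE(clf(x), torch.zeros(8, 1)).item())   # the module's own MSE helper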
| 2,012
| 29.5
| 97
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/online_classification/load_data.py
|
import numpy as np
import pandas as pd
import sklearn.preprocessing as preprocessing
from collections import namedtuple
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt # for plotting stuff
import os
import collections
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementations of the dataset preprocessing.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def load_compas_data(COMPAS_INPUT_FILE):
FEATURES_CLASSIFICATION = ["age_cat", "race", "sex", "priors_count",
"c_charge_degree"] # features to be used for classification
CONT_VARIABLES = [
"priors_count"] # continuous features, will need to be handled separately from categorical features, categorical features will be encoded using one-hot
CLASS_FEATURE = "two_year_recid" # the decision variable
SENSITIVE_ATTRS = ["race"]
# COMPAS_INPUT_FILE = DIR_DATA + "compas/compas-scores-two-years.csv"
print('Loading COMPAS dataset...')
# load the data and get some stats
df = pd.read_csv(COMPAS_INPUT_FILE)
df = df.dropna(subset=["days_b_screening_arrest"]) # dropping missing vals
# convert to np array
data = df.to_dict('list')
for k in data.keys():
data[k] = np.array(data[k])
""" Filtering the data """
# These filters are the same as propublica (refer to https://github.com/propublica/compas-analysis)
# If the charge date of a defendants Compas scored crime was not within 30 days from when the person was arrested, we assume that because of data quality reasons, that we do not have the right offense.
idx = np.logical_and(data["days_b_screening_arrest"] <= 30, data["days_b_screening_arrest"] >= -30)
# We coded the recidivist flag -- is_recid -- to be -1 if we could not find a compas case at all.
idx = np.logical_and(idx, data["is_recid"] != -1)
# In a similar vein, ordinary traffic offenses -- those with a c_charge_degree of 'O' -- will not result in Jail time are removed (only two of them).
    idx = np.logical_and(idx, data["c_charge_degree"] != "O")  # F: felony, M: misdemeanor
# We filtered the underlying data from Broward county to include only those rows representing people who had either recidivated in two years, or had at least two years outside of a correctional facility.
idx = np.logical_and(idx, data["score_text"] != "NA")
# we will only consider blacks and whites for this analysis
idx = np.logical_and(idx, np.logical_or(data["race"] == "African-American", data["race"] == "Caucasian"))
# select the examples that satisfy this criteria
for k in data.keys():
data[k] = data[k][idx]
""" Feature normalization and one hot encoding """
# convert class label 0 to -1
y = data[CLASS_FEATURE]
# y[y == 0] = -1
print("\nNumber of people recidivating within two years")
print(pd.Series(y).value_counts())
print("\n")
X = np.array([]).reshape(len(y),
0) # empty array with num rows same as num examples, will hstack the features to it
x_control = collections.defaultdict(list)
feature_names = []
for attr in FEATURES_CLASSIFICATION:
vals = data[attr]
if attr in SENSITIVE_ATTRS:
lb = preprocessing.LabelBinarizer()
lb.fit(vals)
vals = lb.transform(vals)
x_control[attr] = vals
pass
else:
if attr in CONT_VARIABLES:
vals = [float(v) for v in vals]
vals = preprocessing.scale(vals) # 0 mean and 1 variance
vals = np.reshape(vals, (len(y), -1)) # convert from 1-d arr to a 2-d arr with one col
else: # for binary categorical variables, the label binarizer uses just one var instead of two
lb = preprocessing.LabelBinarizer()
lb.fit(vals)
vals = lb.transform(vals)
# add to sensitive features dict
# add to learnable features
X = np.hstack((X, vals))
if attr in CONT_VARIABLES: # continuous feature, just append the name
feature_names.append(attr)
else: # categorical features
if vals.shape[1] == 1: # binary features that passed through lib binarizer
feature_names.append(attr)
else:
for k in lb.classes_: # non-binary categorical features, need to add the names for each cat
feature_names.append(attr + "_" + str(k))
# convert the sensitive feature to 1-d array
x_control = dict(x_control)
for k in x_control.keys():
assert (x_control[k].shape[1] == 1) # make sure that the sensitive feature is binary after one hot encoding
x_control[k] = np.array(x_control[k]).flatten()
# sys.exit(1)
# """permute the date randomly"""
# perm = range(0, X.shape[0])
# shuffle(perm)
# X = X[perm]
# y = y[perm]
for k in x_control.keys():
x_control[k] = x_control[k][:]
# intercept = np.ones(X.shape[0]).reshape(X.shape[0], 1)
# X = np.concatenate((intercept, X), axis=1)
assert (len(feature_names) == X.shape[1])
print("Features we will be using for classification are:", feature_names, "\n")
x_control = x_control['race']
return X, y, x_control
def load_drug_data(DIR_DATA):
g = pd.read_csv(DIR_DATA, header=None, sep=',')
# g = pd.read_csv("drug_consumption.data.txt", header=None, sep=',')
g = np.array(g)
data = np.array(g[:, 1:13]) # Remove the ID and labels
labels = g[:, 13:]
yfalse_value = 'CL0'
y = np.array([1.0 if yy == yfalse_value else 0.0 for yy in labels[:, 5]])
dataset = namedtuple('_', 'data, target')(data, y)
#print('Loading Drug (black vs others) dataset...')
# dataset_train = load_drug()
sensible_feature = 4 # ethnicity
a = np.array([1.0 if el == -0.31685 else 0 for el in data[:, sensible_feature]])
X = np.delete(data, sensible_feature, axis=1).astype(float)
return X, y, a
def load_adult(DIR_DATA, smaller=False, scaler=True):
'''
:param smaller: selecting this flag it is possible to generate a smaller version of the training and test sets.
:param scaler: if True it applies a StandardScaler() (from sklearn.preprocessing) to the data.
:return: train and test data.
Features of the Adult dataset:
0. age: continuous.
1. workclass: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
2. fnlwgt: continuous.
3. education: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th,
Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
4. education-num: continuous.
5. marital-status: Married-civ-spouse, Divorced, Never-married, Separated, Widowed,
Married-spouse-absent, Married-AF-spouse.
6. occupation: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty,
Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv,
Protective-serv, Armed-Forces.
7. relationship: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
8. race: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
9. sex: Female, Male.
10. capital-gain: continuous.
11. capital-loss: continuous.
12. hours-per-week: continuous.
13. native-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc),
India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico,
Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala,
Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
(14. label: <=50K, >50K)
'''
data = pd.read_csv(
DIR_DATA,
names=[
"Age", "workclass", "fnlwgt", "education", "education-num", "marital-status",
"occupation", "relationship", "race", "gender", "capital gain", "capital loss",
"hours per week", "native-country", "income"]
)
len_train = len(data.values[:, -1])
data_test = pd.read_csv(
DIR_DATA + "adult/adult.test",
names=[
"Age", "workclass", "fnlwgt", "education", "education-num", "marital-status",
"occupation", "relationship", "race", "gender", "capital gain", "capital loss",
"hours per week", "native-country", "income"]
)
data = pd.concat([data, data_test])
# Considering the relative low portion of missing data, we discard rows with missing data
    domanda = data["workclass"][4].values[1]  # 'domanda' ('question' in Italian): the placeholder string used for missing entries
data = data[data["workclass"] != domanda]
data = data[data["occupation"] != domanda]
data = data[data["native-country"] != domanda]
# Here we apply discretisation on column marital_status
data.replace(['Divorced', 'Married-AF-spouse',
'Married-civ-spouse', 'Married-spouse-absent',
'Never-married', 'Separated', 'Widowed'],
['not married', 'married', 'married', 'married',
'not married', 'not married', 'not married'], inplace=True)
# categorical fields
category_col = ['workclass', 'race', 'education', 'marital-status', 'occupation',
'relationship', 'gender', 'native-country', 'income']
for col in category_col:
b, c = np.unique(data[col], return_inverse=True)
data[col] = c
datamat = data.values
target = np.array([-1.0 if val == 0 else 1.0 for val in np.array(datamat)[:, -1]])
datamat = datamat[:, :-1]
if scaler:
scaler = StandardScaler()
scaler.fit(datamat)
datamat = scaler.transform(datamat)
if smaller:
print('A smaller version of the dataset is loaded...')
data = namedtuple('_', 'data, target')(datamat[:len_train // 20, :-1], target[:len_train // 20])
data_test = namedtuple('_', 'data, target')(datamat[len_train:, :-1], target[len_train:])
else:
print('The dataset is loaded...')
data = namedtuple('_', 'data, target')(datamat[:len_train, :-1], target[:len_train])
data_test = namedtuple('_', 'data, target')(datamat[len_train:, :-1], target[len_train:])
return data, data_test
# def load_toy_test():
# # Load toy test
# n_samples = 100 * 2
# n_samples_low = 20 * 2
# n_dimensions = 10
# X, y, sensible_feature_id, _, _ = generate_toy_data(n_samples=n_samples,
# n_samples_low=n_samples_low,
# n_dimensions=n_dimensions)
# data = namedtuple('_', 'data, target')(X, y)
# return data, data
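# ----------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): the loaders above expect
# the raw data files shipped with the repository.  The path below is the one used
# elsewhere in this code base and is an assumption about your working directory.
if __name__ == "__main__":
    X, y, a = load_drug_data('data/classification/drug/drug_consumption.data.txt')
    print(X.shape, y.shape, a.shape)                        # features, binary labels, protected attribute
    print('positive rate:', y.mean(), 'protected rate:', a.mean())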
| 11,006
| 46.038462
| 207
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/fair_training.py
|
# fair_training.py
# training methods for fair regression
import torch
from torch.autograd import Variable
import torch.optim as optim
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementation of stochastic gradient descent
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
# +--------------------------------------------------+
# | Algorithm: Stratified SGD - Classification |
# +--------------------------------------------------+
def fair_learning(generator, predict, fair_loss, params, lambda_, psi=None, verbose = False, logdata=None, gamma_scheduler=None, lr_decay=1, lr=1e-3, logfairloss=None, **kwargs):
'''
Train model using Algorithm 2, which uses mini-batch SGD to train accuracy_loss + lambda_ * fairness_loss.
Args:
generator (generator): Generator which yields (X,Y,A)
predict (fct handle): Prediction function handle, maps X-->Y_hat
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
params (list of params): List of learnable parameters, such as returned by "nn.parameters()" or list of torch Variables
lambda_ (numeric): Hyperparameter controlling influence of L_fair
verbose (bool, optional): Verbosity
logdata (None or tuple of X,Y,A, all torch.Tensor, optional): data for keeping track of training process
gamma_scheduler (optional): Learning Rate Scheduler
logfairloss (optional): Fairness log function to use for logging instead of fairloss
Returns:
        Training loss history over training if logdata is provided; note that params are modified in place
'''
if logfairloss==None:
logfairloss = fair_loss
optimizer = optim.Adam(params, lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
criterion = torch.nn.BCEWithLogitsLoss()
batch_reg_loss = []
batch_fair_loss = []
for iterate, (X,Y,A) in enumerate(generator):
if logdata:
with torch.no_grad():
y_hat_log = predict(logdata[0])
batch_reg_loss.append(criterion(y_hat_log, logdata[1]))
# CHANGE: CHANGE TO EQUAL OPPORTUNITY FORMULATION
y_hat_1_log = y_hat_log[(logdata[2]==1) & (logdata[1]==1)]
y_hat_0_log = y_hat_log[(logdata[2]==0) & (logdata[1]==1)]
# END CHANGE
batch_fair_loss.append(logfairloss(y_hat_1_log, y_hat_0_log))
optimizer.zero_grad()
# predict
y_hat = predict(X)
# compute regression and fairness loss
L = criterion(y_hat, Y)
y_after_sig = torch.sigmoid(y_hat)
y_after_sig = y_after_sig[:, None]
# CHANGE: CHANGE TO EQUAL OPPORTUNITY FORMULATION
y_hat_1 = y_hat[(A==1) & (Y==1)]
y_hat_0 = y_hat[(A==0) & (Y==1)]
        if (len(y_hat_1)<=1) or (len(y_hat_0)<=1):  # fixed: check both groups (the original tested y_hat_0 twice)
            nit = len(batch_fair_loss)
            print(f'Iterate {nit}: too few positive samples in one of the protected groups in this batch')
# END CHANGE
L_fair = fair_loss(y_hat_1, y_hat_0)
# overall loss
loss = L + lambda_ * L_fair
# logging
if verbose:
print('Iterate {}: L_reg={}, L_fair={}'.format(iterate, L.data.item(), L_fair.data.item()))
        # gradient computation and optimizer step
loss.backward()
optimizer.step()
return batch_reg_loss, batch_fair_loss
# +--------------------------------------------------+
# | Algorithm: Stratified SGD - Regression |
# +--------------------------------------------------+
def fair_learning_regression(generator, predict, fair_loss, params, lambda_, psi=None, verbose = False, logdata=None, gamma_scheduler=None, lr_decay=1, lr=1e-3, logfairloss=None, **kwargs):
'''
    Train model using mini-batch SGD to minimize accuracy_loss + lambda_ * fairness_loss.
Args:
generator (generator): Generator which yields (X,Y,A)
predict (fct handle): Prediction function handle, maps X-->Y_hat
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
params (list of params): List of learnable parameters, such as returned by "nn.parameters()" or list of torch Variables
lambda_ (numeric): Hyperparameter controlling influence of L_fair
verbose (bool, optional): Verbosity
logdata (None or tuple of X,Y,A, all torch.Tensor, optional): data for keeping track of training process
gamma_scheduler (optional): Learning Rate Scheduler
logfairloss (optional): Fairness log function to use for logging instead of fairloss
Returns:
        Training loss history over training if logdata is provided; note that params are modified in place
'''
raise NotImplementedError
if logfairloss==None:
logfairloss = fair_loss
optimizer = optim.Adam(params, lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
criterion = torch.nn.MSELoss()
batch_reg_loss = []
batch_fair_loss = []
for iterate, (X,Y,A) in enumerate(generator):
if logdata:
with torch.no_grad():
y_hat_log = predict(logdata[0])
batch_reg_loss.append(criterion(y_hat_log, logdata[1]))
y_hat_1_log = y_hat_log[logdata[2]==1]
y_hat_0_log = y_hat_log[logdata[2]==0]
batch_fair_loss.append(logfairloss(y_hat_1_log, y_hat_0_log))
optimizer.zero_grad()
# predict
y_hat = predict(X)
# compute regression and fairness loss
L = criterion(y_hat, Y)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
L_fair = fair_loss(y_hat_1, y_hat_0)
# overall loss
loss = L + lambda_ * L_fair
# logging
if verbose:
print('Iterate {}: L_reg={}, L_fair={}'.format(iterate, L.data.item(), L_fair.data.item()))
        # gradient computation and optimizer step
loss.backward()
optimizer.step()
return batch_reg_loss, batch_fair_loss
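# ----------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): run fair_learning on a tiny
# random classification problem.  The generator, linear model and mean_gap fairness
# loss below are illustrative stand-ins, not part of the MFL pipeline.
if __name__ == "__main__":
    torch.manual_seed(0)
    X = torch.randn(256, 5)
    A = (torch.rand(256, 1) < 0.5).float()
    Y = (torch.rand(256, 1) < 0.5).float()
    def toy_batches(batch_size=64, n_epochs=5):
        # plain sequential mini-batches over the random data
        for _ in range(n_epochs):
            for i in range(0, X.shape[0], batch_size):
                yield X[i:i+batch_size], Y[i:i+batch_size], A[i:i+batch_size]
    model = torch.nn.Linear(5, 1)
    def mean_gap(y1, y0):
        # simple surrogate fairness loss: absolute gap between the group means
        return (y1.mean() - y0.mean()).abs()
    reg_hist, fair_hist = fair_learning(toy_batches(), model.forward, mean_gap,
                                        model.parameters(), lambda_=1.0,
                                        logdata=(X, Y, A))
    print(len(reg_hist), 'logged iterations')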
| 6,302
| 45.688889
| 189
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/fairness_metrics.py
|
import torch
import ot
import cvxpy as cp
import numpy as np
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementations of the fairness metrics (e.g. energy distance, Sinkhorn divergence, statistical parity)
as well as performance metrics (e.g. MSE, accuracy) of a model mentioned in the paper.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
# +------------------------------------------+
# | Metric 1: Energy Distance |
# +------------------------------------------+
def energy_distance(y1, y2):
'''
    Compute the energy distance between the empirical distributions of y1 and y2, each one-dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
n1 = torch.numel(y1)
n2 = torch.numel(y2)
return (2*torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1)).mean()
-torch.abs(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n1*(n1-1))
-torch.abs(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(n2*(n2-1)))
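# The expression above is the standard (nearly unbiased) estimator of the energy distance
#   E(P1, P2) = 2 E|Y1 - Y2| - E|Y1 - Y1'| - E|Y2 - Y2'|,
# where the cross term averages over all n1*n2 pairs and the within-group sums run over the
# i != j pairs (the diagonal differences are zero, so dividing by n*(n-1) excludes them).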
# +------------------------------------------+
# | Metric 2: Sinkhorn Divergence |
# +------------------------------------------+
def sinkhorn_diver(y1, y2):
'''
    Compute the Sinkhorn divergence between the empirical distributions of y1 and y2, each one-dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
        dist (torch.Tensor): The computed Sinkhorn divergence
'''
# compute cost matrix
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink12 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y1.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink11 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
C = torch.sqrt(torch.norm(y2.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink22 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
return (sink12 - 1/2 * sink11 - 1/2 * sink22).sum()
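# The value returned above is the debiased Sinkhorn divergence
#   S_eps(P1, P2) = OT_eps(P1, P2) - 1/2 OT_eps(P1, P1) - 1/2 OT_eps(P2, P2),
# where each OT_eps term is an entropy-regularized optimal transport cost computed with
# uniform marginals and regularization parameter 0.1.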
# +------------------------------------------+
# | Metric 3: MMD with RBF Kernel |
# +------------------------------------------+
def MMD_RBF(y1, y2):
    # squared MMD estimate with an RBF kernel (self-pairs excluded in the within-group terms)
    y1 = y1.flatten()
    y2 = y2.flatten()
    n = y1.shape[0]
    m = y2.shape[0]
    def rbf(diff, diagzero=True):
        sigma = 0.1
        K = torch.exp(-(diff ** 2) / (2 * sigma ** 2))  # RBF kernel; note the negative sign in the exponent
        if diagzero:
            K = K * (1 - torch.eye(diff.shape[0]))  # zero out the i == j terms before dividing by n*(n-1)
        return K
    return (-2*rbf(y1.unsqueeze(0)-y2.unsqueeze(1), diagzero=False).sum()/(n*m)
            +rbf(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n*(n-1))
            +rbf(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(m*(m-1)))
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity(y1_hat, y2_hat, y1, y2):
'''
    Compute the maximum statistical disparity, i.e. the maximum difference
    between the two empirical CDFs (the Kolmogorov-Smirnov distance)
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
        epsilon (torch.Tensor): maximum statistical disparity
'''
diff = torch.tensor(0)
for y_test in torch.hstack((y1_hat,y2_hat)).flatten():
cdf1_y = (y1_hat<=y_test).float().mean()
cdf2_y = (y2_hat<=y_test).float().mean()
if (cdf1_y-cdf2_y).abs()>diff:
diff = (cdf1_y-cdf2_y).abs()
return diff
# +------------------------------------------+
# | Evaluation Metric 2: Bounded Group Loss |
# +------------------------------------------+
def bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L2'):
'''
    Compute the ratio of the group losses between the predictions for the
    two protected classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
        epsilon (torch.Tensor): The ratio between the group losses (reported as a value <= 1)
'''
r1 = y1_hat-y1
r2 = y2_hat-y2
if loss=='L2':
lossf = lambda ra,rb: (ra**2).mean() / (rb**2).mean()
if loss=='L1':
lossf = lambda ra,rb: ra.abs().mean() / rb.abs().mean()
l = lossf(r1,r2)
return l if l<1 else 1/l
# +------------------------------------------+
# | Evaluation Metric 3: |
# | Group Fairness in Expectation |
# +------------------------------------------+
def group_fair_expect(y1_hat, y2_hat, y1, y2):
'''
Compute Group Fairness in Expectation between prediction for different
classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between means
'''
return (y1_hat.mean()-y2_hat.mean()).abs()
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity_classification(y1_hat, y2_hat, y1, y2):
'''
    Compute the statistical parity gap for classification, i.e. the absolute
    difference between the positive prediction rates of the two groups
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
        epsilon (torch.Tensor): statistical parity gap
'''
return ((y1_hat).sum() / y1_hat.shape[0] - (y2_hat).sum() / y2_hat.shape[0]).abs()
# +-----------------------------------------+
# | Evaluation Metric 2: Equal Opportunity |
# +-----------------------------------------+
# CHANGE: CHANGE TO EQUAL OPPORTUNITY FORMULATION
def equal_opportunity_classification(y1_hat, y2_hat, y1, y2):
'''
Compute equal opportunity metric
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
        epsilon (torch.Tensor): absolute difference in true positive rates between the two groups
'''
y1_hat_sharp = (y1_hat>=0.5).float()[y1==1]
y2_hat_sharp = (y2_hat>=0.5).float()[y2==1]
return (y1_hat_sharp.mean()-y2_hat_sharp.mean()).abs()
# END CHANGE
# +------------------------------------------+
# | Evaluation Metric 4: lp distance |
# +------------------------------------------+
def lp_dist(y1_hat, y2_hat, y1, y2, p=1):
'''
    Compute the Lp distance between the empirical CDFs of the two prediction distributions.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): lp distance
'''
dist = torch.tensor(0.)
ys, idx = torch.hstack((y1_hat,y2_hat)).flatten().sort()
for i in range(ys.shape[0]-1):
cdf1_y = (y1_hat <= ys[i]).float().mean()
cdf2_y = (y2_hat <= ys[i]).float().mean()
dist += ((cdf1_y - cdf2_y).abs() ** p) * (ys[i+1] - ys[i])
return dist**(1/p)
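# The loop above approximates the Lp distance between the two empirical CDFs,
#   ( integral |F1(y) - F2(y)|^p dy )^(1/p),
# by summing |F1 - F2|^p over the intervals between consecutive sorted predictions.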
# +------------------------------------------+
# | Reg/Clf Metrics: MSE, MAE, Accuracy |
# +------------------------------------------+
def MSE(y1_hat, y2_hat, y1, y2):
'''
    Compute the mean squared error over both groups.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MSE (torch.Tensor): mean squared error
'''
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
ys = torch.hstack((y1, y2)).flatten()
return ((ys-yhats)**2).mean()
def MAE(y1_hat, y2_hat, y1, y2):
'''
    Compute the mean absolute error over both groups.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MAE (torch.Tensor): mean absolute error
'''
yhats = torch.hstack((y1_hat,y2_hat)).flatten()
ys = torch.hstack((y1,y2)).flatten()
return (ys-yhats).abs().mean()
def accuracy(y1_hat, y2_hat, y1, y2):
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
total = ys.size(0)
correct = (yhats == ys).sum().item()
return torch.tensor(correct / total * 100)
def R2(y1_hat, y2_hat, y1, y2):
'''
Compute regression R2.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
        R2 (torch.Tensor): coefficient of determination
'''
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
var_y = torch.var(ys, unbiased=False)
return 1.0 - torch.nn.MSELoss(reduction="mean")(yhats, ys) / var_y
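# ----------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): evaluate a few of the
# metrics above on toy predictions for two protected groups (random numbers only).
if __name__ == "__main__":
    torch.manual_seed(0)
    y1_hat = torch.rand(50)              # predictions for group A = 1
    y0_hat = torch.rand(60) + 0.1        # slightly shifted predictions for group A = 0
    y1 = (torch.rand(50) < 0.5).float()  # ground-truth labels (unused by the first metric)
    y0 = (torch.rand(60) < 0.5).float()
    print('energy distance   :', energy_distance(y1_hat, y0_hat).item())
    print('statistical parity:', statistical_parity(y1_hat, y0_hat, y1, y0).item())
    print('mean gap (GFE)    :', group_fair_expect(y1_hat, y0_hat, y1, y0).item())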
| 10,671
| 34.221122
| 133
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/benchmark.py
|
# benchmark.py
# file with functions for running experiment
import fair_training
import numpy as np
import torch
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull
import time
def convergence_plotter(regloss, fairloss, lambda_):
plt.figure(figsize=(16,5))
plt.subplot(131)
plt.plot(regloss)
plt.title('Regression Loss')
plt.xlabel('Iteration')
plt.ylabel('Regression Loss')
plt.subplot(132)
plt.plot(fairloss)
plt.title('Fairness Loss')
plt.xlabel('Iteration')
plt.ylabel('Fairness Loss')
plt.subplot(133)
plt.plot(lambda_*np.array(fairloss)+np.array(regloss))
plt.title('Overall Loss')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.show()
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides the implementation of the train and test functions for MFL.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def train_test_fair_learning(ds, model, fair_loss, lr, batch_size, N_epochs, lambda_, metrics, lr_decay = 1, psi=None, plot_convergence=False, logfairloss=None, train_test_split_fin=0, **kwargs):
'''
Train a model using algorithm 2 and test it on metrics
Args:
ds (data_loader.DataLoader): Data loader to use
model (torch.nn.Module): Pytorch module
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
lr (float): SGD Learning Rate
batch_size (int): Batch-Size of SGD
N_epochs (int): Number of epochs for SGD
lambda_ (numeric): Hyperparameter controlling influence of L_fair
metrics (dict with fctn handles): Metrics to use in evaluation. Will return a dict with same keys
plot_convergence (bool, optional): If convergence plot of training should be shown
logfairloss (fctn handle, optional): Fairness function used for logging instead of fair_loss
        train_test_split_fin (bool, Adult data) : Train-Test split already performed on the Adult data
Returns:
train_results, test_results: dicts of results
'''
# train the model
start_time = time.time()
regloss, fairloss = fair_training.fair_learning(generator=ds.stratified_batch_generator_worep(batch_size, N_epochs),
predict=model.forward,
fair_loss=fair_loss,
params=model.parameters(),
lambda_=lambda_,
lr_decay=lr_decay,
logdata = ds.get_log_data() if plot_convergence else None,
psi=psi, lr=lr, logfairloss=logfairloss, **kwargs)
stop_time = time.time()
# plot convergence if desired
if plot_convergence:
convergence_plotter(regloss, fairloss, lambda_)
# compute metrics
model.eval()
# metrics on training set
if train_test_split_fin:
X, Y, A, X_test, Y_test, A_test = ds.get_adult_data()
else:
X, Y, A = ds.get_log_data()
X_test, Y_test, A_test= ds.get_test_data()
y_hat = torch.round(torch.sigmoid(model(X)))
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = torch.round(torch.sigmoid(model(X_test)))
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['time'] = stop_time - start_time
return train_results, test_results
def train_test_fair_learning_regression(ds, model, fair_loss, lr, batch_size, N_epochs, lambda_, metrics, lr_decay = 1, psi=None, plot_convergence=False, logfairloss=None, train_test_split_fin=0, **kwargs):
'''
Train a model using algorithm 2 and test it on metrics
Args:
ds (data_loader.DataLoader): Data loader to use
model (torch.nn.Module): Pytorch module
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
lr (float): SGD Learning Rate
batch_size (int): Batch-Size of SGD
N_epochs (int): Number of epochs for SGD
lambda_ (numeric): Hyperparameter controlling influence of L_fair
metrics (dict with fctn handles): Metrics to use in evaluation. Will return a dict with same keys
plot_convergence (bool, optional): If convergence plot of training should be shown
logfairloss (fctn handle, optional): Fairness function used for logging instead of fair_loss
        train_test_split_fin (bool, Adult data) : Train-Test split already performed on the Adult data
Returns:
train_results, test_results: dicts of results
'''
raise NotImplementedError
# train the model
start_time = time.time()
regloss, fairloss = fair_training.fair_learning_regression(generator=ds.stratified_batch_generator_worep(batch_size, N_epochs),
predict=model.forward,
fair_loss=fair_loss,
params=model.parameters(),
lambda_=lambda_,
lr_decay=lr_decay,
logdata = ds.get_log_data() if plot_convergence else None,
psi=psi, lr=lr, logfairloss=logfairloss, **kwargs)
stop_time = time.time()
# plot convergence if desired
if plot_convergence:
convergence_plotter(regloss, fairloss, lambda_)
# compute metrics
model.eval()
# metrics on training set
X, Y, A = ds.get_log_data()
X_test, Y_test, A_test = ds.get_test_data()
y_hat = model(X)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = model(X_test)
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['time'] = stop_time - start_time
return train_results, test_results
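# ----------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): wire the sibling modules
# together for one classification run on random data.  It assumes data_loader and
# fairness_metrics live next to this script; the data, the linear model and the
# hyperparameters below are illustrative only.
if __name__ == "__main__":
    import data_loader
    import fairness_metrics
    torch.manual_seed(0)
    X = torch.randn(600, 8)
    A = (torch.rand(600, 1) < 0.5).float()
    Y = (torch.rand(600, 1) < 0.5).float()
    ds = data_loader.DataLoader(X, Y, A)
    ds.split_test(test_size=0.3)
    model = torch.nn.Linear(ds.get_k(), 1)
    metrics = {'accuracy': fairness_metrics.accuracy,
               'statistical_parity': fairness_metrics.statistical_parity_classification}
    train_res, test_res = train_test_fair_learning(ds, model, fairness_metrics.energy_distance,
                                                   lr=5e-4, batch_size=64, N_epochs=5,
                                                   lambda_=1.0, metrics=metrics)
    print(test_res)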
| 7,159
| 40.149425
| 206
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/setup.py
|
from setuptools import setup
setup(
name="dccp",
version="1.0.3",
author="Xinyue Shen, Steven Diamond, Stephen Boyd",
author_email="xinyues@stanford.edu, diamond@cs.stanford.edu, boyd@stanford.edu",
packages=["dccp"],
license="GPLv3",
zip_safe=False,
install_requires=["cvxpy >= 0.3.5"],
use_2to3=True,
url="http://github.com/cvxgrp/dccp/",
description="A CVXPY extension for difference of convex programs.",
)
| 456
| 27.5625
| 84
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/data_loader.py
|
# data_loader.py
# utilities for loading data
import torch
import numpy as np
import pandas as pd
import copy
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from load_data import *
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn import preprocessing
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides data loading functionality for MFL.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def to_tensor(data, device):
D = data
if type(data) == pd.core.frame.DataFrame:
D = data.to_numpy()
if type(D) == np.ndarray:
return torch.tensor(D, device=device).float()
elif type(D) == torch.Tensor:
return D.to(device).float()
else:
raise NotImplementedError('Currently only Torch Tensors, Numpy NDArrays and Pandas Dataframes are supported')
class DataLoader:
def __init__(self, X, Y, A, X_test=None, Y_test=None, A_test=None, use_tensor=True, device='cpu', info='No Info Available', min_max_scaler=None):
self.device = device
self.use_tensor = use_tensor
self.X = to_tensor(X, device) if use_tensor else X
self.A = to_tensor(A, device) if use_tensor else A
self.Y = to_tensor(Y, device) if use_tensor else Y
        if X_test is not None:
            self.X_test = to_tensor(X_test, device) if use_tensor else X_test
            self.A_test = to_tensor(A_test, device) if use_tensor else A_test
            self.Y_test = to_tensor(Y_test, device) if use_tensor else Y_test
        else:
            # make the test attributes exist so get_test_data can raise a clear error before split_test is called
            self.X_test, self.Y_test, self.A_test = None, None, None
        self.info = info
        self.min_max_scaler = min_max_scaler  # honour the constructor argument (defaults to None)
def get_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_adult_data(self):
return (self.X, self.Y, self.A, self.X_test, self.Y_test, self.A_test)
def get_data_for_A(self, a):
# get dataset but only for samples with attribute a
X_a = self.X[(self.A==a).squeeze()]
Y_a = self.Y[(self.A==a).squeeze()]
return (X_a, Y_a)
def stratified_batch_generator_worep(self, batch_size=32, n_epochs=100):
        # get proportions of the protected attribute
p_A1 = self.A.mean()
p_A0 = 1 - p_A1
total_samples = self.A.shape[0]
# build index set of protected and unprotected attribute
# number of samples to sample from each distribution
n_batch_1 = int(p_A1*batch_size)
n_batch_0 = int(p_A0*batch_size)
for epoch in tqdm(range(n_epochs)):
ind_A1 = (self.A==1).nonzero()[:,0]
ind_A0 = (self.A==0).nonzero()[:,0]
for _ in range(0, total_samples - batch_size + 1, batch_size):
# sample indexes for protected and unprotected class
sampled_indices_A1 = (torch.ones(ind_A1.shape[0]) / (ind_A1.shape[0])).multinomial(
num_samples=n_batch_1,
replacement=False)
batch_idx1 = ind_A1[sampled_indices_A1]
mask = torch.ones(ind_A1.numel(), dtype=torch.bool)
mask[sampled_indices_A1] = False
ind_A1 = ind_A1[mask]
sampled_indices_A0 = (torch.ones(ind_A0.shape[0]) / (ind_A0.shape[0])).multinomial(
num_samples=n_batch_0,
replacement=False)
batch_idx0 = ind_A0[sampled_indices_A0]
mask = torch.ones(ind_A0.numel(), dtype=torch.bool)
mask[sampled_indices_A0] = False
ind_A0 = ind_A0[mask]
yield (torch.vstack((self.X[batch_idx0], self.X[batch_idx1])),
torch.vstack((self.Y[batch_idx0], self.Y[batch_idx1])),
torch.vstack((self.A[batch_idx0], self.A[batch_idx1])))
def get_info(self):
return self.info
def split_test(self, **kwargs):
# perform train test split, kwargs for sklearn train-test-split
X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(self.X, self.Y, self.A, **kwargs)
self.X = X_train
self.X_test = X_test
self.Y = Y_train
self.Y_test = Y_test
self.A = A_train
self.A_test = A_test
def get_test_data(self):
# get the test dataset
if self.X_test is None:
raise ValueError('Train-Test split has not yet been performed')
if self.min_max_scaler is not None:
x_vals = self.X_test.values #returns a numpy array
x_scaled = self.min_max_scaler.fit_transform(x_vals)
self.X_test = pd.DataFrame(x_scaled)
return (self.X_test, self.Y_test, self.A_test)
def get_log_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_k(self):
return self.X.shape[1]
class LawSchool(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x, **kwargs):
rawdata = pd.read_sas('./data/classification/lawschool/lawschs1_1.sas7bdat')
rawdata = rawdata.drop(['college', 'Year', 'URM', 'enroll'], axis=1)
rawdata = rawdata.dropna(axis=0)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
X = rawdata[['LSAT', 'GPA', 'Gender', 'resident']]
Y = rawdata['admit']
A = rawdata['White']
if a_inside_x:
X = pd.concat((X, A), axis=1)
info = '''Law School Admissions Data collected by Project SEAPHE, predict admission,
don\'t discriminate White vs. Non-White\nhttp://www.seaphe.org/databases.php'''
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], info=info, **kwargs)
class Drug(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x):
X, Y, A = load_drug_data('data/classification/drug/drug_consumption.data.txt')
        info = '''UCI Drug Consumption dataset, predict drug consumption, protected attribute: ethnicity
        https://archive.ics.uci.edu/ml/datasets/Drug+consumption+%28quantified%29
        '''
if a_inside_x:
X = np.concatenate((X, A[:, None]), axis=1)
super().__init__(X, Y[:, None], A[:, None], info=info)
class Credit(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x, **kwargs):
rawdata = pd.read_excel('./data/classification/credit_card/default_clients.xls', header=1)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
columns = list(rawdata.columns)
categ_cols = []
for column in columns:
if 2 < len(set(rawdata[column])) < 10:
categ_cols.append((column, len(set(rawdata[column]))))
preproc_data = copy.deepcopy(rawdata)
for categ_col, n_items in categ_cols:
for i in range(n_items):
preproc_data[categ_col + str(i)] = (preproc_data[categ_col] == i).astype(float)
preproc_data = preproc_data.drop(['EDUCATION', 'MARRIAGE'], axis=1)
X = preproc_data.drop(['ID', 'SEX', 'default payment next month'], axis=1)
Y = preproc_data['default payment next month']
A = 2 - preproc_data['SEX']
if a_inside_x:
X = pd.concat((X, A), axis=1)
info = '''Credit data'''
self.min_max_scaler = preprocessing.MinMaxScaler()
x_vals = X.values #returns a numpy array
x_scaled = self.min_max_scaler.fit_transform(x_vals)
X = pd.DataFrame(x_scaled)
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], info=info, **kwargs)
class Adult(DataLoader):
def __init__(self, a_inside_x, **kwargs):
X_train_, Y_train_, X_test_, Y_test_ = adult_data_read('./data/classification/adult/')
A = X_train_['Sex']
A_test = X_test_['Sex']
le = LabelEncoder()
Y = le.fit_transform(Y_train_)
Y = pd.Series(Y, name='>50k')
Y_test = le.fit_transform(Y_test_)
Y_test = pd.Series(Y_test, name='>50k')
if not a_inside_x:
X = X_train_.drop(labels=['Sex'], axis=1)
X = pd.get_dummies(X)
X_test = X_test_.drop(labels=['Sex'], axis=1)
X_test = pd.get_dummies(X_test)
else:
X = pd.get_dummies(X_train_)
X_test = pd.get_dummies(X_test_)
info = """Adult dataset for classification. Train Test split is already provided"""
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], X_test, np.array(Y_test)[:, None], np.array(A_test)[:, None], info=info, **kwargs)
class CommunitiesCrime(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class CommunitiesCrimeClassification(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, a_inside_x, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
bin_thr = Y.mean()
Y = (Y>= bin_thr).astype(int)
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
if a_inside_x:
X = np.concatenate((np.array(X), np.array(A)), axis=1)
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class BarPass(DataLoader):
# http://www.seaphe.org/databases.php
def __init__(self, **kwargs):
df = pd.read_sas('data/lawschs1_1.sas7bdat')
drop_cols = ['enroll', 'college', 'Year', 'Race']
df = df[[col for col in df.columns if col not in drop_cols]]
df = df.dropna()
Y = df[['GPA']]
A = df[['White']]
X = df.drop('GPA', axis=1)
info = '''Law School Admissions Data collected by Project SEAPHE, predict GPA,
don\'t discriminate White vs. Non-White\nhttp://www.seaphe.org/databases.php'''
self.first_call = True
super().__init__(X, Y, A, info=info, **kwargs)
def get_data(self):
if self.first_call:
self.Xs, self.Ys, self.As = next(self.stratified_batch_generator_worep(500, 1))
self.first_call = False
return (self.Xs, self.Ys, self.As)
def get_log_data(self):
return self.get_data()
class StudentPerformance(DataLoader):
# https://archive.ics.uci.edu/ml/datasets/student+performance
def __init__(self, subject = 'Math', **kwargs):
# load data
        df = pd.read_csv('data/student/student-{}.csv'.format(subject.lower()[:3]), sep=';')
# convert the categorical values
categoricals = df.dtypes[df.dtypes==object].index
for attribute in categoricals:
options = df[attribute].unique()
options.sort()
options = options[:-1]
for option in options:
df['{}_{}'.format(attribute, option)] = (df[attribute]==option).astype(int)
df = df.drop(attribute, axis=1)
# extract X A Y
A = df[['sex_F']]
Y = df[['G3']]
X = df.drop(['sex_F', 'G3'], axis=1)
info = '''
Student Performance dataset. Predict Final Grade based on Attributes, don't discriminate against female students.
https://archive.ics.uci.edu/ml/datasets/student+performance
'''
super().__init__(X, Y, A, info=info, **kwargs)
class Compas(DataLoader):
def __init__(self, a_inside_x):
X, Y, A = load_compas_data('data/classification/compas/compas-scores-two-years.csv')
info = '''
https://www.kaggle.com/danofer/compass
'''
if a_inside_x:
X = np.concatenate((X, A[:, None]), axis=1)
super().__init__(X, Y[:, None], A[:, None], info=info)
class Synthetic1(DataLoader):
# synthetic data: bias offset
def __init__(self, N, k, delta_intercept = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = delta_intercept+ X_0@theta
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
class Synthetic2(DataLoader):
# synthetic data: bias slope
def __init__(self, N, k, delta_slope = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = X_0@(theta+delta_slope)
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
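# Hedged illustration (not used by the benchmarks): the two synthetic loaders
# above differ only in how the group bias enters -- Synthetic1 shifts the
# intercept for group 0, Synthetic2 shifts the slope. The helper below rebuilds
# the same tensors and reports the resulting mean-outcome gap between groups.
def _synthetic_bias_gap(N=1000, k=4, delta=0.5, mode='intercept'):
    X = torch.normal(mean=0.0, std=torch.ones(int(N / 2), k))
    theta = torch.normal(mean=2, std=torch.ones(k, 1))
    if mode == 'intercept':  # Synthetic1-style bias
        Y_0, Y_1 = delta + X @ theta, X @ theta
    else:  # Synthetic2-style bias
        Y_0, Y_1 = X @ (theta + delta), X @ theta
    return (Y_0.mean() - Y_1.mean()).item()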
def set_seed(seed=0):
torch.manual_seed(seed)
np.random.seed(seed)
def adult_data_read(data_root, display=False):
""" Return the Adult census data in a nice package. """
dtypes = [
("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"),
("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"),
("Occupation", "category"), ("Relationship", "category"), ("Race", "category"),
("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"),
("Hours per week", "float32"), ("Country", "category"), ("Target", "category")
]
raw_train_data = pd.read_csv(
data_root+'adult.data',
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
raw_test_data = pd.read_csv(
data_root+'adult.test',
skiprows=1,
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
train_data = raw_train_data.drop(["Education"], axis=1) # redundant with Education-Num
test_data = raw_test_data.drop(["Education"], axis=1) # redundant with Education-Num
filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes))
train_data["Target"] = train_data["Target"] == " >50K"
test_data["Target"] = test_data["Target"] == " >50K."
rcode = {
"Not-in-family": 0,
"Unmarried": 1,
"Other-relative": 2,
"Own-child": 3,
"Husband": 4,
"Wife": 5
}
for k, dtype in filt_dtypes:
if dtype == "category":
if k == "Relationship":
train_data[k] = np.array([rcode[v.strip()] for v in train_data[k]])
test_data[k] = np.array([rcode[v.strip()] for v in test_data[k]])
else:
train_data[k] = train_data[k].cat.codes
test_data[k] = test_data[k].cat.codes
return train_data.drop(["Target", "fnlwgt"], axis=1), train_data["Target"].values, test_data.drop(["Target", "fnlwgt"], axis=1), test_data["Target"].values
# File: Metrizing-Fairness-main/equal_opportunity/run_benchmark_MMD_simple.py
import models
import fairness_metrics
import benchmark
import data_loader
import pickle
import argparse
import pandas as pd
import numpy as np
import time
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides an implementation of MFL with the MMD (RBF-kernel) fairness loss.
An example usage:
python run_benchmark_MMD_simple.py --dataset {} --seed {} --a_inside_x True --nlambda {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def run(args):
# act on experiment parameters:
data_loader.set_seed(args.seed)
Model = models.LinearRegression if args.model=='linearregression' else models.NeuralNetwork
fair_loss = fairness_metrics.MMD_RBF
lambda_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
train_test_split_fin = 0
lr = args.lr
batch_size = args.batch_size
n_epochs = args.n_epochs
    lr = 5e-4  # note: these hard-coded settings override the CLI values parsed above
n_epochs = 500
lr_decay = 0.99
batch_size = 2048
if args.dataset == 'CommunitiesCrimeClassification':
ds = data_loader.CommunitiesCrimeClassification(a_inside_x=args.a_inside_x)
batch_size = 128
lr_decay = 1.0
n_epochs = 300
lr = 2e-4
if args.dataset == 'Compas':
ds = data_loader.Compas(a_inside_x=args.a_inside_x)
lr_decay = 1.0
batch_size = 128
if args.dataset == 'LawSchool':
ds = data_loader.LawSchool(a_inside_x=args.a_inside_x)
if args.dataset == 'Credit':
ds = data_loader.Credit(a_inside_x=args.a_inside_x)
if args.dataset == 'Adult':
ds = data_loader.Adult(a_inside_x=args.a_inside_x)
train_test_split_fin = 1
n_epochs = 300
lr = 1e-2
if args.dataset == 'Drug':
ds = data_loader.Drug(a_inside_x=args.a_inside_x)
batch_size = 128
lr_decay = 1.0
n_epochs = 200
lr = 1e-3
# if args.dataset == 'Synthetic1':
# ds = data_loader.Synthetic1(1000, 4)
# if args.dataset == 'Synthetic2':
# ds = data_loader.Synthetic2(1000, 4)
# if args.dataset == 'CommunitiesCrime':
# ds = data_loader.CommunitiesCrime()
# if args.dataset == 'CommunitiesCrimeClassification':
# ds = data_loader.CommunitiesCrimeClassification(a_inside_x=args.a_inside_x)
# if args.dataset == 'BarPass':
# ds = data_loader.BarPass()
# if args.dataset == 'StudentsMath':
# ds = data_loader.StudentPerformance(subject='Math')
# if args.dataset == 'StudentsPortugese':
# ds = data_loader.StudentPerformance(subject='Portugese')
# if args.dataset == 'Adult':
# ds = data_loader.Adult(a_inside_x=args.a_inside_x)
# train_test_split_fin = 1
# if args.dataset == 'Compas':
# ds = data_loader.Compas(a_inside_x=args.a_inside_x)
# if args.dataset == 'LawSchool':
# ds = data_loader.LawSchool(a_inside_x=args.a_inside_x)
# if args.dataset == 'Drug':
# ds = data_loader.Drug(a_inside_x=args.a_inside_x)
# if args.dataset == 'Credit':
# ds = data_loader.Credit(a_inside_x=args.a_inside_x)
logfairloss = fair_loss
if args.dataset != 'Adult':
ds.split_test()
k = ds.get_k() # Dimension
# metrics to evaluate
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# storage of results
results_train = []
results_test = []
# run the test for various lambdas
for lambda_ in lambda_candidates:
print('Training MMD, for lambda_: {}/{}, seed:{}'.format(lambda_, args.nlambda, args.seed))
model = Model(k)
# if args.algorithm==1:
# train_metrics, test_metrics = benchmark.test_algorithm_1(ds,
# model,
# reg_loss,
# fair_loss,
# args.lr,
# args.n_iterates,
# lambda_,
# metrics,
# psi=None, plot_convergence=args.plot_convergence, logfairloss=logfairloss, weight_decay=args.weight_decay)
# elif args.algorithm==2:
# train_metrics, test_metrics = benchmark.test_algorithm_2(ds,
# model,
# reg_loss,
# fair_loss,
# args.lr,
# args.batch_size,
# args.n_epochs,
# lambda_,
# metrics,
# psi=None, plot_convergence=args.plot_convergence, logfairloss=logfairloss, weight_decay=args.weight_decay)
train_metrics, test_metrics = benchmark.train_test_fair_learning(ds=ds,
model=model,
fair_loss=fair_loss,
lr=lr,
batch_size=batch_size,
N_epochs=n_epochs,
lambda_=lambda_,
metrics=metrics,
lr_decay=lr_decay,
psi=None, plot_convergence=args.plot_convergence, logfairloss=logfairloss, weight_decay=args.weight_decay, train_test_split_fin=train_test_split_fin)
train_metrics['lambda_'] = lambda_
test_metrics['lambda_'] = lambda_
results_train.append(train_metrics)
results_test.append(test_metrics)
# save the results
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
if args.a_inside_x:
df_train.to_csv('results/NN_MMD/{}_{}_AinX_train_{}.csv'.format(args.dataset, \
args.model, args.seed))
df_test.to_csv('results/NN_MMD/{}_{}_AinX_test_{}.csv'.format(args.dataset, \
args.model, args.seed))
else:
print('here')
df_train.to_csv('results/NN_MMD/{}_{}_train_{}.csv'.format(args.dataset, \
args.model, args.seed))
df_test.to_csv('results/NN_MMD/{}_{}_test_{}.csv'.format(args.dataset, \
args.model, args.seed))
PARAMS = {'dataset':args.dataset,
'batch_size':batch_size,
'lr':lr, 'epochs':n_epochs,
'seed':args.seed,
'nlambda': args.nlambda,
'lambda_min':args.lambda_min,
'lambda_max':args.lambda_max,
              'algorithm':'adam',
              'model_details':model.state_dict(),
              'L':'BCE_cross_entropy',
              'fair_loss':'MMD_RBF',
'lr_decay':lr_decay,
'a_inside_x':args.a_inside_x
}
with open('results/NN_MMD/{}_{}_{}.pkl'.format(args.dataset, args.model, args.seed), 'wb') as f:
pickle.dump({**PARAMS}, f, protocol=pickle.HIGHEST_PROTOCOL)
#
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--model', default='NN', choices=['linearregression', 'NN'], help='Regression Model')
# parser.add_argument('--regloss', default='L2', choices=['L1', 'L2'], help='Regression Loss')
# parser.add_argument('--fairloss', required=True, choices=['Energy', 'Wasserstein'], help='Fairness loss')
parser.add_argument('--lambda_min', default=-5, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=2, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--lr', default=1e-4, type=float, help='Learning Rate of (S)GD: Currently has no effect since Adam is used')
parser.add_argument('--batch_size', default=128, type=int, help='Batch Size for algorithm 2')
parser.add_argument('--n_epochs', default=500, type=int, help='Number of Epochs of (S)GD')
parser.add_argument('--plot_convergence', default=False, action='store_true', help='If Convergence plot should be done')
parser.add_argument('--dataset', help='Dataset to use', choices=['Synthetic1', 'Synthetic2', 'CommunitiesCrime', 'CommunitiesCrimeClassification',
'BarPass', 'StudentsMath', 'StudentsPortugese', 'Compas', 'LawSchool', 'Adult',
'Credit', 'Drug'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=50)
parser.add_argument('--weight_decay', help='SGD weight decay', type=float, default=0.0)
parser.add_argument('--a_inside_x', default=False, type=str2bool, help='The sensitive feature is in X')
args = parser.parse_args()
run(args)
# File: Metrizing-Fairness-main/equal_opportunity/MMD_fair_run.py
import models
import fairness_metrics
import data_loader
import MMD_fair
import argparse
import pandas as pd
import numpy as np
import time
import pickle
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides an implementation of the paper in
https://papers.nips.cc/paper/2020/file/af9c0e0c1dee63e5acad8b7ed1a5be96-Paper.pdf
An example usage:
python .\MMD_fair_run.py --dataset {} --nlambda {} --seed {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def run(args):
# act on experiment parameters:
data_loader.set_seed(args.seed)
Model = models.NeuralNetwork_MMD
reg_loss = models.MSE
fair_loss = fairness_metrics.sinkhorn_diver
lambda_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
train_test_split_fin = 0
n_iterates = args.n_iterates
lr = 1e-1
lr_decay = 0.99
if args.dataset == 'CommunitiesCrimeClassification':
ds = data_loader.CommunitiesCrimeClassification(a_inside_x=args.a_inside_x)
if args.dataset == 'Compas':
ds = data_loader.Compas(a_inside_x=args.a_inside_x)
if args.dataset == 'LawSchool':
ds = data_loader.LawSchool(a_inside_x=args.a_inside_x)
if args.dataset == 'Credit':
ds = data_loader.Credit(a_inside_x=args.a_inside_x)
if args.dataset == 'Adult':
ds = data_loader.Adult(a_inside_x=args.a_inside_x)
train_test_split_fin = 1
if args.dataset == 'Drug':
ds = data_loader.Drug(a_inside_x=args.a_inside_x)
logfairloss = fair_loss
if args.dataset != 'Adult':
ds.split_test()
k = ds.get_k() # Dimension
# metrics to evaluate
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'equal_opportunity' : fairness_metrics.equal_opportunity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# storage of results
results_train = []
results_test = []
# run the test for various lambdas
# extract the data
X, Y, A = ds.get_data()
for lambda_ in lambda_candidates:
print('Training MMD-Sinkhorn method, for lambda_: {}/{}, seed:{}'.format(lambda_, args.nlambda, args.seed))
model = Model(k)
train_metrics, test_metrics = MMD_fair.mmd_fair_traintest(ds,
model,
reg_loss,
fair_loss,
lr,
n_iterates,
lambda_,
metrics,
psi=None, plot_convergence=args.plot_convergence, logfairloss=logfairloss, lr_decay=lr_decay)
train_metrics['lambda_'] = lambda_
test_metrics['lambda_'] = lambda_
results_train.append(train_metrics)
results_test.append(test_metrics)
# save the results
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
if args.a_inside_x:
df_train.to_csv('results/NN_MMD_sinkhorn/{}_{}_Sinkhorn_AinX_train_{}.csv'.format(args.dataset, args.model, args.seed))
df_test.to_csv('results/NN_MMD_sinkhorn/{}_{}_Sinkhorn_AinX_test_{}.csv'.format(args.dataset, args.model, args.seed))
else:
print('here')
df_train.to_csv('results/NN_MMD_sinkhorn/{}_{}_Sinkhorn_train_{}.csv'.format(args.dataset, \
args.model, args.seed))
df_test.to_csv('results/NN_MMD_sinkhorn/{}_{}_Sinkhorn_test_{}.csv'.format(args.dataset, \
args.model, args.seed))
PARAMS = {'dataset':args.dataset,
'lr':lr, 'iterates':n_iterates,
'seed':args.seed,
'nlambda': args.nlambda,
'lambda_min':args.lambda_min,
'lambda_max':args.lambda_max,
              'algorithm':'Gradient-Descent',
              'model_details':model.state_dict(),
'L':'MSE',
'fair_loss':'Sinkhorn',
'a_inside_x':args.a_inside_x
}
with open('results/NN_MMD_sinkhorn/{}_{}_Sinkhorn.pkl'.format(args.dataset, args.seed), 'wb') as f:
pickle.dump({**PARAMS}, f, protocol=pickle.HIGHEST_PROTOCOL)
#
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--model', default='NN', choices=['linearregression', 'NN'], help='Regression Model')
parser.add_argument('--lambda_min', default=-5, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=2, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--lr', default=1e-3, type=float, help='Gradient descent')
parser.add_argument('--n_iterates', default=500, type=int, help='Number of Iterates of GD')
parser.add_argument('--plot_convergence', default=False, action='store_true', help='If Convergence plot should be done')
parser.add_argument('--dataset', help='Dataset to use', choices=['CommunitiesCrimeClassification','Compas', 'LawSchool', 'Adult', 'Credit', 'Drug'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=50)
parser.add_argument('--a_inside_x', default=False, type=str2bool, help='The sensitive feature is in X')
args = parser.parse_args()
run(args)
# File: Metrizing-Fairness-main/equal_opportunity/zafar_classification.py
# Baseline 1: https://arxiv.org/pdf/1706.02409.pdf
import cvxpy as cp
import numpy as np
import argparse
import pandas as pd
import torch
from zafar_method import funcs_disp_mist
from zafar_method.utils import *
import fairness_metrics
import data_loader
from zafar_method import utils
import numpy as np
from tqdm import tqdm
import cvxpy as cp
from collections import namedtuple
from sklearn.metrics import log_loss
from zafar_method import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
import pickle
from copy import deepcopy
import os, sys
# from generate_synthetic_data import *
from zafar_method import utils as ut
from zafar_method import funcs_disp_mist as fdm
import time
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides an implementation of http://proceedings.mlr.press/v54/zafar17a/zafar17a.pdf.
The gamma parameter controls the accuracy-fairness tradeoff of the model.
An example usage: python zafar_classification.py --dataset {} --seed {} --nlambda {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def run(args):
# act on experiment parameters:
data_loader.set_seed(args.seed)
gamma_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
if args.dataset == 'CommunitiesCrimeClassification':
ds = data_loader.CommunitiesCrimeClassification(a_inside_x=0)
if args.dataset == 'Compas':
ds = data_loader.Compas(a_inside_x=0)
if args.dataset == 'LawSchool':
ds = data_loader.LawSchool(a_inside_x=0)
if args.dataset == 'Credit':
ds = data_loader.Credit(a_inside_x=0)
if args.dataset == 'Adult':
ds = data_loader.Adult(a_inside_x=0)
train_test_split_fin = 1
if args.dataset == 'Drug':
ds = data_loader.Drug(a_inside_x=0)
if args.dataset != 'Adult':
ds.split_test()
k = ds.get_k()
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'equal_opportunity' : fairness_metrics.equal_opportunity_classification,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# storage of results
results_train = []
results_test = []
X, Y, A = ds.get_data()
X_test, Y_test, A_test = ds.get_test_data()
x_train = X.cpu().detach().numpy()
Y_train = Y.cpu().detach().numpy().flatten()
a_train = A.cpu().detach().numpy().flatten()
x_test = X_test.cpu().detach().numpy()
y_test = Y_test.cpu().detach().numpy().flatten()
a_test = A_test.cpu().detach().numpy().flatten()
loss_function = "logreg" # perform the experiments with logistic regression
Y_test_ = y_test.copy()
Y_train_ = Y_train.copy()
Y_test_[y_test == 0] = -1
Y_train_[Y_train_ == 0] = -1
# run the test for various lambdas
y_train = Y_train_
y_test = Y_test_
x_control_train = {"s1": a_train}
x_control_test = {"s1": a_test}
cons_params = None # constraint parameters, will use them later
EPS = 1e-6
for gamma in gamma_candidates:
print('Training Zafar method, for gamma: {}/{}, seed:{}'.format(gamma, args.nlambda, args.seed))
start_time = time.time()
# mult_range = np.arange(1.0, 0.0 - it, -it).tolist()
# sensitive_attrs_to_cov_thresh = deepcopy(cov_all_train_uncons)
        apply_fairness_constraints = 0  # set to 0: optimize fairness subject to an accuracy constraint (gamma bounds the allowed loss in accuracy)
apply_accuracy_constraint = 1
sep_constraint = 0
# for m in mult_range:
# sensitive_attrs_to_cov_thresh = deepcopy(cov_all_train_uncons)
# for s_attr in sensitive_attrs_to_cov_thresh.keys():
# for cov_type in sensitive_attrs_to_cov_thresh[s_attr].keys():
# for s_val in sensitive_attrs_to_cov_thresh[s_attr][cov_type]:
# sensitive_attrs_to_cov_thresh[s_attr][cov_type][s_val] *= m
sensitive_attrs_to_cov_thresh = {"s1":0}
w = train_model(x_train, y_train, x_control_train, lf._logistic_loss, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], sensitive_attrs_to_cov_thresh, gamma)
# y_test_predicted = np.sign(np.dot(x_test, w))
# correct_answers = (y_test_predicted == y_test).astype(int) # will have 1 when the prediction and the actual label match
# accuracy = float(sum(correct_answers)) / float(len(correct_answers))
# y_test_predict[y_test_predict == -1] = 0
# w = torch.tensor(w).float()
# theta0 = torch.tensor(w).float()
stop_time = time.time()
predict = lambda X: torch.tensor(np.maximum(np.sign(np.dot(X.cpu().detach().numpy(), w)), 0)).float()
# metrics on train set
y_hat = predict(X).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = predict(X_test).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['lambda_'] = gamma
train_results['time'] = stop_time - start_time
test_results['lambda_'] = gamma
results_train.append(train_results)
results_test.append(test_results)
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
df_train.to_csv('results/zafar/{}_zafar_{}_train.csv'.format(args.dataset, args.seed))
df_test.to_csv('results/zafar/{}_zafar_{}_test.csv'.format(args.dataset, args.seed))
PARAMS = {'dataset':args.dataset,
'method':'zafar',
'seed':args.seed,
'nlambda': args.nlambda,
'lambda_min':args.lambda_min,
'lambda_max':args.lambda_max,
'a_inside_x': False
}
with open('results/zafar/{}_zafar_{}.pkl'.format(args.dataset, args.seed), 'wb') as f:
pickle.dump({**PARAMS}, f, protocol=pickle.HIGHEST_PROTOCOL)
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--lambda_min', default=-5, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=1, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--dataset', help='Dataset to use', choices=['CommunitiesCrimeClassification', 'Compas', 'LawSchool', 'Adult', 'Credit', 'Drug'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=25)
args = parser.parse_args()
run(args)
# File: Metrizing-Fairness-main/equal_opportunity/run.py
import os
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script reproduces the results of Table 7.
Example usage: python run.py
The results are saved under ./results folder.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
if __name__ == '__main__':
# experiments for energy distance
nlambda = 25
for dataset in ['CommunitiesCrimeClassification', 'Compas', 'Adult', 'Drug']:
print(dataset)
for seed in range(10):
print('Running for seed {}'.format(seed))
os.system('python run_benchmark1.py --dataset {} --seed {} --a_inside_x True --nlambda {} --model linear'.format(dataset, seed, nlambda))
os.system('python zafar_classification.py --dataset {} --seed {} --nlambda {}'.format(dataset, seed, nlambda))
if not dataset == 'Adult':
os.system('python MMD_fair_run.py --dataset {} --nlambda {} --a_inside_x True --seed {}'.format(dataset, nlambda, seed))
os.system('python run_benchmark.py --dataset {} --seed {} --a_inside_x True --nlambda {}'.format(dataset, seed, nlambda))
os.system('python fair_KDE.py --dataset {} --seed {} --nlambda {}'.format(dataset, seed, nlambda))
# File: Metrizing-Fairness-main/equal_opportunity/MMD_fair.py
# fair_training.py
# training methods for fair regression
import torch
from torch.autograd import Variable
import torch.optim as optim
import time
from tqdm import tqdm
# +---------------------------------+
# | Algorithm 1: Gradient Descent |
# +---------------------------------+
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides an implementation of the paper in
https://papers.nips.cc/paper/2020/file/af9c0e0c1dee63e5acad8b7ed1a5be96-Paper.pdf
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def mmd_gradient_descent(X, Y, A, model, predict, reg_loss, fair_loss, params, lr, N_iterates, lambda_, verbose=False, log=False, logfairloss=None, lr_decay=1, **kwargs):
'''
Train model using Algorithm 1, which uses simple gradient descent.
Args:
X (torch.Tensor): X data
Y (torch.Tensor): Y data
A (torch.Tensor): A data
predict (fct handle): Prediction function handle, maps X-->Y_hat
reg_loss (fct handle): Regression Loss function handle, maps Y_hat, Y-->L_reg
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
params (list of params): List of learnable parameters, such as returned by "nn.parameters()" or list of torch Variables
lr (float): SGD Learning Rate
N_iterates (int): Number of iterates for SGD
lambda_ (numeric): Hyperparameter controlling influence of L_fair
psi (fct handle, optional):Transformation function maps from Y_hat, Y --> score, fair_loss is computed on score
verbose (bool, optional): Verbosity
log (bool, optional): Return training path
logfairloss (optional): Sinkhorn divergence
Returns:
Trainig Loss over Training if log=True, but changes params
'''
optimizer = optim.SGD(params, lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
criterion = torch.nn.BCEWithLogitsLoss()
# optimizer = optim.Adam(params)
#scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)
epoch_reg_loss = []
epoch_fair_loss = []
for iterate in tqdm(range(N_iterates)):
# zero grad accumulator
optimizer.zero_grad()
# predict
y_hat = predict(X)
y_hat_first_layer = model.first_layer(X)
L_reg = criterion(y_hat, Y)
# y_hat = torch.sigmoid(y_hat)
# compute regression and fairness loss
y_hat_1 = y_hat_first_layer[(A.squeeze()==1) & (Y.squeeze()==1)]
y_hat_0 = y_hat_first_layer[(A.squeeze()==0) & (Y.squeeze()==1)]
L_fair = fair_loss(y_hat_1, y_hat_0)
# all_linear1_params = torch.cat([x.view(-1) for x in model.linear1.parameters()])
# all_linear2_params = torch.cat([x.view(-1) for x in model.linear2.parameters()])
# W_froben = torch.norm(all_linear1_params, 2) ** 2
# V_froben = torch.norm(all_linear2_params, 2) ** 2
# overall loss
# reg_weight = 0.1
loss = L_reg + lambda_ * L_fair
# logging
if verbose:
print('Iterate {}: L_reg={}, L_fair={}'.format(iterate, L_reg.data.item(), L_fair.data.item()))
if log:
epoch_fair_loss.append(L_fair.data.item())
epoch_reg_loss.append(L_reg.data.item())
# gradient comuptation and optimizer step
loss.backward()
optimizer.step()
#scheduler.step()
return epoch_reg_loss, epoch_fair_loss
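# Minimal usage sketch for mmd_gradient_descent (hedged: it only assumes the
# NeuralNetwork_MMD model and the sinkhorn_diver fairness loss that
# MMD_fair_run.py already passes in, and it is not called anywhere in the pipeline).
def _example_mmd_gradient_descent(n=256, k=5):
    import models
    import fairness_metrics
    torch.manual_seed(0)
    X = torch.randn(n, k)
    A = torch.randint(0, 2, (n, 1)).float()
    Y = torch.randint(0, 2, (n, 1)).float()
    net = models.NeuralNetwork_MMD(k)
    reg_hist, fair_hist = mmd_gradient_descent(
        X, Y, A, net, net.forward,
        models.MSE,                       # reg_loss handle (unused internally; BCE-with-logits is hard-coded above)
        fairness_metrics.sinkhorn_diver,  # fairness loss applied to the first-layer features
        net.parameters(), lr=1e-1, N_iterates=10, lambda_=0.1, log=True)
    return reg_hist, fair_hist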
def mmd_fair_traintest(ds, model, reg_loss, fair_loss, lr, n_iterates, lambda_, metrics, psi=None, plot_convergence=False, logfairloss=None, train_test_split_fin=0, lr_decay=1, **kwargs):
'''
    Train a model with full-batch gradient descent and test it on the given metrics
    Args:
        ds (data_loader.DataLoader): Data loader to use
        model (torch.nn.Module): Pytorch module
        reg_loss (fct handle): Regression/classification loss handle, maps Y_hat, Y-->L_reg
        fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
        lr (float): Gradient descent learning rate
        n_iterates (int): Number of gradient descent iterates
lambda_ (numeric): Hyperparameter controlling influence of L_fair
metrics (dict with fctn handles): Metrics to use in evaluation. Will return a dict with same keys
plot_convergence (bool, optional): If convergence plot of training should be shown
logfairloss (fctn handle, optional): Fairness function used for logging instead of fair_loss
train_test_split_fin (bool, adult data) : Train-Test split already performed on adultdata
Returns:
train_results, test_results: dicts of results
'''
# train the model
start_time = time.time()
if train_test_split_fin:
X, Y, A, X_test, Y_test, A_test = ds.get_adult_data()
else:
X, Y, A = ds.get_log_data()
X_test, Y_test, A_test= ds.get_test_data()
regloss, fairloss = mmd_gradient_descent(X, Y, A, model, model.forward,
reg_loss,
fair_loss,
model.parameters(), lr, n_iterates,
lambda_,
logdata = ds.get_log_data() if plot_convergence else None, logfairloss=logfairloss, lr_decay=lr_decay, **kwargs)
# plot convergence if desired
if plot_convergence:
convergence_plotter(regloss, fairloss, lambda_)
# compute metrics
model.eval()
# metrics on training seed
stop_time = time.time()
y_hat = torch.round(torch.sigmoid(model.forward(X)))
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['time'] = stop_time - start_time
# metrics on test set
y_hat = torch.round(torch.sigmoid(model.forward(X_test)))
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
return train_results, test_results
# File: Metrizing-Fairness-main/equal_opportunity/models.py
# models.py
# models for regression
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides models for MFL and Oneto et al.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
class LinearRegression(nn.Module):
def __init__(self, k):
super(LinearRegression, self).__init__()
self.linear = torch.nn.Linear(k, 1, bias=True)
def forward(self, x):
return self.linear(x)
class NeuralNetwork(nn.Module):
def __init__(self, k):
super(NeuralNetwork, self).__init__()
self.linear1 = torch.nn.Linear(k, 20, bias=True)
self.linear2 = torch.nn.Linear(20, 1, bias=True)
def forward(self, x):
x = F.relu(self.linear1(x))
self.output = self.linear2(x)
return self.output
"""Model of MFL"""
class NeuralNetworkClassification(nn.Module):
def __init__(self, k):
super(NeuralNetworkClassification, self).__init__()
self.linear1 = torch.nn.Linear(k, 16, bias=True)
self.linear2 = torch.nn.Linear(16, 1, bias=True)
def forward(self, x):
x = F.relu(self.linear1(x))
self.output = self.linear2(x)
        return self.output
"""Model of Oneto et al."""
class NeuralNetwork_MMD(nn.Module):
def __init__(self, k):
super(NeuralNetwork_MMD, self).__init__()
self.linear1 = torch.nn.Linear(k, 16, bias=True)
        self.sigmoid_ = torch.nn.ReLU()  # note: despite its name, this activation is a ReLU
self.linear2 = torch.nn.Linear(16, 1, bias=True)
def first_layer(self, x):
return self.sigmoid_((self.linear1(x)))
def forward(self, x):
self.output = self.linear2(self.sigmoid_((self.linear1(x))))
return self.output
# loss_functions: MAE and MSE
def MSE(y_pred, y):
return ((y_pred - y) ** 2).mean()
def MAE(y_pred, y):
return (y_pred - y).abs().mean()
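# Hedged sanity check (illustrative only, not used elsewhere): for the Oneto et al.
# model the fairness penalty in MMD_fair.py is computed on first_layer(x), the 16-d
# hidden representation, while forward(x) returns the scalar logit fed to the
# BCE-with-logits loss. This helper just confirms the two output shapes.
def _check_mmd_model_shapes(k=5, n=8):
    net = NeuralNetwork_MMD(k)
    x = torch.randn(n, k)
    hidden, logits = net.first_layer(x), net(x)
    assert hidden.shape == (n, 16) and logits.shape == (n, 1)
    return hidden.shape, logits.shape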
# File: Metrizing-Fairness-main/equal_opportunity/load_data.py
import numpy as np
import pandas as pd
import sklearn.preprocessing as preprocessing
from collections import namedtuple
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt # for plotting stuff
import os
import collections
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementations of dataset preprocessing.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def load_compas_data(COMPAS_INPUT_FILE):
FEATURES_CLASSIFICATION = ["age_cat", "race", "sex", "priors_count",
"c_charge_degree"] # features to be used for classification
CONT_VARIABLES = [
"priors_count"] # continuous features, will need to be handled separately from categorical features, categorical features will be encoded using one-hot
CLASS_FEATURE = "two_year_recid" # the decision variable
SENSITIVE_ATTRS = ["race"]
# COMPAS_INPUT_FILE = DIR_DATA + "compas/compas-scores-two-years.csv"
print('Loading COMPAS dataset...')
# load the data and get some stats
df = pd.read_csv(COMPAS_INPUT_FILE)
df = df.dropna(subset=["days_b_screening_arrest"]) # dropping missing vals
# convert to np array
data = df.to_dict('list')
for k in data.keys():
data[k] = np.array(data[k])
""" Filtering the data """
# These filters are the same as propublica (refer to https://github.com/propublica/compas-analysis)
# If the charge date of a defendants Compas scored crime was not within 30 days from when the person was arrested, we assume that because of data quality reasons, that we do not have the right offense.
idx = np.logical_and(data["days_b_screening_arrest"] <= 30, data["days_b_screening_arrest"] >= -30)
# We coded the recidivist flag -- is_recid -- to be -1 if we could not find a compas case at all.
idx = np.logical_and(idx, data["is_recid"] != -1)
# In a similar vein, ordinary traffic offenses -- those with a c_charge_degree of 'O' -- will not result in Jail time are removed (only two of them).
idx = np.logical_and(idx, data["c_charge_degree"] != "O") # F: felony, M: misconduct
# We filtered the underlying data from Broward county to include only those rows representing people who had either recidivated in two years, or had at least two years outside of a correctional facility.
idx = np.logical_and(idx, data["score_text"] != "NA")
# we will only consider blacks and whites for this analysis
idx = np.logical_and(idx, np.logical_or(data["race"] == "African-American", data["race"] == "Caucasian"))
# select the examples that satisfy this criteria
for k in data.keys():
data[k] = data[k][idx]
""" Feature normalization and one hot encoding """
# convert class label 0 to -1
y = data[CLASS_FEATURE]
# y[y == 0] = -1
print("\nNumber of people recidivating within two years")
print(pd.Series(y).value_counts())
print("\n")
X = np.array([]).reshape(len(y),
0) # empty array with num rows same as num examples, will hstack the features to it
x_control = collections.defaultdict(list)
feature_names = []
for attr in FEATURES_CLASSIFICATION:
vals = data[attr]
if attr in SENSITIVE_ATTRS:
lb = preprocessing.LabelBinarizer()
lb.fit(vals)
vals = lb.transform(vals)
x_control[attr] = vals
pass
else:
if attr in CONT_VARIABLES:
vals = [float(v) for v in vals]
vals = preprocessing.scale(vals) # 0 mean and 1 variance
vals = np.reshape(vals, (len(y), -1)) # convert from 1-d arr to a 2-d arr with one col
else: # for binary categorical variables, the label binarizer uses just one var instead of two
lb = preprocessing.LabelBinarizer()
lb.fit(vals)
vals = lb.transform(vals)
# add to sensitive features dict
# add to learnable features
X = np.hstack((X, vals))
if attr in CONT_VARIABLES: # continuous feature, just append the name
feature_names.append(attr)
else: # categorical features
if vals.shape[1] == 1: # binary features that passed through lib binarizer
feature_names.append(attr)
else:
for k in lb.classes_: # non-binary categorical features, need to add the names for each cat
feature_names.append(attr + "_" + str(k))
# convert the sensitive feature to 1-d array
x_control = dict(x_control)
for k in x_control.keys():
assert (x_control[k].shape[1] == 1) # make sure that the sensitive feature is binary after one hot encoding
x_control[k] = np.array(x_control[k]).flatten()
# sys.exit(1)
# """permute the date randomly"""
# perm = range(0, X.shape[0])
# shuffle(perm)
# X = X[perm]
# y = y[perm]
for k in x_control.keys():
x_control[k] = x_control[k][:]
# intercept = np.ones(X.shape[0]).reshape(X.shape[0], 1)
# X = np.concatenate((intercept, X), axis=1)
assert (len(feature_names) == X.shape[1])
print("Features we will be using for classification are:", feature_names, "\n")
x_control = x_control['race']
return X, y, x_control
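# Hedged illustration of the LabelBinarizer behaviour relied upon above (not called
# anywhere): a binary attribute is encoded as a single 0/1 column, whereas an
# attribute with three or more categories becomes one indicator column per category.
def _labelbinarizer_demo():
    lb = preprocessing.LabelBinarizer()
    binary = lb.fit_transform(["M", "F", "F", "M"])  # shape (4, 1)
    lb = preprocessing.LabelBinarizer()
    multi = lb.fit_transform(["young", "mid", "old", "young"])  # shape (4, 3)
    return binary.shape, multi.shape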
def load_drug_data(DIR_DATA):
g = pd.read_csv(DIR_DATA, header=None, sep=',')
# g = pd.read_csv("drug_consumption.data.txt", header=None, sep=',')
g = np.array(g)
data = np.array(g[:, 1:13]) # Remove the ID and labels
labels = g[:, 13:]
yfalse_value = 'CL0'
    y = np.array([1.0 if yy == yfalse_value else 0.0 for yy in labels[:, 5]])  # label 1 iff the drug class is 'CL0' (never used)
dataset = namedtuple('_', 'data, target')(data, y)
print('Loading Drug (black vs others) dataset...')
# dataset_train = load_drug()
sensible_feature = 4 # ethnicity
a = np.array([1.0 if el == -0.31685 else 0 for el in data[:, sensible_feature]])
X = np.delete(data, sensible_feature, axis=1).astype(float)
return X, y, a
def load_adult(DIR_DATA, smaller=False, scaler=True):
'''
:param smaller: selecting this flag it is possible to generate a smaller version of the training and test sets.
:param scaler: if True it applies a StandardScaler() (from sklearn.preprocessing) to the data.
:return: train and test data.
Features of the Adult dataset:
0. age: continuous.
1. workclass: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
2. fnlwgt: continuous.
3. education: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th,
Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
4. education-num: continuous.
5. marital-status: Married-civ-spouse, Divorced, Never-married, Separated, Widowed,
Married-spouse-absent, Married-AF-spouse.
6. occupation: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty,
Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv,
Protective-serv, Armed-Forces.
7. relationship: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
8. race: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
9. sex: Female, Male.
10. capital-gain: continuous.
11. capital-loss: continuous.
12. hours-per-week: continuous.
13. native-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc),
India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico,
Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala,
Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
(14. label: <=50K, >50K)
'''
data = pd.read_csv(
DIR_DATA,
names=[
"Age", "workclass", "fnlwgt", "education", "education-num", "marital-status",
"occupation", "relationship", "race", "gender", "capital gain", "capital loss",
"hours per week", "native-country", "income"]
)
len_train = len(data.values[:, -1])
data_test = pd.read_csv(
DIR_DATA + "adult/adult.test",
names=[
"Age", "workclass", "fnlwgt", "education", "education-num", "marital-status",
"occupation", "relationship", "race", "gender", "capital gain", "capital loss",
"hours per week", "native-country", "income"]
)
data = pd.concat([data, data_test])
# Considering the relative low portion of missing data, we discard rows with missing data
    domanda = data["workclass"][4].values[1]  # 'domanda' holds the missing-value marker used in the raw Adult files
data = data[data["workclass"] != domanda]
data = data[data["occupation"] != domanda]
data = data[data["native-country"] != domanda]
# Here we apply discretisation on column marital_status
data.replace(['Divorced', 'Married-AF-spouse',
'Married-civ-spouse', 'Married-spouse-absent',
'Never-married', 'Separated', 'Widowed'],
['not married', 'married', 'married', 'married',
'not married', 'not married', 'not married'], inplace=True)
# categorical fields
category_col = ['workclass', 'race', 'education', 'marital-status', 'occupation',
'relationship', 'gender', 'native-country', 'income']
for col in category_col:
b, c = np.unique(data[col], return_inverse=True)
data[col] = c
datamat = data.values
target = np.array([-1.0 if val == 0 else 1.0 for val in np.array(datamat)[:, -1]])
datamat = datamat[:, :-1]
if scaler:
scaler = StandardScaler()
scaler.fit(datamat)
datamat = scaler.transform(datamat)
if smaller:
print('A smaller version of the dataset is loaded...')
data = namedtuple('_', 'data, target')(datamat[:len_train // 20, :-1], target[:len_train // 20])
data_test = namedtuple('_', 'data, target')(datamat[len_train:, :-1], target[len_train:])
else:
print('The dataset is loaded...')
data = namedtuple('_', 'data, target')(datamat[:len_train, :-1], target[:len_train])
data_test = namedtuple('_', 'data, target')(datamat[len_train:, :-1], target[len_train:])
return data, data_test
# def load_toy_test():
# # Load toy test
# n_samples = 100 * 2
# n_samples_low = 20 * 2
# n_dimensions = 10
# X, y, sensible_feature_id, _, _ = generate_toy_data(n_samples=n_samples,
# n_samples_low=n_samples_low,
# n_dimensions=n_dimensions)
# data = namedtuple('_', 'data, target')(X, y)
# return data, data
# File: Metrizing-Fairness-main/equal_opportunity/fair_KDE.py
# Baseline Fair KDE : https://proceedings.neurips.cc//paper/2020/file/ac3870fcad1cfc367825cda0101eee62-Paper.pdf
import cvxpy as cp
import numpy as np
import argparse
import pandas as pd
import torch
import fairness_metrics
import data_loader
from tqdm import tqdm
from collections import namedtuple
from sklearn.metrics import log_loss
from copy import deepcopy
import os, sys
import time
import pickle
import random
import matplotlib.pyplot as plt
import torch.optim as optim
from Fair_KDE.models import Classifier
import matplotlib.pyplot as plt
import torch.nn as nn
from torch.utils.data import DataLoader
from Fair_KDE.dataloader import CustomDataset
import time
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides an implementation of https://proceedings.neurips.cc/paper/2020/file/ac3870fcad1cfc367825cda0101eee62-Paper.pdf
An example usage: python fair_KDE.py --dataset {} --seed {} --nlambda {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
tau = 0.5
# Approximation of Q-function given by López-Benítez & Casadevall (2011) based on a second-order exponential function & Q(x) = 1- Q(-x):
a = 0.4920
b = 0.2887
c = 1.1893
Q_function = lambda x: torch.exp(-a*x**2 - b*x - c)
def CDF_tau(Yhat, h=0.01, tau=0.5):
m = len(Yhat)
Y_tilde = (tau-Yhat)/h
sum_ = torch.sum(Q_function(Y_tilde[Y_tilde>0])) \
+ torch.sum(1-Q_function(torch.abs(Y_tilde[Y_tilde<0]))) \
+ 0.5*(len(Y_tilde[Y_tilde==0]))
return sum_/m
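# Hedged sanity check (not used in training): compare the exponential approximation
# above with the exact Gaussian tail Q(x) = 0.5*erfc(x/sqrt(2)). The approximation
# is intended for x >= 0 and is loosest near x = 0.
def _q_function_max_error(x_max=3.0, n=31):
    x = torch.linspace(0.0, x_max, n)
    exact = 0.5 * torch.erfc(x / torch.sqrt(torch.tensor(2.0)))
    return (Q_function(x) - exact).abs().max().item()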
def Huber_loss(x, delta):
if x.abs() < delta:
return (x ** 2) / 2
return delta * (x.abs() - delta / 2)
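# Hedged note: Huber_loss itself is never called below; the training loop instead
# hand-codes lambda_ * dHuber(Delta)/dDelta when accumulating the DP/EO penalty.
# The helper below makes that piecewise (clipped-linear) derivative explicit.
def _huber_derivative(x, delta):
    # matches the branches in run(): delta*sign(x) when |x| >= delta, else x
    if x.abs() >= delta:
        return delta if x > 0 else -delta
    return x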
# act on experiment parameters:
def run(args):
# act on experiment parameters:
seed = args.seed
data_loader.set_seed(args.seed)
##### Other training hyperparameters #####
lr = 2e-4
n_epochs = 200
lr_decay = 1.0
batch_size = 2048
n_epochs = 500
lambda_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
if args.dataset == 'CommunitiesCrimeClassification':
ds = data_loader.CommunitiesCrimeClassification(a_inside_x=0)
batch_size = 128
if args.dataset == 'Compas':
ds = data_loader.Compas(a_inside_x=0)
lr = 5e-4
batch_size = 2048
if args.dataset == 'LawSchool':
ds = data_loader.LawSchool(a_inside_x=0)
lr = 2e-4
batch_size = 2048
if args.dataset == 'Credit':
ds = data_loader.Credit(a_inside_x=0)
lr = 5e-4
        batch_size = 2048
if args.dataset == 'Adult':
ds = data_loader.Adult(0)
train_test_split_fin = 1
batch_size = 2048
lr = 1e-1
        lr_decay = 0.98
if args.dataset == 'Drug':
ds = data_loader.Drug(a_inside_x=0)
batch_size = 128
if args.dataset != 'Adult':
ds.split_test()
k = ds.get_k()
# CHANGE: CHANGE TO EQUAL OPPORTUNITY FORMULATION
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'equal_opportunity' : fairness_metrics.equal_opportunity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# END CHANGE
# storage of results
results_train = []
results_test = []
##### Which fairness notion to consider (Demographic Parity / Equalized Odds) #####
fairness = 'DP' # ['DP', 'EO']
##### Model specifications #####
n_layers = 2 # [positive integers]
n_hidden_units = 16 # [positive integers]
##### Our algorithm hyperparameters #####
h = 0.1 # Bandwidth hyperparameter in KDE [positive real numbers]
delta = 1.0 # Delta parameter in Huber loss [positive real numbers]
lambda_ = 0.05 # regularization factor of DDP/DEO; Positive real numbers \in [0.0, 1.0]
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
##### Whether to enable GPU training or not
    device = torch.device('cpu')  # or torch.device('cuda') to enable GPU training
# Import dataset
# dataset = FairnessDataset(dataset=dataset_name, device=device)
# dataset.normalize()
input_dim = k + 1
net = Classifier(n_layers=n_layers, n_inputs=input_dim, n_hidden_units=n_hidden_units)
net = net.to(device)
# Set an optimizer
optimizer = optim.Adam(net.parameters(), lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay) # None
# X, Y, A = ds.get_data()
# X_test, Y_test, A_test = ds.get_test_data()
# x_train = X.cpu().detach().numpy()
# Y_train = Y.cpu().detach().numpy().flatten()
# a_train = A.cpu().detach().numpy().flatten()
# x_test = X_test.cpu().detach().numpy()
# y_test = Y_test.cpu().detach().numpy().flatten()
# a_test = A_test.cpu().detach().numpy().flatten()
# train_tensors, test_tensors = dataset.get_dataset_in_tensor()
# X_train, Y_train, Z_train, XZ_train = train_tensors
# X_test, Y_test, Z_test, XZ_test = test_tensors
# Retrieve train/test splitted numpy arrays for index=split
# train_arrays, test_arrays = dataset.get_dataset_in_ndarray()
# X_train_np, Y_train_np, Z_train_np, XZ_train_np = train_arrays
# X_test_np, Y_test_np, Z_test_np, XZ_test_np = test_arrays
if args.dataset == 'Adult':
X_train, Y_train, Z_train, X_test, Y_test, Z_test = ds.get_adult_data()
else:
X_train, Y_train, Z_train = ds.get_data()
X_test, Y_test, Z_test = ds.get_test_data()
XZ_test = torch.cat([X_test, Z_test], 1)
XZ_train = torch.cat([X_train, Z_train], 1)
custom_dataset = CustomDataset(XZ_train, Y_train, Z_train)
if batch_size == 'full':
batch_size_ = XZ_train.shape[0]
elif isinstance(batch_size, int):
batch_size_ = batch_size
generator = DataLoader(custom_dataset, batch_size=batch_size_, shuffle=True)
pi = torch.tensor(np.pi).to(device)
phi = lambda x: torch.exp(-0.5*x**2)/torch.sqrt(2*pi) #normal distribution
# # An empty dataframe for logging experimental results
# df = pd.DataFrame()
# df_ckpt = pd.DataFrame()
loss_function = nn.BCELoss()
costs = []
time_track = []
for lambda_ in lambda_candidates:
print('Training FKDE method, for lambda: {}/{}, seed:{}'.format(lambda_, args.nlambda, args.seed))
start_time = time.time()
for epoch in range(n_epochs):
for i, (xz_batch, y_batch, z_batch) in enumerate(generator):
xz_batch, y_batch, z_batch = xz_batch.to(device), y_batch.to(device), z_batch.to(device)
Yhat = net(xz_batch)
Ytilde = torch.round(Yhat.squeeze())
cost = 0
dtheta = 0
m = z_batch.shape[0]
# prediction loss
p_loss = loss_function(Yhat.squeeze(), y_batch.squeeze())
cost += (1 - lambda_) * p_loss
# DP_Constraint
if fairness == 'DP':
Pr_Ytilde1 = CDF_tau(Yhat.detach(), h, tau)
for z in range(1):
# CHANGE: CHANGE TO EQUAL OPPORTUNITY FORMULATION
Pr_Ytilde1_Z = CDF_tau(Yhat.detach()[(z_batch==z) & (y_batch==1)],h,tau)
m_z = z_batch[(z_batch==z) & (y_batch==1)].shape[0]
Delta_z = Pr_Ytilde1_Z-Pr_Ytilde1
Delta_z_grad = torch.dot(phi((tau-Yhat.detach()[(z_batch==z) & (y_batch==1)])/h).view(-1),
Yhat[(z_batch==z) & (y_batch==1)].view(-1))/h/m_z
Delta_z_grad -= torch.dot(phi((tau-Yhat.detach())/h).view(-1),
Yhat.view(-1))/h/m
# END CHANGE
if Delta_z.abs() >= delta:
if Delta_z > 0:
Delta_z_grad *= lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= -lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= lambda_*Delta_z
cost += Delta_z_grad
# EO_Constraint
elif fairness == 'EO':
for y in [0,1]:
Pr_Ytilde1_Y = CDF_tau(Yhat[y_batch==y].detach(),h,tau)
m_y = y_batch[y_batch==y].shape[0]
for z in range(1):
Pr_Ytilde1_ZY = CDF_tau(Yhat[(y_batch==y) & (z_batch==z)].detach(),h,tau)
m_zy = z_batch[(y_batch==y) & (z_batch==z)].shape[0]
Delta_zy = Pr_Ytilde1_ZY-Pr_Ytilde1_Y
Delta_zy_grad = torch.dot(
phi((tau-Yhat[(y_batch==y) & (z_batch==z)].detach())/h).view(-1),
Yhat[(y_batch==y) & (z_batch==z)].view(-1)
)/h/m_zy
Delta_zy_grad -= torch.dot(
phi((tau-Yhat[y_batch==y].detach())/h).view(-1),
Yhat[y_batch==y].view(-1)
)/h/m_y
if Delta_zy.abs() >= delta:
if Delta_zy > 0:
Delta_zy_grad *= lambda_*delta
cost += Delta_zy_grad
                                else:
                                    Delta_zy_grad *= -lambda_*delta
                                    cost += Delta_zy_grad
else:
Delta_zy_grad *= lambda_*Delta_zy
cost += Delta_zy_grad
optimizer.zero_grad()
if (torch.isnan(cost)).any():
continue
cost.backward()
optimizer.step()
costs.append(cost.item())
# Print the cost per 10 batches
# if (i + 1) % 10 == 0 or (i + 1) == len(generator):
# print('Lambda:{}, Epoch [{}/{}], Batch [{}/{}], Cost: {:.4f}'.format(lambda_, epoch+1, n_epochs,
# i+1, len(generator),
# cost.item()), end='\r')
if lr_scheduler is not None:
lr_scheduler.step()
stop_time = time.time()
def predict(XZ):
Y_hat_ = net(XZ)
Y_hat_[Y_hat_>=0.5] = 1
Y_hat_[Y_hat_ < 0.5] = 0
return Y_hat_
# metrics on train set
y_hat = predict(XZ_train).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[Z_train==1]
y_hat_0 = y_hat[Z_train==0]
y_1 = Y_train[Z_train==1]
y_0 = Y_train[Z_train==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['time'] = stop_time - start_time
# metrics on test set
y_hat = predict(XZ_test).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[Z_test==1]
y_hat_0 = y_hat[Z_test==0]
y_1 = Y_test[Z_test==1]
y_0 = Y_test[Z_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['lambda_'] = lambda_
test_results['lambda_'] = lambda_
print(train_results)
results_train.append(train_results)
results_test.append(test_results)
# df_train = pd.DataFrame(data=results_train)
# df_test = pd.DataFrame(data=results_test)
# df_train.to_csv('results/{}_zafar_{}_train.csv'.format(args.dataset, 0))
# df_test.to_csv('results/{}_zafar_{}_test.csv'.format(args.dataset, 0))
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
df_train.to_csv('results/FKDE/{}_FKDE_{}_train.csv'.format(args.dataset, args.seed))
df_test.to_csv('results/FKDE/{}_FKDE_{}_test.csv'.format(args.dataset, args.seed))
PARAMS = {'dataset':args.dataset,
'batch_size':batch_size,
'lr':lr, 'epochs':n_epochs,
'seed':args.seed,
'method':'FKDE',
'nlambda': args.nlambda,
'lambda_min':args.lambda_min,
'lambda_max':args.lambda_max,
              'algorithm':'adam',
'L':'BCE_cross_entropy',
'lr_decay':lr_decay,
'a_inside_x': True
}
with open('results/FKDE/{}_FKDE_{}.pkl'.format(args.dataset, args.seed), 'wb') as f:
pickle.dump({**PARAMS}, f, protocol=pickle.HIGHEST_PROTOCOL)
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--lambda_min', default=-5, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=2, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--dataset', help='Dataset to use', choices=['CommunitiesCrimeClassification', 'LawSchool', 'Compas', 'Adult', 'Credit', 'Drug'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=50)
args = parser.parse_args()
run(args)
# File: Metrizing-Fairness-main/equal_opportunity/run_benchmark.py
import models
import fairness_metrics
import benchmark
import data_loader
import pickle
import argparse
import pandas as pd
import numpy as np
import time
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides an implementation of MFL.
An example usage: python run_benchmark.py --dataset {} --seed {} --a_inside_x True --nlambda {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def run(args):
# act on experiment parameters:
data_loader.set_seed(args.seed)
Model = models.LinearRegression if args.model=='linear' else models.NeuralNetworkClassification
fair_loss = fairness_metrics.energy_distance
lambda_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
train_test_split_fin = 0
lr = args.lr
n_epochs = args.n_epochs
#lr = 5e-4
n_epochs = 500
lr_decay = 0.99
batch_size = 2048
if args.dataset == 'CommunitiesCrimeClassification':
ds = data_loader.CommunitiesCrimeClassification(a_inside_x=args.a_inside_x)
batch_size = 512
if args.dataset == 'Compas':
ds = data_loader.Compas(a_inside_x=args.a_inside_x)
if args.dataset == 'LawSchool':
ds = data_loader.LawSchool(a_inside_x=args.a_inside_x)
if args.dataset == 'Credit':
ds = data_loader.Credit(a_inside_x=args.a_inside_x)
if args.dataset == 'Adult':
ds = data_loader.Adult(a_inside_x=args.a_inside_x)
train_test_split_fin = 1
if args.dataset == 'Drug':
ds = data_loader.Drug(a_inside_x=args.a_inside_x)
batch_size = 512
logfairloss = fair_loss
if args.dataset != 'Adult':
ds.split_test()
k = ds.get_k() # Dimension
# metrics to evaluate
# CHANGE: CHANGE TO EQUAL OPPORTUNITY FORMULATION
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'equal_opportunity' : fairness_metrics.equal_opportunity_classification,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# END CHANGE
# storage of results
results_train = []
results_test = []
# run the test for various lambdas
for lambda_ in lambda_candidates:
print('Training Our method, for lambda_: {}/{}, seed:{}'.format(lambda_, args.nlambda, args.seed))
model = Model(k)
print(Model)
train_metrics, test_metrics = benchmark.train_test_fair_learning(ds=ds,
model=model,
fair_loss=fair_loss,
lr=lr,
batch_size=batch_size,
N_epochs=n_epochs,
lambda_=lambda_,
metrics=metrics,
lr_decay=lr_decay,
psi=None, plot_convergence=args.plot_convergence, logfairloss=logfairloss, weight_decay=args.weight_decay, train_test_split_fin=train_test_split_fin)
train_metrics['lambda_'] = lambda_
test_metrics['lambda_'] = lambda_
results_train.append(train_metrics)
results_test.append(test_metrics)
# save the results
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
if args.a_inside_x:
df_train.to_csv('results/NN_energy/{}_{}_AinX_train_{}.csv'.format(args.dataset, \
args.model, args.seed))
df_test.to_csv('results/NN_energy/{}_{}_AinX_test_{}.csv'.format(args.dataset, \
args.model, args.seed))
else:
print('here')
df_train.to_csv('results/NN_energy/{}_{}_train_{}.csv'.format(args.dataset, \
args.model, args.seed))
df_test.to_csv('results/NN_energy/{}_{}_test_{}.csv'.format(args.dataset, \
args.model, args.seed))
PARAMS = {'dataset':args.dataset,
'batch_size':batch_size,
'lr':lr, 'epochs':n_epochs,
'seed':args.seed,
'nlambda': args.nlambda,
'lambda_min':args.lambda_min,
'lambda_max':args.lambda_max,
              'algorithm':'adam',
'model_details':model.state_dict,
'L':'BCE_cross_entropy',
'fair_loss':'Energy',
'lr_decay':lr_decay,
'a_inside_x':args.a_inside_x
}
with open('results/NN_energy/{}_{}_{}.pkl'.format(args.dataset, args.model, args.seed), 'wb') as f:
pickle.dump({**PARAMS}, f, protocol=pickle.HIGHEST_PROTOCOL)
#
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--model', default='NN', choices=['linear', 'NN'], help='Model')
# parser.add_argument('--regloss', default='L2', choices=['L1', 'L2'], help='Regression Loss')
# parser.add_argument('--fairloss', required=True, choices=['Energy', 'Wasserstein'], help='Fairness loss')
parser.add_argument('--lambda_min', default=-5, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=2, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--lr', default=5e-4, type=float, help='Learning Rate of (S)GD: Currently has no effect since Adam is used')
parser.add_argument('--n_epochs', default=500, type=int, help='Number of Epochs of (S)GD')
parser.add_argument('--plot_convergence', default=False, action='store_true', help='If Convergence plot should be done')
parser.add_argument('--dataset', help='Dataset to use', choices=['Synthetic1', 'Synthetic2', 'CommunitiesCrime', 'CommunitiesCrimeClassification',
'BarPass', 'StudentsMath', 'StudentsPortugese', 'Compas', 'LawSchool', 'Adult',
'Credit', 'Drug'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=50)
parser.add_argument('--weight_decay', help='SGD weight decay', type=float, default=0.0)
parser.add_argument('--a_inside_x', default=False, type=str2bool, help='The sensitive feature is in X')
args = parser.parse_args()
run(args)
| 7,662
| 46.596273
| 214
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/run_benchmark_regression.py
|
import models
import fairness_metrics
import benchmark
import data_loader
import pickle
import argparse
import pandas as pd
import numpy as np
import time
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides the implementation of MFL
An example usage
python run_benchmark_regression.py --dataset {} --seed {} --nlambda {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def run(args):
# act on experiment parameters:
data_loader.set_seed(args.seed)
Model = models.LinearRegression if args.model=='linearregression' else models.NeuralNetwork
fair_loss = fairness_metrics.energy_distance
lambda_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
train_test_split_fin = 0
lr = args.lr
lr_decay = 1
batch_size = args.batch_size
n_epochs = args.n_epochs
if args.dataset == 'CommunitiesCrime':
ds = data_loader.CommunitiesCrime()
if args.dataset == 'BarPass':
ds = data_loader.BarPass()
if args.dataset == 'StudentsMath':
ds = data_loader.StudentPerformance(subject='Math')
if args.dataset == 'StudentsPortugese':
ds = data_loader.StudentPerformance(subject='Portugese')
logfairloss = fair_loss
ds.split_test()
k = ds.get_k() # Dimension
# metrics to evaluate
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'R2' : fairness_metrics.R2
}
# storage of results
results_train = []
results_test = []
# run the test for various lambdas
for lambda_ in lambda_candidates:
print('Training Our method, for lambda_: {}/{}, seed:{}'.format(lambda_, args.nlambda, args.seed))
model = Model(k)
train_metrics, test_metrics = benchmark.train_test_fair_learning_regression(ds=ds,
model=model,
fair_loss=fair_loss,
lr=lr,
batch_size=batch_size,
N_epochs=n_epochs,
lambda_=lambda_,
metrics=metrics,
lr_decay=lr_decay,
psi=None, plot_convergence=args.plot_convergence, logfairloss=logfairloss, weight_decay=args.weight_decay, train_test_split_fin=train_test_split_fin)
train_metrics['lambda_'] = lambda_
test_metrics['lambda_'] = lambda_
results_train.append(train_metrics)
results_test.append(test_metrics)
# save the results
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
df_train.to_csv('results/NN_energy_regression/{}_{}_train_{}.csv'.format(args.dataset, \
args.model, args.seed))
df_test.to_csv('results/NN_energy_regression/{}_{}_test_{}.csv'.format(args.dataset, \
args.model, args.seed))
PARAMS = {'dataset':args.dataset,
'batch_size':batch_size,
'lr':lr, 'epochs':n_epochs,
'seed':args.seed,
'nlambda': args.nlambda,
'lambda_min':args.lambda_min,
'lambda_max':args.lambda_max,
              'algorithm':'adam',
'model_details':model.state_dict,
'L':'MSE',
'fair_loss':'Energy',
'lr_decay':lr_decay
}
with open('results/NN_energy_regression/{}_{}_{}.pkl'.format(args.dataset, args.model, args.seed), 'wb') as f:
pickle.dump({**PARAMS}, f, protocol=pickle.HIGHEST_PROTOCOL)
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--model', default='NN', choices=['linearregression', 'NN'], help='Regression Model')
parser.add_argument('--lambda_min', default=-5, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=1, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--lr', default=1e-4, type=float, help='Learning Rate of (S)GD: Currently has no effect since Adam is used')
parser.add_argument('--batch_size', default=128, type=int, help='Batch Size for algorithm 2')
parser.add_argument('--n_epochs', default=500, type=int, help='Number of Epochs of (S)GD')
parser.add_argument('--plot_convergence', default=False, action='store_true', help='If Convergence plot should be done')
parser.add_argument('--dataset', help='Dataset to use', choices=['CommunitiesCrime', 'BarPass', 'StudentsMath', 'StudentsPortugese'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=50)
parser.add_argument('--weight_decay', help='SGD weight decay', type=float, default=0.0)
args = parser.parse_args()
run(args)
| 6,191
| 45.208955
| 214
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/Fair_KDE/fairness_metrics.py
|
import torch
import cvxpy as cp
import numpy as np
# +------------------------------------------+
# | Metric 1: Energy Distance |
# +------------------------------------------+
def energy_distance(y1, y2):
'''
    Compute the energy distance between the empirical distributions y1 and y2, each 1-dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
return (2*torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1)).mean()
-torch.abs(y1.unsqueeze(0)-y1.unsqueeze(1)).mean()
-torch.abs(y2.unsqueeze(0)-y2.unsqueeze(1)).mean())
def energy_distance_forloop(y1, y2):
'''
    Compute the energy distance between the empirical distributions y1 and y2, each 1-dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
d11 = torch.tensor(0.)
d12 = torch.tensor(0.)
d22 = torch.tensor(0.)
for y_ in y1:
d11 += (y_-y1).abs().mean()
d12 += (y_-y2).abs().mean()
d11 = d11/(y1.shape[0])
d12 = d12/(y1.shape[0])
for y_ in y2:
d22 += (y_-y2).abs().mean()
d22 = d22/(y2.shape[0])
return 2*d12-d11-d22
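# Illustrative sketch (not part of the original code): the vectorized and the
# for-loop implementations above should agree up to floating-point error. The
# helper below is a hypothetical sanity check and is never called by the rest
# of this module.
def _energy_distance_consistency_check(n1=50, n2=70, seed=0):
    torch.manual_seed(seed)
    y1 = torch.randn(n1)
    y2 = torch.randn(n2) + 1.0
    d_vec = energy_distance(y1, y2)
    d_loop = energy_distance_forloop(y1, y2)
    assert torch.isclose(d_vec, d_loop, atol=1e-4), (d_vec, d_loop)
    return d_vec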
# +------------------------------------------+
# | Metric 2: Wasserstein Distance |
# +------------------------------------------+
def W1dist(y1,y2):
'''
    Compute the type-1 Wasserstein distance between the empirical distributions y1 and y2, each 1-dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Wasserstein distance
'''
# compute cost matrix
C = torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1))
C_np = C.data.numpy()
# solve OT problem
T = cp.Variable(C_np.shape)
ones_1 = np.ones((C_np.shape[0], 1))
ones_2 = np.ones((C_np.shape[1], 1))
objective = cp.Minimize(cp.sum(cp.multiply(C_np,T)))
constraints = [
T >=0,
T@ones_2==ones_1/len(ones_1),
T.T@ones_1==ones_2/len(ones_2)
]
problem = cp.Problem(objective, constraints)
problem.solve(solver=cp.GUROBI)
# objective value for gradient computation
return (torch.Tensor(T.value)*C).sum()
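# Illustrative sketch (not part of the original code): for one-dimensional
# empirical distributions with the SAME number of samples and uniform weights,
# the type-1 Wasserstein distance has a closed form -- sort both samples and
# average the absolute differences. The hypothetical helper below can serve as
# a solver-free cross-check of W1dist (which requires GUROBI).
def _w1_sorted_samples(y1, y2):
    assert y1.numel() == y2.numel(), 'closed form assumes equal sample sizes'
    return (y1.flatten().sort().values - y2.flatten().sort().values).abs().mean()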
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity(y1_hat, y2_hat, y1, y2):
'''
    Compute the maximum statistical disparity, i.e. the maximum absolute
    difference between the two empirical CDFs (the two-sample KS statistic)
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
diff = torch.tensor(0)
for y_test in torch.hstack((y1_hat,y2_hat)).flatten():
cdf1_y = (y1_hat<=y_test).float().mean()
cdf2_y = (y2_hat<=y_test).float().mean()
if (cdf1_y-cdf2_y).abs()>diff:
diff = (cdf1_y-cdf2_y).abs()
return diff
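# Illustrative sketch (not part of the original code): the maximum CDF gap
# computed above is exactly the two-sample Kolmogorov-Smirnov statistic, so it
# can be cross-checked against scipy. The import and the helper below are
# hypothetical additions for illustration only (assumes CPU tensors).
def _statistical_parity_ks_check(y1_hat, y2_hat):
    from scipy.stats import ks_2samp
    ks_stat = ks_2samp(y1_hat.detach().flatten().numpy(),
                       y2_hat.detach().flatten().numpy()).statistic
    return statistical_parity(y1_hat, y2_hat, y1_hat, y2_hat), ks_stat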
# +------------------------------------------+
# | Evaluation Metric 2: Bounded Group Loss |
# +------------------------------------------+
def bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L2'):
'''
    Compute the ratio of group losses between the predictions for the two
    protected classes, folded into (0, 1] (1 means equal group losses)
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between group loss
'''
r1 = y1_hat-y1
r2 = y2_hat-y2
if loss=='L2':
lossf = lambda ra,rb: (ra**2).mean() / (rb**2).mean()
if loss=='L1':
lossf = lambda ra,rb: ra.abs().mean() / rb.abs().mean()
l = lossf(r1,r2)
return l if l<1 else 1/l
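# Illustrative sketch (not part of the original code): bounded_group_loss
# returns the ratio of the two groups' losses folded into (0, 1], so 1.0 means
# both groups are fit equally well. With the hypothetical values below the L1
# group losses are 0.5 and 1.0, hence the metric evaluates to 0.5.
def _bounded_group_loss_example():
    y1_hat, y1 = torch.tensor([1.0, 2.0]), torch.tensor([1.5, 2.5])  # |residual| = 0.5
    y2_hat, y2 = torch.tensor([0.0, 0.0]), torch.tensor([1.0, 1.0])  # |residual| = 1.0
    return bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1')     # -> 0.5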
# +------------------------------------------+
# | Evaluation Metric 3: |
# | Group Fairness in Expectation |
# +------------------------------------------+
def group_fair_expect(y1_hat, y2_hat, y1, y2):
'''
    Compute Group Fairness in Expectation, i.e. the absolute difference
    between the mean predictions of the two protected classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between means
'''
return (y1_hat.mean()-y2_hat.mean()).abs()
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity_classification(y1_hat, y2_hat, y1, y2):
'''
    Compute the statistical (demographic) parity gap for binary predictions,
    i.e. the absolute difference of positive prediction rates
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
return ((y1_hat).sum() / y1_hat.shape[0] - (y2_hat).sum() / y2_hat.shape[0]).abs()
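# Illustrative sketch (not part of the original code): for binary predictions
# the metric above is the demographic-parity gap, i.e. the absolute difference
# of positive prediction rates. With the hypothetical predictions below the
# rates are 0.5 and 0.25, so the metric evaluates to 0.25.
def _statistical_parity_classification_example():
    y1_hat = torch.tensor([1.0, 1.0, 0.0, 0.0])  # positive rate 0.5
    y2_hat = torch.tensor([1.0, 0.0, 0.0, 0.0])  # positive rate 0.25
    return statistical_parity_classification(y1_hat, y2_hat, None, None)  # -> 0.25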
# +------------------------------------------+
# | Evaluation Metric 4: lp distance |
# +------------------------------------------+
def lp_dist(y1_hat, y2_hat, y1, y2, p=1):
'''
    Compute the Lp distance between the empirical CDFs of the two predictions.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): lp distance
'''
dist = torch.tensor(0.)
ys, idx = torch.hstack((y1_hat,y2_hat)).flatten().sort()
for i in range(ys.shape[0]-1):
cdf1_y = (y1_hat <= ys[i]).float().mean()
cdf2_y = (y2_hat <= ys[i]).float().mean()
dist += ((cdf1_y - cdf2_y).abs() ** p) * (ys[i+1] - ys[i])
return dist**(1/p)
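# Illustrative sketch (not part of the original code): for p=1 the integral of
# |F1 - F2| over the real line equals the one-dimensional Wasserstein-1
# distance between the two empirical distributions, so lp_dist(..., p=1) can be
# cross-checked against W1dist above. The helper below is hypothetical and
# requires the GUROBI solver used by W1dist.
def _lp_dist_w1_check(y1_hat, y2_hat):
    l1 = lp_dist(y1_hat, y2_hat, None, None, p=1)
    w1 = W1dist(y1_hat.detach().flatten(), y2_hat.detach().flatten())
    return l1, w1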
# +------------------------------------------+
# | Regression Metric 1: MSE |
# +------------------------------------------+
def MSE(y1_hat, y2_hat, y1, y2):
'''
    Compute the mean squared error over both protected classes.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MSE (torch.Tensor): mean squared error
'''
yhats = torch.hstack((y1_hat,y2_hat)).flatten()
ys = torch.hstack((y1,y2)).flatten()
return ((ys-yhats)**2).mean()
def MAE(y1_hat, y2_hat, y1, y2):
'''
    Compute the mean absolute error over both protected classes.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MAE (torch.Tensor): mean absolute error
'''
yhats = torch.hstack((y1_hat,y2_hat)).flatten()
ys = torch.hstack((y1,y2)).flatten()
return (ys-yhats).abs().mean()
def accuracy(y1_hat, y2_hat, y1, y2):
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
total = ys.size(0)
correct = (yhats == ys).sum().item()
# print('Accuracy of the network on the 10000 test images: %d %%' % (
# 100 * correct / total))
return torch.tensor(correct / total * 100)
| 8,196
| 30.771318
| 100
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/Fair_KDE/load_data_.py
|
import numpy as np
import pandas as pd
import sklearn.preprocessing as preprocessing
from collections import namedtuple
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt # for plotting stuff
import os
import collections
import math
from scipy.stats import multivariate_normal
def load_compas_data(COMPAS_INPUT_FILE):
FEATURES_CLASSIFICATION = ["age_cat", "race", "sex", "priors_count",
"c_charge_degree"] # features to be used for classification
CONT_VARIABLES = [
"priors_count"] # continuous features, will need to be handled separately from categorical features, categorical features will be encoded using one-hot
CLASS_FEATURE = "two_year_recid" # the decision variable
SENSITIVE_ATTRS = ["race"]
# COMPAS_INPUT_FILE = DIR_DATA + "compas/compas-scores-two-years.csv"
print('Loading COMPAS dataset...')
# load the data and get some stats
df = pd.read_csv(COMPAS_INPUT_FILE)
df = df.dropna(subset=["days_b_screening_arrest"]) # dropping missing vals
# convert to np array
data = df.to_dict('list')
for k in data.keys():
data[k] = np.array(data[k])
""" Filtering the data """
# These filters are the same as propublica (refer to https://github.com/propublica/compas-analysis)
# If the charge date of a defendants Compas scored crime was not within 30 days from when the person was arrested, we assume that because of data quality reasons, that we do not have the right offense.
idx = np.logical_and(data["days_b_screening_arrest"] <= 30, data["days_b_screening_arrest"] >= -30)
# We coded the recidivist flag -- is_recid -- to be -1 if we could not find a compas case at all.
idx = np.logical_and(idx, data["is_recid"] != -1)
# In a similar vein, ordinary traffic offenses -- those with a c_charge_degree of 'O' -- will not result in Jail time are removed (only two of them).
idx = np.logical_and(idx, data["c_charge_degree"] != "O") # F: felony, M: misconduct
# We filtered the underlying data from Broward county to include only those rows representing people who had either recidivated in two years, or had at least two years outside of a correctional facility.
idx = np.logical_and(idx, data["score_text"] != "NA")
# we will only consider blacks and whites for this analysis
idx = np.logical_and(idx, np.logical_or(data["race"] == "African-American", data["race"] == "Caucasian"))
# select the examples that satisfy this criteria
for k in data.keys():
data[k] = data[k][idx]
""" Feature normalization and one hot encoding """
# convert class label 0 to -1
y = data[CLASS_FEATURE]
# y[y == 0] = -1
print("\nNumber of people recidivating within two years")
print(pd.Series(y).value_counts())
print("\n")
X = np.array([]).reshape(len(y),
0) # empty array with num rows same as num examples, will hstack the features to it
x_control = collections.defaultdict(list)
feature_names = []
for attr in FEATURES_CLASSIFICATION:
vals = data[attr]
if attr in SENSITIVE_ATTRS:
lb = preprocessing.LabelBinarizer()
lb.fit(vals)
vals = lb.transform(vals)
x_control[attr] = vals
pass
else:
if attr in CONT_VARIABLES:
vals = [float(v) for v in vals]
vals = preprocessing.scale(vals) # 0 mean and 1 variance
vals = np.reshape(vals, (len(y), -1)) # convert from 1-d arr to a 2-d arr with one col
else: # for binary categorical variables, the label binarizer uses just one var instead of two
lb = preprocessing.LabelBinarizer()
lb.fit(vals)
vals = lb.transform(vals)
# add to sensitive features dict
# add to learnable features
X = np.hstack((X, vals))
if attr in CONT_VARIABLES: # continuous feature, just append the name
feature_names.append(attr)
else: # categorical features
if vals.shape[1] == 1: # binary features that passed through lib binarizer
feature_names.append(attr)
else:
for k in lb.classes_: # non-binary categorical features, need to add the names for each cat
feature_names.append(attr + "_" + str(k))
# convert the sensitive feature to 1-d array
x_control = dict(x_control)
for k in x_control.keys():
assert (x_control[k].shape[1] == 1) # make sure that the sensitive feature is binary after one hot encoding
x_control[k] = np.array(x_control[k]).flatten()
# sys.exit(1)
# """permute the date randomly"""
# perm = range(0, X.shape[0])
# shuffle(perm)
# X = X[perm]
# y = y[perm]
for k in x_control.keys():
x_control[k] = x_control[k][:]
# intercept = np.ones(X.shape[0]).reshape(X.shape[0], 1)
# X = np.concatenate((intercept, X), axis=1)
assert (len(feature_names) == X.shape[1])
print("Features we will be using for classification are:", feature_names, "\n")
x_control = x_control['race']
return X, y, x_control
def load_drug_data(DIR_DATA):
g = pd.read_csv(DIR_DATA, header=None, sep=',')
# g = pd.read_csv("drug_consumption.data.txt", header=None, sep=',')
g = np.array(g)
data = np.array(g[:, 1:13]) # Remove the ID and labels
labels = g[:, 13:]
yfalse_value = 'CL0'
y = np.array([1.0 if yy == yfalse_value else 0.0 for yy in labels[:, 5]])
dataset = namedtuple('_', 'data, target')(data, y)
print('Loading Drug (black vs others) dataset...')
# dataset_train = load_drug()
sensible_feature = 4 # ethnicity
a = np.array([1.0 if el == -0.31685 else 0 for el in data[:, sensible_feature]])
X = np.delete(data, sensible_feature, axis=1).astype(float)
return X, y, a
def load_arrhythmia(DIR_DATA):
from scipy.stats import mode
arrhythmia = pd.read_csv(DIR_DATA, header=None)
arrhythmia = np.where(np.isnan(arrhythmia), mode(arrhythmia, axis=0), arrhythmia)[1]
y = np.array([1.0 if yy == 1 else 0 for yy in arrhythmia[:, -1]])
data = arrhythmia[:, :-1]
sensible_feature = 1 # gender
    print('Loading Arrhythmia dataset...')
print('Different values of the sensible feature', sensible_feature, ':',
set(data[:, sensible_feature]))
X = np.delete(data, sensible_feature, axis=1).astype(float)
a = data[:, sensible_feature]
data_red = X[:, :12]
return data_red, y, a
def generate_synthetic_data_zafar(plot_data=True, n_samples = 1200):
"""
Code for generating the synthetic data.
We will have two non-sensitive features and one sensitive feature.
    A sensitive feature value of 0.0 means the example is considered to be in the protected group (e.g., female) and 1.0 means it is in the non-protected group (e.g., male).
"""
    # generate this many data points per class
    disc_factor = math.pi / 4.0 # this variable determines the initial discrimination in the data -- decrease it to generate more discrimination
def gen_gaussian(mean_in, cov_in, class_label):
nv = multivariate_normal(mean=mean_in, cov=cov_in)
X = nv.rvs(n_samples)
y = np.ones(n_samples, dtype=float) * class_label
return nv, X, y
""" Generate the non-sensitive features randomly """
# We will generate one gaussian cluster for each class
mu1, sigma1 = [2, 2], [[5, 1], [1, 5]]
mu2, sigma2 = [-2, -2], [[10, 1], [1, 3]]
nv1, X1, y1 = gen_gaussian(mu1, sigma1, 1) # positive class
nv2, X2, y2 = gen_gaussian(mu2, sigma2, -1) # negative class
    # join the positive and negative class clusters
X = np.vstack((X1, X2))
y = np.hstack((y1, y2))
# shuffle the data
perm = np.random.randint(0, X.shape[0], n_samples * 2)
X = X[perm]
y = y[perm]
rotation_mult = np.array(
[[math.cos(disc_factor), -math.sin(disc_factor)], [math.sin(disc_factor), math.cos(disc_factor)]])
X_aux = np.dot(X, rotation_mult)
""" Generate the sensitive feature here """
x_control = [] # this array holds the sensitive feature value
for i in range(len(X)):
x = X_aux[i]
# probability for each cluster that the point belongs to it
p1 = nv1.pdf(x)
p2 = nv2.pdf(x)
# normalize the probabilities from 0 to 1
s = p1 + p2
p1 = p1 / s
p2 = p2 / s
r = np.random.uniform() # generate a random number from 0 to 1
if r < p1: # the first cluster is the positive class
x_control.append(1.0) # 1.0 means its male
else:
x_control.append(0.0) # 0.0 -> female
x_control = np.array(x_control)
""" Show the data """
if plot_data:
num_to_draw = 200 # we will only draw a small number of points to avoid clutter
x_draw = X[:num_to_draw]
y_draw = y[:num_to_draw]
x_control_draw = x_control[:num_to_draw]
X_s_0 = x_draw[x_control_draw == 0.0]
X_s_1 = x_draw[x_control_draw == 1.0]
y_s_0 = y_draw[x_control_draw == 0.0]
y_s_1 = y_draw[x_control_draw == 1.0]
plt.scatter(X_s_0[y_s_0 == 1.0][:, 0], X_s_0[y_s_0 == 1.0][:, 1], color='green', marker='x', s=30,
linewidth=1.5, label="Prot. +ve")
plt.scatter(X_s_0[y_s_0 == -1.0][:, 0], X_s_0[y_s_0 == -1.0][:, 1], color='red', marker='x', s=30,
linewidth=1.5, label="Prot. -ve")
plt.scatter(X_s_1[y_s_1 == 1.0][:, 0], X_s_1[y_s_1 == 1.0][:, 1], color='green', marker='o', facecolors='none',
s=30, label="Non-prot. +ve")
plt.scatter(X_s_1[y_s_1 == -1.0][:, 0], X_s_1[y_s_1 == -1.0][:, 1], color='red', marker='o', facecolors='none',
s=30, label="Non-prot. -ve")
plt.tick_params(axis='x', which='both', bottom='off', top='off',
labelbottom='off') # dont need the ticks to see the data distribution
plt.tick_params(axis='y', which='both', left='off', right='off', labelleft='off')
plt.legend(loc=2, fontsize=15)
plt.xlim((-15, 10))
plt.ylim((-10, 15))
plt.show()
y[y==-1] = 0
return X, y, x_control
def load_adult(DIR_DATA, smaller=False, scaler=True):
'''
:param smaller: selecting this flag it is possible to generate a smaller version of the training and test sets.
:param scaler: if True it applies a StandardScaler() (from sklearn.preprocessing) to the data.
:return: train and test data.
Features of the Adult dataset:
0. age: continuous.
1. workclass: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
2. fnlwgt: continuous.
3. education: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th,
Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
4. education-num: continuous.
5. marital-status: Married-civ-spouse, Divorced, Never-married, Separated, Widowed,
Married-spouse-absent, Married-AF-spouse.
6. occupation: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty,
Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv,
Protective-serv, Armed-Forces.
7. relationship: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
8. race: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
9. sex: Female, Male.
10. capital-gain: continuous.
11. capital-loss: continuous.
12. hours-per-week: continuous.
13. native-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc),
India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico,
Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala,
Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
(14. label: <=50K, >50K)
'''
data = pd.read_csv(
DIR_DATA,
names=[
"Age", "workclass", "fnlwgt", "education", "education-num", "marital-status",
"occupation", "relationship", "race", "gender", "capital gain", "capital loss",
"hours per week", "native-country", "income"]
)
len_train = len(data.values[:, -1])
data_test = pd.read_csv(
DIR_DATA + "adult/adult.test",
names=[
"Age", "workclass", "fnlwgt", "education", "education-num", "marital-status",
"occupation", "relationship", "race", "gender", "capital gain", "capital loss",
"hours per week", "native-country", "income"]
)
data = pd.concat([data, data_test])
    # Considering the relatively low proportion of missing data, we discard rows with missing values
domanda = data["workclass"][4].values[1]
data = data[data["workclass"] != domanda]
data = data[data["occupation"] != domanda]
data = data[data["native-country"] != domanda]
# Here we apply discretisation on column marital_status
data.replace(['Divorced', 'Married-AF-spouse',
'Married-civ-spouse', 'Married-spouse-absent',
'Never-married', 'Separated', 'Widowed'],
['not married', 'married', 'married', 'married',
'not married', 'not married', 'not married'], inplace=True)
# categorical fields
category_col = ['workclass', 'race', 'education', 'marital-status', 'occupation',
'relationship', 'gender', 'native-country', 'income']
for col in category_col:
b, c = np.unique(data[col], return_inverse=True)
data[col] = c
datamat = data.values
target = np.array([-1.0 if val == 0 else 1.0 for val in np.array(datamat)[:, -1]])
datamat = datamat[:, :-1]
if scaler:
scaler = StandardScaler()
scaler.fit(datamat)
datamat = scaler.transform(datamat)
if smaller:
print('A smaller version of the dataset is loaded...')
data = namedtuple('_', 'data, target')(datamat[:len_train // 20, :-1], target[:len_train // 20])
data_test = namedtuple('_', 'data, target')(datamat[len_train:, :-1], target[len_train:])
else:
print('The dataset is loaded...')
data = namedtuple('_', 'data, target')(datamat[:len_train, :-1], target[:len_train])
data_test = namedtuple('_', 'data, target')(datamat[len_train:, :-1], target[len_train:])
return data, data_test
# def load_toy_test():
# # Load toy test
# n_samples = 100 * 2
# n_samples_low = 20 * 2
# n_dimensions = 10
# X, y, sensible_feature_id, _, _ = generate_toy_data(n_samples=n_samples,
# n_samples_low=n_samples_low,
# n_dimensions=n_dimensions)
# data = namedtuple('_', 'data, target')(X, y)
# return data, data
| 15,165
| 43.737463
| 207
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/Fair_KDE/algorithm.py
|
import random
import IPython
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from dataloader import CustomDataset
from utils import measures_from_Yhat
tau = 0.5
# Approximation of Q-function given by López-Benítez & Casadevall (2011) based on a second-order exponential function & Q(x) = 1- Q(-x):
a = 0.4920
b = 0.2887
c = 1.1893
Q_function = lambda x: torch.exp(-a*x**2 - b*x - c)
def CDF_tau(Yhat, h=0.01, tau=0.5):
m = len(Yhat)
Y_tilde = (tau-Yhat)/h
sum_ = torch.sum(Q_function(Y_tilde[Y_tilde>0])) \
+ torch.sum(1-Q_function(torch.abs(Y_tilde[Y_tilde<0]))) \
+ 0.5*(len(Y_tilde[Y_tilde==0]))
return sum_/m
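# Illustrative sketch (not part of the original code): CDF_tau is a kernel-
# smoothed estimate of Pr(Yhat > tau), i.e. of the rate of positive decisions
# after thresholding at tau. For a small bandwidth h it should be close to the
# hard empirical rate, as the hypothetical helper below illustrates.
def _cdf_tau_sanity_check(n=1000, h=1e-3, seed=0):
    torch.manual_seed(seed)
    yhat = torch.rand(n)                    # scores in [0, 1]
    smoothed = CDF_tau(yhat, h=h, tau=tau)  # KDE-based estimate
    hard = (yhat > tau).float().mean()      # empirical positive rate
    return smoothed, hard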
def Huber_loss(x, delta):
if x.abs() < delta:
return (x ** 2) / 2
return delta * (x.abs() - delta / 2)
def train_fair_classifier(dataset, net, optimizer, lr_scheduler, fairness, lambda_, h, delta, device, n_epochs=200, batch_size=2048, seed=0):
# Retrieve train/test splitted pytorch tensors for index=split
train_tensors, test_tensors = dataset.get_dataset_in_tensor()
X_train, Y_train, Z_train, XZ_train = train_tensors
X_test, Y_test, Z_test, XZ_test = test_tensors
    # Retrieve train/test split numpy arrays (needed below by measures_from_Yhat)
    train_arrays, test_arrays = dataset.get_dataset_in_ndarray()
    X_train_np, Y_train_np, Z_train_np, XZ_train_np = train_arrays
    X_test_np, Y_test_np, Z_test_np, XZ_test_np = test_arrays
custom_dataset = CustomDataset(XZ_train, Y_train, Z_train)
if batch_size == 'full':
batch_size_ = XZ_train.shape[0]
elif isinstance(batch_size, int):
batch_size_ = batch_size
data_loader = DataLoader(custom_dataset, batch_size=batch_size_, shuffle=True)
pi = torch.tensor(np.pi).to(device)
phi = lambda x: torch.exp(-0.5*x**2)/torch.sqrt(2*pi) #normal distribution
# An empty dataframe for logging experimental results
df = pd.DataFrame()
df_ckpt = pd.DataFrame()
loss_function = nn.BCELoss()
costs = []
for epoch in range(n_epochs):
for i, (xz_batch, y_batch, z_batch) in enumerate(data_loader):
xz_batch, y_batch, z_batch = xz_batch.to(device), y_batch.to(device), z_batch.to(device)
Yhat = net(xz_batch)
Ytilde = torch.round(Yhat.detach().reshape(-1))
cost = 0
dtheta = 0
m = z_batch.shape[0]
# prediction loss
p_loss = loss_function(Yhat.squeeze(), y_batch)
cost += (1 - lambda_) * p_loss
# DP_Constraint
if fairness == 'DP':
Pr_Ytilde1 = CDF_tau(Yhat.detach(),h,tau)
for z in range(1):
Pr_Ytilde1_Z = CDF_tau(Yhat.detach()[z_batch==z],h,tau)
m_z = z_batch[z_batch==z].shape[0]
Delta_z = Pr_Ytilde1_Z-Pr_Ytilde1
Delta_z_grad = torch.dot(phi((tau-Yhat.detach()[z_batch==z])/h).view(-1),
Yhat[z_batch==z].view(-1))/h/m_z
Delta_z_grad -= torch.dot(phi((tau-Yhat.detach())/h).view(-1),
Yhat.view(-1))/h/m
if Delta_z.abs() >= delta:
if Delta_z > 0:
Delta_z_grad *= lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= -lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= lambda_*Delta_z
cost += Delta_z_grad
# EO_Constraint
elif fairness == 'EO':
for y in [0,1]:
Pr_Ytilde1_Y = CDF_tau(Yhat[y_batch==y].detach(),h,tau)
m_y = y_batch[y_batch==y].shape[0]
for z in range(1):
Pr_Ytilde1_ZY = CDF_tau(Yhat[(y_batch==y) & (z_batch==z)].detach(),h,tau)
m_zy = z_batch[(y_batch==y) & (z_batch==z)].shape[0]
Delta_zy = Pr_Ytilde1_ZY-Pr_Ytilde1_Y
Delta_zy_grad = torch.dot(
phi((tau-Yhat[(y_batch==y) & (z_batch==z)].detach())/h).view(-1),
Yhat[(y_batch==y) & (z_batch==z)].view(-1)
)/h/m_zy
Delta_zy_grad -= torch.dot(
phi((tau-Yhat[y_batch==y].detach())/h).view(-1),
Yhat[y_batch==y].view(-1)
)/h/m_y
if Delta_zy.abs() >= delta:
if Delta_zy > 0:
Delta_zy_grad *= lambda_*delta
cost += Delta_zy_grad
else:
                                # mirror the DP branch: negative clipping when Delta_zy < 0
                                Delta_zy_grad *= -lambda_*delta
                                cost += Delta_zy_grad
else:
Delta_zy_grad *= lambda_*Delta_zy
cost += Delta_zy_grad
optimizer.zero_grad()
if (torch.isnan(cost)).any():
continue
cost.backward()
optimizer.step()
costs.append(cost.item())
# Print the cost per 10 batches
if (i + 1) % 10 == 0 or (i + 1) == len(data_loader):
print('Epoch [{}/{}], Batch [{}/{}], Cost: {:.4f}'.format(epoch+1, n_epochs,
i+1, len(data_loader),
cost.item()), end='\r')
if lr_scheduler is not None:
lr_scheduler.step()
Yhat_train = net(XZ_train).squeeze().detach().cpu().numpy()
df_temp = measures_from_Yhat(Y_train_np, Z_train_np, Yhat=Yhat_train, threshold=tau)
df_temp['epoch'] = epoch * len(data_loader) + i + 1
df_ckpt = df_ckpt.append(df_temp)
# Plot (cost, train accuracies, fairness measures) curves per 50 epochs
if (epoch + 1) % 50 == 0:
IPython.display.clear_output()
print('Currently working on - seed: {}'.format(seed))
plt.figure(figsize=(15,5), dpi=100)
plt.subplot(1,3,1)
plt.plot(costs)
plt.xlabel('x10 iterations')
plt.title('cost')
plt.subplot(1,3,2)
plt.plot(df_ckpt['acc'].to_numpy())
plt.xlabel('epoch')
plt.title('Accuracy')
plt.subplot(1,3,3)
if fairness == 'DP':
plt.plot(df_ckpt['DDP'].to_numpy())
plt.title('DDP')
elif fairness == 'EO':
plt.plot(df_ckpt['DEO'].to_numpy())
plt.title('DEO')
plt.xlabel('epoch')
plt.show()
Yhat_test = net(XZ_test).squeeze().detach().cpu().numpy()
df_test = measures_from_Yhat(Y_test_np, Z_test_np, Yhat=Yhat_test, threshold=tau)
return df_test
| 7,375
| 41.390805
| 141
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/Fair_KDE/dataloader.py
|
import os
import copy
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import data_loader
#from tempeh.configurations import datasets
from sklearn.datasets import make_moons
from sklearn.preprocessing import LabelEncoder, StandardScaler
def arrays_to_tensor(X, Y, Z, XZ, device):
return torch.FloatTensor(X).to(device), torch.FloatTensor(Y).to(device), torch.FloatTensor(Z).to(device), torch.FloatTensor(XZ).to(device)
def adult(data_root, display=False):
""" Return the Adult census data in a nice package. """
dtypes = [
("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"),
("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"),
("Occupation", "category"), ("Relationship", "category"), ("Race", "category"),
("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"),
("Hours per week", "float32"), ("Country", "category"), ("Target", "category")
]
raw_train_data = pd.read_csv(
data_root+'adult.data',
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
raw_test_data = pd.read_csv(
data_root+'adult.test',
skiprows=1,
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
train_data = raw_train_data.drop(["Education"], axis=1) # redundant with Education-Num
test_data = raw_test_data.drop(["Education"], axis=1) # redundant with Education-Num
filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes))
train_data["Target"] = train_data["Target"] == " >50K"
test_data["Target"] = test_data["Target"] == " >50K."
rcode = {
"Not-in-family": 0,
"Unmarried": 1,
"Other-relative": 2,
"Own-child": 3,
"Husband": 4,
"Wife": 5
}
for k, dtype in filt_dtypes:
if dtype == "category":
if k == "Relationship":
train_data[k] = np.array([rcode[v.strip()] for v in train_data[k]])
test_data[k] = np.array([rcode[v.strip()] for v in test_data[k]])
else:
train_data[k] = train_data[k].cat.codes
test_data[k] = test_data[k].cat.codes
return train_data.drop(["Target", "fnlwgt"], axis=1), train_data["Target"].values, test_data.drop(["Target", "fnlwgt"], axis=1), test_data["Target"].values
def compas_data_loader():
""" Downloads COMPAS data from the propublica GitHub repository.
:return: pandas.DataFrame with columns 'sex', 'age', 'juv_fel_count', 'juv_misd_count',
'juv_other_count', 'priors_count', 'two_year_recid', 'age_cat_25 - 45',
'age_cat_Greater than 45', 'age_cat_Less than 25', 'race_African-American',
'race_Caucasian', 'c_charge_degree_F', 'c_charge_degree_M'
"""
data = pd.read_csv("./data/compas/compas-scores-two-years.csv") # noqa: E501
# filter similar to
# https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
data = data[(data['days_b_screening_arrest'] <= 30) &
(data['days_b_screening_arrest'] >= -30) &
(data['is_recid'] != -1) &
(data['c_charge_degree'] != "O") &
(data['score_text'] != "N/A")]
# filter out all records except the ones with the most common two races
data = data[(data['race'] == 'African-American') | (data['race'] == 'Caucasian')]
# Select relevant columns for machine learning.
# We explicitly leave in age_cat to allow linear classifiers to be non-linear in age
data = data[["sex", "age", "age_cat", "race", "juv_fel_count", "juv_misd_count",
"juv_other_count", "priors_count", "c_charge_degree", "two_year_recid"]]
# map string representation of feature "sex" to 0 for Female and 1 for Male
data = data.assign(sex=(data["sex"] == "Male") * 1)
data = pd.get_dummies(data)
return data
class CustomDataset():
def __init__(self, X, Y, Z):
self.X = X
self.Y = Y
self.Z = Z
def __len__(self):
return len(self.Y)
def __getitem__(self, index):
x, y, z = self.X[index], self.Y[index], self.Z[index]
return x, y, z
class FairnessDataset():
def __init__(self, dataset, device=torch.device('cuda')):
self.dataset = dataset
self.device = device
np.random.seed(12345678)
if self.dataset == 'AdultCensus':
self.get_adult_data()
elif self.dataset == 'COMPAS':
self.get_compas_data()
elif self.dataset == 'CreditDefault':
self.get_credit_default_data()
elif self.dataset == 'Lawschool':
self.get_lawschool_data()
elif self.dataset == 'Moon':
self.get_moon_data()
else:
raise ValueError('Your argument {} for dataset name is invalid.'.format(self.dataset))
self.prepare_ndarray()
def get_adult_data(self):
X_train, Y_train, X_test, Y_test = adult('./data/adult/')
self.Z_train_ = X_train['Sex']
self.Z_test_ = X_test['Sex']
self.X_train_ = X_train.drop(labels=['Sex'], axis=1)
self.X_train_ = pd.get_dummies(self.X_train_)
self.X_test_ = X_test.drop(labels=['Sex'], axis=1)
self.X_test_ = pd.get_dummies(self.X_test_)
le = LabelEncoder()
self.Y_train_ = le.fit_transform(Y_train)
self.Y_train_ = pd.Series(self.Y_train_, name='>50k')
self.Y_test_ = le.fit_transform(Y_test)
self.Y_test_ = pd.Series(self.Y_test_, name='>50k')
# def get_compas_data(self):
# dataset = datasets['compas']()
# # dataset = compas_data_loader()
# X_train, X_test = dataset.get_X(format=pd.DataFrame)
# Y_train, Y_test = dataset.get_y(format=pd.Series)
# Z_train, Z_test = dataset.get_sensitive_features('race', format=pd.Series)
# self.X_train_ = X_train
# self.Y_train_ = Y_train
# self.Z_train_ = (Z_train != 'African-American').astype(float)
# self.X_test_ = X_test
# self.Y_test_ = Y_test
# self.Z_test_ = (Z_test != 'African-American').astype(float)
def get_compas_data(self):
        # dataset = datasets['compas']()  # requires tempeh (import commented out above); data_loader.Compas() is used instead
ds = data_loader.Compas()
ds.split_test()
X, Y, A = ds.get_log_data()
X_test, Y_test, A_test = ds.get_test_data()
self.X_train_ = X
self.Y_train_ = Y
self.Z_train_ = A
self.X_test_ = X_test
self.Y_test_ = Y_test
self.Z_test_ = A_test
def get_credit_default_data(self):
rawdata = pd.read_excel('./data/credit_card/default_clients.xls', header=1)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
columns = list(rawdata.columns)
categ_cols = []
for column in columns:
if 2 < len(set(rawdata[column])) < 10:
categ_cols.append((column, len(set(rawdata[column]))))
preproc_data = copy.deepcopy(rawdata)
for categ_col, n_items in categ_cols:
for i in range(n_items):
preproc_data[categ_col + str(i)] = (preproc_data[categ_col] == i).astype(float)
preproc_data = preproc_data.drop(['EDUCATION', 'MARRIAGE'], axis=1)
X = preproc_data.drop(['ID', 'SEX', 'default payment next month'], axis=1)
Y = preproc_data['default payment next month']
Z = 2 - preproc_data['SEX']
self.X_train_ = X.loc[list(range(24000)), :]
self.Y_train_ = Y.loc[list(range(24000))]
self.Z_train_ = Z.loc[list(range(24000))]
self.X_test_ = X.loc[list(range(24000,30000)), :]
self.Y_test_ = Y.loc[list(range(24000,30000))]
self.Z_test_ = Z.loc[list(range(24000,30000))]
def get_lawschool_data(self):
rawdata = pd.read_sas('./data/lawschool/lawschs1_1.sas7bdat')
rawdata = rawdata.drop(['college', 'Year', 'URM', 'enroll'], axis=1)
rawdata = rawdata.dropna(axis=0)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
X = rawdata[['LSAT', 'GPA', 'Gender', 'resident']]
Y = rawdata['admit']
Z = rawdata['White']
self.X_train_ = X.loc[list(range(77267)), :]
self.Y_train_ = Y.loc[list(range(77267))]
self.Z_train_ = Z.loc[list(range(77267))]
self.X_test_ = X.loc[list(range(77267,96584)), :]
self.Y_test_ = Y.loc[list(range(77267,96584))]
self.Z_test_ = Z.loc[list(range(77267,96584))]
def get_moon_data(self):
n_train = 10000
n_test = 5000
X, Y = make_moons(n_samples=n_train+n_test, noise=0.2, random_state=0)
Z = np.zeros_like(Y)
np.random.seed(0)
for i in range(n_train + n_test):
if Y[i] == 0:
if -0.734 < X[i][0] < 0.734:
Z[i] = np.random.binomial(1, 0.90)
else:
Z[i] = np.random.binomial(1, 0.35)
elif Y[i] == 1:
if 0.262 < X[i][0] < 1.734:
Z[i] = np.random.binomial(1, 0.55)
else:
Z[i] = np.random.binomial(1, 0.10)
X = pd.DataFrame(X, columns=['x_1', 'x_2'])
Y = pd.Series(Y, name='label')
Z = pd.Series(Z, name='sensitive attribute')
self.X_train_ = X.loc[list(range(10000)), :]
self.Y_train_ = Y.loc[list(range(10000))]
self.Z_train_ = Z.loc[list(range(10000))]
self.X_test_ = X.loc[list(range(10000,15000)), :]
self.Y_test_ = Y.loc[list(range(10000,15000))]
self.Z_test_ = Z.loc[list(range(10000,15000))]
def prepare_ndarray(self):
self.normalized = False
self.X_train = self.X_train_.to_numpy(dtype=np.float64)
self.Y_train = self.Y_train_.to_numpy(dtype=np.float64)
self.Z_train = self.Z_train_.to_numpy(dtype=np.float64)
self.XZ_train = np.concatenate([self.X_train, self.Z_train.reshape(-1,1)], axis=1)
self.X_test = self.X_test_.to_numpy(dtype=np.float64)
self.Y_test = self.Y_test_.to_numpy(dtype=np.float64)
self.Z_test = self.Z_test_.to_numpy(dtype=np.float64)
self.XZ_test = np.concatenate([self.X_test, self.Z_test.reshape(-1,1)], axis=1)
self.sensitive_attrs = sorted(list(set(self.Z_train)))
return None
def normalize(self):
self.normalized = True
scaler_XZ = StandardScaler()
self.XZ_train = scaler_XZ.fit_transform(self.XZ_train)
self.XZ_test = scaler_XZ.transform(self.XZ_test)
scaler_X = StandardScaler()
self.X_train = scaler_X.fit_transform(self.X_train)
self.X_test = scaler_X.transform(self.X_test)
return None
def get_dataset_in_ndarray(self):
return (self.X_train, self.Y_train, self.Z_train, self.XZ_train),\
(self.X_test, self.Y_test, self.Z_test, self.XZ_test)
def get_dataset_in_tensor(self, validation=False, val_portion=.0):
X_train_, Y_train_, Z_train_, XZ_train_ = arrays_to_tensor(
self.X_train, self.Y_train, self.Z_train, self.XZ_train, self.device)
X_test_, Y_test_, Z_test_, XZ_test_ = arrays_to_tensor(
self.X_test, self.Y_test, self.Z_test, self.XZ_test, self.device)
return (X_train_, Y_train_, Z_train_, XZ_train_),\
(X_test_, Y_test_, Z_test_, XZ_test_)
| 11,650
| 40.462633
| 159
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/Fair_KDE/data_loader_or.py
|
# data_loader.py
# utilities for loading data
import torch
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from load_data import *
# TODO: possibly some form of (cross) validation
def to_tensor(data, device):
D = data
if type(data) == pd.core.frame.DataFrame:
D = data.to_numpy()
if type(D) == np.ndarray:
return torch.tensor(D, device=device).float()
elif type(D) == torch.Tensor:
return D.to(device).float()
else:
raise NotImplementedError('Currently only Torch Tensors, Numpy NDArrays and Pandas Dataframes are supported')
class DataLoader:
def __init__(self, X, Y, A, use_tensor=True, device='cpu', info='No Info Available'):
self.device = device
self.use_tensor = use_tensor
self.X = to_tensor(X, device) if use_tensor else X
self.A = to_tensor(A, device) if use_tensor else A
self.Y = to_tensor(Y, device) if use_tensor else Y
self.X_test = None
self.A_test = None
self.Y_test = None
self.info = info
def get_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_data_for_A(self, a):
# get dataset but only for samples with attribute a
X_a = self.X[(self.A==a).squeeze()]
Y_a = self.Y[(self.A==a).squeeze()]
return (X_a, Y_a)
# def stratified_batch_generator(self, n_samples, n_iterates):
# # get propoertions of protected attribute
# p_A1 = self.A.mean()
# p_A0 = 1-p_A1
# # build index set of protected and unprotected attribute
# ind_A1 = (self.A==1).nonzero()[:,0]
# ind_A0 = (self.A==0).nonzero()[:,0]
# # number of samples to sample from each distribution
# n_batch_1 = int(p_A1*n_samples)
# n_batch_0 = int(p_A0*n_samples)
# replacement = False
# for _ in range(n_iterates):
# # sample indexes for protected and unprotected class
# batch_idx1 = ind_A1[(torch.ones(ind_A1.shape[0]) / (ind_A1.shape[0])).multinomial(
# num_samples=n_batch_1,
# replacement=replacement)]
# batch_idx0 = ind_A0[(torch.ones(ind_A0.shape[0]) / (ind_A0.shape[0])).multinomial(
# num_samples=n_batch_0,
# replacement=replacement)]
# yield (torch.vstack((self.X[batch_idx0], self.X[batch_idx1])),
# torch.vstack((self.Y[batch_idx0], self.Y[batch_idx1])),
# torch.vstack((self.A[batch_idx0], self.A[batch_idx1])))
def stratified_batch_generator_worep(self, batch_size=32, n_epochs=100):
        # get proportions of the protected attribute
# n_epochs = 100
p_A1 = self.A.mean()
p_A0 = 1 - p_A1
# print(p_A0)
total_samples = self.A.shape[0]
# batch_size = 32
# build index set of protected and unprotected attribute
# number of samples to sample from each distribution
n_batch_1 = int(p_A1*batch_size)
n_batch_0 = int(p_A0*batch_size)
for epoch in tqdm(range(n_epochs)):
# print(epoch)
ind_A1 = (self.A==1).nonzero()[:,0]
ind_A0 = (self.A==0).nonzero()[:,0]
for _ in range(0, total_samples - batch_size + 1, batch_size):
# sample indexes for protected and unprotected class
sampled_indices_A1 = (torch.ones(ind_A1.shape[0]) / (ind_A1.shape[0])).multinomial(
num_samples=n_batch_1,
replacement=False)
batch_idx1 = ind_A1[sampled_indices_A1]
mask = torch.ones(ind_A1.numel(), dtype=torch.bool)
mask[sampled_indices_A1] = False
ind_A1 = ind_A1[mask]
# print(ind_A1.shape)
sampled_indices_A0 = (torch.ones(ind_A0.shape[0]) / (ind_A0.shape[0])).multinomial(
num_samples=n_batch_0,
replacement=False)
batch_idx0 = ind_A0[sampled_indices_A0]
mask = torch.ones(ind_A0.numel(), dtype=torch.bool)
mask[sampled_indices_A0] = False
ind_A0 = ind_A0[mask]
yield (torch.vstack((self.X[batch_idx0], self.X[batch_idx1])),
torch.vstack((self.Y[batch_idx0], self.Y[batch_idx1])),
torch.vstack((self.A[batch_idx0], self.A[batch_idx1])))
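    # Illustrative sketch (not part of the original code): the generator above
    # yields batches whose protected-attribute proportion matches the full data
    # set, sampling without replacement within each epoch. The hypothetical
    # method below shows the intended call pattern.
    def _stratified_batch_demo(self, batch_size=32, n_epochs=1):
        for xb, yb, ab in self.stratified_batch_generator_worep(batch_size=batch_size,
                                                                n_epochs=n_epochs):
            # each batch keeps roughly the same fraction of A==1 as self.A
            print(xb.shape, yb.shape, float(ab.mean()))
            break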
def get_info(self):
return self.info
def split_test(self, **kwargs):
# perform train test split, kwargs for sklearn train-test-split
X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(self.X, self.Y, self.A, **kwargs)
self.X = X_train
self.X_test = X_test
self.Y = Y_train
self.Y_test = Y_test
self.A = A_train
self.A_test = A_test
def get_test_data(self):
# get the test dataset
if self.X_test is None:
raise ValueError('Train-Test split has not yet been performed')
return (self.X_test, self.Y_test, self.A_test)
def get_log_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_k(self):
return self.X.shape[1]
class CommunitiesCrime(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class CommunitiesCrimeClassification(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
bin_thr = Y.mean()
Y = (Y>= bin_thr).astype(int)
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class BarPass(DataLoader):
# http://www.seaphe.org/databases.php
def __init__(self, **kwargs):
df = pd.read_sas('data/lawschs1_1.sas7bdat')
drop_cols = ['enroll', 'college', 'Year', 'Race']
df = df[[col for col in df.columns if col not in drop_cols]]
df = df.dropna()
Y = df[['GPA']]
A = df[['White']]
X = df.drop('GPA', axis=1)
info = '''Law School Admissions Data collected by Project SEAPHE, predict GPA,
don\'t discriminate White vs. Non-White\nhttp://www.seaphe.org/databases.php'''
self.first_call = True
super().__init__(X, Y, A, info=info, **kwargs)
def get_data(self):
if self.first_call:
self.Xs, self.Ys, self.As = next(self.stratified_batch_generator_worep(10000, 1))
self.first_call = False
return (self.Xs, self.Ys, self.As)
class StudentPerformance(DataLoader):
# https://archive.ics.uci.edu/ml/datasets/student+performance
def __init__(self, subject = 'Math', **kwargs):
# load data
df = pd.read_csv('data/student/student-{}.csv'.format(subject.lower()[:3]), sep=';')\
# convert the categorical values
categoricals = df.dtypes[df.dtypes==object].index
for attribute in categoricals:
options = df[attribute].unique()
options.sort()
options = options[:-1]
for option in options:
df['{}_{}'.format(attribute, option)] = (df[attribute]==option).astype(int)
df = df.drop(attribute, axis=1)
# extract X A Y
A = df[['sex_F']]
Y = df[['G3']]
X = df.drop(['sex_F', 'G3'], axis=1)
info = '''
Student Performance dataset. Predict Final Grade based on Attributes, don't discriminate against female students.
https://archive.ics.uci.edu/ml/datasets/student+performance
'''
super().__init__(X, Y, A, info=info, **kwargs)
class Compas(DataLoader):
def __init__(self):
X, Y, A = load_compas_data('data/compas/compas-scores-two-years.csv')
info = '''
https://www.kaggle.com/danofer/compass
'''
super().__init__(X, Y[:, None], A[:, None], info=info)
class Synthetic1(DataLoader):
# synthetic data: bias offset
def __init__(self, N, k, delta_intercept = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = delta_intercept+ X_0@theta
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
class Synthetic2(DataLoader):
# synthetic data: bias slope
def __init__(self, N, k, delta_slope = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = X_0@(theta+delta_slope)
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
def set_seed(seed=0):
torch.manual_seed(seed)
np.random.seed(seed)
| 11,586
| 38.546075
| 121
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/Fair_KDE/utils.py
|
import numpy as np
import pandas as pd
def measures_from_Yhat(Y, Z, Yhat=None, threshold=0.5):
assert isinstance(Y, np.ndarray)
assert isinstance(Z, np.ndarray)
assert Yhat is not None
assert isinstance(Yhat, np.ndarray)
if Yhat is not None:
Ytilde = (Yhat >= threshold).astype(np.float32)
assert Ytilde.shape == Y.shape and Y.shape == Z.shape
# Accuracy
acc = (Ytilde == Y).astype(np.float32).mean()
# DP
DDP = abs(np.mean(Ytilde[Z==0])-np.mean(Ytilde[Z==1]))
# EO
Y_Z0, Y_Z1 = Y[Z==0], Y[Z==1]
Y1_Z0 = Y_Z0[Y_Z0==1]
Y0_Z0 = Y_Z0[Y_Z0==0]
Y1_Z1 = Y_Z1[Y_Z1==1]
Y0_Z1 = Y_Z1[Y_Z1==0]
FPR, FNR = {}, {}
FPR[0] = np.sum(Ytilde[np.logical_and(Z==0, Y==0)])/len(Y0_Z0)
FPR[1] = np.sum(Ytilde[np.logical_and(Z==1, Y==0)])/len(Y0_Z1)
FNR[0] = np.sum(1 - Ytilde[np.logical_and(Z==0, Y==1)])/len(Y1_Z0)
FNR[1] = np.sum(1 - Ytilde[np.logical_and(Z==1, Y==1)])/len(Y1_Z1)
TPR_diff = abs((1-FNR[0]) - (1-FNR[1]))
FPR_diff = abs(FPR[0] - FPR[1])
DEO = TPR_diff + FPR_diff
data = [acc, DDP, DEO]
columns = ['acc', 'DDP', 'DEO']
return pd.DataFrame([data], columns=columns)
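# Illustrative sketch (not part of the original code): measures_from_Yhat takes
# hard labels Y, group membership Z and soft scores Yhat (1-D numpy arrays of
# equal length) and returns a one-row DataFrame with accuracy, the demographic
# parity gap (DDP) and the equalized-odds gap (DEO). The helper below is a
# hypothetical smoke test on random data.
def _measures_smoke_test(n=200, seed=0):
    rng = np.random.default_rng(seed)
    Y = rng.integers(0, 2, n).astype(np.float32)
    Z = rng.integers(0, 2, n).astype(np.float32)
    Yhat = rng.random(n).astype(np.float32)
    return measures_from_Yhat(Y, Z, Yhat=Yhat, threshold=0.5)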
| 1,205
| 29.923077
| 70
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/Fair_KDE/models.py
|
import torch
import torch.nn as nn
class Classifier(nn.Module):
def __init__(self, n_layers, n_inputs, n_hidden_units):
super(Classifier, self).__init__()
layers = []
if n_layers == 1: # Logistic Regression
layers.append(nn.Linear(n_inputs, 1))
layers.append(nn.Sigmoid())
else:
layers.append(nn.Linear(n_inputs, n_hidden_units))
layers.append(nn.ReLU())
for i in range(n_layers-2):
layers.append(nn.Linear(n_hidden_units, n_hidden_units))
layers.append(nn.ReLU())
layers.append(nn.Linear(n_hidden_units,1))
layers.append(nn.Sigmoid())
self.layers = nn.Sequential(*layers)
def forward(self, x):
x = self.layers(x)
return x
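# Illustrative sketch (not part of the original code): with n_layers=1 the
# Classifier above reduces to logistic regression; otherwise it is a ReLU MLP
# with a sigmoid output in (0, 1). The hypothetical shape check below is never
# called by the rest of this module.
def _classifier_shape_check(n_inputs=10, batch=4):
    net = Classifier(n_layers=2, n_inputs=n_inputs, n_hidden_units=20)
    scores = net(torch.randn(batch, n_inputs))
    assert scores.shape == (batch, 1) and bool(((scores > 0) & (scores < 1)).all())
    return scores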
| 829
| 33.583333
| 72
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/Fair_KDE/fair_KDE_.py
|
# Baseline Fair KDE : https://proceedings.neurips.cc//paper/2020/file/ac3870fcad1cfc367825cda0101eee62-Paper.pdf
import cvxpy as cp
import numpy as np
import argparse
import pandas as pd
import torch
import fairness_metrics
import data_loader
from tqdm import tqdm
from collections import namedtuple
from sklearn.metrics import log_loss
from copy import deepcopy
import os, sys
import time
import random
import matplotlib.pyplot as plt
import torch.optim as optim
from models import Classifier
from dataloader import FairnessDataset
from algorithm import train_fair_classifier
import matplotlib.pyplot as plt
import torch.nn as nn
from torch.utils.data import DataLoader
from dataloader import CustomDataset
from utils import measures_from_Yhat
tau = 0.5
# Approximation of Q-function given by López-Benítez & Casadevall (2011) based on a second-order exponential function & Q(x) = 1- Q(-x):
a = 0.4920
b = 0.2887
c = 1.1893
Q_function = lambda x: torch.exp(-a*x**2 - b*x - c)
def CDF_tau(Yhat, h=0.01, tau=0.5):
m = len(Yhat)
Y_tilde = (tau-Yhat)/h
sum_ = torch.sum(Q_function(Y_tilde[Y_tilde>0])) \
+ torch.sum(1-Q_function(torch.abs(Y_tilde[Y_tilde<0]))) \
+ 0.5*(len(Y_tilde[Y_tilde==0]))
return sum_/m
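# Added explanatory note (not original code): CDF_tau is a kernel-smoothed estimate of Pr(Yhat >= tau).
# With the Q-function approximation above, extended to negative arguments via Q(x) = 1 - Q(-x),
#   Pr(Yhat >= tau) ~= (1/m) * sum_i Q((tau - Yhat_i) / h),
# where h is the bandwidth; points with Yhat_i exactly equal to tau are counted with weight 1/2.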
def Huber_loss(x, delta):
if x.abs() < delta:
return (x ** 2) / 2
return delta * (x.abs() - delta / 2)
# act on experiment parameters:
data_loader.set_seed(0)
gamma_candidates = np.logspace(-2, 2, num=10)
ds = data_loader.Compas()
ds.split_test()
k = ds.get_k()
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# storage of results
results_train = []
results_test = []
dataset_name = 'COMPAS' # ['Moon', 'Lawschool', 'AdultCensus', 'CreditDefault', 'COMPAS']
##### Which fairness notion to consider (Demographic Parity / Equalized Odds) #####
fairness = 'DP' # ['DP', 'EO']
##### Model specifications #####
n_layers = 2 # [positive integers]
n_hidden_units = 20 # [positive integers]
##### Our algorithm hyperparameters #####
h = 0.1 # Bandwidth hyperparameter in KDE [positive real numbers]
delta = 1.0 # Delta parameter in Huber loss [positive real numbers]
lambda_ = 0.05 # regularization factor of DDP/DEO; Positive real numbers \in [0.0, 1.0]
##### Other training hyperparameters #####
batch_size = 2048
lr = 2e-4
lr_decay = 1.0 # Exponential decay factor of LR scheduler
n_seeds = 5 # Number of random seeds to try
n_epochs = 200
seed = 5
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
##### Whether to enable GPU training or not
device = torch.device('cpu') # or torch.device('cuda') to enable GPU training
# Import dataset
# dataset = FairnessDataset(dataset=dataset_name, device=device)
# dataset.normalize()
input_dim = k + 1
net = Classifier(n_layers=n_layers, n_inputs=input_dim, n_hidden_units=n_hidden_units)
net = net.to(device)
# Set an optimizer
optimizer = optim.Adam(net.parameters(), lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay) # None
# X, Y, A = ds.get_data()
# X_test, Y_test, A_test = ds.get_test_data()
# x_train = X.cpu().detach().numpy()
# Y_train = Y.cpu().detach().numpy().flatten()
# a_train = A.cpu().detach().numpy().flatten()
# x_test = X_test.cpu().detach().numpy()
# y_test = Y_test.cpu().detach().numpy().flatten()
# a_test = A_test.cpu().detach().numpy().flatten()
# train_tensors, test_tensors = dataset.get_dataset_in_tensor()
# X_train, Y_train, Z_train, XZ_train = train_tensors
# X_test, Y_test, Z_test, XZ_test = test_tensors
# Retrieve train/test splitted numpy arrays for index=split
# train_arrays, test_arrays = dataset.get_dataset_in_ndarray()
# X_train_np, Y_train_np, Z_train_np, XZ_train_np = train_arrays
# X_test_np, Y_test_np, Z_test_np, XZ_test_np = test_arrays
X_train, Y_train, Z_train = ds.get_data()
X_test, Y_test, Z_test = ds.get_test_data()
XZ_test = torch.cat([X_test, Z_test], 1)
XZ_train = torch.cat([X_train, Z_train], 1)
custom_dataset = CustomDataset(XZ_train, Y_train, Z_train)
if batch_size == 'full':
batch_size_ = XZ_train.shape[0]
elif isinstance(batch_size, int):
batch_size_ = batch_size
generator = DataLoader(custom_dataset, batch_size=batch_size_, shuffle=True)
pi = torch.tensor(np.pi).to(device)
phi = lambda x: torch.exp(-0.5*x**2)/torch.sqrt(2*pi) #normal distribution
# # An empty dataframe for logging experimental results
# df = pd.DataFrame()
# df_ckpt = pd.DataFrame()
loss_function = nn.BCELoss()
costs = []
results_test = []
results_train = []
for epoch in range(n_epochs):
for i, (xz_batch, y_batch, z_batch) in enumerate(generator):
xz_batch, y_batch, z_batch = xz_batch.to(device), y_batch.to(device), z_batch.to(device)
Yhat = net(xz_batch)
Ytilde = torch.round(Yhat.squeeze())
cost = 0
dtheta = 0
m = z_batch.shape[0]
# prediction loss
p_loss = loss_function(Yhat.squeeze(), y_batch.squeeze())
cost += (1 - lambda_) * p_loss
# DP_Constraint
if fairness == 'DP':
Pr_Ytilde1 = CDF_tau(Yhat.detach(), h, tau)
for z in range(1):
Pr_Ytilde1_Z = CDF_tau(Yhat.detach()[z_batch==z],h,tau)
m_z = z_batch[z_batch==z].shape[0]
Delta_z = Pr_Ytilde1_Z-Pr_Ytilde1
Delta_z_grad = torch.dot(phi((tau-Yhat.detach()[z_batch==z])/h).view(-1),
Yhat[z_batch==z].view(-1))/h/m_z
Delta_z_grad -= torch.dot(phi((tau-Yhat.detach())/h).view(-1),
Yhat.view(-1))/h/m
if Delta_z.abs() >= delta:
if Delta_z > 0:
Delta_z_grad *= lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= -lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= lambda_*Delta_z
cost += Delta_z_grad
# EO_Constraint
elif fairness == 'EO':
for y in [0,1]:
Pr_Ytilde1_Y = CDF_tau(Yhat[y_batch==y].detach(),h,tau)
m_y = y_batch[y_batch==y].shape[0]
for z in range(1):
Pr_Ytilde1_ZY = CDF_tau(Yhat[(y_batch==y) & (z_batch==z)].detach(),h,tau)
m_zy = z_batch[(y_batch==y) & (z_batch==z)].shape[0]
Delta_zy = Pr_Ytilde1_ZY-Pr_Ytilde1_Y
Delta_zy_grad = torch.dot(
phi((tau-Yhat[(y_batch==y) & (z_batch==z)].detach())/h).view(-1),
Yhat[(y_batch==y) & (z_batch==z)].view(-1)
)/h/m_zy
Delta_zy_grad -= torch.dot(
phi((tau-Yhat[y_batch==y].detach())/h).view(-1),
Yhat[y_batch==y].view(-1)
)/h/m_y
                    if Delta_zy.abs() >= delta:
                        if Delta_zy > 0:
                            Delta_zy_grad *= lambda_*delta
                            cost += Delta_zy_grad
                        else:  # mirror the DP branch above for the negative case
                            Delta_zy_grad *= -lambda_*delta
                            cost += Delta_zy_grad
                    else:
                        Delta_zy_grad *= lambda_*Delta_zy
                        cost += Delta_zy_grad
optimizer.zero_grad()
if (torch.isnan(cost)).any():
continue
cost.backward()
optimizer.step()
costs.append(cost.item())
# Print the cost per 10 batches
if (i + 1) % 10 == 0 or (i + 1) == len(generator):
print('Epoch [{}/{}], Batch [{}/{}], Cost: {:.4f}'.format(epoch+1, n_epochs,
i+1, len(generator),
cost.item()), end='\r')
if lr_scheduler is not None:
lr_scheduler.step()
def predict(XZ):
Y_hat_ = net(XZ)
Y_hat_[Y_hat_>=0.5] = 1
Y_hat_[Y_hat_ < 0.5] = 0
return Y_hat_
# metrics on train set
y_hat = predict(XZ_train).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[Z_train==1]
y_hat_0 = y_hat[Z_train==0]
y_1 = Y_train[Z_train==1]
y_0 = Y_train[Z_train==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = predict(XZ_test).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[Z_test==1]
y_hat_0 = y_hat[Z_test==0]
y_1 = Y_test[Z_test==1]
y_0 = Y_test[Z_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['lambda_'] = lambda_
test_results['lambda_'] = lambda_
results_train.append(train_results)
results_test.append(test_results)
# df_train = pd.DataFrame(data=results_train)
# df_test = pd.DataFrame(data=results_test)
# df_train.to_csv('results/{}_zafar_{}_train.csv'.format(args.dataset, 0))
# df_test.to_csv('results/{}_zafar_{}_test.csv'.format(args.dataset, 0))
| 9,960
| 34.830935
| 136
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/zafar_method/funcs_disp_mist.py
|
from __future__ import division
import os, sys
import traceback
import numpy as np
from random import seed, shuffle
from collections import defaultdict
from copy import deepcopy
import cvxpy
import dccp
from dccp.problem import is_dccp
from zafar_method import utils as ut
def train_model_disp_mist(x, y, x_control, loss_function, EPS, cons_params):
# cons_type, sensitive_attrs_to_cov_thresh, take_initial_sol, gamma, tau, mu, EPS, cons_type
"""
Function that trains the model subject to various fairness constraints.
If no constraints are given, then simply trains an unaltered classifier.
Example usage in: "disparate_mistreatment/synthetic_data_demo/decision_boundary_demo.py"
----
Inputs:
X: (n) x (d+1) numpy array -- n = number of examples, d = number of features, one feature is the intercept
y: 1-d numpy array (n entries)
x_control: dictionary of the type {"s": [...]}, key "s" is the sensitive feature name, and the value is a 1-d list with n elements holding the sensitive feature values
loss_function: the loss function that we want to optimize -- for now we have implementation of logistic loss, but other functions like hinge loss can also be added
EPS: stopping criteria for the convex solver. check the CVXPY documentation for details. default for CVXPY is 1e-6
cons_params: is None when we do not want to apply any constraints
otherwise: cons_params is a dict with keys as follows:
- cons_type:
- 0 for all misclassifications
- 1 for FPR
- 2 for FNR
- 4 for both FPR and FNR
- tau: DCCP parameter, controls how much weight to put on the constraints, if the constraints are not satisfied, then increase tau -- default is DCCP val 0.005
- mu: DCCP parameter, controls the multiplicative factor by which the tau increases in each DCCP iteration -- default is the DCCP val 1.2
- take_initial_sol: whether the starting point for DCCP should be the solution for the original (unconstrained) classifier -- default value is True
- sensitive_attrs_to_cov_thresh: covariance threshold for each cons_type, eg, key 1 contains the FPR covariance
----
Outputs:
w: the learned weight vector for the classifier
"""
max_iters = 100 # for the convex program
max_iter_dccp = 50 # for the dccp algo
num_points, num_features = x.shape
w = cvxpy.Variable(num_features) # this is the weight vector
# initialize a random value of w
w.value = np.random.rand(x.shape[1])
if cons_params is None: # just train a simple classifier, no fairness constraints
constraints = []
else:
constraints = get_constraint_list_cov(x, y, x_control, cons_params["sensitive_attrs_to_cov_thresh"],
cons_params["cons_type"], w)
if loss_function == "logreg":
# constructing the logistic loss problem
loss = cvxpy.sum(
            cvxpy.logistic(cvxpy.multiply(-y, x * w))) / num_points  # average logistic loss over all points
# sometimes, its a good idea to give a starting point to the constrained solver
# this starting point for us is the solution to the unconstrained optimization problem
# another option of starting point could be any feasible solution
        take_initial_sol = True  # True by default
        if cons_params is not None and cons_params.get("take_initial_sol") == False:
            take_initial_sol = False
if take_initial_sol == True: # get the initial solution
p = cvxpy.Problem(cvxpy.Minimize(loss), [])
p.solve()
# construct the cvxpy problem
prob = cvxpy.Problem(cvxpy.Minimize(loss), constraints)
# print "\n\n"
# print "Problem is DCP (disciplined convex program):", prob.is_dcp()
# print "Problem is DCCP (disciplined convex-concave program):", is_dccp(prob)
try:
tau, mu = 0.005, 1.2 # default dccp parameters, need to be varied per dataset
if cons_params is not None: # in case we passed these parameters as a part of dccp constraints
if cons_params.get("tau") is not None: tau = cons_params["tau"]
if cons_params.get("mu") is not None: mu = cons_params["mu"]
if cons_params.get("gamma") is not None: gamma = cons_params["gamma"]
prob.solve(method='dccp', tau=tau, mu=mu, tau_max=1e10,
solver=cvxpy.ECOS, verbose=False,
feastol=EPS, abstol=EPS, reltol=EPS, feastol_inacc=EPS, abstol_inacc=EPS, reltol_inacc=EPS,
max_iters=max_iters, max_iter=max_iter_dccp)
assert (prob.status == "Converged" or prob.status == "optimal")
# print "Optimization done, problem status:", prob.status
except:
traceback.print_exc()
sys.stdout.flush()
sys.exit(1)
# check that the fairness constraint is satisfied
# for f_c in constraints:
# assert (
# f_c.value == True) # can comment this out if the solver fails too often, but make sure that the constraints are satisfied empirically. alternatively, consider increasing tau parameter
# pass
w = np.array(w.value).flatten() # flatten converts it to a 1d array
return w
def get_clf_stats(w, x_train, y_train, x_control_train, x_test, y_test, x_control_test, sensitive_attrs):
assert (len(sensitive_attrs) == 1) # ensure that we have just one sensitive attribute
s_attr = "s1" # for now, lets compute the accuracy for just one sensitive attr
# compute distance from boundary
distances_boundary_train = get_distance_boundary(w, x_train, x_control_train[s_attr])
distances_boundary_test = get_distance_boundary(w, x_test, x_control_test[s_attr])
# compute the class labels
all_class_labels_assigned_train = np.sign(distances_boundary_train)
all_class_labels_assigned_test = np.sign(distances_boundary_test)
train_score, test_score, correct_answers_train, correct_answers_test = ut.check_accuracy(None, x_train, y_train,
x_test, y_test,
all_class_labels_assigned_train,
all_class_labels_assigned_test)
cov_all_train = {}
cov_all_test = {}
for s_attr in sensitive_attrs:
print_stats = False # we arent printing the stats for the train set to avoid clutter
# uncomment these lines to print stats for the train fold
# print "*** Train ***"
# print "Accuracy: %0.3f" % (train_score)
# print_stats = True
s_attr_to_fp_fn_train = get_fpr_fnr_sensitive_features(y_train, all_class_labels_assigned_train,
x_control_train, sensitive_attrs, print_stats)
cov_all_train[s_attr] = get_sensitive_attr_constraint_fpr_fnr_cov(None, x_train, y_train,
distances_boundary_train,
x_control_train[s_attr])
print_stats = True # only print stats for the test fold
s_attr_to_fp_fn_test = get_fpr_fnr_sensitive_features(y_test, all_class_labels_assigned_test, x_control_test,
sensitive_attrs, print_stats)
cov_all_test[s_attr] = get_sensitive_attr_constraint_fpr_fnr_cov(None, x_test, y_test, distances_boundary_test,
x_control_test[s_attr])
return train_score, test_score, cov_all_train, cov_all_test, s_attr_to_fp_fn_train, s_attr_to_fp_fn_test,\
all_class_labels_assigned_train, all_class_labels_assigned_test
def get_distance_boundary(w, x, s_attr_arr):
"""
if we have boundaries per group, then use those separate boundaries for each sensitive group
else, use the same weight vector for everything
"""
distances_boundary = np.zeros(x.shape[0])
if isinstance(w, dict): # if we have separate weight vectors per group
for k in w.keys(): # for each w corresponding to each sensitive group
d = np.dot(x, w[k])
distances_boundary[s_attr_arr == k] = d[
s_attr_arr == k] # set this distance only for people with this sensitive attr val
else: # we just learn one w for everyone else
distances_boundary = np.dot(x, w)
return distances_boundary
def get_constraint_list_cov(x_train, y_train, x_control_train, sensitive_attrs_to_cov_thresh, cons_type, w):
"""
get the list of constraints to be fed to the minimizer
    cons_type == 0: means the whole combined misclassification constraint (without FNR or FPR)
cons_type == 1: FPR constraint
cons_type == 2: FNR constraint
cons_type == 4: both FPR as well as FNR constraints
sensitive_attrs_to_cov_thresh: is a dict like {s: {cov_type: val}}
s is the sensitive attr
cov_type is the covariance type. contains the covariance for all misclassifications, FPR and for FNR etc
"""
constraints = []
# print(sensitive_attrs_to_cov_thresh.keys())
for attr in ["s1"]:
attr_arr = x_control_train[attr]
attr_arr_transformed = attr_arr
s_val_to_total = {ct: {} for ct in [0, 1, 2]} # constrain type -> sens_attr_val -> total number
s_val_to_avg = {ct: {} for ct in [0, 1, 2]}
cons_sum_dict = {ct: {} for ct in
[0, 1, 2]} # sum of entities (females and males) in constraints are stored here
for v in set(attr_arr):
s_val_to_total[0][v] = sum(x_control_train[attr] == v)
s_val_to_total[1][v] = sum(np.logical_and(x_control_train[attr] == v,
y_train == -1)) # FPR constraint so we only consider the ground truth negative dataset for computing the covariance
s_val_to_total[2][v] = sum(np.logical_and(x_control_train[attr] == v, y_train == +1))
for ct in [0, 1, 2]:
s_val_to_avg[ct][0] = s_val_to_total[ct][1] / float(s_val_to_total[ct][0] + s_val_to_total[ct][
1]) # N1/N in our formulation, differs from one constraint type to another
s_val_to_avg[ct][1] = 1.0 - s_val_to_avg[ct][0] # N0/N
for v in set(attr_arr):
idx = x_control_train[attr] == v
#################################################################
# #DCCP constraints
dist_bound_prod = cvxpy.multiply(y_train[idx], x_train[idx] * w) # y.f(x)
cons_sum_dict[0][v] = cvxpy.sum(cvxpy.minimum(0, dist_bound_prod)) * (
s_val_to_avg[0][v] / len(x_train)) # avg misclassification distance from boundary
cons_sum_dict[1][v] = cvxpy.sum(
cvxpy.minimum(0, cvxpy.multiply((1 - y_train[idx]) / 2.0, dist_bound_prod))) * (
s_val_to_avg[1][v] / sum(
y_train == -1)) # avg false positive distance from boundary (only operates on the ground truth neg dataset)
cons_sum_dict[2][v] = cvxpy.sum(
cvxpy.minimum(0, cvxpy.multiply((1 + y_train[idx]) / 2.0, dist_bound_prod))) * (
s_val_to_avg[2][v] / sum(
y_train == +1)) # avg false negative distance from boundary
#################################################################
if cons_type == 4:
cts = [1, 2]
elif cons_type in [0, 1, 2]:
cts = [cons_type]
else:
raise Exception("Invalid constraint type")
#################################################################
# DCCP constraints
for ct in cts:
thresh = cvxpy.abs(sensitive_attrs_to_cov_thresh[attr][ct][1] - sensitive_attrs_to_cov_thresh[attr][ct][0])
constraints.append(cons_sum_dict[ct][1] <= cons_sum_dict[ct][0] + thresh)
constraints.append(cons_sum_dict[ct][1] >= cons_sum_dict[ct][0] - thresh)
#################################################################
return constraints
def get_fpr_fnr_sensitive_features(y_true, y_pred, x_control, sensitive_attrs, verbose=False):
    # we will make some changes to x_control in this function, so make a copy in order to preserve the original referenced object
x_control_internal = deepcopy(x_control)
s_attr_to_fp_fn = {}
for s in sensitive_attrs:
s_attr_to_fp_fn[s] = {}
s_attr_vals = x_control_internal[s]
for s_val in sorted(list(set(s_attr_vals))):
s_attr_to_fp_fn[s][s_val] = {}
y_true_local = y_true[s_attr_vals == s_val]
y_pred_local = y_pred[s_attr_vals == s_val]
acc = float(sum(y_true_local == y_pred_local)) / len(y_true_local)
fp = sum(np.logical_and(y_true_local == -1,
y_pred_local == +1.0)) # something which is -ve but is misclassified as +ve
fn = sum(np.logical_and(y_true_local == +1.0,
y_pred_local == -1.0)) # something which is +ve but is misclassified as -ve
tp = sum(np.logical_and(y_true_local == +1.0,
y_pred_local == +1.0)) # something which is +ve AND is correctly classified as +ve
tn = sum(np.logical_and(y_true_local == -1,
y_pred_local == -1.0)) # something which is -ve AND is correctly classified as -ve
all_neg = sum(y_true_local == -1)
all_pos = sum(y_true_local == +1.0)
# fpr = float(fp) / (float(fp + tn))
# fnr = float(fn) / (float(fn + tp))
# tpr = float(tp) / (float(tp + fn) )
# tnr = float(tn) / (float(tn + fp))
fpr = 0.0 if float(fp + tn) == 0 else float(fp) / float(fp + tn)
fnr = 0.0 if float(fn + tp) == 0 else float(fn) / float(fn + tp)
tpr = 0.0 if float(tp + fn) == 0 else float(tp) / float(tp + fn)
tnr = 0.0 if float(tn + fp) == 0 else float(tn) / float(tn + fp)
s_attr_to_fp_fn[s][s_val]["fp"] = fp
s_attr_to_fp_fn[s][s_val]["fn"] = fn
s_attr_to_fp_fn[s][s_val]["fpr"] = fpr
s_attr_to_fp_fn[s][s_val]["fnr"] = fnr
s_attr_to_fp_fn[s][s_val]["acc"] = (tp + tn) / (tp + tn + fp + fn)
if verbose == True:
if isinstance(s_val, float): # print the int value of the sensitive attr val
s_val = int(s_val)
return s_attr_to_fp_fn
def get_sensitive_attr_constraint_fpr_fnr_cov(model, x_arr, y_arr_true, y_arr_dist_boundary, x_control_arr,
verbose=False):
"""
Here we compute the covariance between sensitive attr val and ONLY misclassification distances from boundary for False-positives
(-N_1 / N) sum_0(min(0, y.f(x))) + (N_0 / N) sum_1(min(0, y.f(x))) for all misclassifications
(-N_1 / N) sum_0(min(0, (1-y)/2 . y.f(x))) + (N_0 / N) sum_1(min(0, (1-y)/2. y.f(x))) for FPR
y_arr_true are the true class labels
y_arr_dist_boundary are the predicted distances from the decision boundary
    If the model is None, we assume that y_arr_dist_boundary contains the distance from the decision boundary
    If the model is not None, we just compute the dot product of the model and x_arr
    for the case of SVM, we pass the distance from the boundary because the intercept is internalized for the class
    and we have to compute the distance using the project function
    this function will return -1 if the constraint specified by the thresh parameter is not satisfied
    otherwise it will return +1
if the return value is >=0, then the constraint is satisfied
"""
assert (x_arr.shape[0] == x_control_arr.shape[0])
if len(x_control_arr.shape) > 1: # make sure we just have one column in the array
assert (x_control_arr.shape[1] == 1)
if len(set(x_control_arr)) != 2: # non binary attr
raise Exception("Non binary attr, fix to handle non bin attrs")
arr = []
if model is None:
arr = y_arr_dist_boundary * y_arr_true # simply the output labels
else:
arr = np.dot(model,
x_arr.T) * y_arr_true # the product with the weight vector -- the sign of this is the output label
arr = np.array(arr)
s_val_to_total = {ct: {} for ct in [0, 1, 2]}
s_val_to_avg = {ct: {} for ct in [0, 1, 2]}
cons_sum_dict = {ct: {} for ct in [0, 1, 2]} # sum of entities (females and males) in constraints are stored here
for v in set(x_control_arr):
s_val_to_total[0][v] = cvxpy.sum(x_control_arr == v)
s_val_to_total[1][v] = cvxpy.sum(np.logical_and(x_control_arr == v, y_arr_true == -1))
s_val_to_total[2][v] = cvxpy.sum(np.logical_and(x_control_arr == v, y_arr_true == +1))
for ct in [0, 1, 2]:
s_val_to_avg[ct][0] = s_val_to_total[ct][1] / (s_val_to_total[ct][0] + s_val_to_total[ct][1]) # N1 / N
s_val_to_avg[ct][1] = 1.0 - s_val_to_avg[ct][0] # N0 / N
for v in set(x_control_arr):
idx = x_control_arr == v
dist_bound_prod = arr[idx]
cons_sum_dict[0][v] = cvxpy.sum(np.minimum(0, dist_bound_prod)) * (s_val_to_avg[0][v] / len(x_arr))
cons_sum_dict[1][v] = cvxpy.sum(np.minimum(0, ((1 - y_arr_true[idx]) / 2) * dist_bound_prod)) * (
s_val_to_avg[1][v] / cvxpy.sum(y_arr_true == -1))
cons_sum_dict[2][v] = cvxpy.sum(np.minimum(0, ((1 + y_arr_true[idx]) / 2) * dist_bound_prod)) * (
s_val_to_avg[2][v] / cvxpy.sum(y_arr_true == + 1))
cons_type_to_name = {0: "ALL", 1: "FPR", 2: "FNR"}
for cons_type in [0, 1, 2]:
cov_type_name = cons_type_to_name[cons_type]
cov = cons_sum_dict[cons_type][1] - cons_sum_dict[cons_type][0]
return cons_sum_dict
| 18,367
| 49.185792
| 206
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/zafar_method/loss_funcs.py
|
import sys
import os
import numpy as np
import scipy.special
from collections import defaultdict
import traceback
from copy import deepcopy
def _hinge_loss(w, X, y):
yz = y * np.dot(X,w) # y * (x.w)
yz = np.maximum(np.zeros_like(yz), (1-yz)) # hinge function
return sum(yz)
def _logistic_loss(w, X, y, return_arr=None):
"""Computes the logistic loss.
This function is used from scikit-learn source code
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
yz = y * np.dot(X,w)
# Logistic loss is the negative of the log of the logistic function.
if return_arr == True:
out = -(log_logistic(yz))
else:
out = -np.sum(log_logistic(yz))
return out
def _logistic_loss_l2_reg(w, X, y, lam=None):
if lam is None:
lam = 1.0
yz = y * np.dot(X,w)
# Logistic loss is the negative of the log of the logistic function.
logistic_loss = -np.sum(log_logistic(yz))
l2_reg = (float(lam)/2.0) * np.sum([elem*elem for elem in w])
out = logistic_loss + l2_reg
return out
def log_logistic(X):
""" This function is used from scikit-learn source code. Source link below """
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
Source code at:
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
if X.ndim > 1: raise Exception("Array of samples cannot be more than 1-D!")
out = np.empty_like(X) # same dimensions and data types
idx = X>0
out[idx] = -np.log(1.0 + np.exp(-X[idx]))
out[~idx] = X[~idx] - np.log(1.0 + np.exp(X[~idx]))
return out
| 2,268
| 22.884211
| 82
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/zafar_method/loss_funcs_after.py
|
import sys
import os
import numpy as np
import scipy.special
from collections import defaultdict
import traceback
from copy import deepcopy
def _hinge_loss(w, X, y):
yz = y * np.dot(X, w) # y * (x.w)
yz = np.maximum(np.zeros_like(yz), (1 - yz)) # hinge function
return sum(yz)
def _logistic_loss(w, X, y, return_arr=None):
"""Computes the logistic loss.
This function is used from scikit-learn source code
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
yz = y * np.dot(X, w)
# Logistic loss is the negative of the log of the logistic function.
if return_arr == True:
out = -(log_logistic(yz))
else:
out = -np.sum(log_logistic(yz))
return out
def _logistic_loss_l2_reg(w, X, y, lam=None):
if lam is None:
lam = 1.0
yz = y * np.dot(X, w)
# Logistic loss is the negative of the log of the logistic function.
logistic_loss = -np.sum(log_logistic(yz))
l2_reg = (float(lam) / 2.0) * np.sum([elem * elem for elem in w])
out = logistic_loss + l2_reg
return out
def log_logistic(X):
""" This function is used from scikit-learn source code. Source link below """
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
Source code at:
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
if X.ndim > 1: raise Exception("Array of samples cannot be more than 1-D!")
out = np.empty_like(X) # same dimensions and data types
    idx = X > 0
out[idx] = -np.log(1.0 + np.exp(-X[idx]))
out[~idx] = X[~idx] - np.log(1.0 + np.exp(X[~idx]))
return out
| 2,356
| 27.743902
| 82
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/zafar_method/utils.py
|
import numpy as np
from random import seed, shuffle
from zafar_method import loss_funcs as lf # our implementation of loss funcs
from scipy.optimize import minimize # for loss func minimization
from multiprocessing import Pool, Process, Queue
from collections import defaultdict
from copy import deepcopy
import matplotlib.pyplot as plt # for plotting stuff
import sys
def train_model(x, y, x_control, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma=None):
"""
Function that trains the model subject to various fairness constraints.
If no constraints are given, then simply trains an unaltered classifier.
Example usage in: "synthetic_data_demo/decision_boundary_demo.py"
----
Inputs:
X: (n) x (d+1) numpy array -- n = number of examples, d = number of features, one feature is the intercept
y: 1-d numpy array (n entries)
x_control: dictionary of the type {"s": [...]}, key "s" is the sensitive feature name, and the value is a 1-d list with n elements holding the sensitive feature values
loss_function: the loss function that we want to optimize -- for now we have implementation of logistic loss, but other functions like hinge loss can also be added
apply_fairness_constraints: optimize accuracy subject to fairness constraint (0/1 values)
apply_accuracy_constraint: optimize fairness subject to accuracy constraint (0/1 values)
sep_constraint: apply the fine grained accuracy constraint
for details, see Section 3.3 of arxiv.org/abs/1507.05259v3
For examples on how to apply these constraints, see "synthetic_data_demo/decision_boundary_demo.py"
Note: both apply_fairness_constraints and apply_accuracy_constraint cannot be 1 at the same time
sensitive_attrs: ["s1", "s2", ...], list of sensitive features for which to apply fairness constraint, all of these sensitive features should have a corresponding array in x_control
sensitive_attrs_to_cov_thresh: the covariance threshold that the classifier should achieve (this is only needed when apply_fairness_constraints=1, not needed for the other two constraints)
gamma: controls the loss in accuracy we are willing to incur when using apply_accuracy_constraint and sep_constraint
----
Outputs:
w: the learned weight vector for the classifier
"""
assert((apply_accuracy_constraint == 1 and apply_fairness_constraints == 1) == False) # both constraints cannot be applied at the same time
max_iter = 100000 # maximum number of iterations for the minimization algorithm
if apply_fairness_constraints == 0:
constraints = []
else:
constraints = get_constraint_list_cov(x, y, x_control, sensitive_attrs, sensitive_attrs_to_cov_thresh)
if apply_accuracy_constraint == 0: #its not the reverse problem, just train w with cross cov constraints
f_args=(x, y)
w = minimize(fun = loss_function,
x0 = np.random.rand(x.shape[1],),
args = f_args,
method = 'SLSQP',
options = {"maxiter":max_iter},
constraints = constraints
)
else:
# train on just the loss function
w = minimize(fun = loss_function,
x0 = np.random.rand(x.shape[1],),
args = (x, y),
method = 'SLSQP',
options = {"maxiter":max_iter},
constraints = []
)
old_w = deepcopy(w.x)
def constraint_gamma_all(w, x, y, initial_loss_arr):
gamma_arr = np.ones_like(y) * gamma # set gamma for everyone
new_loss = loss_function(w, x, y)
old_loss = sum(initial_loss_arr)
return ((1.0 + gamma) * old_loss) - new_loss
def constraint_protected_people(w,x,y): # dont confuse the protected here with the sensitive feature protected/non-protected values -- protected here means that these points should not be misclassified to negative class
return np.dot(w, x.T) # if this is positive, the constraint is satisfied
def constraint_unprotected_people(w,ind,old_loss,x,y):
new_loss = loss_function(w, np.array([x]), np.array(y))
return ((1.0 + gamma) * old_loss) - new_loss
constraints = []
predicted_labels = np.sign(np.dot(w.x, x.T))
unconstrained_loss_arr = loss_function(w.x, x, y, return_arr=True)
        if sep_constraint == True: # separate gamma for different people
for i in range(0, len(predicted_labels)):
if predicted_labels[i] == 1.0 and x_control[sensitive_attrs[0]][i] == 1.0: # for now we are assuming just one sensitive attr for reverse constraint, later, extend the code to take into account multiple sensitive attrs
c = ({'type': 'ineq', 'fun': constraint_protected_people, 'args':(x[i], y[i])}) # this constraint makes sure that these people stay in the positive class even in the modified classifier
constraints.append(c)
else:
c = ({'type': 'ineq', 'fun': constraint_unprotected_people, 'args':(i, unconstrained_loss_arr[i], x[i], y[i])})
constraints.append(c)
else: # same gamma for everyone
c = ({'type': 'ineq', 'fun': constraint_gamma_all, 'args':(x,y,unconstrained_loss_arr)})
constraints.append(c)
def cross_cov_abs_optm_func(weight_vec, x_in, x_control_in_arr):
cross_cov = (x_control_in_arr - np.mean(x_control_in_arr)) * np.dot(weight_vec, x_in.T)
return float(abs(sum(cross_cov))) / float(x_in.shape[0])
w = minimize(fun = cross_cov_abs_optm_func,
x0 = old_w,
args = (x, x_control[sensitive_attrs[0]]),
method = 'SLSQP',
options = {"maxiter":100000},
constraints = constraints
)
try:
assert(w.success == True)
except:
print("Optimization problem did not converge.. Check the solution returned by the optimizer.")
print("Returned solution is:")
print(w)
return w.x
def compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh_arr, gamma):
"""
Computes the cross validation error for the classifier subject to various fairness constraints
This function is just a wrapper of "train_model(...)", all inputs (except for num_folds) are the same. See the specifications of train_model(...) for more info.
Returns lists of train/test accuracy (with each list holding values for all folds), the fractions of various sensitive groups in positive class (for train and test sets), and covariance between sensitive feature and distance from decision boundary (again, for both train and test folds).
"""
train_folds = []
test_folds = []
n_samples = len(y_all)
train_fold_size = 0.7 # the rest of 0.3 is for testing
# split the data into folds for cross-validation
for i in range(0,num_folds):
        perm = list(range(0, n_samples)) # shuffle the data before creating each fold
        shuffle(perm)
x_all_perm = x_all[perm]
y_all_perm = y_all[perm]
x_control_all_perm = {}
for k in x_control_all.keys():
x_control_all_perm[k] = np.array(x_control_all[k])[perm]
x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test = split_into_train_test(x_all_perm, y_all_perm, x_control_all_perm, train_fold_size)
train_folds.append([x_all_train, y_all_train, x_control_all_train])
test_folds.append([x_all_test, y_all_test, x_control_all_test])
def train_test_single_fold(train_data, test_data, fold_num, output_folds, sensitive_attrs_to_cov_thresh):
x_train, y_train, x_control_train = train_data
x_test, y_test, x_control_test = test_data
w = train_model(x_train, y_train, x_control_train, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
train_score, test_score, correct_answers_train, correct_answers_test = check_accuracy(w, x_train, y_train, x_test, y_test, None, None)
distances_boundary_test = (np.dot(x_test, w)).tolist()
all_class_labels_assigned_test = np.sign(distances_boundary_test)
correlation_dict_test = get_correlations(None, None, all_class_labels_assigned_test, x_control_test, sensitive_attrs)
cov_dict_test = print_covariance_sensitive_attrs(None, x_test, distances_boundary_test, x_control_test, sensitive_attrs)
distances_boundary_train = (np.dot(x_train, w)).tolist()
all_class_labels_assigned_train = np.sign(distances_boundary_train)
correlation_dict_train = get_correlations(None, None, all_class_labels_assigned_train, x_control_train, sensitive_attrs)
cov_dict_train = print_covariance_sensitive_attrs(None, x_train, distances_boundary_train, x_control_train, sensitive_attrs)
output_folds.put([fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test, cov_dict_train])
return
output_folds = Queue()
processes = [Process(target=train_test_single_fold, args=(train_folds[x], test_folds[x], x, output_folds, sensitive_attrs_to_cov_thresh_arr[x])) for x in range(num_folds)]
# Run processes
for p in processes:
p.start()
    # Get the results
results = [output_folds.get() for p in processes]
for p in processes:
p.join()
test_acc_arr = []
train_acc_arr = []
correlation_dict_test_arr = []
correlation_dict_train_arr = []
cov_dict_test_arr = []
cov_dict_train_arr = []
results = sorted(results, key = lambda x : x[0]) # sort w.r.t fold num
for res in results:
fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test, cov_dict_train = res
test_acc_arr.append(test_score)
train_acc_arr.append(train_score)
correlation_dict_test_arr.append(correlation_dict_test)
correlation_dict_train_arr.append(correlation_dict_train)
cov_dict_test_arr.append(cov_dict_test)
cov_dict_train_arr.append(cov_dict_train)
return test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr
def print_classifier_fairness_stats(acc_arr, correlation_dict_arr, cov_dict_arr, s_attr_name):
correlation_dict = get_avg_correlation_dict(correlation_dict_arr)
non_prot_pos = correlation_dict[s_attr_name][1][1]
prot_pos = correlation_dict[s_attr_name][0][1]
p_rule = (prot_pos / non_prot_pos) * 100.0
print("Accuracy: %0.2f" % (np.mean(acc_arr)))
print("Protected/non-protected in +ve class: %0.0f%% / %0.0f%%" % (prot_pos, non_prot_pos))
print("P-rule achieved: %0.0f%%" % (p_rule))
print("Covariance between sensitive feature and decision from distance boundary : %0.3f" % (np.mean([v[s_attr_name] for v in cov_dict_arr])))
return p_rule
def compute_p_rule(x_control, class_labels):
""" Compute the p-rule based on Doctrine of disparate impact """
non_prot_all = sum(x_control == 1.0) # non-protected group
prot_all = sum(x_control == 0.0) # protected group
non_prot_pos = sum(class_labels[x_control == 1.0] == 1.0) # non_protected in positive class
prot_pos = sum(class_labels[x_control == 0.0] == 1.0) # protected in positive class
frac_non_prot_pos = float(non_prot_pos) / float(non_prot_all)
frac_prot_pos = float(prot_pos) / float(prot_all)
p_rule = (frac_prot_pos / frac_non_prot_pos) * 100.0
print("Total data points: %d" % (len(x_control)))
print("# non-protected examples: %d" % (non_prot_all))
print("# protected examples: %d" % (prot_all))
print("Non-protected in positive class: %d (%0.0f%%)" % (non_prot_pos, non_prot_pos * 100.0 / non_prot_all))
print("Protected in positive class: %d (%0.0f%%)" % (prot_pos, prot_pos * 100.0 / prot_all))
print("P-rule is: %0.0f%%" % ( p_rule ))
return p_rule
def add_intercept(x):
""" Add intercept to the data before linear classification """
m,n = x.shape
intercept = np.ones(m).reshape(m, 1) # the constant b
return np.concatenate((intercept, x), axis = 1)
def check_binary(arr):
"give an array of values, see if the values are only 0 and 1"
s = sorted(set(arr))
if s[0] == 0 and s[1] == 1:
return True
else:
return False
def get_one_hot_encoding(in_arr):
"""
input: 1-D arr with int vals -- if not int vals, will raise an error
output: m (ndarray): one-hot encoded matrix
d (dict): also returns a dictionary original_val -> column in encoded matrix
"""
for k in in_arr:
if str(type(k)) != "<type 'numpy.float64'>" and type(k) != int and type(k) != np.int64:
print(str(type(k)))
print("************* ERROR: Input arr does not have integer types")
return None
in_arr = np.array(in_arr, dtype=int)
assert(len(in_arr.shape)==1) # no column, means it was a 1-D arr
attr_vals_uniq_sorted = sorted(list(set(in_arr)))
num_uniq_vals = len(attr_vals_uniq_sorted)
if (num_uniq_vals == 2) and (attr_vals_uniq_sorted[0] == 0 and attr_vals_uniq_sorted[1] == 1):
return in_arr, None
index_dict = {} # value to the column number
for i in range(0,len(attr_vals_uniq_sorted)):
val = attr_vals_uniq_sorted[i]
index_dict[val] = i
out_arr = []
for i in range(0,len(in_arr)):
tup = np.zeros(num_uniq_vals)
val = in_arr[i]
ind = index_dict[val]
tup[ind] = 1 # set that value of tuple to 1
out_arr.append(tup)
return np.array(out_arr), index_dict
def check_accuracy(model, x_train, y_train, x_test, y_test, y_train_predicted, y_test_predicted):
"""
returns the train/test accuracy of the model
we either pass the model (w)
else we pass y_predicted
"""
if model is not None and y_test_predicted is not None:
print("Either the model (w) or the predicted labels should be None")
raise Exception("Either the model (w) or the predicted labels should be None")
if model is not None:
y_test_predicted = np.sign(np.dot(x_test, model))
y_train_predicted = np.sign(np.dot(x_train, model))
def get_accuracy(y, Y_predicted):
correct_answers = (Y_predicted == y).astype(int) # will have 1 when the prediction and the actual label match
accuracy = float(sum(correct_answers)) / float(len(correct_answers))
return accuracy, sum(correct_answers)
train_score, correct_answers_train = get_accuracy(y_train, y_train_predicted)
test_score, correct_answers_test = get_accuracy(y_test, y_test_predicted)
return train_score, test_score, correct_answers_train, correct_answers_test
def test_sensitive_attr_constraint_cov(model, x_arr, y_arr_dist_boundary, x_control, thresh, verbose):
"""
The covariance is computed b/w the sensitive attr val and the distance from the boundary
    If the model is None, we assume that y_arr_dist_boundary contains the distance from the decision boundary
    If the model is not None, we just compute the dot product of the model and x_arr
    for the case of SVM, we pass the distance from the boundary because the intercept is internalized for the class
    and we have to compute the distance using the project function
    this function will return a negative value if the constraint specified by the thresh parameter is not satisfied
    otherwise it will return a non-negative value
if the return value is >=0, then the constraint is satisfied
"""
assert(x_arr.shape[0] == x_control.shape[0])
if len(x_control.shape) > 1: # make sure we just have one column in the array
assert(x_control.shape[1] == 1)
arr = []
if model is None:
arr = y_arr_dist_boundary # simply the output labels
else:
arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label
arr = np.array(arr, dtype=np.float64)
cov = np.dot(x_control - np.mean(x_control), arr ) / float(len(x_control))
ans = thresh - abs(cov) # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied
# ans = thresh - cov # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied
if verbose is True:
print("Covariance is", cov)
print("Diff is:", ans)
print
return ans
def print_covariance_sensitive_attrs(model, x_arr, y_arr_dist_boundary, x_control, sensitive_attrs):
"""
    returns the covariance between sensitive features and distance from decision boundary
"""
arr = []
if model is None:
        arr = y_arr_dist_boundary # simply the output labels
else:
arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label
sensitive_attrs_to_cov_original = {}
for attr in sensitive_attrs:
attr_arr = x_control[attr]
bin_attr = check_binary(attr_arr) # check if the attribute is binary (0/1), or has more than 2 vals
if bin_attr == False: # if its a non-binary sensitive feature, then perform one-hot-encoding
attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)
thresh = 0
if bin_attr:
cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, np.array(attr_arr), thresh, False)
sensitive_attrs_to_cov_original[attr] = cov
else: # sensitive feature has more than 2 categorical values
cov_arr = []
sensitive_attrs_to_cov_original[attr] = {}
for attr_val, ind in index_dict.items():
t = attr_arr_transformed[:,ind]
cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, t, thresh, False)
sensitive_attrs_to_cov_original[attr][attr_val] = cov
cov_arr.append(abs(cov))
cov = max(cov_arr)
return sensitive_attrs_to_cov_original
def get_correlations(model, x_test, y_predicted, x_control_test, sensitive_attrs):
"""
returns the fraction in positive class for sensitive feature values
"""
if model is not None:
y_predicted = np.sign(np.dot(x_test, model))
y_predicted = np.array(y_predicted)
out_dict = {}
for attr in sensitive_attrs:
attr_val = []
for v in x_control_test[attr]: attr_val.append(v)
assert(len(attr_val) == len(y_predicted))
total_per_val = defaultdict(int)
attr_to_class_labels_dict = defaultdict(lambda: defaultdict(int))
for i in range(0, len(y_predicted)):
val = attr_val[i]
label = y_predicted[i]
# val = attr_val_int_mapping_dict_reversed[val] # change values from intgers to actual names
total_per_val[val] += 1
attr_to_class_labels_dict[val][label] += 1
class_labels = set(y_predicted.tolist())
local_dict_1 = {}
for k1,v1 in attr_to_class_labels_dict.items():
total_this_val = total_per_val[k1]
local_dict_2 = {}
for k2 in class_labels: # the order should be the same for printing
v2 = v1[k2]
f = float(v2) * 100.0 / float(total_this_val)
local_dict_2[k2] = f
local_dict_1[k1] = local_dict_2
out_dict[attr] = local_dict_1
return out_dict
def get_constraint_list_cov(x_train, y_train, x_control_train, sensitive_attrs, sensitive_attrs_to_cov_thresh):
"""
get the list of constraints to be fed to the minimizer
"""
constraints = []
for attr in sensitive_attrs:
attr_arr = x_control_train[attr]
attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)
if index_dict is None: # binary attribute
thresh = sensitive_attrs_to_cov_thresh[attr]
c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov, 'args':(x_train, y_train, attr_arr_transformed,thresh, False)})
constraints.append(c)
else: # otherwise, its a categorical attribute, so we need to set the cov thresh for each value separately
for attr_val, ind in index_dict.items():
attr_name = attr_val
thresh = sensitive_attrs_to_cov_thresh[attr][attr_name]
t = attr_arr_transformed[:,ind]
c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov, 'args':(x_train, y_train, t ,thresh, False)})
constraints.append(c)
return constraints
def split_into_train_test(x_all, y_all, x_control_all, train_fold_size):
split_point = int(round(float(x_all.shape[0]) * train_fold_size))
x_all_train = x_all[:split_point]
x_all_test = x_all[split_point:]
y_all_train = y_all[:split_point]
y_all_test = y_all[split_point:]
x_control_all_train = {}
x_control_all_test = {}
for k in x_control_all.keys():
x_control_all_train[k] = x_control_all[k][:split_point]
x_control_all_test[k] = x_control_all[k][split_point:]
return x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test
def get_avg_correlation_dict(correlation_dict_arr):
# make the structure for the correlation dict
correlation_dict_avg = {}
# print correlation_dict_arr
for k,v in correlation_dict_arr[0].items():
correlation_dict_avg[k] = {}
for feature_val, feature_dict in v.items():
correlation_dict_avg[k][feature_val] = {}
for class_label, frac_class in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label] = []
# populate the correlation dict
for correlation_dict in correlation_dict_arr:
for k,v in correlation_dict.items():
for feature_val, feature_dict in v.items():
for class_label, frac_class in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label].append(frac_class)
# now take the averages
for k,v in correlation_dict_avg.items():
for feature_val, feature_dict in v.items():
for class_label, frac_class_arr in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label] = np.mean(frac_class_arr)
return correlation_dict_avg
def plot_cov_thresh_vs_acc_pos_ratio(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs):
    # vary the covariance threshold using a range of decreasing multiplicative factors and see the tradeoffs between accuracy and fairness
it = 0.05
cov_range = np.arange(1.0, 0.0-it, -it).tolist()
if apply_accuracy_constraint == True:
if sep_constraint == False:
it = 0.1
cov_range = np.arange(0.0, 1.0 + it, it).tolist()
if sep_constraint == True:
cov_range = [0,1,5,10,20,50,100,500,1000]
positive_class_label = 1 # positive class is +1
train_acc = []
test_acc = []
positive_per_category = defaultdict(list) # for each category (male / female), the frac of positive
# first get the original values of covariance in the unconstrained classifier -- these original values are not needed for reverse constraint
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, 0, apply_accuracy_constraint, sep_constraint, sensitive_attrs, [{} for i in range(0,num_folds)], 0)
for c in cov_range:
print("LOG: testing for multiplicative factor: %0.2f" % c)
sensitive_attrs_to_cov_original_arr_multiplied = []
for sensitive_attrs_to_cov_original in cov_dict_train_arr:
sensitive_attrs_to_cov_thresh = deepcopy(sensitive_attrs_to_cov_original)
for k in sensitive_attrs_to_cov_thresh.keys():
v = sensitive_attrs_to_cov_thresh[k]
if type(v) == type({}):
for k1 in v.keys():
v[k1] = v[k1] * c
else:
sensitive_attrs_to_cov_thresh[k] = v * c
sensitive_attrs_to_cov_original_arr_multiplied.append(sensitive_attrs_to_cov_thresh)
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_original_arr_multiplied, c)
test_acc.append(np.mean(test_acc_arr))
correlation_dict_train = get_avg_correlation_dict(correlation_dict_train_arr)
correlation_dict_test = get_avg_correlation_dict(correlation_dict_test_arr)
        # just plot the correlations for the first sensitive attr, the plotting can be extended for the other values, but as a proof of concept, we will just show for one
s = sensitive_attrs[0]
for k,v in correlation_dict_test[s].items():
if v.get(positive_class_label) is None:
positive_per_category[k].append(0.0)
else:
positive_per_category[k].append(v[positive_class_label])
positive_per_category = dict(positive_per_category)
p_rule_arr = (np.array(positive_per_category[0]) / np.array(positive_per_category[1])) * 100.0
ax = plt.subplot(2,1,1)
plt.plot(cov_range, positive_per_category[0], "-o" , color="green", label = "Protected")
plt.plot(cov_range, positive_per_category[1], "-o", color="blue", label = "Non-protected")
ax.set_xlim([min(cov_range), max(cov_range)])
plt.xlabel('Multiplicative loss factor')
plt.ylabel('Perc. in positive class')
if apply_accuracy_constraint == False:
plt.gca().invert_xaxis()
plt.xlabel('Multiplicative covariance factor (c)')
ax.legend()
ax = plt.subplot(2,1,2)
plt.scatter(p_rule_arr, test_acc, color="red")
ax.set_xlim([min(p_rule_arr), max(max(p_rule_arr), 100)])
plt.xlabel('P% rule')
plt.ylabel('Accuracy')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
plt.show()
def get_line_coordinates(w, x1, x2):
y1 = (-w[0] - (w[1] * x1)) / w[2]
y2 = (-w[0] - (w[1] * x2)) / w[2]
return y1,y2
| 27,182
| 41.606583
| 357
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/zafar_method/funcs_disp_mist_after.py
|
from __future__ import division
import os, sys
import traceback
import numpy as np
from random import seed, shuffle
from collections import defaultdict
from copy import deepcopy
import cvxpy
import dccp
from dccp.problem import is_dccp
import utils as ut
SEED = 1122334455
seed(SEED) # set the random seed so that the random permutations can be reproduced again
np.random.seed(SEED)
def train_model_disp_mist(x, y, x_control, loss_function, EPS, cons_params=None):
# cons_type, sensitive_attrs_to_cov_thresh, take_initial_sol, gamma, tau, mu, EPS, cons_type
"""
Function that trains the model subject to various fairness constraints.
If no constraints are given, then simply trains an unaltered classifier.
Example usage in: "disparate_mistreatment/synthetic_data_demo/decision_boundary_demo.py"
----
Inputs:
X: (n) x (d+1) numpy array -- n = number of examples, d = number of features, one feature is the intercept
y: 1-d numpy array (n entries)
x_control: dictionary of the type {"s": [...]}, key "s" is the sensitive feature name, and the value is a 1-d list with n elements holding the sensitive feature values
loss_function: the loss function that we want to optimize -- for now we have implementation of logistic loss, but other functions like hinge loss can also be added
EPS: stopping criteria for the convex solver. check the CVXPY documentation for details. default for CVXPY is 1e-6
cons_params: is None when we do not want to apply any constraints
otherwise: cons_params is a dict with keys as follows:
- cons_type:
- 0 for all misclassifications
- 1 for FPR
- 2 for FNR
- 4 for both FPR and FNR
- tau: DCCP parameter, controls how much weight to put on the constraints, if the constraints are not satisfied, then increase tau -- default is DCCP val 0.005
- mu: DCCP parameter, controls the multiplicative factor by which the tau increases in each DCCP iteration -- default is the DCCP val 1.2
- take_initial_sol: whether the starting point for DCCP should be the solution for the original (unconstrained) classifier -- default value is True
- sensitive_attrs_to_cov_thresh: covariance threshold for each cons_type, eg, key 1 contains the FPR covariance
----
Outputs:
w: the learned weight vector for the classifier
"""
max_iters = 100 # for the convex program
max_iter_dccp = 50 # for the dccp algo
num_points, num_features = x.shape
w = cvxpy.Variable(num_features) # this is the weight vector
# initialize a random value of w
np.random.seed(112233)
w.value = np.random.rand(x.shape[1])
if cons_params is None: # just train a simple classifier, no fairness constraints
constraints = []
else:
constraints = get_constraint_list_cov(x, y, x_control, cons_params["sensitive_attrs_to_cov_thresh"],
cons_params["cons_type"], w)
if loss_function == "logreg":
# constructing the logistic loss problem
loss = cvxpy.sum(
            cvxpy.logistic(cvxpy.multiply(-y, x * w))) / num_points  # average logistic loss over all points
# sometimes, its a good idea to give a starting point to the constrained solver
# this starting point for us is the solution to the unconstrained optimization problem
# another option of starting point could be any feasible solution
    take_initial_sol = True  # True by default
    if cons_params is not None and cons_params.get("take_initial_sol") == False:
        take_initial_sol = False
if take_initial_sol == True: # get the initial solution
p = cvxpy.Problem(cvxpy.Minimize(loss), [])
p.solve()
# construct the cvxpy problem
prob = cvxpy.Problem(cvxpy.Minimize(loss), constraints)
try:
tau, mu = 0.005, 1.2 # default dccp parameters, need to be varied per dataset
if cons_params is not None: # in case we passed these parameters as a part of dccp constraints
if cons_params.get("tau") is not None: tau = cons_params["tau"]
if cons_params.get("mu") is not None: mu = cons_params["mu"]
prob.solve(method='dccp', tau=tau, mu=mu, tau_max=1e10,
solver=cvxpy.ECOS, verbose=False,
feastol=EPS, abstol=EPS, reltol=EPS, feastol_inacc=EPS, abstol_inacc=EPS, reltol_inacc=EPS,
max_iters=max_iters, max_iter=max_iter_dccp)
assert (prob.status == "Converged" or prob.status == "optimal")
# print "Optimization done, problem status:", prob.status
except:
traceback.print_exc()
sys.stdout.flush()
sys.exit(1)
# # check that the fairness constraint is satisfied
# for f_c in constraints:
# assert (f_c.value == True) # can comment this out if the solver fails too often, but make sure that the constraints are satisfied empirically. alternatively, consider increasing tau parameter
# pass
w = np.array(w.value).flatten() # flatten converts it to a 1d array
return w
def get_clf_stats(w, x_train, y_train, x_control_train, x_test, y_test, x_control_test, sensitive_attrs):
assert (len(sensitive_attrs) == 1) # ensure that we have just one sensitive attribute
s_attr = "s1" # for now, lets compute the accuracy for just one sensitive attr
# compute distance from boundary
distances_boundary_train = get_distance_boundary(w, x_train, x_control_train[s_attr])
distances_boundary_test = get_distance_boundary(w, x_test, x_control_test[s_attr])
# compute the class labels
all_class_labels_assigned_train = np.sign(distances_boundary_train)
all_class_labels_assigned_test = np.sign(distances_boundary_test)
train_score, test_score, correct_answers_train, correct_answers_test = ut.check_accuracy(None, x_train, y_train,
x_test, y_test,
all_class_labels_assigned_train,
all_class_labels_assigned_test)
cov_all_train = {}
cov_all_test = {}
for s_attr in sensitive_attrs:
print_stats = False  # we aren't printing the stats for the train set to avoid clutter
# uncomment these lines to print stats for the train fold
# print "*** Train ***"
# print "Accuracy: %0.3f" % (train_score)
# print_stats = True
s_attr_to_fp_fn_train = get_fpr_fnr_sensitive_features(y_train, all_class_labels_assigned_train,
x_control_train, sensitive_attrs, print_stats)
cov_all_train[s_attr] = get_sensitive_attr_constraint_fpr_fnr_cov(None, x_train, y_train,
distances_boundary_train,
x_control_train[s_attr])
print_stats = True # only print stats for the test fold
s_attr_to_fp_fn_test = get_fpr_fnr_sensitive_features(y_test, all_class_labels_assigned_test, x_control_test,
sensitive_attrs, print_stats)
cov_all_test[s_attr] = get_sensitive_attr_constraint_fpr_fnr_cov(None, x_test, y_test, distances_boundary_test,
x_control_test[s_attr])
return train_score, test_score, cov_all_train, cov_all_test, s_attr_to_fp_fn_train, s_attr_to_fp_fn_test, \
all_class_labels_assigned_train, all_class_labels_assigned_test
def get_distance_boundary(w, x, s_attr_arr):
"""
if we have boundaries per group, then use those separate boundaries for each sensitive group
else, use the same weight vector for everything
"""
distances_boundary = np.zeros(x.shape[0])
if isinstance(w, dict): # if we have separate weight vectors per group
for k in w.keys(): # for each w corresponding to each sensitive group
d = np.dot(x, w[k])
distances_boundary[s_attr_arr == k] = d[
s_attr_arr == k] # set this distance only for people with this sensitive attr val
else:  # otherwise we use a single w for everyone
distances_boundary = np.dot(x, w)
return distances_boundary
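def _example_distance_boundary():
    # Toy illustration (values invented): a single weight vector gives one shared boundary,
    # while a dict keyed by sensitive-attribute value applies a separate boundary per group.
    x = np.array([[1.0, 0.5], [1.0, -0.2]])
    s = np.array([0, 1])
    d_shared = get_distance_boundary(np.array([0.1, 0.9]), x, s)
    d_per_group = get_distance_boundary({0: np.array([0.1, 0.9]), 1: np.array([0.2, -0.3])}, x, s)
    return d_shared, d_per_group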
def get_constraint_list_cov(x_train, y_train, x_control_train, sensitive_attrs_to_cov_thresh, cons_type, w):
"""
get the list of constraints to be fed to the minimizer
cons_type == 0: means the whole combined misclassification constraint (without FNR or FPR)
cons_type == 1: FPR constraint
cons_type == 2: FNR constraint
cons_type == 4: both FPR as well as FNR constraints
sensitive_attrs_to_cov_thresh: is a dict like {s: {cov_type: val}}
s is the sensitive attr
cov_type is the covariance type. contains the covariance for all misclassifications, FPR and for FNR etc
"""
constraints = []
for attr in ["s1"]:
attr_arr = x_control_train[attr]
# attr_arr = x_control_train[attr]
# print(attr_arr)
attr_arr_transformed = attr_arr
# if index_dict is None: # binary attribute, in this case, the attr_arr_transformed is the same as the attr_arr
s_val_to_total = {ct: {} for ct in [0, 1, 2]}  # constraint type -> sens_attr_val -> total number
s_val_to_avg = {ct: {} for ct in [0, 1, 2]}
cons_sum_dict = {ct: {} for ct in
[0, 1, 2]}  # per-group sums of the constraint terms (e.g. females and males) are stored here
for v in set(attr_arr):
s_val_to_total[0][v] = sum(x_control_train[attr] == v)
s_val_to_total[1][v] = sum(np.logical_and(x_control_train[attr] == v,
y_train == -1)) # FPR constraint so we only consider the ground truth negative dataset for computing the covariance
s_val_to_total[2][v] = sum(np.logical_and(x_control_train[attr] == v, y_train == +1))
for ct in [0, 1, 2]:
s_val_to_avg[ct][0] = s_val_to_total[ct][1] / float(s_val_to_total[ct][0] + s_val_to_total[ct][
1]) # N1/N in our formulation, differs from one constraint type to another
s_val_to_avg[ct][1] = 1.0 - s_val_to_avg[ct][0] # N0/N
for v in set(attr_arr):
idx = x_control_train[attr] == v
#################################################################
# #DCCP constraints
dist_bound_prod = cvxpy.multiply(y_train[idx], x_train[idx] * w) # y.f(x)
cons_sum_dict[0][v] = cvxpy.sum(cvxpy.minimum(0, dist_bound_prod)) * (
s_val_to_avg[0][v] / len(x_train)) # avg misclassification distance from boundary
cons_sum_dict[1][v] = cvxpy.sum(
cvxpy.minimum(0, cvxpy.multiply((1 - y_train[idx]) / 2.0, dist_bound_prod))) * (
s_val_to_avg[1][v] / sum(
y_train == -1)) # avg false positive distance from boundary (only operates on the ground truth neg dataset)
cons_sum_dict[2][v] = cvxpy.sum(
cvxpy.minimum(0, cvxpy.multiply((1 + y_train[idx]) / 2.0, dist_bound_prod))) * (
s_val_to_avg[2][v] / sum(
y_train == +1)) # avg false negative distance from boundary
#################################################################
if cons_type == 4:
cts = [1, 2]
elif cons_type in [0, 1, 2]:
cts = [cons_type]
else:
raise Exception("Invalid constraint type")
#################################################################
# DCCP constraints
for ct in cts:
print(ct)
thresh = abs(sensitive_attrs_to_cov_thresh[attr][ct][1] - sensitive_attrs_to_cov_thresh[attr][ct][0])
constraints.append(cons_sum_dict[ct][1] <= cons_sum_dict[ct][0] + thresh)
constraints.append(cons_sum_dict[ct][1] >= cons_sum_dict[ct][0] - thresh)
#################################################################
return constraints
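def _example_dccp_constraint_list():
    # Hedged sketch (synthetic data; all threshold values invented): builds the DCCP fairness
    # constraints for an FPR-only run (cons_type=1) on a tiny toy problem; relies on the same
    # cvxpy expression conventions (x * w) as the functions above.
    n, d = 20, 3
    x_toy = np.random.randn(n, d)
    y_toy = np.array([-1.0, 1.0, 1.0, -1.0] * (n // 4))
    x_control_toy = {"s1": np.array([0, 1] * (n // 2))}
    w_var = cvxpy.Variable(d)
    thresh = {"s1": {ct: {0: 0.0, 1: 0.0} for ct in [0, 1, 2]}}
    return get_constraint_list_cov(x_toy, y_toy, x_control_toy, thresh, cons_type=1, w=w_var)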
def get_fpr_fnr_sensitive_features(y_true, y_pred, x_control, sensitive_attrs, verbose=False):
# we will make some changes to x_control in this function, so make a copy in order to preserve the original referenced object
x_control_internal = deepcopy(x_control)
s_attr_to_fp_fn = {}
for s in sensitive_attrs:
s_attr_to_fp_fn[s] = {}
s_attr_vals = x_control_internal[s]
for s_val in sorted(list(set(s_attr_vals))):
s_attr_to_fp_fn[s][s_val] = {}
y_true_local = y_true[s_attr_vals == s_val]
y_pred_local = y_pred[s_attr_vals == s_val]
acc = float(sum(y_true_local == y_pred_local)) / len(y_true_local)
fp = sum(np.logical_and(y_true_local == -1.0,
y_pred_local == +1.0)) # something which is -ve but is misclassified as +ve
fn = sum(np.logical_and(y_true_local == +1.0,
y_pred_local == -1.0)) # something which is +ve but is misclassified as -ve
tp = sum(np.logical_and(y_true_local == +1.0,
y_pred_local == +1.0)) # something which is +ve AND is correctly classified as +ve
tn = sum(np.logical_and(y_true_local == -1.0,
y_pred_local == -1.0)) # something which is -ve AND is correctly classified as -ve
all_neg = sum(y_true_local == -1.0)
all_pos = sum(y_true_local == +1.0)
# fpr = float(fp) / float(fp + tn)
# fnr = float(fn) / float(fn + tp)
# tpr = float(tp) / float(tp + fn)
# tnr = float(tn) / float(tn + fp)
fpr = 0.0 if float(fp + tn) == 0 else float(fp) / float(fp + tn)
fnr = 0.0 if float(fn + tp) == 0 else float(fn) / float(fn + tp)
tpr = 0.0 if float(tp + fn) == 0 else float(tp) / float(tp + fn)
tnr = 0.0 if float(tn + fp) == 0 else float(tn) / float(tn + fp)
s_attr_to_fp_fn[s][s_val]["fp"] = fp
s_attr_to_fp_fn[s][s_val]["fn"] = fn
s_attr_to_fp_fn[s][s_val]["fpr"] = fpr
s_attr_to_fp_fn[s][s_val]["fnr"] = fnr
s_attr_to_fp_fn[s][s_val]["acc"] = (tp + tn) / (tp + tn + fp + fn)
if verbose == True:
if isinstance(s_val, float): # print the int value of the sensitive attr val
s_val = int(s_val)
return s_attr_to_fp_fn
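def _example_fpr_fnr_per_group():
    # Toy illustration (labels and sensitive attribute invented): computes per-group FPR/FNR
    # statistics for a single binary attribute named "s1".
    y_true = np.array([+1.0, -1.0, +1.0, -1.0])
    y_pred = np.array([+1.0, +1.0, -1.0, -1.0])
    x_control = {"s1": np.array([0.0, 0.0, 1.0, 1.0])}
    stats = get_fpr_fnr_sensitive_features(y_true, y_pred, x_control, ["s1"], verbose=False)
    # e.g. stats["s1"][0.0]["fpr"] is the false-positive rate within the group with s1 == 0
    return stats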
def get_sensitive_attr_constraint_fpr_fnr_cov(model, x_arr, y_arr_true, y_arr_dist_boundary, x_control_arr,
verbose=False):
"""
Here we compute the covariance between the sensitive attr value and the misclassification distances from the boundary, for all misclassifications as well as for FPR and FNR:
(-N_1 / N) sum_0(min(0, y.f(x))) + (N_0 / N) sum_1(min(0, y.f(x))) for all misclassifications
(-N_1 / N) sum_0(min(0, (1-y)/2 . y.f(x))) + (N_0 / N) sum_1(min(0, (1-y)/2. y.f(x))) for FPR
y_arr_true are the true class labels
y_arr_dist_boundary are the predicted distances from the decision boundary
If the model is None, we assume that y_arr_dist_boundary contains the distances from the decision boundary
If the model is not None, we just compute the dot product of model and x_arr
for the case of SVM, we pass the distance from the boundary because the intercept is internalized for the class
and we have to compute the distance using the project function
this function returns cons_sum_dict, the per-group constraint sums for all misclassifications, FPR and FNR;
the covariance for each constraint type is the difference cons_sum_dict[ct][1] - cons_sum_dict[ct][0]
"""
assert (x_arr.shape[0] == x_control_arr.shape[0])
if len(x_control_arr.shape) > 1: # make sure we just have one column in the array
assert (x_control_arr.shape[1] == 1)
if len(set(x_control_arr)) != 2: # non binary attr
raise Exception("Non binary attr, fix to handle non bin attrs")
arr = []
if model is None:
arr = y_arr_dist_boundary * y_arr_true # simply the output labels
else:
arr = np.dot(model,
x_arr.T) * y_arr_true # the product with the weight vector -- the sign of this is the output label
arr = np.array(arr)
s_val_to_total = {ct: {} for ct in [0, 1, 2]}
s_val_to_avg = {ct: {} for ct in [0, 1, 2]}
cons_sum_dict = {ct: {} for ct in [0, 1, 2]}  # per-group sums of the constraint terms (e.g. females and males) are stored here
for v in set(x_control_arr):
s_val_to_total[0][v] = sum(x_control_arr == v)
s_val_to_total[1][v] = sum(np.logical_and(x_control_arr == v, y_arr_true == -1))
s_val_to_total[2][v] = sum(np.logical_and(x_control_arr == v, y_arr_true == +1))
for ct in [0, 1, 2]:
s_val_to_avg[ct][0] = s_val_to_total[ct][1] / float(s_val_to_total[ct][0] + s_val_to_total[ct][1]) # N1 / N
s_val_to_avg[ct][1] = 1.0 - s_val_to_avg[ct][0] # N0 / N
for v in set(x_control_arr):
idx = x_control_arr == v
dist_bound_prod = arr[idx]
cons_sum_dict[0][v] = sum(np.minimum(0, dist_bound_prod)) * (s_val_to_avg[0][v] / len(x_arr))
cons_sum_dict[1][v] = sum(np.minimum(0, ((1 - y_arr_true[idx]) / 2) * dist_bound_prod)) * (
s_val_to_avg[1][v] / sum(y_arr_true == -1))
cons_sum_dict[2][v] = sum(np.minimum(0, ((1 + y_arr_true[idx]) / 2) * dist_bound_prod)) * (
s_val_to_avg[2][v] / sum(y_arr_true == +1))
cons_type_to_name = {0: "ALL", 1: "FPR", 2: "FNR"}
for cons_type in [0, 1, 2]:
cov_type_name = cons_type_to_name[cons_type]
cov = cons_sum_dict[cons_type][1] - cons_sum_dict[cons_type][0]  # covariance for this constraint type (computed for inspection only; not returned)
return cons_sum_dict
| 18,388
| 49.798343
| 202
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/zafar_method/utils_after.py
|
import numpy as np
from random import seed, shuffle
import loss_funcs as lf # our implementation of loss funcs
from scipy.optimize import minimize # for loss func minimization
from multiprocessing import Pool, Process, Queue
from collections import defaultdict
from copy import deepcopy
import matplotlib.pyplot as plt # for plotting stuff
import sys
SEED = 1122334455
seed(SEED) # set the random seed so that the random permutations can be reproduced again
np.random.seed(SEED)
def train_model(x, y, x_control, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint,
sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma):
"""
Function that trains the model subject to various fairness constraints.
If no constraints are given, then simply trains an unaltered classifier.
Example usage in: "synthetic_data_demo/decision_boundary_demo.py"
----
Inputs:
X: (n) x (d+1) numpy array -- n = number of examples, d = number of features, one feature is the intercept
y: 1-d numpy array (n entries)
x_control: dictionary of the type {"s": [...]}, key "s" is the sensitive feature name, and the value is a 1-d list with n elements holding the sensitive feature values
loss_function: the loss function that we want to optimize -- for now we have implementation of logistic loss, but other functions like hinge loss can also be added
apply_fairness_constraints: optimize accuracy subject to fairness constraint (0/1 values)
apply_accuracy_constraint: optimize fairness subject to accuracy constraint (0/1 values)
sep_constraint: apply the fine grained accuracy constraint
for details, see Section 3.3 of arxiv.org/abs/1507.05259v3
For examples on how to apply these constraints, see "synthetic_data_demo/decision_boundary_demo.py"
Note: both apply_fairness_constraints and apply_accuracy_constraint cannot be 1 at the same time
sensitive_attrs: ["s1", "s2", ...], list of sensitive features for which to apply fairness constraint, all of these sensitive features should have a corresponding array in x_control
sensitive_attrs_to_cov_thresh: the covariance threshold that the classifier should achieve (this is only needed when apply_fairness_constraints=1, not needed for the other two constraints)
gamma: controls the loss in accuracy we are willing to incur when using apply_accuracy_constraint and sep_constraint
----
Outputs:
w: the learned weight vector for the classifier
"""
assert ((apply_accuracy_constraint == 1 and apply_fairness_constraints == 1) == False) # both constraints cannot be applied at the same time
max_iter = 100000 # maximum number of iterations for the minimization algorithm
if apply_fairness_constraints == 0:
constraints = []
else:
constraints = get_constraint_list_cov(x, y, x_control, sensitive_attrs, sensitive_attrs_to_cov_thresh)
if apply_accuracy_constraint == 0: # its not the reverse problem, just train w with cross cov constraints
f_args = (x, y)
w = minimize(fun=loss_function,
x0=np.random.rand(x.shape[1], ),
args=f_args,
method='SLSQP',
options={"maxiter": max_iter},
constraints=constraints
)
else:
# train on just the loss function
w = minimize(fun=loss_function,
x0=np.random.rand(x.shape[1], ),
args=(x, y),
method='SLSQP',
options={"maxiter": max_iter},
constraints=[]
)
old_w = deepcopy(w.x)
def constraint_gamma_all(w, x, y, initial_loss_arr):
gamma_arr = np.ones_like(y) * gamma # set gamma for everyone
new_loss = loss_function(w, x, y)
old_loss = sum(initial_loss_arr)
return ((1.0 + gamma) * old_loss) - new_loss
def constraint_protected_people(w, x,
y):  # don't confuse the protected here with the sensitive feature protected/non-protected values -- protected here means that these points should not be misclassified into the negative class
return np.dot(w, x.T) # if this is positive, the constraint is satisfied
def constraint_unprotected_people(w, ind, old_loss, x, y):
new_loss = loss_function(w, np.array([x]), np.array(y))
return ((1.0 + gamma) * old_loss) - new_loss
constraints = []
predicted_labels = np.sign(np.dot(w.x, x.T))
unconstrained_loss_arr = loss_function(w.x, x, y, return_arr=True)
if sep_constraint == True:  # separate gamma for different people
for i in range(0, len(predicted_labels)):
if predicted_labels[i] == 1.0 and x_control[sensitive_attrs[0]][
i] == 1.0: # for now we are assuming just one sensitive attr for reverse constraint, later, extend the code to take into account multiple sensitive attrs
c = ({'type': 'ineq', 'fun': constraint_protected_people, 'args': (x[i], y[
i])}) # this constraint makes sure that these people stay in the positive class even in the modified classifier
constraints.append(c)
else:
c = ({'type': 'ineq', 'fun': constraint_unprotected_people,
'args': (i, unconstrained_loss_arr[i], x[i], y[i])})
constraints.append(c)
else: # same gamma for everyone
c = ({'type': 'ineq', 'fun': constraint_gamma_all, 'args': (x, y, unconstrained_loss_arr)})
constraints.append(c)
def cross_cov_abs_optm_func(weight_vec, x_in, x_control_in_arr):
cross_cov = (x_control_in_arr - np.mean(x_control_in_arr)) * np.dot(weight_vec, x_in.T)
return float(abs(sum(cross_cov))) / float(x_in.shape[0])
w = minimize(fun=cross_cov_abs_optm_func,
x0=old_w,
args=(x, x_control[sensitive_attrs[0]]),
method='SLSQP',
options={"maxiter": 100000},
constraints=constraints
)
return w.x
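def _example_train_model_usage():
    # Hedged usage sketch of train_model on synthetic data (all values invented).
    # Assumes the imported loss_funcs module exposes a logistic loss as lf._logistic_loss;
    # adjust the name if your loss_funcs implementation differs.
    n = 200
    x = add_intercept(np.random.randn(n, 2))  # add_intercept is defined later in this module
    y = np.sign(np.dot(x, np.array([0.0, 1.0, -1.0])))
    x_control = {"s1": (np.random.rand(n) > 0.5).astype(int)}
    w = train_model(x, y, x_control, lf._logistic_loss,
                    apply_fairness_constraints=1, apply_accuracy_constraint=0, sep_constraint=0,
                    sensitive_attrs=["s1"], sensitive_attrs_to_cov_thresh={"s1": 0.0}, gamma=None)
    return w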
def compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints,
apply_accuracy_constraint, sep_constraint, sensitive_attrs,
sensitive_attrs_to_cov_thresh_arr, gamma):
"""
Computes the cross validation error for the classifier subject to various fairness constraints
This function is just a wrapper of "train_model(...)", all inputs (except for num_folds) are the same. See the specifications of train_model(...) for more info.
Returns lists of train/test accuracy (with each list holding values for all folds), the fractions of various sensitive groups in positive class (for train and test sets), and covariance between sensitive feature and distance from decision boundary (again, for both train and test folds).
"""
train_folds = []
test_folds = []
n_samples = len(y_all)
train_fold_size = 0.7 # the rest of 0.3 is for testing
# split the data into folds for cross-validation
for i in range(0, num_folds):
perm = list(range(0, n_samples))  # shuffle the data before creating each fold
shuffle(perm)
x_all_perm = x_all[perm]
y_all_perm = y_all[perm]
x_control_all_perm = {}
for k in x_control_all.keys():
x_control_all_perm[k] = np.array(x_control_all[k])[perm]
x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test = split_into_train_test(
x_all_perm, y_all_perm, x_control_all_perm, train_fold_size)
train_folds.append([x_all_train, y_all_train, x_control_all_train])
test_folds.append([x_all_test, y_all_test, x_control_all_test])
def train_test_single_fold(train_data, test_data, fold_num, output_folds, sensitive_attrs_to_cov_thresh):
x_train, y_train, x_control_train = train_data
x_test, y_test, x_control_test = test_data
w = train_model(x_train, y_train, x_control_train, loss_function, apply_fairness_constraints,
apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh,
gamma)
train_score, test_score, correct_answers_train, correct_answers_test = check_accuracy(w, x_train, y_train,
x_test, y_test, None,
None)
distances_boundary_test = (np.dot(x_test, w)).tolist()
all_class_labels_assigned_test = np.sign(distances_boundary_test)
correlation_dict_test = get_correlations(None, None, all_class_labels_assigned_test, x_control_test,
sensitive_attrs)
cov_dict_test = print_covariance_sensitive_attrs(None, x_test, distances_boundary_test, x_control_test,
sensitive_attrs)
distances_boundary_train = (np.dot(x_train, w)).tolist()
all_class_labels_assigned_train = np.sign(distances_boundary_train)
correlation_dict_train = get_correlations(None, None, all_class_labels_assigned_train, x_control_train,
sensitive_attrs)
cov_dict_train = print_covariance_sensitive_attrs(None, x_train, distances_boundary_train, x_control_train,
sensitive_attrs)
output_folds.put(
[fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test,
cov_dict_train])
return
output_folds = Queue()
processes = [Process(target=train_test_single_fold,
args=(train_folds[x], test_folds[x], x, output_folds, sensitive_attrs_to_cov_thresh_arr[x]))
for x in range(num_folds)]
# Run processes
for p in processes:
p.start()
# Get the results
results = [output_folds.get() for p in processes]
for p in processes:
p.join()
test_acc_arr = []
train_acc_arr = []
correlation_dict_test_arr = []
correlation_dict_train_arr = []
cov_dict_test_arr = []
cov_dict_train_arr = []
results = sorted(results, key=lambda x: x[0]) # sort w.r.t fold num
for res in results:
fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test, cov_dict_train = res
test_acc_arr.append(test_score)
train_acc_arr.append(train_score)
correlation_dict_test_arr.append(correlation_dict_test)
correlation_dict_train_arr.append(correlation_dict_train)
cov_dict_test_arr.append(cov_dict_test)
cov_dict_train_arr.append(cov_dict_train)
return test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr
def print_classifier_fairness_stats(acc_arr, correlation_dict_arr, cov_dict_arr, s_attr_name):
correlation_dict = get_avg_correlation_dict(correlation_dict_arr)
non_prot_pos = correlation_dict[s_attr_name][1][1]
prot_pos = correlation_dict[s_attr_name][0][1]
p_rule = (prot_pos / non_prot_pos) * 100.0
return p_rule
def compute_p_rule(x_control, class_labels):
""" Compute the p-rule based on Doctrine of disparate impact """
non_prot_all = sum(x_control == 1.0) # non-protected group
prot_all = sum(x_control == 0.0) # protected group
non_prot_pos = sum(class_labels[x_control == 1.0] == 1.0) # non_protected in positive class
prot_pos = sum(class_labels[x_control == 0.0] == 1.0) # protected in positive class
frac_non_prot_pos = float(non_prot_pos) / float(non_prot_all)
frac_prot_pos = float(prot_pos) / float(prot_all)
p_rule = (frac_prot_pos / frac_non_prot_pos) * 100.0
return p_rule
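def _example_p_rule():
    # Toy illustration of compute_p_rule (labels and groups invented): the protected group
    # (x_control == 0) has a 50% positive rate, the non-protected group 100%, so the p-rule is 50.
    x_control = np.array([0.0, 0.0, 1.0, 1.0])
    class_labels = np.array([1.0, -1.0, 1.0, 1.0])
    return compute_p_rule(x_control, class_labels)  # 50.0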
def add_intercept(x):
""" Add intercept to the data before linear classification """
m, n = x.shape
intercept = np.ones(m).reshape(m, 1) # the constant b
return np.concatenate((intercept, x), axis=1)
def check_binary(arr):
"give an array of values, see if the values are only 0 and 1"
s = sorted(set(arr))
if s[0] == 0 and s[1] == 1:
return True
else:
return False
def get_one_hot_encoding(in_arr):
"""
input: 1-D arr with int vals -- if not int vals, will raise an error
output: m (ndarray): one-hot encoded matrix
d (dict): also returns a dictionary original_val -> column in encoded matrix
"""
for k in in_arr:
if not isinstance(k, (int, np.integer, np.floating)):  # only integer-valued entries (possibly stored as floats) are supported; the old Python 2 type-string check breaks under Python 3
return None
in_arr = np.array(in_arr, dtype=int)
assert (len(in_arr.shape) == 1) # no column, means it was a 1-D arr
attr_vals_uniq_sorted = sorted(list(set(in_arr)))
num_uniq_vals = len(attr_vals_uniq_sorted)
if (num_uniq_vals == 2) and (attr_vals_uniq_sorted[0] == 0 and attr_vals_uniq_sorted[1] == 1):
return in_arr, None
index_dict = {} # value to the column number
for i in range(0, len(attr_vals_uniq_sorted)):
val = attr_vals_uniq_sorted[i]
index_dict[val] = i
out_arr = []
for i in range(0, len(in_arr)):
tup = np.zeros(num_uniq_vals)
val = in_arr[i]
ind = index_dict[val]
tup[ind] = 1 # set that value of tuple to 1
out_arr.append(tup)
return np.array(out_arr), index_dict
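def _example_one_hot_encoding():
    # Toy illustration (values invented): a 3-valued attribute is expanded to one-hot columns;
    # a binary 0/1 attribute would instead be returned unchanged with index_dict = None.
    encoded, index_dict = get_one_hot_encoding(np.array([0, 2, 1, 2]))
    # encoded has shape (4, 3); index_dict maps each original value to its column, here {0: 0, 1: 1, 2: 2}
    return encoded, index_dict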
def check_accuracy(model, x_train, y_train, x_test, y_test, y_train_predicted, y_test_predicted):
"""
returns the train/test accuracy of the model
we either pass the model (w)
else we pass y_predicted
"""
if model is not None and y_test_predicted is not None:
raise Exception("Either the model (w) or the predicted labels should be None")
if model is not None:
y_test_predicted = np.sign(np.dot(x_test, model))
y_train_predicted = np.sign(np.dot(x_train, model))
def get_accuracy(y, Y_predicted):
correct_answers = (Y_predicted == y).astype(int) # will have 1 when the prediction and the actual label match
accuracy = float(sum(correct_answers)) / float(len(correct_answers))
return accuracy, sum(correct_answers)
train_score, correct_answers_train = get_accuracy(y_train, y_train_predicted)
test_score, correct_answers_test = get_accuracy(y_test, y_test_predicted)
return train_score, test_score, correct_answers_train, correct_answers_test
def test_sensitive_attr_constraint_cov(model, x_arr, y_arr_dist_boundary, x_control, thresh, verbose):
"""
The covariance is computed b/w the sensitive attr val and the distance from the boundary
If the model is None, we assume that y_arr_dist_boundary contains the distances from the decision boundary
If the model is not None, we just compute the dot product of model and x_arr
for the case of SVM, we pass the distance from the boundary because the intercept is internalized for the class
and we have to compute the distance using the project function
this function returns thresh - |covariance|, which is negative if the constraint specified by the thresh parameter is not satisfied
if the return value is >= 0, then the constraint is satisfied
"""
assert (x_arr.shape[0] == x_control.shape[0])
if len(x_control.shape) > 1: # make sure we just have one column in the array
assert (x_control.shape[1] == 1)
arr = []
if model is None:
arr = y_arr_dist_boundary # simply the output labels
else:
arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label
arr = np.array(arr, dtype=np.float64)
cov = np.dot(x_control - np.mean(x_control), arr) / float(len(x_control))
ans = thresh - abs(
cov) # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied
# ans = thresh - cov # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied
return ans
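def _example_cov_constraint_check():
    # Hedged sketch (data and threshold invented): test_sensitive_attr_constraint_cov returns
    # thresh - |cov(sensitive attr, w^T x)|, so a non-negative value means the constraint holds.
    x = np.array([[1.0, 0.3], [1.0, -0.7], [1.0, 0.1], [1.0, -0.4]])
    w = np.array([0.0, 1.0])
    s = np.array([1.0, 0.0, 1.0, 0.0])
    return test_sensitive_attr_constraint_cov(w, x, None, s, 0.1, False)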
def print_covariance_sensitive_attrs(model, x_arr, y_arr_dist_boundary, x_control, sensitive_attrs):
"""
returns the covariance between sensitive features and distance from decision boundary
"""
arr = []
if model is None:
arr = y_arr_dist_boundary  # simply the output labels
else:
arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label
sensitive_attrs_to_cov_original = {}
for attr in sensitive_attrs:
attr_arr = x_control[attr]
bin_attr = check_binary(attr_arr) # check if the attribute is binary (0/1), or has more than 2 vals
if bin_attr == False: # if its a non-binary sensitive feature, then perform one-hot-encoding
attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)
thresh = 0
if bin_attr:
cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, np.array(attr_arr), thresh, False)
sensitive_attrs_to_cov_original[attr] = cov
else: # sensitive feature has more than 2 categorical values
cov_arr = []
sensitive_attrs_to_cov_original[attr] = {}
for attr_val, ind in index_dict.items():
t = attr_arr_transformed[:, ind]
cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, t, thresh, False)
sensitive_attrs_to_cov_original[attr][attr_val] = cov
cov_arr.append(abs(cov))
cov = max(cov_arr)
return sensitive_attrs_to_cov_original
def get_correlations(model, x_test, y_predicted, x_control_test, sensitive_attrs):
"""
returns the fraction in positive class for sensitive feature values
"""
if model is not None:
y_predicted = np.sign(np.dot(x_test, model))
y_predicted = np.array(y_predicted)
out_dict = {}
for attr in sensitive_attrs:
attr_val = []
for v in x_control_test[attr]: attr_val.append(v)
assert (len(attr_val) == len(y_predicted))
total_per_val = defaultdict(int)
attr_to_class_labels_dict = defaultdict(lambda: defaultdict(int))
for i in range(0, len(y_predicted)):
val = attr_val[i]
label = y_predicted[i]
# val = attr_val_int_mapping_dict_reversed[val] # change values from integers to actual names
total_per_val[val] += 1
attr_to_class_labels_dict[val][label] += 1
class_labels = set(y_predicted.tolist())
local_dict_1 = {}
for k1, v1 in attr_to_class_labels_dict.items():
total_this_val = total_per_val[k1]
local_dict_2 = {}
for k2 in class_labels: # the order should be the same for printing
v2 = v1[k2]
f = float(v2) * 100.0 / float(total_this_val)
local_dict_2[k2] = f
local_dict_1[k1] = local_dict_2
out_dict[attr] = local_dict_1
return out_dict
def get_constraint_list_cov(x_train, y_train, x_control_train, sensitive_attrs, sensitive_attrs_to_cov_thresh):
"""
get the list of constraints to be fed to the minimizer
"""
constraints = []
for attr in sensitive_attrs:
attr_arr = x_control_train[attr]
attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)
if index_dict is None: # binary attribute
thresh = sensitive_attrs_to_cov_thresh[attr]
c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov,
'args': (x_train, y_train, attr_arr_transformed, thresh, False)})
constraints.append(c)
else: # otherwise, its a categorical attribute, so we need to set the cov thresh for each value separately
for attr_val, ind in index_dict.items():
attr_name = attr_val
thresh = sensitive_attrs_to_cov_thresh[attr][attr_name]
t = attr_arr_transformed[:, ind]
c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov,
'args': (x_train, y_train, t, thresh, False)})
constraints.append(c)
return constraints
def split_into_train_test(x_all, y_all, x_control_all, train_fold_size):
split_point = int(round(float(x_all.shape[0]) * train_fold_size))
x_all_train = x_all[:split_point]
x_all_test = x_all[split_point:]
y_all_train = y_all[:split_point]
y_all_test = y_all[split_point:]
x_control_all_train = {}
x_control_all_test = {}
for k in x_control_all.keys():
x_control_all_train[k] = x_control_all[k][:split_point]
x_control_all_test[k] = x_control_all[k][split_point:]
return x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test
def get_avg_correlation_dict(correlation_dict_arr):
# make the structure for the correlation dict
correlation_dict_avg = {}
# print correlation_dict_arr
for k, v in correlation_dict_arr[0].items():
correlation_dict_avg[k] = {}
for feature_val, feature_dict in v.items():
correlation_dict_avg[k][feature_val] = {}
for class_label, frac_class in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label] = []
# populate the correlation dict
for correlation_dict in correlation_dict_arr:
for k, v in correlation_dict.items():
for feature_val, feature_dict in v.items():
for class_label, frac_class in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label].append(frac_class)
# now take the averages
for k, v in correlation_dict_avg.items():
for feature_val, feature_dict in v.items():
for class_label, frac_class_arr in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label] = np.mean(frac_class_arr)
return correlation_dict_avg
def plot_cov_thresh_vs_acc_pos_ratio(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints,
apply_accuracy_constraint, sep_constraint, sensitive_attrs):
# vary the covariance threshold using a range of decreasing multiplicative factors and see the tradeoffs between accuracy and fairness
it = 0.05
cov_range = np.arange(1.0, 0.0 - it, -it).tolist()
if apply_accuracy_constraint == True:
if sep_constraint == False:
it = 0.1
cov_range = np.arange(0.0, 1.0 + it, it).tolist()
if sep_constraint == True:
cov_range = [0, 1, 5, 10, 20, 50, 100, 500, 1000]
positive_class_label = 1 # positive class is +1
train_acc = []
test_acc = []
positive_per_category = defaultdict(list) # for each category (male / female), the frac of positive
# first get the original values of covariance in the unconstrained classifier -- these original values are not needed for reverse constraint
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(
x_all, y_all, x_control_all, num_folds, loss_function, 0, apply_accuracy_constraint, sep_constraint,
sensitive_attrs, [{} for i in range(0, num_folds)], 0)
for c in cov_range:
sensitive_attrs_to_cov_original_arr_multiplied = []
for sensitive_attrs_to_cov_original in cov_dict_train_arr:
sensitive_attrs_to_cov_thresh = deepcopy(sensitive_attrs_to_cov_original)
for k in sensitive_attrs_to_cov_thresh.keys():
v = sensitive_attrs_to_cov_thresh[k]
if type(v) == type({}):
for k1 in v.keys():
v[k1] = v[k1] * c
else:
sensitive_attrs_to_cov_thresh[k] = v * c
sensitive_attrs_to_cov_original_arr_multiplied.append(sensitive_attrs_to_cov_thresh)
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(
x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints,
apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_original_arr_multiplied,
c)
test_acc.append(np.mean(test_acc_arr))
correlation_dict_train = get_avg_correlation_dict(correlation_dict_train_arr)
correlation_dict_test = get_avg_correlation_dict(correlation_dict_test_arr)
# just plot the correlations for the first sensitive attr; the plotting can be extended for the other values, but as a proof of concept, we will just show one
s = sensitive_attrs[0]
for k, v in correlation_dict_test[s].items():
if v.get(positive_class_label) is None:
positive_per_category[k].append(0.0)
else:
positive_per_category[k].append(v[positive_class_label])
positive_per_category = dict(positive_per_category)
p_rule_arr = (np.array(positive_per_category[0]) / np.array(positive_per_category[1])) * 100.0
ax = plt.subplot(2, 1, 1)
plt.plot(cov_range, positive_per_category[0], "-o", color="green", label="Protected")
plt.plot(cov_range, positive_per_category[1], "-o", color="blue", label="Non-protected")
ax.set_xlim([min(cov_range), max(cov_range)])
plt.xlabel('Multiplicative loss factor')
plt.ylabel('Perc. in positive class')
if apply_accuracy_constraint == False:
plt.gca().invert_xaxis()
plt.xlabel('Multiplicative covariance factor (c)')
ax.legend()
ax = plt.subplot(2, 1, 2)
plt.scatter(p_rule_arr, test_acc, color="red")
ax.set_xlim([min(p_rule_arr), max(max(p_rule_arr), 100)])
plt.xlabel('P% rule')
plt.ylabel('Accuracy')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
plt.show()
def get_line_coordinates(w, x1, x2):
y1 = (-w[0] - (w[1] * x1)) / w[2]
y2 = (-w[0] - (w[1] * x2)) / w[2]
return y1, y2
| 26,695
| 44.094595
| 291
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/equal_opportunity/zafar_method/.ipynb_checkpoints/utils-checkpoint.py
|
import numpy as np
from random import seed, shuffle
from zafar_method import loss_funcs as lf # our implementation of loss funcs
from scipy.optimize import minimize # for loss func minimization
from multiprocessing import Pool, Process, Queue
from collections import defaultdict
from copy import deepcopy
import matplotlib.pyplot as plt # for plotting stuff
import sys
def train_model(x, y, x_control, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma=None):
"""
Function that trains the model subject to various fairness constraints.
If no constraints are given, then simply trains an unaltered classifier.
Example usage in: "synthetic_data_demo/decision_boundary_demo.py"
----
Inputs:
X: (n) x (d+1) numpy array -- n = number of examples, d = number of features, one feature is the intercept
y: 1-d numpy array (n entries)
x_control: dictionary of the type {"s": [...]}, key "s" is the sensitive feature name, and the value is a 1-d list with n elements holding the sensitive feature values
loss_function: the loss function that we want to optimize -- for now we have implementation of logistic loss, but other functions like hinge loss can also be added
apply_fairness_constraints: optimize accuracy subject to fairness constraint (0/1 values)
apply_accuracy_constraint: optimize fairness subject to accuracy constraint (0/1 values)
sep_constraint: apply the fine grained accuracy constraint
for details, see Section 3.3 of arxiv.org/abs/1507.05259v3
For examples on how to apply these constraints, see "synthetic_data_demo/decision_boundary_demo.py"
Note: both apply_fairness_constraints and apply_accuracy_constraint cannot be 1 at the same time
sensitive_attrs: ["s1", "s2", ...], list of sensitive features for which to apply fairness constraint, all of these sensitive features should have a corresponding array in x_control
sensitive_attrs_to_cov_thresh: the covariance threshold that the classifier should achieve (this is only needed when apply_fairness_constraints=1, not needed for the other two constraints)
gamma: controls the loss in accuracy we are willing to incur when using apply_accuracy_constraint and sep_constraint
----
Outputs:
w: the learned weight vector for the classifier
"""
assert((apply_accuracy_constraint == 1 and apply_fairness_constraints == 1) == False) # both constraints cannot be applied at the same time
max_iter = 100000 # maximum number of iterations for the minimization algorithm
if apply_fairness_constraints == 0:
constraints = []
else:
constraints = get_constraint_list_cov(x, y, x_control, sensitive_attrs, sensitive_attrs_to_cov_thresh)
if apply_accuracy_constraint == 0: #its not the reverse problem, just train w with cross cov constraints
f_args=(x, y)
w = minimize(fun = loss_function,
x0 = np.random.rand(x.shape[1],),
args = f_args,
method = 'SLSQP',
options = {"maxiter":max_iter},
constraints = constraints
)
else:
# train on just the loss function
w = minimize(fun = loss_function,
x0 = np.random.rand(x.shape[1],),
args = (x, y),
method = 'SLSQP',
options = {"maxiter":max_iter},
constraints = []
)
old_w = deepcopy(w.x)
def constraint_gamma_all(w, x, y, initial_loss_arr):
gamma_arr = np.ones_like(y) * gamma # set gamma for everyone
new_loss = loss_function(w, x, y)
old_loss = sum(initial_loss_arr)
return ((1.0 + gamma) * old_loss) - new_loss
def constraint_protected_people(w,x,y):  # don't confuse the protected here with the sensitive feature protected/non-protected values -- protected here means that these points should not be misclassified into the negative class
return np.dot(w, x.T) # if this is positive, the constraint is satisfied
def constraint_unprotected_people(w,ind,old_loss,x,y):
new_loss = loss_function(w, np.array([x]), np.array(y))
return ((1.0 + gamma) * old_loss) - new_loss
constraints = []
predicted_labels = np.sign(np.dot(w.x, x.T))
unconstrained_loss_arr = loss_function(w.x, x, y, return_arr=True)
if sep_constraint == True:  # separate gamma for different people
for i in range(0, len(predicted_labels)):
if predicted_labels[i] == 1.0 and x_control[sensitive_attrs[0]][i] == 1.0: # for now we are assuming just one sensitive attr for reverse constraint, later, extend the code to take into account multiple sensitive attrs
c = ({'type': 'ineq', 'fun': constraint_protected_people, 'args':(x[i], y[i])}) # this constraint makes sure that these people stay in the positive class even in the modified classifier
constraints.append(c)
else:
c = ({'type': 'ineq', 'fun': constraint_unprotected_people, 'args':(i, unconstrained_loss_arr[i], x[i], y[i])})
constraints.append(c)
else: # same gamma for everyone
c = ({'type': 'ineq', 'fun': constraint_gamma_all, 'args':(x,y,unconstrained_loss_arr)})
constraints.append(c)
def cross_cov_abs_optm_func(weight_vec, x_in, x_control_in_arr):
cross_cov = (x_control_in_arr - np.mean(x_control_in_arr)) * np.dot(weight_vec, x_in.T)
return float(abs(sum(cross_cov))) / float(x_in.shape[0])
w = minimize(fun = cross_cov_abs_optm_func,
x0 = old_w,
args = (x, x_control[sensitive_attrs[0]]),
method = 'SLSQP',
options = {"maxiter":100000},
constraints = constraints
)
try:
assert(w.success == True)
except:
print("Optimization problem did not converge.. Check the solution returned by the optimizer.")
print("Returned solution is:")
print(w)
return w.x
def compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh_arr, gamma):
"""
Computes the cross validation error for the classifier subject to various fairness constraints
This function is just a wrapper of "train_model(...)", all inputs (except for num_folds) are the same. See the specifications of train_model(...) for more info.
Returns lists of train/test accuracy (with each list holding values for all folds), the fractions of various sensitive groups in positive class (for train and test sets), and covariance between sensitive feature and distance from decision boundary (again, for both train and test folds).
"""
train_folds = []
test_folds = []
n_samples = len(y_all)
train_fold_size = 0.7 # the rest of 0.3 is for testing
# split the data into folds for cross-validation
for i in range(0,num_folds):
perm = list(range(0, n_samples))  # shuffle the data before creating each fold
shuffle(perm)
x_all_perm = x_all[perm]
y_all_perm = y_all[perm]
x_control_all_perm = {}
for k in x_control_all.keys():
x_control_all_perm[k] = np.array(x_control_all[k])[perm]
x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test = split_into_train_test(x_all_perm, y_all_perm, x_control_all_perm, train_fold_size)
train_folds.append([x_all_train, y_all_train, x_control_all_train])
test_folds.append([x_all_test, y_all_test, x_control_all_test])
def train_test_single_fold(train_data, test_data, fold_num, output_folds, sensitive_attrs_to_cov_thresh):
x_train, y_train, x_control_train = train_data
x_test, y_test, x_control_test = test_data
w = train_model(x_train, y_train, x_control_train, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
train_score, test_score, correct_answers_train, correct_answers_test = check_accuracy(w, x_train, y_train, x_test, y_test, None, None)
distances_boundary_test = (np.dot(x_test, w)).tolist()
all_class_labels_assigned_test = np.sign(distances_boundary_test)
correlation_dict_test = get_correlations(None, None, all_class_labels_assigned_test, x_control_test, sensitive_attrs)
cov_dict_test = print_covariance_sensitive_attrs(None, x_test, distances_boundary_test, x_control_test, sensitive_attrs)
distances_boundary_train = (np.dot(x_train, w)).tolist()
all_class_labels_assigned_train = np.sign(distances_boundary_train)
correlation_dict_train = get_correlations(None, None, all_class_labels_assigned_train, x_control_train, sensitive_attrs)
cov_dict_train = print_covariance_sensitive_attrs(None, x_train, distances_boundary_train, x_control_train, sensitive_attrs)
output_folds.put([fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test, cov_dict_train])
return
output_folds = Queue()
processes = [Process(target=train_test_single_fold, args=(train_folds[x], test_folds[x], x, output_folds, sensitive_attrs_to_cov_thresh_arr[x])) for x in range(num_folds)]
# Run processes
for p in processes:
p.start()
# Get the results
results = [output_folds.get() for p in processes]
for p in processes:
p.join()
test_acc_arr = []
train_acc_arr = []
correlation_dict_test_arr = []
correlation_dict_train_arr = []
cov_dict_test_arr = []
cov_dict_train_arr = []
results = sorted(results, key = lambda x : x[0]) # sort w.r.t fold num
for res in results:
fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test, cov_dict_train = res
test_acc_arr.append(test_score)
train_acc_arr.append(train_score)
correlation_dict_test_arr.append(correlation_dict_test)
correlation_dict_train_arr.append(correlation_dict_train)
cov_dict_test_arr.append(cov_dict_test)
cov_dict_train_arr.append(cov_dict_train)
return test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr
def print_classifier_fairness_stats(acc_arr, correlation_dict_arr, cov_dict_arr, s_attr_name):
correlation_dict = get_avg_correlation_dict(correlation_dict_arr)
non_prot_pos = correlation_dict[s_attr_name][1][1]
prot_pos = correlation_dict[s_attr_name][0][1]
p_rule = (prot_pos / non_prot_pos) * 100.0
print("Accuracy: %0.2f" % (np.mean(acc_arr)))
print("Protected/non-protected in +ve class: %0.0f%% / %0.0f%%" % (prot_pos, non_prot_pos))
print("P-rule achieved: %0.0f%%" % (p_rule))
print("Covariance between sensitive feature and decision from distance boundary : %0.3f" % (np.mean([v[s_attr_name] for v in cov_dict_arr])))
return p_rule
def compute_p_rule(x_control, class_labels):
""" Compute the p-rule based on Doctrine of disparate impact """
non_prot_all = sum(x_control == 1.0) # non-protected group
prot_all = sum(x_control == 0.0) # protected group
non_prot_pos = sum(class_labels[x_control == 1.0] == 1.0) # non_protected in positive class
prot_pos = sum(class_labels[x_control == 0.0] == 1.0) # protected in positive class
frac_non_prot_pos = float(non_prot_pos) / float(non_prot_all)
frac_prot_pos = float(prot_pos) / float(prot_all)
p_rule = (frac_prot_pos / frac_non_prot_pos) * 100.0
print("Total data points: %d" % (len(x_control)))
print("# non-protected examples: %d" % (non_prot_all))
print("# protected examples: %d" % (prot_all))
print("Non-protected in positive class: %d (%0.0f%%)" % (non_prot_pos, non_prot_pos * 100.0 / non_prot_all))
print("Protected in positive class: %d (%0.0f%%)" % (prot_pos, prot_pos * 100.0 / prot_all))
print("P-rule is: %0.0f%%" % ( p_rule ))
return p_rule
def add_intercept(x):
""" Add intercept to the data before linear classification """
m,n = x.shape
intercept = np.ones(m).reshape(m, 1) # the constant b
return np.concatenate((intercept, x), axis = 1)
def check_binary(arr):
"give an array of values, see if the values are only 0 and 1"
s = sorted(set(arr))
if s[0] == 0 and s[1] == 1:
return True
else:
return False
def get_one_hot_encoding(in_arr):
"""
input: 1-D arr with int vals -- if not int vals, will raise an error
output: m (ndarray): one-hot encoded matrix
d (dict): also returns a dictionary original_val -> column in encoded matrix
"""
for k in in_arr:
if not isinstance(k, (int, np.integer, np.floating)):  # only integer-valued entries (possibly stored as floats) are supported; the old Python 2 type-string check breaks under Python 3
print(str(type(k)))
print("************* ERROR: Input arr does not have integer types")
return None
in_arr = np.array(in_arr, dtype=int)
assert(len(in_arr.shape)==1) # no column, means it was a 1-D arr
attr_vals_uniq_sorted = sorted(list(set(in_arr)))
num_uniq_vals = len(attr_vals_uniq_sorted)
if (num_uniq_vals == 2) and (attr_vals_uniq_sorted[0] == 0 and attr_vals_uniq_sorted[1] == 1):
return in_arr, None
index_dict = {} # value to the column number
for i in range(0,len(attr_vals_uniq_sorted)):
val = attr_vals_uniq_sorted[i]
index_dict[val] = i
out_arr = []
for i in range(0,len(in_arr)):
tup = np.zeros(num_uniq_vals)
val = in_arr[i]
ind = index_dict[val]
tup[ind] = 1 # set that value of tuple to 1
out_arr.append(tup)
return np.array(out_arr), index_dict
def check_accuracy(model, x_train, y_train, x_test, y_test, y_train_predicted, y_test_predicted):
"""
returns the train/test accuracy of the model
we either pass the model (w)
else we pass y_predicted
"""
if model is not None and y_test_predicted is not None:
print("Either the model (w) or the predicted labels should be None")
raise Exception("Either the model (w) or the predicted labels should be None")
if model is not None:
y_test_predicted = np.sign(np.dot(x_test, model))
y_train_predicted = np.sign(np.dot(x_train, model))
def get_accuracy(y, Y_predicted):
correct_answers = (Y_predicted == y).astype(int) # will have 1 when the prediction and the actual label match
accuracy = float(sum(correct_answers)) / float(len(correct_answers))
return accuracy, sum(correct_answers)
train_score, correct_answers_train = get_accuracy(y_train, y_train_predicted)
test_score, correct_answers_test = get_accuracy(y_test, y_test_predicted)
return train_score, test_score, correct_answers_train, correct_answers_test
def test_sensitive_attr_constraint_cov(model, x_arr, y_arr_dist_boundary, x_control, thresh, verbose):
"""
The covariance is computed b/w the sensitive attr val and the distance from the boundary
If the model is None, we assume that y_arr_dist_boundary contains the distances from the decision boundary
If the model is not None, we just compute the dot product of model and x_arr
for the case of SVM, we pass the distance from the boundary because the intercept is internalized for the class
and we have to compute the distance using the project function
this function returns thresh - |covariance|, which is negative if the constraint specified by the thresh parameter is not satisfied
if the return value is >= 0, then the constraint is satisfied
"""
assert(x_arr.shape[0] == x_control.shape[0])
if len(x_control.shape) > 1: # make sure we just have one column in the array
assert(x_control.shape[1] == 1)
arr = []
if model is None:
arr = y_arr_dist_boundary # simply the output labels
else:
arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label
arr = np.array(arr, dtype=np.float64)
cov = np.dot(x_control - np.mean(x_control), arr ) / float(len(x_control))
ans = thresh - abs(cov) # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied
# ans = thresh - cov # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied
if verbose is True:
print("Covariance is", cov)
print("Diff is:", ans)
print()
return ans
def print_covariance_sensitive_attrs(model, x_arr, y_arr_dist_boundary, x_control, sensitive_attrs):
"""
reutrns the covariance between sensitive features and distance from decision boundary
"""
arr = []
if model is None:
arr = y_arr_dist_boundary  # simply the output labels
else:
arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label
sensitive_attrs_to_cov_original = {}
for attr in sensitive_attrs:
attr_arr = x_control[attr]
bin_attr = check_binary(attr_arr) # check if the attribute is binary (0/1), or has more than 2 vals
if bin_attr == False: # if its a non-binary sensitive feature, then perform one-hot-encoding
attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)
thresh = 0
if bin_attr:
cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, np.array(attr_arr), thresh, False)
sensitive_attrs_to_cov_original[attr] = cov
else: # sensitive feature has more than 2 categorical values
cov_arr = []
sensitive_attrs_to_cov_original[attr] = {}
for attr_val, ind in index_dict.items():
t = attr_arr_transformed[:,ind]
cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, t, thresh, False)
sensitive_attrs_to_cov_original[attr][attr_val] = cov
cov_arr.append(abs(cov))
cov = max(cov_arr)
return sensitive_attrs_to_cov_original
def get_correlations(model, x_test, y_predicted, x_control_test, sensitive_attrs):
"""
returns the fraction in positive class for sensitive feature values
"""
if model is not None:
y_predicted = np.sign(np.dot(x_test, model))
y_predicted = np.array(y_predicted)
out_dict = {}
for attr in sensitive_attrs:
attr_val = []
for v in x_control_test[attr]: attr_val.append(v)
assert(len(attr_val) == len(y_predicted))
total_per_val = defaultdict(int)
attr_to_class_labels_dict = defaultdict(lambda: defaultdict(int))
for i in range(0, len(y_predicted)):
val = attr_val[i]
label = y_predicted[i]
# val = attr_val_int_mapping_dict_reversed[val] # change values from integers to actual names
total_per_val[val] += 1
attr_to_class_labels_dict[val][label] += 1
class_labels = set(y_predicted.tolist())
local_dict_1 = {}
for k1,v1 in attr_to_class_labels_dict.items():
total_this_val = total_per_val[k1]
local_dict_2 = {}
for k2 in class_labels: # the order should be the same for printing
v2 = v1[k2]
f = float(v2) * 100.0 / float(total_this_val)
local_dict_2[k2] = f
local_dict_1[k1] = local_dict_2
out_dict[attr] = local_dict_1
return out_dict
def get_constraint_list_cov(x_train, y_train, x_control_train, sensitive_attrs, sensitive_attrs_to_cov_thresh):
"""
get the list of constraints to be fed to the minimizer
"""
constraints = []
for attr in sensitive_attrs:
attr_arr = x_control_train[attr]
attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)
if index_dict is None: # binary attribute
thresh = sensitive_attrs_to_cov_thresh[attr]
c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov, 'args':(x_train, y_train, attr_arr_transformed,thresh, False)})
constraints.append(c)
else: # otherwise, its a categorical attribute, so we need to set the cov thresh for each value separately
for attr_val, ind in index_dict.items():
attr_name = attr_val
thresh = sensitive_attrs_to_cov_thresh[attr][attr_name]
t = attr_arr_transformed[:,ind]
c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov, 'args':(x_train, y_train, t ,thresh, False)})
constraints.append(c)
return constraints
def split_into_train_test(x_all, y_all, x_control_all, train_fold_size):
split_point = int(round(float(x_all.shape[0]) * train_fold_size))
x_all_train = x_all[:split_point]
x_all_test = x_all[split_point:]
y_all_train = y_all[:split_point]
y_all_test = y_all[split_point:]
x_control_all_train = {}
x_control_all_test = {}
for k in x_control_all.keys():
x_control_all_train[k] = x_control_all[k][:split_point]
x_control_all_test[k] = x_control_all[k][split_point:]
return x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test
def get_avg_correlation_dict(correlation_dict_arr):
# make the structure for the correlation dict
correlation_dict_avg = {}
# print correlation_dict_arr
for k,v in correlation_dict_arr[0].items():
correlation_dict_avg[k] = {}
for feature_val, feature_dict in v.items():
correlation_dict_avg[k][feature_val] = {}
for class_label, frac_class in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label] = []
# populate the correlation dict
for correlation_dict in correlation_dict_arr:
for k,v in correlation_dict.items():
for feature_val, feature_dict in v.items():
for class_label, frac_class in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label].append(frac_class)
# now take the averages
for k,v in correlation_dict_avg.items():
for feature_val, feature_dict in v.items():
for class_label, frac_class_arr in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label] = np.mean(frac_class_arr)
return correlation_dict_avg
def plot_cov_thresh_vs_acc_pos_ratio(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs):
# vary the covariance threshold using a range of decreasing multiplicative factors and see the tradeoffs between accuracy and fairness
it = 0.05
cov_range = np.arange(1.0, 0.0-it, -it).tolist()
if apply_accuracy_constraint == True:
if sep_constraint == False:
it = 0.1
cov_range = np.arange(0.0, 1.0 + it, it).tolist()
if sep_constraint == True:
cov_range = [0,1,5,10,20,50,100,500,1000]
positive_class_label = 1 # positive class is +1
train_acc = []
test_acc = []
positive_per_category = defaultdict(list) # for each category (male / female), the frac of positive
# first get the original values of covariance in the unconstrained classifier -- these original values are not needed for reverse constraint
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, 0, apply_accuracy_constraint, sep_constraint, sensitive_attrs, [{} for i in range(0,num_folds)], 0)
for c in cov_range:
print("LOG: testing for multiplicative factor: %0.2f" % c)
sensitive_attrs_to_cov_original_arr_multiplied = []
for sensitive_attrs_to_cov_original in cov_dict_train_arr:
sensitive_attrs_to_cov_thresh = deepcopy(sensitive_attrs_to_cov_original)
for k in sensitive_attrs_to_cov_thresh.keys():
v = sensitive_attrs_to_cov_thresh[k]
if type(v) == type({}):
for k1 in v.keys():
v[k1] = v[k1] * c
else:
sensitive_attrs_to_cov_thresh[k] = v * c
sensitive_attrs_to_cov_original_arr_multiplied.append(sensitive_attrs_to_cov_thresh)
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_original_arr_multiplied, c)
test_acc.append(np.mean(test_acc_arr))
correlation_dict_train = get_avg_correlation_dict(correlation_dict_train_arr)
correlation_dict_test = get_avg_correlation_dict(correlation_dict_test_arr)
# just plot the correlations for the first sensitive attr; the plotting can be extended for the other values, but as a proof of concept, we will just show one
s = sensitive_attrs[0]
for k,v in correlation_dict_test[s].items():
if v.get(positive_class_label) is None:
positive_per_category[k].append(0.0)
else:
positive_per_category[k].append(v[positive_class_label])
positive_per_category = dict(positive_per_category)
p_rule_arr = (np.array(positive_per_category[0]) / np.array(positive_per_category[1])) * 100.0
ax = plt.subplot(2,1,1)
plt.plot(cov_range, positive_per_category[0], "-o" , color="green", label = "Protected")
plt.plot(cov_range, positive_per_category[1], "-o", color="blue", label = "Non-protected")
ax.set_xlim([min(cov_range), max(cov_range)])
plt.xlabel('Multiplicative loss factor')
plt.ylabel('Perc. in positive class')
if apply_accuracy_constraint == False:
plt.gca().invert_xaxis()
plt.xlabel('Multiplicative covariance factor (c)')
ax.legend()
ax = plt.subplot(2,1,2)
plt.scatter(p_rule_arr, test_acc, color="red")
ax.set_xlim([min(p_rule_arr), max(max(p_rule_arr), 100)])
plt.xlabel('P% rule')
plt.ylabel('Accuracy')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
plt.show()
def get_line_coordinates(w, x1, x2):
y1 = (-w[0] - (w[1] * x1)) / w[2]
y2 = (-w[0] - (w[1] * x2)) / w[2]
return y1,y2
| 27,182
| 41.606583
| 357
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/online_regression/fairness_metrics.py
|
import torch
import ot
import cvxpy as cp
import numpy as np
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementations of the fairness metrics (e.g. energy distance, Sinkhorn divergence, statistical parity)
as well as the model performance metrics (e.g. MSE, accuracy) used in the paper.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
# +------------------------------------------+
# | Metric 1: Energy Distance |
# +------------------------------------------+
def energy_distance(y1, y2):
'''
    Compute the energy distance between empirical distributions y1 and y2 (unbiased U-statistic estimator), each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
n1 = torch.numel(y1)
n2 = torch.numel(y2)
return (2*torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1)).mean()
-torch.abs(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n1*(n1-1))
-torch.abs(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(n2*(n2-1)))
def energy_distance_biased(y1, y2):
'''
    Compute the energy distance between empirical distributions y1 and y2 (biased V-statistic estimator), each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
n1 = torch.numel(y1)
n2 = torch.numel(y2)
return (2*torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1)).mean()
-torch.abs(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n1*n1)
-torch.abs(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(n2*n2))
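# The two estimators above differ only in how the within-group terms are
# normalised: the U-statistic version divides by n*(n-1), the V-statistic
# version by n*n. A minimal hypothetical sketch (this helper is not part of
# the original module) evaluating both on synthetic 1-dimensional samples:
def _energy_distance_example():
    torch.manual_seed(0)
    y1 = torch.randn(100)          # samples from N(0, 1)
    y2 = torch.randn(100) + 1.0    # samples from N(1, 1)
    return energy_distance(y1, y2), energy_distance_biased(y1, y2)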
def wasserstein_distance(y1, y2):
'''
    Compute the Wasserstein distance between empirical distributions y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Wasserstein distance
'''
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
wd = torch.multiply(torch.Tensor(ot.emd(ones_1.flatten(), ones_2.flatten(), C_np)), C).sum()
return wd
# +------------------------------------------+
# | Metric 2: Sinkhorn Divergence |
# +------------------------------------------+
def sinkhorn_diver(y1, y2):
'''
    Compute the Sinkhorn divergence between empirical distributions y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
        dist (torch.Tensor): The computed Sinkhorn divergence
'''
# compute cost matrix
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink12 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y1.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink11 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
C = torch.sqrt(torch.norm(y2.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink22 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
return (sink12 - 1/2 * sink11 - 1/2 * sink22).sum()
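# Note: the value above is the debiased combination
#   S_eps(P, Q) = W_eps(P, Q) - 1/2 * W_eps(P, P) - 1/2 * W_eps(Q, Q),
# where each W_eps term is an entropic OT cost computed by ot.sinkhorn with
# regularisation strength eps = 0.1; the debiasing removes the entropic bias
# so the divergence is approximately zero when y1 and y2 coincide.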
# +------------------------------------------+
# | Metric 3: MMD with RBF Kernel |
# +------------------------------------------+
def MMD_RBF(y1, y2):
n = y1.flatten().shape[0]
m = y2.flatten().shape[0]
    def rbf(diff, diagzero=True):
        # Gaussian (RBF) kernel k(a, b) = exp(-(a - b)^2 / (2 * sigma^2))
        sigma = 0.1
        K = torch.exp(-(diff**2)/(2*sigma**2))
        if diagzero:
            # zero out self-similarity terms for the unbiased estimator
            K = K * (1 - torch.eye(K.shape[0]))
        return K
return (-2*rbf(y1.unsqueeze(0)-y2.unsqueeze(1), diagzero=False).sum()/(n*m)
+rbf(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n*(n-1))
+rbf(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(m*(m-1)))
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity(y1_hat, y2_hat, y1, y2):
'''
    Compute the maximum statistical disparity between the two groups'
    prediction distributions. This is equivalent to the maximum difference
    between their empirical CDFs.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
diff = torch.tensor(0)
for y_test in torch.hstack((y1_hat,y2_hat)).flatten():
cdf1_y = (y1_hat<=y_test).float().mean()
cdf2_y = (y2_hat<=y_test).float().mean()
if (cdf1_y-cdf2_y).abs()>diff:
diff = (cdf1_y-cdf2_y).abs()
return diff
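# The quantity above is the two-sample Kolmogorov-Smirnov statistic between the
# groups' prediction distributions: sup_t |F1(t) - F2(t)|. A small hypothetical
# sanity check (this helper is not part of the original module): identical
# predictions give a gap of 0, disjoint point masses give a gap of 1.
def _statistical_parity_example():
    same = torch.arange(5.0)
    assert statistical_parity(same, same, same, same) == 0
    low, high = torch.zeros(5), torch.ones(5)
    assert statistical_parity(low, high, low, high) == 1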
# +------------------------------------------+
# | Evaluation Metric 2: Bounded Group Loss |
# +------------------------------------------+
def bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L2'):
'''
    Compute the ratio between the per-group losses of the predictions for the
    two protected classes (the smaller ratio is returned, so the value is at
    most 1).
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
        epsilon (torch.Tensor): The ratio between the group losses (at most 1)
'''
r1 = y1_hat-y1
r2 = y2_hat-y2
if loss=='L2':
lossf = lambda ra,rb: (ra**2).mean() / (rb**2).mean()
if loss=='L1':
lossf = lambda ra,rb: ra.abs().mean() / rb.abs().mean()
l = lossf(r1,r2)
return l if l<1 else 1/l
# +------------------------------------------+
# | Evaluation Metric 3: |
# | Group Fairness in Expectation |
# +------------------------------------------+
def group_fair_expect(y1_hat, y2_hat, y1, y2):
'''
    Compute the group fairness in expectation gap: the absolute difference
    between the mean predictions for the two protected classes.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between means
'''
return (y1_hat.mean()-y2_hat.mean()).abs()
# +------------------------------------------+
# |   Statistical Parity (Classification)   |
# +------------------------------------------+
def statistical_parity_classification(y1_hat, y2_hat, y1, y2):
'''
    Compute the statistical parity gap for classification: the absolute
    difference between the two groups' positive prediction rates.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
return ((y1_hat).sum() / y1_hat.shape[0] - (y2_hat).sum() / y2_hat.shape[0]).abs()
# +------------------------------------------+
# | Evaluation Metric 4: lp distance |
# +------------------------------------------+
def lp_dist(y1_hat, y2_hat, y1, y2, p=1):
'''
    Compute the lp distance between the two groups' empirical prediction CDFs.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): lp distance
'''
dist = torch.tensor(0.)
ys, idx = torch.hstack((y1_hat,y2_hat)).flatten().sort()
for i in range(ys.shape[0]-1):
cdf1_y = (y1_hat <= ys[i]).float().mean()
cdf2_y = (y2_hat <= ys[i]).float().mean()
dist += ((cdf1_y - cdf2_y).abs() ** p) * (ys[i+1] - ys[i])
return dist**(1/p)
# +------------------------------------------+
# | Reg/Clf Metrics: MSE, MAE, Accuracy |
# +------------------------------------------+
def MSE(y1_hat, y2_hat, y1, y2):
'''
    Compute mean squared error.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MSE (torch.Tensor): mean squared error
'''
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
ys = torch.hstack((y1, y2)).flatten()
return ((ys-yhats)**2).mean()
def MAE(y1_hat, y2_hat, y1, y2):
'''
    Compute mean absolute error.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MAE (torch.Tensor): mean absolute error
'''
yhats = torch.hstack((y1_hat,y2_hat)).flatten()
ys = torch.hstack((y1,y2)).flatten()
return (ys-yhats).abs().mean()
def accuracy(y1_hat, y2_hat, y1, y2):
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
total = ys.size(0)
correct = (yhats == ys).sum().item()
return torch.tensor(correct / total * 100)
def R2(y1_hat, y2_hat, y1, y2):
'''
Compute regression R2.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
        R2 (torch.Tensor): coefficient of determination
'''
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
var_y = torch.var(ys, unbiased=False)
return 1.0 - torch.nn.MSELoss(reduction="mean")(yhats, ys) / var_y
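# A minimal, self-contained smoke test added for illustration (not part of the
# original module): draw two synthetic groups with a mean shift and print a few
# of the fairness and performance metrics defined above. Group sizes and noise
# levels are arbitrary.
if __name__ == "__main__":
    torch.manual_seed(0)
    y1 = torch.randn(200)                  # targets, protected class 1
    y2 = torch.randn(150) + 0.5            # targets, protected class 2 (shifted)
    y1_hat = y1 + 0.1 * torch.randn(200)   # noisy predictions, class 1
    y2_hat = y2 + 0.1 * torch.randn(150)   # noisy predictions, class 2
    print("energy distance   :", energy_distance(y1_hat, y2_hat).item())
    print("statistical parity:", statistical_parity(y1_hat, y2_hat, y1, y2).item())
    print("MSE               :", MSE(y1_hat, y2_hat, y1, y2).item())
    print("R2                :", R2(y1_hat, y2_hat, y1, y2).item())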
| 11,133
| 34.012579
| 133
|
py
|
warcio
|
warcio-master/setup.py
|
#!/usr/bin/env python
# vim: set sw=4 et:
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import glob
__version__ = '1.7.4'
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
        # should work with setuptools <18, 18, and 18.5
self.test_suite = ' '
def run_tests(self):
import pytest
import sys
import os
errcode = pytest.main(['--doctest-modules', './warcio', '--cov', 'warcio', '-v', 'test/'])
sys.exit(errcode)
setup(
name='warcio',
version=__version__,
author='Ilya Kreymer',
author_email='ikreymer@gmail.com',
license='Apache 2.0',
packages=find_packages(exclude=['test']),
url='https://github.com/webrecorder/warcio',
description='Streaming WARC (and ARC) IO library',
long_description=open('README.rst').read(),
provides=[
'warcio',
],
install_requires=[
'six',
],
zip_safe=True,
entry_points="""
[console_scripts]
warcio = warcio.cli:main
""",
cmdclass={'test': PyTest},
test_suite='',
tests_require=[
'pytest',
'pytest-cov',
'httpbin==0.5.0',
'requests',
'wsgiprox',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
]
)
| 1,925
| 26.913043
| 98
|
py
|
warcio
|
warcio-master/warcio/bufferedreaders.py
|
from io import BytesIO
import zlib
import sys
from warcio.utils import BUFF_SIZE
#=================================================================
def gzip_decompressor():
"""
    Decompressor which can decompress a gzip stream
"""
return zlib.decompressobj(16 + zlib.MAX_WBITS)
def deflate_decompressor():
return zlib.decompressobj()
def deflate_decompressor_alt():
return zlib.decompressobj(-zlib.MAX_WBITS)
#=================================================================
def try_brotli_init():
try:
import brotli
def brotli_decompressor():
decomp = brotli.Decompressor()
decomp.unused_data = None
return decomp
BufferedReader.DECOMPRESSORS['br'] = brotli_decompressor
except ImportError: #pragma: no cover
pass
#=================================================================
class BufferedReader(object):
"""
A wrapping line reader which wraps an existing reader.
Read operations operate on underlying buffer, which is filled to
block_size (16384 default)
If an optional decompress type is specified,
data is fed through the decompressor when read from the buffer.
Currently supported decompression: gzip
If unspecified, default decompression is None
If decompression is specified, and decompress fails on first try,
data is assumed to not be compressed and no exception is thrown.
If a failure occurs after data has been
partially decompressed, the exception is propagated.
"""
DECOMPRESSORS = {'gzip': gzip_decompressor,
'deflate': deflate_decompressor,
'deflate_alt': deflate_decompressor_alt
}
def __init__(self, stream, block_size=BUFF_SIZE,
decomp_type=None,
starting_data=None,
read_all_members=False):
self.stream = stream
self.block_size = block_size
self._init_decomp(decomp_type)
self.buff = None
self.starting_data = starting_data
self.num_read = 0
self.buff_size = 0
self.read_all_members = read_all_members
def set_decomp(self, decomp_type):
self._init_decomp(decomp_type)
def _init_decomp(self, decomp_type):
self.num_block_read = 0
if decomp_type:
try:
self.decomp_type = decomp_type
self.decompressor = self.DECOMPRESSORS[decomp_type.lower()]()
except KeyError:
raise Exception('Decompression type not supported: ' +
decomp_type)
else:
self.decomp_type = None
self.decompressor = None
def _fillbuff(self, block_size=None):
if not self.empty():
return
# can't read past next member
if self.rem_length() > 0:
return
block_size = block_size or self.block_size
if self.starting_data:
data = self.starting_data
self.starting_data = None
else:
data = self.stream.read(block_size)
self._process_read(data)
# if raw data is not empty and decompressor set, but
# decompressed buff is empty, keep reading --
# decompressor likely needs more data to decompress
while data and self.decompressor and not self.decompressor.unused_data and self.empty():
data = self.stream.read(block_size)
self._process_read(data)
def _process_read(self, data):
# don't process if no raw data read
if not data:
self.buff = None
return
data = self._decompress(data)
self.buff_size = len(data)
self.num_read += self.buff_size
self.num_block_read += self.buff_size
self.buff = BytesIO(data)
def _decompress(self, data):
if self.decompressor and data:
try:
data = self.decompressor.decompress(data)
except Exception as e:
# if first read attempt, assume non-gzipped stream
if self.num_block_read == 0:
if self.decomp_type == 'deflate':
self._init_decomp('deflate_alt')
data = self._decompress(data)
else:
self.decompressor = None
# otherwise (partly decompressed), something is wrong
else:
sys.stderr.write(str(e) + '\n')
return b''
return data
def read(self, length=None):
"""
Fill bytes and read some number of bytes
(up to length if specified)
        fewer than length bytes may be read if the end of input is reached
if at buffer boundary, will attempt to read again until
specified length is read
"""
all_buffs = []
while length is None or length > 0:
self._fillbuff()
if self.empty():
break
buff = self.buff.read(length)
all_buffs.append(buff)
if length:
length -= len(buff)
return b''.join(all_buffs)
def readline(self, length=None):
"""
Fill buffer and read a full line from the buffer
(up to specified length, if provided)
If no newline found at end, try filling buffer again in case
at buffer boundary.
"""
if length == 0:
return b''
self._fillbuff()
if self.empty():
return b''
linebuff = self.buff.readline(length)
# we may be at a boundary
while not linebuff.endswith(b'\n'):
if length:
length -= len(linebuff)
if length <= 0:
break
self._fillbuff()
if self.empty():
break
linebuff += self.buff.readline(length)
return linebuff
def tell(self):
return self.num_read
def empty(self):
if not self.buff or self.buff.tell() >= self.buff_size:
# if reading all members, attempt to get next member automatically
if self.read_all_members:
self.read_next_member()
return True
return False
def read_next_member(self):
if not self.decompressor or not self.decompressor.unused_data:
return False
self.starting_data = self.decompressor.unused_data
self._init_decomp(self.decomp_type)
return True
def rem_length(self):
rem = 0
if self.buff:
rem = self.buff_size - self.buff.tell()
if self.decompressor and self.decompressor.unused_data:
rem += len(self.decompressor.unused_data)
return rem
def close(self):
if self.stream:
self.stream.close()
self.stream = None
self.buff = None
self.close_decompressor()
def close_decompressor(self):
if self.decompressor:
self.decompressor.flush()
self.decompressor = None
@classmethod
def get_supported_decompressors(cls):
return cls.DECOMPRESSORS.keys()
#=================================================================
class DecompressingBufferedReader(BufferedReader):
"""
A BufferedReader which defaults to gzip decompression,
(unless different type specified)
"""
def __init__(self, *args, **kwargs):
if 'decomp_type' not in kwargs:
kwargs['decomp_type'] = 'gzip'
super(DecompressingBufferedReader, self).__init__(*args, **kwargs)
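# A minimal usage sketch added for illustration (the helper below is
# hypothetical, not part of the original module): DecompressingBufferedReader
# transparently inflates gzip-compressed input, and falls back to returning the
# raw bytes when the first read turns out not to be compressed.
def _decompressing_reader_example():
    import gzip
    compressed = BytesIO(gzip.compress(b'hello warcio'))
    assert DecompressingBufferedReader(compressed).read() == b'hello warcio'
    plain = BytesIO(b'not compressed')
    assert DecompressingBufferedReader(plain).read() == b'not compressed'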
#=================================================================
class ChunkedDataException(Exception):
def __init__(self, msg, data=b''):
Exception.__init__(self, msg)
self.data = data
#=================================================================
class ChunkedDataReader(BufferedReader):
r"""
A ChunkedDataReader is a DecompressingBufferedReader
which also supports de-chunking of the data if it happens
to be http 'chunk-encoded'.
If at any point the chunked header is not available, the stream is
assumed to not be chunked and no more dechunking occurs.
"""
def __init__(self, stream, raise_exceptions=False, **kwargs):
super(ChunkedDataReader, self).__init__(stream, **kwargs)
self.all_chunks_read = False
self.not_chunked = False
# if False, we'll use best-guess fallback for parse errors
self.raise_chunked_data_exceptions = raise_exceptions
def _fillbuff(self, block_size=None):
if self.not_chunked:
return super(ChunkedDataReader, self)._fillbuff(block_size)
# Loop over chunks until there is some data (not empty())
# In particular, gzipped data may require multiple chunks to
# return any decompressed result
while (self.empty() and
not self.all_chunks_read and
not self.not_chunked):
try:
length_header = self.stream.readline(64)
self._try_decode(length_header)
except ChunkedDataException as e:
if self.raise_chunked_data_exceptions:
raise
# Can't parse the data as chunked.
# It's possible that non-chunked data is served
# with a Transfer-Encoding: chunked.
# Treat this as non-chunk encoded from here on.
self._process_read(length_header + e.data)
self.not_chunked = True
# parse as block as non-chunked
return super(ChunkedDataReader, self)._fillbuff(block_size)
def _try_decode(self, length_header):
# decode length header
try:
# ensure line ends with \r\n
assert(length_header[-2:] == b'\r\n')
chunk_size = length_header[:-2].split(b';')[0]
chunk_size = int(chunk_size, 16)
# sanity check chunk size
assert(chunk_size <= 2**31)
except (ValueError, AssertionError):
raise ChunkedDataException(b"Couldn't decode length header " +
length_header)
if not chunk_size:
# chunk_size 0 indicates end of file. read final bytes to compute digest.
final_data = self.stream.read(2)
try:
assert(final_data == b'\r\n')
except AssertionError:
raise ChunkedDataException(b"Incorrect \r\n after length header of 0")
self.all_chunks_read = True
self._process_read(b'')
return
data_len = 0
data = b''
# read chunk
while data_len < chunk_size:
new_data = self.stream.read(chunk_size - data_len)
# if we unexpectedly run out of data,
# either raise an exception or just stop reading,
# assuming file was cut off
if not new_data:
if self.raise_chunked_data_exceptions:
msg = 'Ran out of data before end of chunk'
raise ChunkedDataException(msg, data)
else:
chunk_size = data_len
self.all_chunks_read = True
data += new_data
data_len = len(data)
# if we successfully read a block without running out,
# it should end in \r\n
if not self.all_chunks_read:
clrf = self.stream.read(2)
if clrf != b'\r\n':
raise ChunkedDataException(b"Chunk terminator not found.",
data)
# hand to base class for further processing
self._process_read(data)
#=================================================================
try_brotli_init()
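# Hypothetical usage sketch (added for illustration, not part of the original
# module): ChunkedDataReader reassembles an HTTP 'Transfer-Encoding: chunked'
# body, falling back to pass-through if no valid chunk length header is found.
def _chunked_reader_example():
    body = BytesIO(b'4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n')
    assert ChunkedDataReader(body).read() == b'Wikipedia'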
| 11,987
| 30.382199
| 96
|
py
|
warcio
|
warcio-master/warcio/indexer.py
|
import json
import sys
import os
from collections import OrderedDict
from warcio.archiveiterator import ArchiveIterator
from warcio.utils import open_or_default
# ============================================================================
class Indexer(object):
field_names = {}
def __init__(self, fields, inputs, output, verify_http=False):
if isinstance(fields, str):
fields = fields.split(',')
self.fields = fields
self.record_parse = any(field.startswith('http:') for field in self.fields)
self.inputs = inputs
self.output = output
self.verify_http = verify_http
def process_all(self):
with open_or_default(self.output, 'wt', sys.stdout) as out:
for filename in self.inputs:
try:
stdin = sys.stdin.buffer
except AttributeError: # py2
stdin = sys.stdin
with open_or_default(filename, 'rb', stdin) as fh:
self.process_one(fh, out, filename)
def process_one(self, input_, output, filename):
it = self._create_record_iter(input_)
self._write_header(output, filename)
for record in it:
self.process_index_entry(it, record, filename, output)
def process_index_entry(self, it, record, filename, output):
index = self._new_dict(record)
for field in self.fields:
value = self.get_field(record, field, it, filename)
if value is not None:
field = self.field_names.get(field, field)
index[field] = value
self._write_line(output, index, record, filename)
def _create_record_iter(self, input_):
return ArchiveIterator(input_,
no_record_parse=not self.record_parse,
arc2warc=True,
verify_http=self.verify_http)
def _new_dict(self, record):
return OrderedDict()
def get_field(self, record, name, it, filename):
value = None
if name == 'offset':
value = str(it.get_record_offset())
elif name == 'length':
value = str(it.get_record_length())
elif name == 'filename':
value = os.path.basename(filename)
elif name == 'http:status':
if record.rec_type in ('response', 'revisit') and record.http_headers:
value = record.http_headers.get_statuscode()
elif name.startswith('http:'):
if record.http_headers:
value = record.http_headers.get_header(name[5:])
else:
value = record.rec_headers.get_header(name)
return value
def _write_header(self, out, filename):
pass
def _write_line(self, out, index, record, filename):
out.write(json.dumps(index) + '\n')
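# Hypothetical usage sketch (for illustration only; 'example.warc.gz' is a
# placeholder path): index a WARC file and write one JSON line per record to
# stdout, roughly what the `warcio index` command does.
def _indexer_example():
    indexer = Indexer(fields='warc-type,warc-target-uri,http:status',
                      inputs=['example.warc.gz'],
                      output='-')
    indexer.process_all()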
| 2,894
| 31.166667
| 83
|
py
|
warcio
|
warcio-master/warcio/limitreader.py
|
# ============================================================================
class LimitReader(object):
"""
A reader which will not read more than specified limit
"""
def __init__(self, stream, limit):
self.stream = stream
self.limit = limit
self._orig_limit = limit
def _update(self, buff):
length = len(buff)
self.limit -= length
return buff
def read(self, length=None):
if length is not None:
length = min(length, self.limit)
else:
length = self.limit
if length == 0:
return b''
buff = self.stream.read(length)
return self._update(buff)
def readline(self, length=None):
if length is not None:
length = min(length, self.limit)
else:
length = self.limit
if length == 0:
return b''
buff = self.stream.readline(length)
return self._update(buff)
def close(self):
self.stream.close()
def tell(self):
# implement our own tell
return self._orig_limit - self.limit
@staticmethod
def wrap_stream(stream, content_length):
"""
If given content_length is an int > 0, wrap the stream
in a LimitReader. Otherwise, return the stream unaltered
"""
try:
content_length = int(content_length)
if content_length >= 0:
# optimize: if already a LimitStream, set limit to
# the smaller of the two limits
if isinstance(stream, LimitReader):
stream.limit = min(stream.limit, content_length)
else:
stream = LimitReader(stream, content_length)
except (ValueError, TypeError):
pass
return stream
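# Minimal usage sketch (hypothetical helper, for illustration): cap how many
# bytes can be read from an underlying stream, e.g. to honour a Content-Length
# header.
def _limit_reader_example():
    from io import BytesIO
    reader = LimitReader.wrap_stream(BytesIO(b'0123456789'), '4')
    assert reader.read() == b'0123'
    assert reader.tell() == 4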
| 1,850
| 25.826087
| 78
|
py
|
warcio
|
warcio-master/warcio/exceptions.py
|
#=================================================================
class ArchiveLoadFailed(Exception):
def __init__(self, reason):
self.msg = str(reason)
super(ArchiveLoadFailed, self).__init__(self.msg)
| 224
| 36.5
| 66
|
py
|
warcio
|
warcio-master/warcio/archiveiterator.py
|
from warcio.bufferedreaders import DecompressingBufferedReader
from warcio.exceptions import ArchiveLoadFailed
from warcio.recordloader import ArcWarcRecordLoader
from warcio.utils import BUFF_SIZE
import sys
import six
# ============================================================================
class UnseekableYetTellable:
def __init__(self, fh):
self.fh = fh
self.offset = 0
def tell(self):
return self.offset
def read(self, size=-1):
result = self.fh.read(size)
self.offset += len(result)
return result
# ============================================================================
class ArchiveIterator(six.Iterator):
""" Iterate over records in WARC and ARC files, both gzip chunk
compressed and uncompressed
The indexer will automatically detect format, and decompress
if necessary.
"""
GZIP_ERR_MSG = """
ERROR: non-chunked gzip file detected, gzip block continues
beyond single record.
This file is probably not a multi-member gzip but a single gzip file.
To allow seek, a gzipped {1} must have each record compressed into
a single gzip member and concatenated together.
This file is likely still valid and can be fixed by running:
warcio recompress <path/to/file> <path/to/new_file>
"""
INC_RECORD = """\
WARNING: Record not followed by newline, perhaps Content-Length is invalid
Offset: {0}
Remainder: {1}
"""
def __init__(self, fileobj, no_record_parse=False,
verify_http=False, arc2warc=False,
ensure_http_headers=False, block_size=BUFF_SIZE,
check_digests=False):
self.fh = fileobj
self.loader = ArcWarcRecordLoader(verify_http=verify_http,
arc2warc=arc2warc)
self.known_format = None
self.mixed_arc_warc = arc2warc
self.member_info = None
self.no_record_parse = no_record_parse
self.ensure_http_headers = ensure_http_headers
try:
self.offset = self.fh.tell()
except:
self.fh = UnseekableYetTellable(self.fh)
self.offset = self.fh.tell()
self.reader = DecompressingBufferedReader(self.fh,
block_size=block_size)
self.next_line = None
self.check_digests = check_digests
self.err_count = 0
self.record = None
self.the_iter = self._iterate_records()
def __iter__(self):
return self.the_iter
def __next__(self):
return six.next(self.the_iter)
def close(self):
self.record = None
if self.reader:
self.reader.close_decompressor()
self.reader = None
def _iterate_records(self):
""" iterate over each record
"""
raise_invalid_gzip = False
empty_record = False
while True:
try:
self.record = self._next_record(self.next_line)
if raise_invalid_gzip:
self._raise_invalid_gzip_err()
yield self.record
except EOFError:
empty_record = True
self.read_to_end()
if self.reader.decompressor:
# if another gzip member, continue
if self.reader.read_next_member():
continue
# if empty record, then we're done
elif empty_record:
break
# otherwise, probably a gzip
# containing multiple non-chunked records
# raise this as an error
else:
raise_invalid_gzip = True
# non-gzip, so we're done
elif empty_record:
break
self.close()
def _raise_invalid_gzip_err(self):
""" A gzip file with multiple ARC/WARC records, non-chunked
has been detected. This is not valid for replay, so notify user
"""
frmt = 'warc/arc'
if self.known_format:
frmt = self.known_format
frmt_up = frmt.upper()
msg = self.GZIP_ERR_MSG.format(frmt, frmt_up)
raise ArchiveLoadFailed(msg)
def _consume_blanklines(self):
""" Consume blank lines that are between records
- For warcs, there are usually 2
- For arcs, may be 1 or 0
- For block gzipped files, these are at end of each gzip envelope
and are included in record length which is the full gzip envelope
- For uncompressed, they are between records and so are NOT part of
the record length
        count empty_size so that it can be subtracted from
the record length for uncompressed
if first line read is not blank, likely error in WARC/ARC,
display a warning
"""
empty_size = 0
first_line = True
while True:
line = self.reader.readline()
if len(line) == 0:
return None, empty_size
stripped = line.rstrip()
if len(stripped) == 0 or first_line:
empty_size += len(line)
if len(stripped) != 0:
# if first line is not blank,
# likely content-length was invalid, display warning
err_offset = self.fh.tell() - self.reader.rem_length() - empty_size
sys.stderr.write(self.INC_RECORD.format(err_offset, line))
self.err_count += 1
first_line = False
continue
return line, empty_size
def read_to_end(self, record=None):
""" Read remainder of the stream
If a digester is included, update it
with the data read
"""
# no current record to read
if not self.record:
return None
# already at end of this record, don't read until it is consumed
if self.member_info:
return None
curr_offset = self.offset
while True:
b = self.record.raw_stream.read(BUFF_SIZE)
if not b:
break
"""
- For compressed files, blank lines are consumed
since they are part of record length
- For uncompressed files, blank lines are read later,
and not included in the record length
"""
#if self.reader.decompressor:
self.next_line, empty_size = self._consume_blanklines()
self.offset = self.fh.tell() - self.reader.rem_length()
#if self.offset < 0:
# raise Exception('Not Gzipped Properly')
if self.next_line:
self.offset -= len(self.next_line)
length = self.offset - curr_offset
if not self.reader.decompressor:
length -= empty_size
self.member_info = (curr_offset, length)
#return self.member_info
#return next_line
def get_record_offset(self):
if not self.member_info:
self.read_to_end()
return self.member_info[0]
def get_record_length(self):
if not self.member_info:
self.read_to_end()
return self.member_info[1]
def _next_record(self, next_line):
""" Use loader to parse the record from the reader stream
Supporting warc and arc records
"""
record = self.loader.parse_record_stream(self.reader,
next_line,
self.known_format,
self.no_record_parse,
self.ensure_http_headers,
self.check_digests)
self.member_info = None
# Track known format for faster parsing of other records
if not self.mixed_arc_warc:
self.known_format = record.format
return record
# ============================================================================
class WARCIterator(ArchiveIterator):
def __init__(self, *args, **kwargs):
super(WARCIterator, self).__init__(*args, **kwargs)
self.known_format = 'warc'
# ============================================================================
class ARCIterator(ArchiveIterator):
def __init__(self, *args, **kwargs):
super(ARCIterator, self).__init__(*args, **kwargs)
self.known_format = 'arc'
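# Typical usage sketch (added for illustration; 'example.warc.gz' is a
# placeholder path): stream over the records of a WARC file and print the
# target URI of each response record.
def _archive_iterator_example():
    with open('example.warc.gz', 'rb') as stream:
        for record in ArchiveIterator(stream):
            if record.rec_type == 'response':
                print(record.rec_headers.get_header('WARC-Target-URI'))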
| 8,571
| 28.763889
| 87
|
py
|
warcio
|
warcio-master/warcio/checker.py
|
from __future__ import print_function
from warcio.archiveiterator import ArchiveIterator
from warcio.exceptions import ArchiveLoadFailed
def _read_entire_stream(stream):
while True:
piece = stream.read(1024*1024)
if len(piece) == 0:
break
class Checker(object):
def __init__(self, cmd):
self.inputs = cmd.inputs
self.verbose = cmd.verbose
self.exit_value = 0
def process_all(self):
for filename in self.inputs:
try:
self.process_one(filename)
except ArchiveLoadFailed as e:
print(filename)
print(' saw exception ArchiveLoadFailed: '+str(e).rstrip())
print(' skipping rest of file')
self.exit_value = 1
return self.exit_value
def process_one(self, filename):
printed_filename = False
with open(filename, 'rb') as stream:
it = ArchiveIterator(stream, check_digests=True)
for record in it:
digest_present = (record.rec_headers.get_header('WARC-Payload-Digest') or
record.rec_headers.get_header('WARC-Block-Digest'))
_read_entire_stream(record.content_stream())
d_msg = None
output = []
rec_id = record.rec_headers.get_header('WARC-Record-ID')
rec_type = record.rec_headers.get_header('WARC-Type')
rec_offset = it.get_record_offset()
if record.digest_checker.passed is False:
self.exit_value = 1
output = list(record.digest_checker.problems)
elif record.digest_checker.passed is True and self.verbose:
d_msg = 'digest pass'
elif record.digest_checker.passed is None and self.verbose:
if digest_present and rec_type == 'revisit':
d_msg = 'digest present but not checked (revisit)'
elif digest_present: # pragma: no cover
# should not happen
d_msg = 'digest present but not checked'
else:
d_msg = 'no digest to check'
if d_msg or output:
if not printed_filename:
print(filename)
printed_filename = True
print(' ', 'offset', rec_offset, 'WARC-Record-ID', rec_id, rec_type)
if d_msg:
print(' ', d_msg)
for o in output:
print(' ', o)
| 2,675
| 36.690141
| 89
|
py
|
warcio
|
warcio-master/warcio/statusandheaders.py
|
"""
Representation and parsing of HTTP-style status + headers
"""
from six.moves import range
from six import iteritems
from warcio.utils import to_native_str, headers_to_str_headers
import uuid
from six.moves.urllib.parse import quote
import re
#=================================================================
class StatusAndHeaders(object):
ENCODE_HEADER_RX = re.compile(r'[=]["\']?([^;"]+)["\']?(?=[;]?)')
"""
Representation of parsed http-style status line and headers
    Status Line is the first line of the request/response
Headers is a list of (name, value) tuples
An optional protocol which appears on first line may be specified
If is_http_request is true, split http verb (instead of protocol) from start of statusline
"""
def __init__(self, statusline, headers, protocol='', total_len=0, is_http_request=False):
if is_http_request:
protocol, statusline = statusline.split(' ', 1)
self.statusline = statusline
self.headers = headers_to_str_headers(headers)
self.protocol = protocol
self.total_len = total_len
self.headers_buff = None
def get_header(self, name, default_value=None):
"""
return header (name, value)
if found
"""
name_lower = name.lower()
for value in self.headers:
if value[0].lower() == name_lower:
return value[1]
return default_value
def add_header(self, name, value):
self.headers.append((name, value))
def replace_header(self, name, value):
"""
replace header with new value or add new header
return old header value, if any
"""
name_lower = name.lower()
for index in range(len(self.headers) - 1, -1, -1):
curr_name, curr_value = self.headers[index]
if curr_name.lower() == name_lower:
self.headers[index] = (curr_name, value)
return curr_value
self.headers.append((name, value))
return None
def remove_header(self, name):
"""
Remove header (case-insensitive)
return True if header removed, False otherwise
"""
name_lower = name.lower()
for index in range(len(self.headers) - 1, -1, -1):
if self.headers[index][0].lower() == name_lower:
del self.headers[index]
return True
return False
def get_statuscode(self):
"""
Return the statuscode part of the status response line
(Assumes no protocol in the statusline)
"""
code = self.statusline.split(' ', 1)[0]
return code
def validate_statusline(self, valid_statusline):
"""
Check that the statusline is valid, eg. starts with a numeric
code. If not, replace with passed in valid_statusline
"""
code = self.get_statuscode()
try:
code = int(code)
assert(code > 0)
return True
except(ValueError, AssertionError):
self.statusline = valid_statusline
return False
def add_range(self, start, part_len, total_len):
"""
Add range headers indicating that this a partial response
"""
content_range = 'bytes {0}-{1}/{2}'.format(start,
start + part_len - 1,
total_len)
self.statusline = '206 Partial Content'
self.replace_header('Content-Range', content_range)
self.replace_header('Content-Length', str(part_len))
self.replace_header('Accept-Ranges', 'bytes')
return self
def compute_headers_buffer(self, header_filter=None):
"""
Set buffer representing headers
"""
# HTTP headers %-encoded as ascii (see to_ascii_bytes for more info)
self.headers_buff = self.to_ascii_bytes(header_filter)
def __repr__(self):
return "StatusAndHeaders(protocol = '{0}', statusline = '{1}', \
headers = {2})".format(self.protocol, self.statusline, self.headers)
def __ne__(self, other):
return not (self == other)
def __eq__(self, other):
if not other:
return False
return (self.statusline == other.statusline and
self.headers == other.headers and
self.protocol == other.protocol)
def __str__(self, exclude_list=None):
return self.to_str(exclude_list)
def __bool__(self):
return bool(self.statusline or self.headers)
__nonzero__ = __bool__
def to_str(self, filter_func=None):
string = self.protocol
if string and self.statusline:
string += ' '
if self.statusline:
string += self.statusline
if string:
string += '\r\n'
for h in self.headers:
if filter_func:
h = filter_func(h)
if not h:
continue
string += ': '.join(h) + '\r\n'
return string
def to_bytes(self, filter_func=None, encoding='utf-8'):
return self.to_str(filter_func).encode(encoding) + b'\r\n'
def to_ascii_bytes(self, filter_func=None):
""" Attempt to encode the headers block as ascii
If encoding fails, call percent_encode_non_ascii_headers()
to encode any headers per RFCs
"""
try:
string = self.to_str(filter_func)
string = string.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
self.percent_encode_non_ascii_headers()
string = self.to_str(filter_func)
string = string.encode('ascii')
return string + b'\r\n'
def percent_encode_non_ascii_headers(self, encoding='UTF-8'):
""" Encode any headers that are not plain ascii
as UTF-8 as per:
https://tools.ietf.org/html/rfc8187#section-3.2.3
https://tools.ietf.org/html/rfc5987#section-3.2.2
"""
def do_encode(m):
return "*={0}''".format(encoding) + quote(to_native_str(m.group(1)))
for index in range(len(self.headers) - 1, -1, -1):
curr_name, curr_value = self.headers[index]
try:
# test if header is ascii encodable, no action needed
curr_value.encode('ascii')
except:
# if single value header, (eg. no ';'), %-encode entire header
if ';' not in curr_value:
new_value = quote(curr_value)
else:
# %-encode value in ; name="value"
new_value = self.ENCODE_HEADER_RX.sub(do_encode, curr_value)
if new_value == curr_value:
new_value = quote(curr_value)
self.headers[index] = (curr_name, new_value)
# act like a (case-insensitive) dictionary of headers, much like other
# python http headers apis including http.client.HTTPMessage
# and requests.structures.CaseInsensitiveDict
get = get_header
__getitem__ = get_header
__setitem__ = replace_header
__delitem__ = remove_header
def __contains__(self, key):
return bool(self[key])
#=================================================================
def _strip_count(string, total_read):
length = len(string)
return string.rstrip(), total_read + length
#=================================================================
class StatusAndHeadersParser(object):
"""
    Parser which consumes a stream supporting readline() to read
status and headers and return a StatusAndHeaders object
"""
def __init__(self, statuslist, verify=True):
self.statuslist = statuslist
self.verify = verify
def parse(self, stream, full_statusline=None):
"""
parse stream for status line and headers
return a StatusAndHeaders object
support continuation headers starting with space or tab
"""
# status line w newlines intact
if full_statusline is None:
full_statusline = stream.readline()
full_statusline = self.decode_header(full_statusline)
statusline, total_read = _strip_count(full_statusline, 0)
headers = []
# at end of stream
if total_read == 0:
raise EOFError()
elif not statusline:
return StatusAndHeaders(statusline=statusline,
headers=headers,
protocol='',
total_len=total_read)
# validate only if verify is set
if self.verify:
protocol_status = self.split_prefix(statusline, self.statuslist)
if not protocol_status:
msg = 'Expected Status Line starting with {0} - Found: {1}'
msg = msg.format(self.statuslist, statusline)
raise StatusAndHeadersParserException(msg, full_statusline)
else:
protocol_status = statusline.split(' ', 1)
line, total_read = _strip_count(self.decode_header(stream.readline()), total_read)
while line:
result = line.split(':', 1)
if len(result) == 2:
name = result[0].rstrip(' \t')
value = result[1].lstrip()
else:
name = result[0]
value = None
next_line, total_read = _strip_count(self.decode_header(stream.readline()),
total_read)
# append continuation lines, if any
while next_line and next_line.startswith((' ', '\t')):
if value is not None:
value += next_line
next_line, total_read = _strip_count(self.decode_header(stream.readline()),
total_read)
if value is not None:
header = (name, value)
headers.append(header)
line = next_line
if len(protocol_status) > 1:
statusline = protocol_status[1].strip()
else:
statusline = ''
return StatusAndHeaders(statusline=statusline,
headers=headers,
protocol=protocol_status[0],
total_len=total_read)
@staticmethod
def split_prefix(key, prefixs):
"""
split key string into prefix and remainder
for first matching prefix from a list
"""
key_upper = key.upper()
for prefix in prefixs:
if key_upper.startswith(prefix):
plen = len(prefix)
return (key_upper[:plen], key[plen:])
@staticmethod
def make_warc_id(id_=None):
if not id_:
id_ = uuid.uuid4()
return '<urn:uuid:{0}>'.format(id_)
@staticmethod
def decode_header(line):
try:
# attempt to decode as utf-8 first
return to_native_str(line, 'utf-8')
except:
# if fails, default to ISO-8859-1
return to_native_str(line, 'iso-8859-1')
#=================================================================
class StatusAndHeadersParserException(Exception):
"""
status + headers parsing exception
"""
def __init__(self, msg, statusline):
super(StatusAndHeadersParserException, self).__init__(msg)
self.statusline = statusline
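# Minimal usage sketch (hypothetical helper, for illustration): build an
# HTTP-style status/headers block, query it case-insensitively and serialise it.
def _status_and_headers_example():
    headers = StatusAndHeaders('200 OK',
                               [('Content-Type', 'text/plain')],
                               protocol='HTTP/1.1')
    assert headers.get_statuscode() == '200'
    assert headers.get_header('content-type') == 'text/plain'
    headers.replace_header('Content-Length', '12')
    return headers.to_str()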
| 11,625
| 32.504323
| 94
|
py
|
warcio
|
warcio-master/warcio/utils.py
|
import six
import os
from contextlib import contextmanager
import base64
import hashlib
try:
import collections.abc as collections_abc # only works on python 3.3+
except ImportError: #pragma: no cover
import collections as collections_abc
BUFF_SIZE = 16384
# #===========================================================================
def to_native_str(value, encoding='utf-8'):
if isinstance(value, str):
return value
if six.PY3 and isinstance(value, six.binary_type): #pragma: no cover
return value.decode(encoding)
elif six.PY2 and isinstance(value, six.text_type): #pragma: no cover
return value.encode(encoding)
else:
return value
# #===========================================================================
@contextmanager
def open_or_default(filename, mod, default_fh):
if filename == '-' or filename == b'-':
yield default_fh
elif filename and isinstance(filename, str):
res = open(filename, mod)
yield res
res.close()
elif filename:
yield filename
else:
yield default_fh
# #===========================================================================
def headers_to_str_headers(headers):
'''
Converts dict or tuple-based headers of bytes or str to
tuple-based headers of str, which is the python norm (pep 3333)
'''
ret = []
if isinstance(headers, collections_abc.Mapping):
h = headers.items()
else:
h = headers
if six.PY2: #pragma: no cover
return h
for tup in h:
k, v = tup
if isinstance(k, six.binary_type):
k = k.decode('iso-8859-1')
if isinstance(v, six.binary_type):
v = v.decode('iso-8859-1')
ret.append((k, v))
return ret
# ============================================================================
class Digester(object):
def __init__(self, type_='sha1'):
self.type_ = type_
self.digester = hashlib.new(type_)
def update(self, buff):
self.digester.update(buff)
def __str__(self):
return self.type_ + ':' + to_native_str(base64.b32encode(self.digester.digest()))
#=============================================================================
sys_open = open
def open(filename, mode='r', **kwargs): #pragma: no cover
"""
open() which supports exclusive mode 'x' in python < 3.3
"""
if six.PY3 or 'x' not in mode:
return sys_open(filename, mode, **kwargs)
flags = os.O_EXCL | os.O_CREAT | os.O_WRONLY
if 'b' in mode and hasattr(os, 'O_BINARY'):
flags |= os.O_BINARY
fd = os.open(filename, flags)
mode = mode.replace('x', 'w')
return os.fdopen(fd, mode, 0x664)
| 2,745
| 25.921569
| 89
|
py
|
warcio
|
warcio-master/warcio/recordbuilder.py
|
import datetime
import six
import tempfile
from io import BytesIO
from warcio.recordloader import ArcWarcRecord, ArcWarcRecordLoader
from warcio.statusandheaders import StatusAndHeadersParser, StatusAndHeaders
from warcio.timeutils import datetime_to_iso_date
from warcio.utils import to_native_str, BUFF_SIZE, Digester
#=================================================================
class RecordBuilder(object):
REVISIT_PROFILE = 'http://netpreserve.org/warc/1.0/revisit/identical-payload-digest'
REVISIT_PROFILE_1_1 = 'http://netpreserve.org/warc/1.1/revisit/identical-payload-digest'
WARC_1_0 = 'WARC/1.0'
WARC_1_1 = 'WARC/1.1'
# default warc version
WARC_VERSION = WARC_1_0
WARC_RECORDS = {'warcinfo': 'application/warc-fields',
'response': 'application/http; msgtype=response',
'revisit': 'application/http; msgtype=response',
'request': 'application/http; msgtype=request',
'metadata': 'application/warc-fields',
}
NO_PAYLOAD_DIGEST_TYPES = ('warcinfo', 'revisit')
def __init__(self, warc_version=None, header_filter=None):
self.warc_version = self._parse_warc_version(warc_version)
self.header_filter = header_filter
def create_warcinfo_record(self, filename, info):
warc_headers = StatusAndHeaders('', [], protocol=self.warc_version)
warc_headers.add_header('WARC-Type', 'warcinfo')
warc_headers.add_header('WARC-Record-ID', self._make_warc_id())
if filename:
warc_headers.add_header('WARC-Filename', filename)
warc_headers.add_header('WARC-Date', self.curr_warc_date())
warcinfo = BytesIO()
for name, value in six.iteritems(info):
if not value:
continue
line = name + ': ' + str(value) + '\r\n'
warcinfo.write(line.encode('utf-8'))
length = warcinfo.tell()
warcinfo.seek(0)
return self.create_warc_record('', 'warcinfo',
warc_headers=warc_headers,
payload=warcinfo,
length=length)
def create_revisit_record(self, uri, digest, refers_to_uri, refers_to_date,
http_headers=None, warc_headers_dict=None):
assert digest, 'Digest can not be empty'
if warc_headers_dict is None:
warc_headers_dict = dict()
record = self.create_warc_record(uri, 'revisit', http_headers=http_headers,
warc_headers_dict=warc_headers_dict)
revisit_profile = self.REVISIT_PROFILE_1_1 if self.warc_version == self.WARC_1_1 else self.REVISIT_PROFILE
record.rec_headers.add_header('WARC-Profile', revisit_profile)
record.rec_headers.add_header('WARC-Refers-To-Target-URI', refers_to_uri)
record.rec_headers.add_header('WARC-Refers-To-Date', refers_to_date)
record.rec_headers.add_header('WARC-Payload-Digest', digest)
return record
def create_warc_record(self, uri, record_type,
payload=None,
length=None,
warc_content_type='',
warc_headers_dict=None,
warc_headers=None,
http_headers=None):
if warc_headers_dict is None:
warc_headers_dict = dict()
if payload and not http_headers:
loader = ArcWarcRecordLoader()
http_headers = loader.load_http_headers(record_type, uri, payload, length)
if http_headers and length is not None:
length -= payload.tell()
if not payload:
payload = BytesIO()
length = 0
if not warc_headers:
warc_headers = self._init_warc_headers(uri, record_type, warc_headers_dict)
# compute Content-Type
if not warc_content_type:
warc_content_type = warc_headers.get_header('Content-Type')
if not warc_content_type:
warc_content_type = self.WARC_RECORDS.get(record_type,
'application/warc-record')
record = ArcWarcRecord('warc', record_type, warc_headers, payload,
http_headers, warc_content_type, length)
record.payload_length = length
self.ensure_digest(record, block=False, payload=True)
return record
def _init_warc_headers(self, uri, record_type, warc_headers_dict):
warc_headers = StatusAndHeaders('', list(warc_headers_dict.items()), protocol=self.warc_version)
warc_headers.replace_header('WARC-Type', record_type)
if not warc_headers.get_header('WARC-Record-ID'):
warc_headers.add_header('WARC-Record-ID', self._make_warc_id())
if uri:
warc_headers.replace_header('WARC-Target-URI', uri)
if not warc_headers.get_header('WARC-Date'):
warc_headers.add_header('WARC-Date', self.curr_warc_date())
return warc_headers
def curr_warc_date(self):
use_micros = (self.warc_version >= self.WARC_1_1)
return self._make_warc_date(use_micros=use_micros)
def _parse_warc_version(self, version):
if not version:
return self.WARC_VERSION
version = str(version)
if version.startswith('WARC/'):
return version
return 'WARC/' + version
@classmethod
def _make_warc_id(cls):
return StatusAndHeadersParser.make_warc_id()
@classmethod
def _make_warc_date(cls, use_micros=False):
return datetime_to_iso_date(datetime.datetime.utcnow(), use_micros=use_micros)
def ensure_digest(self, record, block=True, payload=True):
if block:
if record.rec_headers.get_header('WARC-Block-Digest'):
block = False
if payload:
if (record.rec_headers.get_header('WARC-Payload-Digest') or
(record.rec_type in self.NO_PAYLOAD_DIGEST_TYPES)):
payload = False
block_digester = self._create_digester() if block else None
payload_digester = self._create_digester() if payload else None
has_length = (record.length is not None)
if not block_digester and not payload_digester and has_length:
return
temp_file = None
try:
# force buffering if no length is set
assert(has_length)
pos = record.raw_stream.tell()
record.raw_stream.seek(pos)
except:
pos = 0
temp_file = self._create_temp_file()
if block_digester and record.http_headers:
if not record.http_headers.headers_buff:
record.http_headers.compute_headers_buffer(self.header_filter)
block_digester.update(record.http_headers.headers_buff)
for buf in self._iter_stream(record.raw_stream):
if block_digester:
block_digester.update(buf)
if payload_digester:
payload_digester.update(buf)
if temp_file:
temp_file.write(buf)
if temp_file:
record.payload_length = temp_file.tell()
temp_file.seek(0)
record._orig_stream = record.raw_stream
record.raw_stream = temp_file
else:
record.raw_stream.seek(pos)
if payload_digester:
record.rec_headers.add_header('WARC-Payload-Digest', str(payload_digester))
if block_digester:
record.rec_headers.add_header('WARC-Block-Digest', str(block_digester))
@staticmethod
def _iter_stream(stream):
while True:
buf = stream.read(BUFF_SIZE)
if not buf:
return
yield buf
@staticmethod
def _create_digester():
return Digester('sha1')
@staticmethod
def _create_temp_file():
return tempfile.SpooledTemporaryFile(max_size=512*1024)
| 8,105
| 34.090909
| 114
|
py
|
warcio
|
warcio-master/warcio/warcwriter.py
|
import zlib
from socket import gethostname
from warcio.utils import Digester
from warcio.recordbuilder import RecordBuilder
from warcio.statusandheaders import StatusAndHeadersParser
# ============================================================================
class BaseWARCWriter(RecordBuilder):
def __init__(self, gzip=True, *args, **kwargs):
super(BaseWARCWriter, self).__init__(warc_version=kwargs.get('warc_version'),
header_filter=kwargs.get('header_filter'))
self.gzip = gzip
self.hostname = gethostname()
self.parser = StatusAndHeadersParser([], verify=False)
def write_request_response_pair(self, req, resp, params=None):
url = resp.rec_headers.get_header('WARC-Target-URI')
dt = resp.rec_headers.get_header('WARC-Date')
req.rec_headers.replace_header('WARC-Target-URI', url)
req.rec_headers.replace_header('WARC-Date', dt)
resp_id = resp.rec_headers.get_header('WARC-Record-ID')
if resp_id:
req.rec_headers.add_header('WARC-Concurrent-To', resp_id)
self._do_write_req_resp(req, resp, params)
def write_record(self, record, params=None): #pragma: no cover
raise NotImplemented()
def _do_write_req_resp(self, req, resp, params): #pragma: no cover
raise NotImplemented()
def _write_warc_record(self, out, record):
if self.gzip:
out = GzippingWrapper(out)
if record.http_headers:
record.http_headers.compute_headers_buffer(self.header_filter)
# Content-Length is None/unknown
# Fix record by: buffering and recomputing all digests and length
# (since no length, can't trust existing digests)
# Also remove content-type for consistent header ordering
if record.length is None:
record.rec_headers.remove_header('WARC-Block-Digest')
if record.rec_type != 'revisit':
record.rec_headers.remove_header('WARC-Payload-Digest')
record.rec_headers.remove_header('Content-Type')
self.ensure_digest(record, block=True, payload=True)
record.length = record.payload_length
# ensure digests are set
else:
self.ensure_digest(record, block=True, payload=True)
if record.content_type != None:
# ensure proper content type
record.rec_headers.replace_header('Content-Type', record.content_type)
if record.rec_type == 'revisit':
http_headers_only = True
else:
http_headers_only = False
# compute Content-Length
if record.http_headers and record.payload_length >= 0:
actual_len = 0
if record.http_headers:
actual_len = len(record.http_headers.headers_buff)
if not http_headers_only:
actual_len += record.payload_length
record.length = actual_len
record.rec_headers.replace_header('Content-Length', str(record.length))
# write record headers -- encoded as utf-8
# WARC headers can be utf-8 per spec
out.write(record.rec_headers.to_bytes(encoding='utf-8'))
# write headers buffer, if any
if record.http_headers:
out.write(record.http_headers.headers_buff)
if not http_headers_only:
try:
for buf in self._iter_stream(record.raw_stream):
out.write(buf)
finally:
if hasattr(record, '_orig_stream'):
record.raw_stream.close()
record.raw_stream = record._orig_stream
# add two lines
out.write(b'\r\n\r\n')
out.flush()
# ============================================================================
class GzippingWrapper(object):
def __init__(self, out):
self.compressor = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS + 16)
self.out = out
def write(self, buff):
#if isinstance(buff, str):
# buff = buff.encode('utf-8')
buff = self.compressor.compress(buff)
self.out.write(buff)
def flush(self):
buff = self.compressor.flush()
self.out.write(buff)
self.out.flush()
# ============================================================================
class WARCWriter(BaseWARCWriter):
def __init__(self, filebuf, *args, **kwargs):
super(WARCWriter, self).__init__(*args, **kwargs)
self.out = filebuf
def write_record(self, record, params=None):
self._write_warc_record(self.out, record)
def _do_write_req_resp(self, req, resp, params):
self._write_warc_record(self.out, resp)
self._write_warc_record(self.out, req)
# ============================================================================
class BufferWARCWriter(WARCWriter):
def __init__(self, *args, **kwargs):
out = self._create_temp_file()
super(BufferWARCWriter, self).__init__(out, *args, **kwargs)
def get_contents(self):
pos = self.out.tell()
self.out.seek(0)
buff = self.out.read()
self.out.seek(pos)
return buff
def get_stream(self):
self.out.seek(0)
return self.out
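# Minimal usage sketch (added for illustration; the URI and payload below are
# placeholders): build a 'response' record via create_warc_record (inherited
# from RecordBuilder) and write it out gzip-compressed to an in-memory buffer.
def _warc_writer_example():
    from io import BytesIO
    from warcio.statusandheaders import StatusAndHeaders
    writer = BufferWARCWriter(gzip=True)
    http_headers = StatusAndHeaders('200 OK',
                                    [('Content-Type', 'text/plain')],
                                    protocol='HTTP/1.1')
    record = writer.create_warc_record('http://example.com/', 'response',
                                       payload=BytesIO(b'sample payload'),
                                       http_headers=http_headers)
    writer.write_record(record)
    return writer.get_contents()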
| 5,316
| 31.820988
| 87
|
py
|
warcio
|
warcio-master/warcio/timeutils.py
|
"""
utility functions for converting between
datetime, iso date and 14-digit timestamp
"""
import re
import time
import datetime
import calendar
from email.utils import parsedate, formatdate
#=================================================================
# str <-> datetime conversion
#=================================================================
DATE_TIMESPLIT = re.compile(r'[^\d]')
TIMESTAMP_14 = '%Y%m%d%H%M%S'
ISO_DT = '%Y-%m-%dT%H:%M:%SZ'
PAD_14_DOWN = '10000101000000'
PAD_14_UP = '29991231235959'
PAD_6_UP = '299912'
PAD_MICRO = '000000'
def iso_date_to_datetime(string):
"""
>>> iso_date_to_datetime('2013-12-26T10:11:12Z')
datetime.datetime(2013, 12, 26, 10, 11, 12)
>>> iso_date_to_datetime('2013-12-26T10:11:12.456789Z')
datetime.datetime(2013, 12, 26, 10, 11, 12, 456789)
>>> iso_date_to_datetime('2013-12-26T10:11:12.30Z')
datetime.datetime(2013, 12, 26, 10, 11, 12, 300000)
>>> iso_date_to_datetime('2013-12-26T10:11:12.00001Z')
datetime.datetime(2013, 12, 26, 10, 11, 12, 10)
>>> iso_date_to_datetime('2013-12-26T10:11:12.000001Z')
datetime.datetime(2013, 12, 26, 10, 11, 12, 1)
>>> iso_date_to_datetime('2013-12-26T10:11:12.0000001Z')
datetime.datetime(2013, 12, 26, 10, 11, 12)
>>> iso_date_to_datetime('2013-12-26T10:11:12.000000Z')
datetime.datetime(2013, 12, 26, 10, 11, 12)
"""
nums = DATE_TIMESPLIT.split(string)
if nums[-1] == '':
nums = nums[:-1]
if len(nums) == 7:
nums[6] = nums[6][:6]
nums[6] += PAD_MICRO[len(nums[6]):]
the_datetime = datetime.datetime(*(int(num) for num in nums))
return the_datetime
def http_date_to_datetime(string):
"""
>>> http_date_to_datetime('Thu, 26 Dec 2013 09:50:10 GMT')
datetime.datetime(2013, 12, 26, 9, 50, 10)
"""
return datetime.datetime(*parsedate(string)[:6])
def datetime_to_http_date(the_datetime):
"""
>>> datetime_to_http_date(datetime.datetime(2013, 12, 26, 9, 50, 10))
'Thu, 26 Dec 2013 09:50:10 GMT'
# Verify inverses
>>> x = 'Thu, 26 Dec 2013 09:50:10 GMT'
>>> datetime_to_http_date(http_date_to_datetime(x)) == x
True
"""
timeval = calendar.timegm(the_datetime.utctimetuple())
return formatdate(timeval=timeval,
localtime=False,
usegmt=True)
def datetime_to_iso_date(the_datetime, use_micros=False):
"""
>>> datetime_to_iso_date(datetime.datetime(2013, 12, 26, 10, 11, 12))
'2013-12-26T10:11:12Z'
>>> datetime_to_iso_date(datetime.datetime(2013, 12, 26, 10, 11, 12, 456789))
'2013-12-26T10:11:12Z'
>>> datetime_to_iso_date(datetime.datetime(2013, 12, 26, 10, 11, 12), use_micros=True)
'2013-12-26T10:11:12Z'
>>> datetime_to_iso_date(datetime.datetime(2013, 12, 26, 10, 11, 12, 456789), use_micros=True)
'2013-12-26T10:11:12.456789Z'
>>> datetime_to_iso_date(datetime.datetime(2013, 12, 26, 10, 11, 12, 1), use_micros=True)
'2013-12-26T10:11:12.000001Z'
"""
if not use_micros:
return the_datetime.strftime(ISO_DT)
else:
return the_datetime.isoformat() + 'Z'
def datetime_to_timestamp(the_datetime):
"""
>>> datetime_to_timestamp(datetime.datetime(2013, 12, 26, 10, 11, 12))
'20131226101112'
"""
return the_datetime.strftime(TIMESTAMP_14)
def timestamp_now():
"""
>>> len(timestamp_now())
14
"""
return datetime_to_timestamp(datetime.datetime.utcnow())
def timestamp20_now():
"""
    Create a 20-digit timestamp, useful for timestamping temp files
>>> n = timestamp20_now()
>>> timestamp20_now() >= n
True
>>> len(n)
20
"""
now = datetime.datetime.utcnow()
return now.strftime('%Y%m%d%H%M%S%f')
def iso_date_to_timestamp(string):
"""
>>> iso_date_to_timestamp('2013-12-26T10:11:12Z')
'20131226101112'
>>> iso_date_to_timestamp('2013-12-26T10:11:12')
'20131226101112'
"""
return datetime_to_timestamp(iso_date_to_datetime(string))
def timestamp_to_iso_date(string):
"""
>>> timestamp_to_iso_date('20131226101112')
'2013-12-26T10:11:12Z'
>>> timestamp_to_iso_date('20131226101112')
'2013-12-26T10:11:12Z'
"""
return datetime_to_iso_date(timestamp_to_datetime(string))
def http_date_to_timestamp(string):
"""
>>> http_date_to_timestamp('Thu, 26 Dec 2013 09:50:00 GMT')
'20131226095000'
>>> http_date_to_timestamp('Sun, 26 Jan 2014 20:08:04 GMT')
'20140126200804'
"""
return datetime_to_timestamp(http_date_to_datetime(string))
# pad to a certain length (default 6)
def pad_timestamp(string, pad_str=PAD_6_UP):
"""
>>> pad_timestamp('20')
'209912'
>>> pad_timestamp('2014')
'201412'
>>> pad_timestamp('20141011')
'20141011'
>>> pad_timestamp('201410110010')
'201410110010'
"""
str_len = len(string)
pad_len = len(pad_str)
if str_len < pad_len:
string = string + pad_str[str_len:]
return string
def timestamp_to_datetime(string):
"""
# >14-digit -- rest ignored
>>> timestamp_to_datetime('2014122609501011')
datetime.datetime(2014, 12, 26, 9, 50, 10)
# 14-digit
>>> timestamp_to_datetime('20141226095010')
datetime.datetime(2014, 12, 26, 9, 50, 10)
# 13-digit padding
>>> timestamp_to_datetime('2014122609501')
datetime.datetime(2014, 12, 26, 9, 50, 59)
# 12-digit padding
>>> timestamp_to_datetime('201412260950')
datetime.datetime(2014, 12, 26, 9, 50, 59)
# 11-digit padding
>>> timestamp_to_datetime('20141226095')
datetime.datetime(2014, 12, 26, 9, 59, 59)
# 10-digit padding
>>> timestamp_to_datetime('2014122609')
datetime.datetime(2014, 12, 26, 9, 59, 59)
# 9-digit padding
>>> timestamp_to_datetime('201412260')
datetime.datetime(2014, 12, 26, 23, 59, 59)
# 8-digit padding
>>> timestamp_to_datetime('20141226')
datetime.datetime(2014, 12, 26, 23, 59, 59)
# 7-digit padding
>>> timestamp_to_datetime('2014122')
datetime.datetime(2014, 12, 31, 23, 59, 59)
# 6-digit padding
>>> timestamp_to_datetime('201410')
datetime.datetime(2014, 10, 31, 23, 59, 59)
# 5-digit padding
>>> timestamp_to_datetime('20141')
datetime.datetime(2014, 12, 31, 23, 59, 59)
# 4-digit padding
>>> timestamp_to_datetime('2014')
datetime.datetime(2014, 12, 31, 23, 59, 59)
# 3-digit padding
>>> timestamp_to_datetime('201')
datetime.datetime(2019, 12, 31, 23, 59, 59)
# 2-digit padding
>>> timestamp_to_datetime('20')
datetime.datetime(2099, 12, 31, 23, 59, 59)
# 1-digit padding
>>> timestamp_to_datetime('2')
datetime.datetime(2999, 12, 31, 23, 59, 59)
# 1-digit out-of-range padding
>>> timestamp_to_datetime('3')
datetime.datetime(2999, 12, 31, 23, 59, 59)
# 0-digit padding
>>> timestamp_to_datetime('')
datetime.datetime(2999, 12, 31, 23, 59, 59)
# bad month
>>> timestamp_to_datetime('20131709005601')
datetime.datetime(2013, 12, 9, 0, 56, 1)
# all out of range except minutes
>>> timestamp_to_datetime('40001965252477')
datetime.datetime(2999, 12, 31, 23, 24, 59)
# not a number!
>>> timestamp_to_datetime('2010abc')
datetime.datetime(2010, 12, 31, 23, 59, 59)
"""
# pad to 6 digits
string = pad_timestamp(string, PAD_6_UP)
def clamp(val, min_, max_):
try:
val = int(val)
val = max(min_, min(val, max_))
return val
except:
return max_
def extract(string, start, end, min_, max_):
if len(string) >= end:
return clamp(string[start:end], min_, max_)
else:
return max_
# now parse, clamp to boundary
year = extract(string, 0, 4, 1900, 2999)
month = extract(string, 4, 6, 1, 12)
day = extract(string, 6, 8, 1, calendar.monthrange(year, month)[1])
hour = extract(string, 8, 10, 0, 23)
minute = extract(string, 10, 12, 0, 59)
second = extract(string, 12, 14, 0, 59)
return datetime.datetime(year=year,
month=month,
day=day,
hour=hour,
minute=minute,
second=second)
#return time.strptime(pad_timestamp(string), TIMESTAMP_14)
def timestamp_to_sec(string):
"""
>>> timestamp_to_sec('20131226095010')
1388051410
# rounds to end of 2014
>>> timestamp_to_sec('2014')
1420070399
"""
return calendar.timegm(timestamp_to_datetime(string).utctimetuple())
def sec_to_timestamp(secs):
"""
>>> sec_to_timestamp(1388051410)
'20131226095010'
>>> sec_to_timestamp(1420070399)
'20141231235959'
"""
return datetime_to_timestamp(datetime.datetime.utcfromtimestamp(secs))
def timestamp_to_http_date(string):
"""
>>> timestamp_to_http_date('20131226095000')
'Thu, 26 Dec 2013 09:50:00 GMT'
>>> timestamp_to_http_date('20140126200804')
'Sun, 26 Jan 2014 20:08:04 GMT'
"""
return datetime_to_http_date(timestamp_to_datetime(string))
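# A hedged usage sketch (added for illustration, not part of the original module):
# round-trip one instant through the three supported string forms using the
# helpers above; the expected values mirror the doctests in this file.
def _example_round_trip():
    """
    >>> _example_round_trip()
    ('20131226101112', '2013-12-26T10:11:12Z', 'Thu, 26 Dec 2013 10:11:12 GMT')
    """
    ts = iso_date_to_timestamp('2013-12-26T10:11:12Z')
    return (ts,
            timestamp_to_iso_date(ts),
            timestamp_to_http_date(ts))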
if __name__ == "__main__": #pragma: no cover
import doctest
doctest.testmod()
| 9,303
| 24.56044
| 98
|
py
|
warcio
|
warcio-master/warcio/digestverifyingreader.py
|
import base64
import sys
from warcio.limitreader import LimitReader
from warcio.utils import to_native_str, Digester
from warcio.exceptions import ArchiveLoadFailed
# ============================================================================
class DigestChecker(object):
def __init__(self, kind=None):
self._problem = []
self._passed = None
self.kind = kind
@property
def passed(self):
return self._passed
@passed.setter
def passed(self, value):
self._passed = value
@property
def problems(self):
return self._problem
def problem(self, value, passed=False):
self._problem.append(value)
if self.kind == 'raise':
raise ArchiveLoadFailed(value)
if self.kind == 'log':
sys.stderr.write(value + '\n')
self._passed = passed
# ============================================================================
class DigestVerifyingReader(LimitReader):
"""
A reader which verifies the digest of the wrapped reader
"""
def __init__(self, stream, limit, digest_checker, record_type=None,
payload_digest=None, block_digest=None, segment_number=None):
super(DigestVerifyingReader, self).__init__(stream, limit)
self.digest_checker = digest_checker
if record_type == 'revisit':
block_digest = None
payload_digest = None
if segment_number is not None: #pragma: no cover
payload_digest = None
self.payload_digest = payload_digest
self.block_digest = block_digest
self.payload_digester = None
self.payload_digester_obj = None
self.block_digester = None
if block_digest:
try:
algo, _ = _parse_digest(block_digest)
self.block_digester = Digester(algo)
except ValueError:
self.digest_checker.problem('unknown hash algorithm name in block digest')
self.block_digester = None
if payload_digest:
try:
algo, _ = _parse_digest(self.payload_digest)
self.payload_digester_obj = Digester(algo)
except ValueError:
self.digest_checker.problem('unknown hash algorithm name in payload digest')
self.payload_digester_obj = None
def begin_payload(self):
self.payload_digester = self.payload_digester_obj
if self.limit == 0:
check = _compare_digest_rfc_3548(self.payload_digester, self.payload_digest)
if check is False:
self.digest_checker.problem('payload digest failed: {}'.format(self.payload_digest))
self.payload_digester = None # prevent double-fire
elif check is True and self.digest_checker.passed is not False:
self.digest_checker.passed = True
def _update(self, buff):
super(DigestVerifyingReader, self)._update(buff)
if self.payload_digester:
self.payload_digester.update(buff)
if self.block_digester:
self.block_digester.update(buff)
if self.limit == 0:
check = _compare_digest_rfc_3548(self.block_digester, self.block_digest)
if check is False:
self.digest_checker.problem('block digest failed: {}'.format(self.block_digest))
elif check is True and self.digest_checker.passed is not False:
self.digest_checker.passed = True
check = _compare_digest_rfc_3548(self.payload_digester, self.payload_digest)
if check is False:
self.digest_checker.problem('payload digest failed {}'.format(self.payload_digest))
elif check is True and self.digest_checker.passed is not False:
self.digest_checker.passed = True
return buff
def _compare_digest_rfc_3548(digester, digest):
'''
The WARC standard does not recommend a digest algorithm and appears to
allow any encoding from RFC3548. The Python base64 module supports
RFC3548 although the base64 alternate alphabet is not exactly a first
class citizen. Hopefully digest algos are named with the same names
used by OpenSSL.
'''
if not digester or not digest:
return None
digester_b32 = str(digester)
our_algo, our_value = _parse_digest(digester_b32)
warc_algo, warc_value = _parse_digest(digest)
warc_b32 = _to_b32(len(our_value), warc_value)
if our_value == warc_b32:
return True
return False
def _to_b32(length, value):
'''
Convert value to base 32, given that it's supposed to have the same
length as the digest we're about to compare it to
'''
if len(value) == length:
return value # casefold needed here? -- rfc recommends not allowing
if len(value) > length:
binary = base64.b16decode(value, casefold=True)
else:
binary = _b64_wrapper(value)
return to_native_str(base64.b32encode(binary), encoding='ascii')
base64_url_filename_safe_alt = b'-_'
def _b64_wrapper(value):
if '-' in value or '_' in value:
return base64.b64decode(value, altchars=base64_url_filename_safe_alt)
else:
return base64.b64decode(value)
def _parse_digest(digest):
algo, sep, value = digest.partition(':')
if sep == ':':
return algo, value
else:
raise ValueError('could not parse digest algorithm out of '+digest)
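# A hedged usage sketch (added for illustration, not part of the original module):
# split a WARC digest header value into algorithm and value with _parse_digest(),
# and show that _to_b32() leaves an already-base32 value unchanged. The digest is
# the well-known sha1 of an empty payload, as used elsewhere in the test suite.
def _example_digest_helpers():
    """
    >>> _example_digest_helpers()
    ('sha1', '3I42H3S6NNFQ2MSVX7XZKYAYSCX5QBYJ')
    """
    algo, value = _parse_digest('sha1:3I42H3S6NNFQ2MSVX7XZKYAYSCX5QBYJ')
    return algo, _to_b32(len(value), value)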
| 5,500
| 31.94012
| 100
|
py
|
warcio
|
warcio-master/warcio/cli.py
|
from argparse import ArgumentParser, RawTextHelpFormatter
from warcio.indexer import Indexer
from warcio.checker import Checker
from warcio.extractor import Extractor
from warcio.recompressor import Recompressor
import sys
# ============================================================================
def main(args=None):
parser = ArgumentParser(description='warcio utils',
formatter_class=RawTextHelpFormatter)
parser.add_argument('-V', '--version', action='version', version=get_version())
subparsers = parser.add_subparsers(dest='cmd')
subparsers.required = True
index = subparsers.add_parser('index', help='WARC/ARC Indexer')
index.add_argument('inputs', nargs='*', help='input file(s); default is stdin')
index.add_argument('-f', '--fields', default='offset,warc-type,warc-target-uri',
help='fields to include in json output; supported values are "offset", '
'"length", "filename", "http:status", "http:{http-header}" '
'(arbitrary http header), and "{warc-header}" (arbitrary warc '
'record header)')
index.add_argument('-o', '--output', help='output file; default is stdout')
index.set_defaults(func=indexer)
recompress = subparsers.add_parser('recompress', help='Recompress an existing WARC or ARC',
description='Read an existing, possibly broken WARC ' +
'and correctly recompress it to fix any compression errors\n' +
'Also convert any ARC file to a standard compressed WARC file')
recompress.add_argument('filename')
recompress.add_argument('output')
recompress.add_argument('-v', '--verbose', action='store_true')
recompress.set_defaults(func=recompressor)
extract = subparsers.add_parser('extract', help='Extract WARC/ARC Record')
extract.add_argument('filename')
extract.add_argument('offset')
group = extract.add_mutually_exclusive_group()
group.add_argument('--payload', action='store_true', help='output only record payload (after content and transfer decoding, if applicable)')
group.add_argument('--headers', action='store_true', help='output only record headers (and http headers, if applicable)')
extract.set_defaults(func=extractor)
check = subparsers.add_parser('check', help='WARC digest checker')
check.add_argument('inputs', nargs='+')
check.add_argument('-v', '--verbose', action='store_true')
check.set_defaults(func=checker)
cmd = parser.parse_args(args=args)
cmd.func(cmd)
# ============================================================================
def get_version():
import pkg_resources
return '%(prog)s ' + pkg_resources.get_distribution('warcio').version
# ============================================================================
def indexer(cmd):
inputs = cmd.inputs or ('-',) # default to stdin
_indexer = Indexer(cmd.fields, inputs, cmd.output)
_indexer.process_all()
# ============================================================================
def checker(cmd):
_checker = Checker(cmd)
sys.exit(_checker.process_all())
# ============================================================================
def extractor(cmd):
_extractor = Extractor(cmd.filename, cmd.offset)
_extractor.extract(cmd.payload, cmd.headers)
# ============================================================================
def recompressor(cmd):
_recompressor = Recompressor(cmd.filename, cmd.output, cmd.verbose)
_recompressor.recompress()
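# A hedged usage sketch (added for illustration, not part of the original module):
# main() can also be driven programmatically by passing an argv-style list, just
# as the test suite does; the filename below is a placeholder.
def _example_programmatic_index(path='example.warc.gz'):
    main(args=['index', path, '-f', 'offset,warc-type,warc-target-uri'])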
# ============================================================================
if __name__ == "__main__": #pragma: no cover
main()
| 3,803
| 39.903226
| 144
|
py
|
warcio
|
warcio-master/warcio/extractor.py
|
from warcio.archiveiterator import ArchiveIterator
from warcio.utils import BUFF_SIZE
import sys
# ============================================================================
class Extractor(object):
READ_SIZE = BUFF_SIZE * 4
def __init__(self, filename, offset):
self.filename = filename
self.offset = offset
def extract(self, payload_only, headers_only):
with open(self.filename, 'rb') as fh:
fh.seek(int(self.offset))
it = iter(ArchiveIterator(fh))
record = next(it)
try:
stdout_raw = sys.stdout.buffer
except AttributeError: #pragma: no cover
stdout_raw = sys.stdout
if payload_only:
stream = record.content_stream()
buf = stream.read(self.READ_SIZE)
while buf:
stdout_raw.write(buf)
buf = stream.read(self.READ_SIZE)
else:
stdout_raw.write(record.rec_headers.to_bytes())
if record.http_headers:
stdout_raw.write(record.http_headers.to_bytes())
if not headers_only:
buf = record.raw_stream.read(self.READ_SIZE)
while buf:
stdout_raw.write(buf)
buf = record.raw_stream.read(self.READ_SIZE)
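# A hedged usage sketch (added for illustration, not part of the original module):
# extract only the payload of the record starting at a known byte offset, the
# same call the 'extract' CLI command makes; filename and offset are placeholders
# (offset 784 is a response record in the example WARC used by the tests).
def _example_extract_payload(path='example.warc.gz', offset='784'):
    Extractor(path, offset).extract(payload_only=True, headers_only=False)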
| 1,400
| 31.581395
| 78
|
py
|
warcio
|
warcio-master/warcio/recordloader.py
|
from warcio.statusandheaders import StatusAndHeaders
from warcio.statusandheaders import StatusAndHeadersParser
from warcio.statusandheaders import StatusAndHeadersParserException
from warcio.exceptions import ArchiveLoadFailed
from warcio.limitreader import LimitReader
from warcio.digestverifyingreader import DigestVerifyingReader, DigestChecker
from warcio.bufferedreaders import BufferedReader, ChunkedDataReader
from warcio.timeutils import timestamp_to_iso_date
from six.moves import zip
import logging
logger = logging.getLogger(__name__)
#=================================================================
class ArcWarcRecord(object):
def __init__(self, *args, **kwargs):
(self.format, self.rec_type, self.rec_headers, self.raw_stream,
self.http_headers, self.content_type, self.length) = args
self.payload_length = kwargs.get('payload_length', -1)
self.digest_checker = kwargs.get('digest_checker')
def content_stream(self):
if not self.http_headers:
return self.raw_stream
encoding = self.http_headers.get_header('content-encoding')
if encoding:
encoding = encoding.lower()
if encoding not in BufferedReader.get_supported_decompressors():
encoding = None
if self.http_headers.get_header('transfer-encoding') == 'chunked':
return ChunkedDataReader(self.raw_stream, decomp_type=encoding)
elif encoding:
return BufferedReader(self.raw_stream, decomp_type=encoding)
else:
return self.raw_stream
#=================================================================
class ArcWarcRecordLoader(object):
WARC_TYPES = ['WARC/1.1', 'WARC/1.0', 'WARC/0.17', 'WARC/0.18']
HTTP_TYPES = ['HTTP/1.0', 'HTTP/1.1']
HTTP_VERBS = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE',
'OPTIONS', 'CONNECT', 'PATCH']
HTTP_RECORDS = ('response', 'request', 'revisit')
NON_HTTP_SCHEMES = ('dns:', 'whois:', 'ntp:')
HTTP_SCHEMES = ('http:', 'https:')
def __init__(self, verify_http=True, arc2warc=True):
if arc2warc:
self.arc_parser = ARC2WARCHeadersParser()
else:
self.arc_parser = ARCHeadersParser()
self.warc_parser = StatusAndHeadersParser(self.WARC_TYPES)
self.http_parser = StatusAndHeadersParser(self.HTTP_TYPES, verify_http)
self.http_req_parser = StatusAndHeadersParser(self.HTTP_VERBS, verify_http)
def parse_record_stream(self, stream,
statusline=None,
known_format=None,
no_record_parse=False,
ensure_http_headers=False,
check_digests=False):
""" Parse file-like stream and return an ArcWarcRecord
encapsulating the record headers, http headers (if any),
and a stream limited to the remainder of the record.
        Pass statusline and known_format to _detect_type_load_headers()
        to facilitate parsing.
"""
(the_format, rec_headers) = (self.
_detect_type_load_headers(stream,
statusline,
known_format))
if the_format == 'arc':
uri = rec_headers.get_header('uri')
length = rec_headers.get_header('length')
content_type = rec_headers.get_header('content-type')
sub_len = rec_headers.total_len
if uri and uri.startswith('filedesc://'):
rec_type = 'arc_header'
else:
rec_type = 'response'
elif the_format in ('warc', 'arc2warc'):
rec_type = rec_headers.get_header('WARC-Type')
uri = self._ensure_target_uri_format(rec_headers)
length = rec_headers.get_header('Content-Length')
content_type = rec_headers.get_header('Content-Type')
if the_format == 'warc':
sub_len = 0
else:
sub_len = rec_headers.total_len
the_format = 'warc'
is_err = False
try:
if length is not None:
length = int(length) - sub_len
if length < 0:
is_err = True
except (ValueError, TypeError):
is_err = True
# err condition
if is_err:
length = 0
is_verifying = False
digest_checker = DigestChecker(check_digests)
# limit stream to the length for all valid records
if length is not None and length >= 0:
stream = LimitReader.wrap_stream(stream, length)
if check_digests:
stream, is_verifying = self.wrap_digest_verifying_stream(stream, rec_type,
rec_headers, digest_checker,
length=length)
http_headers = None
payload_length = -1
# load http headers if parsing
if not no_record_parse:
start = stream.tell()
http_headers = self.load_http_headers(rec_type, uri, stream, length)
if length and http_headers:
payload_length = length - (stream.tell() - start)
        # generate valid http headers (e.g. for replay)
if not http_headers and ensure_http_headers:
http_headers = self.default_http_headers(length, content_type)
if is_verifying:
stream.begin_payload()
return ArcWarcRecord(the_format, rec_type,
rec_headers, stream, http_headers,
content_type, length, payload_length=payload_length, digest_checker=digest_checker)
def wrap_digest_verifying_stream(self, stream, rec_type, rec_headers, digest_checker, length=None):
payload_digest = rec_headers.get_header('WARC-Payload-Digest')
block_digest = rec_headers.get_header('WARC-Block-Digest')
segment_number = rec_headers.get_header('WARC-Segment-Number')
if not payload_digest and not block_digest:
return stream, False
stream = DigestVerifyingReader(stream, length, digest_checker,
record_type=rec_type,
payload_digest=payload_digest,
block_digest=block_digest,
segment_number=segment_number)
return stream, True
def load_http_headers(self, rec_type, uri, stream, length):
        # don't parse only when length == 0;
        # try parsing if length is unknown (length is None) or length > 0
if length == 0:
return None
# only certain record types can have http headers
if rec_type not in self.HTTP_RECORDS:
return None
# only http:/https: uris can have http headers
if not uri.startswith(self.HTTP_SCHEMES):
return None
# request record: parse request
if rec_type == 'request':
return self.http_req_parser.parse(stream)
elif rec_type == 'revisit':
try:
return self.http_parser.parse(stream)
except EOFError:
                # an empty revisit with no http headers is ok!
return None
# response record or non-empty revisit: parse HTTP status and headers!
else:
return self.http_parser.parse(stream)
def default_http_headers(self, length, content_type=None):
headers = []
if content_type:
headers.append(('Content-Type', content_type))
if length is not None and length >= 0:
headers.append(('Content-Length', str(length)))
return StatusAndHeaders('200 OK', headers=headers, protocol='HTTP/1.0')
def _detect_type_load_headers(self, stream,
statusline=None, known_format=None):
""" If known_format is specified ('warc' or 'arc'),
parse only as that format.
        Otherwise, try parsing the record as WARC, then try parsing as ARC.
        If neither one succeeds, we're out of luck.
"""
if known_format != 'arc':
# try as warc first
try:
rec_headers = self.warc_parser.parse(stream, statusline)
return 'warc', rec_headers
except StatusAndHeadersParserException as se:
if known_format == 'warc':
msg = 'Invalid WARC record, first line: '
raise ArchiveLoadFailed(msg + str(se.statusline))
statusline = se.statusline
pass
# now try as arc
try:
rec_headers = self.arc_parser.parse(stream, statusline)
return self.arc_parser.get_rec_type(), rec_headers
except StatusAndHeadersParserException as se:
if known_format == 'arc':
msg = 'Invalid ARC record, first line: '
else:
msg = 'Unknown archive format, first line: '
raise ArchiveLoadFailed(msg + str(se.statusline))
def _ensure_target_uri_format(self, rec_headers):
"""Checks the value for the WARC-Target-URI header field to see if it starts
with '<' and ends with '>' (Wget 1.19 bug) and if '<' and '>' are present,
        corrects and updates the field, returning the corrected value;
        otherwise just returns the field's value. Also checks for the presence of
spaces and percent-encodes them if present, for more reliable parsing
downstream.
:param StatusAndHeaders rec_headers: The parsed WARC headers
:return: The value for the WARC-Target-URI field
:rtype: str | None
"""
uri = rec_headers.get_header('WARC-Target-URI')
if uri is not None and uri.startswith('<') and uri.endswith('>'):
uri = uri[1:-1]
rec_headers.replace_header('WARC-Target-URI', uri)
if uri is not None and " " in uri:
logger.warning("Replacing spaces in invalid WARC-Target-URI: {}".format(uri))
uri = uri.replace(" ", "%20")
rec_headers.replace_header('WARC-Target-URI', uri)
return uri
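# A hedged usage sketch (added for illustration, not part of the original module):
# parse the first record of a plain (uncompressed) WARC file with the loader
# above; the filename is a placeholder, and gzipped input would first need a
# decompressing reader.
def _example_load_first_record(path='example.warc'):
    loader = ArcWarcRecordLoader()
    with open(path, 'rb') as fh:
        record = loader.parse_record_stream(fh)
        return record.rec_type, record.length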
#=================================================================
class ARCHeadersParser(object):
# ARC 1.0 headers
ARC_HEADERS = ["uri", "ip-address", "archive-date",
"content-type", "length"]
def __init__(self):
self.headernames = self.get_header_names()
def get_rec_type(self):
return 'arc'
def parse(self, stream, headerline=None):
total_read = 0
if headerline is None:
headerline = stream.readline()
headerline = StatusAndHeadersParser.decode_header(headerline)
header_len = len(headerline)
if header_len == 0:
raise EOFError()
headerline = headerline.rstrip()
headernames = self.headernames
# if arc header, consume next two lines
if headerline.startswith('filedesc://'):
version = StatusAndHeadersParser.decode_header(stream.readline()) # skip version
spec = StatusAndHeadersParser.decode_header(stream.readline()) # skip header spec, use preset one
total_read += len(version)
total_read += len(spec)
parts = headerline.rsplit(' ', len(headernames)-1)
if len(parts) != len(headernames):
msg = 'Wrong # of headers, expected arc headers {0}, Found {1}'
msg = msg.format(headernames, parts)
raise StatusAndHeadersParserException(msg, parts)
protocol, headers = self._get_protocol_and_headers(headerline, parts)
return StatusAndHeaders(statusline='',
headers=headers,
protocol='WARC/1.0',
total_len=total_read)
@classmethod
def get_header_names(cls):
return cls.ARC_HEADERS
def _get_protocol_and_headers(self, headerline, parts):
headers = []
for name, value in zip(self.headernames, parts):
headers.append((name, value))
return ('ARC/1.0', headers)
#=================================================================
class ARC2WARCHeadersParser(ARCHeadersParser):
# Headers for converting ARC -> WARC Header
ARC_TO_WARC_HEADERS = ["WARC-Target-URI",
"WARC-IP-Address",
"WARC-Date",
"Content-Type",
"Content-Length"]
def get_rec_type(self):
return 'arc2warc'
@classmethod
def get_header_names(cls):
return cls.ARC_TO_WARC_HEADERS
def _get_protocol_and_headers(self, headerline, parts):
headers = []
if headerline.startswith('filedesc://'):
rec_type = 'warcinfo'
else:
rec_type = 'response'
parts[3] = 'application/http;msgtype=response'
headers.append(('WARC-Type', rec_type))
headers.append(('WARC-Record-ID', StatusAndHeadersParser.make_warc_id()))
for name, value in zip(self.headernames, parts):
if name == 'WARC-Date':
value = timestamp_to_iso_date(value)
if rec_type == 'warcinfo' and name == 'WARC-Target-URI':
name = 'WARC-Filename'
value = value[len('filedesc://'):]
headers.append((name, value))
return ('WARC/1.0', headers)
| 13,804
| 35.715426
| 112
|
py
|
warcio
|
warcio-master/warcio/__init__.py
|
from warcio.statusandheaders import StatusAndHeaders
from warcio.archiveiterator import ArchiveIterator
from warcio.warcwriter import WARCWriter
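# A hedged usage sketch (added for illustration, not part of the original module):
# iterate a gzipped WARC with the package-level ArchiveIterator export above and
# collect the record types; the filename is a placeholder.
def _example_record_types(path='example.warc.gz'):
    with open(path, 'rb') as stream:
        return [record.rec_type for record in ArchiveIterator(stream)]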
| 145
| 35.5
| 52
|
py
|
warcio
|
warcio-master/warcio/recompressor.py
|
from warcio.archiveiterator import ArchiveIterator
from warcio.exceptions import ArchiveLoadFailed
from warcio.warcwriter import WARCWriter
from warcio.bufferedreaders import DecompressingBufferedReader
import tempfile
import shutil
import traceback
import sys
# ============================================================================
class Recompressor(object):
def __init__(self, filename, output, verbose=False):
self.filename = filename
self.output = output
self.verbose = verbose
def recompress(self):
from warcio.cli import main
try:
count = 0
msg = ''
with open(self.filename, 'rb') as stream:
try:
count = self.load_and_write(stream, self.output)
msg = 'No Errors Found!'
except Exception as e:
if self.verbose:
print('Parsing Error(s) Found:')
print(str(e) if isinstance(e, ArchiveLoadFailed) else repr(e))
print()
count = self.decompress_and_recompress(stream, self.output)
msg = 'Compression Errors Found and Fixed!'
if self.verbose:
print('Records successfully read and compressed:')
main(['index', self.output])
print('')
print('{0} records read and recompressed to file: {1}'.format(count, self.output))
print(msg)
except:
if self.verbose:
print('Exception Details:')
traceback.print_exc()
print('')
print('Recompress Failed: {0} could not be read as a WARC or ARC'.format(self.filename))
sys.exit(1)
def load_and_write(self, stream, output):
count = 0
with open(output, 'wb') as out:
writer = WARCWriter(filebuf=out, gzip=True)
for record in ArchiveIterator(stream,
no_record_parse=False,
arc2warc=True,
verify_http=False):
writer.write_record(record)
count += 1
return count
def decompress_and_recompress(self, stream, output):
with tempfile.TemporaryFile() as tout:
decomp = DecompressingBufferedReader(stream, read_all_members=True)
# decompress entire file to temp file
stream.seek(0)
shutil.copyfileobj(decomp, tout)
# attempt to compress and write temp
tout.seek(0)
return self.load_and_write(tout, output)
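# A hedged usage sketch (added for illustration, not part of the original module):
# rewrite a possibly broken gzipped WARC record-by-record, the same call the
# 'recompress' CLI command makes; both filenames are placeholders.
def _example_recompress(src='broken.warc.gz', dest='fixed.warc.gz'):
    Recompressor(src, dest, verbose=True).recompress()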
| 2,751
| 31.761905
| 100
|
py
|
warcio
|
warcio-master/warcio/capture_http.py
|
import threading
from io import BytesIO
from six.moves import http_client as httplib
from contextlib import contextmanager
from array import array
from warcio.utils import to_native_str, BUFF_SIZE, open
from warcio.warcwriter import WARCWriter, BufferWARCWriter
from tempfile import SpooledTemporaryFile
# ============================================================================
orig_connection = httplib.HTTPConnection
# ============================================================================
class RecordingStream(object):
def __init__(self, fp, recorder):
self.fp = fp
self.recorder = recorder
self.recorder.set_remote_ip(self._get_remote_ip())
def _get_remote_ip(self):
try:
fp = self.fp
# for python 3, need to get 'raw' fp
if hasattr(fp, 'raw'): #pragma: no cover
fp = fp.raw
socket = fp._sock
# wrapped ssl socket
if hasattr(socket, 'socket'):
socket = socket.socket
return socket.getpeername()[0]
except Exception: #pragma: no cover
return None
# Used in PY2 Only
def read(self, amt=None): #pragma: no cover
buff = self.fp.read(amt)
self.recorder.write_response(buff)
return buff
# Used in PY3 Only
def readinto(self, buff): #pragma: no cover
res = self.fp.readinto(buff)
self.recorder.write_response(buff)
return res
def readline(self, maxlen=-1):
line = self.fp.readline(maxlen)
self.recorder.write_response(line)
return line
def close(self):
self.recorder.done()
if self.fp:
return self.fp.close()
def flush(self):
return self.fp.flush()
# ============================================================================
class RecordingHTTPResponse(httplib.HTTPResponse):
def __init__(self, recorder, *args, **kwargs):
httplib.HTTPResponse.__init__(self, *args, **kwargs)
self.fp = RecordingStream(self.fp, recorder)
# ============================================================================
class RecordingHTTPConnection(httplib.HTTPConnection):
local = threading.local()
def __init__(self, *args, **kwargs):
orig_connection.__init__(self, *args, **kwargs)
if hasattr(self.local, 'recorder'):
self.recorder = self.local.recorder
else:
self.recorder = None
def make_recording_response(*args, **kwargs):
return RecordingHTTPResponse(self.recorder, *args, **kwargs)
if self.recorder:
self.response_class = make_recording_response
def send(self, data):
if not self.recorder:
orig_connection.send(self, data)
return
def send_request(buff):
self.recorder.extract_url(buff, self.host, self.port, self.default_port)
orig_connection.send(self, buff)
self.recorder.write_request(buff)
# if sending request body as stream
# (supported via httplib but seems unused via higher-level apis)
if hasattr(data, 'read') and not isinstance(data, array): #pragma: no cover
while True:
buff = data.read(BUFF_SIZE)
if not buff:
break
send_request(buff)
else:
send_request(data)
def _tunnel(self, *args, **kwargs):
if self.recorder:
self.recorder.start_tunnel()
return orig_connection._tunnel(self, *args, **kwargs)
def putrequest(self, *args, **kwargs):
if self.recorder:
self.recorder.start()
return orig_connection.putrequest(self, *args, **kwargs)
# ============================================================================
class RequestRecorder(object):
def __init__(self, writer, filter_func=None, record_ip=True):
self.writer = writer
self.filter_func = filter_func
self.request_out = None
self.response_out = None
self.url = None
self.connect_host = self.connect_port = None
self.started_req = False
self.first_line_read = False
self.lock = threading.Lock()
self.warc_headers = {}
self.record_ip = record_ip
def start_tunnel(self):
self.connect_host = self.connect_port = None
self.started_req = False
self.first_line_read = False
def start(self):
self.request_out = self._create_buffer()
self.response_out = self._create_buffer()
self.url = None
self.started_req = True
self.first_line_read = False
def _create_buffer(self):
return SpooledTemporaryFile(BUFF_SIZE)
def set_remote_ip(self, remote_ip):
if self.record_ip and remote_ip: #pragma: no cover
self.warc_headers['WARC-IP-Address'] = remote_ip
def write_request(self, buff):
if self.started_req:
self.request_out.write(buff)
def write_response(self, buff):
if self.started_req:
self.response_out.write(buff)
def _create_record(self, out, record_type):
length = out.tell()
out.seek(0)
return self.writer.create_warc_record(
warc_headers_dict=self.warc_headers,
uri=self.url,
record_type=record_type,
payload=out,
length=length)
def done(self):
if not self.started_req:
return
try:
request = self._create_record(self.request_out, 'request')
response = self._create_record(self.response_out, 'response')
if self.filter_func:
request, response = self.filter_func(request, response, self)
if not request or not response:
return
with self.lock:
self.writer.write_request_response_pair(request, response)
finally:
self.request_out.close()
self.response_out.close()
def extract_url(self, data, host, port, default_port):
if self.first_line_read:
return
self.first_line_read = True
buff = BytesIO(data)
line = to_native_str(buff.readline(), 'latin-1')
parts = line.split(' ', 2)
verb = parts[0]
path = parts[1]
if verb == "CONNECT":
parts = path.split(":", 1)
self.connect_host = parts[0]
self.connect_port = int(parts[1]) if len(parts) > 1 else default_port
self.warc_headers['WARC-Proxy-Host'] = "https://{0}:{1}".format(host, port)
return
if self.connect_host:
host = self.connect_host
if self.connect_port:
port = self.connect_port
if path.startswith(('http:', 'https:')):
self.warc_headers['WARC-Proxy-Host'] = "http://{0}:{1}".format(host, port)
self.url = path
return
scheme = 'https' if default_port == 443 else 'http'
self.url = scheme + '://' + host
if port != default_port:
self.url += ':' + str(port)
self.url += path
# ============================================================================
httplib.HTTPConnection = RecordingHTTPConnection
# ============================================================================
@contextmanager
def capture_http(warc_writer=None, filter_func=None, append=True,
record_ip=True, **kwargs):
out = None
    if warc_writer is None:
if 'gzip' not in kwargs:
kwargs['gzip'] = False
warc_writer = BufferWARCWriter(**kwargs)
if isinstance(warc_writer, str):
out = open(warc_writer, 'ab' if append else 'xb')
warc_writer = WARCWriter(out, **kwargs)
try:
recorder = RequestRecorder(warc_writer, filter_func, record_ip=record_ip)
RecordingHTTPConnection.local.recorder = recorder
yield warc_writer
finally:
RecordingHTTPConnection.local.recorder = None
if out:
out.close()
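# A hedged usage sketch (added for illustration, not part of the original module):
# record live HTTP traffic into a WARC file via the capture_http() context
# manager above. It assumes the third-party 'requests' library is installed and
# imported only after this module, so that it picks up the patched
# HTTPConnection; the URL and output path are placeholders.
def _example_capture_to_file(path='capture.warc.gz'):
    import requests  # imported late on purpose, after HTTPConnection is patched
    with capture_http(path):
        requests.get('http://example.com/')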
| 8,176
| 29.285185
| 87
|
py
|
warcio
|
warcio-master/test/test_cli.py
|
from warcio.cli import main
from . import get_test_file
from contextlib import contextmanager
from io import BytesIO
from warcio.exceptions import ArchiveLoadFailed
import pytest
import sys
import tempfile
import os
def test_index(capsys):
files = ['example.warc.gz', 'example.warc', 'example.arc.gz', 'example.arc']
files = [get_test_file(filename) for filename in files]
args = ['index', '-f', 'length,offset,warc-type,warc-target-uri,warc-filename,http:content-type']
args.extend(files)
expected = """\
{"length": "353", "offset": "0", "warc-type": "warcinfo", "warc-filename": "temp-20170306040353.warc.gz"}
{"length": "431", "offset": "353", "warc-type": "warcinfo", "warc-filename": "temp-20170306040353.warc.gz"}
{"length": "1228", "offset": "784", "warc-type": "response", "warc-target-uri": "http://example.com/", "http:content-type": "text/html"}
{"length": "609", "offset": "2012", "warc-type": "request", "warc-target-uri": "http://example.com/"}
{"length": "586", "offset": "2621", "warc-type": "revisit", "warc-target-uri": "http://example.com/", "http:content-type": "text/html"}
{"length": "609", "offset": "3207", "warc-type": "request", "warc-target-uri": "http://example.com/"}
{"length": "484", "offset": "0", "warc-type": "warcinfo", "warc-filename": "temp-20170306040353.warc.gz"}
{"length": "705", "offset": "488", "warc-type": "warcinfo", "warc-filename": "temp-20170306040353.warc.gz"}
{"length": "1365", "offset": "1197", "warc-type": "response", "warc-target-uri": "http://example.com/", "http:content-type": "text/html"}
{"length": "800", "offset": "2566", "warc-type": "request", "warc-target-uri": "http://example.com/"}
{"length": "942", "offset": "3370", "warc-type": "revisit", "warc-target-uri": "http://example.com/", "http:content-type": "text/html"}
{"length": "800", "offset": "4316", "warc-type": "request", "warc-target-uri": "http://example.com/"}
{"length": "171", "offset": "0", "warc-type": "warcinfo", "warc-filename": "live-web-example.arc.gz"}
{"length": "856", "offset": "171", "warc-type": "response", "warc-target-uri": "http://example.com/", "http:content-type": "text/html"}
{"length": "150", "offset": "0", "warc-type": "warcinfo", "warc-filename": "live-web-example.arc.gz"}
{"length": "1656", "offset": "151", "warc-type": "response", "warc-target-uri": "http://example.com/", "http:content-type": "text/html"}
"""
res = main(args=args)
assert capsys.readouterr().out == expected
def test_index_2(capsys):
files = ['example.warc.gz']
files = [get_test_file(filename) for filename in files]
args = ['index', '-f', 'offset,length,http:status,warc-type,filename']
args.extend(files)
expected = """\
{"offset": "0", "length": "353", "warc-type": "warcinfo", "filename": "example.warc.gz"}
{"offset": "353", "length": "431", "warc-type": "warcinfo", "filename": "example.warc.gz"}
{"offset": "784", "length": "1228", "http:status": "200", "warc-type": "response", "filename": "example.warc.gz"}
{"offset": "2012", "length": "609", "warc-type": "request", "filename": "example.warc.gz"}
{"offset": "2621", "length": "586", "http:status": "200", "warc-type": "revisit", "filename": "example.warc.gz"}
{"offset": "3207", "length": "609", "warc-type": "request", "filename": "example.warc.gz"}
"""
res = main(args=args)
assert capsys.readouterr().out == expected
def check_helper(args, capsys, expected_exit_value):
exit_value = None
try:
main(args=args)
except SystemExit as e:
exit_value = e.code
finally:
assert exit_value == expected_exit_value
return capsys.readouterr().out
def test_check_valid(capsys):
filenames = [get_test_file('example.warc'), get_test_file('example.warc.gz')]
args = ['check'] + filenames
expected = ''
assert check_helper(args, capsys, 0) == expected
args = ['check', '-v'] + filenames
value = check_helper(args, capsys, 0)
assert value.count('digest pass') == 4
assert value.count('WARC-Record-ID') == 12
def test_check_invalid(capsys):
filenames = [get_test_file('example-digest.warc')]
args = ['check'] + filenames
value = check_helper(args, capsys, 1)
assert value.count('payload digest failed') == 1
assert value.count('WARC-Record-ID') == 1
args = ['check', '-v'] + filenames
value = check_helper(args, capsys, 1)
assert value.count('payload digest failed') == 1
assert value.count('digest pass') == 3
assert value.count('WARC-Record-ID') == 4
files = ['example-bad-non-chunked.warc.gz', 'example-digest.warc']
filenames = [get_test_file(filename) for filename in files]
args = ['check'] + filenames
value = check_helper(args, capsys, 1)
assert value.count('ArchiveLoadFailed') == 1
assert value.count('payload digest failed') == 1
assert value.count('WARC-Record-ID') == 1
def test_recompress_non_chunked(capsys):
with named_temp() as temp:
test_file = get_test_file('example-bad-non-chunked.warc.gz')
with pytest.raises(ArchiveLoadFailed):
main(args=['index', test_file, '-f', 'warc-type'])
assert capsys.readouterr().out
# recompress!
main(args=['recompress', test_file, temp.name])
assert 'Compression Errors Found and Fixed!' in capsys.readouterr().out
expected = """\
{"warc-type": "warcinfo"}
{"warc-type": "warcinfo"}
{"warc-type": "response"}
{"warc-type": "request"}
{"warc-type": "revisit"}
{"warc-type": "request"}
"""
main(args=['index', temp.name, '-f', 'warc-type'])
assert capsys.readouterr().out == expected
def test_recompress_wrong_chunks(capsys):
with named_temp() as temp:
test_file = get_test_file('example-wrong-chunks.warc.gz')
with pytest.raises(ArchiveLoadFailed):
main(args=['index', test_file, '-f', 'warc-type'])
expected = """\
{"offset": "0", "warc-type": "response", "warc-target-uri": "http://example.com/"}
{"offset": "1061", "warc-type": "request", "warc-target-uri": "http://example.com/"}
"""
# recompress!
main(args=['recompress', '-v', test_file, temp.name])
out = capsys.readouterr().out
assert '2 records read' in out
assert 'Compression Errors Found and Fixed!' in out
assert 'No Errors Found!' not in out
assert expected in out
def test_recompress_arc2warc(capsys):
with named_temp() as temp:
test_file = get_test_file('example.arc.gz')
# recompress!
main(args=['recompress', test_file, temp.name])
assert "No Errors" in capsys.readouterr().out
expected = """\
{"warc-type": "warcinfo", "warc-block-digest": "sha1:3I42H3S6NNFQ2MSVX7XZKYAYSCX5QBYJ"}
{"warc-type": "response", "warc-block-digest": "sha1:PEWDX5GTH66WU74WBPGFECIYBMPMP3FP", "warc-payload-digest": "sha1:B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A"}
"""
main(args=['index', temp.name, '-f', 'warc-type,warc-block-digest,warc-payload-digest'])
assert capsys.readouterr().out == expected
def test_recompress_arc2warc_verbose(capsys):
with named_temp() as temp:
test_file = get_test_file('example.arc.gz')
# recompress!
main(args=['recompress', '-v', test_file, temp.name])
out = capsys.readouterr().out
assert '{"offset": "0", "warc-type": "warcinfo"}' in out
assert '"warc-target-uri": "http://example.com/"' in out
assert 'No Errors Found!' in out
assert '2 records read' in out
def test_recompress_bad_file():
with named_temp() as temp:
temp.write(b'abcdefg-not-a-warc\n')
temp.seek(0)
with named_temp() as temp2:
with pytest.raises(SystemExit):
main(args=['recompress', temp.name, temp2.name])
def test_recompress_bad_file_verbose():
with named_temp() as temp:
temp.write(b'abcdefg-not-a-warc\n')
temp.seek(0)
with named_temp() as temp2:
with pytest.raises(SystemExit):
main(args=['recompress', '--verbose', temp.name, temp2.name])
def test_extract_warcinfo(capsys):
res = main(args=['extract', get_test_file('example.warc.gz'), '0'])
assert capsys.readouterr().out == 'WARC/1.0\r\nWARC-Date: 2017-03-06T04:03:53Z\r\nWARC-Record-ID: <urn:uuid:e9a0cecc-0221-11e7-adb1-0242ac120008>\r\nWARC-Filename: temp-20170306040353.warc.gz\r\nWARC-Type: warcinfo\r\nContent-Type: application/warc-fields\r\nContent-Length: 249\r\n\r\nsoftware: Webrecorder Platform v3.7\r\nformat: WARC File Format 1.0\r\ncreator: temp-MJFXHZ4S\r\nisPartOf: Temporary%20Collection\r\njson-metadata: {"title": "Temporary Collection", "size": 2865, "created_at": 1488772924, "type": "collection", "desc": ""}\r\n'
res = main(args=['extract', '--headers', get_test_file('example.warc.gz'), '0'])
assert capsys.readouterr().out == 'WARC/1.0\r\nWARC-Date: 2017-03-06T04:03:53Z\r\nWARC-Record-ID: <urn:uuid:e9a0cecc-0221-11e7-adb1-0242ac120008>\r\nWARC-Filename: temp-20170306040353.warc.gz\r\nWARC-Type: warcinfo\r\nContent-Type: application/warc-fields\r\nContent-Length: 249\r\n\r\n'
res = main(args=['extract', '--payload', get_test_file('example.warc.gz'), '0'])
assert capsys.readouterr().out == 'software: Webrecorder Platform v3.7\r\nformat: WARC File Format 1.0\r\ncreator: temp-MJFXHZ4S\r\nisPartOf: Temporary%20Collection\r\njson-metadata: {"title": "Temporary Collection", "size": 2865, "created_at": 1488772924, "type": "collection", "desc": ""}\r\n'
def test_extract_warc_response(capsysbinary):
res = main(args=['extract', get_test_file('example.warc.gz'), '784'])
assert capsysbinary.readouterr().out == b'WARC/1.0\r\nWARC-Target-URI: http://example.com/\r\nWARC-Date: 2017-03-06T04:02:06Z\r\nWARC-Type: response\r\nWARC-Record-ID: <urn:uuid:a9c51e3e-0221-11e7-bf66-0242ac120005>\r\nWARC-IP-Address: 93.184.216.34\r\nWARC-Block-Digest: sha1:DR5MBP7OD3OPA7RFKWJUD4CTNUQUGFC5\r\nWARC-Payload-Digest: sha1:G7HRM7BGOKSKMSXZAHMUQTTV53QOFSMK\r\nContent-Type: application/http; msgtype=response\r\nContent-Length: 975\r\n\r\nHTTP/1.1 200 OK\r\nContent-Encoding: gzip\r\nAccept-Ranges: bytes\r\nCache-Control: max-age=604800\r\nContent-Type: text/html\r\nDate: Mon, 06 Mar 2017 04:02:06 GMT\r\nEtag: "359670651+gzip"\r\nExpires: Mon, 13 Mar 2017 04:02:06 GMT\r\nLast-Modified: Fri, 09 Aug 2013 23:54:35 GMT\r\nServer: ECS (iad/182A)\r\nVary: Accept-Encoding\r\nX-Cache: HIT\r\nContent-Length: 606\r\nConnection: close\r\n\r\n\x1f\x8b\x08\x00;\x81\x05R\x00\x03\x8dTA\xaf\xd30\x0c\xbe\xefW\x98r\x01i]\xf7\x80\x07S\xd7V @\xe2\x02\x1c\xe0\xc21k\xdc\xd5Z\x93\x94$\xed6\xa1\xf7\xdfq\xdb\xbd\xae\xe5\xed@+\xb5\x8e\x1d\x7f\xfel\xc7I\x9eI\x93\xfbs\x8dPzUe\x8b\xe4\xf1\x87Bf\x0b\xe0\'\xf1\xe4+\xcc>\x9f\x84\xaa+\x84OF\t\xd2I4h\x17\xc3\x16\x85^@^\n\xeb\xd0\xa7A\xe3\x8bp\x13@\x94M\x8c\xa5\xf7u\x88\xbf\x1bj\xd3\xe0\xa3\xd1\x1e\xb5\x0f\xbb\xb0\x01\xe4\xc3*\r<\x9e|\xd4\x85\xdf\x8eP\xb7\x90\xb4P\x98\x06-\xe1\xb16\xd6O\xfc\x8f$}\x99Jl)\xc7\xb0_,\x814y\x12U\xe8rQazw\x85r\xfe\xcc\xc9t\x0c.\x81s\xe7\x82\xc1\xb63\xf2\x0c\x7fz\xb1_\x8a\xfc\xb0\xb7\xa6\xd12\xccMel\x0c\xcf\x8b5\xbf\xaf\xb6\xe3\x16%\xec\x9et\x0c\xeb\xab\xaa\x16R\x92\xde\xcft\x053\r\x0b\xa1\xa8:\xc7\x10|\xafQ\xc3\x0f\xa1]\xb0\x84\xe0\x0bV-z\xca\x05|\xc3\x06Y3*\x96\xf0\xc1r\x06Kp\xbc5th\xa9\xb8"\xf6\xc2C\xff\x95\xd4NH\xf7\xe9\xc7\xf0v\xbd\xaeOOy\xde\xa3\x02\xd1xs\x83\xee\xfd\xcc\xe1V\xee\xc5$\xfe\xceX\x896\xb4BR\xe3b\xb8C\xb5\x9dP\x12qE\xfa\xb0\xe4\x7fK\x8e<\xca\t\xc1G\xb8\xd7\x9b7\x9b\xcd\x04\xb1\xebE(17Vx2\xccU\x1b\x8dS\xd0\xf7\n%\tx\xa1\xc4)\xbcd\xf9\xae\xcb\xf2\xe5\xb4e\xf3\x0e\xfeO&\x0f\xa34/\xe4\xa4\x98\xf3\x8a\xcd\xfa~\xc3\xf6Oi\xd6s\xebX\xef\xb1dW\x12\xc37\x89\xfa#\x9au\xf2"\x89\x86y\\$]j<\x9eL\xf2r\x90\xcb\xbb\'\xa3\xc9\xaa\xc1Vg?Kr {=\xb0\x84\xce\x8b]E\xae\xe4^x\x03;\x84\xc6\xb1X\x18\x0bTU\x8d\xf3]\xd5[\x04\x1c\x10\x1d\xcf\x0f{\xe7\x8d\xe2\x01s+\xf8e\x1a\xce\xf9\xdc9\x81g\xe4\xe1\xe0]\xd0\xf5\xd5\xebH\xbe4\x8d\x87\xda\x12#\xe7\x86KA\xba\xef\'\xf0Z\xb8\x03\xa7\xde\x07\xad\xd1*r\x8e\r\xab$\xaaG\xd6\t\xdf\x17\x16\x8b4\xe8n\x8d8\x8a\x8e\xc7\xe3\x8a\x84\x16+c\xf7\xd1\x10\xcfE\x97hA\xf6\xd5X\xe4\xf0\x8c\xa7\xfa\x18\xab\x15\x83\x89\xac\x07L\xa2\xbeRIt\xa9[4\\o\x7f\x01\x08\x95\xaa\x8b\xf6\x04\x00\x00'
res = main(args=['extract', '--headers', get_test_file('example.warc.gz'), '784'])
assert capsysbinary.readouterr().out == b'WARC/1.0\r\nWARC-Target-URI: http://example.com/\r\nWARC-Date: 2017-03-06T04:02:06Z\r\nWARC-Type: response\r\nWARC-Record-ID: <urn:uuid:a9c51e3e-0221-11e7-bf66-0242ac120005>\r\nWARC-IP-Address: 93.184.216.34\r\nWARC-Block-Digest: sha1:DR5MBP7OD3OPA7RFKWJUD4CTNUQUGFC5\r\nWARC-Payload-Digest: sha1:G7HRM7BGOKSKMSXZAHMUQTTV53QOFSMK\r\nContent-Type: application/http; msgtype=response\r\nContent-Length: 975\r\n\r\nHTTP/1.1 200 OK\r\nContent-Encoding: gzip\r\nAccept-Ranges: bytes\r\nCache-Control: max-age=604800\r\nContent-Type: text/html\r\nDate: Mon, 06 Mar 2017 04:02:06 GMT\r\nEtag: "359670651+gzip"\r\nExpires: Mon, 13 Mar 2017 04:02:06 GMT\r\nLast-Modified: Fri, 09 Aug 2013 23:54:35 GMT\r\nServer: ECS (iad/182A)\r\nVary: Accept-Encoding\r\nX-Cache: HIT\r\nContent-Length: 606\r\nConnection: close\r\n\r\n'
res = main(args=['extract', '--payload', get_test_file('example.warc.gz'), '784'])
assert capsysbinary.readouterr().out == b'<!doctype html>\n<html>\n<head>\n <title>Example Domain</title>\n\n <meta charset="utf-8" />\n <meta http-equiv="Content-type" content="text/html; charset=utf-8" />\n <meta name="viewport" content="width=device-width, initial-scale=1" />\n <style type="text/css">\n body {\n background-color: #f0f0f2;\n margin: 0;\n padding: 0;\n font-family: "Open Sans", "Helvetica Neue", Helvetica, Arial, sans-serif;\n \n }\n div {\n width: 600px;\n margin: 5em auto;\n padding: 50px;\n background-color: #fff;\n border-radius: 1em;\n }\n a:link, a:visited {\n color: #38488f;\n text-decoration: none;\n }\n @media (max-width: 700px) {\n body {\n background-color: #fff;\n }\n div {\n width: auto;\n margin: 0 auto;\n border-radius: 0;\n padding: 1em;\n }\n }\n </style> \n</head>\n\n<body>\n<div>\n <h1>Example Domain</h1>\n <p>This domain is established to be used for illustrative examples in documents. You may use this\n domain in examples without prior coordination or asking for permission.</p>\n <p><a href="http://www.iana.org/domains/example">More information...</a></p>\n</div>\n</body>\n</html>\n'
# @pytest.mark.xfail
# warcio doesn't support ARC output yet, and @xfail tests have some bad
# interaction with capture_stdout(capsys), thus the failing tests are commented out
def test_extract_arc(capsysbinary):
res = main(args=['extract', '--payload', get_test_file('example.arc'), '151'])
assert capsysbinary.readouterr().out == b'<!doctype html>\n<html>\n<head>\n <title>Example Domain</title>\n\n <meta charset="utf-8" />\n <meta http-equiv="Content-type" content="text/html; charset=utf-8" />\n <meta name="viewport" content="width=device-width, initial-scale=1" />\n <style type="text/css">\n body {\n background-color: #f0f0f2;\n margin: 0;\n padding: 0;\n font-family: "Open Sans", "Helvetica Neue", Helvetica, Arial, sans-serif;\n \n }\n div {\n width: 600px;\n margin: 5em auto;\n padding: 50px;\n background-color: #fff;\n border-radius: 1em;\n }\n a:link, a:visited {\n color: #38488f;\n text-decoration: none;\n }\n @media (max-width: 700px) {\n body {\n background-color: #fff;\n }\n div {\n width: auto;\n margin: 0 auto;\n border-radius: 0;\n padding: 1em;\n }\n }\n </style> \n</head>\n\n<body>\n<div>\n <h1>Example Domain</h1>\n <p>This domain is established to be used for illustrative examples in documents. You may use this\n domain in examples without prior coordination or asking for permission.</p>\n <p><a href="http://www.iana.org/domains/example">More information...</a></p>\n</div>\n</body>\n</html>\n'
# with capture_stdout(capsys) as buff:
# res = main(args=['extract', '--headers', get_test_file('example.arc'), '151'])
# assert buff.getvalue() == b'http://example.com/ 93.184.216.119 20140216050221 text/html 1591\nHTTP/1.1 200 OK\r\nAccept-Ranges: bytes\r\nCache-Control: max-age=604800\r\nContent-Type: text/html\r\nDate: Sun, 16 Feb 2014 05:02:20 GMT\r\nEtag: "359670651"\r\nExpires: Sun, 23 Feb 2014 05:02:20 GMT\r\nLast-Modified: Fri, 09 Aug 2013 23:54:35 GMT\r\nServer: ECS (sjc/4FCE)\r\nX-Cache: HIT\r\nx-ec-custom-error: 1\r\nContent-Length: 1270\r\n\r\n'
# with capture_stdout(capsys) as buff:
# res = main(args=['extract', get_test_file('example.arc'), '151'])
# assert buff.getvalue() == b'http://example.com/ 93.184.216.119 20140216050221 text/html 1591\nHTTP/1.1 200 OK\r\nAccept-Ranges: bytes\r\nCache-Control: max-age=604800\r\nContent-Type: text/html\r\nDate: Sun, 16 Feb 2014 05:02:20 GMT\r\nEtag: "359670651"\r\nExpires: Sun, 23 Feb 2014 05:02:20 GMT\r\nLast-Modified: Fri, 09 Aug 2013 23:54:35 GMT\r\nServer: ECS (sjc/4FCE)\r\nX-Cache: HIT\r\nx-ec-custom-error: 1\r\nContent-Length: 1270\r\n\r\n<!doctype html>\n<html>\n<head>\n <title>Example Domain</title>\n\n <meta charset="utf-8" />\n <meta http-equiv="Content-type" content="text/html; charset=utf-8" />\n <meta name="viewport" content="width=device-width, initial-scale=1" />\n <style type="text/css">\n body {\n background-color: #f0f0f2;\n margin: 0;\n padding: 0;\n font-family: "Open Sans", "Helvetica Neue", Helvetica, Arial, sans-serif;\n \n }\n div {\n width: 600px;\n margin: 5em auto;\n padding: 50px;\n background-color: #fff;\n border-radius: 1em;\n }\n a:link, a:visited {\n color: #38488f;\n text-decoration: none;\n }\n @media (max-width: 700px) {\n body {\n background-color: #fff;\n }\n div {\n width: auto;\n margin: 0 auto;\n border-radius: 0;\n padding: 1em;\n }\n }\n </style> \n</head>\n\n<body>\n<div>\n <h1>Example Domain</h1>\n <p>This domain is established to be used for illustrative examples in documents. You may use this\n domain in examples without prior coordination or asking for permission.</p>\n <p><a href="http://www.iana.org/domains/example">More information...</a></p>\n</div>\n</body>\n</html>\n'
# due to NamedTemporaryFile issue on Windows
# see: https://bugs.python.org/issue14243#msg157925
@contextmanager
def named_temp():
f = tempfile.NamedTemporaryFile(delete=False)
try:
yield f
finally:
try:
os.unlink(f.name)
except OSError:
pass
| 19,089
| 71.862595
| 2,603
|
py
|
warcio
|
warcio-master/test/test_archiveiterator.py
|
from warcio.archiveiterator import ArchiveIterator, WARCIterator, ARCIterator
from warcio.exceptions import ArchiveLoadFailed
from warcio.bufferedreaders import DecompressingBufferedReader, BufferedReader
from warcio.warcwriter import BufferWARCWriter
import pytest
from io import BytesIO
import sys
import os
from . import get_test_file
from contextlib import closing, contextmanager
import subprocess
#==============================================================================
class TestArchiveIterator(object):
def _load_archive(self, filename, offset=0, cls=ArchiveIterator,
errs_expected=0, **kwargs):
with open(get_test_file(filename), 'rb') as fh:
fh.seek(offset)
iter_ = cls(fh, **kwargs)
rec_types = [record.rec_type for record in iter_ if record.digest_checker.passed is not False]
assert iter_.err_count == errs_expected
return rec_types
def _load_archive_memory(self, stream, offset=0, cls=ArchiveIterator,
errs_expected=0, full_read=False, **kwargs):
stream.seek(offset)
iter_ = cls(stream, **kwargs)
if full_read:
rec_types = [record.rec_type for record in iter_
if (record.content_stream().read() or True) and record.digest_checker.passed is not False]
else:
rec_types = [record.rec_type for record in iter_ if record.digest_checker.passed is not False]
assert iter_.err_count == errs_expected
return rec_types
def _read_first_response(self, filename):
with self._find_first_by_type(filename, 'response') as record:
if record:
return record.content_stream().read()
@contextmanager
def _find_first_by_type(self, filename, match_type, **params):
with open(get_test_file(filename), 'rb') as fh:
with closing(ArchiveIterator(fh, **params)) as a:
for record in a:
if record.rec_type == match_type:
yield record
break
def test_example_warc_gz(self):
expected = ['warcinfo', 'warcinfo', 'response', 'request', 'revisit', 'request']
assert self._load_archive('example.warc.gz') == expected
def test_example_warc(self):
expected = ['warcinfo', 'warcinfo', 'response', 'request', 'revisit', 'request']
assert self._load_archive('example.warc') == expected
def test_example_warc_2(self):
expected = ['warcinfo', 'response', 'request']
assert self._load_archive('example-iana.org-chunked.warc') == expected
def test_iterator(self):
""" Test iterator semantics on 3 record WARC
"""
with open(get_test_file('example-iana.org-chunked.warc'), 'rb') as fh:
with closing(ArchiveIterator(fh)) as a:
for record in a:
assert record.rec_type == 'warcinfo'
assert a.get_record_offset() == 0
assert record.digest_checker.passed is None
assert len(record.digest_checker.problems) == 0
break
record = next(a)
assert record.rec_type == 'response'
assert a.get_record_offset() == 405
assert record.digest_checker.passed is None
assert len(record.digest_checker.problems) == 0
for record in a:
assert record.rec_type == 'request'
assert a.get_record_offset() == 8379
assert record.digest_checker.passed is None
assert len(record.digest_checker.problems) == 0
break
with pytest.raises(StopIteration):
record = next(a)
assert a.record == None
assert a.reader == None
assert a.read_to_end() == None
def test_unseekable(self):
""" Test iterator on unseekable 3 record uncompressed WARC input
"""
proc = subprocess.Popen(['cat', get_test_file('example-iana.org-chunked.warc')],
stdout=subprocess.PIPE)
def raise_tell(x):
raise Exception()
# on windows, this tell() exists but doesn't work correctly, so just override (in py3)
        # this is designed to emulate stdin, which does not have a tell(), as expected
stdout = proc.stdout
if os.name == 'nt' and hasattr(proc.stdout, 'tell'):
if sys.version_info < (3, 0):
stdout = BufferedReader(stdout)
else:
stdout.tell = raise_tell
with closing(ArchiveIterator(stdout)) as a:
for record in a:
assert record.rec_type == 'warcinfo'
assert a.get_record_offset() == 0
break
record = next(a)
assert record.rec_type == 'response'
assert a.get_record_offset() == 405
for record in a:
assert record.rec_type == 'request'
assert a.get_record_offset() == 8379
break
with pytest.raises(StopIteration):
record = next(a)
assert a.record == None
assert a.reader == None
assert a.read_to_end() == None
proc.stdout.close()
proc.wait()
def test_unseekable_gz(self):
""" Test iterator on unseekable 3 record uncompressed gzipped WARC input
"""
proc = subprocess.Popen(['cat', get_test_file('example-resource.warc.gz')],
stdout=subprocess.PIPE)
def raise_tell(x):
raise Exception()
# on windows, this tell() exists but doesn't work correctly, so just override (in py3)
        # this is designed to emulate stdin, which does not have a tell(), as expected
stdout = proc.stdout
if os.name == 'nt' and hasattr(proc.stdout, 'tell'):
#can't override tell() in py2
if sys.version_info < (3, 0):
stdout = BufferedReader(stdout)
else:
stdout.tell = raise_tell
with closing(ArchiveIterator(stdout)) as a:
for record in a:
assert record.rec_type == 'warcinfo'
assert a.get_record_offset() == 0
break
record = next(a)
assert record.rec_type == 'warcinfo'
assert a.get_record_offset() == 361
for record in a:
assert record.rec_type == 'resource'
assert a.get_record_offset() == 802
break
with pytest.raises(StopIteration):
record = next(a)
assert a.record == None
assert a.reader == None
assert a.read_to_end() == None
proc.stdout.close()
proc.wait()
def test_example_warc_trunc(self):
""" WARC file with content-length truncated on a response record
An error is reported, but the records before the truncation are still read
"""
expected = ['warcinfo', 'warcinfo', 'response', 'request']
assert self._load_archive('example-trunc.warc', errs_expected=1) == expected
assert self._load_archive('example-trunc.warc', errs_expected=1,
check_digests=True) == expected
with pytest.raises(ArchiveLoadFailed):
assert self._load_archive('example-trunc.warc', errs_expected=1,
check_digests='raise') == expected
def test_example_arc_gz(self):
expected = ['arc_header', 'response']
assert self._load_archive('example.arc.gz') == expected
def test_example_space_in_url_arc(self):
expected = ['arc_header', 'response']
assert self._load_archive('example-space-in-url.arc') == expected
def test_example_arc(self):
expected = ['arc_header', 'response']
assert self._load_archive('example.arc') == expected
def test_example_arc2warc(self):
expected = ['warcinfo', 'response']
assert self._load_archive('example.arc.gz', arc2warc=True) == expected
def test_example_warc_resource(self):
expected = ['warcinfo', 'warcinfo', 'resource']
assert self._load_archive('example-resource.warc.gz') == expected
def test_resource_no_http_headers(self):
with self._find_first_by_type('example-resource.warc.gz', 'resource') as record:
assert record.http_headers == None
assert len(record.content_stream().read()) == int(record.rec_headers.get('Content-Length'))
def test_resource_with_http_headers(self):
with self._find_first_by_type('example-resource.warc.gz', 'resource',
ensure_http_headers=True) as record:
assert record.http_headers != None
assert (record.http_headers.get_header('Content-Length') ==
record.rec_headers.get_header('Content-Length'))
expected = 'HTTP/1.0 200 OK\r\n\
Content-Type: text/html; charset=utf-8\r\n\
Content-Length: 1303\r\n'
assert str(record.http_headers) == expected
assert len(record.content_stream().read()) == int(record.rec_headers.get('Content-Length'))
def test_read_content(self):
assert 'Example Domain' in self._read_first_response('example.warc.gz').decode('utf-8')
assert 'Example Domain' in self._read_first_response('example.warc').decode('utf-8')
assert 'Example Domain' in self._read_first_response('example.arc.gz').decode('utf-8')
assert 'Example Domain' in self._read_first_response('example.arc').decode('utf-8')
def test_read_content_chunked(self):
buff = self._read_first_response('example-iana.org-chunked.warc').decode('utf-8')
assert buff.startswith('<!doctype html>')
assert 'Internet Assigned Numbers Authority' in buff
def test_bad_warc(self):
with pytest.raises(ArchiveLoadFailed):
self._load_archive('example-bad.warc.gz.bad')
def test_bad_offset_warc(self):
with pytest.raises(ArchiveLoadFailed):
self._load_archive('example.warc.gz', offset=10)
def test_bad_arc_invalid_lengths(self):
expected = ['arc_header', 'response', 'response', 'response']
assert self._load_archive('bad.arc') == expected
def test_err_non_chunked_gzip(self):
with pytest.raises(ArchiveLoadFailed):
self._load_archive('example-bad-non-chunked.warc.gz')
def test_err_warc_iterator_on_arc(self):
expected = ['arc_header', 'response']
with pytest.raises(ArchiveLoadFailed):
self._load_archive('example.arc.gz', cls=WARCIterator)
def test_err_arc_iterator_on_warc(self):
expected = ['arc_header', 'response']
with pytest.raises(ArchiveLoadFailed):
self._load_archive('example.warc.gz', cls=ARCIterator)
def test_corrects_wget_bug(self):
with self._find_first_by_type('example-wget-bad-target-uri.warc.gz', 'response') as record:
assert record.rec_headers.get('WARC-Target-URI') == 'http://example.com/'
def test_corrects_space_in_target_uri(self):
with self._find_first_by_type('example-space-in-target-uri.warc.gz', 'resource') as record:
assert record.rec_headers.get('WARC-Target-URI') == 'file:///example%20with%20spaces.png'
def _digests_mutilate_helper(self, contents, expected_t, expected_f, capsys, full_read=False):
with pytest.raises(ArchiveLoadFailed):
assert self._load_archive_memory(BytesIO(contents), check_digests='raise', full_read=full_read) == expected_t
capsys.readouterr()
assert self._load_archive_memory(BytesIO(contents), check_digests='log', full_read=full_read) == expected_t
out, err = capsys.readouterr()
assert err
assert self._load_archive_memory(BytesIO(contents), check_digests=True, full_read=full_read) == expected_t
out, err = capsys.readouterr()
assert not err
assert self._load_archive_memory(BytesIO(contents), check_digests=False, full_read=full_read) == expected_f
out, err = capsys.readouterr()
assert not err
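# Summary of the check_digests modes exercised above (as observed from the helper's
# assertions; added here for clarity, not part of the original test file):
#   'raise' - a failed digest raises ArchiveLoadFailed
#   'log'   - failing records are dropped from the result and an error is written to stderr
#   True    - failing records are dropped, but nothing is written to stderr
#   False   - digests are not checked, so every record is returned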
def test_digests_mutilate(self, capsys):
expected_f = ['warcinfo', 'warcinfo', 'response', 'request', 'revisit', 'request']
expected_t = ['warcinfo', 'warcinfo', 'request', 'revisit', 'request']
with open(get_test_file('example.warc'), 'rb') as fh:
contents = fh.read()
contents_sha = contents.replace(b'WARC-Block-Digest: sha1:', b'WARC-Block-Digest: xxx:', 1)
assert contents != contents_sha, 'a replace happened'
self._digests_mutilate_helper(contents_sha, expected_t, expected_f, capsys)
contents_sha = contents.replace(b'WARC-Payload-Digest: sha1:', b'WARC-Payload-Digest: xxx:', 1)
assert contents != contents_sha, 'a replace happened'
self._digests_mutilate_helper(contents_sha, expected_t, expected_f, capsys)
contents_block = contents
thing = b'WARC-Block-Digest: sha1:'
index = contents_block.find(thing)
index += len(thing)
b = contents_block[index:index+3]
contents_block = contents_block.replace(thing+b, thing+b'111')
assert contents != contents_block, 'a replace happened'
# If we don't read the stream, the digest check will not happen and all records will be seen
self._digests_mutilate_helper(contents_block, expected_f, expected_f, capsys)
self._digests_mutilate_helper(contents_block, expected_t, expected_f, capsys, full_read=True)
contents_payload = contents
thing = b'WARC-Payload-Digest: sha1:'
index = contents_payload.find(thing)
index += len(thing)
b = contents_payload[index:index+3]
contents_payload = contents_payload.replace(thing+b, thing+b'111')
assert contents != contents_payload, 'a replace happened'
self._digests_mutilate_helper(contents_payload, expected_f, expected_f, capsys)
self._digests_mutilate_helper(contents_payload, expected_t, expected_f, capsys, full_read=True)
def test_digests_file(self):
expected_f = ['request', 'request', 'request', 'request']
expected_t = ['request', 'request', 'request']
# record 1: invalid payload digest
assert self._load_archive('example-digest.warc', check_digests=True) == expected_t
assert self._load_archive('example-digest.warc', check_digests=False) == expected_f
# record 2: b64 digest; record 3: b64 filename safe digest
assert self._load_archive('example-digest.warc', offset=922, check_digests=True) == expected_t
assert self._load_archive('example-digest.warc', offset=922, check_digests=False) == expected_t
| 14,939
| 41.20339
| 121
|
py
|
warcio
|
warcio-master/test/test_digestverifyingreader.py
|
import pytest
from warcio.digestverifyingreader import _compare_digest_rfc_3548
from warcio.utils import Digester
empty_sha1_b32 = '3I42H3S6NNFQ2MSVX7XZKYAYSCX5QBYJ'
empty_sha1_b64 = '2jmj7l5rSw0yVb/vlWAYkK/YBwk='
empty_sha1_b64_alt = '2jmj7l5rSw0yVb_vlWAYkK_YBwk='
empty_sha1_b16 = 'DA39A3EE5E6B4B0D3255BFEF95601890AFD80709'
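# Illustrative cross-check (added for clarity, not part of the original test file):
# the four constants above are the SHA-1 digest of empty input rendered in the
# base32, base64, filename-safe base64 and base16 encodings from RFC 3548.
import base64
import hashlib

_empty_digest = hashlib.sha1(b'').digest()
assert base64.b32encode(_empty_digest).decode('ascii') == empty_sha1_b32
assert base64.b64encode(_empty_digest).decode('ascii') == empty_sha1_b64
assert base64.urlsafe_b64encode(_empty_digest).decode('ascii') == empty_sha1_b64_alt
assert _empty_digest.hex().upper() == empty_sha1_b16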
def test_compare_digest_rfc_3548():
assert _compare_digest_rfc_3548(None, None) is None
sha1d = Digester('sha1')
assert _compare_digest_rfc_3548(sha1d, 'sha1:'+empty_sha1_b32) is True
assert _compare_digest_rfc_3548(sha1d, 'sha1:'+empty_sha1_b32.replace('3I', 'xx')) is False
assert _compare_digest_rfc_3548(sha1d, 'sha1:'+empty_sha1_b64) is True
assert _compare_digest_rfc_3548(sha1d, 'sha1:'+empty_sha1_b64_alt) is True
assert _compare_digest_rfc_3548(sha1d, 'sha1:'+empty_sha1_b16) is True
assert _compare_digest_rfc_3548(sha1d, 'sha1:'+empty_sha1_b16.lower()) is True
with pytest.raises(ValueError):
assert _compare_digest_rfc_3548(sha1d, 'foo') is False
assert _compare_digest_rfc_3548(sha1d, 'foo:bar') is False
| 1,103
| 39.888889
| 95
|
py
|
warcio
|
warcio-master/test/test_bufferedreaders.py
|
r"""
# DecompressingBufferedReader Tests
#=================================================================
# decompress with on the fly compression, default gzip compression
>>> print_str(DecompressingBufferedReader(BytesIO(compress('ABC\n1234\n'))).read())
'ABC\n1234\n'
# decompress with on-the-fly compression, using the 'deflate' decompression type
>>> print_str(DecompressingBufferedReader(BytesIO(compress_alt('ABC\n1234\n')), decomp_type='deflate').read())
'ABC\n1234\n'
# error: invalid compress type
>>> DecompressingBufferedReader(BytesIO(compress('ABC')), decomp_type = 'bzip2').read()
Traceback (most recent call last):
Exception: Decompression type not supported: bzip2
# invalid output when reading compressed data as not compressed
>>> DecompressingBufferedReader(BytesIO(compress('ABC')), decomp_type = None).read() != b'ABC'
True
# test very small block size
>>> dbr = DecompressingBufferedReader(BytesIO(b'ABCDEFG\nHIJKLMN\nOPQR\nXYZ'), block_size = 3)
>>> print_str(dbr.readline()); print_str(dbr.readline(4)); print_str(dbr.readline()); print_str(dbr.readline()); print_str(dbr.readline(2)); print_str(dbr.readline()); print_str(dbr.readline())
'ABCDEFG\n'
'HIJK'
'LMN\n'
'OPQR\n'
'XY'
'Z'
''
# test zero length reads
>>> x = DecompressingBufferedReader(LimitReader(BytesIO(b'\r\n'), 1))
>>> print_str(x.readline(0)); print_str(x.read(0))
''
''
# Chunk-Decoding Buffered Reader Tests
#=================================================================
Properly formatted chunked data:
>>> c = ChunkedDataReader(BytesIO(b"4\r\n1234\r\n0\r\n\r\n"));
>>> print_str(c.read() + c.read(1) + c.read() + c.read())
'1234'
Non-chunked data:
>>> print_str(ChunkedDataReader(BytesIO(b"xyz123!@#")).read())
'xyz123!@#'
Non-chunked data consisting only of hex-digit letters:
>>> print_str(ChunkedDataReader(BytesIO(b"ABCDE" * 10)).read())
'ABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDE'
Non-chunked data, hex-digit letters plus a trailing newline, large:
>>> print_str(ChunkedDataReader(BytesIO(b"ABCDE" * 10 + b'\r\n')).read())
'ABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDE\r\n'
Non-chunked, compressed data, specify decomp_type
>>> print_str(ChunkedDataReader(BytesIO(compress('ABCDEF')), decomp_type='gzip').read())
'ABCDEF'
Non-chunked, compressed data, specify compression separately
>>> c = ChunkedDataReader(BytesIO(compress('ABCDEF'))); c.set_decomp('gzip'); print_str(c.read())
'ABCDEF'
Non-chunked, compressed data, wrap in DecompressingBufferedReader
>>> print_str(DecompressingBufferedReader(ChunkedDataReader(BytesIO(compress('\nABCDEF\nGHIJ')))).read())
'\nABCDEF\nGHIJ'
Chunked compressed data
Split the compressed stream into a 10-byte chunk and a remainder chunk
>>> b = compress('ABCDEFGHIJKLMNOP')
>>> l = len(b)
>>> in_ = format(10, 'x').encode('utf-8') + b"\r\n" + b[:10] + b"\r\n" + format(l - 10, 'x').encode('utf-8') + b"\r\n" + b[10:] + b"\r\n0\r\n\r\n"
>>> c = ChunkedDataReader(BytesIO(in_), decomp_type='gzip')
>>> print_str(c.read())
'ABCDEFGHIJKLMNOP'
Starts like chunked data, but isn't:
>>> c = ChunkedDataReader(BytesIO(b"1\r\nxyz123!@#"));
>>> print_str(c.read() + c.read())
'1\r\nx123!@#'
Chunked data cut off part way through:
>>> c = ChunkedDataReader(BytesIO(b"4\r\n1234\r\n4\r\n12"));
>>> print_str(c.read() + c.read())
'123412'
Zero-Length chunk:
>>> print_str(ChunkedDataReader(BytesIO(b"0\r\n\r\n")).read())
''
"""
from io import BytesIO
from warcio.bufferedreaders import ChunkedDataReader, ChunkedDataException
from warcio.bufferedreaders import DecompressingBufferedReader
from warcio.limitreader import LimitReader
from contextlib import closing
import six
import zlib
import pytest
def compress(buff):
buff = buff.encode('utf-8')
compressobj = zlib.compressobj(6, zlib.DEFLATED, zlib.MAX_WBITS + 16)
compressed = compressobj.compress(buff)
compressed += compressobj.flush()
return compressed
# plain "inflate"
def compress_alt(buff):
buff = buff.encode('utf-8')
compressobj = zlib.compressobj(6, zlib.DEFLATED)
compressed = compressobj.compress(buff)
compressed += compressobj.flush()
# drop the 2-byte zlib header and 4-byte Adler-32 trailer, leaving a raw deflate stream
compressed = compressed[2:-4]
return compressed
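# Illustrative alternative (added for clarity, not part of the original file):
# the same raw deflate stream can be produced directly by passing a negative
# wbits value to zlib.compressobj, instead of slicing off the zlib framing.
def compress_alt_raw(buff):
    buff = buff.encode('utf-8')
    # wbits=-MAX_WBITS requests a raw deflate stream with no header or checksum
    compressobj = zlib.compressobj(6, zlib.DEFLATED, -zlib.MAX_WBITS)
    return compressobj.compress(buff) + compressobj.flush()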
# Brotli
@pytest.mark.skipif('br' not in DecompressingBufferedReader.DECOMPRESSORS, reason='brotli not available')
def test_brotli():
brotli_buff = b'[\xff\xaf\x02\xc0"y\\\xfbZ\x8cB;\xf4%U\x19Z\x92\x99\xb15\xc8\x19\x9e\x9e\n{K\x90\xb9<\x98\xc8\t@\xf3\xe6\xd9M\xe4me\x1b\'\x87\x13_\xa6\xe90\x96{<\x15\xd8S\x1c'
with closing(DecompressingBufferedReader(BytesIO(brotli_buff), decomp_type='br')) as x:
assert x.read() == b'The quick brown fox jumps over the lazy dog' * 4096
@pytest.mark.skipif('br' not in DecompressingBufferedReader.DECOMPRESSORS, reason='brotli not available')
def test_brotli_very_small_chunk():
brotli_buff = b'[\xff\xaf\x02\xc0"y\\\xfbZ\x8cB;\xf4%U\x19Z\x92\x99\xb15\xc8\x19\x9e\x9e\n{K\x90\xb9<\x98\xc8\t@\xf3\xe6\xd9M\xe4me\x1b\'\x87\x13_\xa6\xe90\x96{<\x15\xd8S\x1c'
# read 3 bytes at a time; read() will need to be called multiple times before the decompressor has enough data to return something
with closing(DecompressingBufferedReader(BytesIO(brotli_buff), decomp_type='br', block_size=3)) as x:
assert x.read() == b'The quick brown fox jumps over the lazy dog' * 4096
# Compression
def test_compress_mix():
x = DecompressingBufferedReader(BytesIO(compress('ABC') + b'123'), decomp_type = 'gzip')
b = x.read()
assert b == b'ABC'
x.read_next_member()
assert x.read() == b'123'
# Errors
def test_compress_invalid():
result = compress('ABCDEFG' * 1)
# cut-off part of the block
result = result[:-2] + b'xyz'
x = DecompressingBufferedReader(BytesIO(result), block_size=16)
b = x.read(3)
assert b == b'ABC'
assert b'DE' == x.read()
def test_err_chunk_cut_off():
# Chunked data cut off with exceptions
c = ChunkedDataReader(BytesIO(b"4\r\n1234\r\n4\r\n12"), raise_exceptions=True)
with pytest.raises(ChunkedDataException):
c.read() + c.read()
#ChunkedDataException: Ran out of data before end of chunk
def print_str(string):
return string.decode('utf-8') if six.PY3 else string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6,214
| 32.235294
| 193
|
py
|
warcio
|
warcio-master/test/test_check_digest_examples.py
|
from warcio.cli import main
from warcio import ArchiveIterator
from warcio.warcwriter import BufferWARCWriter
from . import get_test_file
import os
SKIP = ['example-trunc.warc',
'example-iana.org-chunked.warc',
'example-wrong-chunks.warc.gz',
'example-bad-non-chunked.warc.gz',
'example-digest.warc'
]
def pytest_generate_tests(metafunc):
if 'test_filename' in metafunc.fixturenames:
files = [filename for filename in os.listdir(get_test_file('.'))
if filename not in SKIP and filename.endswith(('.warc', '.warc.gz', '.arc', '.arc.gz'))]
metafunc.parametrize('test_filename', files)
class TestExamplesDigest(object):
def check_helper(self, args, expected_exit_value, capsys):
exit_value = None
try:
main(args=args)
except SystemExit as e:
exit_value = e.code
finally:
assert exit_value == expected_exit_value
return capsys.readouterr()[0]  # index instead of .out, for py33 support
def test_check_invalid(self, capsys):
filenames = [get_test_file('example-digest.warc')]
args = ['check'] + filenames
value = self.check_helper(args, 1, capsys)
assert value.count('payload digest failed') == 1
assert value.count('WARC-Record-ID') == 1
args = ['check', '-v'] + filenames
value = self.check_helper(args, 1, capsys)
assert value.count('payload digest failed') == 1
assert value.count('digest pass') == 3
assert value.count('WARC-Record-ID') == 4
def test_check_valid(self, capsys):
filenames = [get_test_file('example.warc'), get_test_file('example.warc.gz')]
args = ['check'] + filenames
expected = ''
assert self.check_helper(args, 0, capsys) == expected
args = ['check', '-v'] + filenames
value = self.check_helper(args, 0, capsys)
# two digests per file (payload and block)
assert value.count('digest pass') == 4
assert value.count('WARC-Record-ID') == 12
def test_check_valid_chunked(self, capsys):
filenames = [get_test_file('example-iana.org-chunked.warc')]
args = ['check'] + filenames
expected = ''
assert self.check_helper(args, 0, capsys) == expected
args = ['check', '-v'] + filenames
value = self.check_helper(args, 0, capsys)
# two digests per file (payload and block)
assert value.count('no digest to check') == 1
assert value.count('digest pass') == 2
assert value.count('WARC-Record-ID') == 3
def test_check_no_invalid_files(self, test_filename, capsys):
args = ['check', '-v', get_test_file(test_filename)]
value = self.check_helper(args, 0, capsys)
assert value.count('digest failed') == 0
# if ARC file, no digests to check, so no passing results
if test_filename.endswith(('.arc', '.arc.gz')):
assert value.count('digest pass') == 0
| 3,007
| 34.388235
| 105
|
py
|
warcio
|
warcio-master/test/test_capture_http.py
|
import threading
from wsgiref.simple_server import make_server
from io import BytesIO
import time
# must be imported before 'requests'
from warcio.capture_http import capture_http
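# (Clarifying note, not from the original source: capture_http records traffic by
# patching the standard-library HTTP client machinery, so it has to be imported
# before requests/urllib3 bind their own references to those classes.)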
from pytest import raises
import requests
import json
import os
import tempfile
from warcio.archiveiterator import ArchiveIterator
from warcio.utils import BUFF_SIZE
from warcio.warcwriter import BufferWARCWriter, WARCWriter
# ==================================================================
class TestCaptureHttpBin(object):
@classmethod
def setup_class(cls):
from httpbin import app as httpbin_app
cls.temp_dir = tempfile.mkdtemp('warctest')
server = make_server('localhost', 0, httpbin_app)
addr, cls.port = server.socket.getsockname()
def run():
try:
server.serve_forever()
except Exception as e:
print(e)
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.1)
@classmethod
def teardown_class(cls):
os.rmdir(cls.temp_dir)
def test_get_no_capture(self):
url = 'http://localhost:{0}/get?foo=bar'.format(self.port)
res = requests.get(url, headers={'Host': 'httpbin.org'})
assert res.json()['args'] == {'foo': 'bar'}
def test_get(self):
url = 'http://localhost:{0}/get?foo=bar'.format(self.port)
with capture_http() as warc_writer:
res = requests.get(url, headers={'Host': 'httpbin.org'})
assert res.json()['args'] == {'foo': 'bar'}
ai = ArchiveIterator(warc_writer.get_stream())
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == url
assert response.rec_headers['WARC-IP-Address'] == '127.0.0.1'
assert res.json() == json.loads(response.content_stream().read().decode('utf-8'))
request = next(ai)
assert request.rec_type == 'request'
assert request.rec_headers['WARC-Target-URI'] == url
assert request.rec_headers['WARC-IP-Address'] == '127.0.0.1'
def test_get_cache_to_file(self):
warc_writer = BufferWARCWriter(gzip=False)
url = 'http://localhost:{0}/bytes/{1}'.format(self.port, BUFF_SIZE * 2)
with capture_http(warc_writer):
res = requests.get(url, headers={'Host': 'httpbin.org'})
assert len(res.content) == BUFF_SIZE * 2
ai = ArchiveIterator(warc_writer.get_stream())
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == url
assert response.rec_headers['WARC-IP-Address'] == '127.0.0.1'
assert res.content == response.content_stream().read()
request = next(ai)
assert request.rec_type == 'request'
assert request.rec_headers['WARC-Target-URI'] == url
assert request.rec_headers['WARC-IP-Address'] == '127.0.0.1'
def test_post_json(self):
warc_writer = BufferWARCWriter(gzip=False)
with capture_http(warc_writer):
res = requests.post('http://localhost:{0}/post'.format(self.port),
headers={'Host': 'httpbin.org'},
json={'some': {'data': 'posted'}})
assert res.json()['json'] == {'some': {'data': 'posted'}}
# response
ai = ArchiveIterator(warc_writer.get_stream())
response = next(ai)
assert response.rec_type == 'response'
assert res.json() == json.loads(response.content_stream().read().decode('utf-8'))
# request
request = next(ai)
assert request.rec_type == 'request'
assert request.http_headers['Content-Type'] == 'application/json'
data = request.content_stream().read().decode('utf-8')
assert data == '{"some": {"data": "posted"}}'
def test_post_stream(self):
warc_writer = BufferWARCWriter(gzip=False)
def nop_filter(request, response, recorder):
assert request
assert response
return request, response
postbuff = BytesIO(b'somedatatopost')
url = 'http://localhost:{0}/post'.format(self.port)
with capture_http(warc_writer, nop_filter):
res = requests.post(url, data=postbuff)
# response
ai = ArchiveIterator(warc_writer.get_stream())
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == url
assert response.rec_headers['WARC-IP-Address'] == '127.0.0.1'
assert res.json() == json.loads(response.content_stream().read().decode('utf-8'))
# request
request = next(ai)
assert request.rec_type == 'request'
assert request.rec_headers['WARC-Target-URI'] == url
assert request.rec_headers['WARC-IP-Address'] == '127.0.0.1'
data = request.content_stream().read().decode('utf-8')
assert data == 'somedatatopost'
def test_post_chunked(self):
warc_writer = BufferWARCWriter(gzip=False)
def nop_filter(request, response, recorder):
assert request
assert response
return request, response
def gen():
return iter([b'some', b'data', b'to', b'post'])
#url = 'http://localhost:{0}/post'.format(self.port)
url = 'https://httpbin.org/post'
with capture_http(warc_writer, nop_filter, record_ip=False):
res = requests.post(url, data=gen(), headers={'Content-Type': 'application/json'})
# response
ai = ArchiveIterator(warc_writer.get_stream())
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == url
assert 'WARC-IP-Address' not in response.rec_headers
assert res.json() == json.loads(response.content_stream().read().decode('utf-8'))
# request
request = next(ai)
assert request.rec_type == 'request'
assert request.rec_headers['WARC-Target-URI'] == url
assert 'WARC-IP-Address' not in response.rec_headers
data = request.content_stream().read().decode('utf-8')
assert data == 'somedatatopost'
def test_skip_filter(self):
warc_writer = BufferWARCWriter(gzip=False)
def skip_filter(request, response, recorder):
assert request
assert response
return None, None
with capture_http(warc_writer, skip_filter):
res = requests.get('http://localhost:{0}/get?foo=bar'.format(self.port),
headers={'Host': 'httpbin.org'})
assert res.json()['args'] == {'foo': 'bar'}
# skipped, nothing written
assert warc_writer.get_contents() == b''
def test_capture_to_temp_file_append(self):
full_path = os.path.join(self.temp_dir, 'example.warc.gz')
url = 'http://localhost:{0}/get?foo=bar'.format(self.port)
with capture_http(full_path):
res = requests.get(url)
with capture_http(full_path):
res = requests.get(url)
with open(full_path, 'rb') as stream:
# response
ai = ArchiveIterator(stream)
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == url
# request
request = next(ai)
assert request.rec_type == 'request'
assert request.rec_headers['WARC-Target-URI'] == url
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == url
# request
request = next(ai)
assert request.rec_type == 'request'
assert request.rec_headers['WARC-Target-URI'] == url
os.remove(full_path)
def test_error_capture_to_temp_file_no_append_no_overwrite(self):
full_path = os.path.join(self.temp_dir, 'example2.warc.gz')
url = 'http://localhost:{0}/get?foo=bar'.format(self.port)
with capture_http(full_path, append=False):
res = requests.get(url)
with raises(OSError):
with capture_http(full_path, append=False):
res = requests.get(url)
os.remove(full_path)
def test_warc_1_1(self):
full_path = os.path.join(self.temp_dir, 'example3.warc')
url = 'http://localhost:{0}/get?foo=bar'.format(self.port)
with capture_http(full_path, append=False, warc_version='1.1', gzip=False):
res = requests.get(url)
with open(full_path, 'rb') as stream:
# response
ai = ArchiveIterator(stream)
response = next(ai)
assert response.rec_headers.protocol == 'WARC/1.1'
warc_date = response.rec_headers['WARC-Date']
# ISO 8601 date with fractional seconds (microseconds)
assert '.' in warc_date
assert len(warc_date) == 27
os.remove(full_path)
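# Note (added for clarity): a WARC/1.1 timestamp with microsecond precision, e.g.
# '2000-01-01T00:00:00.123456Z', is exactly 27 characters long, which is what the
# length assertion above relies on.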
def test_remote(self):
with capture_http(warc_version='1.1', gzip=True) as writer:
requests.get('http://example.com/')
requests.get('https://google.com/')
expected = [('http://example.com/', 'response', True),
('http://example.com/', 'request', True),
('https://google.com/', 'response', True),
('https://google.com/', 'request', True),
('https://www.google.com/', 'response', True),
('https://www.google.com/', 'request', True)
]
actual = [
(record.rec_headers['WARC-Target-URI'],
record.rec_type,
'WARC-IP-Address' in record.rec_headers)
for record in ArchiveIterator(writer.get_stream())
]
assert actual == expected
| 10,071
| 33.258503
| 94
|
py
|
warcio
|
warcio-master/test/test_statusandheaders.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
>>> st1 = StatusAndHeadersParser(['HTTP/1.0']).parse(StringIO(status_headers_1))
>>> st1
StatusAndHeaders(protocol = 'HTTP/1.0', statusline = '200 OK', headers = [('Content-Type', 'ABC'), ('Some', 'Value'), ('Multi-Line', 'Value1 Also This')])
# add range (and byte headers)
>>> StatusAndHeaders(statusline = '200 OK', headers=[(b'Content-Type', b'text/plain')]).add_range(10, 4, 100)
StatusAndHeaders(protocol = '', statusline = '206 Partial Content', headers = [('Content-Type', 'text/plain'), ('Content-Range', 'bytes 10-13/100'), ('Content-Length', '4'), ('Accept-Ranges', 'bytes')])
# other protocol expected
>>> StatusAndHeadersParser(['Other']).parse(StringIO(status_headers_1)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
StatusAndHeadersParserException: Expected Status Line starting with ['Other'] - Found: HTTP/1.0 200 OK
>>> StatusAndHeadersParser(['Other'], verify=False).parse(StringIO(status_headers_1))
StatusAndHeaders(protocol = 'HTTP/1.0', statusline = '200 OK', headers = [('Content-Type', 'ABC'), ('Some', 'Value'), ('Multi-Line', 'Value1 Also This')])
# verify protocol line
>>> StatusAndHeadersParser(['HTTP/1.0'], verify=True).parse(StringIO(unknown_protocol_headers)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
StatusAndHeadersParserException: Expected Status Line starting with ['HTTP/1.0'] - Found: OtherBlah
# allow unexpected/invalid protocol line
>>> StatusAndHeadersParser(['HTTP/1.0'], verify=False).parse(StringIO(unknown_protocol_headers))
StatusAndHeaders(protocol = 'OtherBlah', statusline = '', headers = [('Foo', 'Bar')])
# test equality op
>>> st1 == StatusAndHeadersParser(['HTTP/1.0']).parse(StringIO(status_headers_1))
True
# replace header, print new headers
>>> st1.replace_header('some', 'Another-Value'); st1
'Value'
StatusAndHeaders(protocol = 'HTTP/1.0', statusline = '200 OK', headers = [('Content-Type', 'ABC'), ('Some', 'Another-Value'), ('Multi-Line', 'Value1 Also This')])
# replace header with dict-like api, print new headers
>>> st1['some'] = 'Yet-Another-Value'; st1
StatusAndHeaders(protocol = 'HTTP/1.0', statusline = '200 OK', headers = [('Content-Type', 'ABC'), ('Some', 'Yet-Another-Value'), ('Multi-Line', 'Value1 Also This')])
# remove header
>>> st1.remove_header('some')
True
# already removed
>>> st1.remove_header('Some')
False
# add header with dict-like api, print new headers
>>> st1['foo'] = 'bar'; st1
StatusAndHeaders(protocol = 'HTTP/1.0', statusline = '200 OK', headers = [('Content-Type', 'ABC'), ('Multi-Line', 'Value1 Also This'), ('foo', 'bar')])
# dict-like api existence and get value
>>> 'bar' in st1
False
>>> 'foo' in st1
True
>>> st1['bar']
>>> st1.get('bar')
>>> st1['foo']
'bar'
>>> st1.get('foo')
'bar'
# remove header with dict-like api, print new headers
>>> del st1['foo']; st1
StatusAndHeaders(protocol = 'HTTP/1.0', statusline = '200 OK', headers = [('Content-Type', 'ABC'), ('Multi-Line', 'Value1 Also This')])
# empty
>>> st2 = StatusAndHeadersParser(['HTTP/1.0']).parse(StringIO(status_headers_2)); x = st2.validate_statusline('204 No Content'); st2
StatusAndHeaders(protocol = '', statusline = '204 No Content', headers = [])
>>> StatusAndHeadersParser(['HTTP/1.0']).parse(StringIO(status_headers_3))
StatusAndHeaders(protocol = 'HTTP/1.0', statusline = '204 Empty', headers = [('Content-Type', 'Value'), ('Content-Length', '0')])
# case-insensitive match
>>> StatusAndHeadersParser(['HTTP/1.0']).parse(StringIO(status_headers_4))
StatusAndHeaders(protocol = 'HTTP/1.0', statusline = '204 empty', headers = [('Content-Type', 'Value'), ('Content-Length', '0')])
"""
from warcio.statusandheaders import StatusAndHeadersParser, StatusAndHeaders
from six import StringIO
import pytest
status_headers_1 = "\
HTTP/1.0 200 OK\r\n\
Content-Type: ABC\r\n\
HTTP/1.0 200 OK\r\n\
Some: Value\r\n\
Multi-Line: Value1\r\n\
Also This\r\n\
\r\n\
Body"
status_headers_2 = """
"""
status_headers_3 = "\
HTTP/1.0 204 Empty\r\n\
Content-Type: Value\r\n\
%Invalid%\r\n\
\tMultiline\r\n\
Content-Length: 0\r\n\
\r\n"
status_headers_4 = "\
http/1.0 204 empty\r\n\
Content-Type: Value\r\n\
%Invalid%\r\n\
\tMultiline\r\n\
Content-Length: 0\r\n\
\r\n"
unknown_protocol_headers = "\
OtherBlah\r\n\
Foo: Bar\r\n\
\r\n"
req_headers = "\
GET / HTTP/1.0\r\n\
Foo: Bar\r\n\
Content-Length: 0\r\n"
if __name__ == "__main__":
import doctest
doctest.testmod()
def test_to_str_1():
res = str(StatusAndHeadersParser(['HTTP/1.0']).parse(StringIO(status_headers_1)))
exp = "\
HTTP/1.0 200 OK\r\n\
Content-Type: ABC\r\n\
Some: Value\r\n\
Multi-Line: Value1 Also This\r\n\
"
assert(res == exp)
def test_to_str_exclude():
def exclude(h):
if h[0].lower() == 'multi-line':
return None
return h
sah = StatusAndHeadersParser(['HTTP/1.0']).parse(StringIO(status_headers_1))
res = sah.to_str(exclude)
exp = "\
HTTP/1.0 200 OK\r\n\
Content-Type: ABC\r\n\
Some: Value\r\n\
"
assert(res == exp)
assert(sah.to_bytes(exclude) == (exp.encode('latin-1') + b'\r\n'))
def test_to_str_2():
res = str(StatusAndHeadersParser(['GET']).parse(StringIO(req_headers)))
assert(res == req_headers)
res = str(StatusAndHeadersParser(['GET']).parse(StringIO(req_headers + '\r\n')))
assert(res == req_headers)
def test_to_str_with_remove():
res = StatusAndHeadersParser(['GET']).parse(StringIO(req_headers))
res.remove_header('Foo')
exp = "\
GET / HTTP/1.0\r\n\
Content-Length: 0\r\n"
assert(str(res) == exp)
def test_status_empty():
with pytest.raises(EOFError):
StatusAndHeadersParser([], verify=False).parse(StringIO(''))
def test_status_one_word():
res = StatusAndHeadersParser(['GET'], verify=False).parse(StringIO('A'))
assert(str(res) == 'A\r\n')
def test_validate_status():
assert StatusAndHeaders('200 OK', []).validate_statusline('204 No Content')
assert not StatusAndHeaders('Bad OK', []).validate_statusline('204 No Content')
def test_non_ascii():
st = StatusAndHeaders('200 OK', [('Custom-Header', 'attachment; filename="Éxamplè"')])
res = st.to_ascii_bytes().decode('ascii')
assert res == "\
200 OK\r\n\
Custom-Header: attachment; filename*=UTF-8''%C3%89xampl%C3%A8\r\n\
\r\n\
"
def test_non_ascii_2():
st = StatusAndHeaders('200 OK', [('Custom-Header', 'value; filename="Éxamplè"; param; other=испытание; another')])
res = st.to_ascii_bytes().decode('ascii')
assert res == "\
200 OK\r\n\
Custom-Header: value; filename*=UTF-8''%C3%89xampl%C3%A8; param; other*=UTF-8''%D0%B8%D1%81%D0%BF%D1%8B%D1%82%D0%B0%D0%BD%D0%B8%D0%B5; another\r\n\
\r\n\
"
def test_non_ascii_3():
st = StatusAndHeaders('200 OK', [('Custom-Header', '“max-age=31536000″')])
res = st.to_ascii_bytes().decode('ascii')
assert res == "\
200 OK\r\n\
Custom-Header: %E2%80%9Cmax-age%3D31536000%E2%80%B3\r\n\
\r\n\
"
| 6,990
| 28.129167
| 202
|
py
|
warcio
|
warcio-master/test/__init__.py
|
def get_test_file(filename=''):
import os
return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', filename)
| 133
| 32.5
| 86
|
py
|
warcio
|
warcio-master/test/test_limitreader.py
|
from warcio.limitreader import LimitReader
from contextlib import closing
from io import BytesIO
class TestLimitReader(object):
def test_limit_reader_1(self):
assert b'abcdefghji' == LimitReader(BytesIO(b'abcdefghjiklmnopqrstuvwxyz'), 10).read(26)
def test_limit_reader_2(self):
assert b'abcdefgh' == LimitReader(BytesIO(b'abcdefghjiklmnopqrstuvwxyz'), 8).readline(26)
def test_limit_reader_3(self):
reader = LimitReader(BytesIO(b'abcdefghjiklmnopqrstuvwxyz'), 8)
new_reader = LimitReader.wrap_stream(reader, 4)
assert reader == new_reader
assert b'abcd' == new_reader.readline(26)
#assert b'abcd' == LimitReader.wrap_stream(LimitReader(BytesIO(b'abcdefghjiklmnopqrstuvwxyz'), 8), 4).readline(26)
def test_limit_reader_multiple_read(self):
reader = LimitReader(BytesIO(b'abcdefghjiklmnopqrstuvwxyz'), 10)
string = None
for x in [2, 2, 20]:
string = reader.read(x)
assert b'efghji' == string
def test_limit_reader_zero(self):
assert b'' == LimitReader(BytesIO(b'a'), 0).readline(0)
def test_limit_reader_invalid_wrap(self):
b = BytesIO(b'some data')
assert LimitReader.wrap_stream(b, 'abc') == b
def test_limit_reader_close(self):
reader = LimitReader(BytesIO(b'abcdefg'), 3)
with closing(reader):
assert b'abc' == reader.read(10)
assert reader.tell() == 3
| 1,457
| 35.45
| 122
|
py
|
warcio
|
warcio-master/test/test_capture_http_proxy.py
|
from warcio.capture_http import capture_http
import threading
from wsgiref.simple_server import make_server, WSGIServer
import time
import requests
from warcio.archiveiterator import ArchiveIterator
from pytest import raises
# ==================================================================
class TestCaptureHttpProxy():
def setup(cls):
def app(env, start_response):
result = ('Proxied: ' + env['PATH_INFO']).encode('utf-8')
headers = [('Content-Length', str(len(result)))]
start_response('200 OK', headers=headers)
return iter([result])
from wsgiprox.wsgiprox import WSGIProxMiddleware
wsgiprox = WSGIProxMiddleware(app, '/')
class NoLogServer(WSGIServer):
def handle_error(self, request, client_address):
pass
server = make_server('localhost', 0, wsgiprox, server_class=NoLogServer)
addr, cls.port = server.socket.getsockname()
cls.proxies = {'https': 'localhost:' + str(cls.port),
'http': 'localhost:' + str(cls.port)
}
def run():
try:
server.serve_forever()
except Exception as e:
print(e)
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.1)
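# Note on the assertions below (added for clarity): the wsgiprox middleware exposes
# the full proxied URL via PATH_INFO, so the echo app above produces bodies such as
# 'Proxied: /http://example.com/test'.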
def test_capture_http_proxy(self):
with capture_http() as warc_writer:
res = requests.get("http://example.com/test", proxies=self.proxies, verify=False)
ai = ArchiveIterator(warc_writer.get_stream())
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == "http://example.com/test"
assert response.content_stream().read().decode('utf-8') == 'Proxied: /http://example.com/test'
assert response.rec_headers['WARC-Proxy-Host'] == 'http://localhost:{0}'.format(self.port)
request = next(ai)
assert request.rec_type == 'request'
assert request.rec_headers['WARC-Target-URI'] == "http://example.com/test"
assert request.rec_headers['WARC-Proxy-Host'] == 'http://localhost:{0}'.format(self.port)
with raises(StopIteration):
assert next(ai)
def test_capture_https_proxy(self):
with capture_http() as warc_writer:
res = requests.get("https://example.com/test", proxies=self.proxies, verify=False)
res = requests.get("https://example.com/foo", proxies=self.proxies, verify=False)
# not recording this request
res = requests.get("https://example.com/skip", proxies=self.proxies, verify=False)
with capture_http(warc_writer):
res = requests.get("https://example.com/bar", proxies=self.proxies, verify=False)
ai = ArchiveIterator(warc_writer.get_stream())
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == "https://example.com/test"
assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/test'
request = next(ai)
assert request.rec_type == 'request'
assert request.rec_headers['WARC-Target-URI'] == "https://example.com/test"
assert request.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == "https://example.com/foo"
assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/foo'
request = next(ai)
assert request.rec_type == 'request'
assert request.rec_headers['WARC-Target-URI'] == "https://example.com/foo"
assert request.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == "https://example.com/bar"
assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/bar'
request = next(ai)
assert request.rec_type == 'request'
with raises(StopIteration):
assert next(ai)
def test_capture_https_proxy_same_session(self):
sesh = requests.session()
with capture_http() as warc_writer:
res = sesh.get("https://example.com/test", proxies=self.proxies, verify=False)
res = sesh.get("https://example.com/foo", proxies=self.proxies, verify=False)
# *will* be captured, as part of same session... (fix this?)
res = sesh.get("https://example.com/skip", proxies=self.proxies, verify=False)
with capture_http(warc_writer):
res = sesh.get("https://example.com/bar", proxies=self.proxies, verify=False)
ai = ArchiveIterator(warc_writer.get_stream())
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == "https://example.com/test"
assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/test'
request = next(ai)
assert request.rec_type == 'request'
assert request.rec_headers['WARC-Target-URI'] == "https://example.com/test"
assert request.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == "https://example.com/foo"
assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/foo'
request = next(ai)
assert request.rec_type == 'request'
assert request.rec_headers['WARC-Target-URI'] == "https://example.com/foo"
assert request.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == "https://example.com/skip"
assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/skip'
request = next(ai)
assert request.rec_type == 'request'
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == "https://example.com/bar"
assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/bar'
request = next(ai)
assert request.rec_type == 'request'
with raises(StopIteration):
assert next(ai)
| 7,458
| 43.39881
| 103
|
py
|
warcio
|
warcio-master/test/test_writer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from warcio.statusandheaders import StatusAndHeaders
from warcio.warcwriter import BufferWARCWriter, GzippingWrapper
from warcio.recordbuilder import RecordBuilder
from warcio.recordloader import ArcWarcRecordLoader
from warcio.archiveiterator import ArchiveIterator
from warcio.bufferedreaders import DecompressingBufferedReader
from . import get_test_file
from io import BytesIO
from collections import OrderedDict
import json
import re
import pytest
# ============================================================================
class FixedTestRecordMixin:
@classmethod
def _make_warc_id(cls, id_=None):
return '<urn:uuid:12345678-feb0-11e6-8f83-68a86d1772ce>'
@classmethod
def _make_warc_date(cls, use_micros=False):
if not use_micros:
return '2000-01-01T00:00:00Z'
else:
return '2000-01-01T00:00:00.123456Z'
class FixedTestRecordBuilder(FixedTestRecordMixin, RecordBuilder):
pass
class FixedTestWARCWriter(FixedTestRecordMixin, BufferWARCWriter):
pass
# ============================================================================
WARCINFO_RECORD = '\
WARC/1.0\r\n\
WARC-Type: warcinfo\r\n\
WARC-Record-ID: <urn:uuid:12345678-feb0-11e6-8f83-68a86d1772ce>\r\n\
WARC-Filename: testfile.warc.gz\r\n\
WARC-Date: 2000-01-01T00:00:00Z\r\n\
WARC-Block-Digest: sha1:GAD6P5BTZPRU57ICXEYUJZGCURZYABID\r\n\
Content-Type: application/warc-fields\r\n\
Content-Length: 86\r\n\
\r\n\
software: recorder test\r\n\
format: WARC File Format 1.0\r\n\
json-metadata: {"foo": "bar"}\r\n\
\r\n\
\r\n\
'
RESPONSE_RECORD = '\
WARC/1.0\r\n\
WARC-Type: response\r\n\
WARC-Record-ID: <urn:uuid:12345678-feb0-11e6-8f83-68a86d1772ce>\r\n\
WARC-Target-URI: http://example.com/\r\n\
WARC-Date: 2000-01-01T00:00:00Z\r\n\
WARC-Payload-Digest: sha1:B6QJ6BNJ3R4B23XXMRKZKHLPGJY2VE4O\r\n\
WARC-Block-Digest: sha1:OS3OKGCWQIJOAOC3PKXQOQFD52NECQ74\r\n\
Content-Type: application/http; msgtype=response\r\n\
Content-Length: 97\r\n\
\r\n\
HTTP/1.0 200 OK\r\n\
Content-Type: text/plain; charset="UTF-8"\r\n\
Custom-Header: somevalue\r\n\
\r\n\
some\n\
text\r\n\
\r\n\
'
RESPONSE_RECORD_UNICODE_HEADERS = '\
WARC/1.0\r\n\
WARC-Type: response\r\n\
WARC-Record-ID: <urn:uuid:12345678-feb0-11e6-8f83-68a86d1772ce>\r\n\
WARC-Target-URI: http://example.com/\r\n\
WARC-Date: 2000-01-01T00:00:00Z\r\n\
WARC-Payload-Digest: sha1:B6QJ6BNJ3R4B23XXMRKZKHLPGJY2VE4O\r\n\
WARC-Block-Digest: sha1:KMUABC6URWIQ7QXCZDQ5FS6WIBBFRORR\r\n\
Content-Type: application/http; msgtype=response\r\n\
Content-Length: 268\r\n\
\r\n\
HTTP/1.0 200 OK\r\n\
Content-Type: text/plain; charset="UTF-8"\r\n\
Content-Disposition: attachment; filename*=UTF-8\'\'%D0%B8%D1%81%D0%BF%D1%8B%D1%82%D0%B0%D0%BD%D0%B8%D0%B5.txt\r\n\
Custom-Header: somevalue\r\n\
Unicode-Header: %F0%9F%93%81%20text%20%F0%9F%97%84%EF%B8%8F\r\n\
\r\n\
some\n\
text\r\n\
\r\n\
'
RESPONSE_RECORD_2 = '\
WARC/1.0\r\n\
WARC-Type: response\r\n\
WARC-Record-ID: <urn:uuid:12345678-feb0-11e6-8f83-68a86d1772ce>\r\n\
WARC-Target-URI: http://example.com/\r\n\
WARC-Date: 2000-01-01T00:00:00Z\r\n\
WARC-Payload-Digest: sha1:B6QJ6BNJ3R4B23XXMRKZKHLPGJY2VE4O\r\n\
WARC-Block-Digest: sha1:U6KNJY5MVNU3IMKED7FSO2JKW6MZ3QUX\r\n\
Content-Type: application/http; msgtype=response\r\n\
Content-Length: 145\r\n\
\r\n\
HTTP/1.0 200 OK\r\n\
Content-Type: text/plain; charset="UTF-8"\r\n\
Content-Length: 9\r\n\
Custom-Header: somevalue\r\n\
Content-Encoding: x-unknown\r\n\
\r\n\
some\n\
text\r\n\
\r\n\
'
REQUEST_RECORD = '\
WARC/1.0\r\n\
WARC-Type: request\r\n\
WARC-Record-ID: <urn:uuid:12345678-feb0-11e6-8f83-68a86d1772ce>\r\n\
WARC-Target-URI: http://example.com/\r\n\
WARC-Date: 2000-01-01T00:00:00Z\r\n\
WARC-Payload-Digest: sha1:3I42H3S6NNFQ2MSVX7XZKYAYSCX5QBYJ\r\n\
WARC-Block-Digest: sha1:ONEHF6PTXPTTHE3333XHTD2X45TZ3DTO\r\n\
Content-Type: application/http; msgtype=request\r\n\
Content-Length: 54\r\n\
\r\n\
GET / HTTP/1.0\r\n\
User-Agent: foo\r\n\
Host: example.com\r\n\
\r\n\
\r\n\
\r\n\
'
REQUEST_RECORD_2 = '\
WARC/1.0\r\n\
WARC-Type: request\r\n\
WARC-Record-ID: <urn:uuid:12345678-feb0-11e6-8f83-68a86d1772ce>\r\n\
WARC-Target-URI: http://example.com/\r\n\
WARC-Date: 2000-01-01T00:00:00Z\r\n\
WARC-Payload-Digest: sha1:R5VZAKIE53UW5VGK43QJIFYS333QM5ZA\r\n\
WARC-Block-Digest: sha1:L7SVBUPPQ6RH3ANJD42G5JL7RHRVZ5DV\r\n\
Content-Type: application/http; msgtype=request\r\n\
Content-Length: 92\r\n\
\r\n\
POST /path HTTP/1.0\r\n\
Content-Type: application/json\r\n\
Content-Length: 17\r\n\
\r\n\
{"some": "value"}\r\n\
\r\n\
'
REVISIT_RECORD_1 = '\
WARC/1.0\r\n\
WARC-Type: revisit\r\n\
WARC-Record-ID: <urn:uuid:12345678-feb0-11e6-8f83-68a86d1772ce>\r\n\
WARC-Target-URI: http://example.com/\r\n\
WARC-Date: 2000-01-01T00:00:00Z\r\n\
WARC-Profile: http://netpreserve.org/warc/1.0/revisit/identical-payload-digest\r\n\
WARC-Refers-To-Target-URI: http://example.com/foo\r\n\
WARC-Refers-To-Date: 1999-01-01T00:00:00Z\r\n\
WARC-Payload-Digest: sha1:B6QJ6BNJ3R4B23XXMRKZKHLPGJY2VE4O\r\n\
WARC-Block-Digest: sha1:3I42H3S6NNFQ2MSVX7XZKYAYSCX5QBYJ\r\n\
Content-Type: application/http; msgtype=response\r\n\
Content-Length: 0\r\n\
\r\n\
\r\n\
\r\n\
'
REVISIT_RECORD_2 = '\
WARC/1.0\r\n\
WARC-Type: revisit\r\n\
WARC-Record-ID: <urn:uuid:12345678-feb0-11e6-8f83-68a86d1772ce>\r\n\
WARC-Target-URI: http://example.com/\r\n\
WARC-Date: 2000-01-01T00:00:00Z\r\n\
WARC-Profile: http://netpreserve.org/warc/1.0/revisit/identical-payload-digest\r\n\
WARC-Refers-To-Target-URI: http://example.com/foo\r\n\
WARC-Refers-To-Date: 1999-01-01T00:00:00Z\r\n\
WARC-Payload-Digest: sha1:B6QJ6BNJ3R4B23XXMRKZKHLPGJY2VE4O\r\n\
WARC-Block-Digest: sha1:A6J5UTI2QHHCZFCFNHQHCDD3JJFKP53V\r\n\
Content-Type: application/http; msgtype=response\r\n\
Content-Length: 88\r\n\
\r\n\
HTTP/1.0 200 OK\r\n\
Content-Type: text/plain; charset="UTF-8"\r\n\
Custom-Header: somevalue\r\n\
\r\n\
\r\n\
\r\n\
'
REVISIT_RECORD_3 = '\
WARC/1.1\r\n\
WARC-Type: revisit\r\n\
WARC-Record-ID: <urn:uuid:12345678-feb0-11e6-8f83-68a86d1772ce>\r\n\
WARC-Target-URI: http://example.com/\r\n\
WARC-Date: 2000-01-01T00:00:00.123456Z\r\n\
WARC-Profile: http://netpreserve.org/warc/1.1/revisit/identical-payload-digest\r\n\
WARC-Refers-To-Target-URI: http://example.com/foo\r\n\
WARC-Refers-To-Date: 1999-01-01T00:00:00Z\r\n\
WARC-Payload-Digest: sha1:B6QJ6BNJ3R4B23XXMRKZKHLPGJY2VE4O\r\n\
WARC-Block-Digest: sha1:3I42H3S6NNFQ2MSVX7XZKYAYSCX5QBYJ\r\n\
Content-Type: application/http; msgtype=response\r\n\
Content-Length: 0\r\n\
\r\n\
\r\n\
\r\n\
'
RESOURCE_RECORD = '\
WARC/1.0\r\n\
WARC-Type: resource\r\n\
WARC-Record-ID: <urn:uuid:12345678-feb0-11e6-8f83-68a86d1772ce>\r\n\
WARC-Target-URI: ftp://example.com/\r\n\
WARC-Date: 2000-01-01T00:00:00Z\r\n\
WARC-Payload-Digest: sha1:B6QJ6BNJ3R4B23XXMRKZKHLPGJY2VE4O\r\n\
WARC-Block-Digest: sha1:B6QJ6BNJ3R4B23XXMRKZKHLPGJY2VE4O\r\n\
Content-Type: text/plain\r\n\
Content-Length: 9\r\n\
\r\n\
some\n\
text\r\n\
\r\n\
'
RESOURCE_RECORD_NO_CONTENT_TYPE = '\
WARC/1.0\r\n\
WARC-Type: resource\r\n\
WARC-Record-ID: <urn:uuid:12345678-feb0-11e6-8f83-68a86d1772ce>\r\n\
WARC-Target-URI: ftp://example.com/\r\n\
WARC-Date: 2000-01-01T00:00:00Z\r\n\
WARC-Payload-Digest: sha1:B6QJ6BNJ3R4B23XXMRKZKHLPGJY2VE4O\r\n\
WARC-Block-Digest: sha1:B6QJ6BNJ3R4B23XXMRKZKHLPGJY2VE4O\r\n\
Content-Length: 9\r\n\
\r\n\
some\n\
text\r\n\
\r\n\
'
METADATA_RECORD = '\
WARC/1.0\r\n\
WARC-Type: metadata\r\n\
WARC-Record-ID: <urn:uuid:12345678-feb0-11e6-8f83-68a86d1772ce>\r\n\
WARC-Target-URI: http://example.com/\r\n\
WARC-Date: 2000-01-01T00:00:00Z\r\n\
WARC-Payload-Digest: sha1:ZOLBLKAQVZE5DXH56XE6EH6AI6ZUGDPT\r\n\
WARC-Block-Digest: sha1:ZOLBLKAQVZE5DXH56XE6EH6AI6ZUGDPT\r\n\
Content-Type: application/json\r\n\
Content-Length: 67\r\n\
\r\n\
{"metadata": {"nested": "obj", "list": [1, 2, 3], "length": "123"}}\r\n\
\r\n\
'
DNS_RESPONSE_RECORD = '\
WARC/1.0\r\n\
WARC-Type: response\r\n\
WARC-Record-ID: <urn:uuid:12345678-feb0-11e6-8f83-68a86d1772ce>\r\n\
WARC-Target-URI: dns:google.com\r\n\
WARC-Date: 2000-01-01T00:00:00Z\r\n\
WARC-Payload-Digest: sha1:2AAVJYKKIWK5CF6EWE7PH63EMNLO44TH\r\n\
WARC-Block-Digest: sha1:2AAVJYKKIWK5CF6EWE7PH63EMNLO44TH\r\n\
Content-Type: application/http; msgtype=response\r\n\
Content-Length: 147\r\n\
\r\n\
20170509000739\n\
google.com. 185 IN A 209.148.113.239\n\
google.com. 185 IN A 209.148.113.238\n\
google.com. 185 IN A 209.148.113.250\n\
\r\n\r\n\
'
DNS_RESOURCE_RECORD = '\
WARC/1.0\r\n\
WARC-Type: resource\r\n\
WARC-Record-ID: <urn:uuid:12345678-feb0-11e6-8f83-68a86d1772ce>\r\n\
WARC-Target-URI: dns:google.com\r\n\
WARC-Date: 2000-01-01T00:00:00Z\r\n\
WARC-Payload-Digest: sha1:2AAVJYKKIWK5CF6EWE7PH63EMNLO44TH\r\n\
WARC-Block-Digest: sha1:2AAVJYKKIWK5CF6EWE7PH63EMNLO44TH\r\n\
Content-Type: application/warc-record\r\n\
Content-Length: 147\r\n\
\r\n\
20170509000739\n\
google.com. 185 IN A 209.148.113.239\n\
google.com. 185 IN A 209.148.113.238\n\
google.com. 185 IN A 209.148.113.250\n\
\r\n\r\n\
'
# ============================================================================
# Decorator Setup
# ============================================================================
all_sample_records = {}
def sample_record(name, record_string):
def decorate(f):
all_sample_records[name] = (f, record_string)
return f
return decorate
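# (Added for clarity): sample_record registers each builder function together with
# its expected serialized record in all_sample_records; the record_sampler fixture
# below parametrizes TestWarcWriter.test_generate_record over this registry.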
# ============================================================================
# Sample Record Functions
# ============================================================================
@sample_record('warcinfo', WARCINFO_RECORD)
def sample_warcinfo(builder):
params = OrderedDict([('software', 'recorder test'),
('format', 'WARC File Format 1.0'),
('invalid', ''),
('json-metadata', json.dumps({'foo': 'bar'}))])
return builder.create_warcinfo_record('testfile.warc.gz', params)
# ============================================================================
@sample_record('response_1', RESPONSE_RECORD)
def sample_response(builder):
headers_list = [('Content-Type', 'text/plain; charset="UTF-8"'),
('Custom-Header', 'somevalue')
]
payload = b'some\ntext'
http_headers = StatusAndHeaders('200 OK', headers_list, protocol='HTTP/1.0')
return builder.create_warc_record('http://example.com/', 'response',
payload=BytesIO(payload),
length=len(payload),
http_headers=http_headers)
# ============================================================================
@sample_record('response_1-buff', RESPONSE_RECORD)
def sample_response_from_buff(builder):
payload = '\
HTTP/1.0 200 OK\r\n\
Content-Type: text/plain; charset="UTF-8"\r\n\
Custom-Header: somevalue\r\n\
\r\n\
some\ntext'.encode('utf-8')
return builder.create_warc_record('http://example.com/', 'response',
payload=BytesIO(payload),
length=len(payload))
# ============================================================================
@sample_record('response-unicode-header', RESPONSE_RECORD_UNICODE_HEADERS)
def sample_response_unicode(builder):
headers_list = [('Content-Type', 'text/plain; charset="UTF-8"'),
('Content-Disposition', u'attachment; filename="испытание.txt"'),
('Custom-Header', 'somevalue'),
('Unicode-Header', '📁 text 🗄️'),
]
payload = b'some\ntext'
http_headers = StatusAndHeaders('200 OK', headers_list, protocol='HTTP/1.0')
return builder.create_warc_record('http://example.com/', 'response',
payload=BytesIO(payload),
length=len(payload),
http_headers=http_headers)
# ============================================================================
@sample_record('response_2', RESPONSE_RECORD_2)
def sample_response_2(builder):
payload = b'some\ntext'
headers_list = [('Content-Type', 'text/plain; charset="UTF-8"'),
('Content-Length', str(len(payload))),
('Custom-Header', 'somevalue'),
('Content-Encoding', 'x-unknown'),
]
http_headers = StatusAndHeaders('200 OK', headers_list, protocol='HTTP/1.0')
return builder.create_warc_record('http://example.com/', 'response',
payload=BytesIO(payload),
length=len(payload),
http_headers=http_headers)
# ============================================================================
@sample_record('response_dns', DNS_RESPONSE_RECORD)
def sample_response_dns(builder):
payload = b'''\
20170509000739
google.com. 185 IN A 209.148.113.239
google.com. 185 IN A 209.148.113.238
google.com. 185 IN A 209.148.113.250
'''
return builder.create_warc_record('dns:google.com', 'response',
payload=BytesIO(payload))
# ============================================================================
@sample_record('resource_dns', DNS_RESOURCE_RECORD)
def sample_resource_dns(builder):
payload = b'''\
20170509000739
google.com. 185 IN A 209.148.113.239
google.com. 185 IN A 209.148.113.238
google.com. 185 IN A 209.148.113.250
'''
return builder.create_warc_record('dns:google.com', 'resource',
payload=BytesIO(payload))
# ============================================================================
@sample_record('request_1', REQUEST_RECORD)
def sample_request(builder):
headers_list = [('User-Agent', 'foo'),
('Host', 'example.com')]
http_headers = StatusAndHeaders('GET / HTTP/1.0', headers_list, is_http_request=True)
return builder.create_warc_record('http://example.com/', 'request',
http_headers=http_headers)
# ============================================================================
@sample_record('request_2', REQUEST_RECORD_2)
def sample_request_from_buff(builder):
payload = '\
POST /path HTTP/1.0\r\n\
Content-Type: application/json\r\n\
Content-Length: 17\r\n\
\r\n\
{"some": "value"}'.encode('utf-8')
return builder.create_warc_record('http://example.com/', 'request',
payload=BytesIO(payload),
length=len(payload))
# ============================================================================
@sample_record('resource', RESOURCE_RECORD)
def sample_resource(builder):
payload = b'some\ntext'
return builder.create_warc_record('ftp://example.com/', 'resource',
payload=BytesIO(payload),
length=len(payload),
warc_content_type='text/plain')
# ============================================================================
@sample_record('resource_no_ct', RESOURCE_RECORD_NO_CONTENT_TYPE)
def sample_resource_no_content_type(builder):
payload = b'some\ntext'
rec = builder.create_warc_record('ftp://example.com/', 'resource',
payload=BytesIO(payload),
length=len(payload))
# default content-type added, but removing to match expected string
assert rec.content_type == 'application/warc-record'
rec.content_type = None
return rec
# ============================================================================
@sample_record('metadata', METADATA_RECORD)
def sample_metadata(builder):
payload_dict = {"metadata": OrderedDict([("nested", "obj"),
("list", [1, 2, 3]),
("length", "123")])}
payload = json.dumps(payload_dict).encode('utf-8')
return builder.create_warc_record('http://example.com/', 'metadata',
payload=BytesIO(payload),
length=len(payload),
warc_content_type='application/json')
# ============================================================================
@sample_record('revisit_1', REVISIT_RECORD_1)
def sample_revisit_1(builder):
return builder.create_revisit_record('http://example.com/',
digest='sha1:B6QJ6BNJ3R4B23XXMRKZKHLPGJY2VE4O',
refers_to_uri='http://example.com/foo',
refers_to_date='1999-01-01T00:00:00Z')
# ============================================================================
@sample_record('revisit_2', REVISIT_RECORD_2)
def sample_revisit_2(builder):
resp = sample_response(builder)
return builder.create_revisit_record('http://example.com/',
digest='sha1:B6QJ6BNJ3R4B23XXMRKZKHLPGJY2VE4O',
refers_to_uri='http://example.com/foo',
refers_to_date='1999-01-01T00:00:00Z',
http_headers=resp.http_headers)
# ============================================================================
@sample_record('revisit_warc_1_1', REVISIT_RECORD_3)
def sample_revisit_1_1(builder):
builder.warc_version = 'WARC/1.1'
res = builder.create_revisit_record('http://example.com/',
digest='sha1:B6QJ6BNJ3R4B23XXMRKZKHLPGJY2VE4O',
refers_to_uri='http://example.com/foo',
refers_to_date='1999-01-01T00:00:00Z')
builder.warc_version = 'WARC/1.0'
return res
# ============================================================================
# Fixture Setup
# ============================================================================
@pytest.fixture(params=['gzip', 'plain'])
def is_gzip(request):
return request.param == 'gzip'
@pytest.fixture(params=['writer', 'builder'])
def builder_factory(request):
def factory(writer, builder_cls=FixedTestRecordBuilder, **kwargs):
if request.param == 'writer':
return writer
return builder_cls(**kwargs)
return factory
@pytest.fixture(params=all_sample_records.keys())
def record_sampler(request):
return all_sample_records[request.param]
# ============================================================================
class TestWarcWriter(object):
@classmethod
def _validate_record_content_len(cls, stream):
for record in ArchiveIterator(stream, no_record_parse=True):
assert record.http_headers == None
assert int(record.rec_headers.get_header('Content-Length')) == record.length
assert record.length == len(record.raw_stream.read())
def test_generate_record(self, record_sampler, is_gzip, builder_factory):
writer = FixedTestWARCWriter(gzip=is_gzip)
builder = builder_factory(writer)
record_maker, record_string = record_sampler
record = record_maker(builder)
writer.write_record(record)
raw_buff = writer.get_contents()
self._validate_record_content_len(BytesIO(raw_buff))
stream = DecompressingBufferedReader(writer.get_stream())
buff = stream.read()
if is_gzip:
assert len(buff) > len(raw_buff)
else:
assert len(buff) == len(raw_buff)
assert buff.decode('utf-8') == record_string
# assert parsing record matches as well
stream = DecompressingBufferedReader(writer.get_stream())
parsed_record = ArcWarcRecordLoader().parse_record_stream(stream)
writer2 = FixedTestWARCWriter(gzip=False)
writer2.write_record(parsed_record)
assert writer2.get_contents().decode('utf-8') == record_string
# verify parts of record
stream = DecompressingBufferedReader(writer.get_stream())
parsed_record = ArcWarcRecordLoader().parse_record_stream(stream)
content_buff = parsed_record.content_stream().read().decode('utf-8')
assert content_buff in record_string
rec_type = parsed_record.rec_type
# verify http_headers
# match original
assert record.http_headers == parsed_record.http_headers
if parsed_record.http_headers:
assert rec_type in ('response', 'request', 'revisit')
else:
# empty revisit
if rec_type == 'revisit':
assert len(content_buff) == 0
else:
assert len(content_buff) == parsed_record.length
def test_warcinfo_record(self, is_gzip, builder_factory):
writer = FixedTestWARCWriter(gzip=is_gzip)
builder = builder_factory(writer)
record = sample_warcinfo(builder)
writer.write_record(record)
reader = DecompressingBufferedReader(writer.get_stream())
parsed_record = ArcWarcRecordLoader().parse_record_stream(reader)
assert parsed_record.rec_headers.get_header('WARC-Type') == 'warcinfo'
assert parsed_record.rec_headers.get_header('Content-Type') == 'application/warc-fields'
assert parsed_record.rec_headers.get_header('WARC-Filename') == 'testfile.warc.gz'
assert parsed_record.rec_headers.get_header('WARC-Block-Digest') == 'sha1:GAD6P5BTZPRU57ICXEYUJZGCURZYABID'
buff = parsed_record.content_stream().read().decode('utf-8')
assert 'json-metadata: {"foo": "bar"}\r\n' in buff
assert 'format: WARC File Format 1.0\r\n' in buff
def test_request_response_concur(self, is_gzip, builder_factory):
writer = BufferWARCWriter(gzip=is_gzip)
builder = builder_factory(writer, builder_cls=RecordBuilder)
resp = sample_response(builder)
req = sample_request(builder)
# test explicitly calling ensure_digest with block digest enabled on a record
writer.ensure_digest(resp, block=True, payload=True)
writer.write_request_response_pair(req, resp)
stream = writer.get_stream()
reader = ArchiveIterator(stream)
resp, req = list(reader)
resp_id = resp.rec_headers.get_header('WARC-Record-ID')
req_id = req.rec_headers.get_header('WARC-Record-ID')
assert resp_id != req_id
assert resp_id == req.rec_headers.get_header('WARC-Concurrent-To')
def test_response_warc_1_1(self, is_gzip, builder_factory):
writer = BufferWARCWriter(gzip=is_gzip, warc_version='WARC/1.1')
builder = builder_factory(writer, warc_version='WARC/1.1')
resp = sample_response(builder)
writer.write_record(resp)
stream = writer.get_stream()
reader = ArchiveIterator(stream)
recs = list(reader)
assert len(recs) == 1
assert recs[0].rec_headers.protocol == 'WARC/1.1'
# ISO 8601 date with fractional seconds (microseconds)
assert '.' in recs[0].rec_headers['WARC-Date']
assert len(recs[0].rec_headers['WARC-Date']) == 27
def _conv_to_streaming_record(self, record_buff, rec_type):
# strip off the trailing \r\n\r\n that terminates an uncompressed record
record_buff = record_buff[:-4]
record_buff = re.sub('Content-Length:[^\r\n]+\r\n', '', record_buff, 1)
# don't remove payload digest for revisit, as it can not be recomputed
if rec_type != 'revisit':
record_buff = re.sub('WARC-Payload-Digest:[^\r\n]+\r\n', '', record_buff, 1)
assert 'WARC-Payload-Digest: ' not in record_buff
record_buff = re.sub('WARC-Block-Digest:[^\r\n]+\r\n', 'WARC-Block-Digest: sha1:x-invalid\r\n', record_buff, 1)
assert 'WARC-Block-Digest: sha1:x-invalid' in record_buff
return record_buff
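# What _conv_to_streaming_record produces: the trailing \r\n\r\n record terminator,
# the Content-Length header and (except for revisits) the WARC-Payload-Digest are
# removed, and the WARC-Block-Digest is replaced with an invalid placeholder. The
# result simulates a record read from a stream of unknown length, so the test below
# can check that write_record() buffers the payload and regenerates the length and
# digests, reproducing the original record string.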
def test_read_from_stream_no_content_length(self, record_sampler, is_gzip, builder_factory):
writer = FixedTestWARCWriter(gzip=is_gzip)
builder = builder_factory(writer)
record_maker, record_string = record_sampler
full_record = record_maker(builder)
stream = BytesIO()
record_no_cl = self._conv_to_streaming_record(record_string, full_record.rec_type)
if is_gzip:
gzip_stream = GzippingWrapper(stream)
gzip_stream.write(record_no_cl.encode('utf-8'))
gzip_stream.flush()
else:
stream.write(record_no_cl.encode('utf-8'))
# parse to verify http headers + payload matches sample record
# but not rec headers (missing content-length)
stream.seek(0)
parsed_record = ArcWarcRecordLoader().parse_record_stream(DecompressingBufferedReader(stream))
if 'Content-Disposition' not in record_string:
assert full_record.http_headers == parsed_record.http_headers
assert full_record.raw_stream.read() == parsed_record.raw_stream.read()
assert full_record.rec_headers != parsed_record.rec_headers
# parse and write
stream.seek(0)
parsed_record = ArcWarcRecordLoader().parse_record_stream(DecompressingBufferedReader(stream))
writer.write_record(parsed_record)
stream = DecompressingBufferedReader(writer.get_stream())
buff = stream.read()
# assert written record matches expected response record
# with content-length, digests computed
assert buff.decode('utf-8') == record_string
@pytest.mark.parametrize('filename', ['example.arc.gz', 'example.arc'])
def test_arc2warc(self, filename, is_gzip):
writer = FixedTestWARCWriter(gzip=is_gzip)
def validate_warcinfo(record):
assert record.rec_headers.get('WARC-Type') == 'warcinfo'
assert record.rec_headers.get('WARC-Filename') == 'live-web-example.arc.gz'
assert record.rec_headers.get('Content-Type') == 'text/plain'
def validate_response(record):
assert record.rec_headers.get('WARC-Type') == 'response'
assert record.rec_headers.get('Content-Length') == '1591'
assert record.length == 1591
assert record.rec_headers.get('WARC-Target-URI') == 'http://example.com/'
assert record.rec_headers.get('WARC-Date') == '2014-02-16T05:02:21Z'
assert record.rec_headers.get('WARC-Block-Digest') == 'sha1:PEWDX5GTH66WU74WBPGFECIYBMPMP3FP'
assert record.rec_headers.get('WARC-Payload-Digest') == 'sha1:B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A'
with open(get_test_file(filename), 'rb') as fh:
for record in ArchiveIterator(fh, arc2warc=True):
writer.write_record(record)
if record.rec_type == 'response':
validate_response(record)
if record.rec_type == 'warcinfo':
validate_warcinfo(record)
raw_buff = writer.get_contents()
self._validate_record_content_len(BytesIO(raw_buff))
stream = writer.get_stream()
records = list(ArchiveIterator(stream, arc2warc=False))
assert len(records) == 2
validate_warcinfo(records[0])
validate_response(records[1])
validate_warcinfo(records[0])
def test_utf8_rewrite_content_adjust(self):
UTF8_PAYLOAD = u'\
HTTP/1.0 200 OK\r\n\
Content-Type: text/plain; charset="UTF-8"\r\n\
Content-Disposition: attachment; filename="испытание.txt"\r\n\
Custom-Header: somevalue\r\n\
Unicode-Header: %F0%9F%93%81%20text%20%F0%9F%97%84%EF%B8%8F\r\n\
\r\n\
some\n\
text'
content_length = len(UTF8_PAYLOAD.encode('utf-8'))
UTF8_RECORD = u'\
WARC/1.0\r\n\
WARC-Type: response\r\n\
WARC-Record-ID: <urn:uuid:12345678-feb0-11e6-8f83-68a86d1772ce>\r\n\
WARC-Target-URI: http://example.com/\r\n\
WARC-Date: 2000-01-01T00:00:00Z\r\n\
WARC-Payload-Digest: sha1:B6QJ6BNJ3R4B23XXMRKZKHLPGJY2VE4O\r\n\
WARC-Block-Digest: sha1:KMUABC6URWIQ7QXCZDQ5FS6WIBBFRORR\r\n\
Content-Type: application/http; msgtype=response\r\n\
Content-Length: {0}\r\n\
\r\n\
{1}\r\n\
\r\n\
'.format(content_length, UTF8_PAYLOAD)
assert(content_length == 226)
record = ArcWarcRecordLoader().parse_record_stream(BytesIO(UTF8_RECORD.encode('utf-8')))
writer = BufferWARCWriter(gzip=False)
writer.write_record(record)
raw_buff = writer.get_contents()
assert raw_buff.decode('utf-8') == RESPONSE_RECORD_UNICODE_HEADERS
for record in ArchiveIterator(writer.get_stream()):
assert record.length == 268
def test_identity(self):
""" read(write(record)) should yield record """
payload = b'foobar'
writer = BufferWARCWriter(gzip=True)
httpHeaders = StatusAndHeaders('GET / HTTP/1.1', {}, is_http_request=True)
warcHeaders = {'Foo': 'Bar'}
record = writer.create_warc_record('http://example.com/', 'request',
payload=BytesIO(payload),
warc_headers_dict=warcHeaders, http_headers=httpHeaders)
writer.write_record(record)
for new_rec in ArchiveIterator(writer.get_stream()):
assert new_rec.rec_type == record.rec_type
assert new_rec.rec_headers == record.rec_headers
assert new_rec.content_type == record.content_type
assert new_rec.length == record.length
assert new_rec.http_headers == record.http_headers
assert new_rec.raw_stream.read() == payload
| 29,716
| 34.085006
| 119
|
py
|
warcio
|
warcio-master/test/test_utils.py
|
import sys
import pytest
from collections import Counter
from io import BytesIO
import os
import tempfile
import warcio.utils as utils
from . import get_test_file
try:
from multidict import CIMultiDict, MultiDict
except ImportError:
pass
class TestUtils(object):
def test_headers_to_str_headers(self):
result = [('foo', 'bar'), ('baz', 'barf')]
header_dict = {'foo': b'bar', b'baz': 'barf'}
ret = utils.headers_to_str_headers(header_dict)
assert Counter(ret) == Counter(result)
aiohttp_raw_headers = ((b'foo', b'bar'), (b'baz', b'barf'))
assert Counter(utils.headers_to_str_headers(aiohttp_raw_headers)) == Counter(result)
@pytest.mark.skipif('multidict' not in sys.modules, reason='requires multidict be installed')
def test_multidict_headers_to_str_headers(self):
result = [('foo', 'bar'), ('baz', 'barf')]
aiohttp_headers = MultiDict(foo='bar', baz=b'barf')
ret = utils.headers_to_str_headers(aiohttp_headers)
assert Counter(ret) == Counter(result)
# The case-insensitive dict titlecases the keys
aiohttp_headers = CIMultiDict(Foo='bar', Baz=b'barf')
titlecase_result = [('Foo', 'bar'), ('Baz', 'barf')]
ret = utils.headers_to_str_headers(aiohttp_headers)
assert Counter(ret) == Counter(titlecase_result)
def test_open_or_default(self):
default_fh = BytesIO(b'NOTWARC/1.0\r\n')
with utils.open_or_default(get_test_file('example.warc'), 'rb', default_fh) as fh:
assert fh.readline().decode('utf-8') == 'WARC/1.0\r\n'
with utils.open_or_default(None, 'rb', default_fh) as fh:
assert fh.readline().decode('utf-8') == 'NOTWARC/1.0\r\n'
default_fh.seek(0)
with utils.open_or_default(b'-', 'rb', default_fh) as fh:
assert fh.readline().decode('utf-8') == 'NOTWARC/1.0\r\n'
default_fh.seek(0)
with utils.open_or_default(u'-', 'rb', default_fh) as fh:
assert fh.readline().decode('utf-8') == 'NOTWARC/1.0\r\n'
default_fh.seek(0)
with utils.open_or_default(default_fh, 'rb', None) as fh:
assert fh.readline().decode('utf-8') == 'NOTWARC/1.0\r\n'
def test_to_native_str(self):
# binary string
assert utils.to_native_str(b'10') == '10'
# unicode string
assert utils.to_native_str(u'10') == '10'
# default string
assert utils.to_native_str('10') == '10'
# not string, leave as is
assert utils.to_native_str(10) == 10
def test_open_exclusive(self):
temp_dir = tempfile.mkdtemp('warctest')
full_name = os.path.join(temp_dir, 'foo.txt')
with utils.open(full_name, 'xb') as fh:
fh.write(b'test\r\nfoo')
with pytest.raises(OSError):
with utils.open(full_name, 'xb') as fh:
fh.write(b'test\r\nfoo')
with utils.open(full_name, 'rb') as fh:
assert fh.read() == b'test\r\nfoo'
os.remove(full_name)
os.rmdir(temp_dir)
| 3,084
| 31.819149
| 97
|
py
|
lusol
|
lusol-master/gen/interface.py
|
#!/usr/bin/env python3
import io
def parse_org_table(table_lines):
# remove separator row
table_lines.pop(1)
table_list = [[b.strip() for b in a[1:-2].split('|')] for a in table_lines]
# get column list
column_list = table_list.pop(0)
#print(column_names)
# organize table data
table_data = []
for param in table_list:
param_dict = {}
for column, value in zip(column_list,param):
param_dict[column] = value
table_data.append(param_dict)
#print(table_data)
return table_data
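# Illustrative input (hypothetical variable names) -- parse_org_table expects an
# org-mode table such as:
# | var_name | c_type  |
# |----------+---------|
# | m        | int64_t |
# | a        | double  |
# The separator row is dropped and each remaining row becomes a dict keyed by the
# header columns, e.g. [{'var_name': 'm', 'c_type': 'int64_t'},
#                       {'var_name': 'a', 'c_type': 'double'}].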
def read_org_file(file_name):
# read lines
file = open(file_name,'r')
file_lines = file.readlines()
file.close()
# get function name
function_name = file_lines[0].strip()
#print(function_name)
# parse remaining lines as table
table_data = parse_org_table(file_lines[1:])
return function_name, table_data
def load_interface_files(file_name):
# read lines
file = open(file_name,'r')
file_lines = file.readlines()
file.close()
interface_list = parse_org_table(file_lines)
interface_data = []
for interface_function in interface_list:
function_name, argument_data = read_org_file(interface_function['interface_file'])
d = {}
d['function_name'] = function_name
d['argument_data'] = argument_data
d['format'] = interface_function['format']
interface_data.append(d)
#print(interface_data)
return interface_data
def function_declaration(function_dict,prefix='',suffix=''):
f = io.StringIO()
f.write('void ')
f.write(prefix + function_dict['function_name'] + suffix)
f.write('(\n')
for arg in function_dict['argument_data']:
f.write(' {0}* {1},\n'.format(arg['c_type'],arg['var_name']))
func_dec = f.getvalue()[:-2] + ')'
#print(func_dec)
return func_dec
def function_call(function_dict,prefix='',suffix=''):
f = io.StringIO()
f.write(' ' + prefix + function_dict['function_name'] + suffix)
f.write('(')
for arg in function_dict['argument_data']:
f.write('{},'.format(arg['var_name']))
func_dec = f.getvalue()[:-1] + ')'
#print(func_dec)
return func_dec
def get_header(file_name):
interface_data = load_interface_files(file_name)
# start the header buffer
f = io.StringIO()
# start the header file
f.write('#ifndef CLUSOL_H_\n')
f.write('#define CLUSOL_H_\n')
f.write('\n')
# include directives
f.write('#include <stdint.h>')
f.write('\n\n')
# function declarations
for interface_func in interface_data:
f.write(function_declaration(interface_func,prefix='c'))
f.write(';\n\n')
# end the headerfile
f.write('#endif // CLUSOL_H_\n')
# clean up and return
header_str = f.getvalue()
f.close();
return header_str
def get_source(file_name):
interface_data = load_interface_files(file_name)
# start the source buffer
f = io.StringIO()
# include directives
f.write('#include "clusol.h"\n')
f.write('\n')
# fortran function declarations
f.write('// declarations for fortran function calls\n')
for interface_func in interface_data:
if interface_func['format'] == 'f90':
f.write(function_declaration(interface_func,prefix='__lusol_MOD_'))
if interface_func['format'] == 'f77':
f.write(function_declaration(interface_func,suffix='_'))
f.write(';\n\n')
# function calls in c
f.write('// c interface function definitions\n')
for interface_func in interface_data:
f.write(function_declaration(interface_func,prefix='c'))
f.write(' {\n')
if interface_func['format'] == 'f90':
f.write(function_call(interface_func,prefix='__lusol_MOD_'))
if interface_func['format'] == 'f77':
f.write(function_call(interface_func,suffix='_'))
f.write(';\n')
f.write('}\n\n')
# clean up and return
source_str = f.getvalue()
f.close();
return source_str
# for testing
if __name__ == '__main__':
# parse arguments
import argparse
parser = argparse.ArgumentParser(
description='Generate C interface to LUSOL.')
parser.add_argument('-i','--input',
help='input file name',
required=True)
parser.add_argument('-o','--output',
help='output file name',
required=True)
parser.add_argument('-t','--type',
help='output file type',
required=True,
choices=['header','source'])
args = parser.parse_args()
# generate code
if args.type == 'header':
file_str = get_header(args.input)
elif args.type == 'source':
file_str = get_source(args.input)
else:
raise Exception('unknown type')
# write code
f = open(args.output,'w')
f.write(file_str)
f.close()
| 4,987
| 31.601307
| 90
|
py
|
PT-MAP
|
PT-MAP-master/test_standard.py
|
import collections
import pickle
import random
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import math
import torch.nn.functional as F
from numpy import linalg as LA
from tqdm.notebook import tqdm
use_gpu = torch.cuda.is_available()
# ========================================
# loading datas
def centerDatas(datas):
datas[:, :n_lsamples] = datas[:, :n_lsamples, :] - datas[:, :n_lsamples].mean(1, keepdim=True)
datas[:, :n_lsamples] = datas[:, :n_lsamples, :] / torch.norm(datas[:, :n_lsamples, :], 2, 2)[:, :, None]
datas[:, n_lsamples:] = datas[:, n_lsamples:, :] - datas[:, n_lsamples:].mean(1, keepdim=True)
datas[:, n_lsamples:] = datas[:, n_lsamples:, :] / torch.norm(datas[:, n_lsamples:, :], 2, 2)[:, :, None]
return datas
def scaleEachUnitaryDatas(datas):
norms = datas.norm(dim=2, keepdim=True)
return datas/norms
def QRreduction(datas):
ndatas = torch.qr(datas.permute(0,2,1)).R
ndatas = ndatas.permute(0,2,1)
return ndatas
class Model:
def __init__(self, n_ways):
self.n_ways = n_ways
# --------- GaussianModel
class GaussianModel(Model):
def __init__(self, n_ways, lam):
super(GaussianModel, self).__init__(n_ways)
self.mus = None # shape [n_runs][n_ways][n_nfeat]
self.lam = lam
def clone(self):
other = GaussianModel(self.n_ways, self.lam)
other.mus = self.mus.clone()
return other
def cuda(self):
self.mus = self.mus.cuda()
def initFromLabelledDatas(self):
self.mus = ndatas.reshape(n_runs, n_shot+n_queries,n_ways, n_nfeat)[:,:n_shot,].mean(1)
def updateFromEstimate(self, estimate, alpha):
Dmus = estimate - self.mus
self.mus = self.mus + alpha * (Dmus)
def compute_optimal_transport(self, M, r, c, epsilon=1e-6):
r = r.cuda()
c = c.cuda()
n_runs, n, m = M.shape
P = torch.exp(- self.lam * M)
P /= P.view((n_runs, -1)).sum(1).unsqueeze(1).unsqueeze(1)
u = torch.zeros(n_runs, n).cuda()
maxiters = 1000
iters = 1
# Sinkhorn iterations: alternately rescale rows and columns of P until the row sums converge to r
while torch.max(torch.abs(u - P.sum(2))) > epsilon:
u = P.sum(2)
P *= (r / u).view((n_runs, -1, 1))
P *= (c / P.sum(1)).view((n_runs, 1, -1))
if iters == maxiters:
break
iters = iters + 1
return P, torch.sum(P * M)
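# Sinkhorn-Knopp iteration: starting from P proportional to exp(-lam * M), rows and
# columns are alternately rescaled until the row sums match r (one unit of mass per
# query sample) and the column sums match c (n_queries per class). The returned P is
# therefore a soft assignment of unlabelled samples to class centroids, balanced so
# that every class receives the same total mass.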
def getProbas(self):
# compute squared dist to centroids [n_runs][n_samples][n_ways]
dist = (ndatas.unsqueeze(2)-self.mus.unsqueeze(1)).norm(dim=3).pow(2)
p_xj = torch.zeros_like(dist)
r = torch.ones(n_runs, n_usamples)
c = torch.ones(n_runs, n_ways) * n_queries
p_xj_test, _ = self.compute_optimal_transport(dist[:, n_lsamples:], r, c, epsilon=1e-6)
p_xj[:, n_lsamples:] = p_xj_test
p_xj[:,:n_lsamples].fill_(0)
p_xj[:,:n_lsamples].scatter_(2,labels[:,:n_lsamples].unsqueeze(2), 1)
return p_xj
def estimateFromMask(self, mask):
emus = mask.permute(0,2,1).matmul(ndatas).div(mask.sum(dim=1).unsqueeze(2))
return emus
# =========================================
# MAP
# =========================================
class MAP:
def __init__(self, alpha=None):
self.verbose = False
self.progressBar = False
self.alpha = alpha
def getAccuracy(self, probas):
olabels = probas.argmax(dim=2)
matches = labels.eq(olabels).float()
acc_test = matches[:,n_lsamples:].mean(1)
m = acc_test.mean().item()
pm = acc_test.std().item() *1.96 / math.sqrt(n_runs)
return m, pm
def performEpoch(self, model, epochInfo=None):
p_xj = model.getProbas()
self.probas = p_xj
if self.verbose:
print("accuracy from filtered probas", self.getAccuracy(self.probas))
m_estimates = model.estimateFromMask(self.probas)
# update centroids
model.updateFromEstimate(m_estimates, self.alpha)
if self.verbose:
op_xj = model.getProbas()
acc = self.getAccuracy(op_xj)
print("output model accuracy", acc)
def loop(self, model, n_epochs=20):
self.probas = model.getProbas()
if self.verbose:
print("initialisation model accuracy", self.getAccuracy(self.probas))
if self.progressBar:
if type(self.progressBar) == bool:
pb = tqdm(total = n_epochs)
else:
pb = self.progressBar
for epoch in range(1, n_epochs+1):
if self.verbose:
print("----- epoch[{:3d}] lr_p: {:0.3f} lr_m: {:0.3f}".format(epoch, self.alpha))
self.performEpoch(model, epochInfo=(epoch, n_epochs))
if (self.progressBar): pb.update()
# get final accuracy and return it
op_xj = model.getProbas()
acc = self.getAccuracy(op_xj)
return acc
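# Overall PT-MAP loop: each epoch computes soft assignments with getProbas(), turns
# them into new centroid estimates with estimateFromMask(), and moves the centroids
# a step of size alpha towards those estimates (updateFromEstimate). Accuracy is the
# argmax of the final soft assignments on the query samples.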
if __name__ == '__main__':
# ---- data loading
n_shot = 5
n_ways = 5
n_queries = 15
n_runs=10000
n_lsamples = n_ways * n_shot
n_usamples = n_ways * n_queries
n_samples = n_lsamples + n_usamples
import FSLTask
cfg = {'shot':n_shot, 'ways':n_ways, 'queries':n_queries}
FSLTask.loadDataSet("miniimagenet")
FSLTask.setRandomStates(cfg)
ndatas = FSLTask.GenerateRunSet(cfg=cfg)
ndatas = ndatas.permute(0,2,1,3).reshape(n_runs, n_samples, -1)
labels = torch.arange(n_ways).view(1,1,n_ways).expand(n_runs,n_shot+n_queries,5).clone().view(n_runs, n_samples)
# Power transform
beta = 0.5
ndatas[:,] = torch.pow(ndatas[:,]+1e-6, beta)
ndatas = QRreduction(ndatas)
n_nfeat = ndatas.size(2)
ndatas = scaleEachUnitaryDatas(ndatas)
# trans-mean-sub
ndatas = centerDatas(ndatas)
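# Preprocessing pipeline: an element-wise power transform (beta = 0.5), a QR reduction
# that rotates the features into an n_samples-dimensional basis (pairwise inner
# products are preserved), per-sample unit-norm scaling, and finally separate mean
# centering (with renormalisation) of the support and query parts in centerDatas.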
print("size of the datas...", ndatas.size())
# switch to cuda
ndatas = ndatas.cuda()
labels = labels.cuda()
#MAP
lam = 10
model = GaussianModel(n_ways, lam)
model.initFromLabelledDatas()
alpha = 0.2
optim = MAP(alpha)
optim.verbose=False
optim.progressBar=True
acc_test = optim.loop(model, n_epochs=20)
print("final accuracy found {:0.2f} +- {:0.2f}".format(*(100*x for x in acc_test)))
| 6,764
| 28.159483
| 122
|
py
|
PT-MAP
|
PT-MAP-master/wrn_mixup_model.py
|
### dropout has been removed in this code; the original code had dropout ###
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
import sys, os
import numpy as np
import random
act = torch.nn.ReLU()
import math
from torch.nn.utils.weight_norm import WeightNorm
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class distLinear(nn.Module):
def __init__(self, indim, outdim):
super(distLinear, self).__init__()
self.L = nn.Linear( indim, outdim, bias = False)
self.class_wise_learnable_norm = True #See the issue#4&8 in the github
if self.class_wise_learnable_norm:
WeightNorm.apply(self.L, 'weight', dim=0) #split the weight update component to direction and norm
if outdim <=200:
self.scale_factor = 2; #a fixed scale factor to scale the output of cos value into a reasonably large input for softmax
else:
self.scale_factor = 10; #in omniglot, a larger scale factor is required to handle >1000 output classes.
def forward(self, x):
x_norm = torch.norm(x, p=2, dim =1).unsqueeze(1).expand_as(x)
x_normalized = x.div(x_norm+ 0.00001)
if not self.class_wise_learnable_norm:
L_norm = torch.norm(self.L.weight.data, p=2, dim =1).unsqueeze(1).expand_as(self.L.weight.data)
self.L.weight.data = self.L.weight.data.div(L_norm + 0.00001)
cos_dist = self.L(x_normalized) #matrix product by forward function, but when using WeightNorm, this also multiply the cosine distance by a class-wise learnable norm, see the issue#4&8 in the github
scores = self.scale_factor* (cos_dist)
return scores
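# distLinear is a cosine classifier: the input features are L2-normalised and, with
# WeightNorm applied on dim 0, the linear layer scores each class by cosine similarity
# (times a learnable class-wise norm), multiplied by scale_factor so the softmax
# receives sufficiently peaked logits.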
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(int(nb_layers)):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
def to_one_hot(inp,num_classes):
y_onehot = torch.FloatTensor(inp.size(0), num_classes)
if torch.cuda.is_available():
y_onehot = y_onehot.cuda()
y_onehot.zero_()
x = inp.type(torch.LongTensor)
if torch.cuda.is_available():
x = x.cuda()
x = torch.unsqueeze(x , 1)
y_onehot.scatter_(1, x , 1)
return Variable(y_onehot,requires_grad=False)
# return y_onehot
def mixup_data(x, y, lam):
'''Compute the mixup data. Return mixed inputs, pairs of targets, and lambda'''
batch_size = x.size()[0]
index = torch.randperm(batch_size)
if torch.cuda.is_available():
index = index.cuda()
mixed_x = lam * x + (1 - lam) * x[index,:]
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam
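# mixup: each sample is blended with a randomly permuted partner,
#   mixed_x = lam * x + (1 - lam) * x[perm]
# and the training scripts (see mixup_criterion in train.py / train_cifar.py) combine
# the losses of the two original labels with the same weights:
#   loss = lam * CE(pred, y_a) + (1 - lam) * CE(pred, y_b)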
class WideResNet(nn.Module):
def __init__(self, depth=28, widen_factor=10, num_classes= 200 , loss_type = 'dist', per_img_std = False, stride = 1 ):
dropRate = 0.5
flatten = True
super(WideResNet, self).__init__()
nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
assert((depth - 4) % 6 == 0)
n = (depth - 4) / 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, stride, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and linear
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.nChannels = nChannels[3]
if loss_type == 'softmax':
self.linear = nn.Linear(nChannels[3], int(num_classes))
self.linear.bias.data.fill_(0)
else:
self.linear = distLinear(nChannels[3], int(num_classes))
self.num_classes = num_classes
if flatten:
self.final_feat_dim = 640
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x, target= None, mixup=False, mixup_hidden=True, mixup_alpha=None , lam = 0.4):
if target is not None:
if mixup_hidden:
layer_mix = random.randint(0,3)
elif mixup:
layer_mix = 0
else:
layer_mix = None
out = x
target_a = target_b = target
if layer_mix == 0:
out, target_a , target_b , lam = mixup_data(out, target, lam=lam)
out = self.conv1(out)
out = self.block1(out)
if layer_mix == 1:
out, target_a , target_b , lam = mixup_data(out, target, lam=lam)
out = self.block2(out)
if layer_mix == 2:
out, target_a , target_b , lam = mixup_data(out, target, lam=lam)
out = self.block3(out)
if layer_mix == 3:
out, target_a , target_b , lam = mixup_data(out, target, lam=lam)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, out.size()[2:])
out = out.view(out.size(0), -1)
out1 = self.linear(out)
return out , out1 , target_a , target_b
else:
out = x
out = self.conv1(out)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, out.size()[2:])
out = out.view(out.size(0), -1)
out1 = self.linear(out)
return out, out1
def wrn28_10(num_classes=200 , loss_type = 'dist'):
model = WideResNet(depth=28, widen_factor=10, num_classes=num_classes, loss_type = loss_type , per_img_std = False, stride = 1 )
return model
| 7,986
| 36.674528
| 206
|
py
|
PT-MAP
|
PT-MAP-master/res_mixup_model.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
from torch.nn.utils.weight_norm import WeightNorm
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def mixup_data(x, y, lam):
'''Compute the mixup data. Return mixed inputs, pairs of targets, and lambda'''
batch_size = x.size()[0]
index = torch.randperm(batch_size)
if torch.cuda.is_available():
index = index.cuda()
mixed_x = lam * x + (1 - lam) * x[index,:]
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam
class distLinear(nn.Module):
def __init__(self, indim, outdim):
super(distLinear, self).__init__()
self.L = nn.Linear( indim, outdim, bias = False)
self.class_wise_learnable_norm = True #See the issue#4&8 in the github
if self.class_wise_learnable_norm:
WeightNorm.apply(self.L, 'weight', dim=0) #split the weight update component to direction and norm
if outdim <=200:
self.scale_factor = 2; #a fixed scale factor to scale the output of cos value into a reasonably large input for softmax
else:
self.scale_factor = 10; #in omniglot, a larger scale factor is required to handle >1000 output classes.
def forward(self, x):
x_norm = torch.norm(x, p=2, dim =1).unsqueeze(1).expand_as(x)
x_normalized = x.div(x_norm+ 0.00001)
if not self.class_wise_learnable_norm:
L_norm = torch.norm(self.L.weight.data, p=2, dim =1).unsqueeze(1).expand_as(self.L.weight.data)
self.L.weight.data = self.L.weight.data.div(L_norm + 0.00001)
cos_dist = self.L(x_normalized) #matrix product by forward function, but when using WeightNorm, this also multiply the cosine distance by a class-wise learnable norm, see the issue#4&8 in the github
scores = self.scale_factor* (cos_dist)
return scores
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=200, zero_init_residual=False):
super(ResNet, self).__init__()
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = distLinear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x, target=None, mixup=False, mixup_hidden = True, mixup_alpha=None, lam=0.4):
if target is not None:
if mixup_hidden:
layer_mix = random.randint(0,5)
elif mixup:
layer_mix = 0
else:
layer_mix = None
out = x
if layer_mix == 0:
out, target_a, target_b, lam = mixup_data(out, target, lam=lam)
out = self.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
if layer_mix == 1:
out, target_a, target_b, lam = mixup_data(out, target, lam=lam)
out = self.layer2(out)
if layer_mix == 2:
out, target_a, target_b, lam = mixup_data(out, target, lam=lam)
out = self.layer3(out)
if layer_mix == 3:
out, target_a, target_b, lam = mixup_data(out, target, lam=lam)
out = self.layer4(out)
if layer_mix == 4:
out, target_a, target_b, lam = mixup_data(out, target, lam=lam)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out1 = self.fc.forward(out)
if layer_mix == 5:
out, target_a, target_b, lam = mixup_data(out, target, lam=lam)
return out, out1, target_a, target_b
else:
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out1 = self.fc.forward(out)
return out, out1
def resnet18(**kwargs):
"""Constructs a ResNet-18 model.
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
| 7,280
| 35.58794
| 206
|
py
|
PT-MAP
|
PT-MAP-master/save_plk.py
|
from __future__ import print_function
import argparse
import csv
import os
import collections
import pickle
import random
import numpy as np
import torch
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from io_utils import parse_args
from data.datamgr import SimpleDataManager , SetDataManager
import configs
import wrn_mixup_model
import res_mixup_model
import torch.nn.functional as F
from io_utils import parse_args, get_resume_file ,get_assigned_file
from os import path
use_gpu = torch.cuda.is_available()
class WrappedModel(nn.Module):
def __init__(self, module):
super(WrappedModel, self).__init__()
self.module = module
def forward(self, x):
return self.module(x)
def save_pickle(file, data):
with open(file, 'wb') as f:
pickle.dump(data, f)
def load_pickle(file):
with open(file, 'rb') as f:
return pickle.load(f)
def extract_feature(val_loader, model, checkpoint_dir, tag='last'):
save_dir = '{}/{}'.format(checkpoint_dir, tag)
if os.path.isfile(save_dir + '/output.plk'):
data = load_pickle(save_dir + '/output.plk')
return data
else:
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
#model.eval()
with torch.no_grad():
output_dict = collections.defaultdict(list)
for i, (inputs, labels) in enumerate(val_loader):
# compute output
inputs = inputs.cuda()
labels = labels.cuda()
outputs,_ = model(inputs)
outputs = outputs.cpu().data.numpy()
for out, label in zip(outputs, labels):
output_dict[label.item()].append(out)
all_info = output_dict
save_pickle(save_dir + '/output.plk', all_info)
return all_info
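# The saved output.plk maps each class label to the list of penultimate-layer feature
# vectors extracted for that class; FSLTask._load_pickle() later stacks these into the
# feature/label tensors used to sample few-shot tasks.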
if __name__ == '__main__':
params = parse_args('test')
loadfile = configs.data_dir[params.dataset] + 'novel.json'
if params.dataset == 'miniImagenet' or params.dataset == 'CUB':
datamgr = SimpleDataManager(84, batch_size = 256)
novel_loader = datamgr.get_data_loader(loadfile, aug = False)
checkpoint_dir = '%s/checkpoints/%s/%s_%s' %(configs.save_dir, params.dataset, params.model, params.method)
modelfile = get_resume_file(checkpoint_dir)
if params.model == 'WideResNet28_10':
model = wrn_mixup_model.wrn28_10(num_classes=params.num_classes)
elif params.model == 'ResNet18':
model = res_mixup_model.resnet18(num_classes=params.num_classes)
model = model.cuda()
cudnn.benchmark = True
checkpoint = torch.load(modelfile)
state = checkpoint['state']
state_keys = list(state.keys())
callwrap = False
if 'module' in state_keys[0]:
callwrap = True
if callwrap:
model = WrappedModel(model)
model_dict_load = model.state_dict()
model_dict_load.update(state)
model.load_state_dict(model_dict_load)
model.eval()
output_dict=extract_feature(novel_loader, model, checkpoint_dir, tag='last')
print("features saved!")
| 3,145
| 27.342342
| 108
|
py
|
PT-MAP
|
PT-MAP-master/FSLTask.py
|
import os
import pickle
import numpy as np
import torch
# from tqdm import tqdm
# ========================================================
# Useful paths
_datasetFeaturesFiles = {"miniimagenet": "./checkpoints/miniImagenet/WideResNet28_10_S2M2_R/last/output.plk",
"cub": "./checkpoints/CUB/WideResNet28_10_S2M2_R/last/output.plk",
"cifar": "./checkpoints/cifar/WideResNet28_10_S2M2_R/last/output.plk",
"cross": "./checkpoints/cross/WideResNet28_10_S2M2_R/last/output.plk"}
_cacheDir = "./cache"
_maxRuns = 10000
_min_examples = -1
# ========================================================
# Module internal functions and variables
_randStates = None
_rsCfg = None
def _load_pickle(file):
with open(file, 'rb') as f:
data = pickle.load(f)
labels = [np.full(shape=len(data[key]), fill_value=key)
for key in data]
data = [features for key in data for features in data[key]]
dataset = dict()
dataset['data'] = torch.FloatTensor(np.stack(data, axis=0))
dataset['labels'] = torch.LongTensor(np.concatenate(labels))
return dataset
# =========================================================
# Callable variables and functions from outside the module
data = None
labels = None
dsName = None
def loadDataSet(dsname):
if dsname not in _datasetFeaturesFiles:
raise NameError('Unknown dataset: {}'.format(dsname))
global dsName, data, labels, _randStates, _rsCfg, _min_examples
dsName = dsname
_randStates = None
_rsCfg = None
# Loading data from files on computer
# home = expanduser("~")
dataset = _load_pickle(_datasetFeaturesFiles[dsname])
# Computing the number of items per class in the dataset
_min_examples = dataset["labels"].shape[0]
for i in range(dataset["labels"].shape[0]):
if torch.where(dataset["labels"] == dataset["labels"][i])[0].shape[0] > 0:
_min_examples = min(_min_examples, torch.where(
dataset["labels"] == dataset["labels"][i])[0].shape[0])
print("Guaranteed number of items per class: {:d}\n".format(_min_examples))
# Generating data tensors
data = torch.zeros((0, _min_examples, dataset["data"].shape[1]))
labels = dataset["labels"].clone()
while labels.shape[0] > 0:
indices = torch.where(dataset["labels"] == labels[0])[0]
data = torch.cat([data, dataset["data"][indices, :]
[:_min_examples].view(1, _min_examples, -1)], dim=0)
indices = torch.where(labels != labels[0])[0]
labels = labels[indices]
print("Total of {:d} classes, {:d} elements each, with dimension {:d}\n".format(
data.shape[0], data.shape[1], data.shape[2]))
def GenerateRun(iRun, cfg, regenRState=False, generate=True):
global _randStates, data, _min_examples
if not regenRState:
np.random.set_state(_randStates[iRun])
classes = np.random.permutation(np.arange(data.shape[0]))[:cfg["ways"]]
shuffle_indices = np.arange(_min_examples)
dataset = None
if generate:
dataset = torch.zeros(
(cfg['ways'], cfg['shot']+cfg['queries'], data.shape[2]))
for i in range(cfg['ways']):
shuffle_indices = np.random.permutation(shuffle_indices)
if generate:
dataset[i] = data[classes[i], shuffle_indices,
:][:cfg['shot']+cfg['queries']]
return dataset
def ClassesInRun(iRun, cfg):
global _randStates, data
np.random.set_state(_randStates[iRun])
classes = np.random.permutation(np.arange(data.shape[0]))[:cfg["ways"]]
return classes
def setRandomStates(cfg):
global _randStates, _maxRuns, _rsCfg
if _rsCfg == cfg:
return
rsFile = os.path.join(_cacheDir, "RandStates_{}_s{}_q{}_w{}".format(
dsName, cfg['shot'], cfg['queries'], cfg['ways']))
if not os.path.exists(rsFile):
print("{} does not exist, regenerating it...".format(rsFile))
np.random.seed(0)
_randStates = []
for iRun in range(_maxRuns):
_randStates.append(np.random.get_state())
GenerateRun(iRun, cfg, regenRState=True, generate=False)
torch.save(_randStates, rsFile)
else:
print("reloading random states from file....")
_randStates = torch.load(rsFile)
_rsCfg = cfg
def GenerateRunSet(start=None, end=None, cfg=None):
global dataset, _maxRuns
if start is None:
start = 0
if end is None:
end = _maxRuns
if cfg is None:
cfg = {"shot": 1, "ways": 5, "queries": 15}
setRandomStates(cfg)
print("generating task from {} to {}".format(start, end))
dataset = torch.zeros(
(end-start, cfg['ways'], cfg['shot']+cfg['queries'], data.shape[2]))
for iRun in range(end-start):
dataset[iRun] = GenerateRun(start+iRun, cfg)
return dataset
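# GenerateRunSet returns a tensor of shape [end - start, ways, shot + queries, dim].
# test_standard.py permutes it to [n_runs, n_samples, dim] with permute(0, 2, 1, 3),
# so the first n_ways * n_shot rows of each run are the labelled (support) samples
# and the remaining n_ways * n_queries rows are the query samples.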
# define a main code to test this module
if __name__ == "__main__":
print("Testing Task loader for Few Shot Learning")
loadDataSet('miniimagenet')
cfg = {"shot": 1, "ways": 5, "queries": 15}
setRandomStates(cfg)
run10 = GenerateRun(10, cfg)
print("First call:", run10[:2, :2, :2])
run10 = GenerateRun(10, cfg)
print("Second call:", run10[:2, :2, :2])
ds = GenerateRunSet(start=2, end=12, cfg=cfg)
print("Third call:", ds[8, :2, :2, :2])
print(ds.size())
| 5,459
| 32.090909
| 109
|
py
|
PT-MAP
|
PT-MAP-master/train_cifar.py
|
#!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree.
from __future__ import print_function
import argparse
import csv
import os
import numpy as np
import torch
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from data.datamgr import SimpleDataManager
import configs
import wrn_mixup_model
import res_mixup_model
from io_utils import parse_args, get_resume_file ,get_assigned_file
use_gpu = torch.cuda.is_available()
def train_manifold_mixup(base_loader, base_loader_test, model, start_epoch, stop_epoch, params):
def mixup_criterion(criterion, pred, y_a, y_b, lam):
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters())
print("stop_epoch", start_epoch, stop_epoch)
for epoch in range(start_epoch, stop_epoch):
print('\nEpoch: %d' % epoch)
model.train()
train_loss = 0
reg_loss = 0
correct = 0
correct1 = 0.0
total = 0
for batch_idx, (input_var, target_var) in enumerate(base_loader):
if use_gpu:
input_var, target_var = input_var.cuda(), target_var.cuda()
input_var, target_var = Variable(input_var), Variable(target_var)
lam = np.random.beta(params.alpha, params.alpha)
_ , outputs , target_a , target_b = model(input_var, target_var, mixup_hidden= True, mixup_alpha = params.alpha , lam = lam)
loss = mixup_criterion(criterion, outputs, target_a, target_b, lam)
train_loss += loss.data.item()
_, predicted = torch.max(outputs.data, 1)
total += target_var.size(0)
correct += (lam * predicted.eq(target_a.data).cpu().sum().float()
+ (1 - lam) * predicted.eq(target_b.data).cpu().sum().float())
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch_idx%50 ==0 :
print('{0}/{1}'.format(batch_idx,len(base_loader)), 'Loss: %.3f | Acc: %.3f%% '
% (train_loss/(batch_idx+1),100.*correct/total))
if not os.path.isdir(params.checkpoint_dir):
os.makedirs(params.checkpoint_dir)
if (epoch % params.save_freq==0) or (epoch==stop_epoch-1):
outfile = os.path.join(params.checkpoint_dir, '{:d}.tar'.format(epoch))
torch.save({'epoch':epoch, 'state':model.state_dict() }, outfile)
model.eval()
with torch.no_grad():
test_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(base_loader_test):
if use_gpu:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
f , outputs = model.forward(inputs)
loss = criterion(outputs, targets)
test_loss += loss.data.item()
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
print('Loss: %.3f | Acc: %.3f%%'
% (test_loss/(batch_idx+1), 100.*correct/total ))
torch.cuda.empty_cache()
return model
def train_rotation(base_loader, base_loader_test, model, start_epoch, stop_epoch, params , tmp):
if params.model == 'WideResNet28_10':
rotate_classifier = nn.Sequential(nn.Linear(640,4))
elif params.model == 'ResNet18':
rotate_classifier = nn.Sequential(nn.Linear(512,4))
if use_gpu:
rotate_classifier.cuda()
if tmp is not None and 'rotate' in tmp:
print("loading rotate model")
rotate_classifier.load_state_dict(tmp['rotate'])
optimizer = torch.optim.Adam([
{'params': model.parameters()},
{'params': rotate_classifier.parameters()}
])
lossfn = nn.CrossEntropyLoss()
max_acc = 0
print("stop_epoch" , start_epoch, stop_epoch )
for epoch in range(start_epoch,stop_epoch):
rotate_classifier.train()
model.train()
avg_loss=0
avg_rloss=0
for i, (x,y) in enumerate(base_loader):
bs = x.size(0)
x_ = []
y_ = []
a_ = []
for j in range(bs):
x90 = x[j].transpose(2,1).flip(1)
x180 = x90.transpose(2,1).flip(1)
x270 = x180.transpose(2,1).flip(1)
x_ += [x[j], x90, x180, x270]
y_ += [y[j] for _ in range(4)]
a_ += [torch.tensor(0),torch.tensor(1),torch.tensor(2),torch.tensor(3)]
x_ = Variable(torch.stack(x_,0))
y_ = Variable(torch.stack(y_,0))
a_ = Variable(torch.stack(a_,0))
if use_gpu:
x_ = x_.cuda()
y_ = y_.cuda()
a_ = a_.cuda()
f,scores = model.forward(x_)
rotate_scores = rotate_classifier(f)
optimizer.zero_grad()
rloss = lossfn(rotate_scores,a_)
closs = lossfn(scores, y_)
loss = 0.5*closs + 0.5*rloss
loss.backward()
optimizer.step()
avg_loss = avg_loss+closs.data.item()
avg_rloss = avg_rloss+rloss.data.item()
if i % 50 ==0:
print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Rotate Loss {:f}'.format(epoch, i, len(base_loader), avg_loss/float(i+1),avg_rloss/float(i+1) ))
if not os.path.isdir(params.checkpoint_dir):
os.makedirs(params.checkpoint_dir)
if (epoch % params.save_freq==0) or (epoch==stop_epoch-1):
outfile = os.path.join(params.checkpoint_dir, '{:d}.tar'.format(epoch))
torch.save({'epoch':epoch, 'state':model.state_dict() , 'rotate': rotate_classifier.state_dict()}, outfile)
model.eval()
rotate_classifier.eval()
with torch.no_grad():
correct = rcorrect = total = 0
for i,(x,y) in enumerate(base_loader_test):
if i<10:
bs = x.size(0)
x_ = []
y_ = []
a_ = []
for j in range(bs):
x90 = x[j].transpose(2,1).flip(1)
x180 = x90.transpose(2,1).flip(1)
x270 = x180.transpose(2,1).flip(1)
x_ += [x[j], x90, x180, x270]
y_ += [y[j] for _ in range(4)]
a_ += [torch.tensor(0),torch.tensor(1),torch.tensor(2),torch.tensor(3)]
x_ = Variable(torch.stack(x_,0))
y_ = Variable(torch.stack(y_,0))
a_ = Variable(torch.stack(a_,0))
if use_gpu:
x_ = x_.cuda()
y_ = y_.cuda()
a_ = a_.cuda()
f,scores = model(x_)
rotate_scores = rotate_classifier(f)
p1 = torch.argmax(scores,1)
correct += (p1==y_).sum().item()
total += p1.size(0)
p2 = torch.argmax(rotate_scores,1)
rcorrect += (p2==a_).sum().item()
print("Epoch {0} : Accuracy {1}, Rotate Accuracy {2}".format(epoch,(float(correct)*100)/total,(float(rcorrect)*100)/total))
torch.cuda.empty_cache()
return model
if __name__ == '__main__':
params = parse_args('train')
params.dataset = 'cifar'
image_size = 32
base_file = configs.data_dir[params.dataset] + 'base.json'
params.checkpoint_dir = '%s/checkpoints/%s/%s_%s' %(configs.save_dir, params.dataset, params.model, params.method)
start_epoch = params.start_epoch
stop_epoch = params.stop_epoch
base_datamgr = SimpleDataManager(image_size, batch_size = params.batch_size)
base_loader = base_datamgr.get_data_loader( base_file , aug = params.train_aug )
base_datamgr_test = SimpleDataManager(image_size, batch_size = params.test_batch_size)
base_loader_test = base_datamgr_test.get_data_loader( base_file , aug = False )
if params.model == 'WideResNet28_10':
model = wrn_mixup_model.wrn28_10(num_classes=64)
elif params.model == 'ResNet18':
model = res_mixup_model.resnet18(num_classes=64)
if params.method =='S2M2_R':
if use_gpu:
if torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model, device_ids = range(torch.cuda.device_count()))
model.cuda()
if params.resume:
resume_file = get_resume_file(params.checkpoint_dir )
print("resume_file" , resume_file)
tmp = torch.load(resume_file)
start_epoch = tmp['epoch']+1
print("restored epoch is" , tmp['epoch'])
state = tmp['state']
model.load_state_dict(state)
else:
resume_rotate_file_dir = params.checkpoint_dir.replace("S2M2_R","rotation")
resume_file = get_resume_file( resume_rotate_file_dir )
print("resume_file" , resume_file)
tmp = torch.load(resume_file)
start_epoch = tmp['epoch']+1
print("restored epoch is" , tmp['epoch'])
state = tmp['state']
state_keys = list(state.keys())
'''
for i, key in enumerate(state_keys):
if "feature." in key:
newkey = key.replace("feature.","") # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
state[newkey] = state.pop(key)
else:
state[key.replace("classifier.","linear.")] = state[key]
state.pop(key)
'''
model.load_state_dict(state)
model = train_manifold_mixup(base_loader, base_loader_test, model, start_epoch, start_epoch+stop_epoch, params)
elif params.method =='rotation':
if use_gpu:
if torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model, device_ids = range(torch.cuda.device_count()))
model.cuda()
if params.resume:
resume_file = get_resume_file(params.checkpoint_dir )
print("resume_file" , resume_file)
tmp = torch.load(resume_file)
start_epoch = tmp['epoch']+1
print("restored epoch is" , tmp['epoch'])
state = tmp['state']
model.load_state_dict(state)
model = train_rotation(base_loader, base_loader_test, model, start_epoch, stop_epoch, params, None)
| 11,421
| 35.375796
| 199
|
py
|
PT-MAP
|
PT-MAP-master/train.py
|
#!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree.
from __future__ import print_function
import argparse
import csv
import os
import numpy as np
import torch
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from data.datamgr import SimpleDataManager , SetDataManager
import configs
import wrn_mixup_model
import res_mixup_model
from io_utils import parse_args, get_resume_file ,get_assigned_file
from os import path
use_gpu = torch.cuda.is_available()
image_size = 84
def train_s2m2(base_loader, base_loader_test, model, start_epoch, stop_epoch, params, tmp):
def mixup_criterion(criterion, pred, y_a, y_b, lam):
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
criterion = nn.CrossEntropyLoss()
if params.model == 'WideResNet28_10':
rotate_classifier = nn.Sequential(nn.Linear(640,4))
elif params.model == 'ResNet18':
rotate_classifier = nn.Sequential(nn.Linear(512,4))
rotate_classifier.cuda()
if 'rotate' in tmp:
print("loading rotate model")
rotate_classifier.load_state_dict(tmp['rotate'])
optimizer = torch.optim.Adam([
{'params': model.parameters()},
{'params': rotate_classifier.parameters()}
])
print("stop_epoch", start_epoch, stop_epoch)
for epoch in range(start_epoch, stop_epoch):
print('\nEpoch: %d' % epoch)
model.train()
train_loss = 0
rotate_loss = 0
correct = 0
total = 0
torch.cuda.empty_cache()
for batch_idx, (inputs, targets) in enumerate(base_loader):
if use_gpu:
inputs, targets = inputs.cuda(), targets.cuda()
lam = np.random.beta(params.alpha, params.alpha)
f , outputs , target_a , target_b = model(inputs, targets, mixup_hidden= True , mixup_alpha = params.alpha , lam = lam)
loss = mixup_criterion(criterion, outputs, target_a, target_b, lam)
train_loss += loss.data.item()
optimizer.zero_grad()
loss.backward()
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += (lam * predicted.eq(target_a.data).cpu().sum().float()
+ (1 - lam) * predicted.eq(target_b.data).cpu().sum().float())
bs = inputs.size(0)
inputs_ = []
targets_ = []
a_ = []
indices = np.arange(bs)
np.random.shuffle(indices)
split_size = int(bs/4)
for j in indices[0:split_size]:
x90 = inputs[j].transpose(2,1).flip(1)
x180 = x90.transpose(2,1).flip(1)
x270 = x180.transpose(2,1).flip(1)
inputs_ += [inputs[j], x90, x180, x270]
targets_ += [targets[j] for _ in range(4)]
a_ += [torch.tensor(0),torch.tensor(1),torch.tensor(2),torch.tensor(3)]
inputs = Variable(torch.stack(inputs_,0))
targets = Variable(torch.stack(targets_,0))
a_ = Variable(torch.stack(a_,0))
if use_gpu:
inputs = inputs.cuda()
targets = targets.cuda()
a_ = a_.cuda()
rf , outputs = model(inputs)
rotate_outputs = rotate_classifier(rf)
rloss = criterion(rotate_outputs,a_)
closs = criterion(outputs, targets)
loss = (rloss+closs)/2.0
rotate_loss += rloss.data.item()
loss.backward()
optimizer.step()
if batch_idx%50 ==0 :
print('{0}/{1}'.format(batch_idx,len(base_loader)),
'Loss: %.3f | Acc: %.3f%% | RotLoss: %.3f '
% (train_loss/(batch_idx+1),
100.*correct/total,rotate_loss/(batch_idx+1)))
if not os.path.isdir(params.checkpoint_dir):
os.makedirs(params.checkpoint_dir)
if (epoch % params.save_freq==0) or (epoch==stop_epoch-1):
outfile = os.path.join(params.checkpoint_dir, '{:d}.tar'.format(epoch))
torch.save({'epoch':epoch, 'state':model.state_dict() }, outfile)
model.eval()
with torch.no_grad():
test_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(base_loader_test):
if use_gpu:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
f , outputs = model.forward(inputs)
loss = criterion(outputs, targets)
test_loss += loss.data.item()
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
print('Loss: %.3f | Acc: %.3f%%'
% (test_loss/(batch_idx+1), 100.*correct/total ))
return model
def train_rotation(base_loader, base_loader_test, model, start_epoch, stop_epoch, params, tmp):
if params.model == 'WideResNet28_10':
rotate_classifier = nn.Sequential(nn.Linear(640,4))
elif params.model == 'ResNet18':
rotate_classifier = nn.Sequential(nn.Linear(512,4))
if use_gpu:
rotate_classifier.cuda()
if 'rotate' in tmp:
print("loading rotate model")
rotate_classifier.load_state_dict(tmp['rotate'])
optimizer = torch.optim.Adam([
{'params': model.parameters()},
{'params': rotate_classifier.parameters()}
])
lossfn = nn.CrossEntropyLoss()
max_acc = 0
print("stop_epoch" , start_epoch, stop_epoch )
for epoch in range(start_epoch,stop_epoch):
rotate_classifier.train()
model.train()
avg_loss=0
avg_rloss=0
for i, (x,y) in enumerate(base_loader):
bs = x.size(0)
x_ = []
y_ = []
a_ = []
for j in range(bs):
x90 = x[j].transpose(2,1).flip(1)
x180 = x90.transpose(2,1).flip(1)
x270 = x180.transpose(2,1).flip(1)
x_ += [x[j], x90, x180, x270]
y_ += [y[j] for _ in range(4)]
a_ += [torch.tensor(0),torch.tensor(1),torch.tensor(2),torch.tensor(3)]
x_ = Variable(torch.stack(x_,0))
y_ = Variable(torch.stack(y_,0))
a_ = Variable(torch.stack(a_,0))
if use_gpu:
x_ = x_.cuda()
y_ = y_.cuda()
a_ = a_.cuda()
f,scores = model.forward(x_)
rotate_scores = rotate_classifier(f)
optimizer.zero_grad()
rloss = lossfn(rotate_scores,a_)
closs = lossfn(scores, y_)
loss = closs + rloss
loss.backward()
optimizer.step()
avg_loss = avg_loss+closs.data.item()
avg_rloss = avg_rloss+rloss.data.item()
if i % 50 ==0:
print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Rotate Loss {:f}'.format(epoch, i, len(base_loader), avg_loss/float(i+1),avg_rloss/float(i+1) ))
if not os.path.isdir(params.checkpoint_dir):
os.makedirs(params.checkpoint_dir)
if (epoch % params.save_freq==0) or (epoch==stop_epoch-1):
outfile = os.path.join(params.checkpoint_dir, '{:d}.tar'.format(epoch))
torch.save({'epoch':epoch, 'state':model.state_dict() , 'rotate': rotate_classifier.state_dict()}, outfile)
model.eval()
rotate_classifier.eval()
with torch.no_grad():
correct = rcorrect = total = 0
for i,(x,y) in enumerate(base_loader_test):
if i<2:
bs = x.size(0)
x_ = []
y_ = []
a_ = []
for j in range(bs):
x90 = x[j].transpose(2,1).flip(1)
x180 = x90.transpose(2,1).flip(1)
x270 = x180.transpose(2,1).flip(1)
x_ += [x[j], x90, x180, x270]
y_ += [y[j] for _ in range(4)]
a_ += [torch.tensor(0),torch.tensor(1),torch.tensor(2),torch.tensor(3)]
x_ = Variable(torch.stack(x_,0))
y_ = Variable(torch.stack(y_,0))
a_ = Variable(torch.stack(a_,0))
if use_gpu:
x_ = x_.cuda()
y_ = y_.cuda()
a_ = a_.cuda()
f,scores = model(x_)
rotate_scores = rotate_classifier(f)
p1 = torch.argmax(scores,1)
correct += (p1==y_).sum().item()
total += p1.size(0)
p2 = torch.argmax(rotate_scores,1)
rcorrect += (p2==a_).sum().item()
print("Epoch {0} : Accuracy {1}, Rotate Accuracy {2}".format(epoch,(float(correct)*100)/total,(float(rcorrect)*100)/total))
torch.cuda.empty_cache()
return model
if __name__ == '__main__':
params = parse_args('train')
base_file = configs.data_dir[params.dataset] + 'base.json'
params.checkpoint_dir = '%s/checkpoints/%s/%s_%s' %(configs.save_dir, params.dataset, params.model, params.method)
start_epoch = params.start_epoch
stop_epoch = params.stop_epoch
base_datamgr = SimpleDataManager(image_size, batch_size = params.batch_size)
base_loader = base_datamgr.get_data_loader( base_file , aug = params.train_aug )
base_datamgr_test = SimpleDataManager(image_size, batch_size = params.test_batch_size)
base_loader_test = base_datamgr_test.get_data_loader( base_file , aug = False )
if params.model == 'WideResNet28_10':
model = wrn_mixup_model.wrn28_10(num_classes=params.num_classes)
elif params.model == 'ResNet18':
model = res_mixup_model.resnet18(num_classes=params.num_classes)
if params.method =='S2M2_R':
if use_gpu:
if torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model, device_ids = range(torch.cuda.device_count()))
model.cuda()
if params.resume:
resume_file = get_resume_file(params.checkpoint_dir )
print("resume_file" , resume_file)
tmp = torch.load(resume_file)
start_epoch = tmp['epoch']+1
print("restored epoch is" , tmp['epoch'])
state = tmp['state']
model.load_state_dict(state)
else:
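# when not resuming an S2M2_R run directly, warm-start the backbone from the
# latest checkpoint of the corresponding rotation-pretraining run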
resume_rotate_file_dir = params.checkpoint_dir.replace("S2M2_R","rotation")
resume_file = get_resume_file( resume_rotate_file_dir )
print("resume_file" , resume_file)
tmp = torch.load(resume_file)
start_epoch = tmp['epoch']+1
print("restored epoch is" , tmp['epoch'])
state = tmp['state']
state_keys = list(state.keys())
'''
for i, key in enumerate(state_keys):
if "feature." in key:
newkey = key.replace("feature.","") # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
state[newkey] = state.pop(key)
else:
state[key.replace("classifier.","linear.")] = state[key]
state.pop(key)
'''
model.load_state_dict(state)
model = train_s2m2(base_loader, base_loader_test, model, start_epoch, start_epoch+stop_epoch, params, {})
elif params.method =='rotation':
if use_gpu:
if torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model, device_ids = range(torch.cuda.device_count()))
model.cuda()
if params.resume:
resume_file = get_resume_file(params.checkpoint_dir )
print("resume_file" , resume_file)
tmp = torch.load(resume_file)
start_epoch = tmp['epoch']+1
print("restored epoch is" , tmp['epoch'])
state = tmp['state']
model.load_state_dict(state)
model = train_rotation(base_loader, base_loader_test, model, start_epoch, stop_epoch, params, {})
file_length: 13,168 | avg_line_length: 35.278237 | max_line_length: 199 | extension_type: py

repo: PT-MAP | file: PT-MAP-master/configs.py
save_dir = '.'
data_dir = {}
data_dir['cifar'] = './filelists/cifar/'
data_dir['CUB'] = './filelists/CUB/'
data_dir['miniImagenet'] = './filelists/miniImagenet/'
file_length: 211 | avg_line_length: 34.333333 | max_line_length: 58 | extension_type: py

repo: PT-MAP | file: PT-MAP-master/io_utils.py
import numpy as np
import os
import glob
import argparse
def parse_args(script):
parser = argparse.ArgumentParser(description= 'few-shot script %s' %(script))
parser.add_argument('--dataset' , default='miniImagenet', help='CUB/miniImagenet')
parser.add_argument('--model' , default='WideResNet28_10', help='model: WideResNet28_10/ResNet{18}')
parser.add_argument('--method' , default='S2M2_R', help='rotation/S2M2_R')
parser.add_argument('--train_aug' , action='store_true', help='perform data augmentation or not during training ') #still required for save_features.py and test.py to find the model path correctly
if script == 'train':
parser.add_argument('--num_classes' , default=200, type=int, help='total number of classes') #make it larger than the maximum label value in base class
parser.add_argument('--save_freq' , default=10, type=int, help='Save frequency')
parser.add_argument('--start_epoch' , default=0, type=int,help ='Starting epoch')
parser.add_argument('--stop_epoch' , default=400, type=int, help ='Stopping epoch') #for meta-learning methods, each epoch contains 100 episodes. The default epoch number is dataset dependent. See train.py
parser.add_argument('--resume' , action='store_true', help='continue from previous trained model with largest epoch')
parser.add_argument('--lr' , default=0.001, type=float, help='learning rate')
parser.add_argument('--batch_size' , default=16, type=int, help='batch size ')
parser.add_argument('--test_batch_size' , default=2, type=int, help='batch size ')
parser.add_argument('--alpha' , default=2.0, type=float, help='for S2M2 training ')
elif script == 'test':
parser.add_argument('--num_classes' , default=200, type=int, help='total number of classes')
return parser.parse_args()
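# Example invocation of the training script (hypothetical script name and paths), e.g.:
#   python train.py --dataset miniImagenet --model WideResNet28_10 --method rotation --train_aug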
def get_assigned_file(checkpoint_dir,num):
assign_file = os.path.join(checkpoint_dir, '{:d}.tar'.format(num))
return assign_file
def get_resume_file(checkpoint_dir):
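# return the checkpoint with the largest epoch number in checkpoint_dir (ignoring best.tar)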
filelist = glob.glob(os.path.join(checkpoint_dir, '*.tar'))
if len(filelist) == 0:
return None
filelist = [ x for x in filelist if os.path.basename(x) != 'best.tar' ]
epochs = np.array([int(os.path.splitext(os.path.basename(x))[0]) for x in filelist])
max_epoch = np.max(epochs)
resume_file = os.path.join(checkpoint_dir, '{:d}.tar'.format(max_epoch))
return resume_file
def get_best_file(checkpoint_dir):
best_file = os.path.join(checkpoint_dir, 'best.tar')
if os.path.isfile(best_file):
return best_file
else:
return get_resume_file(checkpoint_dir)
file_length: 2,785 | avg_line_length: 46.220339 | max_line_length: 214 | extension_type: py

repo: PT-MAP | file: PT-MAP-master/filelists/cifar/write_cifar_filelist.py
import glob
import json
import os
test = {'label_names': [] , 'image_names':[] , 'image_labels':[]}
pathname = os.getcwd()
#pathname = pathname.split('filelists')[0]
print(pathname)
f = open(pathname + '/cifar-FS/splits/bertinetto/test.txt')
classes = f.readlines()
count = 80
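# novel-class labels start at 80, presumably so they come after the 64 base + 16 val classes of cifar-FS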
for each in classes:
each = each.strip()
test['label_names'].append(each)
files = glob.glob( pathname + '/cifar-FS/data/' + each + '/*')
for image_name in files:
test['image_names'].append( image_name)
test['image_labels'].append(count)
count +=1
json.dump(test , open('novel.json','w'))
base = {'label_names': [] , 'image_names':[] , 'image_labels':[]}
f = open(pathname + '/cifar-FS/splits/bertinetto/train.txt')
classes = f.readlines()
count = 0
for each in classes:
each = each.strip()
base['label_names'].append(each)
files = glob.glob( pathname + '/cifar-FS/data/' + each + '/*')
for image_name in files:
base['image_names'].append( image_name)
base['image_labels'].append(count)
count +=1
json.dump(base , open('base.json','w'))
val = {'label_names': [] , 'image_names':[] , 'image_labels':[]}
f = open(pathname + '/cifar-FS/splits/bertinetto/val.txt')
classes = f.readlines()
count = 0
for each in classes:
each = each.strip()
val['label_names'].append(each)
files = glob.glob( pathname + '/cifar-FS/data/' + each + '/*')
for image_name in files:
val['image_names'].append( image_name)
val['image_labels'].append(count)
count +=1
json.dump(val , open('val.json','w'))
file_length: 1,504 | avg_line_length: 22.515625 | max_line_length: 65 | extension_type: py

repo: PT-MAP | file: PT-MAP-master/filelists/miniImagenet/write_miniImagenet_filelist.py
import numpy as np
from os import listdir
from os.path import isfile, isdir, join
import os
import json
import random
import re
cwd = os.getcwd()
data_path = join(cwd,'ILSVRC2015/Data/CLS-LOC/train')
#data_path = join('/home/yuqing/phd/code/miniimagenet/images')
savedir = './'
dataset_list = ['base', 'val', 'novel']
#if not os.path.exists(savedir):
# os.makedirs(savedir)
cl = -1
folderlist = []
datasetmap = {'base':'train','val':'val','novel':'test'};
filelists = {'base':{},'val':{},'novel':{} }
filelists_flat = {'base':[],'val':[],'novel':[] }
labellists_flat = {'base':[],'val':[],'novel':[] }
for dataset in dataset_list:
with open(datasetmap[dataset] + ".csv", "r") as lines:
for i, line in enumerate(lines):
if i == 0:
continue
fid, _ , label = re.split(',|\.', line)
label = label.replace('\n','')
if not label in filelists[dataset]:
folderlist.append(label)
filelists[dataset][label] = []
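# sort filenames by their numeric suffix so the csv index (fid) maps to a deterministic image file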
fnames = listdir( join(data_path, label) )
fname_number = [ int(re.split('_|\.', fname)[1]) for fname in fnames]
sorted_fnames = list(zip( *sorted( zip(fnames, fname_number), key = lambda f_tuple: f_tuple[1] )))[0]
fid = int(fid[-5:])-1
fname = join( data_path,label, sorted_fnames[fid] )
filelists[dataset][label].append(fname)
for key, filelist in filelists[dataset].items():
cl += 1
random.shuffle(filelist)
filelists_flat[dataset] += filelist
labellists_flat[dataset] += np.repeat(cl, len(filelist)).tolist()
for dataset in dataset_list:
fo = open(savedir + dataset + ".json", "w")
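# JSON is assembled by hand: after writing each list, the file seeks back one
# character to drop the trailing comma before writing the closing bracket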
fo.write('{"label_names": [')
fo.writelines(['"%s",' % item for item in folderlist])
fo.seek(0, os.SEEK_END)
fo.seek(fo.tell()-1, os.SEEK_SET)
fo.write('],')
fo.write('"image_names": [')
fo.writelines(['"%s",' % item for item in filelists_flat[dataset]])
fo.seek(0, os.SEEK_END)
fo.seek(fo.tell()-1, os.SEEK_SET)
fo.write('],')
fo.write('"image_labels": [')
fo.writelines(['%d,' % item for item in labellists_flat[dataset]])
fo.seek(0, os.SEEK_END)
fo.seek(fo.tell()-1, os.SEEK_SET)
fo.write(']}')
fo.close()
print("%s -OK" %dataset)
file_length: 2,362 | avg_line_length: 31.819444 | max_line_length: 118 | extension_type: py

repo: PT-MAP | file: PT-MAP-master/filelists/CUB/write_CUB_filelist.py
import numpy as np
from os import listdir
from os.path import isfile, isdir, join
import os
import json
import random
cwd = os.getcwd()
data_path = join(cwd,'CUB_200_2011/images')
savedir = './'
dataset_list = ['base','val','novel']
#if not os.path.exists(savedir):
# os.makedirs(savedir)
folder_list = [f for f in listdir(data_path) if isdir(join(data_path, f))]
folder_list.sort()
label_dict = dict(zip(folder_list,range(0,len(folder_list))))
classfile_list_all = []
for i, folder in enumerate(folder_list):
folder_path = join(data_path, folder)
classfile_list_all.append( [ join(folder_path, cf) for cf in listdir(folder_path) if (isfile(join(folder_path,cf)) and cf[0] != '.')])
random.shuffle(classfile_list_all[i])
for dataset in dataset_list:
file_list = []
label_list = []
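# classes are split by index: even indices -> base, i % 4 == 1 -> val, i % 4 == 3 -> novel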
for i, classfile_list in enumerate(classfile_list_all):
if 'base' in dataset:
if (i%2 == 0):
file_list = file_list + classfile_list
label_list = label_list + np.repeat(i, len(classfile_list)).tolist()
if 'val' in dataset:
if (i%4 == 1):
file_list = file_list + classfile_list
label_list = label_list + np.repeat(i, len(classfile_list)).tolist()
if 'novel' in dataset:
if (i%4 == 3):
file_list = file_list + classfile_list
label_list = label_list + np.repeat(i, len(classfile_list)).tolist()
fo = open(savedir + dataset + ".json", "w")
fo.write('{"label_names": [')
fo.writelines(['"%s",' % item for item in folder_list])
fo.seek(0, os.SEEK_END)
fo.seek(fo.tell()-1, os.SEEK_SET)
fo.write('],')
fo.write('"image_names": [')
fo.writelines(['"%s",' % item for item in file_list])
fo.seek(0, os.SEEK_END)
fo.seek(fo.tell()-1, os.SEEK_SET)
fo.write('],')
fo.write('"image_labels": [')
fo.writelines(['%d,' % item for item in label_list])
fo.seek(0, os.SEEK_END)
fo.seek(fo.tell()-1, os.SEEK_SET)
fo.write(']}')
fo.close()
print("%s -OK" %dataset)
file_length: 2,096 | avg_line_length: 30.772727 | max_line_length: 138 | extension_type: py