code stringlengths 1 1.49M | vector listlengths 0 7.38k | snippet listlengths 0 7.38k |
|---|---|---|
#
# Stochastic Optimization PS#1 Problem 6a
# Nurse Staffing
#
# Reference Format: Vehicle Routing Problem
#
# Imports
#
from coopr.pyomo import *
#from coopr.opt.base import solver
#
# Model
#
model = AbstractModel()
#
# Parameters
#
# Define sets
model.I = Set() # units
model.J = Set() # days
# Data_deterministic
model.s = Param(model.I) # # of patients can be served each shift for each inital assignment
model.r = Param(model.I) # # of patients can be served after each reassignment
model.Cost = Param() # cost per shift
model.Penalty = Param() # penalty cost per unserved patient
# Data_stochastic
model.d = Param(model.I, model.J)
#
# Variables
#
# integer
model.X = Var(model.I, model.J, within=PositiveIntegers) # # of assigned nuerses to ith unit on jth day
# continuous
model.U = Var(model.I, model.J, within=PositiveReals) # # of reassigned nuerses from ith unit on jth day
model.V = Var(model.I, model.J, within=PositiveReals) # # of reassigned nerses to ith unit on jth day
model.W = Var(model.I, model.J, within=PositiveReals) # # of unserved patients on ith unit on jth day
model.FirstStageCost = Var()
model.SecondStageCost = Var()
#
# Stage-specific cost computations
#
def first_stage_cost_rule(mod):
return (mod.FirstStageCost - mod.Cost * sum( mod.X[i,j] for i in mod.I for j in mod.J ) ) == 0.0
model.ComputeFirstStageCost = Constraint(rule=first_stage_cost_rule)
def second_stage_cost_rule(mod):
expcost = mod.Penalty * sum( mod.W[i,j] for i in mod.I for j in mod.J )
return (mod.SecondStageCost - expcost) == 0.0
model.ComputeSecondStageCost = Constraint(rule=second_stage_cost_rule)
#
# Constraints
#
def demand_rule(mod, i, j):
return mod.W[i,j] >= ( mod.d[i,j] - (mod.X[i,j] - mod.U[i,j]) * mod.s[i] - mod.V[i,j] * mod.r[i] )
model.DemandRule = Constraint(model.I, model.J, rule=demand_rule)
def reassign_dominate_rule(mod, i, j):
return mod.X[i,j] >= mod.U[i,j]
model.ReassignDominateRule = Constraint(model.I, model.J, rule=reassign_dominate_rule)
def reassign_balance_rule(mod, j):
return sum(mod.U[i,j] for i in mod.I) == sum(mod.V[i,j] for i in mod.I)
model.ReassignBalanceRule = Constraint(model.J, rule=reassign_balance_rule)
#
# Objective
#
def total_cost_rule(mod):
return (mod.FirstStageCost + mod.SecondStageCost)
model.TotalCost = Objective(rule=total_cost_rule, sense=minimize)
#model.create('ReferenceModel.py')
| [
[
1,
0,
0.125,
0.0114,
0,
0.66,
0,
594,
0,
1,
0,
0,
594,
0,
0
],
[
14,
0,
0.1932,
0.0114,
0,
0.66,
0.0385,
722,
3,
0,
0,
0,
338,
10,
1
],
[
14,
0,
0.2727,
0.0114,
0,
... | [
"from coopr.pyomo import *",
"model = AbstractModel()",
"model.I = Set() # units",
"model.J = Set() # days",
"model.s = Param(model.I) # # of patients can be served each shift for each inital assignment",
"model.r = Param(model.I) # # of patients can be served after each reassignment",
"model.Cost = ... |
#
# Stochastic Optimization PS#1 Problem 6a
# Nurse Staffing
#
# Reference Format: Vehicle Routing Problem
#
# Imports
#
from coopr.pyomo import *
#from coopr.opt.base import solver
#
# Model
#
model = AbstractModel()
#
# Parameters
#
# Define sets
model.I = Set() # units
model.J = Set() # days
# Data_deterministic
model.s = Param(model.I) # # of patients can be served each shift for each inital assignment
model.r = Param(model.I) # # of patients can be served after each reassignment
model.Cost = Param() # cost per shift
model.Penalty = Param() # penalty cost per unserved patient
# Data_stochastic
model.d = Param(model.I, model.J)
#
# Variables
#
# integer
model.X = Var(model.I, model.J, within=PositiveIntegers) # # of assigned nuerses to ith unit on jth day
# continuous
model.U = Var(model.I, model.J, within=PositiveReals) # # of reassigned nuerses from ith unit on jth day
model.V = Var(model.I, model.J, within=PositiveReals) # # of reassigned nerses to ith unit on jth day
model.W = Var(model.I, model.J, within=PositiveReals) # # of unserved patients on ith unit on jth day
#model.FirstStageProfit = Var()
#model.SecondStateProfit = Var()
#
# Constraints
#
def demand_rule(mod, i, j):
return mod.W[i,j] >= ( mod.d[i,j] - (mod.X[i,j] - mod.U[i,j]) * mod.s[i] - mod.V[i,j] * mod.r[i] )
model.DemandRule = Constraint(model.I, model.J, rule=demand_rule)
def reassign_dominate_rule(mod, i, j):
return mod.X[i,j] >= mod.U[i,j]
model.ReassignDominateRule = Constraint(model.I, model.J, rule=reassign_dominate_rule)
def reassign_balance_rule(mod, j):
return sum(mod.U[i,j] for i in mod.I) == sum(mod.V[i,j] for i in mod.I)
model.ReassignBalanceRule = Constraint(model.J, rule=reassign_balance_rule)
#
# Objective
#
def total_cost_rule(mod):
return mod.Cost * sum( mod.X[i,j] for i in mod.I for j in mod.J ) + mod.Penalty * sum( mod.W[i,j] for i in mod.I for j in mod.J )
model.TotalCost = Objective(rule=total_cost_rule, sense=minimize)
| [
[
1,
0,
0.1486,
0.0135,
0,
0.66,
0,
594,
0,
1,
0,
0,
594,
0,
0
],
[
14,
0,
0.2297,
0.0135,
0,
0.66,
0.05,
722,
3,
0,
0,
0,
338,
10,
1
],
[
14,
0,
0.3243,
0.0135,
0,
... | [
"from coopr.pyomo import *",
"model = AbstractModel()",
"model.I = Set() # units",
"model.J = Set() # days",
"model.s = Param(model.I) # # of patients can be served each shift for each inital assignment",
"model.r = Param(model.I) # # of patients can be served after each reassignment",
"model.Cost = ... |
import cplex
import random
import numpy as np
from pylab import *
from stoch_trnsport_gen import *
from cplex.exceptions import CplexError
NI = 20
NJ = 20
NW = 20
prob,demand = gen_stoch_trnsport(NI, NJ, NW)
prob.solve()
I = range(NI)
J = range(NJ)
O = range(NW)
bigm = 0.1
print prob.solution.get_objective_value(),
print prob.solution.progress.get_num_iterations()
bigm_vec = [0.005 * i for i in range(1,100)]
bigm_arr = np.array(bigm_vec)
vals_arr = np.zeros(len(bigm_arr))
for l in range(len(bigm_vec)):
for w in O:
for j in J:
bigm = bigm_vec[l]
row_name = 'scenario' + str(w) + '_' + 'customer' + str(j)
col_name = 'y' + '_' + str(w)
prob.linear_constraints.set_coefficients(row_name, col_name, -bigm * demand[w][j])
prob.linear_constraints.set_rhs(row_name, demand[w][j] * (1 - bigm) )
prob.solve()
vals_arr[l] = prob.solution.get_objective_value()
print prob.solution.get_objective_value(),
print prob.solution.progress.get_num_iterations(),
print bigm
plot(bigm_arr, vals_arr, '-x')
xlabel('Normalized big-M')
ylabel('Optimal cost')
grid(True)
title('|I|=|J|=|O|=20')
show()
| [
[
1,
0,
0.0208,
0.0208,
0,
0.66,
0,
287,
0,
1,
0,
0,
287,
0,
0
],
[
1,
0,
0.0417,
0.0208,
0,
0.66,
0.0385,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0625,
0.0208,
0,
... | [
"import cplex",
"import random",
"import numpy as np",
"from pylab import *",
"from stoch_trnsport_gen import *",
"from cplex.exceptions import CplexError",
"NI = 20",
"NJ = 20",
"NW = 20",
"prob,demand = gen_stoch_trnsport(NI, NJ, NW)",
"prob.solve()",
"I = range(NI)",
"J = range(NJ)",
"O... |
import cplex
import random
import numpy as np
import time
from stoch_trnsport_gen import *
from cplex.exceptions import CplexError
import cplex.callbacks as CPX_CB
class MySolve(CPX_CB.SolveCallback):
def __call__(self):
self.times_called += 1
print "hello"
self.solve()
# print "Lower bounds", prob_ref.variables.get_upper_bounds()
# print prob_ref.solution.get_status()
# print prob_ref.solution.basis.get_basis()
NI = 10
NJ = 10
NW = 10
I = range(NI)
J = range(NJ)
O = range(NW)
prob,demand = gen_stoch_trnsport(NI, NJ, NW, eps=0.2)
#prob.solve()
prob_ref = prob
solve_instance = prob.register_callback(MySolve)
solve_instance.times_called = 0
flag = 0
m0 = 0.01
bigm_row_name = [[0]*NJ]*NW
bigm_col_name = [[0]*NJ]*NW
bigm_vals = [m0]* NJ*NW
for w in O:
for j in J:
bigm_row_name[w][j] = 'scenario' + str(w) + '_' + 'customer' + str(j)
bigm_col_name[w][j] = 'y' + '_' + str(w)
while flag == 0:
# change the big-Ms
# prob.linear_constraints.set_coefficients( zip(bigm_row_name,, bigm_col_name, bigm_vals) )
# prob.lienar_constraints.set_rhs( zip(bigm_row_name, bigm_vals) )
# solve the MIP
start_time = time.time()
prob.solve()
use_time = time.time() - start_time
flag = 1
"""
for k in range(len(bigm_vec)):
row_name = []
col_name = []
bigm_coefs = []
rhs_vals = []
prob.linear_constraints.set_coefficients( zip(row_name, col_name, bigm_coefs) )
prob.linear_constraints.set_rhs( zip(row_name, rhs_vals) )
# prob.linear_constraints.set_coefficients(row_name, col_name, -bigm_vec[k] * demand[w][j])
# prob.linear_constraints.set_rhs(row_name, demand[w][j] * (1 - bigm_vec[k]) )
start_time = time.time()
prob.solve()
use_time = time.time() - start_time
print prob.solution.get_objective_value(),
print prob.solution.progress.get_num_iterations(),
print use_time,
print bigm_vec[k]
"""
#vals_arr[l] = prob.solution.get_objective_value()
#time_arr[l, k] = use_time
#vals_arr[l, k] = prob.solution.get_objective_value()
"""
temp_opt_val = prob.solution.get_objective_value()
if abs(temp_opt_val - true_opt_val) <= 0.000001 and use_time <= best_use_time:
best_use_time = use_time
x_bigm_star = X_bigm_vec[l]
y_bigm_star = Y_bigm_vec[k]
"""
#print X_bigm_vec[l], Y_bigm_vec[k]
| [
[
1,
0,
0.0102,
0.0102,
0,
0.66,
0,
287,
0,
1,
0,
0,
287,
0,
0
],
[
1,
0,
0.0204,
0.0102,
0,
0.66,
0.0385,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0306,
0.0102,
0,
... | [
"import cplex",
"import random",
"import numpy as np",
"import time",
"from stoch_trnsport_gen import *",
"from cplex.exceptions import CplexError",
"import cplex.callbacks as CPX_CB",
"class MySolve(CPX_CB.SolveCallback):\n def __call__(self):\n self.times_called += 1\n print(\"hello... |
from coopr.pyomo import *
#
# Model
#
model = AbstractModel()
model.I = Set()
model.J = Set()
model.A = Param(model.J, model.I)
model.b = Param(model.J)
model.c = Param(model.I)
model.p = Param(model.J) #probability for scenarios
model.alpha = Param() #required chance
model.bigm = Param(model.J)
model.X = Var(model.I, within=PositiveReals)
model.Y = Var(model.J, within=Binary)
def knapsack_rule(mod):
return sum(mod.p[j] * mod.Y[j] for j in mod.J) >= mod.alpha
model.KnapsackRule = Constraint(rule=knapsack_rule)
def chance_rule(mod, j):
lhs = sum(mod.A[j, i] * mod.X[i] for i in mod.I) - mod.b[j]
rhs = mod.bigm[j] * (mod.Y[j] - 1)
return lhs >= rhs
model.ChanceRule = Constraint(model.J, rule=chance_rule)
def total_cost_rule(mod):
return sum(mod.X[i] * mod.c[i] for i in mod.I)
model.TotalCost = Objective(rule = total_cost_rule, sense=minimize)
| [
[
1,
0,
0.0278,
0.0278,
0,
0.66,
0,
594,
0,
1,
0,
0,
594,
0,
0
],
[
14,
0,
0.1667,
0.0278,
0,
0.66,
0.0588,
722,
3,
0,
0,
0,
338,
10,
1
],
[
14,
0,
0.2222,
0.0278,
0,
... | [
"from coopr.pyomo import *",
"model = AbstractModel()",
"model.I = Set()",
"model.J = Set()",
"model.A = Param(model.J, model.I)",
"model.b = Param(model.J)",
"model.c = Param(model.I)",
"model.p = Param(model.J) #probability for scenarios",
"model.alpha = Param() #required chance",
"model.bigm... |
import cplex
import random
import numpy as np
prob = cplex.Cplex()
prob.objective.set_sense(prob.objective.sense.minimize)
N = 5
M = 10
# Add continuous variables
bin_var_name = ["y" + str(i) for i in range(N)]
bin_var_type = ["B" for i in range(N)]
bin_var_obj = [random.randint(1,10) for i in range(N)]
con_var_name = ["x" + str(i) for i in range(N)]
con_var_type = ["C" for i in range(N)]
con_var_obj = [random.randint(1,10) for i in range(N)]
print var_name
print var_obj
print var_type
prob.variables.add( obj = bin_var_obj,
types = bin_var_type,
names = bin_var_name)
prob.variables.add( obj = con_var_obj,
types = con_var_type,
names = con_var_name)
for i in range(M):
prob.linear_constraints.add( lin_expr = bin_expr,
senses = bin_senses,
rhs = bin_rhs
# Add binary variables
#prob.variables.add(obj =
# types =
#prob.write("chance.lp")
| [
[
1,
0,
0.0244,
0.0244,
0,
0.66,
0,
287,
0,
1,
0,
0,
287,
0,
0
],
[
1,
0,
0.0488,
0.0244,
0,
0.66,
0.0556,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0732,
0.0244,
0,
... | [
"import cplex",
"import random",
"import numpy as np",
"prob = cplex.Cplex()",
"prob.objective.set_sense(prob.objective.sense.minimize)",
"N = 5",
"M = 10",
"bin_var_name = [\"y\" + str(i) for i in range(N)]",
"bin_var_type = [\"B\" for i in range(N)]",
"bin_var_obj = [random.randint(1,10) for i ... |
import cplex
import random
import numpy as np
import time
from pylab import *
from stoch_trnsport_gen import *
from cplex.exceptions import CplexError
import mpl_toolkits.mplot3d.axes3d as p3
NI = 50
NJ = 50
NW = 500
I = range(NI)
J = range(NJ)
O = range(NW)
prob,demand = gen_stoch_trnsport(NI, NJ, NW, eps=0.2)
#prob.solve()
bigm = 0
bigm_w = [0.2 for w in O]
#true_opt_val = prob.solution.get_objective_value()
#print prob.solution.get_objective_value(),
#print prob.solution.progress.get_num_iterations()
bigm_vec = [0.005 * i for i in range(1,21)]
X_bigm_vec = [0.02 * i for i in range(1,21)]
Y_bigm_vec = [0.02 * i for i in range(1,21)]
X_bigm_arr = np.array(X_bigm_vec)
Y_bigm_arr = np.array(Y_bigm_vec)
vals_arr = np.zeros((len(X_bigm_vec), len(X_bigm_vec)))
time_arr = np.zeros((len(X_bigm_vec), len(X_bigm_vec)))
x_bigm_star = 0
y_bigm_star = 0
Z = vals_arr
T = time_arr
for k in range(len(bigm_vec)):
row_name = []
col_name = []
bigm_coefs = []
rhs_vals = []
for w in O:
for j in J:
row_name.append('scenario' + str(w) + '_' + 'customer' + str(j))
col_name.append('y' + '_' + str(w))
bigm_coefs.append( -bigm_vec[k] * demand[w][j] )
rhs_vals.append( demand[w][j] * (1 - bigm_vec[k]) )
prob.linear_constraints.set_coefficients( zip(row_name, col_name, bigm_coefs) )
prob.linear_constraints.set_rhs( zip(row_name, rhs_vals) )
# prob.linear_constraints.set_coefficients(row_name, col_name, -bigm_vec[k] * demand[w][j])
# prob.linear_constraints.set_rhs(row_name, demand[w][j] * (1 - bigm_vec[k]) )
start_time = time.time()
prob.solve()
use_time = time.time() - start_time
print prob.solution.get_objective_value(),
print prob.solution.progress.get_num_iterations(),
print use_time,
print bigm_vec[k]
#vals_arr[l] = prob.solution.get_objective_value()
#time_arr[l, k] = use_time
#vals_arr[l, k] = prob.solution.get_objective_value()
"""
temp_opt_val = prob.solution.get_objective_value()
if abs(temp_opt_val - true_opt_val) <= 0.000001 and use_time <= best_use_time:
best_use_time = use_time
x_bigm_star = X_bigm_vec[l]
y_bigm_star = Y_bigm_vec[k]
"""
#print X_bigm_vec[l], Y_bigm_vec[k]
| [
[
1,
0,
0.011,
0.011,
0,
0.66,
0,
287,
0,
1,
0,
0,
287,
0,
0
],
[
1,
0,
0.022,
0.011,
0,
0.66,
0.0345,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.033,
0.011,
0,
0.66,
... | [
"import cplex",
"import random",
"import numpy as np",
"import time",
"from pylab import *",
"from stoch_trnsport_gen import *",
"from cplex.exceptions import CplexError",
"import mpl_toolkits.mplot3d.axes3d as p3",
"NI = 50",
"NJ = 50",
"NW = 500",
"I = range(NI)",
"J = range(NJ)",
"O = r... |
import random
fname = 'chance_mod.dat'
f = open(fname, 'w')
random.seed(1000)
f.write('\n')
#f.write('hello, world' + '\n')
#f.write('second line')
def write_set(name, data, file):
f = file
size = len(data)
f.write('set' + ' ' + name + ' ' + ':='+' ')
for i in range(size):
f.write(data[i] + ' ')
f.write(';'+'\n\n')
def write_param_0d(name, data, file):
f = file
f.write('param' + ' ' + name + ' ' + ':='+' ')
f.write(str(data))
f.write(';'+'\n\n')
def write_param_1d(name, set, data, file, njust = 10, prec=6):
f = file
size = len(set)
entry = 0
f.write('param' + ' ' + name + ' ' + ':='+'\n')
for i in range(size):
if isinstance(data[i], int):
entry = data[i]
else:
entry = round(data[i], prec)
f.write(set[i].ljust(njust) + ' ' + str( entry ) + '\n')
f.write(';'+'\n\n')
def write_param_2d(name, set_x, set_y, data, file, njust = 10, prec = 6):
f = file
entry = 0
row_size = len(set_x)
col_size = len(set_y)
f.write('param' + ' ' + name + ' ' + ':'+'\n')
f.write(' '.rjust(njust+1))
for j in range(col_size):
f.write(set_y[j].rjust(njust) + ' ')
f.write(':= ' + '\n')
for i in range(row_size):
f.write(set_x[i].ljust(njust) + ' ')
for j in range(col_size):
if isinstance(data[i][j], int):
entry = data[i][j]
else:
entry = round(data[i][j], prec)
f.write(str(entry).rjust(njust) + ' ')
f.write('\n')
f.write(';'+'\n\n')
NI = 100
NJ = 50
I = ['i' + str(i+1) for i in range(NI)]
write_set('I', I, f)
J = ['j' + str(j+1) for j in range(NJ)]
write_set('J', J, f)
A = [ [random.uniform(0.5,1) for i in range(NI)] for j in range(NJ)]
write_param_2d('A', J, I , A, f)
b = [random.uniform(NI,5*NI) for j in range(NJ) ]
write_param_1d('b', J, b, f)
c = [random.uniform(1,1) for i in range(NI) ]
write_param_1d('c', I, c, f)
p = [random.uniform(0,1) for j in range(NJ) ]
ps = sum(p)
for i in range(len(p)):
p[i] = p[i]/ps
write_param_1d('p', J, p, f)
bigm = [50] * NJ
write_param_1d('bigm', J, bigm, f)
alpha = 0.6
write_param_0d('alpha', alpha, f)
| [
[
1,
0,
0.0111,
0.0111,
0,
0.66,
0,
715,
0,
1,
0,
0,
715,
0,
0
],
[
14,
0,
0.0222,
0.0111,
0,
0.66,
0.0357,
190,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.0333,
0.0111,
0,
... | [
"import random",
"fname = 'chance_mod.dat'",
"f = open(fname, 'w')",
"random.seed(1000)",
"f.write('\\n')",
"def write_set(name, data, file):\n f = file\n size = len(data)\n f.write('set' + ' ' + name + ' ' + ':='+' ')\n for i in range(size):\n f.write(data[i] + ' ')\n f.write(';'+'\... |
import cplex
import random
import numpy as np
import time
from pylab import *
from stoch_trnsport_gen import *
from cplex.exceptions import CplexError
import mpl_toolkits.mplot3d.axes3d as p3
NI = 20
NJ = 20
NW = 20
I = range(NI)
J = range(NJ)
O = range(NW)
prob,demand = gen_stoch_trnsport(NI, NJ, NW, eps=0.5)
prob.solve()
bigm = 0.1
true_opt_val = prob.solution.get_objective_value()
print prob.solution.get_objective_value(),
print prob.solution.progress.get_num_iterations()
X_bigm_vec = [0.02 * i for i in range(0,26)]
Y_bigm_vec = [0.02 * i for i in range(0,26)]
X_bigm_arr = np.array(X_bigm_vec)
Y_bigm_arr = np.array(Y_bigm_vec)
vals_arr = np.zeros((len(X_bigm_vec), len(X_bigm_vec)))
time_arr = np.zeros((len(X_bigm_vec), len(X_bigm_vec)))
x_bigm_star = 0
y_bigm_star = 0
best_use_time = 100000.0
Z = vals_arr
T = time_arr
#Iteratve over X_bigms and Y_bigms
for l in range(len(X_bigm_vec)):
for k in range(len(Y_bigm_vec)):
for w in O:
for j in J:
row_name = 'scenario' + str(w) + '_' + 'customer' + str(j)
col_name = 'y' + '_' + str(w)
if w%2 == 0:
bigm = X_bigm_vec[l]
else:
bigm = Y_bigm_vec[k]
prob.linear_constraints.set_coefficients(row_name, col_name, -bigm * demand[w][j])
prob.linear_constraints.set_rhs(row_name, demand[w][j] * (1 - bigm) )
start_time = time.time()
prob.solve()
use_time = time.time() - start_time
# vals_arr[l] = prob.solution.get_objective_value()
time_arr[l, k] = use_time
vals_arr[l, k] = prob.solution.get_objective_value()
temp_opt_val = prob.solution.get_objective_value()
if abs(temp_opt_val - true_opt_val) <= 0.000001 and use_time <= best_use_time:
best_use_time = use_time
x_bigm_star = X_bigm_vec[l]
y_bigm_star = Y_bigm_vec[k]
print prob.solution.get_objective_value(),
print prob.solution.progress.get_num_iterations(),
print X_bigm_vec[l], Y_bigm_vec[k]
print "================="
print x_bigm_star, y_bigm_star
print best_use_time
X,Y = meshgrid(X_bigm_arr, Y_bigm_arr)
print X
print Y
print Z
print T
fig=figure()
ax = p3.Axes3D(fig)
ax.plot_wireframe(X,Y,Z)
#ax.plot_surface(X,Y,Z)
#ax.contourf3D(X, Y, Z)
#ax.contour3D(X, Y, Z)
#ax.plot(ravel(X),ravel(Y),ravel(Z))
ax.set_xlabel('Normalized big-M(1)')
ax.set_ylabel('Normalized big-M(2)')
ax.set_zlabel('Optimal cost')
show()
| [
[
1,
0,
0.0102,
0.0102,
0,
0.66,
0,
287,
0,
1,
0,
0,
287,
0,
0
],
[
1,
0,
0.0204,
0.0102,
0,
0.66,
0.0217,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0306,
0.0102,
0,
... | [
"import cplex",
"import random",
"import numpy as np",
"import time",
"from pylab import *",
"from stoch_trnsport_gen import *",
"from cplex.exceptions import CplexError",
"import mpl_toolkits.mplot3d.axes3d as p3",
"NI = 20",
"NJ = 20",
"NW = 20",
"I = range(NI)",
"J = range(NJ)",
"O = ra... |
"""
This script is used for benchmarking the time takes for different formulations
includes
IP, SIP, IP(M*)
"""
from time import time
from ccfs.fscplex import fscplex
f = open("../output/bench_formulations.txt", 'w')
header = "%4s %4s" % ('NI', 'NS')
header += "%10s %10s %10s" % ("IP_v", "SIP_v", "IP(M*)_v")
header += "%10s %10s %10s" % ('IP_n', "SIP_n", 'IP(M*)_n')
header += "%10s %10s %10s" % ('IP_t', "SIP_t", 'IP(M*)_t')
header += "\n"
f.write(header)
ni_lo = 10
ni_up = 20
ni_de = 5
ns_lo = 10
ns_up = 20
ns_de = 5
# Loop through NI and NS
for NI in range(ni_lo, ni_up, ni_de):
for NS in range(ns_lo, ns_up, ns_de):
fc = fscplex()
fc.generate_instance(NI, NS, eps=0.2, Ordered=False)
fc.parameters.read_file('../../param/stofac.prm')
# Solve by original formulation
t0 = time()
fc.solve()
t1 = time()
ip_v = fc.solution.get_objective_value()
ip_n = fc.solution.progress.get_num_nodes_processed()
ip_t = t1-t0
# Solve by strenghthened formulation
fc.strengthen_formulation()
t0 = time()
fc.solve()
t1 = time()
sip_v = fc.solution.get_objective_value()
sip_n = fc.solution.progress.get_num_nodes_processed()
sip_t = t1-t0
# Sovle by big-m coefficients
x_val = fc.solution.get_values(fc.x_name)
y_val = fc.solution.get_values(fc.y_name)
bigm_val = [0] * fc.num_bigm
for s in fc.S:
for i in fc.I:
ind = i + s*fc.NI
if y_val[s] == 1.0:
bigm_val[ind] = max(0, fc.rhs[s][i] - x_val[i] + 0.01)
else:
bigm_val[ind] = 0.01
fc.set_bigm(bigm_val)
t0 = time()
fc.solve()
t1 = time()
ipm_v = fc.solution.get_objective_value()
ipm_n = fc.solution.progress.get_num_nodes_processed()
ipm_t = t1-t0
# Print the results
line = "%4d %4d " % (NI, NS)
line += "%10f %10f %10f" % (ip_v, sip_v, ipm_v)
line += "%10d %10d %10d" % (ip_n, sip_n, ipm_n)
line += "%10f %10f %10f" % (ip_t, sip_t, ipm_t)
line += "\n"
f.write(line)
fc = []
f.close()
| [
[
8,
0,
0.038,
0.0633,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0759,
0.0127,
0,
0.66,
0.0769,
654,
0,
1,
0,
0,
654,
0,
0
],
[
1,
0,
0.0886,
0.0127,
0,
0.66,... | [
"\"\"\"\nThis script is used for benchmarking the time takes for different formulations\nincludes\nIP, SIP, IP(M*)\n\"\"\"",
"from time import time",
"from ccfs.fscplex import fscplex",
"f = open(\"../output/bench_formulations.txt\", 'w')",
"header = \"%4s %4s\" % ('NI', 'NS')",
"f.write(header)",
"ni_... |
"""
This script is used for benchmarking the time takes for different formulations
includes
IP, SIP, IP(M*)
"""
from time import time
from ccfs.fscplex import fscplex
f = open("../output/bench_formulations.txt", 'w')
header = "%4s %4s" % ('NI', 'NS')
header += "%10s %10s %10s" % ("IP_v", "SIP_v", "IP(M*)_v")
header += "%10s %10s %10s" % ('IP_n', "SIP_n", 'IP(M*)_n')
header += "%10s %10s %10s" % ('IP_t', "SIP_t", 'IP(M*)_t')
header += "\n"
f.write(header)
ni_lo = 50
ni_up = 80
ni_de = 10
ns_lo = 50
ns_up = 80
ns_de = 10
for NI in range(ni_lo, ni_up, ni_de):
for NS in range(ns_lo, ns_up, ns_de):
fc = fscplex()
fc.generate_instance(NI, NS, eps=0.2, Ordered=False)
fc.parameters.read_file('../../param/stofac.prm')
# Solve by original formulation
t0 = time()
fc.solve()
t1 = time()
ip_v = fc.solution.get_objective_value()
ip_n = fc.solution.progress.get_num_nodes_processed()
ip_t = t1-t0
# Solve by strenghthened formulation
fc.strengthen_formulation()
t0 = time()
fc.solve()
t1 = time()
sip_v = fc.solution.get_objective_value()
sip_n = fc.solution.progress.get_num_nodes_processed()
sip_t = t1-t0
# Sovle by big-m coefficients
x_val = fc.solution.get_values(fc.x_name)
y_val = fc.solution.get_values(fc.y_name)
bigm_val = [0] * fc.num_bigm
for s in fc.S:
if y_val[s] == 1.0:
for i in fc.I:
ind = i + s*fc.NI
bigm_val[ind] = max(0, fc.rhs[s][i] - x_val[i])
fc.set_bigm(bigm_val)
t0 = time()
fc.solve()
t1 = time()
ipm_v = fc.solution.get_objective_value()
ipm_n = fc.solution.progress.get_num_nodes_processed()
ipm_t = t1-t0
line = "%4d %4d " % (NI, NS)
line += "%10f %10f %10f" % (ip_v, sip_v, ipm_v)
line += "%10d %10d %10d" % (ip_n, sip_n, ipm_n)
line += "%10f %10f %10f" % (ip_t, sip_t, ipm_t)
line += "\n"
f.write(line)
fc = []
f.close()
| [
[
8,
0,
0.0411,
0.0685,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0822,
0.0137,
0,
0.66,
0.0769,
654,
0,
1,
0,
0,
654,
0,
0
],
[
1,
0,
0.0959,
0.0137,
0,
0.66... | [
"\"\"\"\nThis script is used for benchmarking the time takes for different formulations\nincludes\nIP, SIP, IP(M*)\n\"\"\"",
"from time import time",
"from ccfs.fscplex import fscplex",
"f = open(\"../output/bench_formulations.txt\", 'w')",
"header = \"%4s %4s\" % ('NI', 'NS')",
"f.write(header)",
"ni_... |
from ccfs.fscplex import fscplex
f = open("../output/bench_singlebigm.txt", 'w')
header = "%4s %4s" % ('NI', 'NS')
header += "%10s %10s %10s" % ("BigM", "Obj", "Nodes")
#header += "%10s %10s" % ('t_stre', 't_bigm')
header += "\n"
f.write(header)
NI = 20
NS = 20
fc = fscplex()
fc.generate_instance(NI, NS, eps=0.2)
fc.parameters.read_file("../../param/stofac.prm")
fc.write("fc.lp")
for i in range(50):
bigm_val = 0.1 * i
fc.set_bigm_one([bigm_val])
fc.solve()
bigm_obj_val=fc.solution.get_objective_value()
bigm_num_nodes = fc.solution.progress.get_num_nodes_processed()
line = "%4d %4d " % (i, i)
line += "%10f %10f %10d" % (bigm_val, bigm_obj_val, bigm_num_nodes)
#line += "%10f %10f"% (time_stre, time_bigm)
line += "\n"
f.write(line)
f.close()
| [
[
1,
0,
0.0323,
0.0323,
0,
0.66,
0,
780,
0,
1,
0,
0,
780,
0,
0
],
[
14,
0,
0.0968,
0.0323,
0,
0.66,
0.0909,
899,
3,
2,
0,
0,
693,
10,
1
],
[
14,
0,
0.129,
0.0323,
0,
... | [
"from ccfs.fscplex import fscplex",
"f = open(\"../output/bench_singlebigm.txt\", 'w')",
"header = \"%4s %4s\" % ('NI', 'NS')",
"f.write(header)",
"NI = 20",
"NS = 20",
"fc = fscplex()",
"fc.generate_instance(NI, NS, eps=0.2)",
"fc.parameters.read_file(\"../../param/stofac.prm\")",
"fc.write(\"fc... |
#################
# Created 5.5
# Used for benchmarking with range of two bigms
#################
from ccfs.fscplex import fscplex
fc = fscplex()
fc.generate_instance(NI = 2, NS=100, eps=0.2)
fc.parameters.read_file("../../param/stofac.prm")
fc.write("fc.lp")
for i in range(20):
val1 = i * 0.1
for j in range(20):
val2 = j * 0.1
fc.set_bigm_two(val1, val2)
fc.solve()
print val1, val2,
print fc.solution.get_objective_value(),
print fc.solution.progress.get_num_nodes_processed()
| [
[
1,
0,
0.3,
0.05,
0,
0.66,
0,
780,
0,
1,
0,
0,
780,
0,
0
],
[
14,
0,
0.4,
0.05,
0,
0.66,
0.2,
436,
3,
0,
0,
0,
968,
10,
1
],
[
8,
0,
0.45,
0.05,
0,
0.66,
0.4,
... | [
"from ccfs.fscplex import fscplex",
"fc = fscplex()",
"fc.generate_instance(NI = 2, NS=100, eps=0.2)",
"fc.parameters.read_file(\"../../param/stofac.prm\")",
"fc.write(\"fc.lp\")",
"for i in range(20):\n val1 = i * 0.1\n for j in range(20):\n val2 = j * 0.1\n fc.set_bigm_two(val1, val2... |
import time
import cplex
import cplex.callbacks as CPX_CB
import random
import numpy as np
from ccfs.instances import genfs
from ccfs.callbacks import CheckBounds
def bench_checkresolve(NI = 40, NS=35):
step = 0.1
flag = [0] #mutable
I = range(NI)
S = range(NS)
x_name = ["x" + '_' + str(i) for i in I]
v_name = ["v" + '_' + str(i) for i in I]
y_name = ["y" + "_" + str(s) for s in S]
vb_name = ["vb"+str(i) for i in I]
c = genfs(NI, NS)
c.parameters.read_file('../../param/stofac.prm')
c.solve()
true_obj_val = c.solution.get_objective_value()
# Set initial bounds for v
v_lb = [15]*NI
#v_lb = [true_v_sol[i] + 0.1 for i in I]
v_chgind = [0]*NI
c.variables.set_lower_bounds(zip(v_name, v_lb))
# Pass references to the callback function
cbinstance = c.register_callback(CheckBounds)
cbinstance.times_called = 0
cbinstance.v_lb = v_lb
cbinstance.x_name = x_name
cbinstance.v_name = v_name
cbinstance.y_name = y_name
cbinstance.vb_name = vb_name
cbinstance.flag = flag
cbinstance.NI = NI
cbinstance.NS = NS
cbinstance.I = I
cbinstance.S = S
iter = 0
# Start the loop
while(iter < 200):
iter += 1
# resolve the problem
c.solve()
if flag[0] == 1: #pass all the test
break
elif flag[0] == 0: #binding being checked
# decreate the lower bound for v
c.variables.set_lower_bounds(zip(v_name, v_lb))
else: # at the root node integer solution is found
v_lb = [v_lb[i] - step for i in I]
c.variables.set_lower_bounds(zip(v_name, v_lb))
my_obj_val = c.solution.get_objective_value()
return true_obj_val, my_obj_val
f = open('bench_checkresolve.res', 'w')
for ni in range(10, 20, 2):
for ns in range(10, 20, 2):
to, mo = bench_checkresolve(ni, ns)
f.write(str(ni)+' '+str(ns)+' '+str(to)+' '+str(mo)+'\n')
| [
[
1,
0,
0.0152,
0.0152,
0,
0.66,
0,
654,
0,
1,
0,
0,
654,
0,
0
],
[
1,
0,
0.0303,
0.0152,
0,
0.66,
0.1111,
287,
0,
1,
0,
0,
287,
0,
0
],
[
1,
0,
0.0455,
0.0152,
0,
... | [
"import time",
"import cplex",
"import cplex.callbacks as CPX_CB",
"import random",
"import numpy as np",
"from ccfs.instances import genfs",
"from ccfs.callbacks import CheckBounds",
"def bench_checkresolve(NI = 40, NS=35):\n step = 0.1\n flag = [0] #mutable\n I = range(NI)\n S = range(NS... |
"""
This is the main benchmark script for the check and resolve procedure.
Created: May 7th
First version May 7th. Things begin to work as expected.
Todo:
Output result on instance from 10 to 100 with or without heuristics
Author: Derek Zhang
"""
import cplex
import random
import numpy as np
import sys
from time import time
from ccfs.fscplex import fscplex
from ccfs.callbacks import CheckBigM
# Setup range
ni_lo = int(sys.argv[1])
ni_up = int(sys.argv[2])
ni_de = int(sys.argv[3])
ns_lo = int(sys.argv[1])
ns_up = int(sys.argv[2])
ns_de = int(sys.argv[3])
# Print header of the table
f = open("../output/bench_checkbigmresolve.txt", 'w')
header = "%4s %4s" % ('NI', 'NS')
header += "%10s %10s " % ("SIP Obj", "SIP(M) Obj")
header += "%10s %10s " % ("SIP Nodes", "SIP(M) Nodes")
header += "%10s %10s" % ('SIP Time', 'SIP(M) Time')
header += "%10s" % ("Iter")
header += "\n"
f.write(header)
# Main loop over the NI and NS
for NI in range(ni_lo, ni_up, ni_de):
for NS in range(ns_lo, ns_up, ns_de):
fc = fscplex()
fc.generate_instance(NI, NS, eps=0.2, Ordered=False)
fc.parameters.read_file("../../param/stofac.prm")
#fc.get_x_heur()
#fc.strengthen_formulation()
t0 = time()
fc.solve()
t1 = time()
sip_x_val = fc.solution.get_values(fc.x_name)
sip_y_val = fc.solution.get_values(fc.y_name)
sip_obj_val = fc.solution.get_objective_value()
sip_nodes = fc.solution.progress.get_num_nodes_processed()
sip_time = t1 - t0
delta = 1.0
#Start with bigm_val to be 0
bigm_val = [0] * fc.num_bigm
for s in fc.S:
#if y_val[s] == 1.0:
if 1:
for i in fc.I:
ind = i + s*fc.NI
#bigm_val[ind] = 0.1
# Naive set initial values of bigm
#print s, i
if 1:
#if fc.rhs[s][i] - fc.xi[i] > 0:
#fc.rhs[s][i] - fc.xi[i] >= 0:
#bigm_val[ind] = max(fc.rhs[s][i] - sip_x_val[i]-3*random.uniform(0,1), 0)
bigm_val[ind] = 0.1
#bigm_val[ind] = 0.2 * (fc.rhs[s][i] - fc.xi[i])
else:
bigm_val[ind] = 0
#Use heuristic solution ( Expected to be better than naive initial values )
#bigm_val[ind] = max(0, fc.rhs[s][i] - fc.x_heur[i])
# Cheating use the true solution ( Should take one iter since optimality condition is satisfied)
#bigm_val[ind] = max(0, fc.rhs[s][i] - x_val[i] + 0.3) #2 * abs(random.normalvariate(1,1)))
fc.set_bigm(bigm_val)
fc.register_callback(CheckBigM)
iter = 0
# Start check and resolve procedure
t0 = time()
while(iter < NI*NS*2):
iter += 1
fc.flag[0] = 1
fc.solve()
if fc.flag[0] == 1: #pass all the test
#print "finish loop"
#print fc.solution.progress.get_num_nodes_processed()
break
elif fc.flag[0] == 0: #binding being checked
#print fc.solution.progress.get_num_nodes_processed()
#fc.increase_bigm(step_length=delta)
fc.increase_bigm(step_length=delta)
#fc.increase_bigm(step_ratio=0.1)
fc.unregister_callback(CheckBigM)
fc.solve()
t1 = time()
sipm_time = t1-t0
sipm_obj_val = fc.solution.get_objective_value()
sipm_nodes = fc.solution.progress.get_num_nodes_processed()
# Write the results for each case
line = "%4d %4d " % (NI, NS)
line += "%10f %10f" % (sip_obj_val, sipm_obj_val)
line += "%10d %10d" % (sip_nodes, sipm_nodes)
line += "%10f %10f"% (sip_time, sipm_time)
line += "%10d" % (iter)
line += "\n"
f.write(line)
fc = []
f.close()
| [
[
8,
0,
0.0444,
0.0806,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0968,
0.0081,
0,
0.66,
0.0556,
287,
0,
1,
0,
0,
287,
0,
0
],
[
1,
0,
0.1048,
0.0081,
0,
0.66... | [
"\"\"\"\nThis is the main benchmark script for the check and resolve procedure.\nCreated: May 7th\nFirst version May 7th. Things begin to work as expected.\n\nTodo:\nOutput result on instance from 10 to 100 with or without heuristics",
"import cplex",
"import random",
"import numpy as np",
"import sys",
"... |
"""
This script contains all callback functions that might be called during the procedure
Created: May 1st?
Version 1.0: May 7th
Author: Derek Zhang
Todo:
- Optimality condition improvement
- Code optimization and organization
"""
import cplex.callbacks as CPX_CB
class MySolve(CPX_CB.SolveCallback):
    """Solve callback: use primal simplex at the root node, dual simplex below it."""

    def __call__(self):
        at_root = self.get_num_nodes() < 1
        chosen = self.method.primal if at_root else self.method.dual
        self.solve(chosen)
        status = self.get_cplex_status()
        self.use_solution()
class CheckBigM(CPX_CB.CutCallback):
    """Cut callback that checks, at each node, whether any big-M coefficient
    is too small to be valid; if so it flags the offending entries in
    ``bigm_ind``, sets ``flag[0] = 0`` and aborts so the driver can enlarge
    the coefficients and re-solve.
    """
    def __call__(self):
        self.times_called += 1
        local_flag = 0
        #rhs = self.rhs
        #xi = self.xi
        # Current node values of y (binary scenario vars), x, v, and the
        # slacks of the big-M rows.
        self.node_y_val = self.get_values(self.y_name)
        #node_v_val = self.get_values(self.v_name)
        self.node_x_val = self.get_values(self.x_name)
        self.node_v_val = self.get_values(self.v_name)
        self.slack = self.get_linear_slacks(self.bigm_rowname)
        bigm = self.bigm_coef
        #print self.get_num_nodes(),
        #print self.get_num_remaining_nodes()
        # True when every y variable is integer-feasible at this node.
        feas_ind = (sum(self.get_feasibilities(self.y_name)) == 0)
        #if not feas_ind:
        #self.flag[0] = 1
        if 0:  # dead branch: the fractional-solution shortcut is disabled
            print "factional solution found, go to the next node"
            print "Current incumbent", self.get_incumbent_objective_value()
        else:
            #reset the bigm indicator vector
            for i in range(len(self.bigm_ind)):
                self.bigm_ind[i] = 0
            for i in self.I:
                for s in self.S:
                    # Flat index of the (unit i, scenario s) big-M entry.
                    ind = i + s*self.NI
                    #if abs(node_y_val[s] - 1.0) <= 0.00001 and abs(slack[ind]) <= 0.00001:
                    #
                    """
                    The main Checking routine is here:
                    1. y is positive
                    2. slackness is positive
                    3. bigm value if within the bound, which is strengthed value!!!
                    """
                    # compare with the org rhs
                    #if abs(self.node_y_val[s]) > 0.000001 and abs(self.slack[ind]) <= 0.000001 and self.bigm_coef[ind] < self.rhs[s][i]:
                    # compare with the strengthened rhs
                    #if abs(self.node_y_val[s] - 1.0) < 0.0001 and abs(self.slack[ind]) <= 0.0001 and self.bigm_coef[ind] < self.rhs[s][i] - self.xi[i]:
                    # Flag entry ind when y_s is nonzero, the big-M row is
                    # binding (zero slack) and the coefficient is still below
                    # the original rhs — i.e. the big-M may be cutting off
                    # feasible solutions.
                    if abs(self.node_y_val[s]) > 0.0000001 and abs(self.slack[ind]) <= 0.0000001 and self.bigm_coef[ind] < self.rhs[s][i]:
                        local_flag = 1
                        self.bigm_ind[ind] = 1
                    """
                    if abs(self.node_y_val[s]) > 0.000001 and abs(self.slack[ind]) <= 0.000001:
                        if self.rhs[s][i] - self.xi[i] >= 0 and self.bigm_coef[ind] < self.rhs[s][i] - self.xi[i]:
                            local_flag = 1
                            self.bigm_ind[ind] = 1
                        elif self.rhs[s][i] - self.xi[i] < 0 and self.bigm_coef[ind] < self.rhs[s][i]:
                            local_flag = 1
                            self.bigm_ind[ind] = 1
                    """
            if self.Print and local_flag == 1:
                self.print_nodeinfo()
            if local_flag == 1:
                # Record the incumbent, signal the driver (flag[0] = 0) and
                # abort so the big-M values can be increased and re-solved.
                print "resolve the problem"
                #self.incumbent[0] =self.get_cutoff()
                #self.incumbent[0] =self.get_objective_value()
                self.incumbent[0] = self.get_incumbent_objective_value()
                print "incumbent is ", self.incumbent[0]
                self.flag[0] = 0
                self.abort()
                return
            else:
                # No-op string; left as in the original.
                "all nodes fine, go to the next node"
            #if feas_ind:
    def print_nodeinfo(self):
        """Print a per-entry table (x, v, y, slack, big-M, rhs) for the
        big-M entries flagged by __call__."""
        print "solution at this node", self.get_objective_value()
        print "incumbent at this node", self.get_incumbent_objective_value()
        print "%4s %4s " %('sce', 'i'),
        print "%10s %10s %10s %10s %10s %10s" % ('x_val', 'v_val', 'y_val', 'slack', 'bigm', 'rhs')
        print '-'*60
        for i in self.I:
            #print
            for s in self.S:
                ind = i + s*self.NI
                #if 1:
                if self.bigm_ind[ind] == 1:
                    print "%4d %4d " % (s, i),
                    print "%10f" % (self.node_x_val[i]) ,
                    print "%10f" % (self.node_v_val[i]),
                    print "%10f" % (self.node_y_val[s]),
                    print "%10f" % (self.slack[ind]),
                    print "%10f" % (self.bigm_coef[ind]),
                    print "%10f" % (self.rhs[s][i]),
                    if self.bigm_ind[ind] == 1:
                        print " ***",
                    print ""
class CheckSI(CPX_CB.CutCallback):
    """Diagnostic cut callback: evaluates the star inequality for each
    continuous variable at the current node and reports whether it is
    satisfied or violated. Print-only; it never aborts or adds cuts.
    """
    def __call__(self):
        self.times_called += 1
        print "\n\n\n"
        #if the node has integer solution go ahead
        node_y_val = self.get_values(self.y_name)
        node_v_val = self.get_values(self.v_name)
        node_x_val = self.get_values(self.x_name)
        print "x_val", node_x_val
        print "y_val", node_y_val
        for i in self.I:
            print i, "th variable\n---------------"
            # si_coef collects [rhs, y-value] pairs entering the star
            # inequality; si_lhs accumulates its left-hand side.
            si_coef = []
            si_lhs = node_v_val[i]
            # Choose the largest coefficient
            si_tmp_ceof = [self.rhs[s][i] for s in self.S]
            si_coef_max = max(si_tmp_ceof)
            for s in self.S:
                # Only scenarios whose rhs is at least the strengthening
                # value xi[i] contribute to the inequality.
                if self.rhs[s][i] >= self.xi[i]:# and node_y_val[s] > 0
                    si_coef += [[self.rhs[s][i], node_y_val[s]]]
                # Always include the coefficient pair with largest coefficient
                # elif abs(self.rhs[s][i] - si_coef_max) < 0.000001:
                #	si_coef += [[self.rhs[s][i], node_y_val[s]]]
            si_coef.sort(reverse=True)
            l = len(si_coef)
            print "si_coef lenght", l
            # Sentinel pair so the telescoping differences below terminate at xi[i].
            si_coef += [[self.xi[i], 0]]
            #print si_coef
            for k in range(l):
                si_lhs += (si_coef[k][0]-si_coef[k+1][0]) * si_coef[k][1]
            if si_lhs >= si_coef[0][0]:
                print "Star inequality satisfied, abort solving"
            else:
                print "Star inequality violated, valid LP lower bound"
            print "lhs", si_lhs, "rhs", si_coef[0][0]
        """
        if node_y_val[s] > 0 and self.rhs[s][i] >= self.xi[i]:
            si_ind = (node_v_val[i] + (self.rhs[s][i] - self.xi[i]) * node_y_val[s] >= self.rhs[s][i])
            #node_y_val[i] + (rhs[]- rhs[]) * node_v_val[i] >= rhs[]:#satisify the SI::
            if si_ind:
                # print node_y_val
                print "y is positive", i, s
                print "Star inequality satisfied, abort solving"
                print "lhs=", node_v_val[i] + (self.rhs[s][i] - self.xi[i]) * node_y_val[s],
                print "rhs=", self.rhs[s][i]
                #self.abort()
                #return
            #	self.abort()
            #	return
            else:
                print "Star inequality is violated, valid LP lower bound"
        else:
            print "y is zero, keep going"
        """
class CheckBounds(CPX_CB.CutCallback):
    """Cut callback for the bound-tightening variant: inspects the v
    variables at the current node, lowers soft lower bounds that are
    binding, and aborts (flag[0] = 0) when any constraint is active so the
    driver can re-solve with the updated bounds.
    """
    def __call__(self):
        self.times_called += 1
        # create local variables
        node_y_val = self.get_values(self.y_name)
        node_v_val = self.get_values(self.v_name)
        node_x_val = self.get_values(self.x_name)
        # Count of v variables whose lower bound is slack at this node.
        nslack = 0
        for i in self.I:
            #--Case 1: v_val drops below the hard bound then
            #  recover the lower bound and treat it as slack
            if node_v_val[i] <= self.v_hlb[i]+0.0001:
                self.v_lb[i] = self.v_hlb[i]
                nslack += 1
            #--Case 2: v_val is above the hard lower bound and reaches lower bound
            #  we lower the lower bound
            elif abs(node_v_val[i] - self.v_lb[i]) < 0.00001:
                self.v_lb[i] = self.v_lb[i] * 0.95 #change the v_lb here for next iteration
            #--Case 3: v_val is above the soft lower bound
            #  we just recognize it to be slack
            else:
                nslack += 1
        if nslack == self.NI: #all slack go to the next node
            self.flag[0] = 1
            """
            print "this node has no binding constraints, continue with traversal"
            print self.get_num_nodes(),
            print self.get_num_remaining_nodes()
            """
        else:
            # At least one soft bound was binding: request a resolve.
            self.flag[0] = 0
            print "this node has active constraint abort",
            print self.get_num_nodes(),
            print self.get_num_remaining_nodes()
            self.abort()
            return
| [
[
8,
0,
0.0247,
0.0453,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0535,
0.0041,
0,
0.66,
0.2,
959,
0,
1,
0,
0,
959,
0,
0
],
[
3,
0,
0.0761,
0.0329,
0,
0.66,
... | [
"\"\"\"\nThis script contains all callback functions that might be called during the procedure\n\nCreated: May 1st?\nVersion 1.0: May 7th\n\nAuthor: Derek Zhang\nTodo:",
"import cplex.callbacks as CPX_CB",
"class MySolve(CPX_CB.SolveCallback):\n def __call__(self):\n if self.get_num_nodes() < 1:\n ... |
"""
The main script that contains subclass of cplex class.
All customized featured are to be added incrementally
Created: May 1st?
Version 1.0 May 7th
Author Derek Zhang
"""
import cplex
import random
from math import floor
from heapq import nlargest
class fscplex(cplex.Cplex):
    """Chance-constrained instance wrapper around ``cplex.Cplex``.

    Besides the CPLEX problem itself, the object keeps the bookkeeping
    needed by the check-and-resolve callbacks: variable/row name lists,
    big-M row/column names and coefficients, strengthening data (xi), and
    one-element lists (``flag``, ``incumbent``) shared with the callbacks
    so they can be mutated in place.
    """
    def __init__(self):
        cplex.Cplex.__init__(self) #has to be called to activate the constructor of Cplex object
        # Instance dimensions: NI continuous units, NS scenarios.
        self.NI = 1
        self.NS = 1
        self.eps = 0.1
        self.I = range(self.NI)
        self.J = range(self.NS)
        # Variable and row name lists, populated by generate_instance().
        self.x_name = []
        self.v_name = []
        self.y_name = []
        self.dm_name = []
        self.vb_name = []
        self.rhs = []
        # Big-M bookkeeping: one entry per demand row (NI*NS total).
        self.num_bigm = 0
        self.bigm_rowname = []
        self.bigm_colname = []
        self.bigm_coef = []
        self.bigm_ind = []
        # Soft and hard lower bounds for the v variables.
        self.v_lb = []
        self.v_hlb = []
        # Strengthening data: p is the scenario-drop count, xi the
        # per-unit (p+1)-th largest coefficient.
        self.p = 0
        self.xi = []
        self.xi_ind = []
        self.xi_max=[]
        self.xi_min=[]
        self.x_heur=[]
        # One-element lists so callbacks can mutate them in place.
        self.flag = [1]
        self.incumbent = [1]
    def register_callback(self, mycallback, Print=False):
        """Register *mycallback* with CPLEX and copy this model's data
        structures onto the callback instance; returns the instance.
        """
        cbinstance = cplex.Cplex.register_callback(self, mycallback)
        cbinstance.times_called = 0
        cbinstance.Print = Print
        cbinstance.x_name = self.x_name
        cbinstance.v_name = self.v_name
        cbinstance.y_name = self.y_name
        cbinstance.vb_name = self.vb_name
        cbinstance.v_lb = self.v_lb
        cbinstance.v_hlb = self.v_hlb
        cbinstance.flag = self.flag
        cbinstance.incumbent = self.incumbent
        cbinstance.NI = self.NI
        cbinstance.NS = self.NS
        cbinstance.I = self.I
        cbinstance.S = self.S
        cbinstance.p = self.p
        cbinstance.rhs = self.rhs
        cbinstance.xi = self.xi
        cbinstance.xi_max = self.xi_max
        cbinstance.xi_min = self.xi_min
        cbinstance.dm_name = self.dm_name
        cbinstance.bigm_rowname = self.bigm_rowname
        cbinstance.bigm_colname = self.bigm_colname
        cbinstance.bigm_coef = self.bigm_coef
        cbinstance.bigm_ind = self.bigm_ind
        cbinstance.num_bigm = self.num_bigm
        # Scratch buffers refreshed by the callback on every call.
        cbinstance.node_x_val = [0] * self.NI
        cbinstance.node_y_val = [0] * self.NS
        cbinstance.slack = [0] * self.num_bigm
        return cbinstance
    def generate_instance(self, NI=5, NS=5, eps=0.3, Ordered=False):
        """Build a random chance-constrained instance with NI units, NS
        scenarios and risk level eps; when Ordered, per-unit demands are
        sorted in decreasing order across scenarios.
        """
        random.seed(100)
        self.NI = NI
        self.NS = NS
        self.eps = eps
        self.I = range(NI)
        self.S = range(NS)
        self.x_name = ["x" + '_' + str(i) for i in self.I]
        self.v_name = ["v" + '_' + str(i) for i in self.I]
        self.y_name = ["y" + '_' + str(s) for s in self.S]
        self.v_hlb = [0] * NI
        # NOTE(review): the original comment claimed this expression is NOT
        # what was intended ("big bug found on May 9th"), yet this is exactly
        # what is computed — verify against the derivation.
        self.p = int(floor(eps*NS))
        cost = [random.uniform(0,1) for i in self.I]
        prob = [1.0/NS]*NS
        # demand to be sorted later in terms of each continuous variable
        if Ordered:
            self.rhs = [[[] for i in self.I] for s in self.S]
            for i in self.I:
                i_rhs = [random.normalvariate(5,1) for s in self.S]
                i_rhs.sort(reverse=True)
                #i_rhs.sort()
                for s in self.S:
                    self.rhs[s][i] = i_rhs[s]
                # To tweak the priority list
                #self.rhs[0][i] = random.normalvariate(10,2)
        else:
            self.rhs = [[random.normalvariate(5,1) for i in self.I] for s in self.S]
            #self.rhs = [[random.uniform(4,6) for i in self.I] for s in self.S]
        demand = self.rhs
        #Create variables
        x_name = self.x_name
        x_type = ["C"] * NI
        x_lb = [0] * NI
        x_obj = cost
        v_name = self.v_name
        v_type = ["C"] * NI
        v_lb = [0] * NI
        v_obj = [0] * NI
        y_name = self.y_name
        y_type = ["B"] * NS
        y_lb = [0] * NS
        y_obj = [0] * NS #used to be 0
        self.objective.set_sense(self.objective.sense.minimize)
        # Add columns in one pass
        self.variables.add( obj = x_obj + v_obj + y_obj,
                            types = x_type + v_type + y_type,
                            names = x_name + v_name + y_name,
                            lb = x_lb + v_lb + y_lb)
        # Add rows
        # knapsack constraint
        expr = [[y_name, prob]]
        sen = ["L"]
        rhs = [eps]
        names = ["KP"]
        self.linear_constraints.add( lin_expr = expr,
                                     senses = sen,
                                     rhs = rhs,
                                     names = names)
        # capacity constraint
        self.dm_name = ["dm_" + str(i) + '_s_' +str(s) for s in self.S for i in self.I]
        expr = [ [ [v_name[i], y_name[s]], [1, demand[s][i]] ] for s in self.S for i in self.I]
        sen = ["G"] * NI * NS
        rhs = [ demand[s][i] for s in self.S for i in self.I]
        names = self.dm_name
        self.linear_constraints.add( lin_expr = expr,
                                     senses = sen,
                                     rhs = rhs,
                                     names = names)
        # bounds constraint
        expr = [ [ [x_name[i], v_name[i]], [1, -1] ] for i in self.I]
        sen = ["G"] * NI
        rhs = [0] * NI
        names = ["vb"+str(i) for i in self.I]
        self.linear_constraints.add( lin_expr = expr,
                                     senses = sen,
                                     rhs = rhs,
                                     names = names)
        # set bigm names
        self.num_bigm = len(self.dm_name)
        self.bigm_ind = [0] * self.num_bigm
        self.bigm_rowname = self.dm_name
        self.bigm_colname = [ 'y_'+str(s) for s in self.S for i in self.I]
        self.bigm_coef = self.linear_constraints.get_coefficients(zip(self.bigm_rowname, self.bigm_colname))
        #self.write("ccfs.lp")
    def get_x_heur(self):
        """Store in x_heur the x values of the LP relaxation (heuristic)."""
        # solve the problem directly by lp relaxation to obtain heurisitc solution
        lp = cplex.Cplex(self)
        lp.set_problem_type(lp.problem_type.LP)
        lp.solve()
        self.x_heur = lp.solution.get_values(self.x_name)
        lp = []
    def get_hard_v_lower_bounds(self):
        """Set v_hlb[i] to the p-th largest demand rhs of unit i."""
        p = 0 #follow the notation on the paper
        xi_array = []
        for i in self.I:
            print "----\n"
            rowname = ["dm_" + str(i) + '_s_' +str(s) for s in self.S]
            xi_array = self.linear_constraints.get_rhs(rowname)
            xi_array.sort(reverse=True)
            print xi_array
            p = int(floor(self.eps * self.NS))
            self.v_hlb[i] = xi_array[p]
        print "\n\n***********"
        print self.v_hlb
    # Set initial bigm values
    def set_bigm(self, bigm_val):
        """Overwrite all big-M coefficients with *bigm_val* (length num_bigm)."""
        if len(bigm_val) != self.num_bigm:
            print "Wrong"
            return
        self.linear_constraints.set_coefficients(zip(self.bigm_rowname, self.bigm_colname, bigm_val))
        self.bigm_coef = self.linear_constraints.get_coefficients(zip(self.bigm_rowname, self.bigm_colname))
    # Increase bigm values
    def increase_bigm(self, step_length=0.0, step_ratio=0.0, batch = False):
        """Grow flagged big-M coefficients (bigm_ind == 1), capped at the
        row rhs; either by a fixed step_length or a step_ratio of the rhs.
        With batch=True, all scenarios of a flagged unit are grown together.
        """
        #print step_length
        #print self.linear_constraints.get_coefficients(zip(self.bigm_rowname, self.bigm_colname))
        if batch == False:
            for i in range(self.num_bigm):
                bigm_val = self.linear_constraints.get_coefficients(self.bigm_rowname[i], self.bigm_colname[i])
                bigm_val_new = 0.0
                if self.bigm_ind[i] == 1:
                    #print i
                    local_rhs = self.linear_constraints.get_rhs(self.bigm_rowname[i])
                    if step_length != 0.0:
                        bigm_val_new = min(bigm_val + step_length, local_rhs)
                    elif step_ratio != 0.0:
                        bigm_val_new = min(bigm_val + step_ratio * local_rhs, local_rhs)
                    #print "bigm_val before modification ", bigm_val
                    #local_i = i%self.NI
                    ### We bound the maximum of bigm to be set by the corresponding rhs value (if not setting then the iteration will be too many!!!)
                    self.linear_constraints.set_coefficients(self.bigm_rowname[i], self.bigm_colname[i], bigm_val_new)
                    #print "bigm_val after modification", self.linear_constraints.get_coefficients(self.bigm_rowname[i], self.bigm_colname[i])
                    self.bigm_coef[i] = self.linear_constraints.get_coefficients(self.bigm_rowname[i], self.bigm_colname[i])
        elif batch == True:
            for i in range(self.NI):
                # chg_ind: 1 when any scenario of unit i was flagged.
                chg_ind = 0
                for s in self.S:
                    ind = i+ s*self.NI
                    if self.bigm_ind[ind] == 1:
                        chg_ind = 1
                        break
                if chg_ind == 1:
                    for s in self.S:
                        ind = i+ s*self.NI
                        bigm_val = self.linear_constraints.get_coefficients(self.bigm_rowname[ind], self.bigm_colname[ind])
                        bigm_val_new = 0.0
                        #if self.bigm_ind[ind] == 1:
                        if 1:
                            local_rhs = self.linear_constraints.get_rhs(self.bigm_rowname[ind])
                            if step_length != 0.0:
                                bigm_val_new = min(bigm_val + step_length, local_rhs)
                            elif step_ratio != 0.0:
                                bigm_val_new = min(bigm_val + step_ratio * local_rhs, local_rhs)
                            self.linear_constraints.set_coefficients(self.bigm_rowname[ind], self.bigm_colname[ind], bigm_val_new)
                            self.bigm_coef[ind] = self.linear_constraints.get_coefficients(self.bigm_rowname[ind], self.bigm_colname[ind])
    def set_bigm_two(self, val1, val2):
        """Two-unit convenience: set all big-M of unit 0 to val1 and of
        unit 1 to val2 (only valid when NI == 2)."""
        if self.NI != 2:
            return "Wrong"
        bigm_val1 = [val1] * self.NS
        bigm_val2 = [val2] * self.NS
        bigm_rowname1 = ['dm_0' + '_s_' +str(s) for s in self.S]
        bigm_colname1 = ['y_' + str(s) for s in self.S]
        bigm_rowname2 = ['dm_1' + '_s_' +str(s) for s in self.S]
        bigm_colname2 = ['y_' + str(s) for s in self.S]
        self.linear_constraints.set_coefficients(zip(bigm_rowname1, bigm_colname1, bigm_val1))
        self.linear_constraints.set_coefficients(zip(bigm_rowname2, bigm_colname2, bigm_val2))
    # find the pth largest element of xi
    def get_xi(self):
        """Per unit, record the p-th largest (xi), largest (xi_max) and
        smallest (xi_min) current y-coefficient across scenarios."""
        self.xi = [0] * self.NI
        self.xi_max = [0] * self.NI
        self.xi_min = [0] * self.NI
        for i in range(self.NI):
            xi_rowname = ["dm_" + str(i) + '_s_' +str(s) for s in self.S]
            xi_colname = self.y_name
            xi_temp = self.linear_constraints.get_coefficients(zip(xi_rowname, xi_colname))
            xi_temp.sort(reverse=True)
            self.xi[i] = xi_temp[self.p]
            print self.xi[i]
            self.xi_max[i] = xi_temp[0]
            self.xi_min[i] = xi_temp[-1]
    # Strengthen the formulation
    def strengthen_formulation(self):
        """Apply the standard big-M strengthening: reduce each coefficient
        by xi[i] when the rhs dominates xi[i]; otherwise zero both the
        coefficient and the row rhs."""
        self.get_xi()
        bigm_val = [0] * self.num_bigm
        for s in self.S:
            for i in self.I:
                ind = i + s*self.NI
                if self.rhs[s][i] >= self.xi[i]:
                    bigm_val[ind] = self.rhs[s][i] - self.xi[i]
                elif self.rhs[s][i] < self.xi[i]:
                    bigm_val[ind] = 0
                    self.linear_constraints.set_rhs(self.bigm_rowname[ind], 0)
        self.linear_constraints.set_coefficients(zip(self.bigm_rowname, self.bigm_colname, bigm_val))
        self.bigm_coef = self.linear_constraints.get_coefficients(zip(self.bigm_rowname, self.bigm_colname))
| [
[
8,
0,
0.0174,
0.0316,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0348,
0.0032,
0,
0.66,
0.2,
287,
0,
1,
0,
0,
287,
0,
0
],
[
1,
0,
0.038,
0.0032,
0,
0.66,
... | [
"\"\"\"\nThe main script that contains subclass of cplex class.\nAll customized featured are to be added incrementally\n\nCreated: May 1st?\n\nVersion 1.0 May 7th",
"import cplex",
"import random",
"from math import floor",
"from heapq import nlargest",
"class fscplex(cplex.Cplex):\n def __init__(self)... |
import cplex
from time import time
from fs import fs
from callback import check_bigm_at_incumbent
from callback import check_bigm_at_branch
SZ = 40
f = open("results.txt","w")
c = fs()
c.parameters.read_file("../../param/stofac.prm")
c.gen_inst(NI = 2, NS = 50, RLB = 40, seed = 100)
# Reference solve of the untouched instance.
c.solve()
f.write(str(c.solution.get_objective_value()) + "\n\n")
# Start with rank-1 big-M values; the callbacks bump M_rank when too small.
c.set_bigm(rank = [1,1])
c.register_callback(check_bigm_at_incumbent)
c.register_callback(check_bigm_at_branch)
#print c.M_rank
#print c.solution.progress.get_num_nodes_processed()
#c.solve()
"""
print c.solution.basis.get_basis()
print c.solution.get_linear_slacks()
print c.solution.get_objective_value()
print "--------"
print c.variables.get_names()
print c.solution.get_values()
"""
#c.set_problem_type(c.problem_type.LP)
# Iterated check-and-resolve: re-solve, log, apply the (possibly bumped)
# ranks, and reset the validity flag for the next round.
for i in range(SZ):
    print c.M_rank
    c.solve()
    f.write(str(c.M_rank) + str(c.solution.get_objective_value()) + " " + str(c.flag) + "\n")
    c.set_bigm(rank = c.M_rank)
    c.flag = 1
f.close()
| [
[
1,
0,
0.0263,
0.0263,
0,
0.66,
0,
287,
0,
1,
0,
0,
287,
0,
0
],
[
1,
0,
0.0526,
0.0263,
0,
0.66,
0.0588,
654,
0,
1,
0,
0,
654,
0,
0
],
[
1,
0,
0.0789,
0.0263,
0,
... | [
"import cplex",
"from time import time",
"from fs import fs",
"from callback import check_bigm_at_incumbent",
"from callback import check_bigm_at_branch",
"SZ = 40",
"f = open(\"results.txt\",\"w\")",
"c = fs()",
"c.parameters.read_file(\"../../param/stofac.prm\")",
"c.gen_inst(NI = 2, NS = 50, RL... |
from pylab import *
from time import time
from fs import fs
import numpy as np
import mpl_toolkits.mplot3d.axes3d as p3
NI = 2
NS = 40
RLB = 20
SZ = 30
c = fs()
c.parameters.read_file("../../param/stofac.prm")
c.gen_inst(NI = NI, NS = NS, RLB = RLB, seed = 100)
# Z[i,j] = optimal cost when unit 0 uses big-M rank i+1 and unit 1 rank j+1.
Z = np.zeros((SZ, SZ))
c.solve()
print c.solution.get_objective_value()
print c.solution.get_values()
print c.variables.get_names()
#c.set_problem_type(c.problem_type.LP)
print "------------------"
# Sweep the SZ x SZ grid of rank pairs and record the optimal cost surface.
for i in range(SZ):
    for j in range(SZ):
        c.set_bigm(rank = [i+1,j+1])
        c.solve()
        Z[i,j] = c.solution.get_objective_value()
        print [i,j], c.solution.get_values(['x_0','x_1'])
#X_cor = [c.dm_diff_lists[0][i] for i in range(SZ)]
#Y_cor = [c.dm_diff_lists[1][i] for i in range(SZ)]
X_cor = range(SZ)
Y_cor = range(SZ)
X,Y = meshgrid(X_cor, Y_cor)
# 3-D wireframe of cost vs. the two big-M ranks.
fig=figure()
ax = p3.Axes3D(fig)
ax.plot_wireframe(X,Y,Z)
print X
print Y
print Z
ax.set_xlabel('M(1) rank')
ax.set_ylabel('M(2) rank')
ax.set_zlabel('Optimal cost')
show()
| [
[
1,
0,
0.0238,
0.0238,
0,
0.66,
0,
735,
0,
1,
0,
0,
735,
0,
0
],
[
1,
0,
0.0476,
0.0238,
0,
0.66,
0.0323,
654,
0,
1,
0,
0,
654,
0,
0
],
[
1,
0,
0.0714,
0.0238,
0,
... | [
"from pylab import *",
"from time import time",
"from fs import fs",
"import numpy as np",
"import mpl_toolkits.mplot3d.axes3d as p3",
"NI = 2",
"NS = 40",
"RLB = 20",
"SZ = 30",
"c = fs()",
"c.parameters.read_file(\"../../param/stofac.prm\")",
"c.gen_inst(NI = NI, NS = NS, RLB = RLB, seed = 1... |
from time import time
from fs import fs
# Compare original (org_*) vs. strengthened (str_*) formulations over
# 10 random instances: wall time, objective value, branch-and-bound nodes.
org_times = []
str_times = []
org_vals = []
str_vals = []
org_nodes = []
str_nodes = []
for i in range(10):
    c = fs()
    c.parameters.read_file("../../param/stofac.prm")
    c.gen_inst(NI = 5, NS = 5, seed = i*20)
    c.init_lists()
    # Solve the original instance.
    t0 = time()
    c.solve()
    t1 = time()
    org_times += [t1 - t0]
    org_vals += [c.solution.get_objective_value()]
    org_nodes += [c.solution.progress.get_num_nodes_processed()]
    # Re-solve after initializing big-M values.
    # NOTE(review): the timer brackets init_bigm() only; the second solve()
    # is outside the t0..t1 window — confirm this is intended.
    t0 = time()
    c.init_bigm()
    t1 = time()
    c.solve()
    str_times += [t1 - t0]
    str_vals += [c.solution.get_objective_value()]
    str_nodes += [c.solution.progress.get_num_nodes_processed()]
print "time"
print org_times
print str_times
print "vals"
print org_vals
print str_vals
print "nodes"
print org_nodes
print str_nodes
| [
[
1,
0,
0.027,
0.027,
0,
0.66,
0,
654,
0,
1,
0,
0,
654,
0,
0
],
[
1,
0,
0.0541,
0.027,
0,
0.66,
0.0588,
245,
0,
1,
0,
0,
245,
0,
0
],
[
14,
0,
0.1081,
0.027,
0,
0.6... | [
"from time import time",
"from fs import fs",
"org_times = []",
"str_times = []",
"org_vals = []",
"str_vals = []",
"org_nodes = []",
"str_nodes = []",
"for i in range(10):\n c = fs()\n c.parameters.read_file(\"../../param/stofac.prm\")\n c.gen_inst(NI = 5, NS = 5, seed = i*20)\n c.init_... |
import cplex
import random
class fs(cplex.Cplex):
def __init__(self):
cplex.Cplex.__init__(self) #has to be called to activa
self.NI = 1; self.NS = 1
self.I = range(self.NI); self.J = range(self.NS)
self.x_name = []; self.y_name = []; self.rhs = []
self.num_M = 0; self.M_rowname = []; self.M_colname = []; self.M_coef = []
self.M_val = []; self.M_rank = []
self.dm_lists = []; self.dm_diff_lists = []
self.RLB = 0
self.EPS = 0.00001
self.times_called = 0
self.flag = 1
def gen_inst(self, NI=3, NS=3, RLB = 2, seed = 500):
random.seed(seed)
self.NI = NI; self.NS = NS;
self.RLB = RLB
self.I = range(NI); self.S = range(NS)
self.x_name = ["x" + '_' + str(i) for i in self.I]
self.y_name = ["y" + '_' + str(s) for s in self.S]
#self.rhs = [[random.normalvariate(5,1) for i in self.I] for s in self.S]
self.rhs = [[random.uniform(3,6) for i in self.I] for s in self.S]
demand = self.rhs
#cost = [random.uniform(0,1) for i in self.I]
cost = [1]*NI
x_name = self.x_name; x_type = ["C"] * NI
x_lb = [0] * NI; x_obj = cost
y_name = self.y_name; y_type = ["B"] * NS
y_lb = [0] * NS; y_obj = [0] * NS
# Add variables in one pass
self.variables.add(
obj = x_obj + y_obj,
types = x_type + y_type,
names = x_name + y_name,
lb = x_lb + y_lb)
# Add knapsack constraints
self.linear_constraints.add(
lin_expr = [[y_name, [1]*NS]],
senses = ["L"],
rhs = [self.NS - self.RLB], #this number means the number scenario to be dropped
names = ["KP"])
# capacity constraint
self.dm_name = ["dm_" + str(i) + '_s_' +str(s) for s in self.S for i in self.I]
self.linear_constraints.add(
lin_expr = [ [ [x_name[i], y_name[s]], [1, demand[s][i]] ] for s in self.S for i in self.I],
senses = ["G"] * NI * NS,
rhs = [ demand[s][i] for s in self.S for i in self.I],
names = self.dm_name)
# set M names
self.num_M = self.NI
self.M_ind = [[[0] for s in self.S] for i in self.I]
self.M_rowname = [ ["dm_" + str(i) + '_s_' + str(s) for s in self.S] for i in self.I ]
self.M_colname = [ ['y_'+str(s) for s in self.S] for i in self.I]
self.M_coef = self.linear_constraints.get_coefficients(zip(self.M_rowname[0], self.M_colname[0]))
self.write("ccfs.lp")
self.init_lists()
def init_lists(self):
self.dm_lists = [[0.0 for s in self.S] for i in self.I]
self.dm_diff_lists = [[0.0 for s in range(self.NS - 1)] for i in self.I]
for i in self.I:
self.dm_lists[i] = sorted(self.linear_constraints.get_rhs(self.M_rowname[i]), reverse=True)
for s in range(self.NS -1 ):
# the largest minus the following rank to get the difference
self.dm_diff_lists[i][s] = self.dm_lists[i][0] - self.dm_lists[i][s+1]
def set_bigm(self, rank = [1]):
if len(rank) == 1:
self.M_rank = [rank[0] for i in self.I]
elif len(rank) == self.NI:
self.M_rank = list(rank)
else:
print "Wrong rank length"
return
self.M_val = [self.EPS + self.dm_diff_lists[i][self.M_rank[i]-1] for i in self.I]
M_val = self.M_val
print M_val
# Set bigm values
for i in self.I:
self.linear_constraints.set_coefficients(zip(self.M_rowname[i], self.M_colname[i], [M_val[i]]*self.NS ))
print "Reset bigm values"
self.write("ccfs_mod.lp")
#This is the tricky part, we need to create overloaded
#register_call function to call the register_call function
# of the cplex version to pass data structures to them,
def register_callback(self, mycallback):
cb = cplex.Cplex.register_callback(self, mycallback)
cb.fc = self
| [
[
1,
0,
0.0093,
0.0093,
0,
0.66,
0,
287,
0,
1,
0,
0,
287,
0,
0
],
[
1,
0,
0.0187,
0.0093,
0,
0.66,
0.5,
715,
0,
1,
0,
0,
715,
0,
0
],
[
3,
0,
0.5,
0.9346,
0,
0.66,
... | [
"import cplex",
"import random",
"class fs(cplex.Cplex):\n def __init__(self):\n cplex.Cplex.__init__(self) #has to be called to activa\n self.NI = 1; self.NS = 1\n self.I = range(self.NI); self.J = range(self.NS)\n self.x_name = []; self.y_name = []; self.rhs = []\n self.n... |
import cplex.callbacks as CPX_CB
class check_bigm_at_incumbent(CPX_CB.IncumbentCallback):
    """Incumbent callback: when a new incumbent is found, flag any unit
    whose big-M row is binding while its scenario variable is nonzero, and
    bump that unit's rank in fc.M_rank so the driver resets fc.flag = 0 and
    re-solves with larger big-M values.
    """
    def __call__(self):
        print "\n"
        if self.fc.flag == 1:
            #self.times_called += 1
            x_val = self.get_values(self.fc.x_name)
            y_val = self.get_values(self.fc.y_name)
            slacks = [self.get_linear_slacks(self.fc.M_rowname[i]) for i in self.fc.I]
            # Units whose big-M must be increased.
            M_inc_list = []
            print y_val
            for i in self.fc.I:
                # Rank already at its ceiling; nothing left to increase.
                if self.fc.M_rank[i] > self.fc.NS-self.fc.RLB:
                    continue
                for s in self.fc.S:
                    # NOTE(review): the y tolerance here (1e-5) differs from
                    # check_bigm_at_branch (1e-4) — confirm which is intended.
                    if abs(y_val[s]) > 0.00001 and abs(slacks[i][s]) < 0.0001:
                        M_inc_list += [i]
                        break
            if len(M_inc_list) == 0:
                self.fc.flag = 1
                print "M valid at this node", M_inc_list
            else:
                self.fc.flag = 0
                for i in M_inc_list:
                    self.fc.M_rank[i] += 1
                print "M too small, resolve"
        #self.abort()
class check_bigm_at_branch(CPX_CB.BranchCallback):
    """Branch callback twin of check_bigm_at_incumbent: at each branching
    node, flag units whose big-M row is binding while the scenario variable
    is nonzero and bump their rank in fc.M_rank (fc.flag = 0 requests a
    resolve).
    """
    def __call__(self):
        print "\n"
        if self.fc.flag == 1:
            #self.times_called += 1
            x_val = self.get_values(self.fc.x_name)
            y_val = self.get_values(self.fc.y_name)
            slacks = [self.get_linear_slacks(self.fc.M_rowname[i]) for i in self.fc.I]
            # Units whose big-M must be increased.
            M_inc_list = []
            print y_val
            for i in self.fc.I:
                # Rank already at its ceiling; nothing left to increase.
                if self.fc.M_rank[i] > self.fc.NS-self.fc.RLB:
                    continue
                for s in self.fc.S:
                    if abs(y_val[s]) > 0.0001 and abs(slacks[i][s]) < 0.0001:
                        M_inc_list += [i]
                        break
            if len(M_inc_list) == 0:
                self.fc.flag = 1
                print "M valid at this node", M_inc_list
            else:
                self.fc.flag = 0
                for i in M_inc_list:
                    self.fc.M_rank[i] += 1
                print "M too small, resolve"
        #self.abort()
| [
[
1,
0,
0.0159,
0.0159,
0,
0.66,
0,
959,
0,
1,
0,
0,
959,
0,
0
],
[
3,
0,
0.2619,
0.4444,
0,
0.66,
0.5,
461,
0,
1,
0,
0,
175,
0,
10
],
[
2,
1,
0.2698,
0.4286,
1,
0.... | [
"import cplex.callbacks as CPX_CB",
"class check_bigm_at_incumbent(CPX_CB.IncumbentCallback):\n def __call__(self):\n print(\"\\n\")\n if self.fc.flag == 1:\n #self.times_called += 1\n x_val = self.get_values(self.fc.x_name)\n y_val = self.get_values(self.fc.y_nam... |
import cplex
import random
import numpy as np
#from instances import fscplex
from ccfs.fscplex import fscplex
from ccfs.callbacks import CheckBigM
from ccfs.callbacks import Reject
# Generate instances and read in parameters
fc = fscplex()
fc.generate_instance(NI = 15, NS=15, eps=0.2, Ordered=False)
fc.parameters.read_file("../param/stofac.prm")
fc.strengthen_formulation()
fc.write("fc_stre.lp")
fc.solve()
x_val = fc.solution.get_values(fc.x_name)
y_val = fc.solution.get_values(fc.y_name)
obj_val = fc.solution.get_objective_value()
delta = 0.1
bigm_val = [0] * fc.num_bigm
for s in fc.S:
# if y_val[s] == 1.0:
for i in fc.I:
ind = i + s*fc.NI
bigm_val[ind] = 0.2
#bigm_val[ind] = max(0, fc.rhs[s][i] - x_val[i]-0.5)
fc.set_bigm(bigm_val)
fc.register_callback(CheckBigM)
#fc.register_callback(Reject)
iter = 0
# Start check and resolve procedure
while(iter < 200):
print "----------------------"
print "Iteration ", iter
print "----------------------"
iter += 1
# resolve the problem
fc.flag[0] = 1
fc.solve()
if fc.flag[0] == 1: #pass all the test
print "finish loop"
print fc.solution.progress.get_num_nodes_processed()
break
elif fc.flag[0] == 0: #binding being checked
print fc.solution.progress.get_num_nodes_processed()
#fc.increase_bigm(step_length=delta)
fc.increase_bigm(step_length=delta)
# increase the bigm here
fc.unregister_callback(CheckBigM)
#fc.solve()
fc.write("fc_bigm.lp")
print "Ture obj", obj_val
print "My obj", fc.solution.get_objective_value()
| [
[
1,
0,
0.0159,
0.0159,
0,
0.66,
0,
287,
0,
1,
0,
0,
287,
0,
0
],
[
1,
0,
0.0317,
0.0159,
0,
0.66,
0.04,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0476,
0.0159,
0,
0.... | [
"import cplex",
"import random",
"import numpy as np",
"from ccfs.fscplex import fscplex",
"from ccfs.callbacks import CheckBigM",
"from ccfs.callbacks import Reject",
"fc = fscplex()",
"fc.generate_instance(NI = 15, NS=15, eps=0.2, Ordered=False)",
"fc.parameters.read_file(\"../param/stofac.prm\")"... |
import cplex
import random
import sys
from time import time
# Solve two .lp files given on the command line with the same parameter
# settings and report time, objective value, and node count for each.
f1 = sys.argv[1]
f2 = sys.argv[2]
c = cplex.Cplex()
c.parameters.read_file("../../param/stofac.prm")
# First model
c.read(f1)
t0 = time()
c.solve()
t1 = time()
print "time is ", t1 - t0
print "solution is", c.solution.get_objective_value()
print "nodes is ", c.solution.progress.get_num_nodes_processed()
print
# Second model (reuses the same Cplex object and parameters)
c.read(f2)
t0 = time()
c.solve()
t1 = time()
print "time is ", t1 - t0
print "solution is", c.solution.get_objective_value()
print "nodes is ", c.solution.progress.get_num_nodes_processed()
| [
[
1,
0,
0.04,
0.04,
0,
0.66,
0,
287,
0,
1,
0,
0,
287,
0,
0
],
[
1,
0,
0.08,
0.04,
0,
0.66,
0.0476,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.12,
0.04,
0,
0.66,
0.... | [
"import cplex",
"import random",
"import sys",
"from time import time",
"f1 = sys.argv[1]",
"f2 = sys.argv[2]",
"c = cplex.Cplex()",
"c.parameters.read_file(\"../../param/stofac.prm\")",
"c.read(f1)",
"t0 = time()",
"c.solve()",
"t1 = time()",
"print(\"time is \", t1 - t0)",
"print(\"solut... |
#This is a script that used on the command line to
# run .lp files
# The advantage is to be able to change the parameters
# directly from .lp file
import cplex
# Solve a fixed .lp file and dump solution details for inspection.
c=cplex.Cplex()
c.read("ccfs_new.lp")
c.parameters.read_file("../../param/stofac.prm")
c.solve()
print c.solution.get_objective_value()
print c.solution.get_values()
print c.variables.get_names()
print c.solution.get_linear_slacks()
print c.solution.basis.get_basis()
| [
[
1,
0,
0.3529,
0.0588,
0,
0.66,
0,
287,
0,
1,
0,
0,
287,
0,
0
],
[
14,
0,
0.4706,
0.0588,
0,
0.66,
0.1111,
411,
3,
0,
0,
0,
890,
10,
1
],
[
8,
0,
0.5294,
0.0588,
0,
... | [
"import cplex",
"c=cplex.Cplex()",
"c.read(\"ccfs_new.lp\")",
"c.parameters.read_file(\"../../param/stofac.prm\")",
"c.solve()",
"print(c.solution.get_objective_value())",
"print(c.solution.get_values())",
"print(c.variables.get_names())",
"print(c.solution.get_linear_slacks())",
"print(c.solution... |
import cplex
import cplex.callbacks as CPX_CB
import random
import numpy as np
#from instances import fscplex
from fscplex import fscplex
from callbacks import CheckBounds
NI = 30
NS = 30
step = 0.1
# Generate instances and read in parameters
fc = fscplex()
fc.generate_instance(NI, NS)
fc.write("fc_org.lp")
fc.parameters.read_file('../param/stofac.prm')
# Solve the original model to get the reference solution.
fc.solve()
true_v_sol = fc.solution.get_values(fc.v_name)
true_obj_val = fc.solution.get_objective_value()
true_num_nodes = fc.solution.progress.get_num_nodes_processed()
fc.get_hard_v_lower_bounds()
# Set initial (deliberately tight) lower bounds for v.
fc.v_lb = [13]*NI
#fc.v_lb = [true_v_sol[i] + 0.1 for i in fc.I]
fc.variables.set_lower_bounds(zip(fc.v_name, fc.v_lb))
# Pass data structures to the callback via the overridden
# register_callback (see fscplex.register_callback).
fc.register_callback(CheckBounds)
iter = 0
# Start check and resolve procedure: the callback relaxes binding soft
# bounds in fc.v_lb; we reapply them and re-solve until all are slack.
while(iter < 200):
    print "----------------------"
    print "Iteration ", iter
    print "----------------------"
    iter += 1
    # resolve the problem
    fc.solve()
    if fc.flag[0] == 1: #pass all the test
        print "finish loop"
        print fc.solution.progress.get_num_nodes_processed()
        break
    elif fc.flag[0] == 0: #binding being checked
        # apply the lower bounds the callback adjusted
        print fc.solution.progress.get_num_nodes_processed()
        fc.variables.set_lower_bounds(zip(fc.v_name, fc.v_lb))
    """
    else: # at the root node integer solution is found
        v_lb = [v_lb[i] - step for i in I]
        c.variables.set_lower_bounds(zip(v_name, v_lb))
        print c.variables.get_lower_bounds(v_name)
    """
fc.write("fc.lp")
# Report final bounds/values against the reference solution.
print "\n------------------------"
print "last lower bound of v"
print "------------------------"
print np.array(fc.variables.get_lower_bounds(fc.v_name))
print "\n------------------------"
print "last v values"
print "------------------------"
print np.array(fc.solution.get_values(fc.v_name))
print "\n------------------------"
print "true v values"
print "------------------------"
print np.array(true_v_sol)
print "\n------------------------"
print "last objective value"
print "------------------------"
print fc.solution.get_objective_value()
print "\n------------------------"
print "true objective value"
print "------------------------"
print true_obj_val
print true_num_nodes
| [
[
1,
0,
0.0116,
0.0116,
0,
0.66,
0,
287,
0,
1,
0,
0,
287,
0,
0
],
[
1,
0,
0.0233,
0.0116,
0,
0.66,
0.0222,
959,
0,
1,
0,
0,
959,
0,
0
],
[
1,
0,
0.0349,
0.0116,
0,
... | [
"import cplex",
"import cplex.callbacks as CPX_CB",
"import random",
"import numpy as np",
"from fscplex import fscplex",
"from callbacks import CheckBounds",
"NI = 30",
"NS = 30",
"step = 0.1",
"fc = fscplex()",
"fc.generate_instance(NI, NS)",
"fc.write(\"fc_org.lp\")",
"fc.parameters.read_... |
# Farmer: rent out version has a singleton root node var
# note: this will minimize
#
# Imports
#
from coopr.pyomo import *
#
# Model
#
model = AbstractModel()
#
# Parameters
#
model.CROPS = Set()
model.TOTAL_ACREAGE = Param(within=PositiveReals)
model.PriceQuota = Param(model.CROPS, within=PositiveReals)
model.SubQuotaSellingPrice = Param(model.CROPS, within=PositiveReals)
def super_quota_selling_price_validate (model, value, i):
    # Selling above quota must never pay better than selling within it.
    return model.SubQuotaSellingPrice[i] >= model.SuperQuotaSellingPrice[i]
model.SuperQuotaSellingPrice = Param(model.CROPS, validate=super_quota_selling_price_validate)
model.CattleFeedRequirement = Param(model.CROPS, within=NonNegativeReals)
model.PurchasePrice = Param(model.CROPS, within=PositiveReals)
model.PlantingCostPerAcre = Param(model.CROPS, within=PositiveReals)
model.Yield = Param(model.CROPS, within=NonNegativeReals)
#
# Variables
#
model.DevotedAcreage = Var(model.CROPS, bounds=(0.0, model.TOTAL_ACREAGE))
model.QuantitySubQuotaSold = Var(model.CROPS, bounds=(0.0, None))
model.QuantitySuperQuotaSold = Var(model.CROPS, bounds=(0.0, None))
model.QuantityPurchased = Var(model.CROPS, bounds=(0.0, None))
model.FirstStageCost = Var()
model.SecondStageCost = Var()
#
# Constraints
#
# NOTE: every component below now names its rule explicitly instead of
# relying on Coopr's implicit "<name>_rule" frame lookup, which is fragile
# and removed in later Pyomo releases.
def ConstrainTotalAcreage_rule(model):
    # Cannot plant more acres than the farm has.
    return summation(model.DevotedAcreage) <= model.TOTAL_ACREAGE
model.ConstrainTotalAcreage = Constraint(rule=ConstrainTotalAcreage_rule)
def EnforceCattleFeedRequirement_rule(model, i):
    # Net retained quantity (grown + purchased - sold) must cover feed needs.
    return model.CattleFeedRequirement[i] <= (model.Yield[i] * model.DevotedAcreage[i]) + model.QuantityPurchased[i] - model.QuantitySubQuotaSold[i] - model.QuantitySuperQuotaSold[i]
model.EnforceCattleFeedRequirement = Constraint(model.CROPS, rule=EnforceCattleFeedRequirement_rule)
def LimitAmountSold_rule(model, i):
    # Cannot sell more of a crop than was grown.
    return model.QuantitySubQuotaSold[i] + model.QuantitySuperQuotaSold[i] - (model.Yield[i] * model.DevotedAcreage[i]) <= 0.0
model.LimitAmountSold = Constraint(model.CROPS, rule=LimitAmountSold_rule)
def EnforceQuotas_rule(model, i):
    # Inequality triple: 0 <= sub-quota sales <= quota level.
    return (0.0, model.QuantitySubQuotaSold[i], model.PriceQuota[i])
model.EnforceQuotas = Constraint(model.CROPS, rule=EnforceQuotas_rule)
#
# Stage-specific cost computations
#
def ComputeFirstStageCost_rule(model):
    # First stage: planting cost.
    return model.FirstStageCost - summation(model.PlantingCostPerAcre, model.DevotedAcreage) == 0.0
model.ComputeFirstStageCost = Constraint(rule=ComputeFirstStageCost_rule)
def ComputeSecondStageCost_rule(model):
    # Second stage: purchases minus sales revenue (negative = profit).
    expr = summation(model.PurchasePrice, model.QuantityPurchased)
    expr -= summation(model.SubQuotaSellingPrice, model.QuantitySubQuotaSold)
    expr -= summation(model.SuperQuotaSellingPrice, model.QuantitySuperQuotaSold)
    return (model.SecondStageCost - expr) == 0.0
model.ComputeSecondStageCost = Constraint(rule=ComputeSecondStageCost_rule)
#
# Objective
#
def Total_Cost_Objective_rule(model):
    return model.FirstStageCost + model.SecondStageCost
model.Total_Cost_Objective = Objective(rule=Total_Cost_Objective_rule, sense=minimize)
| [
[
1,
0,
0.0686,
0.0098,
0,
0.66,
0,
594,
0,
1,
0,
0,
594,
0,
0
],
[
14,
0,
0.1275,
0.0098,
0,
0.66,
0.0323,
722,
3,
0,
0,
0,
338,
10,
1
],
[
14,
0,
0.1863,
0.0098,
0,
... | [
"from coopr.pyomo import *",
"model = AbstractModel()",
"model.CROPS = Set()",
"model.TOTAL_ACREAGE = Param(within=PositiveReals)",
"model.PriceQuota = Param(model.CROPS, within=PositiveReals)",
"model.SubQuotaSellingPrice = Param(model.CROPS, within=PositiveReals)",
"def super_quota_selling_price_valid... |
# Imports
from coopr.pyomo import *
from coopr.opt import SolverFactory
from ReferenceModel import model
import numpy
# Solve EV for given number of sample realizations with fixed X at X_EV
numSamples = 500
numX=5
optVal=numpy.array([0 for i in range(numSamples)])
# Choose the solver
opt = SolverFactory('gurobi')
# See the result from part b
EV_X = [9.2, 23.45, 5.1, 8.3, 4.95]
# Iterate through all samples
for i in range(numSamples):
datafile = './scenariodata/Scenario' + str(i+1) + '.dat'
instance = model.create(datafile)
# Fix values of x at x_EV
for j in range(numX):
instance.X[j+1] = EV_X[j]
instance.X[j+1].fixed = True
instance.preprocess()
# Solve the instance
results = opt.solve(instance)
print "Solve" + str(i) + "th instance"
#print instance.X.extract_values()
instance.load(results)
optVal[i] = value(instance.TotalProfit)
# Point estimate
EEV = optVal[:].mean()
# Interval estimate
z_alpha = 1.96
EEV_var = optVal[:].var()*numSamples/(numSamples-1)
EEV_halfwidth = z_alpha * sqrt(EEV_var/numSamples)
EEV_CI_lo = EEV - EEV_halfwidth
EEV_CI_hi = EEV + EEV_halfwidth
print EEV
print EEV_CI_lo, EEV_CI_hi
| [
[
1,
0,
0.0417,
0.0208,
0,
0.66,
0,
594,
0,
1,
0,
0,
594,
0,
0
],
[
1,
0,
0.0625,
0.0208,
0,
0.66,
0.0588,
599,
0,
1,
0,
0,
599,
0,
0
],
[
1,
0,
0.0833,
0.0208,
0,
... | [
"from coopr.pyomo import *",
"from coopr.opt import SolverFactory",
"from ReferenceModel import model",
"import numpy",
"numSamples = 500",
"numX=5",
"optVal=numpy.array([0 for i in range(numSamples)])",
"opt = SolverFactory('gurobi')",
"EV_X = [9.2, 23.45, 5.1, 8.3, 4.95]",
"for i in range(numSam... |
# Imports
from coopr.pyomo import *
from coopr.opt import SolverFactory
from ReferenceModel import model
import numpy
# Solve WS for given number of sample realizations with fixed X at X_WS
numSamples = 500
numX = 5
optVal = numpy.array([0 for i in range(numSamples)])
# Choose the solver
opt = SolverFactory('gurobi')
# See the result from part c
WS_X = [9.2, 21.66, 5.00, 9.59, 5.49]
for i in range(numSamples):
datafile = './scenariodata/Scenario' + str(i+1) + '.dat'
instance = model.create(datafile)
# Fix values of x at x_WS
for j in range(numX):
instance.X[j+1] = WS_X[j]
instance.X[j+1].fixed = True
instance.preprocess()
# Solve the instance
results = opt.solve(instance)
print "Solve" + str(i) + "th instance"
instance.load(results)
optVal[i] = value(instance.TotalProfit)
# Point estimate
EWS = optVal[:].mean()
# Interval estimate
z_val = 1.96
EWS_var = optVal[:].var()*numSamples/(numSamples-1)
EWS_halfwidth = z_val*sqrt(EWS_var/numSamples)
EWS_CI_lo = EWS - EWS_halfwidth
EWS_CI_hi = EWS + EWS_halfwidth
print EWS
print EWS_CI_lo, EWS_CI_hi
| [
[
1,
0,
0.0435,
0.0217,
0,
0.66,
0,
594,
0,
1,
0,
0,
594,
0,
0
],
[
1,
0,
0.0652,
0.0217,
0,
0.66,
0.0588,
599,
0,
1,
0,
0,
599,
0,
0
],
[
1,
0,
0.087,
0.0217,
0,
0... | [
"from coopr.pyomo import *",
"from coopr.opt import SolverFactory",
"from ReferenceModel import model",
"import numpy",
"numSamples = 500",
"numX = 5",
"optVal = numpy.array([0 for i in range(numSamples)])",
"opt = SolverFactory('gurobi')",
"WS_X = [9.2, 21.66, 5.00, 9.59, 5.49]",
"for i in range(... |
#This file estimate EV
# Imports
from coopr.pyomo import *
from coopr.opt import SolverFactory
import numpy
from ReferenceModelBase import model
# Solve WS for given number of sample realizations
numSamples=1
numX = 5
optVal = numpy.array ([0 for i in range(numSamples)])
optSoln = numpy.array([[0 for i in range(numSamples)] for j in range(numX)])
opt = SolverFactory('gurobi')
for i in range(numSamples):
datafile = './scenariodata/Scenario' + str(i+1) + '.dat'
instance = model.create(datafile)
results = opt.solve(instance)
print "Solve" + str(i) + "th instance"
instance.load(results)
optVal[i] = value(instance.TotalProfit)
## for j in range(numX):
# optSoln[j][i] = value(instance.X[j+1])
# Calculate point est / interval est of objective value, point estimate of solution
z_val = 1.96
WS = optVal[:].mean()
WS_var = optVal[:].var() * numSamples/(numSamples-1)
WS_halfwidth = z_val*sqrt(WS_var/numSamples)
WS_CI_lo = WS - WS_halfwidth
WS_CI_hi = WS + WS_halfwidth
#X_WS = [0 for j in range(numX)]
#for j in range(numX):
# X_WS[j] = optSoln[j][:].mean()
print WS
print WS_CI_lo, WS_CI_hi
#print X_WS
| [
[
1,
0,
0.08,
0.02,
0,
0.66,
0,
594,
0,
1,
0,
0,
594,
0,
0
],
[
1,
0,
0.1,
0.02,
0,
0.66,
0.0588,
599,
0,
1,
0,
0,
599,
0,
0
],
[
1,
0,
0.12,
0.02,
0,
0.66,
0.1... | [
"from coopr.pyomo import *",
"from coopr.opt import SolverFactory",
"import numpy",
"from ReferenceModelBase import model",
"numSamples=1",
"numX = 5",
"optVal = numpy.array ([0 for i in range(numSamples)])",
"optSoln = numpy.array([[0 for i in range(numSamples)] for j in range(numX)])",
"opt = Sol... |
#This file estimate EV
# Imports
from coopr.pyomo import *
from coopr.opt import SolverFactory
from ReferenceModel import model
import numpy
# Solve WS for given number of sample realizations
numSamples=20
numX = 5
optVal = numpy.array ([0 for i in range(numSamples)])
optSoln = numpy.array([[0 for i in range(numSamples)] for j in range(numX)])
opt = SolverFactory('gurobi')
for i in range(numSamples):
datafile = './scenariodata/Scenario' + str(i+1) + '.dat'
instance = model.create(datafile)
results = opt.solve(instance)
print "Solve" + str(i) + "th instance"
instance.load(results)
optVal[i] = value(instance.TotalProfit)
for j in range(numX):
optSoln[j][i] = value(instance.X[j+1])
# Calculate point est / interval est of objective value, point estimate of solution
z_alpha = 1.96
WS = optVal[:].mean()
WS_var = optVal[:].var() * numSamples/(numSamples-1)
WS_halfwidth = z_alpha * sqrt(WS_var/numSamples)
WS_CI_lo = WS - WS_halfwidth
WS_CI_hi = WS + WS_halfwidth
X_WS = [0 for j in range(numX)]
for j in range(numX):
X_WS[j] = optSoln[j][:].mean()
print WS
print WS_CI_lo, WS_CI_hi
print X_WS
| [
[
1,
0,
0.0612,
0.0204,
0,
0.66,
0,
594,
0,
1,
0,
0,
594,
0,
0
],
[
1,
0,
0.0816,
0.0204,
0,
0.66,
0.05,
599,
0,
1,
0,
0,
599,
0,
0
],
[
1,
0,
0.102,
0.0204,
0,
0.6... | [
"from coopr.pyomo import *",
"from coopr.opt import SolverFactory",
"from ReferenceModel import model",
"import numpy",
"numSamples=20",
"numX = 5",
"optVal = numpy.array ([0 for i in range(numSamples)])",
"optSoln = numpy.array([[0 for i in range(numSamples)] for j in range(numX)])",
"opt = Solver... |
# Vehicle Routing Problem
# Imports
from coopr.pyomo import *
from coopr.opt import SolverFactory
# Model
model = AbstractModel()
# Sets
model.I = Set() #node
model.J = Set() #node
model.S = Set() #source node
model.D = Set() #demand node
# Data
model.Arc = Param(model.I, model.J) #arc available
model.Rev = Param(model.I, model.J) #arc revenue
model.Cost = Param(model.I, model.J) #arc cost
model.N = Param()
# Random
model.ArcDemand = Param(model.I, model.J) #arc demand
# Variables
model.X = Var(model.S, bounds=(0.0, None))
model.Y = Var(model.I, model.J, bounds=(0.0, model.N))
model.Z = Var(model.I, model.J, bounds=(0.0, None))
model.FirstStageProfit = Var()
model.SecondStageProfit = Var()
# Constraints
def vehicle_num_cap_rule(model):
return sum(model.X[s] for s in model.S) == model.N
model.VehicleNumCapRule = Constraint(rule=vehicle_num_cap_rule)
def source_balance_rule(model, s):
return sum(model.Y[s,j] for j in model.J if model.Arc[s,j]>=1) == model.X[s]
model.SourceBalanceRule = Constraint(model.S, rule=source_balance_rule)
def flow_balance_rule(model, d):
return (sum(model.Y[i,d] for i in model.I if model.Arc[i,d]>=1)
- sum(model.Y[d,i] for i in model.I if model.Arc[d,i]>=1)) == 0.0
model.FlowBalanceRule = Constraint(model.D, rule=flow_balance_rule)
def extra_routing_rule(model,i,j):
return model.Y[i,j] - model.ArcDemand[i,j] <= model.Z[i,j]
model.ExtraRoutingRule = Constraint(model.I, model.J, rule=extra_routing_rule)
def y_bound_rule(model,i,j):
return (0.0, model.Y[i,j], model.Arc[i,j] * 51)
model.YBoundRule = Constraint(model.I, model.J, rule=y_bound_rule)
# Stage-specific cost
def first_stage_profit_rule(model):
return model.FirstStageProfit == 0.0
model.GetFirstStageProfit = Constraint(rule=first_stage_profit_rule)
def second_stage_profit_rule(model):
return model.SecondStageProfit == \
sum(sum(model.Rev[i,j] * model.Y[i,j] -
(model.Rev[i,j] + model.Cost[i,j])* model.Z[i,j]
for i in model.I) for j in model.J)
model.GetSecondStageProfit = Constraint(rule=second_stage_profit_rule)
# Objective
def total_profit_rule(model):
return (model.FirstStageProfit + model.SecondStageProfit)
model.TotalProfit = Objective(rule=total_profit_rule, sense=maximize)
# Solve and print
opt = SolverFactory('gurobi')
EV_instance = model.create('ReferenceModel.dat')
EV_results = opt.solve(EV_instance)
EV_instance.load(EV_results)
numX = len(EV_instance.X.keys())
EV = value(EV_instance.TotalProfit)
print('EV = ' + str(EV) + '\n')
for i in range(numX):
print 'X' + str(i) + ':',
print value(EV_instance.X[i+1])
| [
[
1,
0,
0.044,
0.011,
0,
0.66,
0,
594,
0,
1,
0,
0,
594,
0,
0
],
[
1,
0,
0.0549,
0.011,
0,
0.66,
0.025,
599,
0,
1,
0,
0,
599,
0,
0
],
[
14,
0,
0.0879,
0.011,
0,
0.66... | [
"from coopr.pyomo import *",
"from coopr.opt import SolverFactory",
"model = AbstractModel()",
"model.I = Set() #node",
"model.J = Set() #node",
"model.S = Set() #source node",
"model.D = Set() #demand node",
"model.Arc = Param(model.I, model.J) #arc available",
"model.Rev = Param(model.I, model.J... |
# Imports
from coopr.pyomo import *
from coopr.opt import SolverFactory
import ReferenceModelBase
import numpy
"""
# Model
model = AbstractModel()
# Sets
model.I = Set() #node
model.J = Set() #node
model.S = Set() #source node
model.D = Set() #demand node
# Data
model.Arc = Param(model.I, model.J) #arc available
model.Rev = Param(model.I, model.J) #arc revenue
model.Cost = Param(model.I, model.J) #arc cost
model.B = Param()
# Random
model.ArcDemand = Param(model.I, model.J) #arc demand
# Variables
model.X = Var(model.S, bounds=(0.0, None))
model.Y = Var(model.I, model.J, bounds=(0.0, model.B))
model.Z = Var(model.I, model.J, bounds=(0.0, None))
model.FirstStageProfit = Var()
model.SecondStageProfit = Var()
# Constraints
def vehicle_num_cap_rule(model):
return sum(model.X[s] for s in model.S) == model.B
model.VehicleNumCapRule = Constraint(rule=vehicle_num_cap_rule)
def source_balance_rule(model, s):
return sum(model.Y[s,j] for j in model.J if model.Arc[s,j]>=1) == model.X[s]
model.SourceBalanceRule = Constraint(model.S, rule=source_balance_rule)
def flow_balance_rule(model, d):
return (sum(model.Y[i,d] for i in model.I if model.Arc[i,d]>=1)
- sum(model.Y[d,i] for i in model.I if model.Arc[d,i]>=1)) == 0.0
model.FlowBalanceRule = Constraint(model.D, rule=flow_balance_rule)
def extra_routing_rule(model,i,j):
return model.Y[i,j] - model.ArcDemand[i,j] <= model.Z[i,j]
model.ExtraRoutingRule = Constraint(model.I, model.J, rule=extra_routing_rule)
def y_bound_rule(model,i,j):
return (0.0, model.Y[i,j], model.Arc[i,j] * 51)
model.YBoundRule = Constraint(model.I, model.J, rule=y_bound_rule)
# Stage-specific cost
def first_stage_profit_rule(model):
return model.FirstStageProfit == 0.0
model.GetFirstStageProfit = Constraint(rule=first_stage_profit_rule)
def second_stage_profit_rule(model):
return model.SecondStageProfit == \
sum(sum(model.Rev[i,j] * model.Y[i,j] -
(model.Rev[i,j] + model.Cost[i,j])* model.Z[i,j]
for i in model.I) for j in model.J)
model.GetSecondStageProfit = Constraint(rule=second_stage_profit_rule)
# Objective
def total_profit_rule(model):
return (model.FirstStageProfit + model.SecondStageProfit)
model.TotalProfit = Objective(rule=total_profit_rule, sense=maximize)
"""
# Solve WS for given number of sample realizations
numSamples=10
numX = 5
optVal = numpy.array ([0 for i in range(numSamples)])
optSoln = numpy.array([[0 for i in range(numSamples)] for j in range(numX)])
opt = SolverFactory('gurobi')
for i in range(numSamples):
datafile = '../scenariodata/Scenario' + str(i+1) + '.dat'
instance = model.create(datafile)
results = opt.solve(instance)
print "Solve" + str(i) + "th instance"
instance.load(results)
optVal[i] = value(instance.TotalProfit)
## for j in range(numX):
# optSoln[j][i] = value(instance.X[j+1])
# Calculate point est / interval est of objective value, point estimate of solution
z_val = 1.96
WS = optVal[:].mean()
WS_var = optVal[:].var() * numSamples/(numSamples-1)
WS_halfwidth = z_val*sqrt(WS_var/numSamples)
WS_CI_lo = WS - WS_halfwidth
WS_CI_hi = WS + WS_halfwidth
#X_WS = [0 for j in range(numX)]
#for j in range(numX):
# X_WS[j] = optSoln[j][:].mean()
print WS
print WS_CI_lo, WS_CI_hi
#print X_WS
| [
[
1,
0,
0.0179,
0.0089,
0,
0.66,
0,
594,
0,
1,
0,
0,
594,
0,
0
],
[
1,
0,
0.0268,
0.0089,
0,
0.66,
0.0556,
599,
0,
1,
0,
0,
599,
0,
0
],
[
1,
0,
0.0357,
0.0089,
0,
... | [
"from coopr.pyomo import *",
"from coopr.opt import SolverFactory",
"import ReferenceModelBase",
"import numpy",
"\"\"\"\n# Model\nmodel = AbstractModel()\n\n# Sets\nmodel.I = Set() #node\nmodel.J = Set() #node\nmodel.S = Set() #source node",
"numSamples=10",
"numX = 5",
"optVal = numpy.array ([0 for... |
#! /usr/bin/python
import glob
import shutil
import sys
import subprocess
import os
def cpfile(src, target):
sys.stdout.write("Copying %s to %s\n" % (src, target))
shutil.copy(src, target)
# We only copy the armeabi version of the binary
archs = ["armeabi"]
for arch in archs:
try:
os.makedirs("../cryptonite/assets/%s" % arch)
except os.error:
pass
# Split into 1M chunks for Android <= 2.2:
# encfs
p = subprocess.Popen("/usr/bin/split -b 1m encfs encfs.split",
cwd="./%s/bin" % arch,
shell=True)
p.wait()
splitfiles = glob.glob("./%s/bin/encfs.split*" % arch)
print splitfiles
for splitfile in splitfiles:
cpfile(splitfile, "../cryptonite/assets/%s/" % arch)
# encfsctl
# p = subprocess.Popen("/usr/bin/split -b 1m encfsctl encfsctl.split",
# cwd="./%s/bin" % arch,
# shell=True)
# p.wait()
# splitfiles = glob.glob("./%s/bin/encfsctl.split*" % arch)
# print splitfiles
# for splitfile in splitfiles:
# cpfile(splitfile, "../cryptonite/assets/%s/" % arch)
| [
[
1,
0,
0.0682,
0.0227,
0,
0.66,
0,
958,
0,
1,
0,
0,
958,
0,
0
],
[
1,
0,
0.0909,
0.0227,
0,
0.66,
0.1429,
614,
0,
1,
0,
0,
614,
0,
0
],
[
1,
0,
0.1136,
0.0227,
0,
... | [
"import glob",
"import shutil",
"import sys",
"import subprocess",
"import os",
"def cpfile(src, target):\n sys.stdout.write(\"Copying %s to %s\\n\" % (src, target))\n shutil.copy(src, target)",
" sys.stdout.write(\"Copying %s to %s\\n\" % (src, target))",
" shutil.copy(src, target)",
"a... |
#! /usr/bin/python
import glob
import shutil
import sys
import subprocess
import os
def cpfile(src, target):
sys.stdout.write("Copying %s to %s\n" % (src, target))
shutil.copy(src, target)
# We only copy the armeabi version of the binary
archs = ["armeabi"]
for arch in archs:
try:
os.makedirs("../cryptonite/assets/%s" % arch)
except os.error:
pass
# Split into 1M chunks for Android <= 2.2:
# encfs
p = subprocess.Popen("/usr/bin/split -b 1m encfs encfs.split",
cwd="./encfs-1.7.4/%s/bin" % arch,
shell=True)
p.wait()
splitfiles = glob.glob("./encfs-1.7.4/%s/bin/encfs.split*" % arch)
print splitfiles
for splitfile in splitfiles:
cpfile(splitfile, "../cryptonite/assets/%s/" % arch)
# encfsctl
# p = subprocess.Popen("/usr/bin/split -b 1m encfsctl encfsctl.split",
# cwd="./encfs-1.7.4/%s/bin" % arch,
# shell=True)
# p.wait()
# splitfiles = glob.glob("./encfs-1.7.4/%s/bin/encfsctl.split*" % arch)
# print splitfiles
# for splitfile in splitfiles:
# cpfile(splitfile, "../cryptonite/assets/%s/" % arch)
| [
[
1,
0,
0.0682,
0.0227,
0,
0.66,
0,
958,
0,
1,
0,
0,
958,
0,
0
],
[
1,
0,
0.0909,
0.0227,
0,
0.66,
0.1429,
614,
0,
1,
0,
0,
614,
0,
0
],
[
1,
0,
0.1136,
0.0227,
0,
... | [
"import glob",
"import shutil",
"import sys",
"import subprocess",
"import os",
"def cpfile(src, target):\n sys.stdout.write(\"Copying %s to %s\\n\" % (src, target))\n shutil.copy(src, target)",
" sys.stdout.write(\"Copying %s to %s\\n\" % (src, target))",
" shutil.copy(src, target)",
"a... |
#! /usr/bin/python
import glob
import shutil
import sys
import subprocess
import os
# Pick a platform label; anything non-Linux is treated as Darwin.
if 'linux' in sys.platform:
    platform = 'linux'
else:
    platform = 'darwin'
toolchain = "%s/android-toolchain" % os.getenv("HOME")
openssl_version = "1.0.0k"
encfs_version = "1.7.4"
def cpfile(src, target):
    # Copy a single file, logging the operation to stdout.
    sys.stdout.write("Copying %s to %s\n" % (src, target))
    shutil.copy(src, target)
archs = ["armeabi","armeabi-v7a"]
# Release tarballs unpack into a doubly-nested directory; svn is flat.
if encfs_version != "svn":
    encfs_dir = "encfs-%s/encfs-%s" % (encfs_version, encfs_version)
else:
    encfs_dir = "encfs-svn"
# Stage every prebuilt static library into ./obj/local/<arch>/ so the build
# can link against them.
for arch in archs:
    try:
        os.makedirs("./obj/local/%s" % arch)
    except os.error:
        # Directory already exists.
        pass
    target_dir = "./obj/local/%s/" % arch
    # Release builds ship boost libs; the svn variant uses protobuf/tinyxml.
    if encfs_version != "svn":
        cpfile("../boost/boost_1_46_1/android/lib/libboost_filesystem.a", target_dir)
        cpfile("../boost/boost_1_46_1/android/lib/libboost_serialization.a", target_dir)
        cpfile("../boost/boost_1_46_1/android/lib/libboost_system.a", target_dir)
    else:
        cpfile("../protobuf/protobuf-2.4.1/%s/lib/libprotobuf.a" % arch, target_dir)
        cpfile("../tinyxml/tinyxml/%s/libtinyxml.a" % arch, target_dir)
    cpfile("../fuse28/obj/local/%s/libfuse.a" % arch, target_dir)
    cpfile("../rlog/rlog-1.4/%s/lib/librlog.a" % arch, target_dir)
    cpfile("../%s/%s/lib/libencfs.a" % (encfs_dir, arch), target_dir)
    cpfile("../openssl/openssl-%s/%s/libssl.a" % (openssl_version, arch), target_dir)
    cpfile("../openssl/openssl-%s/%s/libcrypto.a" % (openssl_version, arch), target_dir)
    # NOTE(review): if a new arch is ever added to `archs` without extending
    # this chain, arch_subdir stays unbound and the cpfile below raises
    # NameError -- confirm before extending archs.
    if arch=="armeabi":
        arch_subdir = ""
    elif arch == "armeabi-v7a":
        arch_subdir = "armv7-a/"
    cpfile("%s/arm-linux-androideabi/lib/%slibstdc++.a" % (toolchain, arch_subdir), target_dir)
    cpfile("%s/lib/gcc/arm-linux-androideabi/4.6/%slibgcc.a" % (toolchain, arch_subdir), target_dir)
# Only the armeabi truecrypt build is shipped as an app asset.
arch = "armeabi"
try:
    os.makedirs("./assets/%s" % arch)
except os.error:
    pass
# Split into 1M chunks for Android <= 2.2:
# truecrypt
p = subprocess.Popen("/usr/bin/split -b 1m truecrypt truecrypt.split",
                     cwd="../tc/truecrypt-7.1a-source/Main",
                     shell=True)
p.wait()
splitfiles = glob.glob("../tc/truecrypt-7.1a-source/Main/truecrypt.split*")
print(splitfiles)
for splitfile in splitfiles:
    cpfile(splitfile, "./assets/%s/" % arch)
| [
[
1,
0,
0.0405,
0.0135,
0,
0.66,
0,
958,
0,
1,
0,
0,
958,
0,
0
],
[
1,
0,
0.0541,
0.0135,
0,
0.66,
0.0526,
614,
0,
1,
0,
0,
614,
0,
0
],
[
1,
0,
0.0676,
0.0135,
0,
... | [
"import glob",
"import shutil",
"import sys",
"import subprocess",
"import os",
"if 'linux' in sys.platform:\n platform = 'linux'\nelse:\n platform = 'darwin'",
" platform = 'linux'",
" platform = 'darwin'",
"toolchain = \"%s/android-toolchain\" % os.getenv(\"HOME\")",
"openssl_version... |
#!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
  """Upload a file to a Google Code project's file server.

  Args:
    file: The local path to the file.
    project_name: The name of your project on Google Code.
    user_name: Your Google account name.
    password: The googlecode.com password for your account.
      Note that this is NOT your global Google Account password!
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.

  Returns: a tuple:
    http_status: 201 if the upload succeeded, something else if an
      error occured.
    http_reason: The human-readable string associated with http_status
    file_url: If the upload succeeded, the URL of the file on Google
      Code, None otherwise.
  """
  # Strip a full user@gmail.com address down to the bare login; other
  # domains are passed through unchanged.
  if user_name.endswith('@gmail.com'):
    user_name = user_name[:user_name.index('@gmail.com')]
  fields = [('summary', summary)]
  if labels is not None:
    fields.extend([('label', l.strip()) for l in labels])
  content_type, body = encode_upload_request(fields, file)
  auth_token = base64.b64encode('%s:%s'% (user_name, password))
  headers = {
    'Authorization': 'Basic %s' % auth_token,
    'User-Agent': 'Googlecode.com uploader v0.9.4',
    'Content-Type': content_type,
    }
  # One-shot HTTPS POST; Basic auth carries the googlecode credentials.
  server = httplib.HTTPSConnection('%s.googlecode.com' % project_name)
  server.request('POST', '/files', body, headers)
  resp = server.getresponse()
  server.close()
  # Only a 201 Created response carries the uploaded file's URL.
  location = resp.getheader('Location', None) if resp.status == 201 else None
  return resp.status, resp.reason, location
def encode_upload_request(fields, file_path):
  """Encode the given fields and file into a multipart form body.

  fields is a sequence of (name, value) pairs. file is the path of
  the file to upload. The file will be uploaded to Google Code with
  the same file name.

  Returns: (content_type, body) ready for httplib.HTTP instance
  """
  BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
  CRLF = '\r\n'
  parts = []
  # One multipart section per metadata (name, value) pair, first.
  for name, value in fields:
    parts += ['--' + BOUNDARY,
              'Content-Disposition: form-data; name="%s"' % name,
              '',
              value]
  # Then the file payload itself.
  f = open(file_path, 'rb')
  payload = f.read()
  f.close()
  parts += ['--' + BOUNDARY,
            'Content-Disposition: form-data; name="filename"; filename="%s"'
            % os.path.basename(file_path),
            # The upload server determines the mime-type, no need to set it.
            'Content-Type: application/octet-stream',
            '',
            payload]
  # Closing delimiter terminates the form body.
  parts += ['--' + BOUNDARY + '--', '']
  return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(parts)
def upload_find_auth(file_path, project_name, summary, labels=None,
user_name=None, password=None, tries=3):
"""Find credentials and upload a file to a Google Code project's file server.
file_path, project_name, summary, and labels are passed as-is to upload.
Args:
file_path: The local path to the file.
project_name: The name of your project on Google Code.
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
config_dir: Path to Subversion configuration directory, 'none', or None.
user_name: Your Google account name.
tries: How many attempts to make.
"""
while tries > 0:
if user_name is None:
# Read username if not specified or loaded from svn config, or on
# subsequent tries.
sys.stdout.write('Please enter your googlecode.com username: ')
sys.stdout.flush()
user_name = sys.stdin.readline().rstrip()
if password is None:
# Read password if not loaded from svn config, or on subsequent tries.
print 'Please enter your googlecode.com password.'
print '** Note that this is NOT your Gmail account password! **'
print 'It is the password you use to access Subversion repositories,'
print 'and can be found here: http://code.google.com/hosting/settings'
password = getpass.getpass()
status, reason, url = upload(file_path, project_name, user_name, password,
summary, labels)
# Returns 403 Forbidden instead of 401 Unauthorized for bad
# credentials as of 2007-07-17.
if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
# Rest for another try.
user_name = password = None
tries = tries - 1
else:
# We're done.
break
return status, reason, url
def main():
parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
'-p PROJECT [options] FILE')
parser.add_option('-s', '--summary', dest='summary',
help='Short description of the file')
parser.add_option('-p', '--project', dest='project',
help='Google Code project name')
parser.add_option('-u', '--user', dest='user',
help='Your Google Code username')
parser.add_option('-w', '--password', dest='password',
help='Your Google Code password')
parser.add_option('-l', '--labels', dest='labels',
help='An optional list of comma-separated labels to attach '
'to the file')
options, args = parser.parse_args()
if not options.summary:
parser.error('File summary is missing.')
elif not options.project:
parser.error('Project name is missing.')
elif len(args) < 1:
parser.error('File to upload not provided.')
elif len(args) > 1:
parser.error('Only one file may be specified.')
file_path = args[0]
if options.labels:
labels = options.labels.split(',')
else:
labels = None
status, reason, url = upload_find_auth(file_path, options.project,
options.summary, labels,
options.user, options.password)
if url:
print 'The file was uploaded successfully.'
print 'URL: %s' % url
return 0
else:
print 'An error occurred. Your file was not uploaded.'
print 'Google Code upload server said: %s (%s)' % (reason, status)
return 1
if __name__ == '__main__':
sys.exit(main())
| [
[
8,
0,
0.1875,
0.0081,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.1976,
0.004,
0,
0.66,
0.0833,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.2056,
0.004,
0,
0.66,
... | [
"\"\"\"Google Code file uploader script.\n\"\"\"",
"__author__ = 'danderson@google.com (David Anderson)'",
"import httplib",
"import os.path",
"import optparse",
"import getpass",
"import base64",
"import sys",
"def upload(file, project_name, user_name, password, summary, labels=None):\n \"\"\"Uplo... |
# -*- coding: utf-8 -*-
from ca import *
# Nombre: Claves Candidatas Creator/dor, aka clcncr)
# Funcion que toma un conjunto de cierres de atributos y es esquema universal
# y devuelve un NUEVO conjunto de claves candidatas.
# Requires:
# R = esquema universal (Set)
# LCA = List. de Cierre atributos (Set of tuples => (attr's, cierre))
# Returns:
# LCC = List. de Claves Candidatas
def getCCC(R, LCA):
    """Return a NEW list of candidate keys: the attribute sets in LCA
    whose closure equals the universal schema R.

    Each entry of LCA carries 'a' (the attribute set) and 'am' (its
    closure).  Copies of the attribute sets are returned so callers may
    mutate them freely."""
    # The original chained comparison `cla.am <= R <= cla.am` holds exactly
    # when the closure equals R, so a plain equality test is equivalent.
    return [cla.a.copy() for cla in LCA if cla.am == R]
| [
[
1,
0,
0.0952,
0.0476,
0,
0.66,
0,
749,
0,
1,
0,
0,
749,
0,
0
],
[
2,
0,
0.8095,
0.4286,
0,
0.66,
1,
992,
0,
2,
1,
0,
0,
0,
2
],
[
14,
1,
0.6667,
0.0476,
1,
0,
... | [
"from ca import *",
"def getCCC(R, LCA):\n\tLCC = []\n\tfor cla in LCA:\n\t\t# verificamos si el cierre (ca[1]) es igual a R\n\t\tif cla.am <= R <= cla.am:\n\t\t\t#si son iguales => la agregamos al conjunto\n\t\t\t# agregamos una copia por las dudas...\n\t\t\tLCC.append(cla.a.copy())",
"\tLCC = []",
"\tfor cl... |
# -*- coding: utf-8 -*-
from df import *
from ca import *
def cierreAtributos (f, eu):
	"""Build the attribute closure of every single attribute of the
	universal schema eu under the functional dependencies f.

	Each closure is represented with the ca TAD: an ordered pair whose
	first component ('a') is the attribute set considered and whose
	second ('am') is its closure."""
	closures = set ()
	# Every dependency in f has a single attribute on its left-hand side,
	# so closing each individual attribute of the schema suffices; there
	# is no need to consider every subset of attributes.
	for atr in eu:
		current = set([atr])
		changed = True
		# Fixed point: keep absorbing the right-hand side of every
		# dependency whose left-hand side is already in the closure.
		while changed:
			changed = False
			for dep in f:
				if dep.alfa.issubset(current):
					enlarged = current | dep.beta
					if enlarged != current:
						current = enlarged
						changed = True
		# Record the closure of set([atr]) in the result set.
		closures.add(ca(set([atr]), current))
	return closures
def cierreAtributosAlfa(alfa, f):
    """Compute the closure of the attribute set *alfa* under the set
    of functional dependencies *f* (standard fixpoint iteration)."""
    cierre = alfa.copy()
    cambio = True
    while cambio:
        cambio = False
        for dep in f:
            # A dependency fires when its whole left side is already
            # inside the closure and it still contributes new attrs.
            if dep.alfa.issubset(cierre) and not dep.beta.issubset(cierre):
                cierre |= dep.beta
                cambio = True
    return cierre
def elimTrivial(cierreAtributos):
    """Strip the trivial part of every closure.

    For each ``ca`` pair keep only ``am - a``; pairs whose closure is
    purely trivial are dropped altogether. Returns a dict mapping
    frozenset(attributes) -> non-trivial part of the closure.
    """
    # NOTE(review): the module-level ``cierresDic`` global is kept only
    # for backwards compatibility — the return value already carries
    # the result.
    global cierresDic
    cierresDic = {}
    for cierre in cierreAtributos:
        noTrivial = cierre.am - cierre.a
        if noTrivial != set([]):  # skip purely trivial closures
            cierresDic.setdefault(frozenset(cierre.a.copy()), noTrivial)
    return cierresDic
def genDep (cierreAtr):
    """ Build the set of functional dependencies F' from the dict of
    non-trivial attribute closures (key -> closure). Prints F' as a
    side effect and returns it. """
    Fprima = set()
    """ Since we want a->b,c instead of a->b and a->c, there is no
    need to enumerate the subsets of each closure: every (key, value)
    entry of cierreAtr is taken directly as one dependency of F'. """
    for cierre in cierreAtr:
        # ``cierre`` iterates the dict KEYS (frozensets of attributes);
        # the mapped value is the corresponding closure.
        Fprima.add(df(cierre,cierreAtr[cierre]))
    printFprima(Fprima)
    return Fprima
""" Funciones de impresión """
def printCierre (cierreAtr):
    # Pretty-print the set of (attribute, closure) pairs.
    print "Cierre de atributos:\n{"
    for cierre in cierreAtr:
        print "(" + str(cierre.a) + " , " + str(cierre.am) + "),\n"
    print "}"
def printElimTrivial (cierresDic):
    # Pretty-print the closures once their trivial part was removed
    # (cierresDic maps frozenset(attrs) -> non-trivial closure).
    print "Cierre de atributos sin trivialidades:\n{"
    for cierre in cierresDic:
        print "(" + str(cierre) + " , " + str(cierresDic[cierre]) + "), \n"
    print "}"
def printFprima (Fprima):
    # Pretty-print the set of functional dependencies F' as
    # "( alfa , beta )" entries.
    print "F':\n{"
    for dep in Fprima:
        print "(",
        for char in dep.alfa:
            print str(char),
        print " , ",
        for char in dep.beta:
            print str(char),
        print "), \n"
    print "}"
| [
[
1,
0,
0.0288,
0.0096,
0,
0.66,
0,
411,
0,
1,
0,
0,
411,
0,
0
],
[
1,
0,
0.0385,
0.0096,
0,
0.66,
0.1111,
749,
0,
1,
0,
0,
749,
0,
0
],
[
2,
0,
0.1923,
0.2788,
0,
... | [
"from df import *",
"from ca import *",
"def cierreAtributos (f, eu):\n\t\"\"\"Usando el conjunto de d.f. y el esquema universal construimos el\n\t cierre de atributos de cada atributo. Lo representaremos con un TAD\n\t que es un par ordenado, donde la primer componente ('a') es el\n\t atributo considerad... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# df.py
#
# Copyright 2009 Drasky Vanderhoff <drasky@drasky-laptop>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from sys import *
class df:
    """A functional dependency alfa --> beta (both attribute sets)."""
    # Class-level defaults are always shadowed by __init__; kept for
    # backwards compatibility with code that may read df.alfa/df.beta.
    alfa = set()
    beta = set()

    def __hash__(self):
        # Constant hash: containers fall back on equality for lookups.
        # Degrades set/dict operations to linear time but stays correct.
        return 0

    def __repr__(self):
        rep = "("
        for elem in self.alfa:
            rep += elem + ' '
        rep += "-->"
        for elem in self.beta:
            rep += ' ' + elem
        rep += ')'
        return rep

    def __str__(self):
        rep = "("
        for elem in self.alfa:
            rep += elem + ' '
        rep += "-->"
        for elem in self.beta:
            rep += ' ' + elem
        rep += ')'
        return rep

    def __call__(self):
        # Same string value as before ("\!" was never a recognized
        # escape); written with an explicit backslash to silence the
        # invalid-escape warning.
        return "MIERDA\\!"

    def __cmp__(self, other):
        assert self.__class__ == other.__class__
        # Returns falsy (0) when equal, truthy otherwise, so cmp()==0
        # works even though ordering comparisons are meaningless.
        return (not(self.alfa == other.alfa and self.beta == other.beta))

    def __eq__(self, other):
        # Python 3 counterpart of __cmp__ (which Python 3 ignores);
        # identical observable behavior under Python 2.
        assert self.__class__ == other.__class__
        return self.alfa == other.alfa and self.beta == other.beta

    def __ne__(self, other):
        return not self.__eq__(other)

    def __init__(self, x, y):
        # Defensive copies so callers' sets are never aliased.
        self.alfa = x.copy()
        self.beta = y.copy()

    def trans(self, other):
        """Transitivity: from self (a->b) and other (b->c) build a->c.
        Returns None when the dependencies do not chain.

        BUG FIX: the parameter used to be named ``df``, shadowing the
        class, so ``df(...)`` tried to *call the instance* (invoking
        __call__ with the wrong arity) and crashed.
        """
        assert other.__class__ == self.__class__
        if other.alfa == self.beta:
            return df(self.alfa, other.beta)

    def asoc(self, atrib):
        """Augmentation: add *atrib* to both sides of the dependency."""
        assert type(atrib) == set
        return df(self.alfa | atrib, self.beta | atrib)
| [
[
1,
0,
0.3385,
0.0154,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
3,
0,
0.6769,
0.6308,
0,
0.66,
1,
411,
0,
8,
0,
0,
0,
0,
7
],
[
14,
1,
0.3846,
0.0154,
1,
0.58,
... | [
"from sys import *",
"class df:\n\talfa = set()\n\tbeta = set()\n\tdef __hash__(self):\n\t\treturn 0\n\tdef __repr__(self):\n\t\trep = \"(\"\n\t\tfor elem in self.alfa:",
"\talfa = set()",
"\tbeta = set()",
"\tdef __hash__(self):\n\t\treturn 0",
"\t\treturn 0",
"\tdef __repr__(self):\n\t\trep = \"(\"\n\... |
# -*- coding: utf-8 -*-
#
# FC.py
#
# Copyright 2009 Drasky Vanderhoff <drasky.vanderhoff@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
# Utilize las operaciones sobre conj'
# {|,&,-} == {unión,intersección,diferencia} las cuales solo devuelven
# un valor pero no modifican a los conjuntos sobre los que operan.
import copy
from df import *
from fprima import *
def union_partes_izq(d1, F):
    """Merge into *d1* every dependency of *F* that shares its left
    side (alfa), removing the absorbed ones from *F*.
    Both *d1* and *F* are mutated in place.
    """
    absorbidas = [d2 for d2 in F if d2.alfa == d1.alfa and not (d1 == d2)]
    for d2 in absorbidas:
        # Fold the right side into d1, then drop the duplicate.
        d1.beta |= d2.beta
        F.remove(d2)
def atrib_raros_der(dep, F):
    """Remove and return the list of extraneous ("raro") attributes on
    the right side (beta) of *dep*, updating *F* in place.

    An attribute A of beta counts as extraneous when it is still
    derivable from dep.alfa after being dropped from the dependency.
    Both *dep* and *F* are mutated; *FNew* mirrors F with the reduced
    candidate dependency swapped in for each closure test.
    """
    raros = []
    FNew = F.copy()
    tested = set()
    i = len(dep.beta)
    while (0 < i and 0 < len(dep.beta)):
        # Pick any attribute of beta not evaluated yet.
        A = (dep.beta - tested).pop()
        tested.add(A)
        depNew = copy.deepcopy(dep)
        depNew.beta.remove(A)
        # Equality is structural (df.__cmp__ / constant hash), so the
        # possibly-mutated dep still matches its counterpart in FNew.
        assert dep in FNew
        FNew.remove(dep)
        FNew.add(depNew)
        # Closure of alfa computed with A removed from the right side.
        b = cierreAtributosAlfa(dep.alfa, FNew)
        if A in b:  # A was an extraneous attribute
            raros += [A]
            if (len(dep.beta) == 1):  # the dependency became empty
                F.remove(dep)
            else:  # commit the reduction to F and to dep itself
                F.remove(dep)
                F.add(depNew)
                dep.beta.remove(A)
        else:
            # Not extraneous => restore the untouched dependency.
            FNew.remove(depNew)
            FNew.add(dep)
        i -= 1
    return raros
def atrib_raros_izq(dep, R, F):
    """Remove and return the list of extraneous attributes on the left
    side (alfa) of *dep*, updating *F* in place.

    A is extraneous in alfa when beta still follows from (alfa - {A})
    under F, i.e. beta is a subset of the closure of the reduced left
    side.

    Fixes over the original (which its own FIXME marked "never
    tested"):
      * parameter renamed so it no longer shadows the ``df`` class
        (``df(...)`` used to call the instance and crash);
      * the closure is obtained by *calling* ``cierreAtributosAlfa``
        (the original subscripted the ``cierreAtributos`` function);
      * ``set([A])`` instead of ``set(A)``, which split the attribute
        name into individual characters;
      * iterates a snapshot of alfa and keeps working on the reduced
        dependency after each removal.
    """
    raros = []
    for A in list(dep.alfa):
        reducida = dep.alfa - set([A])
        if not reducida:
            break  # never empty the left side completely
        if dep.beta.issubset(cierreAtributosAlfa(reducida, F)):
            nueva = df(reducida, dep.beta)
            F.remove(dep)
            F.add(nueva)
            raros += [A]
            dep = nueva
    return raros
def calcular_FC(F, R):
    """Compute the canonical cover of the dependency set *F* over the
    schema *R*: repeatedly merge equal left sides and strip extraneous
    right-side attributes until a full pass finds none."""
    res = F.copy()  # copy F so we can modify it freely
    raros = ["I ALWAYS WANT TO BE A LUMBERJACK"]  # seed: forces one pass
    while len(raros) > 0:  # while the last pass found extraneous attrs
        # Merge the dependencies that share their left side.
        i = len(res)
        unidas = []
        while (i > 0 and 0 < len(res)):
            dep = res.pop()
            union_partes_izq(dep, res)
            unidas.append(dep)
            i -= 1
        res = set()
        for dep in unidas:
            res.add(dep)
        # Drop the extraneous right-side attributes.
        raros = []
        tested = set()
        i = len(res)
        while (i > 0):
            assert 0 < len(res)
            untested = res - tested
            if len(untested) == 0:
                i = 0  # everything was already examined: end the pass
            else:
                dep = untested.pop()
                tested.add(dep)
                raros += atrib_raros_der(dep, res)
            i -= 1
    return res
| [
[
1,
0,
0.1884,
0.0072,
0,
0.66,
0,
739,
0,
1,
0,
0,
739,
0,
0
],
[
1,
0,
0.1957,
0.0072,
0,
0.66,
0.1667,
411,
0,
1,
0,
0,
411,
0,
0
],
[
1,
0,
0.2029,
0.0072,
0,
... | [
"import copy",
"from df import *",
"from fprima import *",
"def union_partes_izq(d1,F):\n\t\n\t\"\"\" Fusiona todas las dependencias funcionales que tenga a alfa\n\t\tcomo parte izquierda de la misma \"\"\"\n\t\n\ttrash = []\n\tfor d2 in F:\n\t\tif d1.alfa == d2.alfa and not (d1 == d2):",
"\t\"\"\" Fusiona ... |
# -*- coding: utf-8 -*-
from attrs import *
from df import *
from clcncr import getCCC
from fprima import *
from c3fn import calculate3FN
from FNBC import calcular_FNBC, chequear_FNBC , chequear_FNBC_df
from FC import *
def mainProg():
    """Interactive console driver: builds every artifact (universal
    schema, dependencies, attribute closures, F', candidate keys,
    canonical cover) and then serves a text menu to inspect them and
    compute the BCNF / 3NF decompositions."""
    # Tracks whether the BCNF decomposition has already been computed
    # (options 7/8 depend on option 5 having run first).
    calcFNBC = False
    print "\nObteniendo el Esquema Uiversal"
    EU = getEsquemaUniversal()
    print "Esquema Universal obtenido\n"
    print "Obteniendo conjunto de depFunc inicial"
    depFun = getDepFunc()
    print "Conjunto depFun obtenido\n"
    print "Calculando cierre de atributos"
    cierreAttrs = cierreAtributos (depFun, EU)
    print "Cierre de atributos calculado\n"
    print "Eliminando dependencias triviales del cierre de atributos"
    cierreAttrsMin = elimTrivial(cierreAttrs)
    print "Dependencias triviales eliminadas\n"
    print "Calculando F+ sin las dependencias triviales (F prima)"
    FPrima = genDep (cierreAttrsMin)
    print "Conseguimos FPrima\n"
    print "Calculando claves candidatas"
    clavesCandidatas = getCCC (EU, cierreAttrs)
    print "Conseguimos clavesCandidatas\n"
    print "Calculando F Canonica"
    FCanonica = calcular_FC(depFun,EU)
    print "Conseguimos F Canonica\n"
    op = ""
    while not (op == "0"):
        print "\n\nSeleccione una opcion\n"
        print "\t1) Cierre de atributos sin trivialidades"
        print "\t2) F Canonica"
        print "\t3) F Prima"
        print "\t4) Claves Canidatas"
        print "\t5) Descomposicion FNBC"
        print "\t6) Descomposicion 3FN"
        print "\t7) Chequear que la descomposición FNBC realmente",
        print "respeta\n\t la Forma Normal de Boyce-Codd"
        print "\t8) Chequear que la descomposición FNBC preserva"
        print "\t las dependencias funcionales"
        print "\t0) Para salir"
        op = raw_input()
        if op == "1":
            print str(cierreAttrsMin),
        elif op == "2":
            print "\nRecubrimiento canónico de F:\n"
            i = 1
            for dep in FCanonica:
                print str(i)+". "+str(dep)+"\n"
                i+=1
            print '\n'
        elif op == "3":
            print "\nF prima:\n"
            i = 1
            for dep in FPrima:
                print str(i)+". "+str(dep)+"\n"
                i+=1
            print '\n'
        elif op == "4":
            print str (clavesCandidatas)
        elif op == "5":
            # calcular_FNBC wants a list of Ri schemas; start from a
            # list holding just the universal schema EU.
            RiList = list()
            RiList.append (EU)
            print "Calculando FNBC del esquema universal"
            descFNBC = calcular_FNBC (RiList, FPrima, cierreAttrsMin)
            calcFNBC = True
            #print str (descFNBC)
            print "\nConseguimos FNBC\n"
        elif op == "6":
            print "Calculando 3FN del esquema universal"
            desc3FN = calculate3FN (FCanonica, [], clavesCandidatas)
            print str (desc3FN)
            print "\nConseguimos 3FN\n"
        elif op == "7":
            if calcFNBC == True:
                if chequear_FNBC (FPrima, descFNBC, cierreAttrsMin):
                    print "\nRealmente está en FNBC"
                else:
                    print "\n¡¡¡Todo como el chori!!!"
            else:
                print "\nTodavía no se ha calculado la descomposición en FNBC"
        elif op == "8":
            if calcFNBC == True:
                if chequear_FNBC_df (depFun, descFNBC):
                    print "\nLas dependencias son preservadas"
                else:
                    print "\nNo se preservaron las dependencias"
            else:
                print "\nTodavía no se ha calculado la descomposición en FNBC"
        elif op == "0":
            print "\nChau chau\n"
        else:
            print "Opcion incorrecta. Por favor señor, ",
            print "aprenda a teclear antes de pedirnos algo"
# Run the interactive driver only when executed as a script, so that
# importing this module for its helper functions causes no side
# effects (the original called mainProg() unconditionally).
if __name__ == '__main__':
    mainProg()
| [
[
1,
0,
0.0172,
0.0086,
0,
0.66,
0,
251,
0,
1,
0,
0,
251,
0,
0
],
[
1,
0,
0.0259,
0.0086,
0,
0.66,
0.125,
411,
0,
1,
0,
0,
411,
0,
0
],
[
1,
0,
0.0345,
0.0086,
0,
0... | [
"from attrs import *",
"from df import *",
"from clcncr import getCCC",
"from fprima import *",
"from c3fn import calculate3FN",
"from FNBC import calcular_FNBC, chequear_FNBC , chequear_FNBC_df",
"from FC import *",
"def mainProg():\n\t# Para saber si ya se ha calculado la descomposición en FNBC\n\tc... |
# -*- coding: utf-8 -*-
from df import *
import copy
#Vamos a hacer la parte primero del foreach
#Esta funcion va a chequear si algun Ri contiene a la dep. func. a->b
#LRi = Conjuntos de Ri (sets of sets)
#dep = Dependencia funcional a->b (es un tubple (a,b))
# Returns:
# True si encontramos
# False caso contrario
def isDepInRi(LRi, dep):
    """Return True iff some schema of *LRi* contains the whole
    functional dependency *dep*, i.e. alfa U beta fits inside one Ri.

    LRi -- iterable of schemas (sets of attributes)
    dep -- dependency object exposing .alfa and .beta sets
    """
    atributos = dep.alfa | dep.beta  # {a} U {b}
    return any(atributos <= esquema for esquema in LRi)
# Esta es la la funcion que corresponderia al ciclo foreach
# Requires:
# Fc (F canonico = set of tuples, donde cada tuple es (set,set) == (a,b)
# que corresponden al a->b)
# LRi = Set of Set (Conjunto de Ri's)
# Returns: "nada..."
# En caso de que no exista => agrega automaticamente un Ri al conjunto LRi
def firstLoop(Fc, LRi):
    """Cover every dependency of the canonical set *Fc* with a schema:
    whenever no schema of *LRi* contains a dependency a->b, append the
    new schema {a} U {b} to *LRi* (mutated in place)."""
    for dependencia in Fc:
        if not isDepInRi(LRi, dependencia):
            LRi.append(dependencia.alfa | dependencia.beta)
# Ahora vamos a la 2º parte, vamos a verificar si existe Ri con clave Candidata
# Una simple comparacion de inclusion.
# Funcion que hace esta comparacion (Asegura la reunion sin perdida).
# Requires:
# LRi = conjunto de Ri's
# LCC = Lista de Claves Candidatas :)
# Retuns:
# True si se agrego alguna nueva Ri
# False caso contrario (ya existia Clave Ca. € Ri)
# Esta funcion modifica el LRi en caso de que tengamos que generar una nueva Ri
# (osea que clave_candidata(R) !€ algun Ri para todo Ri)
# clavale un nombre a la funcion XD
def reunionWithoutLoss(LRi, LCC):
    """Guarantee the lossless-join property of the decomposition.

    When no schema of *LRi* contains any candidate key of *LCC*,
    append one candidate key (LCC[0]) as a new schema and return True;
    otherwise return False and leave *LRi* untouched.
    """
    for clave in LCC:
        if any(clave <= esquema for esquema in LRi):
            # Some Ri already holds a candidate key: nothing to do.
            return False
    # No candidate key is contained in any Ri => add one. Any key
    # would do; the first is taken (choosing e.g. the smallest could
    # be a future optimization).
    LRi.append(LCC[0])
    return True
################################################################################
# Algoritmos principal
################################################################################
# Funcion que va a calcular 3FN.
# Requires:
# Fc = Fcanonico
# LRi = Conjunto de Ri inicial (el conj vacío la mayoría de las veces)
# LCC = Lista Claves Candidatas
# Returns:
# LRi's (Descomposicion 3FN)
# TENER EN CUENTA QUE SE MODIFICAN LOS VALORES, NO SE TRABAJA SOBRE COPIA DE LOS
# MISMOS. (SI NO SE QUIEREN MODIFICAR => trabajar conFc.copy(), LRi.copy()...
def calculate3FN (Fc, LRi, LCC):
    """3NF synthesis: first cover every dependency of the canonical
    set with a schema, then make sure some schema holds a candidate
    key so the join is lossless.

    Fc  -- canonical cover (set of dependency objects)
    LRi -- initial list of schemas (usually the empty list)
    LCC -- list of candidate keys
    Returns LRi, the 3NF decomposition.
    NOTE: Fc and LRi are modified in place — pass copies if the
    originals must survive.
    """
    firstLoop(Fc, LRi)
    reunionWithoutLoss(LRi, LCC)
    return LRi
| [
[
1,
0,
0.0247,
0.0123,
0,
0.66,
0,
411,
0,
1,
0,
0,
411,
0,
0
],
[
1,
0,
0.037,
0.0123,
0,
0.66,
0.2,
739,
0,
1,
0,
0,
739,
0,
0
],
[
2,
0,
0.1914,
0.0988,
0,
0.66... | [
"from df import *",
"import copy",
"def isDepInRi(LRi, dep):\n\tcdep = dep.alfa | dep.beta # hacemos una union de a U b = {a,b}\n\t# ahora vamos a buscar si encontramos en alguno de los Ri cdep\n\tfor c in LRi:\n\t\tif cdep <= c:\n\t\t\t# si lo encontramos salimos\n\t\t\treturn True\n\treturn False",
"\tcdep ... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# df.py
#
# Copyright 2009 Drasky Vanderhoff <drasky@drasky-laptop>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from sys import *
class df:
    """A functional dependency alfa --> beta (both attribute sets)."""
    # Class-level defaults are always shadowed by __init__; kept for
    # backwards compatibility with code that may read df.alfa/df.beta.
    alfa = set()
    beta = set()

    def __hash__(self):
        # Constant hash: containers fall back on equality for lookups.
        # Degrades set/dict operations to linear time but stays correct.
        return 0

    def __repr__(self):
        rep = "("
        for elem in self.alfa:
            rep += elem + ' '
        rep += "-->"
        for elem in self.beta:
            rep += ' ' + elem
        rep += ')'
        return rep

    def __str__(self):
        rep = "("
        for elem in self.alfa:
            rep += elem + ' '
        rep += "-->"
        for elem in self.beta:
            rep += ' ' + elem
        rep += ')'
        return rep

    def __call__(self):
        # Same string value as before ("\!" was never a recognized
        # escape); written with an explicit backslash to silence the
        # invalid-escape warning.
        return "MIERDA\\!"

    def __cmp__(self, other):
        assert self.__class__ == other.__class__
        # Returns falsy (0) when equal, truthy otherwise, so cmp()==0
        # works even though ordering comparisons are meaningless.
        return (not(self.alfa == other.alfa and self.beta == other.beta))

    def __eq__(self, other):
        # Python 3 counterpart of __cmp__ (which Python 3 ignores);
        # identical observable behavior under Python 2.
        assert self.__class__ == other.__class__
        return self.alfa == other.alfa and self.beta == other.beta

    def __ne__(self, other):
        return not self.__eq__(other)

    def __init__(self, x, y):
        # Defensive copies so callers' sets are never aliased.
        self.alfa = x.copy()
        self.beta = y.copy()

    def trans(self, other):
        """Transitivity: from self (a->b) and other (b->c) build a->c.
        Returns None when the dependencies do not chain.

        BUG FIX: the parameter used to be named ``df``, shadowing the
        class, so ``df(...)`` tried to *call the instance* (invoking
        __call__ with the wrong arity) and crashed.
        """
        assert other.__class__ == self.__class__
        if other.alfa == self.beta:
            return df(self.alfa, other.beta)

    def asoc(self, atrib):
        """Augmentation: add *atrib* to both sides of the dependency."""
        assert type(atrib) == set
        return df(self.alfa | atrib, self.beta | atrib)
| [
[
1,
0,
0.3385,
0.0154,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
3,
0,
0.6769,
0.6308,
0,
0.66,
1,
411,
0,
8,
0,
0,
0,
0,
7
],
[
14,
1,
0.3846,
0.0154,
1,
0.63,
... | [
"from sys import *",
"class df:\n\talfa = set()\n\tbeta = set()\n\tdef __hash__(self):\n\t\treturn 0\n\tdef __repr__(self):\n\t\trep = \"(\"\n\t\tfor elem in self.alfa:",
"\talfa = set()",
"\tbeta = set()",
"\tdef __hash__(self):\n\t\treturn 0",
"\t\treturn 0",
"\tdef __repr__(self):\n\t\trep = \"(\"\n\... |
# -*- coding: utf-8 -*-
from sys import *
class ca:
    """An attribute/closure pair: 'a' is the attribute set considered,
    'am' is its closure."""
    # Class-level defaults, always shadowed by __init__.
    a = set()
    am = set()

    def __hash__(self):
        # Constant hash; containers discriminate through __cmp__.
        return 0

    def _formato(self):
        # Shared rendering used by both __repr__ and __str__.
        partes = ["(ATRIBUTO/S: "]
        for elem in self.a:
            partes.append(elem + ' ')
        partes.append(", CIERRE:")
        for elem in self.am:
            partes.append(' ' + elem)
        partes.append(')')
        return ''.join(partes)

    def __repr__(self):
        return self._formato()

    def __str__(self):
        return self._formato()

    def __cmp__(self, other):
        assert type(other) == type(self)
        # Falsy (0) when equal, truthy otherwise: only cmp()==0 is
        # meaningful; ordering comparisons are not.
        return (not(self.a == other.a and self.am == other.am))

    def __init__(self, x, y):
        assert type(x) == set and type(y) == set
        # Defensive copies so callers' sets are never aliased.
        self.a = x.copy()
        self.am = y.copy()
| [
[
1,
0,
0.0811,
0.027,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
3,
0,
0.5541,
0.8649,
0,
0.66,
1,
749,
0,
5,
0,
0,
0,
0,
8
],
[
14,
1,
0.1622,
0.027,
1,
0.17,
... | [
"from sys import *",
"class ca:\n\ta = set()\n\tam = set()\n\tdef __hash__(self):\n\t\treturn 0\n\tdef __repr__(self):\n\t\trep = \"(ATRIBUTO/S: \"\n\t\tfor elem in self.a:",
"\ta = set()",
"\tam = set()",
"\tdef __hash__(self):\n\t\treturn 0",
"\t\treturn 0",
"\tdef __repr__(self):\n\t\trep = \"(ATRI... |
# -*- coding: utf-8 -*-
from df import *
# Aca vamos a definir todos los atributos, y una funcion nos va a devolver un
# set, que vendria a ser el Esquema universal
# TODO
def getEsquemaUniversal():
    """Return the universal schema: the set of every attribute of the
    survey database (fully-qualified as 'Entity.attribute')."""
    return set(['Planilla.Numero', 'Planilla.Fecha', 'Encuestado.Edad',
                'Encuestado.Sexo', 'Encuestado.Ingreso_mensual',
                'Encuestado.Profesion', 'Encuestado.Instruccion',
                'Encuestado.No_C.I.', 'Encuestado.Nombre_y_apellido',
                'Jefe_de_Grupo_Familiar.Fecha_de_nacimiento',
                'Jefe_de_Grupo_Familiar.Forma_de_cobro',
                'Jefe_de_Grupo_Familiar.¿Trabaja_actualmente?',
                'Jefe_de_Grupo_Familiar.Dedicacion_actual',
                'Jefe_de_Grupo_Familiar.E-mail',
                'Jefe_de_Grupo_Familiar.Telefono.Celular',
                'Jefe_de_Grupo_Familiar.Telefono.Habitacion',
                'Jefe_de_Grupo_Familiar.Telefono.Oficina',
                'Situacion_comunidad.Ventajas',
                'Situacion_comunidad.Desventajas',
                'Comunidad.Nombre',
                'Comunidad.Direccion.Nro', 'Comunidad.Direccion.Calle',
                'Sector.CP',
                'Municipio.Nombre',
                'Parroquia.Nombre', 'Estado.Nombre',
                'Situacion_Laboral.Tarjeta',
                'Situacion_Laboral.Ticket',
                'Situacion_Laboral.Cta_banco',
                'Situacion_Laboral.Ingreso_familiar',
                'Situacion_Laboral.Comercio_en_casa',
                'Situacion_Laboral.Trabajo', 'Vivienda.Mascotas',
                'Vivienda.Plagas',
                'Vivienda.Cond_salubridad', 'Vivienda.Enseres',
                'Vivienda.Techo',
                'Vivienda.Paredes', 'Vivienda.Terreno_propio',
                'Vivienda.Forma_tenencia',
                'Vivienda.Tipo',
                'Vivienda.OCV', 'Servicios.Servicios_comunales',
                'Servicios.Recoleccion_basura',
                'Servicios.Transporte', 'Servicios.Medios',
                'Servicios.Telefonia',
                'Servicios.Electricidad', 'Servicios.Gas',
                'Servicios.Aguas_servidas',
                'Servicios.Aguas_blancas', 'Salud.Historia_familiar',
                'Salud.Ayuda_especial',
                'Situacion_de_Exclusion.Tercera_edad',
                'Situacion_de_Exclusion.Discapacitados',
                'Situacion_de_Exclusion.Enfermedades_terminales',
                'Situacion_de_Exclusion.Indigentes',
                'Situacion_de_Exclusion.Niños_calle',
                'Participacion_Comunitaria.Propia',
                'Participacion_Comunitaria.Familia',
                'Participacion_Comunitaria.Org_Comunitarias',
                'Participacion_Comunitaria.Administracion',
                'Participacion_Comunitaria.Constitucion',
                'Participacion_Comunitaria.Sabe',
                'Participacion_Comunitaria.Apoya',
                'Participacion_Comunitaria.Área',
                'Participacion_Comunitaria.Misiones'])
# Funcion que devuelve el conjunto de dependencias funcionales
def getDepFunc ():
    """Return the set of functional dependencies of the survey schema.
    Every dependency has a single attribute on its left side (the
    planilla number, the respondent's ID, or a location attribute)."""
    s = set ([df(set(['Planilla.Numero']),set(['Situacion_Laboral.Trabajo',
                                               'Situacion_Laboral.Comercio_en_casa',
                                               'Situacion_Laboral.Ingreso_familiar',
                                               'Situacion_Laboral.Cta_banco',
                                               'Situacion_Laboral.Tarjeta',
                                               'Situacion_Laboral.Ticket'])),
              df(set(['Planilla.Numero']),set(['Vivienda.Tipo',
                                               'Vivienda.Forma_tenencia',
                                               'Vivienda.Terreno_propio',
                                               'Vivienda.OCV','Vivienda.Techo',
                                               'Vivienda.Paredes',
                                               'Vivienda.Enseres',
                                               'Vivienda.Cond_salubridad',
                                               'Vivienda.Plagas',
                                               'Vivienda.Mascotas'])),
              df(set(['Planilla.Numero']),set(['Servicios.Aguas_blancas',
                                               'Servicios.Aguas_servidas',
                                               'Servicios.Gas',
                                               'Servicios.Electricidad',
                                               'Servicios.Recoleccion_basura',
                                               'Servicios.Telefonia',
                                               'Servicios.Transporte',
                                               'Servicios.Medios',
                                               'Servicios.Servicios_comunales'])),
              df(set(['Planilla.Numero']),set(['Participacion_Comunitaria.Org_Comunitarias',
                                               'Participacion_Comunitaria.Administracion',
                                               'Participacion_Comunitaria.Constitucion',
                                               'Participacion_Comunitaria.Propia',
                                               'Participacion_Comunitaria.Familia',
                                               'Participacion_Comunitaria.Misiones',
                                               'Participacion_Comunitaria.Sabe',
                                               'Participacion_Comunitaria.Apoya',
                                               'Participacion_Comunitaria.Área'])),
              df(set(['Planilla.Numero']),set(['Salud.Ayuda_especial',
                                               'Salud.Historia_familiar'])),
              df(set(['Planilla.Numero']),set(['Situacion_de_Exclusion.Niños_calle',
                                               'Situacion_de_Exclusion.Indigentes',
                                               'Situacion_de_Exclusion.Tercera_edad',
                                               'Situacion_de_Exclusion.Discapacitados',
                                               'Situacion_de_Exclusion.Enfermedades_terminales'])),
              df(set(['Encuestado.No_C.I.']),set(['Encuestado.Nombre_y_apellido',
                                                  'Encuestado.Edad',
                                                  'Encuestado.Sexo',
                                                  'Encuestado.Ingreso_mensual',
                                                  'Encuestado.Profesion',
                                                  'Encuestado.Instruccion'])),
              df(set(['Encuestado.No_C.I.']),set(['Jefe_de_Grupo_Familiar.E-mail',
                                                  'Jefe_de_Grupo_Familiar.Telefono.Celular',
                                                  'Jefe_de_Grupo_Familiar.Telefono.Habitacion',
                                                  'Jefe_de_Grupo_Familiar.Telefono.Oficina',
                                                  'Jefe_de_Grupo_Familiar.Dedicacion_actual',
                                                  'Jefe_de_Grupo_Familiar.¿Trabaja_actualmente?',
                                                  'Jefe_de_Grupo_Familiar.Forma_de_cobro',
                                                  'Jefe_de_Grupo_Familiar.Fecha_de_nacimiento'])),
              df(set(['Encuestado.No_C.I.']),set(['Situacion_comunidad.Ventajas',
                                                  'Situacion_comunidad.Desventajas'])),
              df(set(['Planilla.Numero']),set(['Comunidad.Nombre',
                                               'Comunidad.Direccion.Nro',
                                               'Comunidad.Direccion.Calle'])),
              df(set(['Comunidad.Nombre']),set(['Sector.CP'])),
              df(set(['Municipio.Nombre']),set(['Estado.Nombre'])),
              # Changed: this used to target Municipio.Nombre, now Sector.CP
              df(set(['Parroquia.Nombre']),set(['Sector.CP'])),
              df(set(['Planilla.Numero']),set(['Encuestado.No_C.I.'])),
              df(set(['Encuestado.No_C.I.']),set(['Planilla.Numero'])),
              df(set(['Participacion_Comunitaria.Constitucion']),set(['Participacion_Comunitaria.Sabe'])),
              df(set(['Situacion_Laboral.Tarjeta']),set(['Situacion_Laboral.Cta_banco'])),
              # These were overlooked in the first delivery
              df(set(['Sector.CP']),set(['Municipio.Nombre'])),
              df(set(['Planilla.Numero']),set(['Planilla.Fecha'])),
              df(set(['Planilla.Numero']),set(['Parroquia.Nombre']))
              ])
    return s
| [
[
1,
0,
0.0213,
0.0071,
0,
0.66,
0,
411,
0,
1,
0,
0,
411,
0,
0
],
[
2,
0,
0.2589,
0.383,
0,
0.66,
0.5,
467,
0,
0,
1,
0,
0,
0,
1
],
[
13,
1,
0.2624,
0.3759,
1,
0.88,... | [
"from df import *",
"def getEsquemaUniversal():\n\treturn set(['Planilla.Numero', 'Planilla.Fecha', 'Encuestado.Edad', \\\n\t'Encuestado.Sexo', 'Encuestado.Ingreso_mensual', \\\n\t'Encuestado.Profesion', 'Encuestado.Instruccion', \\\n\t'Encuestado.No_C.I.', 'Encuestado.Nombre_y_apellido', \\\n\t'Jefe_de_Grupo_Fam... |
# -*- coding: utf-8 -*-
from fprima import cierreAtributosAlfa
import copy
def calcular_FNBC (conjRi, Fpri, cierreAtr):
    """Boyce-Codd normal form decomposition of a list of relational
    schemas for a given F+.

    conjRi    -- list of relational schemas (sets of attributes)
    Fpri      -- closure of the set of functional dependencies (set of df)
    cierreAtr -- dict of attribute closures
    Returns the decomposed list of schemas, or a (Spanish) error string
    when an argument has the wrong container type.
    """
    if (not (type(conjRi) is list)):
        return "El 1º parametro debe ser una 'list' de 'sets'"
    elif (not (type(Fpri) is set)):
        return "El 2º parametro debe ser un 'set' de 'df'"
    elif (not (type(cierreAtr) is dict)):
        return "El 3º parametro debe ser un 'set' de 'tuplas'"
    # (Should the element types inside conjRi and Fpri be checked too?)
    stop = False
    # Work on copies so the caller's arguments stay untouched.
    F = Fpri.copy()
    FNBC = []
    FNBC.extend(conjRi)
    # Keep splitting schemas until a whole pass over F finds no
    # dependency that violates BCNF in any schema.
    while (not stop):
        stop = True
        for dep in F:
            Ri = es_violac_FNBC (FNBC, dep, cierreAtr)
            if Ri is not None:  # dep violates BCNF in some Ri
                stop = False
                print "\nSe halló que dep viola un Ri..."
                print "dep = "+str(dep)+'\n'
                convertir_FNBC (FNBC, Ri, dep)
    return FNBC
def es_violac_FNBC(conjRi, dep, cierreAtr):
    """Return the first schema of *conjRi* for which *dep* is a BCNF
    violation, or None when no schema is violated."""
    for esquema in conjRi:
        if viola_FNBC(esquema, dep, cierreAtr):
            return esquema
    # No violation found.
    return None
def viola_FNBC(Ri, dep, cierreAtr):
    """Tell whether the functional dependency *dep* is a BCNF
    violation for the schema *Ri*: dep must live entirely inside Ri,
    be non-trivial, and its left side must not be a superkey of Ri."""
    dentro = dep.alfa.issubset(Ri) and dep.beta.issubset(Ri)
    if not dentro:
        return False
    if dep.beta.issubset(dep.alfa):
        # Trivial dependency => no BCNF violation.
        return False
    for atr in cierreAtr:
        # alfa is a superkey of Ri when Ri fits inside closure(alfa) U alfa.
        if dep.alfa == atr and Ri.issubset(cierreAtr[atr].union(atr)):
            return False
    # Non-trivial and not a superkey: dep violates BCNF for Ri.
    return True
def convertir_FNBC (conjRi, Ri, dep):
    """Split *Ri* along the violating dependency *dep* (a -> b) so the
    BCNF violation disappears: Ri is replaced inside *conjRi* by the
    two schemas (alfa U beta) and (Ri - beta)."""
    print "Descomponiendo Ri: "
    conjRi.remove(Ri)
    Rj = dep.alfa.union(dep.beta)  # {a} U {b}
    Ri = Ri.difference(dep.beta)  # Ri - {b} (rebinds the local only)
    print "Descomposición obtenida:"
    print "\tRj1 = "+str(Rj)
    print "\tRj2 = "+str(Ri)
    conjRi.append(Ri)
    conjRi.append(Rj)
def chequear_FNBC(fPrima, conjRi, cierreAtr):
    """Verify that the decomposition *conjRi* respects BCNF with
    respect to every dependency of *fPrima*."""
    for dependencia in fPrima:
        violado = es_violac_FNBC(conjRi, dependencia, cierreAtr)
        if violado is not None:
            # ``violado`` is a schema witnessing a BCNF violation.
            return False
    # Every dependency passed: the decomposition respects BCNF.
    return True
def chequear_FNBC_df(F,conjR):
    """Check that the decomposition *conjR* preserves every functional
    dependency of *F* (attribute-closure chase restricted to each Ri).
    NOTE(review): the loop variable ``df`` shadows the ``df`` class —
    harmless here, but worth renaming eventually."""
    for df in F:  # check that the decomposition keeps each f.d.
        res = copy.deepcopy(df.alfa)
        # Start one below len(res) so the chase loop runs at least once.
        c = len(res) - 1
        print "Pasada del def " , df
        # Grow res until a full pass over the schemas adds nothing new.
        while c < len(res):
            c = len(res)
            for Ri in conjR:
                cierre = cierreAtributosAlfa(res.intersection(Ri),F)
                res.update(cierre.intersection(Ri))
                print "Res += " , cierre.intersection(Ri)
        if not df.beta.issubset(res):  # beta was not reached
            return False
    # Every beta was contained in its chased closure.
    return True
| [
[
1,
0,
0.0236,
0.0079,
0,
0.66,
0,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.0315,
0.0079,
0,
0.66,
0.1429,
739,
0,
1,
0,
0,
739,
0,
0
],
[
2,
0,
0.1772,
0.2677,
0,
... | [
"from fprima import cierreAtributosAlfa",
"import copy",
"def calcular_FNBC (conjRi, Fpri, cierreAtr):\n\n\t\"\"\"Descomposición en la forma normal de Boyce-Codd de un\n\t conjunto de esquemas relacionales, para un F+ dado.\n\t 1º parámetro => conjunto de esquemas relacionales\n\t 2º parámetro => cierre d... |
# -*- coding: utf-8 *-*
import sys
import unittest
from tests.dijkstra import *
from tests.prim import *
from tests.kruskal import *
# Run the aggregated graph-algorithm test suites when executed directly.
if __name__ == '__main__':
    unittest.main()
| [
[
1,
0,
0.2,
0.1,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.3,
0.1,
0,
0.66,
0.2,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.5,
0.1,
0,
0.66,
0.4,
292,... | [
"import sys",
"import unittest",
"from tests.dijkstra import *",
"from tests.prim import *",
"from tests.kruskal import *",
"if __name__ == '__main__':\n unittest.main()",
" unittest.main()"
] |
# -*- coding: utf-8 *-*
import unittest
from tests_algorithms import *
from tests_graphs import *
from tests_structures import *
# Run the aggregated test suites when executed directly.
if __name__ == '__main__':
    unittest.main()
| [
[
1,
0,
0.2222,
0.1111,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.4444,
0.1111,
0,
0.66,
0.25,
222,
0,
1,
0,
0,
222,
0,
0
],
[
1,
0,
0.5556,
0.1111,
0,
0.66... | [
"import unittest",
"from tests_algorithms import *",
"from tests_graphs import *",
"from tests_structures import *",
"if __name__ == '__main__':\n unittest.main()",
" unittest.main()"
] |
# -*- coding: utf-8 -*-
from graphs.listgraph import *
from graphs.matrixgraph import *
from graphs.generator import *
from timeit import Timer
class Main():
    """Benchmark driver: times Dijkstra/Prim/Kruskal over matrix- and
    list-based graph implementations and writes a ';'-separated report
    file, plus helpers to generate random input graphs."""

    def __init__(self):
        # Number of timeit repetitions averaged per measurement.
        self.repeat = 5

    def log(self, message):
        print message

    def measure(self):
        """Time every algorithm/implementation pair over the generated
        input graphs, writing one row per input file."""
        start = 500
        delta = 100
        num = 1
        end = start + (num - 1) * delta
        outputname = 'results.dense.' + str(start) + '.'
        outputname += str(end) + '.txt'
        self.logfile = open(outputname, 'w')
        filenames = []
        # Dense inputs: start, start+delta, ...
        for i in range(num):
            vertices = start + i * delta
            filename = 'inputs/dense.' + str(vertices) + '.txt'
            filenames.append(filename)
        # Sparse inputs: delta, 2*delta, ...
        for i in range(num):
            vertices = (i + 1) * delta
            filename = 'inputs/sparse.' + str(vertices) + '.txt'
            filenames.append(filename)
        count = start
        for filename in filenames:
            output = str(count) + ';'
            count += delta
            self.log('-------------')
            self.log(filename)
            self.log('-------------')
            # MatrixGraph - ListGraph - Dijkstra
            graph_impl = 'MatrixGraph'
            time = self.time_dijkstra(graph_impl, filename)
            self.log(graph_impl + ' - dijkstra:' + str(time))
            output += str(time) + ';'
            graph_impl = 'ListGraph'
            time = self.time_dijkstra(graph_impl, filename)
            self.log(graph_impl + ' - dijkstra:' + str(time))
            output += str(time) + ';'
            # MatrixGraph - ListGraph - Prim
            graph_impl = 'MatrixGraph'
            time = self.time_prim(graph_impl, filename)
            self.log(graph_impl + ' - prim:' + str(time))
            output += str(time) + ';'
            graph_impl = 'ListGraph'
            time = self.time_prim(graph_impl, filename)
            self.log(graph_impl + ' - prim:' + str(time))
            output += str(time) + ';'
            # MatrixGraph - ListGraph - Kruskal
            graph_impl = 'MatrixGraph'
            time = self.time_kruskal(graph_impl, filename)
            self.log(graph_impl + ' - kruskal:' + str(time))
            output += str(time) + ';'
            graph_impl = 'ListGraph'
            time = self.time_kruskal(graph_impl, filename)
            self.log(graph_impl + ' - kruskal:' + str(time))
            output += str(time) + ';'
            # Decimal comma for the spreadsheet locale.
            # NOTE(review): source indentation was ambiguous here; one
            # written row per input file is assumed — confirm.
            output = output.replace('.', ',')
            self.logfile.write(output + '\n')
        self.logfile.close()

    def time_dijkstra(self, implementation, filename):
        # Dijkstra from V0 to the last vertex of the loaded graph.
        setup = self.get_setup(implementation, filename)
        setup += "index = graph.vertex_count() - 1\n"
        setup += "destination = 'V' + str(index)\n"
        code = "graph.run_dijkstra('V0', destination)"
        return self.time(code, setup)

    def time_kruskal(self, implementation, filename):
        setup = self.get_setup(implementation, filename)
        code = "graph.run_kruskal()"
        return self.time(code, setup)

    def time_prim(self, implementation, filename):
        setup = self.get_setup(implementation, filename)
        code = "graph.run_prim()"
        return self.time(code, setup)

    def get_setup(self, implementation, filename):
        # timeit setup snippet: import, construct and load the chosen
        # graph implementation.
        setup = "from graphs." + implementation.lower()
        setup += " import " + implementation + "\n"
        setup += "graph = " + implementation + "()\n"
        setup += "graph.load('" + filename + "')\n"
        return setup

    def time(self, code, setup):
        # Average wall time of ``code`` over self.repeat runs.
        timer = Timer(code, setup)
        return (timer.timeit(self.repeat) / self.repeat)

    def generate_graphs(self):
        """Generate dense (50% edge density) and sparse (1%) random
        graphs of growing size under inputs/."""
        gen = Generator()
        densefactor = 0.5
        sparsefactor = 0.01
        vertices = 50
        delta = 50
        for i in range(50):
            print 'Generating graphs with', vertices, 'vertices...'
            filename = 'inputs/dense.' + str(vertices) + '.txt'
            gen.generate(vertices, densefactor, filename)
            filename = 'inputs/sparse.' + str(vertices) + '.txt'
            gen.generate(vertices, sparsefactor, filename)
            vertices += delta

    def list_vs_array(self):
        """Compare iteration speed of the project List class against a
        builtin list."""
        repeat = 100
        setup = "from structures.list import List\n"
        setup += "list = List()\n"
        setup += "for i in range(100000):\n"
        setup += "    list.add(i)\n"
        code = "for i in list:\n"
        code += "    pass"
        timer = Timer(code, setup)
        print 'list:', (timer.timeit(repeat) / repeat)
        setup = "list = []\n"
        setup += "for i in range(100000):\n"
        setup += "    list.append(i)\n"
        code = "for i in list:\n"
        code += "    pass"
        timer = Timer(code, setup)
        print 'array:', (timer.timeit(repeat) / repeat)

    def main(self):
        # Default entry point: run the measurements only.
        self.measure()
        # self.list_vs_array()
        # self.generate_graphs()
if __name__ == '__main__':
main = Main()
main.main()
| [
[
1,
0,
0.0115,
0.0057,
0,
0.66,
0,
217,
0,
1,
0,
0,
217,
0,
0
],
[
1,
0,
0.0172,
0.0057,
0,
0.66,
0.2,
941,
0,
1,
0,
0,
941,
0,
0
],
[
1,
0,
0.023,
0.0057,
0,
0.66... | [
"from graphs.listgraph import *",
"from graphs.matrixgraph import *",
"from graphs.generator import *",
"from timeit import Timer",
"class Main():\n\n def __init__(self):\n self.repeat = 5\n\n def log(self, message):\n print(message)",
" def __init__(self):\n self.repeat = 5"... |
# -*- coding: utf-8 -*-
import unittest
from structures.unionfind import UnionFind
class UnionFindTest(unittest.TestCase):
def test_create_unionfind(self):
unionfind = UnionFind(['V1', 'V2'])
self.assertEqual(2, unionfind.count())
def test_create_unionfind_union_check_count(self):
unionfind = UnionFind(['V1', 'V2'])
self.assertEqual(2, unionfind.count())
unionfind.union('V1', 'V2')
self.assertEqual(1, unionfind.count())
def test_build_example_from_book_check_count(self):
items = 'ijstuvwxyz'
unionfind = UnionFind(items)
self.assertEqual(len(items), unionfind.count())
unionfind.union(unionfind.find('w'), unionfind.find('u'))
unionfind.union(unionfind.find('s'), unionfind.find('u'))
unionfind.union(unionfind.find('t'), unionfind.find('v'))
unionfind.union(unionfind.find('z'), unionfind.find('v'))
unionfind.union(unionfind.find('i'), unionfind.find('x'))
unionfind.union(unionfind.find('y'), unionfind.find('j'))
unionfind.union(unionfind.find('x'), unionfind.find('j'))
self.assertEqual(3, unionfind.count())
unionfind.union(unionfind.find('u'), unionfind.find('v'))
self.assertEqual(2, unionfind.count())
| [
[
1,
0,
0.0488,
0.0244,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0732,
0.0244,
0,
0.66,
0.5,
696,
0,
1,
0,
0,
696,
0,
0
],
[
3,
0,
0.5732,
0.878,
0,
0.66,
... | [
"import unittest",
"from structures.unionfind import UnionFind",
"class UnionFindTest(unittest.TestCase):\n\n def test_create_unionfind(self):\n unionfind = UnionFind(['V1', 'V2'])\n\n self.assertEqual(2, unionfind.count())\n\n def test_create_unionfind_union_check_count(self):",
" def ... |
# -*- coding: utf-8 -*-
import unittest
from graphs.matrixgraph import MatrixGraph
class MatrixGraphTest(unittest.TestCase):
def setUp(self):
self.graph = MatrixGraph()
def test_add_two_vertices(self):
self.graph.add_vertex('V1')
self.graph.add_vertex('V2')
self.assertEqual(2, self.graph.vertex_count())
def test_add_fifteen_vertices(self):
n = 15
for i in range(n):
name = 'V' + str(i)
self.graph.add_vertex(name)
self.assertEqual(n, self.graph.vertex_count())
def test_add_vertices_add_edge_check_edge(self):
n = 15
for i in range(n):
name = 'V' + str(i)
self.graph.add_vertex(name)
self.graph.add_edge('V4', 'V5', 1)
weight = self.graph.get_edge('V4', 'V5')
self.assertEqual(1, weight)
def test_load(self):
self.graph.load('inputs/test1.txt')
self.assertEqual(6, self.graph.vertex_count())
self.assertEqual(1, self.graph.get_edge('s', 'u'))
self.assertEqual(2, self.graph.get_edge('s', 'v'))
self.assertEqual(4, self.graph.get_edge('s', 'x'))
self.assertEqual(1, self.graph.get_edge('u', 'x'))
self.assertEqual(3, self.graph.get_edge('u', 'y'))
self.assertEqual(2, self.graph.get_edge('v', 'x'))
self.assertEqual(3, self.graph.get_edge('v', 'z'))
self.assertEqual(1, self.graph.get_edge('x', 'y'))
self.assertEqual(2, self.graph.get_edge('x', 'z'))
| [
[
1,
0,
0.0377,
0.0189,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0566,
0.0189,
0,
0.66,
0.5,
941,
0,
1,
0,
0,
941,
0,
0
],
[
3,
0,
0.5566,
0.9057,
0,
0.66,... | [
"import unittest",
"from graphs.matrixgraph import MatrixGraph",
"class MatrixGraphTest(unittest.TestCase):\n\n def setUp(self):\n self.graph = MatrixGraph()\n\n def test_add_two_vertices(self):\n self.graph.add_vertex('V1')\n self.graph.add_vertex('V2')",
" def setUp(self):\n ... |
# -*- coding: utf-8 -*-
import unittest
from structures.hashtable import HashTable
class HashTableTest(unittest.TestCase):
def test_add_and_retrieve_item(self):
hash = HashTable()
key = "one"
hash.set(key, 1)
self.assertEqual(1, hash.get(key))
def test_add_and_retrieve_two_items(self):
pairs = [["one", 1], ["two", 2]]
hash = HashTable()
for pair in pairs:
key = pair[0]
data = pair[1]
hash.set(key, data)
count = 1
for pair in pairs:
key = pair[0]
self.assertEqual(count, hash.get(key))
count += 1
def test_add_and_retrieve_fifteen_items(self):
pairs = [["one", 1], ["two", 2], ["three", 3],
["four", 4], ["five", 5], ["six", 6],
["seven", 7], ["eight", 8], ["nine", 9],
["ten", 10], ["eleven", 11], ["twelve", 12],
["thirteen", 13], ["fourteen", 14], ["fifteen", 15]]
hash = HashTable()
for pair in pairs:
key = pair[0]
data = pair[1]
hash.set(key, data)
count = 1
for pair in pairs:
key = pair[0]
self.assertEqual(count, hash.get(key))
count += 1
def test_add_and_retrieve_fifteen_items_similar_keys(self):
hash = HashTable()
n = 100
for i in range(n):
key = 'V' + str(i)
hash.set(key, i)
for i in range(n):
key = 'V' + str(i)
self.assertEqual(i, hash.get(key))
def test_add__fifteen_items_similar_keys_retrieve_different(self):
hash = HashTable()
n = 100
for i in range(n):
key = 'V' + str(i)
hash.set(key, i)
self.assertEqual(None, hash['does not exist'])
self.assertEqual(None, hash['V101'])
self.assertEqual(None, hash['V1.1'])
def test_add_some_items_and_get_values_list(self):
hash = HashTable()
n = 15
for i in range(n):
key = 'V' + str(i)
hash.set(key, i)
values = hash.get_values()
self.assertEqual(n, len(values))
def test_add_items_to_force_rehashing_and_get_values_list(self):
hash = HashTable()
n = 1000
for i in range(n):
key = 'V' + str(i)
hash.set(key, i)
values = hash.get_values()
self.assertEqual(n, len(values))
| [
[
1,
0,
0.0222,
0.0111,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0333,
0.0111,
0,
0.66,
0.5,
112,
0,
1,
0,
0,
112,
0,
0
],
[
3,
0,
0.5333,
0.9444,
0,
0.66,... | [
"import unittest",
"from structures.hashtable import HashTable",
"class HashTableTest(unittest.TestCase):\n\n def test_add_and_retrieve_item(self):\n hash = HashTable()\n key = \"one\"\n hash.set(key, 1)\n\n self.assertEqual(1, hash.get(key))",
" def test_add_and_retrieve_ite... |
# -*- coding: utf-8 -*-
import unittest
from structures.list import List
class ListTest(unittest.TestCase):
def test_create_list_check_empty(self):
list = List()
self.assertTrue(list.empty())
def test_create_list_add_element_check_emtpy(self):
list = List()
list.add(1)
self.assertFalse(list.empty())
def test_add_two_items_pop_them_check_values_check_empty(self):
list = List()
list.add(1)
list.add(2)
value = list.pop()
self.assertEqual(2, value)
value = list.pop()
self.assertEqual(1, value)
self.assertTrue(list.empty())
def test_add_items_first_pop_them_check_values_check_empty(self):
list = List()
list.add_first(1)
list.add_first(2)
value = list.pop()
self.assertEqual(1, value)
value = list.pop()
self.assertEqual(2, value)
self.assertTrue(list.empty())
def test_add_items_use_iterator(self):
list = List()
list.add_first(1)
list.add_first(2)
list.add_first(3)
count = 0
for item in list:
count += 1
self.assertEqual(3, count)
| [
[
1,
0,
0.0339,
0.0169,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0508,
0.0169,
0,
0.66,
0.5,
593,
0,
1,
0,
0,
593,
0,
0
],
[
3,
0,
0.5508,
0.9153,
0,
0.66,... | [
"import unittest",
"from structures.list import List",
"class ListTest(unittest.TestCase):\n\n def test_create_list_check_empty(self):\n list = List()\n\n self.assertTrue(list.empty())\n\n def test_create_list_add_element_check_emtpy(self):",
" def test_create_list_check_empty(self):\n ... |
# -*- coding: utf-8 -*-
import unittest
from graphs.listgraph import ListGraph
class ListGraphTest(unittest.TestCase):
def setUp(self):
self.graph = ListGraph()
def test_add_two_vertices(self):
self.graph.add_vertex('V1')
self.graph.add_vertex('V2')
self.assertEqual(2, self.graph.vertex_count())
def test_add_fifteen_vertices(self):
n = 15
for i in range(n):
name = 'V' + str(i)
self.graph.add_vertex(name)
self.assertEqual(n, self.graph.vertex_count())
def test_add_vertices_add_edge_check_edge(self):
n = 15
for i in range(n):
name = 'V' + str(i)
self.graph.add_vertex(name)
self.graph.add_edge('V4', 'V5', 1)
weight = self.graph.get_edge('V4', 'V5')
self.assertEqual(1, weight)
self.assertEqual(1, self.graph.edge_count())
def test_load(self):
self.graph.load('inputs/test1.txt')
self.assertEqual(6, self.graph.vertex_count())
self.assertEqual(1, self.graph.get_edge('s', 'u'))
self.assertEqual(2, self.graph.get_edge('s', 'v'))
self.assertEqual(4, self.graph.get_edge('s', 'x'))
self.assertEqual(1, self.graph.get_edge('u', 'x'))
self.assertEqual(3, self.graph.get_edge('u', 'y'))
self.assertEqual(2, self.graph.get_edge('v', 'x'))
self.assertEqual(3, self.graph.get_edge('v', 'z'))
self.assertEqual(1, self.graph.get_edge('x', 'y'))
self.assertEqual(2, self.graph.get_edge('x', 'z'))
| [
[
1,
0,
0.037,
0.0185,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0556,
0.0185,
0,
0.66,
0.5,
217,
0,
1,
0,
0,
217,
0,
0
],
[
3,
0,
0.5556,
0.9074,
0,
0.66,
... | [
"import unittest",
"from graphs.listgraph import ListGraph",
"class ListGraphTest(unittest.TestCase):\n\n def setUp(self):\n self.graph = ListGraph()\n\n def test_add_two_vertices(self):\n self.graph.add_vertex('V1')\n self.graph.add_vertex('V2')",
" def setUp(self):\n sel... |
# -*- coding: utf-8 *-*
import unittest
from graphs.listgraph import ListGraph
from graphs.matrixgraph import MatrixGraph
class DijkstraTest(unittest.TestCase):
def test_dijkstra_matrix(self):
self.run_test1(MatrixGraph())
self.run_test2(MatrixGraph())
self.run_test3(MatrixGraph())
def test_dijkstra_list(self):
self.run_test1(ListGraph())
self.run_test2(ListGraph())
self.run_test3(ListGraph())
def run_test1(self, graph):
graph.load('inputs/test1.txt')
path = graph.run_dijkstra('s', 'z')
self.assertEqual('s', path.pop_first())
self.assertEqual('u', path.pop_first())
self.assertEqual('x', path.pop_first())
self.assertEqual('z', path.pop_first())
def run_test2(self, graph):
graph.load('inputs/test2.txt')
path = graph.run_dijkstra('s', 'z')
self.assertEqual('s', path.pop_first())
self.assertEqual('u', path.pop_first())
self.assertEqual('v', path.pop_first())
self.assertEqual('x', path.pop_first())
self.assertEqual('y', path.pop_first())
self.assertEqual('z', path.pop_first())
def run_test3(self, graph):
graph = ListGraph()
graph.load('inputs/test3.txt')
path = graph.run_dijkstra('a', 'e')
self.assertEqual('a', path.pop_first())
self.assertEqual('c', path.pop_first())
self.assertEqual('e', path.pop_first())
| [
[
1,
0,
0.0408,
0.0204,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0612,
0.0204,
0,
0.66,
0.3333,
217,
0,
1,
0,
0,
217,
0,
0
],
[
1,
0,
0.0816,
0.0204,
0,
0.... | [
"import unittest",
"from graphs.listgraph import ListGraph",
"from graphs.matrixgraph import MatrixGraph",
"class DijkstraTest(unittest.TestCase):\n\n def test_dijkstra_matrix(self):\n self.run_test1(MatrixGraph())\n self.run_test2(MatrixGraph())\n self.run_test3(MatrixGraph())\n\n ... |
# -*- coding: utf-8 *-*
import unittest
from graphs.listgraph import ListGraph
from graphs.matrixgraph import MatrixGraph
class KruskalTest(unittest.TestCase):
def test_kruskal_matrix(self):
self.run_test1(MatrixGraph())
self.run_test2(MatrixGraph())
self.run_test3(MatrixGraph())
def test_kruskal_list(self):
self.run_test1(ListGraph())
self.run_test2(ListGraph())
self.run_test3(ListGraph())
def run_test1(self, graph):
graph.load('inputs/test1.txt')
count = graph.vertex_count() - 1
self.edges = graph.run_kruskal()
self.assertEqual(count, self.edges.count())
self.assertTrue(self.contains('s', 'u'))
self.assertTrue(self.contains('u', 'x'))
self.assertTrue(self.contains('x', 'y'))
self.assertTrue(self.contains('x', 'z'))
self.assertTrue(self.contains('s', 'v') or self.contains('v', 'x'))
def run_test2(self, graph):
graph.load('inputs/test2.txt')
count = graph.vertex_count() - 1
self.edges = graph.run_kruskal()
self.assertEqual(count, self.edges.count())
self.assertTrue(self.contains('s', 'u'))
self.assertTrue(self.contains('u', 'v'))
self.assertTrue(self.contains('v', 'x'))
self.assertTrue(self.contains('x', 'y'))
self.assertTrue(self.contains('y', 'z'))
def run_test3(self, graph):
graph.load('inputs/test3.txt')
count = graph.vertex_count() - 1
self.edges = graph.run_kruskal()
self.assertEqual(count, self.edges.count())
self.assertTrue(self.contains('a', 'b'))
self.assertTrue(self.contains('a', 'c'))
self.assertTrue(self.contains('c', 'd'))
self.assertTrue(self.contains('d', 'e'))
def contains(self, src, dest):
for edge in self.edges:
source = edge[0]
destination = edge[1]
if ((source == src and destination == dest) or
(source == dest and destination == src)):
return True
return False
| [
[
1,
0,
0.0286,
0.0143,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0429,
0.0143,
0,
0.66,
0.3333,
217,
0,
1,
0,
0,
217,
0,
0
],
[
1,
0,
0.0571,
0.0143,
0,
0.... | [
"import unittest",
"from graphs.listgraph import ListGraph",
"from graphs.matrixgraph import MatrixGraph",
"class KruskalTest(unittest.TestCase):\n\n def test_kruskal_matrix(self):\n self.run_test1(MatrixGraph())\n self.run_test2(MatrixGraph())\n self.run_test3(MatrixGraph())\n\n de... |
# -*- coding: utf-8 *-*
import unittest
from graphs.listgraph import ListGraph
from graphs.matrixgraph import MatrixGraph
class PrimTest(unittest.TestCase):
def test_prim_matrix(self):
self.run_test1(MatrixGraph())
self.run_test2(MatrixGraph())
self.run_test3(MatrixGraph())
def test_prim_list(self):
self.run_test1(ListGraph())
self.run_test2(ListGraph())
self.run_test3(ListGraph())
def run_test1(self, graph):
graph.load('inputs/test1.txt')
count = graph.vertex_count() - 1
self.edges = graph.run_prim()
self.assertEqual(count, self.edges.count())
self.assertTrue(self.contains('s', 'u'))
self.assertTrue(self.contains('u', 'x'))
self.assertTrue(self.contains('x', 'y'))
self.assertTrue(self.contains('x', 'z'))
self.assertTrue(self.contains('s', 'v') or self.contains('v', 'x'))
def run_test2(self, graph):
graph.load('inputs/test2.txt')
count = graph.vertex_count() - 1
self.edges = graph.run_prim()
self.assertEqual(count, self.edges.count())
self.assertTrue(self.contains('s', 'u'))
self.assertTrue(self.contains('u', 'v'))
self.assertTrue(self.contains('v', 'x'))
self.assertTrue(self.contains('x', 'y'))
self.assertTrue(self.contains('y', 'z'))
def run_test3(self, graph):
graph.load('inputs/test3.txt')
count = graph.vertex_count() - 1
self.edges = graph.run_prim()
self.assertEqual(count, self.edges.count())
self.assertTrue(self.contains('a', 'b'))
self.assertTrue(self.contains('a', 'c'))
self.assertTrue(self.contains('c', 'd'))
self.assertTrue(self.contains('d', 'e'))
def contains(self, src, dest):
for edge in self.edges:
source = edge[0]
destination = edge[1]
if ((source == src and destination == dest) or
(source == dest and destination == src)):
return True
return False
| [
[
1,
0,
0.0286,
0.0143,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0429,
0.0143,
0,
0.66,
0.3333,
217,
0,
1,
0,
0,
217,
0,
0
],
[
1,
0,
0.0571,
0.0143,
0,
0.... | [
"import unittest",
"from graphs.listgraph import ListGraph",
"from graphs.matrixgraph import MatrixGraph",
"class PrimTest(unittest.TestCase):\n\n def test_prim_matrix(self):\n self.run_test1(MatrixGraph())\n self.run_test2(MatrixGraph())\n self.run_test3(MatrixGraph())\n\n def test... |
# -*- coding: utf-8 -*-
import unittest
from structures.heap import Heap
class HeapTest(unittest.TestCase):
def test_add_n_elements_verify_order(self):
heap = Heap()
n = 65
#Insert elements in reverse order
for i in range(n):
heap.insert(n - i, n - i)
#Then verify they are extracted from min to max
min = None
while not heap.empty():
newmin = heap.extract_min()
if min is not None:
self.assertLessEqual(min, newmin)
min = newmin
def test_add_3_elements_change_key_verify_heap_order(self):
heap = Heap()
n = 3
#Insert elements in reverse order
for i in range(n):
heap.insert(n - i, n - i)
heap.change_data(0, 4)
heap.change_key(0, 4)
#Then verify they are extracted from min to max
min = None
while not heap.empty():
newmin = heap.extract_min()
if min is not None:
self.assertLessEqual(min, newmin)
min = newmin
| [
[
1,
0,
0.0513,
0.0256,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0769,
0.0256,
0,
0.66,
0.5,
909,
0,
1,
0,
0,
909,
0,
0
],
[
3,
0,
0.5769,
0.8718,
0,
0.66,... | [
"import unittest",
"from structures.heap import Heap",
"class HeapTest(unittest.TestCase):\n\n def test_add_n_elements_verify_order(self):\n heap = Heap()\n n = 65\n #Insert elements in reverse order\n for i in range(n):\n heap.insert(n - i, n - i)",
" def test_add... |
# -*- coding: utf-8 -*-
from structures.hashtable import HashTable
from structures.list import List
from structures.heap import Heap
from structures.unionfind import UnionFind
from graphs.graph import *
class MatrixGraph(Graph):
def __init__(self):
self.__adjacency = []
self.__vertices = HashTable()
def add_vertex(self, name):
self.__vertices[name] = self.__length()
self.__adjacency.append([None] * self.__length())
for i in range(self.__length()):
self.__adjacency[i].append(None)
def add_edge(self, source, dest, weight):
indexsource = self.__vertices[source]
indexdest = self.__vertices[dest]
if indexsource is not None and indexdest is not None:
self.__adjacency[indexsource][indexdest] = weight
else:
name = source
if indexdest is None:
name = dest
raise Exception("Vertex '" + name + "' doesn't exist")
def vertex_count(self):
return self.__length()
def edge_count(self):
count = 0
length = self.__length() - 1
for i in range(length):
for j in range(length):
if self.__adjacency[i][j] is not None:
count += 1
return count
def get_edge(self, source, dest):
indexsource = self.__vertices[source]
indexdest = self.__vertices[dest]
return self.__adjacency[indexsource][indexdest]
def __length(self):
return len(self.__adjacency)
def run_dijkstra(self, source, destination):
if self.__vertices[source] is None:
raise Exception('Source vertex does not exist!')
if self.__vertices[destination] is None:
raise Exception('Destination vertex does not exist!')
# Stores the name of the previous node for the key
previous = HashTable()
# Stores the calculated cost so far for each node
# from the source
cost = HashTable()
# Prority Queue
heap = Heap()
# Represents the value infinity
infinity = float("inf")
# Initialize all costs to infinity
for name in self.__vertices.get_keys():
cost[name] = infinity
# Set cost to source node to zero
# and add to heap
cost[source] = 0
heap.insert(cost[source], source)
# Count how many nodes have been added to S so far
count = 0
# While there are nodes missing
# and the heap is not empty (this condition is added
# in case the graph is not connected)
while count < self.vertex_count() and not heap.empty():
name = heap.extract_min()
count += 1
# Get index for vertex in matrix
index = self.__vertices[name]
# Look for adjacent nodes
for i in range(self.__length()):
weight = self.__adjacency[index][i]
if weight is not None:
# Calculate new cost
dest = self.__vertices.get_key(i)
newcost = cost[name] + weight
# Update cost if it's smaller and is not in S
if newcost < cost[dest]:
cost[dest] = newcost
previous[dest] = name
heap.insert(newcost, dest)
# Build path from registered previous vertices
path = List()
current = destination
while previous[current] is not None:
path.add_first(current)
current = previous[current]
#Add path source
path.add_first(source)
return path
def run_kruskal(self):
edges = List()
union = UnionFind(self.__vertices.get_keys())
heap = Heap()
#Add all edges to the priority queue
for name in self.__vertices.get_keys():
i = self.__vertices[name]
for j in range(self.__length()):
weight = self.__adjacency[i][j]
if weight is not None:
heap.insert(weight, [i, j, weight])
count = self.vertex_count() - 1
# While there is more than one component
# and the heap is not empty (this condition is added
# in case the graph is not connected)
edges = 0
while edges < count and not heap.empty():
edge = heap.extract_min()
source = self.__vertices.get_key(edge[0])
dest = self.__vertices.get_key(edge[1])
setsource = union.find(source)
setdest = union.find(dest)
if setsource != setdest:
union.union(setsource, setdest)
#edges.add([source, dest])
edges += 1
return edges
def run_prim(self):
edges = List()
cost = HashTable()
heap = Heap()
previous = HashTable()
infinity = float("inf")
for name in self.__vertices.get_keys():
cost[name] = infinity
source = self.__vertices.get_keys()[0]
cost[source] = 0
heap.insert(cost[source], source)
count = 0
while count < self.vertex_count() and not heap.empty():
name = heap.extract_min()
index = self.__vertices[name]
count += 1
# Look for adjacent nodes
for i in range(self.__length()):
weight = self.__adjacency[index][i]
if weight is not None:
dest = self.__vertices.get_key(i)
newcost = weight
if newcost < cost[dest]:
cost[dest] = newcost
previous[dest] = [name, dest]
heap.insert(newcost, dest)
for name in self.__vertices.get_keys():
if previous[name] is not None:
edges.add(previous[name])
return edges
def to_string(self):
output = ''
for i in range(self.__length()):
output += self.__vertices.get_keys()[i] + ': '
for j in range(self.__length()):
edge = self.__adjacency[i][j]
destination = edge[1]
weight = edge[2]
if edge is not None:
output += '(' + destination + ', '
output += str(weight) + ') '
output += '\n'
return output
def save(self, filename):
file = open(filename, 'w')
for name in self.__vertices.get_keys():
rowIndex = self.__vertices.get_key_index(name)
file.write(name + Graph.NAME_SEPARATOR)
count = 0
for colIndex in range(self.__length()):
weight = self.__adjacency[rowIndex][colIndex]
if weight is not None:
if count != 0:
file.write(Graph.ADJ_LIST_SEPARATOR)
file.write(str(colIndex) + Graph.WEIGHT_SEPARATOR)
file.write(str(weight))
count += 1
file.write('\n')
file.close()
def load(self, filename):
file = open(filename)
adj = HashTable()
for line in file:
pair = line.split(Graph.NAME_SEPARATOR)
name = pair[0].strip()
adjlist = pair[1].strip()
self.add_vertex(name)
adj[name] = adjlist
for name in self.__vertices.get_keys():
list = adj[name]
edges = list.split(Graph.ADJ_LIST_SEPARATOR)
for edge in edges:
edge = edge.strip()
if len(edge) > 0:
words = edge.split(Graph.WEIGHT_SEPARATOR)
destIndex = int(words[0].strip())
dest = self.__vertices.get_key(destIndex)
weight = None
weight = int(words[1])
self.add_edge(name, dest, weight)
file.close()
| [
[
1,
0,
0.0084,
0.0042,
0,
0.66,
0,
112,
0,
1,
0,
0,
112,
0,
0
],
[
1,
0,
0.0126,
0.0042,
0,
0.66,
0.2,
593,
0,
1,
0,
0,
593,
0,
0
],
[
1,
0,
0.0168,
0.0042,
0,
0.6... | [
"from structures.hashtable import HashTable",
"from structures.list import List",
"from structures.heap import Heap",
"from structures.unionfind import UnionFind",
"from graphs.graph import *",
"class MatrixGraph(Graph):\n\n def __init__(self):\n self.__adjacency = []\n self.__vertices = ... |
# -*- coding: utf-8 -*-
from structures.list import List
from structures.heap import Heap
from structures.unionfind import UnionFind
from graphs.graph import *
class ListGraph(Graph):
def __init__(self, size=None):
self.__vertices = HashTable(size)
def add_vertex(self, name):
self.__vertices[name] = List()
def add_edge(self, src, dest, weight=None):
adjacents = self.__vertices[src]
exists = False
for edge in adjacents:
edgeDest = self.__vertices.get_key(edge[0])
if dest == edgeDest:
exists = True
break
if not exists:
destIndex = self.__vertices.get_key_index(dest)
edge = [destIndex, weight]
adjacents.add(edge)
def get_edge(self, src, dest):
adjacents = self.__vertices[src]
for edge in adjacents:
edgeDest = self.__vertices.get_key(edge[0])
if edgeDest == dest:
return edge[1]
return None
def vertex_count(self):
return self.__vertices.count()
def edge_count(self):
count = 0
for name in self.__vertices.get_keys():
adjacents = self.__vertices[name]
count += adjacents.count()
return count
def get_vertex(self, index):
return self.__vertices.get_key(index)
def run_dijkstra(self, source, destination):
if self.__vertices[source] is None:
raise Exception('Source vertex does not exist!')
if self.__vertices[destination] is None:
raise Exception('Destination vertex does not exist!')
# Stores the name of the previous node for the key
previous = HashTable()
# Stores the calculated distance so far for each node
# from the source
cost = HashTable()
# Prority Queue
heap = Heap()
# Represents the value infinity
infinity = float("inf")
# Initialize all costs to infinity
for name in self.__vertices.get_keys():
cost[name] = infinity
# Set the cost to source node to zero
# and add to heap
cost[source] = 0
heap.insert(cost[source], source)
# Count how many nodes have been added to S so far
count = 0
# While there are nodes missing
# and the heap is not empty (this condition is added
# in case the graph is not connected)
while count < self.vertex_count() and not heap.empty():
name = heap.extract_min()
count += 1
# Get vertex
adjacents = self.__vertices[name]
# Look for adjacent nodes
for edge in adjacents:
# Calculate new cost
dest = self.__vertices.get_key(edge[0])
weight = edge[1]
newcost = cost[name] + weight
# Update cost if it's smaller
if newcost < cost[dest]:
cost[dest] = newcost
previous[dest] = name
heap.insert(newcost, dest)
# Build path from registered previous vertices
path = List()
current = destination
while previous[current] is not None:
path.add_first(current)
current = previous[current]
path.add_first(source)
return path
def run_kruskal(self):
edges = List()
union = UnionFind(self.__vertices.get_keys())
heap = Heap()
#Add all edges to the priority queue
indexSrc = 0
for name in self.__vertices.get_keys():
adjacents = self.__vertices[name]
for edge in adjacents:
weight = edge[1]
indexDest = edge[0]
heap.insert(weight, [indexSrc, indexDest])
indexSrc += 1
count = self.vertex_count() - 1
edges = 0
# While there is more than one component
# and the heap is not empty (this condition is added
# in case the graph is not connected)
while edges < count and not heap.empty():
edge = heap.extract_min()
source = self.__vertices.get_key(edge[0])
destination = self.__vertices.get_key(edge[1])
setsource = union.find(source)
setdest = union.find(destination)
if setsource != setdest:
union.union(setsource, setdest)
#edges.add(edge)
edges += 1
return edges
def run_prim(self):
edges = List()
cost = HashTable()
heap = Heap()
previous = HashTable()
infinity = float("inf")
for name in self.__vertices.get_keys():
cost[name] = infinity
source = self.__vertices.get_keys()[0]
cost[source] = 0
heap.insert(cost[source], source)
count = 0
while count < self.vertex_count() and not heap.empty():
name = heap.extract_min()
adjacents = self.__vertices[name]
count += 1
for edge in adjacents:
newcost = edge[1]
dest = self.__vertices.get_key(edge[0])
if newcost < cost[dest]:
cost[dest] = newcost
previous[dest] = [name, dest]
heap.insert(newcost, dest)
for name in self.__vertices.get_keys():
if previous[name] is not None:
edges.add(previous[name])
return edges
def to_string(self):
output = ''
for name in self.__vertices.get_keys():
adjacents = self.__vertices[name]
output += name + ':'
if adjacents.count() > 0:
for edge in adjacents:
output += '(' + edge[0] + ', '
output += str(edge[1]) + ') '
output += '\n'
return output
def save(self, filename):
file = open(filename, 'w')
for name in self.__vertices.get_keys():
adjacents = self.__vertices[name]
file.write(name + Graph.NAME_SEPARATOR)
if adjacents.count() > 0:
count = 0
for edge in adjacents:
destinationIndex = edge[0]
weight = edge[1]
if count != 0:
file.write(Graph.ADJ_LIST_SEPARATOR)
file.write(str(destinationIndex) + Graph.WEIGHT_SEPARATOR)
file.write(str(weight))
count += 1
file.write('\n')
file.close()
def load(self, filename):
file = open(filename)
for line in file:
pair = line.split(Graph.NAME_SEPARATOR)
name = pair[0].strip()
list = pair[1].strip()
self.add_vertex(name)
edges = list.split(Graph.ADJ_LIST_SEPARATOR)
for edge in edges:
edge = edge.strip()
if len(edge) > 0:
words = edge.split(Graph.WEIGHT_SEPARATOR)
index = int(words[0].strip())
weight = int(words[1])
self.__vertices[name].add([index, weight])
file.close()
| [
[
1,
0,
0.0088,
0.0044,
0,
0.66,
0,
593,
0,
1,
0,
0,
593,
0,
0
],
[
1,
0,
0.0132,
0.0044,
0,
0.66,
0.25,
909,
0,
1,
0,
0,
909,
0,
0
],
[
1,
0,
0.0175,
0.0044,
0,
0.... | [
"from structures.list import List",
"from structures.heap import Heap",
"from structures.unionfind import UnionFind",
"from graphs.graph import *",
"class ListGraph(Graph):\n\n def __init__(self, size=None):\n self.__vertices = HashTable(size)\n\n def add_vertex(self, name):\n self.__ver... |
# -*- coding: utf-8 -*-
from graphs.matrixgraph import MatrixGraph
import random
import math
class Generator():
def __init__(self):
pass
def generate(self, vcount, factor, filename):
if factor > 1:
raise Exception('Invalid density factor.')
maxedges = (vcount - 1) * vcount
ecount = int(maxedges * factor)
if ecount < vcount:
ecount = vcount
# if ecount > 10000:
# ecount = 10000
# print 'Going for', ecount, 'edges.'
graph = MatrixGraph()
for i in range(vcount):
name = 'V' + str(i)
graph.add_vertex(name)
#self.random_generation(graph, vcount, ecount)
self.secuential_generation(graph, vcount, ecount)
graph.save(filename)
def random_generation(self, graph, vcount, ecount):
for i in range(ecount):
while True:
edge = self.random_edge(vcount)
if graph.get_edge(edge[0], edge[1]) is None:
graph.add_edge(edge[0], edge[1], edge[2])
break
else:
print 'This edge already exists. Try again.'
print edge[0], edge[1], graph.edge_count()
def secuential_generation(self, graph, vcount, ecount):
edgespervertex = int(math.ceil(ecount / vcount))
vdelta = int(vcount / edgespervertex)
for src in range(vcount):
if src % 100 == 0:
print src
for dest in range(src + 1, vcount + src + 1, vdelta):
if dest >= vcount:
dest = dest - vcount
if src != dest:
weight = random.randint(0, 10)
source = 'V' + str(src)
destination = 'V' + str(dest)
#print source, '-', destination, ':', weight
graph.add_edge(source, destination, weight)
def random_edge(self, vcount, src=None):
if src is None:
src = random.randint(0, vcount - 1)
dest = src
while dest == src:
dest = random.randint(0, vcount - 1)
weight = random.randint(0, 10)
src = 'V' + str(src)
dest = 'V' + str(dest)
return [src, dest, weight]
| [
[
1,
0,
0.0267,
0.0133,
0,
0.66,
0,
941,
0,
1,
0,
0,
941,
0,
0
],
[
1,
0,
0.04,
0.0133,
0,
0.66,
0.3333,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0533,
0.0133,
0,
0.... | [
"from graphs.matrixgraph import MatrixGraph",
"import random",
"import math",
"class Generator():\n\n def __init__(self):\n pass\n\n def generate(self, vcount, factor, filename):\n if factor > 1:\n raise Exception('Invalid density factor.')",
" def __init__(self):\n ... |
# -*- coding: utf-8 -*-
from structures.hashtable import HashTable
class Graph():
NAME_SEPARATOR = '->'
ADJ_LIST_SEPARATOR = '||'
WEIGHT_SEPARATOR = ';'
def __init__(self):
pass
def load(self, filename):
pass
def save(self, filename):
raise Exception('save() not implemented.')
def show(self):
print self.to_string()
def add_vertex(self, name):
pass
def add_edge(self, source, destination, weight):
pass
def vertex_count(self):
pass
def edge_count(self):
pass
def run_kruskal(self):
pass
def run_prim(self):
pass
def run_dijkstra(self, source, destination):
pass
| [
[
1,
0,
0.0476,
0.0238,
0,
0.66,
0,
112,
0,
1,
0,
0,
112,
0,
0
],
[
3,
0,
0.5595,
0.9048,
0,
0.66,
1,
90,
0,
11,
0,
0,
0,
0,
3
],
[
14,
1,
0.1667,
0.0238,
1,
0.11,
... | [
"from structures.hashtable import HashTable",
"class Graph():\n\n NAME_SEPARATOR = '->'\n ADJ_LIST_SEPARATOR = '||'\n WEIGHT_SEPARATOR = ';'\n\n def __init__(self):\n pass",
" NAME_SEPARATOR = '->'",
" ADJ_LIST_SEPARATOR = '||'",
" WEIGHT_SEPARATOR = ';'",
" def __init__(sel... |
# -*- coding: utf-8 -*-
import sys
import unittest
from tests.matrixgraph import *
from tests.listgraph import *
# Entry point: run every graph test case imported above via unittest discovery.
if __name__ == '__main__':
    unittest.main()
| [
[
1,
0,
0.2222,
0.1111,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.3333,
0.1111,
0,
0.66,
0.25,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.5556,
0.1111,
0,
0.66... | [
"import sys",
"import unittest",
"from tests.matrixgraph import *",
"from tests.listgraph import *",
"if __name__ == '__main__':\n unittest.main()",
" unittest.main()"
] |
# -*- coding: utf-8 -*-
import sys
import unittest
from tests.hashtable import *
from tests.heap import *
from tests.unionfind import *
from tests.list import *
# Entry point: run every data-structure test case imported above.
if __name__ == '__main__':
    unittest.main()
| [
[
1,
0,
0.1818,
0.0909,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.2727,
0.0909,
0,
0.66,
0.1667,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.4545,
0.0909,
0,
0.... | [
"import sys",
"import unittest",
"from tests.hashtable import *",
"from tests.heap import *",
"from tests.unionfind import *",
"from tests.list import *",
"if __name__ == '__main__':\n unittest.main()",
" unittest.main()"
] |
# -*- coding: utf-8 -*-
from structures.hashtable import HashTable
class UnionFind():
    """Disjoint-set forest with union by size.

    Each entry in ``self.sets`` is a node ``[item, parent_node, size]``;
    a root node has ``parent_node is None``.  Callers are expected to
    pass representatives (results of ``find``) into ``union``.
    """
    def __init__(self, items):
        self.sets = HashTable()
        for element in items:
            self.sets[element] = [element, None, 1]
        self.__count = len(items)
    def find(self, item):
        """Return the representative item of the set containing ``item``."""
        current = self.sets[item]
        # Follow parent links up to the root (no path compression).
        while current[1] is not None:
            current = current[1]
        return current[0]
    def union(self, item1, item2):
        """Merge the sets of two representatives using union by size."""
        if item1 == item2:
            return
        first = self.sets[item1]
        second = self.sets[item2]
        # Attach the smaller tree beneath the larger one.
        if first[2] < second[2]:
            first[1] = second
            second[2] += first[2]
        else:
            second[1] = first
            first[2] += second[2]
        self.__count -= 1
    def count(self):
        """Return the number of disjoint sets currently tracked."""
        return self.__count
#class UnionFindNode():
#
# def __init__(self, item):
# self.item = item
# self.set = None
# self.size = 1
| [
[
1,
0,
0.0476,
0.0238,
0,
0.66,
0,
112,
0,
1,
0,
0,
112,
0,
0
],
[
3,
0,
0.4643,
0.7143,
0,
0.66,
1,
845,
0,
4,
0,
0,
0,
0,
2
],
[
2,
1,
0.2262,
0.1429,
1,
0.68,
... | [
"from structures.hashtable import HashTable",
"class UnionFind():\n\n def __init__(self, items):\n self.sets = HashTable()\n for item in items:\n node = [item, None, 1]\n self.sets[item] = node\n self.__count = len(items)",
" def __init__(self, items):\n s... |
# -*- coding: utf-8 -*-
class HashTable():
    """Open-addressing hash table.

    Slots hold ``[key_index, value]`` pairs, where ``key_index`` points
    into the insertion-ordered ``__keys`` list.  Collisions are resolved
    with a secondary hash followed by linear probing.
    """
    __initial_size = 10000
    def __init__(self, size=None):
        self.__size = size
        if size is None:
            self.__size = HashTable.__initial_size
        self.items = [None] * self.__size
        self.__keys = []
    def count(self):
        """Return the number of distinct keys stored."""
        return len(self.__keys)
    def set(self, key, value):
        """Store ``value`` under ``key``, overwriting any previous value.

        Bug fix: the original appended ``key`` to ``__keys`` on every call,
        so re-setting an existing key inflated ``count()`` and duplicated
        the key in ``get_keys()``.  The key list now grows only on the
        first insert of a key.
        """
        index = self.__get_index(key)
        item = self.items[index]
        if item is not None and self.__keys[item[0]] == key:
            item[1] = value
            return
        keyIndex = self.count()
        self.__keys.append(key)
        if self.count() > 0.7 * self.__size:
            self.__rehash()
            # The table was resized, so the target slot must be recomputed.
            index = self.__get_index(key)
        self.items[index] = [keyIndex, value]
    def get(self, key):
        """Return the value stored under ``key`` or None when absent."""
        index = self.__get_index(key)
        value = None
        if index is not None and self.items[index] is not None:
            value = self.items[index][1]
        return value
    def get_values(self):
        """Return all stored values (slot order, effectively arbitrary)."""
        values = []
        for item in self.items:
            if item is not None:
                values.append(item[1])
        return values
    def get_keys(self):
        """Return the keys in insertion order."""
        return self.__keys
    def get_key(self, index):
        """Return the key inserted at position ``index``."""
        return self.__keys[index]
    def get_key_index(self, key):
        """Return the insertion index of ``key`` (the key must exist)."""
        index = self.__get_index(key)
        item = self.items[index]
        return item[0]
    def __get_index(self, key):
        """Locate the slot for ``key``: primary hash, then secondary hash
        plus linear probing while a different key occupies the slot."""
        index = self.__primary_hash(key) % self.__size
        if self.__collision(key, index):
            index = self.__secondary_hash(key) % self.__size
            while self.__collision(key, index):
                index = (index + 1) % self.__size
        return index
    def __collision(self, key, index):
        # A slot collides when it is occupied by a different key.
        item = self.items[index]
        return (item is not None and self.__keys[item[0]] != key)
    def __get_hash(self, key, pos=0):
        """Double-hash combination (kept for compatibility; currently unused)."""
        hash = self.__primary_hash(key)
        hash += pos * self.__secondary_hash(key)
        return hash
    def __primary_hash(self, key):
        """FNV-1 hash of ``str(key)``."""
        key = str(key)
        h = 2166136261
        for letter in key:
            h = (h * 16777619) ^ ord(letter)
        return h
    def __secondary_hash(self, key):
        """Shift-Add-XOR hash of ``str(key)``."""
        key = str(key)
        h = 0
        for letter in key:
            h ^= (h << 5) + (h >> 2) + ord(letter)
        return h
    def __rehash(self):
        """Grow the table and reinsert every occupied slot."""
        olditems = self.items
        # Grow aggressively while small to amortise the O(n) reinsertion.
        factor = 2
        if self.count() < 50000:
            factor = 4
        self.__size = int(factor * self.__size)
        self.items = [None] * self.__size
        for item in olditems:
            if item is not None:
                itemKey = self.__keys[item[0]]
                self.items[self.__get_index(itemKey)] = item
    def __getitem__(self, key):
        return self.get(key)
    def __setitem__(self, key, value):
        return self.set(key, value)
| [
[
3,
0,
0.5149,
0.9776,
0,
0.66,
0,
631,
0,
16,
0,
0,
0,
0,
25
],
[
14,
1,
0.0373,
0.0075,
1,
0.08,
0,
323,
1,
0,
0,
0,
0,
1,
0
],
[
2,
1,
0.0784,
0.0597,
1,
0.08,
... | [
"class HashTable():\n __initial_size = 10000\n\n def __init__(self, size=None):\n self.__size = size\n\n if size is None:\n self.__size = HashTable.__initial_size",
" __initial_size = 10000",
" def __init__(self, size=None):\n self.__size = size\n\n if size is ... |
# -*- coding: utf-8 -*-
class List():
    """Doubly linked list with an internal iteration cursor.

    Nodes are ``[item, prev_node, next_node]`` triples; ``__begin`` and
    ``__end`` track the head and tail, ``__current`` is the cursor.
    """
    def __init__(self):
        self.__begin = None
        self.__end = None
        self.__current = None
        self.__size = 0
    def empty(self):
        """Return True when the list holds no items."""
        return self.__size == 0
    def pop(self):
        """Alias for ``pop_last``."""
        return self.pop_last()
    def pop_last(self):
        """Remove and return the last item, or None when empty.

        Bug fix: the original only cleared ``__begin``/``__end`` when
        ``__begin`` was already None (an unreachable condition here), so
        removing the final item left stale head/tail pointers and a later
        pop could return an already-removed item.  Now mirrors
        ``pop_first``.
        """
        item = None
        if self.__end is not None:
            self.__size -= 1
            item = self.__end[0]
            if self.__end[1] is not None:
                prev = self.__end[1]
                prev[2] = None
                self.__end = prev
            else:
                # That was the only node: reset both ends.
                self.__end = None
                self.__begin = None
        return item
    def pop_first(self):
        """Remove and return the first item, or None when empty."""
        item = None
        if self.__begin is not None:
            self.__size -= 1
            item = self.__begin[0]
            if self.__begin[2] is not None:
                next = self.__begin[2]
                next[1] = None
                self.__begin = next
            else:
                self.__end = None
                self.__begin = None
        return item
    def add(self, item):
        """Alias for ``add_last``."""
        self.add_last(item)
    def add_first(self, item):
        """Prepend ``item`` to the list."""
        self.__size += 1
        node = [item, None, None]
        if self.__begin is None:
            self.__begin = node
            self.__end = node
        else:
            node[2] = self.__begin
            self.__begin[1] = node
            self.__begin = node
    def add_last(self, item):
        """Append ``item`` to the list."""
        self.__size += 1
        node = [item, None, None]
        if self.__end is None:
            self.__begin = node
            self.__end = node
        else:
            node[1] = self.__end
            self.__end[2] = node
            self.__end = node
    def get_first(self):
        """Return the head node (not the item), or None."""
        return self.__begin
    def get_last(self):
        """Return the tail node (not the item), or None."""
        return self.__end
    def next(self):
        """Advance the cursor; return the new current node (or None)."""
        if self.__current is not None:
            self.__current = self.__current[2]
        return self.__current
    def prev(self):
        """Move the cursor back; return the new current node (or None).

        Bug fix: the original assigned ``this.__current`` — ``this`` is
        undefined in Python, so calling prev() raised NameError.
        """
        if self.__current is not None:
            self.__current = self.__current[1]
        return self.__current
    def current(self):
        """Return the cursor's node, or None."""
        return self.__current
    def count(self):
        """Return the number of items."""
        return self.__size
    def reset(self):
        """Rewind the cursor to the first node."""
        self.__current = self.__begin
    def __iter__(self):
        return ListIterator(self)
    def to_string(self):
        """Return the items joined by ';' (with a trailing separator).

        Bug fix: the iterator yields the item payload directly, but the
        original indexed it again (``item[0]``), failing for
        non-subscriptable payloads.
        """
        output = ''
        for item in self:
            output += str(item) + ';'
        return output
#class ListNode:
#
# def __init__(self, item):
# self.item = item
# self.next = None
# self.prev = None
class ListIterator:
def __init__(self, list):
self.list = list
self.list.reset()
def next(self):
node = self.list.current()
item = None
if node is not None:
item = node[0]
else:
raise StopIteration
self.list.next()
return item
def __iter__(self):
return self
| [
[
3,
0,
0.4071,
0.7643,
0,
0.66,
0,
24,
0,
17,
0,
0,
0,
0,
4
],
[
2,
1,
0.0571,
0.0357,
1,
0.73,
0,
555,
0,
1,
0,
0,
0,
0,
0
],
[
14,
2,
0.05,
0.0071,
2,
0.54,
... | [
"class List():\n\n def __init__(self):\n self.__begin = None\n self.__end = None\n self.__current = None\n self.__size = 0",
" def __init__(self):\n self.__begin = None\n self.__end = None\n self.__current = None\n self.__size = 0",
" self.__b... |
# -*- coding: utf-8 -*-
class Heap():
    """Array-backed binary min-heap storing ``[key, data]`` pairs."""
    # NOTE(review): appears unused — the heap grows without bound.
    __maxSize = 100
    def __init__(self):
        self.items = []
    def insert(self, key, data):
        """Append ``[key, data]`` and sift it up to restore heap order."""
        item = [key, data]
        self.items.append(item)
        index = len(self.items) - 1
        self.__heapify_up(index)
    def change_key(self, index, key):
        """Update the key at ``index`` and re-seat the item."""
        self.items[index][0] = key
        self.__heapify(index)
    def change_data(self, index, data):
        """Update the payload at ``index`` (then re-heapifies, as written)."""
        self.items[index][1] = data
        self.__heapify(index)
    def extract_min(self):
        """Remove the smallest-key item and return its data payload."""
        min = self.items[0]
        self.__delete(0)
        return min[1]
    def empty(self):
        """Return True when the heap has no items."""
        return self.__length() == 0
    def __delete(self, index):
        # Move the last item into the vacated slot, then re-seat it.
        item = self.items.pop()
        if index < self.__length():
            self.items[index] = item
            self.__heapify(index)
    def __heapify(self, index):
        # Sift in whichever direction restores the heap property.
        parent = self.__parent(index)
        if self.items[index][0] < self.items[parent][0]:
            self.__heapify_up(index)
        else:
            self.__heapify_down(index)
    def __heapify_up(self, index):
        # Swap with the parent while the item is smaller than it.
        if index > 0:
            parent = self.__parent(index)
            if self.items[index][0] < self.items[parent][0]:
                self.__swap(index, parent)
                self.__heapify_up(parent)
    def __heapify_down(self, index):
        # Choose the smaller child and swap downward while out of order.
        length = self.__length()
        left = self.__left(index)
        right = self.__right(index)
        #Verify if current has children
        if left >= length:
            return
        #Verify if current has right child
        elif right < length:
            leftChild = self.items[left]
            rightChild = self.items[right]
            if leftChild[0] < rightChild[0]:
                min = left
            else:
                min = right
        #Then only left child exists
        elif left == length - 1:
            min = left
        else:
            raise Exception('There something wrong!')
        item = self.items[index]
        child = self.items[min]
        if child[0] < item[0]:
            self.__swap(index, min)
            self.__heapify_down(min)
    def __swap(self, i, j):
        "Swaps nodes in the heap"
        temp = self.items[i]
        self.items[i] = self.items[j]
        self.items[j] = temp
    def __length(self):
        "Returns the number of elements in the heap"
        return len(self.items)
    def __item(self, index):
        # Bounds-checked access used by show(); None when out of range.
        item = None
        if index < self.__length():
            item = self.items[index]
        return item
    def __left(self, index):
        # Index of the left child.
        return (2 * index) + 1
    def __right(self, index):
        # Index of the right child.
        return (2 * index) + 2
    def __parent(self, index):
        # NOTE(review): relies on Python 2 integer division; the two
        # branches together compute (index - 1) // 2.
        if index == 0:
            return 0
        elif index % 2 == 0:
            return index / 2 - 1
        else:
            return index / 2
    def size(self):
        """Return the number of items in the heap."""
        return self.__length()
    def show(self):
        # Debug dump of each node and its children (Python 2 prints).
        for index in range(self.__length()):
            item = self.__item(index)
            left = self.__item(self.__left(index))
            right = self.__item(self.__right(index))
            output = str(item[0])
            if left is not None:
                output += ' (left=' + str(left[0]) + ')'
            if right is not None:
                output += ' (right=' + str(right[0]) + ')'
            print output
        print '---------------------------'
| [
[
3,
0,
0.5155,
0.9767,
0,
0.66,
0,
538,
0,
18,
0,
0,
0,
0,
37
],
[
14,
1,
0.0388,
0.0078,
1,
0.57,
0,
822,
1,
0,
0,
0,
0,
1,
0
],
[
2,
1,
0.0581,
0.0155,
1,
0.57,
... | [
"class Heap():\n __maxSize = 100\n\n def __init__(self):\n self.items = []\n\n def insert(self, key, data):\n item = [key, data]",
" __maxSize = 100",
" def __init__(self):\n self.items = []",
" self.items = []",
" def insert(self, key, data):\n item = [k... |
# -*- coding: utf-8 *-*
class DBActors():
    """Sequential reader for IMDb plain-text actor/actress list files.

    ``open`` skips the header up to the column-marker line; ``next`` then
    yields ``(actor, title)`` pairs until ``finished`` becomes true.
    """
    def __init__(self, filename):
        self.filename = filename
        self.file = None
        self.currentline = None
    def open(self):
        """Open the file and skip the header up to the actor list.

        Bug fix: the header scan now stops at end of file instead of
        spinning forever when the '---- ------' marker line is missing
        (readline() keeps returning '' at EOF).
        """
        if self.file is not None:
            raise Exception('Already opened!')
        self.file = open(self.filename)
        # Read until the column-marker line that precedes the data rows.
        while self.currentline != '---- ------\n':
            self.currentline = self.file.readline()
            if self.currentline == '':
                # EOF before the marker: finished()/end_of_db() report it.
                break
    def close(self):
        """Close the underlying file and allow a subsequent open()."""
        self.file.close()
        self.file = None
    def next(self):
        """Read one line; return ``(actor, title)`` — either may be None."""
        self.currentline = self.file.readline()
        if self.finished():
            return None, None
        parts = self.currentline.split('\t')
        actor = None
        title = None
        if self.is_actor_line():
            # First line of an actor block: "Name\t...<first movie>".
            actor = parts[0].strip()
            title = self.get_movie_name(parts, 1)
        elif self.is_movie_line():
            title = self.get_movie_name(parts)
        return actor, title
    def finished(self):
        """True at end of file or at the trailing separator of the list."""
        return self.end_of_db() or self.end_of_list()
    def end_of_db(self):
        # readline() returns '' only at end of file.
        return self.currentline == ''
    def end_of_list(self):
        return self.currentline.startswith('---------------------------------')
    def is_actor_line(self):
        # Actor rows start in the first column; movie rows start with a tab.
        first = self.currentline[0]
        return first != '\t' and first != '\n'
    def is_movie_line(self):
        return self.currentline[0] == '\t'
    def find_non_empty(self, parts, startat=0):
        """Return the first non-empty field at or after ``startat``."""
        for index in range(startat, len(parts)):
            part = parts[index]
            if part != '':
                return part
    def get_movie_name(self, parts, startat=0):
        """Extract the bare title, dropping '[character]' / '<credit>' tails."""
        title = self.find_non_empty(parts, startat)
        characterStart = title.find('[')
        creditStart = title.find('<')
        if characterStart > 0:
            title = title[:characterStart]
        elif creditStart > 0:
            title = title[:creditStart]
        return title.strip()
| [
[
3,
0,
0.5253,
0.962,
0,
0.66,
0,
402,
0,
11,
0,
0,
0,
0,
21
],
[
2,
1,
0.0949,
0.0506,
1,
0.27,
0,
555,
0,
2,
0,
0,
0,
0,
0
],
[
14,
2,
0.0886,
0.0127,
2,
0.67,
... | [
"class DBActors():\n\n def __init__(self, filename):\n self.filename = filename\n self.file = None\n self.currentline = None\n\n def open(self):",
" def __init__(self, filename):\n self.filename = filename\n self.file = None\n self.currentline = None",
" ... |
# -*- coding: utf-8 *-*
from structures.hashtable import HashTable
class Actor():
    """An actor plus the de-duplicated, ordered list of their titles."""
    def __init__(self, name):
        self.name = name
        self.__titlesHash = HashTable()
        self.titles = []
    def add_title(self, title):
        """Record ``title`` once; repeated additions are ignored."""
        if self.__titlesHash[title] is not None:
            return
        self.__titlesHash[title] = True
        self.titles.append(title)
    def to_string(self):
        """Render as 'name: title1; title2; ...'."""
        parts = [self.name + ':']
        for title in self.titles:
            parts.append(' ' + title + ';')
        return ''.join(parts)
| [
[
1,
0,
0.0909,
0.0455,
0,
0.66,
0,
112,
0,
1,
0,
0,
112,
0,
0
],
[
3,
0,
0.6136,
0.8182,
0,
0.66,
1,
985,
0,
3,
0,
0,
0,
0,
2
],
[
2,
1,
0.3864,
0.1818,
1,
0.34,
... | [
"from structures.hashtable import HashTable",
"class Actor():\n\n def __init__(self, name):\n self.name = name\n self.__titlesHash = HashTable()\n self.titles = []\n\n def add_title(self, title):",
" def __init__(self, name):\n self.name = name\n self.__titlesHash = H... |
# -*- coding: utf-8 *-*
from structures.hashtable import HashTable
class Movie():
    """A movie title plus the set of actors credited in it."""
    def __init__(self, title):
        self.title = title
        self.actors = HashTable()
    def add_actor(self, actor):
        """Register ``actor`` once; duplicates are ignored."""
        if self.actors[actor] is not None:
            return
        self.actors[actor] = True
| [
[
1,
0,
0.1538,
0.0769,
0,
0.66,
0,
112,
0,
1,
0,
0,
112,
0,
0
],
[
3,
0,
0.6923,
0.6923,
0,
0.66,
1,
945,
0,
2,
0,
0,
0,
0,
1
],
[
2,
1,
0.6154,
0.2308,
1,
0.88,
... | [
"from structures.hashtable import HashTable",
"class Movie():\n\n def __init__(self, title):\n self.title = title\n self.actors = HashTable()\n\n def add_actor(self, actor):\n if self.actors[actor] is None:",
" def __init__(self, title):\n self.title = title\n self.ac... |
# -*- coding: utf-8 *-*
from graphs.listgraph import *
def add_edge(g, src, dest):
    """Insert an undirected unit-weight edge by adding both directions."""
    for a, b in ((src, dest), (dest, src)):
        g.add_edge(a, b, 1)
def generate_test1():
    """Write fixture graph inputs/test1.txt (7 vertices, tree-shaped)."""
    graph = ListGraph()
    for name in ('A', 'B', 'C', 'D', 'E', 'F', 'G'):
        graph.add_vertex(name)
    for src, dest in (('A', 'B'), ('A', 'C'), ('A', 'D'),
                      ('B', 'E'), ('C', 'F'), ('F', 'G')):
        add_edge(graph, src, dest)
    graph.save('inputs/test1.txt')
def generate_test2():
    """Write fixture graph inputs/test2.txt (complete graph on A-D)."""
    graph = ListGraph()
    for name in ('A', 'B', 'C', 'D'):
        graph.add_vertex(name)
    for src, dest in (('A', 'B'), ('A', 'C'), ('A', 'D'),
                      ('B', 'C'), ('B', 'D'), ('C', 'D')):
        add_edge(graph, src, dest)
    graph.save('inputs/test2.txt')
def generate_test3():
    """Write fixture graph inputs/test3.txt (A isolated, B-C-D connected)."""
    graph = ListGraph()
    for name in ('A', 'B', 'C', 'D'):
        graph.add_vertex(name)
    for src, dest in (('B', 'C'), ('B', 'D'), ('C', 'D')):
        add_edge(graph, src, dest)
    graph.save('inputs/test3.txt')
def generate_test4():
    """Write fixture graph inputs/test4.txt (a path A-B-C-D)."""
    graph = ListGraph()
    for name in ('A', 'B', 'C', 'D'):
        graph.add_vertex(name)
    for src, dest in (('A', 'B'), ('B', 'C'), ('C', 'D')):
        add_edge(graph, src, dest)
    graph.save('inputs/test4.txt')
# Generate all fixture graphs consumed by the unit tests.
generate_test1()
generate_test2()
generate_test3()
generate_test4()
| [
[
1,
0,
0.0244,
0.0122,
0,
0.66,
0,
217,
0,
1,
0,
0,
217,
0,
0
],
[
2,
0,
0.0732,
0.0366,
0,
0.66,
0.1111,
76,
0,
3,
0,
0,
0,
0,
2
],
[
8,
1,
0.0732,
0.0122,
1,
0.4... | [
"from graphs.listgraph import *",
"def add_edge(g, src, dest):\n g.add_edge(src, dest, 1)\n g.add_edge(dest, src, 1)",
" g.add_edge(src, dest, 1)",
" g.add_edge(dest, src, 1)",
"def generate_test1():\n\n graph = ListGraph()\n\n graph.add_vertex('A')\n graph.add_vertex('B')\n graph.ad... |
# -*- coding: utf-8 *-*
import sys
import unittest
from tests.dijkstra import *
# Entry point: run the Dijkstra test cases imported above.
if __name__ == '__main__':
    unittest.main()
| [
[
1,
0,
0.25,
0.125,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.375,
0.125,
0,
0.66,
0.3333,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.625,
0.125,
0,
0.66,
... | [
"import sys",
"import unittest",
"from tests.dijkstra import *",
"if __name__ == '__main__':\n unittest.main()",
" unittest.main()"
] |
# -*- coding: utf-8 *-*
import unittest
from tests_algorithms import *
from tests_graphs import *
from tests_structures import *
# Entry point: run the full suite (algorithms, graphs, structures).
if __name__ == '__main__':
    unittest.main()
| [
[
1,
0,
0.2222,
0.1111,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.4444,
0.1111,
0,
0.66,
0.25,
222,
0,
1,
0,
0,
222,
0,
0
],
[
1,
0,
0.5556,
0.1111,
0,
0.66... | [
"import unittest",
"from tests_algorithms import *",
"from tests_graphs import *",
"from tests_structures import *",
"if __name__ == '__main__':\n unittest.main()",
" unittest.main()"
] |
# -*- coding: utf-8 *-*
from graphs.matrixgraph import *
from graphs.listgraph import *
from structures.hashtable import HashTable
from imdb.dbactors import DBActors
# Total
# Around 1450000 actors
# Around 819000 actresses
# RAM: 524.8 MB
class Main():
    """Command-line driver (Python 2): builds a co-actor graph from IMDb
    plain-text dumps and answers minimum-distance queries over it."""
    def __init__(self):
        pass
    def main(self):
        # Hard-coded input dumps and the serialized-graph output path.
        filenames = ['C:/Juan/imdb/actors.list', 'C:/Juan/imdb/actresses.list']
        graphfilename = 'C:/Juan/imdb/imdb.graph.txt'
#        self.generate_graph(filenames, graphfilename)
        self.query_graph(graphfilename)
    def query_graph(self, graphfilename):
        """Interactive loop: load the saved graph and answer queries for
        the number of actors farther than a given distance from an actor."""
        print 'Loading graph', graphfilename, '...'
        graph = ListGraph(3000000)
        graph.load(graphfilename)
        input = None
        while input != 'quit':
            print '------------------------------------------------'
            input = raw_input("Enter action ['query' or 'quit']: ")
            print '------------------------------------------------'
            if input == 'query':
                actor = raw_input("Enter actors name: ")
                distance = raw_input("Enter minimum distance: ")
                d = int(distance)
                try:
                    print 'Processing, please wait...'
                    actors = graph.run_dijkstra(actor, d)
                    result = 'There are ' + str(actors.count()) + ' actors'
                    result += ' farther than ' + distance
                    result += ' from ' + actor
                    print result
                except Exception:
                    print 'Invalid actors.'
        print 'Thank you, come back soon!'
    def generate_graph(self, filenames, outputfilename):
        """Parse the dumps, link every pair of co-stars through their
        shared movies, then save the resulting graph."""
        print 'Generating graph...'
        graph = ListGraph(3000000)
        movies = HashTable(3000000)
        for filename in filenames:
            print "Processing ", filename
            dbactors = DBActors(filename)
            dbactors.open()
            currentactor = None
            num = 0
            while not dbactors.finished():
                # Get actor and movie from line
                actor, title = dbactors.next()
                if actor is not None:
                    # The new actor's vertex index is the pre-insert count.
                    indexcurrentactor = graph.vertex_count()
                    graph.add_vertex(actor)
                    # Print counter and verify if maximum reached
                    num += 1
                    if num % 1000 == 0:
                        print num
                    if num > 1000 * 100:
                        break
                if title is not None:
                    # One HashTable per movie acts as a set of actor indexes.
                    movie = movies[title]
                    if movie is None:
                        movie = HashTable()
                        movies[title] = movie
                    # NOTE(review): indexcurrentactor is unbound if the very
                    # first data line is a movie line — presumably well-formed
                    # dumps always start with an actor row; confirm.
                    if indexcurrentactor is not None:
                        movie[indexcurrentactor] = None
                    else:
                        raise Exception('Oh no! Current actor is null!')
            dbactors.close()
        for movie in movies.get_values():
            actors = movie.get_keys()
            # Connect every ordered pair of co-stars of this movie.
            for indexsrc in actors:
                for indexdest in actors:
                    if indexsrc != indexdest:
                        src = graph.get_vertex(indexsrc)
                        dest = graph.get_vertex(indexdest)
                        graph.add_edge(src, dest, 1)
        graph.save(outputfilename)
    def generate_list(self, filename):
        """Dump 'actor: title; title; ...' lines to <filename>.txt."""
        actors = open(filename + '.txt', 'w+')
        dbactors = DBActors(filename)
        dbactors.open()
        while not dbactors.finished():
            actor, title = dbactors.next()
            if actor is not None:
                actors.write('\n')
                actors.write(actor + ': ')
            if title is not None:
                actors.write(title + '; ')
        actors.close()
    def preview(self, filename, numlines=1000):
        """Print the first ``numlines`` lines of ``filename`` (debug aid)."""
        file = open(filename)
        for i in range(numlines):
            print file.readline()
        file.close()
# Script entry point: build and/or query the IMDb co-actor graph.
if __name__ == '__main__':
    main = Main()
    main.main()
| [
[
1,
0,
0.015,
0.0075,
0,
0.66,
0,
941,
0,
1,
0,
0,
941,
0,
0
],
[
1,
0,
0.0226,
0.0075,
0,
0.66,
0.2,
217,
0,
1,
0,
0,
217,
0,
0
],
[
1,
0,
0.0301,
0.0075,
0,
0.66... | [
"from graphs.matrixgraph import *",
"from graphs.listgraph import *",
"from structures.hashtable import HashTable",
"from imdb.dbactors import DBActors",
"class Main():\n\n def __init__(self):\n pass\n\n def main(self):\n filenames = ['C:/Juan/imdb/actors.list', 'C:/Juan/imdb/actresses.l... |
# -*- coding: utf-8 -*-
import unittest
from structures.unionfind import UnionFind
class UnionFindTest(unittest.TestCase):
    """Exercises UnionFind construction, union and set counting."""
    def test_create_unionfind(self):
        uf = UnionFind(['V1', 'V2'])
        self.assertEqual(2, uf.count())
    def test_create_unionfind_union_check_count(self):
        uf = UnionFind(['V1', 'V2'])
        self.assertEqual(2, uf.count())
        uf.union('V1', 'V2')
        self.assertEqual(1, uf.count())
    def test_build_example_from_book_check_count(self):
        items = 'ijstuvwxyz'
        uf = UnionFind(items)
        self.assertEqual(len(items), uf.count())
        # Merge pairs by their current representatives.
        for a, b in (('w', 'u'), ('s', 'u'), ('t', 'v'), ('z', 'v'),
                     ('i', 'x'), ('y', 'j'), ('x', 'j')):
            uf.union(uf.find(a), uf.find(b))
        self.assertEqual(3, uf.count())
        uf.union(uf.find('u'), uf.find('v'))
        self.assertEqual(2, uf.count())
| [
[
1,
0,
0.0488,
0.0244,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0732,
0.0244,
0,
0.66,
0.5,
696,
0,
1,
0,
0,
696,
0,
0
],
[
3,
0,
0.5732,
0.878,
0,
0.66,
... | [
"import unittest",
"from structures.unionfind import UnionFind",
"class UnionFindTest(unittest.TestCase):\n\n def test_create_unionfind(self):\n unionfind = UnionFind(['V1', 'V2'])\n\n self.assertEqual(2, unionfind.count())\n\n def test_create_unionfind_union_check_count(self):",
" def ... |
# -*- coding: utf-8 -*-
import unittest
from graphs.matrixgraph import MatrixGraph
class MatrixGraphTest(unittest.TestCase):
    """Exercises MatrixGraph vertex/edge insertion and file loading."""
    def setUp(self):
        self.graph = MatrixGraph()
    def _add_vertices(self, n):
        # Helper: insert vertices V0..V{n-1}.
        for i in range(n):
            self.graph.add_vertex('V' + str(i))
    def test_add_two_vertices(self):
        self.graph.add_vertex('V1')
        self.graph.add_vertex('V2')
        self.assertEqual(2, self.graph.vertex_count())
    def test_add_fifteen_vertices(self):
        self._add_vertices(15)
        self.assertEqual(15, self.graph.vertex_count())
    def test_add_vertices_add_edge_check_edge(self):
        self._add_vertices(15)
        self.graph.add_edge('V4', 'V5', 1)
        self.assertEqual(1, self.graph.get_edge('V4', 'V5'))
    def test_load(self):
        self.graph.load('inputs/test1.txt')
        self.assertEqual(7, self.graph.vertex_count())
        # Every stored edge of the fixture must be present in both directions
        # that the file encodes.
        for src, dest in (('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'A'),
                          ('B', 'E'), ('C', 'A'), ('C', 'F'), ('D', 'A'),
                          ('E', 'B')):
            self.assertEqual(1, self.graph.get_edge(src, dest))
| [
[
1,
0,
0.037,
0.0185,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0556,
0.0185,
0,
0.66,
0.5,
941,
0,
1,
0,
0,
941,
0,
0
],
[
3,
0,
0.5556,
0.9074,
0,
0.66,
... | [
"import unittest",
"from graphs.matrixgraph import MatrixGraph",
"class MatrixGraphTest(unittest.TestCase):\n\n def setUp(self):\n self.graph = MatrixGraph()\n\n def test_add_two_vertices(self):\n self.graph.add_vertex('V1')\n self.graph.add_vertex('V2')",
" def setUp(self):\n ... |
# -*- coding: utf-8 -*-
import unittest
from structures.hashtable import HashTable
class HashTableTest(unittest.TestCase):
    """Exercises HashTable insertion, lookup, rehashing and key listing."""
    def test_add_and_retrieve_item(self):
        table = HashTable()
        table.set("one", 1)
        self.assertEqual(1, table.get("one"))
    def test_add_and_retrieve_two_items(self):
        pairs = [["one", 1], ["two", 2]]
        table = HashTable()
        for key, data in pairs:
            table.set(key, data)
        for expected, (key, _) in enumerate(pairs, 1):
            self.assertEqual(expected, table.get(key))
    def test_add_and_retrieve_fifteen_items(self):
        pairs = [["one", 1], ["two", 2], ["three", 3],
                 ["four", 4], ["five", 5], ["six", 6],
                 ["seven", 7], ["eight", 8], ["nine", 9],
                 ["ten", 10], ["eleven", 11], ["twelve", 12],
                 ["thirteen", 13], ["fourteen", 14], ["fifteen", 15]]
        table = HashTable()
        for key, data in pairs:
            table.set(key, data)
        for expected, (key, _) in enumerate(pairs, 1):
            self.assertEqual(expected, table.get(key))
    def test_add_and_retrieve_fifteen_items_similar_keys(self):
        table = HashTable()
        for i in range(100):
            table.set('V' + str(i), i)
        for i in range(100):
            self.assertEqual(i, table.get('V' + str(i)))
    def test_add__fifteen_items_similar_keys_retrieve_different(self):
        table = HashTable()
        for i in range(100):
            table['V' + str(i)] = i
        # Lookups of absent keys must yield None.
        self.assertEqual(None, table['does not exist'])
        self.assertEqual(None, table['V101'])
        self.assertEqual(None, table['V1.1'])
    def test_add_some_items_and_get_values_list(self):
        table = HashTable()
        for i in range(15):
            table.set('V' + str(i), i)
        self.assertEqual(15, len(table.get_values()))
    def test_add_items_to_force_rehashing_and_get_values_list(self):
        table = HashTable()
        for i in range(1000):
            table['V' + str(i)] = i
        self.assertEqual(1000, len(table.get_values()))
| [
[
1,
0,
0.0222,
0.0111,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0333,
0.0111,
0,
0.66,
0.5,
112,
0,
1,
0,
0,
112,
0,
0
],
[
3,
0,
0.5333,
0.9444,
0,
0.66,... | [
"import unittest",
"from structures.hashtable import HashTable",
"class HashTableTest(unittest.TestCase):\n\n def test_add_and_retrieve_item(self):\n hash = HashTable()\n key = \"one\"\n hash.set(key, 1)\n\n self.assertEqual(1, hash.get(key))",
" def test_add_and_retrieve_ite... |
# -*- coding: utf-8 -*-
import unittest
from structures.list import List
class ListTest(unittest.TestCase):
    """Exercises List add/pop ordering, emptiness and iteration."""
    def test_create_list_check_empty(self):
        items = List()
        self.assertTrue(items.empty())
    def test_create_list_add_element_check_emtpy(self):
        items = List()
        items.add(1)
        self.assertFalse(items.empty())
    def test_add_two_items_pop_them_check_values_check_empty(self):
        items = List()
        items.add(1)
        items.add(2)
        # add() appends, so pops (from the tail) come back LIFO.
        self.assertEqual(2, items.pop())
        self.assertEqual(1, items.pop())
        self.assertTrue(items.empty())
    def test_add_items_first_pop_them_check_values_check_empty(self):
        items = List()
        items.add_first(1)
        items.add_first(2)
        # add_first() prepends, so tail pops return insertion order.
        self.assertEqual(1, items.pop())
        self.assertEqual(2, items.pop())
        self.assertTrue(items.empty())
    def test_add_items_use_iterator(self):
        items = List()
        items.add_first(1)
        items.add_first(2)
        items.add_first(3)
        count = 0
        for _ in items:
            count += 1
        self.assertEqual(3, count)
| [
[
1,
0,
0.0339,
0.0169,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0508,
0.0169,
0,
0.66,
0.5,
593,
0,
1,
0,
0,
593,
0,
0
],
[
3,
0,
0.5508,
0.9153,
0,
0.66,... | [
"import unittest",
"from structures.list import List",
"class ListTest(unittest.TestCase):\n\n def test_create_list_check_empty(self):\n list = List()\n\n self.assertTrue(list.empty())\n\n def test_create_list_add_element_check_emtpy(self):",
" def test_create_list_check_empty(self):\n ... |
# -*- coding: utf-8 -*-
import unittest
from graphs.listgraph import ListGraph
class ListGraphTest(unittest.TestCase):
    """Exercises ListGraph vertex/edge insertion and file loading."""
    def setUp(self):
        self.graph = ListGraph()
    def _add_vertices(self, n):
        # Helper: insert vertices V0..V{n-1}.
        for i in range(n):
            self.graph.add_vertex('V' + str(i))
    def test_add_two_vertices(self):
        self.graph.add_vertex('V1')
        self.graph.add_vertex('V2')
        self.assertEqual(2, self.graph.vertex_count())
    def test_add_fifteen_vertices(self):
        self._add_vertices(15)
        self.assertEqual(15, self.graph.vertex_count())
    def test_add_vertices_add_edge_check_edge(self):
        self._add_vertices(15)
        self.graph.add_edge('V4', 'V5', 1)
        self.assertEqual(1, self.graph.get_edge('V4', 'V5'))
        self.assertEqual(1, self.graph.edge_count())
    def test_load(self):
        self.graph.load('inputs/test1.txt')
        self.assertEqual(7, self.graph.vertex_count())
        for src, dest in (('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'A'),
                          ('B', 'E'), ('C', 'A'), ('C', 'F'), ('D', 'A'),
                          ('E', 'B')):
            self.assertEqual(1, self.graph.get_edge(src, dest))
| [
[
1,
0,
0.0364,
0.0182,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0545,
0.0182,
0,
0.66,
0.5,
217,
0,
1,
0,
0,
217,
0,
0
],
[
3,
0,
0.5545,
0.9091,
0,
0.66,... | [
"import unittest",
"from graphs.listgraph import ListGraph",
"class ListGraphTest(unittest.TestCase):\n\n def setUp(self):\n self.graph = ListGraph()\n\n def test_add_two_vertices(self):\n self.graph.add_vertex('V1')\n self.graph.add_vertex('V2')",
" def setUp(self):\n sel... |
# -*- coding: utf-8 *-*
import unittest
from graphs.listgraph import ListGraph
from graphs.matrixgraph import MatrixGraph
class DijkstraTest(unittest.TestCase):
    """Runs identical Dijkstra fixtures against both graph implementations."""
    def test_dijkstra_matrix(self):
        for runner in (self.run_test1, self.run_test2,
                       self.run_test3, self.run_test4):
            runner(MatrixGraph())
    def test_dijkstra_list(self):
        for runner in (self.run_test1, self.run_test2,
                       self.run_test3, self.run_test4):
            runner(ListGraph())
    def run_test1(self, graph):
        graph.load('inputs/test1.txt')
        # (distance, expected count of vertices farther than it from 'A')
        for distance, expected in ((1, 6), (2, 3), (3, 1)):
            self.assertEqual(expected,
                             graph.run_dijkstra('A', distance).count())
    def run_test2(self, graph):
        graph.load('inputs/test2.txt')
        for distance, expected in ((1, 3), (2, 0)):
            self.assertEqual(expected,
                             graph.run_dijkstra('A', distance).count())
    def run_test3(self, graph):
        graph.load('inputs/test3.txt')
        self.assertEqual(3, graph.run_dijkstra('A', 1).count())
    def run_test4(self, graph):
        graph.load('inputs/test4.txt')
        for distance, expected in ((1, 3), (2, 2), (3, 1), (4, 0)):
            self.assertEqual(expected,
                             graph.run_dijkstra('A', distance).count())
| [
[
1,
0,
0.0328,
0.0164,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0492,
0.0164,
0,
0.66,
0.3333,
217,
0,
1,
0,
0,
217,
0,
0
],
[
1,
0,
0.0656,
0.0164,
0,
0.... | [
"import unittest",
"from graphs.listgraph import ListGraph",
"from graphs.matrixgraph import MatrixGraph",
"class DijkstraTest(unittest.TestCase):\n\n def test_dijkstra_matrix(self):\n self.run_test1(MatrixGraph())\n self.run_test2(MatrixGraph())\n self.run_test3(MatrixGraph())\n ... |
# -*- coding: utf-8 -*-
import unittest
from structures.heap import Heap
class HeapTest(unittest.TestCase):
    """Checks min-heap ordering under insert, extract and key changes."""
    def _assert_sorted_extraction(self, heap):
        # Repeated extraction must yield a non-decreasing sequence.
        smallest = None
        while not heap.empty():
            value = heap.extract_min()
            if smallest is not None:
                self.assertLessEqual(smallest, value)
            smallest = value
    def test_add_n_elements_verify_order(self):
        heap = Heap()
        n = 65
        # Insert keys in strictly decreasing order.
        for i in range(n):
            heap.insert(n - i, n - i)
        self._assert_sorted_extraction(heap)
    def test_add_3_elements_change_key_verify_heap_order(self):
        heap = Heap()
        n = 3
        for i in range(n):
            heap.insert(n - i, n - i)
        # Mutate the root's payload and key, then re-verify ordering.
        heap.change_data(0, 4)
        heap.change_key(0, 4)
        self._assert_sorted_extraction(heap)
| [
[
1,
0,
0.0513,
0.0256,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0769,
0.0256,
0,
0.66,
0.5,
909,
0,
1,
0,
0,
909,
0,
0
],
[
3,
0,
0.5769,
0.8718,
0,
0.66,... | [
"import unittest",
"from structures.heap import Heap",
"class HeapTest(unittest.TestCase):\n\n def test_add_n_elements_verify_order(self):\n heap = Heap()\n n = 65\n #Insert elements in reverse order\n for i in range(n):\n heap.insert(n - i, n - i)",
" def test_add... |
# -*- coding: utf-8 -*-
from structures.hashtable import HashTable
from structures.list import List
from structures.heap import Heap
from structures.unionfind import UnionFind
from graphs.graph import *
class MatrixGraph(Graph):
    """Directed weighted graph backed by an adjacency matrix.

    __vertices maps a vertex name to its row/column index; matrix cell
    [i][j] holds the edge [source_name, dest_name, weight] or None.
    """

    def __init__(self):
        self.__adjacency = []
        self.__vertices = HashTable()

    def add_vertex(self, name):
        """Register a vertex and grow the matrix by one row and one column."""
        self.__vertices[name] = self.__length()
        self.__adjacency.append([None] * self.__length())
        for i in range(self.__length()):
            self.__adjacency[i].append(None)

    def add_edge(self, source, dest, weight):
        """Add (or overwrite) the directed edge source -> dest.

        Raises when either endpoint has not been added as a vertex.
        """
        edge = [source, dest, weight]
        indexsource = self.__vertices[source]
        indexdest = self.__vertices[dest]
        if indexsource is not None and indexdest is not None:
            self.__adjacency[indexsource][indexdest] = edge
        else:
            name = source
            if indexdest is None:
                name = dest
            raise Exception("Vertex '" + name + "' doesn't exist")

    def vertex_count(self):
        """Return the number of vertices (matrix dimension)."""
        return self.__length()

    def edge_count(self):
        """Count the non-empty matrix cells.

        BUGFIX: the ranges previously stopped at __length() - 1, so edges
        in the last row and last column were never counted.
        """
        count = 0
        length = self.__length()
        for i in range(length):
            for j in range(length):
                if self.__adjacency[i][j] is not None:
                    count += 1
        return count

    def get_edge(self, source, dest):
        """Return the weight of source -> dest, or None when no such edge.

        BUGFIX: previously subscripted a None cell and raised TypeError
        for missing edges, although callers (Generator.random_generation)
        compare the result against None.
        """
        indexsource = self.__vertices[source]
        indexdest = self.__vertices[dest]
        edge = self.__adjacency[indexsource][indexdest]
        if edge is None:
            return None
        return edge[2]

    def __length(self):
        # Number of vertices == matrix dimension.
        return len(self.__adjacency)

    def run_dijkstra(self, source, distance):
        """Run Dijkstra from `source` and return a List of vertex names
        whose final cost is at least `distance` (unreachable vertices keep
        cost infinity and are therefore always included).
        """
        if self.__vertices[source] is None:
            raise Exception('Source vertex does not exist!')
        # Name of the predecessor on the best known path to each node.
        previous = HashTable()
        # Best known cost from the source to each node.
        cost = HashTable()
        # Priority queue ordered by tentative cost.
        heap = Heap()
        infinity = float("inf")
        # Every node starts unreachable.
        for name in self.__vertices.get_keys():
            cost[name] = infinity
        # The source itself costs zero.
        cost[source] = 0
        heap.insert(cost[source], source)
        # How many nodes have been settled so far.
        count = 0
        # NOTE(review): nodes can be pushed more than once and stale heap
        # entries also advance `count`, which may end the loop early on
        # dense graphs — confirm against the expected results.
        while count < self.vertex_count() and not heap.empty():
            name = heap.extract_min()
            count += 1
            # Row index of the extracted vertex.
            index = self.__vertices[name]
            # Relax every outgoing edge.
            for i in range(self.__length()):
                edge = self.__adjacency[index][i]
                if edge is not None:
                    weight = edge[2]
                    dest = edge[1]
                    newcost = cost[name] + weight
                    if newcost < cost[dest]:
                        cost[dest] = newcost
                        previous[dest] = name
                        heap.insert(newcost, dest)
        # Collect every vertex whose cost is at least `distance`.
        actors = List()
        for name in self.__vertices.get_keys():
            if distance <= cost[name]:
                actors.add(name)
        return actors

    def to_string(self):
        """Render one 'name: (dest, weight) ...' line per vertex."""
        output = ''
        for i in range(self.__length()):
            output += self.__vertices.get_keys()[i] + ': '
            for j in range(self.__length()):
                edge = self.__adjacency[i][j]
                # BUGFIX: the None check must happen before the cell is
                # subscripted; the old code read edge[1]/edge[2] first and
                # raised TypeError on any empty cell.
                if edge is not None:
                    output += '(' + edge[1] + ', '
                    output += str(edge[2]) + ') '
            output += '\n'
        return output

    def save(self, filename):
        """Write the graph in the same text format load() parses:
        name -> destIndex;weight || destIndex;weight ...

        Added because Generator.generate() calls save() on a MatrixGraph,
        which previously hit the unimplemented Graph.save() and raised.
        """
        file = open(filename, 'w')
        for i in range(self.__length()):
            file.write(self.__vertices.get_keys()[i] + Graph.NAME_SEPARATOR)
            count = 0
            for j in range(self.__length()):
                edge = self.__adjacency[i][j]
                if edge is not None:
                    if count != 0:
                        file.write(Graph.ADJ_LIST_SEPARATOR)
                    file.write(str(j) + Graph.WEIGHT_SEPARATOR)
                    file.write(str(edge[2]))
                    count += 1
            file.write('\n')
        file.close()

    def load(self, filename):
        """Read a graph from the text format written by save().

        First pass registers every vertex (so indices are stable), the
        second pass resolves each destination index back to a name.
        """
        file = open(filename)
        adj = HashTable()
        for line in file:
            pair = line.split(Graph.NAME_SEPARATOR)
            name = pair[0].strip()
            adjlist = pair[1].strip()
            self.add_vertex(name)
            adj[name] = adjlist
        for name in self.__vertices.get_keys():
            adjacency = adj[name]
            edges = adjacency.split(Graph.ADJ_LIST_SEPARATOR)
            for edge in edges:
                edge = edge.strip()
                if len(edge) > 0:
                    words = edge.split(Graph.WEIGHT_SEPARATOR)
                    destIndex = int(words[0].strip())
                    dest = self.__vertices.get_key(destIndex)
                    weight = int(words[1])
                    self.add_edge(name, dest, weight)
        file.close()
| [
[
1,
0,
0.0137,
0.0068,
0,
0.66,
0,
112,
0,
1,
0,
0,
112,
0,
0
],
[
1,
0,
0.0205,
0.0068,
0,
0.66,
0.2,
593,
0,
1,
0,
0,
593,
0,
0
],
[
1,
0,
0.0274,
0.0068,
0,
0.6... | [
"from structures.hashtable import HashTable",
"from structures.list import List",
"from structures.heap import Heap",
"from structures.unionfind import UnionFind",
"from graphs.graph import *",
"class MatrixGraph(Graph):\n\n def __init__(self):\n self.__adjacency = []\n self.__vertices = ... |
# -*- coding: utf-8 -*-
from structures.list import List
from structures.heap import Heap
from structures.unionfind import UnionFind
from graphs.graph import *
class ListGraph(Graph):
    """Directed weighted graph backed by adjacency lists.

    __vertices maps a vertex name to a List of edges; each edge is
    [dest_key_index, weight], where the index refers to the hash table's
    insertion order of keys.
    """

    def __init__(self, size=None):
        # size: optional initial bucket count for the vertex table.
        self.__vertices = HashTable(size)

    def add_vertex(self, name):
        """Register a vertex with an empty adjacency list."""
        self.__vertices[name] = List()

    def add_edge(self, src, dest, weight=None):
        """Add the directed edge src -> dest unless it already exists."""
        adjacents = self.__vertices[src]
        exists = False
        for edge in adjacents:
            edgeDest = self.__vertices.get_key(edge[0])
            if dest == edgeDest:
                exists = True
                break
        if not exists:
            # NOTE(review): crashes with TypeError when dest was never
            # added as a vertex — confirm callers always add vertices first.
            destIndex = self.__vertices.get_key_index(dest)
            edge = [destIndex, weight]
            adjacents.add(edge)

    def get_edge(self, src, dest):
        """Return the weight of src -> dest, or None when absent."""
        adjacents = self.__vertices[src]
        for edge in adjacents:
            edgeDest = self.__vertices.get_key(edge[0])
            if edgeDest == dest:
                return edge[1]
        return None

    def vertex_count(self):
        """Return the number of vertices."""
        return self.__vertices.count()

    def edge_count(self):
        """Return the total number of directed edges."""
        count = 0
        for name in self.__vertices.get_keys():
            adjacents = self.__vertices[name]
            count += adjacents.count()
        return count

    def get_vertex(self, index):
        """Return the vertex name at the given insertion index."""
        return self.__vertices.get_key(index)

    def run_dijkstra(self, source, distance):
        """Run Dijkstra from `source` and return a List of vertex names
        whose final cost is at least `distance` (unreachable vertices keep
        cost infinity and are therefore always included).
        """
        if self.__vertices[source] is None:
            raise Exception('Source vertex does not exist!')
        # Name of the predecessor on the best known path to each node.
        previous = HashTable()
        # Best known cost from the source to each node.
        cost = HashTable()
        # Priority queue ordered by tentative cost.
        heap = Heap()
        infinity = float("inf")
        # Every node starts unreachable.
        for name in self.__vertices.get_keys():
            cost[name] = infinity
        # The source itself costs zero.
        cost[source] = 0
        heap.insert(cost[source], source)
        # How many nodes have been settled so far.
        count = 0
        # NOTE(review): nodes can be pushed more than once and stale heap
        # entries also advance `count`, which may end the loop early on
        # dense graphs — confirm against the expected results.
        while count < self.vertex_count() and not heap.empty():
            name = heap.extract_min()
            count += 1
            adjacents = self.__vertices[name]
            # Relax every outgoing edge.
            for edge in adjacents:
                dest = self.__vertices.get_key(edge[0])
                weight = edge[1]
                newcost = cost[name] + weight
                if newcost < cost[dest]:
                    cost[dest] = newcost
                    previous[dest] = name
                    heap.insert(newcost, dest)
        # Collect every vertex whose cost is at least `distance`.
        actors = List()
        for name in self.__vertices.get_keys():
            if distance <= cost[name]:
                actors.add(name)
        return actors

    def to_string(self):
        """Render one 'name:(dest, weight) ...' line per vertex.

        BUGFIX: edge[0] is an integer key index and must be translated to
        the vertex name before concatenation; the old code concatenated
        str + int and raised TypeError.
        """
        output = ''
        for name in self.__vertices.get_keys():
            adjacents = self.__vertices[name]
            output += name + ':'
            if adjacents.count() > 0:
                for edge in adjacents:
                    output += '(' + self.__vertices.get_key(edge[0]) + ', '
                    output += str(edge[1]) + ') '
            output += '\n'
        return output

    def save(self, filename):
        """Write the graph in the text format parsed by load():
        name -> destIndex;weight || destIndex;weight ...
        """
        file = open(filename, 'w')
        for name in self.__vertices.get_keys():
            adjacents = self.__vertices[name]
            file.write(name + Graph.NAME_SEPARATOR)
            if adjacents.count() > 0:
                count = 0
                for edge in adjacents:
                    destinationIndex = edge[0]
                    weight = edge[1]
                    if count != 0:
                        file.write(Graph.ADJ_LIST_SEPARATOR)
                    file.write(str(destinationIndex) + Graph.WEIGHT_SEPARATOR)
                    file.write(str(weight))
                    count += 1
            file.write('\n')
        file.close()

    def load(self, filename):
        """Read a graph from the text format written by save().

        Destination indices are stored raw, so edges can reference
        vertices that appear later in the file.
        """
        file = open(filename)
        for line in file:
            pair = line.split(Graph.NAME_SEPARATOR)
            name = pair[0].strip()
            adjlist = pair[1].strip()
            self.add_vertex(name)
            edges = adjlist.split(Graph.ADJ_LIST_SEPARATOR)
            for edge in edges:
                edge = edge.strip()
                if len(edge) > 0:
                    words = edge.split(Graph.WEIGHT_SEPARATOR)
                    index = int(words[0].strip())
                    weight = int(words[1])
                    self.__vertices[name].add([index, weight])
        file.close()
| [
[
1,
0,
0.0131,
0.0065,
0,
0.66,
0,
593,
0,
1,
0,
0,
593,
0,
0
],
[
1,
0,
0.0196,
0.0065,
0,
0.66,
0.25,
909,
0,
1,
0,
0,
909,
0,
0
],
[
1,
0,
0.0261,
0.0065,
0,
0.... | [
"from structures.list import List",
"from structures.heap import Heap",
"from structures.unionfind import UnionFind",
"from graphs.graph import *",
"class ListGraph(Graph):\n\n def __init__(self, size=None):\n self.__vertices = HashTable(size)\n\n def add_vertex(self, name):\n self.__ver... |
# -*- coding: utf-8 -*-
from graphs.matrixgraph import MatrixGraph
from graphs.graph import Edge
import random
import math
class Generator():
    """Builds random test graphs and saves them to disk.

    NOTE(review): uses Python 2 print statements; this module only runs
    under Python 2.
    """

    def __init__(self):
        pass

    def generate(self, vcount, factor, filename):
        """Create a MatrixGraph with vcount vertices and about
        maxedges * factor edges (never fewer than vcount), then save it.

        factor: edge density relative to a complete digraph, must be <= 1.
        """
        if factor > 1:
            raise Exception('Invalid density factor.')
        # Maximum edge count of a complete digraph without self-loops.
        maxedges = (vcount - 1) * vcount
        ecount = int(maxedges * factor)
        if ecount < vcount:
            ecount = vcount
        # if ecount > 10000:
        #     ecount = 10000
        # print 'Going for', ecount, 'edges.'
        graph = MatrixGraph()
        for i in range(vcount):
            name = 'V' + str(i)
            graph.add_vertex(name)
        #self.random_generation(graph, vcount, ecount)
        self.secuential_generation(graph, vcount, ecount)
        graph.save(filename)

    def random_generation(self, graph, vcount, ecount):
        """Add ecount random edges, retrying whenever a duplicate is drawn."""
        for i in range(ecount):
            while True:
                edge = self.random_edge(vcount)
                if graph.get_edge(edge.source, edge.destination) is None:
                    graph.add_edge(edge.source, edge.destination, edge.weight)
                    break
                else:
                    print 'This edge already exists. Try again.'
                print edge.source, edge.destination, graph.edge_count()

    def secuential_generation(self, graph, vcount, ecount):
        """Deterministically spread roughly ecount edges over the vertices,
        stepping destinations by vdelta and wrapping past the last vertex.

        NOTE(review): `/` is integer division under Python 2, so the value
        is floored before math.ceil sees it — confirm the intended density.
        """
        edgespervertex = int(math.ceil(ecount / vcount))
        vdelta = int(vcount / edgespervertex)
        for src in range(vcount):
            for dest in range(src + 1, vcount + src + 1, vdelta):
                # Wrap destinations that run past the last vertex.
                if dest >= vcount:
                    dest = dest - vcount
                if src != dest:
                    weight = random.randint(0, 10)
                    source = 'V' + str(src)
                    destination = 'V' + str(dest)
                    #print source, '-', destination, ':', weight
                    graph.add_edge(source, destination, weight)

    def random_edge(self, vcount, src=None):
        """Return a random Edge between two distinct vertices, weight 0-10.

        src: optional fixed source index; a random one is drawn when None.
        """
        if src is None:
            src = random.randint(0, vcount - 1)
        dest = src
        # Re-draw until the destination differs from the source.
        while dest == src:
            dest = random.randint(0, vcount - 1)
        weight = random.randint(0, 10)
        src = 'V' + str(src)
        dest = 'V' + str(dest)
        return Edge(src, dest, weight)
| [
[
1,
0,
0.027,
0.0135,
0,
0.66,
0,
941,
0,
1,
0,
0,
941,
0,
0
],
[
1,
0,
0.0405,
0.0135,
0,
0.66,
0.25,
628,
0,
1,
0,
0,
628,
0,
0
],
[
1,
0,
0.0541,
0.0135,
0,
0.6... | [
"from graphs.matrixgraph import MatrixGraph",
"from graphs.graph import Edge",
"import random",
"import math",
"class Generator():\n\n def __init__(self):\n pass\n\n def generate(self, vcount, factor, filename):\n if factor > 1:\n raise Exception('Invalid density factor.')",
... |
# -*- coding: utf-8 -*-
from structures.hashtable import HashTable
class Graph():
    """Abstract base class for graph implementations.

    Defines the separators of the shared text serialization format
    (name -> destIndex;weight || destIndex;weight ...) and the operations
    concrete graphs are expected to override.
    """

    NAME_SEPARATOR = '->'       # separates the vertex name from its list
    ADJ_LIST_SEPARATOR = '||'   # separates edges within a list
    WEIGHT_SEPARATOR = ';'      # separates destination index from weight

    def __init__(self):
        pass

    def load(self, filename):
        """Read a graph from filename; concrete subclasses override this."""
        pass

    def save(self, filename):
        # Subclasses that support persistence must override this.
        raise Exception('save() not implemented.')

    def show(self):
        """Print the textual rendering (Python 2 print statement)."""
        print self.to_string()

    def add_vertex(self, name):
        pass

    def add_edge(self, source, destination, weight):
        pass

    def vertex_count(self):
        pass

    def edge_count(self):
        pass

    def run_kruskal(self):
        pass

    def run_prim(self):
        pass

    def run_dijkstra(self, source, destination):
        pass
| [
[
1,
0,
0.0476,
0.0238,
0,
0.66,
0,
112,
0,
1,
0,
0,
112,
0,
0
],
[
3,
0,
0.5595,
0.9048,
0,
0.66,
1,
90,
0,
11,
0,
0,
0,
0,
3
],
[
14,
1,
0.1667,
0.0238,
1,
0.07,
... | [
"from structures.hashtable import HashTable",
"class Graph():\n\n NAME_SEPARATOR = '->'\n ADJ_LIST_SEPARATOR = '||'\n WEIGHT_SEPARATOR = ';'\n\n def __init__(self):\n pass",
" NAME_SEPARATOR = '->'",
" ADJ_LIST_SEPARATOR = '||'",
" WEIGHT_SEPARATOR = ';'",
" def __init__(sel... |
# -*- coding: utf-8 -*-
import sys
import unittest
from tests.matrixgraph import *
from tests.listgraph import *
# Entry point: run every imported graph test case via unittest.
if __name__ == '__main__':
    unittest.main()
| [
[
1,
0,
0.2222,
0.1111,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.3333,
0.1111,
0,
0.66,
0.25,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.5556,
0.1111,
0,
0.66... | [
"import sys",
"import unittest",
"from tests.matrixgraph import *",
"from tests.listgraph import *",
"if __name__ == '__main__':\n unittest.main()",
" unittest.main()"
] |
# -*- coding: utf-8 -*-
import sys
import unittest
from tests.hashtable import *
from tests.heap import *
from tests.unionfind import *
from tests.list import *
# Entry point: run every imported data-structure test case via unittest.
if __name__ == '__main__':
    unittest.main()
| [
[
1,
0,
0.1818,
0.0909,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.2727,
0.0909,
0,
0.66,
0.1667,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.4545,
0.0909,
0,
0.... | [
"import sys",
"import unittest",
"from tests.hashtable import *",
"from tests.heap import *",
"from tests.unionfind import *",
"from tests.list import *",
"if __name__ == '__main__':\n unittest.main()",
" unittest.main()"
] |
# -*- coding: utf-8 -*-
from structures.hashtable import HashTable
class UnionFind():
    """Disjoint-set structure with union by size (no path compression).

    NOTE(review): union() links the given items' nodes directly instead of
    their roots, so callers appear expected to pass set representatives
    (find() results) — confirm at call sites.
    """

    def __init__(self, items):
        # One singleton set per item; sets maps item -> its tree node.
        self.sets = HashTable()
        for item in items:
            node = UnionFindNode(item)
            self.sets.set(item, node)
        self.__count = len(items)

    def find(self, item):
        """Return the representative item of the set containing `item`."""
        node = self.sets.get(item)
        # Walk parent links up to the root.
        while node.set is not None:
            node = node.set
        return node.item

    def union(self, item1, item2):
        """Merge the sets of item1 and item2, hanging the smaller tree
        under the larger one."""
        if item1 != item2:
            node1 = self.sets.get(item1)
            node2 = self.sets.get(item2)
            if node1.size < node2.size:
                node1.set = node2
                node2.size += node1.size
            else:
                node2.set = node1
                node1.size += node2.size
            self.__count -= 1

    def count(self):
        """Return the current number of disjoint sets."""
        return self.__count
class UnionFindNode():
    """One node of a union-find tree: payload, parent link and subtree size."""

    def __init__(self, item):
        self.item = item  # payload stored at this node
        self.set = None   # parent node; None while this node is a root
        self.size = 1     # subtree size (meaningful at roots)
| [
[
1,
0,
0.0476,
0.0238,
0,
0.66,
0,
112,
0,
1,
0,
0,
112,
0,
0
],
[
3,
0,
0.4643,
0.7143,
0,
0.66,
0.5,
845,
0,
4,
0,
0,
0,
0,
7
],
[
2,
1,
0.2262,
0.1429,
1,
0.29,... | [
"from structures.hashtable import HashTable",
"class UnionFind():\n\n def __init__(self, items):\n self.sets = HashTable()\n for item in items:\n node = UnionFindNode(item)\n self.sets.set(item, node)\n self.__count = len(items)",
" def __init__(self, items):\n ... |
# -*- coding: utf-8 -*-
class HashTable():
    """Open-addressing hash table (double hashing + linear probing).

    Keys are also kept in insertion order in __keys, so the table doubles
    as an ordered index (get_key / get_key_index).  Deletion is not
    supported, which keeps the probe sequences valid.
    """

    __initial_size = 32  # default bucket count

    def __init__(self, size=None):
        # size: initial number of buckets (defaults to __initial_size).
        self.__size = size
        if size is None:
            self.__size = HashTable.__initial_size
        # Each bucket is either None or [key_index, value].
        self.items = [None] * self.__size
        # Keys in insertion order; buckets refer to them by index.
        self.__keys = []

    def count(self):
        """Return the number of distinct keys stored."""
        return len(self.__keys)

    def set(self, key, value):
        """Insert key/value, or overwrite the value when key exists.

        BUGFIX: an existing key used to be appended to __keys again,
        inflating count() and filling get_keys() with duplicates.
        """
        index = self.__get_index(key)
        item = self.items[index]
        if item is not None and self.__keys[item[0]] == key:
            item[1] = value
            return
        keyIndex = self.count()
        self.__keys.append(key)
        # Grow before the load factor hurts probe lengths.
        if self.count() > 0.7 * self.__size:
            self.__rehash()
            # Buckets moved, so the slot must be recomputed.
            index = self.__get_index(key)
        self.items[index] = [keyIndex, value]

    def get(self, key):
        """Return the value stored for key, or None when absent."""
        index = self.__get_index(key)
        value = None
        if index is not None and self.items[index] is not None:
            value = self.items[index][1]
        return value

    def get_values(self):
        """Return all values, in bucket order (not insertion order)."""
        values = []
        for item in self.items:
            if item is not None:
                values.append(item[1])
        return values

    def get_keys(self):
        """Return the keys in insertion order."""
        return self.__keys

    def get_key(self, index):
        """Return the key at the given insertion index."""
        return self.__keys[index]

    def get_key_index(self, key):
        """Return the insertion index of key.

        NOTE(review): raises TypeError for an absent key (the bucket is
        None) — confirm callers only ask for existing keys.
        """
        index = self.__get_index(key)
        item = self.items[index]
        return item[0]

    def __get_index(self, key):
        "Resolve the bucket for key: double hashing, then linear probing."
        index = self.__primary_hash(key) % self.__size
        if self.__collision(key, index):
            index = self.__secondary_hash(key) % self.__size
        while self.__collision(key, index):
            index += 1
            index %= self.__size
        return index

    def __collision(self, key, index):
        # A slot collides when it is occupied by a different key.
        item = self.items[index]
        return (item is not None and self.__keys[item[0]] != key)

    def __get_hash(self, key, pos=0):
        # Combined hash (unused by the probing above; kept for compatibility).
        hash = self.__primary_hash(key)
        hash += pos * self.__secondary_hash(key)
        return hash

    def __primary_hash(self, key):
        "Used FNV hash function"
        key = str(key)
        h = 2166136261
        for letter in key:
            h = (h * 16777619) ^ ord(letter)
        return h

    def __secondary_hash(self, key):
        "Shift-Add-XOR hash function"
        key = str(key)
        h = 0
        for letter in key:
            h ^= (h << 5) + (h >> 2) + ord(letter)
        return h

    def __rehash(self):
        """Grow the bucket array and reinsert every occupied bucket."""
        olditems = self.items
        # Grow aggressively while the table is small.
        factor = 2
        if self.count() < 50000:
            factor = 4
        self.__size = int(factor * self.__size)
        self.items = [None] * self.__size
        for item in olditems:
            if item is not None:
                itemKey = self.__keys[item[0]]
                index = self.__get_index(itemKey)
                self.items[index] = item

    def __getitem__(self, key):
        return self.get(key)

    def __setitem__(self, key, value):
        return self.set(key, value)
| [
[
3,
0,
0.5149,
0.9776,
0,
0.66,
0,
631,
0,
16,
0,
0,
0,
0,
25
],
[
14,
1,
0.0373,
0.0075,
1,
0.12,
0,
323,
1,
0,
0,
0,
0,
1,
0
],
[
2,
1,
0.0784,
0.0597,
1,
0.12,
... | [
"class HashTable():\n __initial_size = 32\n\n def __init__(self, size=None):\n self.__size = size\n\n if size is None:\n self.__size = HashTable.__initial_size",
" __initial_size = 32",
" def __init__(self, size=None):\n self.__size = size\n\n if size is None:\... |
# -*- coding: utf-8 -*-
class List():
    """Doubly linked list with a manual traversal cursor.

    Nodes are 3-element lists: [item, prev_node, next_node].  The cursor
    (__current) is driven by reset()/next()/prev(); __iter__ provides
    normal Python iteration via ListIterator.
    """

    def __init__(self):
        self.__begin = None    # first node
        self.__end = None      # last node
        self.__current = None  # traversal cursor
        self.__size = 0

    def empty(self):
        """Return True when the list holds no items."""
        return self.__size == 0

    def pop(self):
        """Alias for pop_last()."""
        return self.pop_last()

    def pop_last(self):
        """Remove and return the last item, or None when empty."""
        item = None
        if self.__end is not None:
            self.__size -= 1
            item = self.__end[0]
            if self.__end[1] is not None:
                prev = self.__end[1]
                prev[2] = None
                self.__end = prev
            else:
                # BUGFIX: removing the only node must clear both ends.
                # The old `elif self.__begin is None` branch could never
                # run here (begin == end != None), leaving stale begin/end
                # references that corrupted later add/pop calls.
                self.__end = None
                self.__begin = None
        return item

    def pop_first(self):
        """Remove and return the first item, or None when empty."""
        item = None
        if self.__begin is not None:
            self.__size -= 1
            item = self.__begin[0]
            if self.__begin[2] is not None:
                next = self.__begin[2]
                next[1] = None
                self.__begin = next
            else:
                self.__end = None
                self.__begin = None
        return item

    def add(self, item):
        """Append item at the end (alias for add_last)."""
        self.add_last(item)

    def add_first(self, item):
        """Insert item at the front."""
        self.__size += 1
        node = [item, None, None]
        if self.__begin is None:
            self.__begin = node
            self.__end = node
        else:
            node[2] = self.__begin
            node[1] = None
            self.__begin[1] = node
            self.__begin = node

    def add_last(self, item):
        """Append item at the end."""
        self.__size += 1
        node = [item, None, None]
        if self.__end is None:
            self.__begin = node
            self.__end = node
        else:
            node[1] = self.__end
            node[2] = None
            self.__end[2] = node
            self.__end = node

    def get_first(self):
        """Return the first NODE ([item, prev, next]), not the item."""
        return self.__begin

    def get_last(self):
        """Return the last NODE ([item, prev, next]), not the item."""
        return self.__end

    def next(self):
        """Advance the cursor one node forward and return the new node."""
        if self.__current is not None:
            self.__current = self.__current[2]
        return self.__current

    def prev(self):
        """Move the cursor one node back and return the new node.

        BUGFIX: the old body read `this.__current`, a NameError at runtime.
        """
        if self.__current is not None:
            self.__current = self.__current[1]
        return self.__current

    def current(self):
        """Return the node the cursor points at (or None)."""
        return self.__current

    def count(self):
        """Return the number of items."""
        return self.__size

    def reset(self):
        """Move the cursor back to the first node."""
        self.__current = self.__begin

    def __iter__(self):
        return ListIterator(self)

    def to_string(self):
        """Render the items separated by ';'.

        NOTE(review): prints item[0], so it assumes the stored items are
        themselves subscriptable (the graph code stores edge lists) —
        confirm before using with scalar items.
        """
        output = ''
        for item in self:
            value = str(item[0])
            if value is None:
                value = ''
            output += value + ';'
        return output
#class ListNode:
#
# def __init__(self, item):
# self.item = item
# self.next = None
# self.prev = None
class ListIterator:
    """Iterator over a List's items (the payloads, not the nodes).

    Drives the list's own cursor, so iterating resets and consumes the
    cursor position of the underlying list.
    """

    def __init__(self, list):
        self.list = list
        # Start from the first node.
        self.list.reset()

    def __next__(self):
        """Return the next item; raise StopIteration at the end.

        BUGFIX: defined as __next__ so Python 3's iterator protocol works;
        the class previously only offered the Python 2 `next()` spelling.
        """
        node = self.list.current()
        if node is None:
            raise StopIteration
        item = node[0]
        self.list.next()
        return item

    # Keep the Python 2 protocol name as an alias for existing callers.
    next = __next__

    def __iter__(self):
        return self
| [
[
3,
0,
0.4071,
0.7643,
0,
0.66,
0,
24,
0,
17,
0,
0,
0,
0,
4
],
[
2,
1,
0.0571,
0.0357,
1,
0.35,
0,
555,
0,
1,
0,
0,
0,
0,
0
],
[
14,
2,
0.05,
0.0071,
2,
0.52,
... | [
"class List():\n\n def __init__(self):\n self.__begin = None\n self.__end = None\n self.__current = None\n self.__size = 0",
" def __init__(self):\n self.__begin = None\n self.__end = None\n self.__current = None\n self.__size = 0",
" self.__b... |
# -*- coding: utf-8 -*-
class Heap():
    """Binary min-heap backed by a Python list.

    Stores HeapItem(key, data) pairs ordered by key; extract_min returns
    the data of the smallest key.
    """

    # NOTE(review): unused legacy constant, kept for compatibility.
    __maxSize = 100

    def __init__(self):
        self.items = []

    def insert(self, key, data):
        """Add data with the given priority key."""
        item = HeapItem(key, data)
        self.items.append(item)
        index = len(self.items) - 1
        self.__heapify_up(index)

    def change_key(self, index, key):
        """Replace the key at index and restore the heap invariant."""
        self.items[index].key = key
        self.__heapify(index)

    def change_data(self, index, data):
        """Replace the data at index (keys are untouched, so the
        subsequent re-heapify is a no-op)."""
        self.items[index].data = data
        self.__heapify(index)

    def extract_min(self):
        """Remove and return the data with the smallest key.

        Raises IndexError when the heap is empty.
        """
        smallest = self.items[0]
        self.__delete(0)
        return smallest.data

    def empty(self):
        """Return True when the heap holds no items."""
        return self.__length() == 0

    def __delete(self, index):
        # Move the last item into the hole, then restore the invariant.
        item = self.items.pop()
        if index < self.__length():
            self.items[index] = item
            self.__heapify(index)

    def __heapify(self, index):
        # Bubble in whichever direction the invariant is broken.
        parent = self.__parent(index)
        if self.items[index].key < self.items[parent].key:
            self.__heapify_up(index)
        else:
            self.__heapify_down(index)

    def __heapify_up(self, index):
        # Swap with the parent while the child's key is smaller.
        if index > 0:
            parent = self.__parent(index)
            if self.items[index].key < self.items[parent].key:
                self.__swap(index, parent)
                self.__heapify_up(parent)

    def __heapify_down(self, index):
        length = self.__length()
        left = self.__left(index)
        right = self.__right(index)
        # No children: nothing to do.
        if left >= length:
            return
        # Both children exist: compete with the smaller one.
        elif right < length:
            if self.items[left].key < self.items[right].key:
                smaller = left
            else:
                smaller = right
        # Only the left child exists.
        elif left == length - 1:
            smaller = left
        else:
            raise Exception('There something wrong!')
        item = self.items[index]
        child = self.items[smaller]
        if child.key < item.key:
            self.__swap(index, smaller)
            self.__heapify_down(smaller)

    def __swap(self, i, j):
        "Swaps nodes in the heap"
        temp = self.items[i]
        self.items[i] = self.items[j]
        self.items[j] = temp

    def __length(self):
        "Returns the number of elements in the heap"
        return len(self.items)

    def __item(self, index):
        # Return the item at index, or None when out of range.
        item = None
        if index < self.__length():
            item = self.items[index]
        return item

    def __left(self, index):
        return (2 * index) + 1

    def __right(self, index):
        return (2 * index) + 2

    def __parent(self, index):
        # BUGFIX: use floor division; `/` yields a float on Python 3 and
        # floats are not valid list indices.
        if index == 0:
            return 0
        elif index % 2 == 0:
            return index // 2 - 1
        else:
            return index // 2

    def size(self):
        """Return the number of items in the heap."""
        return self.__length()

    def show(self):
        """Print every node with its children (debugging aid).

        Uses the single-argument print() call form, which behaves the
        same under Python 2 and Python 3.
        """
        for index in range(self.__length()):
            item = self.__item(index)
            left = self.__item(self.__left(index))
            right = self.__item(self.__right(index))
            output = str(item.key)
            if left is not None:
                output += ' (left=' + str(left.key) + ')'
            if right is not None:
                output += ' (right=' + str(right.key) + ')'
            print(output)
        print('---------------------------')


class HeapItem():
    """Key/data pair stored in the heap."""

    def __init__(self, key, data):
        self.key = key
        self.data = data
| [
[
3,
0,
0.489,
0.9265,
0,
0.66,
0,
538,
0,
18,
0,
0,
0,
0,
38
],
[
14,
1,
0.0368,
0.0074,
1,
0.4,
0,
822,
1,
0,
0,
0,
0,
1,
0
],
[
2,
1,
0.0551,
0.0147,
1,
0.4,
... | [
"class Heap():\n __maxSize = 100\n\n def __init__(self):\n self.items = []\n\n def insert(self, key, data):\n item = HeapItem(key, data)",
" __maxSize = 100",
" def __init__(self):\n self.items = []",
" self.items = []",
" def insert(self, key, data):\n i... |
# -*- coding: utf-8 -*-
###############################################################################
# Import Modules
###############################################################################
import sys
import unittest
from backtracking import backtracking
from galeshapley import galeshapley
class TdaTP1(unittest.TestCase):
    """End-to-end tests: run each matching algorithm on an input file and
    compare its output string against a stored expected-output file."""

    def getOutput(self, filename):
        """Return the full contents of an expected-output file.

        BUGFIX: used the file() builtin, which no longer exists on
        Python 3, and leaked the handle; open() in a with-block fixes both.
        """
        with open(filename) as outputFile:
            return outputFile.read()

    def getGaleShapley(self, filename):
        """Run Gale-Shapley on the input file and return its result string."""
        gs = galeshapley.GaleShapley(filename)
        gs.match()
        return gs.toString()

    def getBacktracking(self, filename):
        """Run the backtracking matcher and return its first stable solution."""
        b = backtracking.Backtracking(filename)
        b.match()
        return b.solution

    def testGaleShapleyInput1(self):
        result = self.getGaleShapley('input1.txt')
        expected = self.getOutput('output1.gs.txt')
        self.assertEqual(result, expected)

    def testGaleShapleyInput2(self):
        result = self.getGaleShapley('input2.txt')
        expected = self.getOutput('output2.gs.txt')
        self.assertEqual(result, expected)

    def testGaleShapleyInput3(self):
        result = self.getGaleShapley('input3.txt')
        expected = self.getOutput('output3.gs.txt')
        self.assertEqual(result, expected)

    def testBacktrackingInput1(self):
        result = self.getBacktracking('input1.txt')
        expected = self.getOutput('output1.bt.txt')
        self.assertEqual(result, expected)

    def testBacktrackingInput2(self):
        result = self.getBacktracking('input2.txt')
        expected = self.getOutput('output2.bt.txt')
        self.assertEqual(result, expected)

    def testBacktrackingInput3(self):
        result = self.getBacktracking('input3.txt')
        expected = self.getOutput('output3.bt.txt')
        self.assertEqual(result, expected)
# Entry point: run the matching-algorithm test suite.
if __name__ == '__main__':
    unittest.main()
| [
[
1,
0,
0.0847,
0.0169,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.1017,
0.0169,
0,
0.66,
0.2,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.1186,
0.0169,
0,
0.66,... | [
"import sys",
"import unittest",
"from backtracking import backtracking",
"from galeshapley import galeshapley",
"class TdaTP1(unittest.TestCase):\n\n def getOutput(self, filename):\n outputFile = file(filename)\n return outputFile.read()\n\n def getGaleShapley(self, filename):\n ... |
# -*- coding: utf-8 -*-
###############################################################################
# Import Modules
###############################################################################
import sys
from backtracking import backtracking
from galeshapley import galeshapley
def compareResult(filename, resultStr):
    """Print whether resultStr matches the contents of filename.

    NOTE(review): uses the Python 2 file() builtin and print statements;
    this script only runs under Python 2.  The handle is never closed.
    """
    outputFile = file(filename)
    outputStr = outputFile.read()
    if outputStr == resultStr:
        print "Sucess! The result matches the file contents for:", filename
    else:
        print "Fail! The result doesn't match the file contents for:", filename
def errormsg():
    """Print the command-line usage of this script (Python 2 prints)."""
    print "Error. Script usage:"
    print " $ python tdatp1.py [-gs|-bt] [inputFilename] [outputFilename]"
def main():
    """Parse argv, run the selected matching algorithm and verify output.

    Expected argv: [-gs|-bt] inputFilename outputFilename
    -gs runs Gale-Shapley, -bt the backtracking enumerator; the result is
    printed and compared against the expected-output file.
    """
    if len(sys.argv) == 4:
        algorithm = sys.argv[1]
        inputFilename = sys.argv[2]
        outputFilename = sys.argv[3]
        resultStr = ''
        if algorithm == '-gs':
            print "------------------------------------"
            print "Gale & Shapley"
            print "------------------------------------"
            gs = galeshapley.GaleShapley(inputFilename)
            gs.match()
            resultStr = gs.toString()
        elif algorithm == '-bt':
            print "------------------------------------"
            print "Backtracking"
            print "------------------------------------"
            b = backtracking.Backtracking(inputFilename)
            b.match()
            resultStr = b.solution
        else:
            errormsg()
        # Only compare when one of the algorithms actually ran.
        if resultStr != '':
            print resultStr
            compareResult(outputFilename, resultStr)
    else:
        errormsg()
# Entry point when run as a script.
if __name__ == '__main__':
    main()
| [
[
1,
0,
0.0877,
0.0175,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.1053,
0.0175,
0,
0.66,
0.1667,
481,
0,
1,
0,
0,
481,
0,
0
],
[
1,
0,
0.1228,
0.0175,
0,
... | [
"import sys",
"from backtracking import backtracking",
"from galeshapley import galeshapley",
"def compareResult(filename, resultStr):\n outputFile = file(filename)\n outputStr = outputFile.read()\n if outputStr == resultStr:\n print(\"Sucess! The result matches the file contents for:\", filen... |
# -*- coding: utf-8 -*-
from collections import deque
from model.person import Person
from model.solution import Solution
class Backtracking:
    """Searches stable matchings by backtracking; keeps the first found."""

    def __init__(self, filename):
        # Men not yet matched in the current partial solution (used as a stack).
        self.men = deque()
        self.women = []
        # First complete stable matching found, as a string (None until then).
        self.solution = None
        # Partial matching being built during the search.
        self.__temp = Solution()
        self.load_data(filename)

    def load_data(self, filename):
        """Loads men and women [O(n)] and their preference lists [O(n)]
        O(n^2)"""
        file = open(filename)
        genre = 'Male'
        for line in file:
            words = line.split(":")
            # A blank line separates the men's section from the women's.
            if line == '\n':
                genre = 'Female'
            else:
                name = words[0]
                preferences = words[1]
                person = Person(name, preferences)
                if genre == 'Male':
                    self.men.append(person)
                else:
                    self.women.append(person)
        file.close()

    def match(self):
        """Finds all stable matchings
        O(n^n)"""
        # Extend the partial solution while unmatched men remain.
        if len(self.men) != 0:
            # Take the next man off the stack.
            # NOTE(review): deque.pop() removes from the RIGHT end, i.e. the
            # most recently appended man, not the first — confirm intent.
            m = self.men.pop()
            # Try every woman who is currently uncommitted.
            for w in self.women:
                if w.fiancee is None:
                    # Tentatively pair them up.
                    pair = [m, w]
                    m.engage(w)
                    # Only recurse when the new pair keeps the partial
                    # solution free of blocking pairs.
                    if self.__temp.is_stable(pair):
                        self.__temp.push(pair)
                        self.match()
                        self.__temp.pop()
                    # Undo the tentative engagement before the next woman.
                    m.break_up()
            # Put the man back so sibling branches can use him.
            self.men.append(m)
        else:
            # Every man is matched: record only the first complete solution.
            if self.solution is None:
                self.solution = self.__temp.toString()
| [
[
1,
0,
0.0339,
0.0169,
0,
0.66,
0,
193,
0,
1,
0,
0,
193,
0,
0
],
[
1,
0,
0.0508,
0.0169,
0,
0.66,
0.3333,
231,
0,
1,
0,
0,
231,
0,
0
],
[
1,
0,
0.0678,
0.0169,
0,
... | [
"from collections import deque",
"from model.person import Person",
"from model.solution import Solution",
"class Backtracking:\n\n def __init__(self, filename):\n self.men = deque()\n self.women = []\n self.solution = None\n self.__temp = Solution()\n self.load_data(file... |
# -*- coding: utf-8 -*-
from collections import deque
from model.person import Person
from model.solution import Solution
class GaleShapley:
    """Stable matching via the Gale-Shapley proposal algorithm."""

    def __init__(self, filename):
        self.singles = deque()
        self.men = []
        self.women = dict()
        self.load_data(filename)

    def load_data(self, filename):
        """Loads men and women [O(n)] and their preference lists [O(n)]
        O(n^2)"""
        input_file = open(filename)
        genre = 'Male'
        for line in input_file:
            if line == '\n':
                # Blank line separates the men's section from the women's.
                genre = 'Female'
                continue
            words = line.split(":")
            name = words[0]
            person = Person(name, words[1])
            if genre == 'Male':
                self.men.append(person)
            else:
                self.women[name] = person
        input_file.close()
        # Initially every man is single.
        self.singles = deque(self.men)

    def match(self):
        """Finds a stable matching in O(n^2)"""
        while len(self.singles) > 0:
            suitor = self.singles.pop()
            # Propose to his next-preferred woman.
            prospect_name = suitor.get_prospect()
            if prospect_name is None:
                raise Exception("This shouldn't be happening!")
            woman = self.women[prospect_name]
            if not woman.prefers(suitor):
                # Rejected: back to the queue to try his next preference.
                self.singles.appendleft(suitor)
                continue
            if woman.fiancee is not None:
                # She trades up; her ex becomes single again.
                self.singles.appendleft(woman.fiancee)
                woman.break_up()
            suitor.engage(woman)

    def toString(self):
        """Return one '(man, fiancee)' line per man."""
        result = ''
        for man in self.men:
            result += "(" + man.name + ", " + man.fiancee.name + ")\n"
        return result
| [
[
1,
0,
0.0317,
0.0159,
0,
0.66,
0,
193,
0,
1,
0,
0,
193,
0,
0
],
[
1,
0,
0.0476,
0.0159,
0,
0.66,
0.3333,
231,
0,
1,
0,
0,
231,
0,
0
],
[
1,
0,
0.0635,
0.0159,
0,
... | [
"from collections import deque",
"from model.person import Person",
"from model.solution import Solution",
"class GaleShapley:\n\n def __init__(self, filename):\n self.singles = deque()\n self.men = []\n self.women = dict()\n\n self.load_data(filename)",
" def __init__(self... |
# -*- coding: utf-8 -*-
from collections import deque
class Solution:
    """A partial matching: an ordered collection of (man, woman) pairs."""

    def __init__(self):
        # Pairs in the order they were pushed; each pair is [man, woman].
        self.__pairs = deque()

    def is_stable(self, pair):
        """Return False when `pair` forms a blocking pair with any pair
        already in the solution, True otherwise."""
        for p in self.__pairs:
            m1 = p[0]
            w1 = p[1]
            m2 = pair[0]
            w2 = pair[1]
            # Instability: two people who both prefer each other over
            # their currently assigned partners.
            if ((m1.prefers(w2) and w2.prefers(m1))
                or (m2.prefers(w1) and w1.prefers(m2))):
                return False
        return True

    def push(self, pair):
        """Append a pair to the partial solution."""
        self.__pairs.append(pair)

    def pop(self):
        """Remove and return the most recently pushed pair."""
        pair = self.__pairs.pop()
        return pair

    def show(self):
        # Python 2 print statement; this module targets Python 2.
        print self.toString()

    def toString(self):
        """Return one '(man, woman)' line per pair."""
        str = ''
        for p in self.__pairs:
            str += "(" + p[0].name + ", " + p[1].name + ")\n"
        return str
| [
[
1,
0,
0.0833,
0.0278,
0,
0.66,
0,
193,
0,
1,
0,
0,
193,
0,
0
],
[
3,
0,
0.5833,
0.8611,
0,
0.66,
1,
659,
0,
6,
0,
0,
0,
0,
9
],
[
2,
1,
0.2361,
0.0556,
1,
0.84,
... | [
"from collections import deque",
"class Solution:\n\n def __init__(self):\n self.__pairs = deque()\n\n def is_stable(self, pair):\n for p in self.__pairs:\n m1 = p[0]",
" def __init__(self):\n self.__pairs = deque()",
" self.__pairs = deque()",
" def is_sta... |
# -*- coding: utf-8 -*-
from collections import deque
class Person:
    """A matching participant with an ordered preference list."""

    def __init__(self, name, preferences=''):
        """Initializes the preference lookup structures.  O(n).

        preferences: comma-separated string of prospect names.

        BUGFIX: the default used to be a mutable list ([]), which has no
        split() and crashed whenever the argument was omitted; an empty
        string keeps the signature backward-compatible for every caller
        that passed a string.
        """
        self.prefnames = deque()
        self.name = name
        self.fiancee = None
        # Maps a prospect's name to its position in the preference string.
        self.prefs = dict()
        if preferences:
            for i, prospectname in enumerate(preferences.split(",")):
                prospectname = prospectname.strip()
                self.prefs[prospectname] = i
                # appendleft + pop() yields names in their original order.
                self.prefnames.appendleft(prospectname)

    def prefers(self, p1):
        """Determines whether this person prefers p1 over the current
        fiancee.  Returns True when single.  O(1).

        NOTE(review): a LARGER stored index wins, i.e. names later in the
        preference string are treated as more preferred — confirm the
        input files list preferences worst-to-best.
        """
        result = True
        if self.fiancee is not None:
            result = self.prefs[p1.name] > self.prefs[self.fiancee.name]
        return result

    def engage(self, p):
        """Symmetrically link self and p as fiancees."""
        self.fiancee = p
        p.fiancee = self

    def break_up(self):
        """Symmetrically clear the current engagement, if any."""
        if self.fiancee is not None:
            self.fiancee.fiancee = None
            self.fiancee = None

    def get_prospect(self):
        """Return the next name in preference order, or None when exhausted."""
        if len(self.prefnames) > 0:
            return self.prefnames.pop()
        return None
| [
[
1,
0,
0.0465,
0.0233,
0,
0.66,
0,
193,
0,
1,
0,
0,
193,
0,
0
],
[
3,
0,
0.5581,
0.907,
0,
0.66,
1,
362,
0,
5,
0,
0,
0,
0,
7
],
[
2,
1,
0.3023,
0.3023,
1,
0.27,
... | [
"from collections import deque",
"class Person:\n\n def __init__(self, name, preferences=[]):\n \"\"\"Initializes the preferences hashtable.\n O(n)\"\"\"\n self.prefnames = deque()\n self.name = name\n self.fiancee = None",
" def __init__(self, name, preferences=[]):\n ... |
__author__="Sergey Karakovskiy, sergey at idsia fullstop ch"
__date__ ="$Feb 18, 2009 1:01:12 AM$"
class SimplePyAgent:
    """ example of usage of AmiCo

    A minimal agent: always returns the same action tuple and ignores
    rewards and episode boundaries.
    """

    # Lazily initialized display name; filled in on first access.
    _name = None

    def getAction(self, obs):
        """Return the fixed action tuple regardless of the observation."""
        return (0, 1, 0, 0, 0)

    def giveReward(self, reward):
        """Rewards are ignored by this agent."""
        pass

    def _getName(self):
        # Default the name to the class name on first read.
        if self._name is None:
            self._name = self.__class__.__name__
        return self._name

    def _setName(self, newname):
        """Change name to newname. Uniqueness is not guaranteed anymore."""
        self._name = newname

    name = property(_getName, _setName)

    def __repr__(self):
        """ The default representation of a named object is its name. """
        return "<%s '%s'>" % (self.__class__.__name__, self.name)

    def newEpisode(self):
        """Episode boundaries require no state reset for this agent."""
        pass
| [
[
14,
0,
0.0286,
0.0286,
0,
0.66,
0,
777,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.0571,
0.0286,
0,
0.66,
0.5,
763,
1,
0,
0,
0,
0,
3,
0
],
[
3,
0,
0.5286,
0.8571,
0,
0.66,... | [
"__author__=\"Sergey Karakovskiy, sergey at idsia fullstop ch\"",
"__date__ =\"$Feb 18, 2009 1:01:12 AM$\"",
"class SimplePyAgent:\n# class MarioAgent(Agent):\n \"\"\" example of usage of AmiCo\n \"\"\"\n\n def getAction(self, obs):\n ret = (0, 1, 0, 0, 0)\n return ret",
" \"\"\" ... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.