THU-IAR committed on
Commit c84b37e · 1 Parent(s): d36557f

Upload 28 files

Code/GBDT_train.py ADDED
@@ -0,0 +1,57 @@
import numpy as np
import argparse
import pickle
import random
import os
from model.gbdt_regressor import GradientBoostingRegressor

def train_GBDT(number: int):
    '''
    Function Description:
    Train the GBDT on the given problem instances and the neural encodings of their decision variables.

    Parameters:
    - number: Number of problem instances.

    Return:
    The trained GBDT is packaged as GBDT.pickle. The function does not have a return value.
    '''
    print("Testing the performance of the GBDT regressor...")
    # Load data, subsampling so that roughly max_num samples are kept in total.
    data = []
    label = []
    max_num = 200000
    now_num = 0
    for num in range(number):
        # Check that the encoding file exists and read it.
        if not os.path.exists('./example/node' + str(num) + '.pickle'):
            print("No problem file!")
            return
        with open('./example/node' + str(num) + '.pickle', "rb") as f:
            node = pickle.load(f)
        now_data = node[0]
        now_label = node[1]
        # Keep each sample with probability p so the expected total is max_num.
        p = max_num / (len(node[0]) * number)
        for i in range(len(now_data)):
            if random.random() <= p:
                data.append(now_data[i])
                label.append(now_label[i])
                now_num += 1
    # Train model
    print(now_num)
    reg = GradientBoostingRegressor()
    reg.fit(data=np.array(data), label=np.array(label), n_estimators=30, learning_rate=0.1, max_depth=5, min_samples_split=2)
    # Save the trained model.
    with open('./GBDT.pickle', 'wb') as f:
        pickle.dump([reg], f)

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--number", type=int, default=10, help='The number of instances.')
    return parser.parse_args()

if __name__ == '__main__':
    args = parse_args()
    train_GBDT(**vars(args))
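
Once GBDT.pickle has been written, the regressor can be loaded and queried for initial-solution predictions. A minimal sketch, assuming a completed GradientBoostingRegressor (see model/gbdt_regressor.py) and using random 16-dimensional vectors as stand-ins for the GNN encodings:

import pickle
import numpy as np

with open('./GBDT.pickle', 'rb') as f:
    [reg] = pickle.load(f)
encodings = np.random.rand(4, 16)      # placeholder for the neural encodings
initial_solution = reg.predict(encodings)
loss = reg.calc(encodings)             # per-variable prediction loss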
Code/GNN_GBDT.yml ADDED
@@ -0,0 +1,169 @@
name: GNN_GBDT
channels:
  - gurobi
  - pytorch
  - nvidia
  - conda-forge
  - http://conda.anaconda.org/gurobi
  - defaults
dependencies:
  - _libgcc_mutex=0.1=main
  - _openmp_mutex=5.1=1_gnu
  - ampl-mp=3.1.0=h2cc385e_1006
  - blas=1.0=mkl
  - brotlipy=0.7.0=py39h27cfd23_1003
  - bzip2=1.0.8=h7b6447c_0
  - ca-certificates=2022.12.7=ha878542_0
  - certifi=2022.12.7=pyhd8ed1ab_0
  - cffi=1.15.1=py39h5eee18b_3
  - charset-normalizer=2.0.4=pyhd3eb1b0_0
  - cppad=20210000.6=h9c3ff4c_0
  - cryptography=38.0.1=py39h9ce1e76_0
  - cuda=11.7.1=0
  - cuda-cccl=11.7.91=0
  - cuda-command-line-tools=11.7.1=0
  - cuda-compiler=11.7.1=0
  - cuda-cudart=11.7.99=0
  - cuda-cudart-dev=11.7.99=0
  - cuda-cuobjdump=11.7.91=0
  - cuda-cupti=11.7.101=0
  - cuda-cuxxfilt=11.7.91=0
  - cuda-demo-suite=12.0.76=0
  - cuda-documentation=12.0.76=0
  - cuda-driver-dev=11.7.99=0
  - cuda-gdb=12.0.90=0
  - cuda-libraries=11.7.1=0
  - cuda-libraries-dev=11.7.1=0
  - cuda-memcheck=11.8.86=0
  - cuda-nsight=12.0.78=0
  - cuda-nsight-compute=12.0.0=0
  - cuda-nvcc=11.7.99=0
  - cuda-nvdisasm=12.0.76=0
  - cuda-nvml-dev=11.7.91=0
  - cuda-nvprof=12.0.90=0
  - cuda-nvprune=11.7.91=0
  - cuda-nvrtc=11.7.99=0
  - cuda-nvrtc-dev=11.7.99=0
  - cuda-nvtx=11.7.91=0
  - cuda-nvvp=12.0.90=0
  - cuda-runtime=11.7.1=0
  - cuda-sanitizer-api=12.0.90=0
  - cuda-toolkit=11.7.1=0
  - cuda-tools=11.7.1=0
  - cuda-visual-tools=11.7.1=0
  - ecole=0.7.3=py39h0db7e46_1
  - ffmpeg=4.3=hf484d3e_0
  - fftw=3.3.9=h27cfd23_1
  - flit-core=3.6.0=pyhd3eb1b0_0
  - fmt=8.1.1=h4bd325d_0
  - freetype=2.12.1=h4a9f257_0
  - gds-tools=1.5.0.59=0
  - giflib=5.2.1=h7b6447c_0
  - gmp=6.2.1=h295c915_3
  - gnutls=3.6.15=he1e5248_0
  - gurobi=10.0.0=py39_0
  - idna=3.4=py39h06a4308_0
  - intel-openmp=2021.4.0=h06a4308_3561
  - ipopt=3.14.1=h7ede334_0
  - jpeg=9e=h7f8727e_0
  - lame=3.100=h7b6447c_0
  - lcms2=2.12=h3be6417_0
  - ld_impl_linux-64=2.38=h1181459_1
  - lerc=3.0=h295c915_0
  - libblas=3.9.0=12_linux64_mkl
  - libcublas=11.10.3.66=0
  - libcublas-dev=11.10.3.66=0
  - libcufft=10.7.2.124=h4fbf590_0
  - libcufft-dev=10.7.2.124=h98a8f43_0
  - libcufile=1.5.0.59=0
  - libcufile-dev=1.5.0.59=0
  - libcurand=10.3.1.50=0
  - libcurand-dev=10.3.1.50=0
  - libcusolver=11.4.0.1=0
  - libcusolver-dev=11.4.0.1=0
  - libcusparse=11.7.4.91=0
  - libcusparse-dev=11.7.4.91=0
  - libdeflate=1.8=h7f8727e_5
  - libedit=3.1.20191231=he28a2e2_2
  - libffi=3.4.2=h6a678d5_6
  - libgcc-ng=11.2.0=h1234567_1
  - libgfortran-ng=11.2.0=h00389a5_1
  - libgfortran5=11.2.0=h1234567_1
  - libgomp=11.2.0=h1234567_1
  - libiconv=1.16=h7f8727e_2
  - libidn2=2.3.2=h7f8727e_0
  - liblapack=3.9.0=12_linux64_mkl
  - libnpp=11.7.4.75=0
  - libnpp-dev=11.7.4.75=0
  - libnvjpeg=11.8.0.2=0
  - libnvjpeg-dev=11.8.0.2=0
  - libpng=1.6.37=hbc83047_0
  - libstdcxx-ng=11.2.0=h1234567_1
  - libtasn1=4.16.0=h27cfd23_0
  - libtiff=4.4.0=hecacb30_2
  - libunistring=0.9.10=h27cfd23_0
  - libwebp=1.2.4=h11a3e52_0
  - libwebp-base=1.2.4=h5eee18b_0
  - lz4-c=1.9.4=h6a678d5_0
  - metis=5.1.0=h58526e2_1006
  - mkl=2021.4.0=h06a4308_640
  - mkl-service=2.4.0=py39h7f8727e_0
  - mkl_fft=1.3.1=py39hd3c417c_0
  - mkl_random=1.2.2=py39h51133e4_0
  - mumps-include=5.2.1=ha770c72_11
  - mumps-seq=5.2.1=h2104b81_11
  - ncurses=6.3=h5eee18b_3
  - nettle=3.7.3=hbbd107a_1
  - nsight-compute=2022.4.0.15=0
  - numpy=1.23.4=py39h14f4228_0
  - numpy-base=1.23.4=py39h31eccc5_0
  - openh264=2.1.1=h4ff587b_0
  - openssl=1.1.1s=h7f8727e_0
  - pillow=9.3.0=py39hace64e9_1
  - pip=22.3.1=py39h06a4308_0
  - pycparser=2.21=pyhd3eb1b0_0
  - pyopenssl=22.0.0=pyhd3eb1b0_0
  - pyscipopt=3.5.0=py39he80948d_0
  - pysocks=1.7.1=py39h06a4308_0
  - python=3.9.15=h7a1cb2a_2
  - python_abi=3.9=2_cp39
  - pytorch=1.13.1=py3.9_cuda11.7_cudnn8.5.0_0
  - pytorch-cuda=11.7=h67b0de4_1
  - pytorch-mutex=1.0=cuda
  - readline=8.2=h5eee18b_0
  - requests=2.28.1=py39h06a4308_0
  - scip=7.0.3=hf5bcbcd_1
  - scipy=1.9.3=py39h14f4228_0
  - scotch=6.0.9=h3858553_1
  - setuptools=65.5.0=py39h06a4308_0
  - six=1.16.0=pyhd3eb1b0_1
  - sqlite=3.40.0=h5082296_0
  - tbb=2020.2=h4bd325d_4
  - tk=8.6.12=h1ccaba5_0
  - torchaudio=0.13.1=py39_cu117
  - torchvision=0.14.1=py39_cu117
  - typing_extensions=4.4.0=py39h06a4308_0
  - tzdata=2022g=h04d1e81_0
  - unixodbc=2.3.10=h583eb01_0
  - urllib3=1.26.13=py39h06a4308_0
  - wheel=0.37.1=pyhd3eb1b0_0
  - xz=5.2.8=h5eee18b_0
  - zlib=1.2.13=h5eee18b_0
  - zstd=1.5.2=ha4553b6_0
  - pip:
      - jinja2==3.1.2
      - joblib==1.2.0
      - markupsafe==2.1.1
      - psutil==5.9.4
      - pyg-lib==0.1.0+pt113cu117
      - pyparsing==3.0.9
      - pytorch-metric-learning==1.6.3
      - scikit-learn==1.2.0
      - threadpoolctl==3.1.0
      - torch-cluster==1.6.0+pt113cu117
      - torch-geometric==2.2.0
      - torch-scatter==2.0.9
      - torch-sparse==0.6.16+pt113cu117
      - torch-spline-conv==1.2.1+pt113cu117
      - tqdm==4.64.1
prefix: /home/anaconda3/envs/GNN_GBDT
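
To recreate this environment, run `conda env create -f Code/GNN_GBDT.yml` followed by `conda activate GNN_GBDT`. The `prefix:` line is machine-specific and can be adjusted or removed before use.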
Code/GNN_inference.py ADDED
@@ -0,0 +1,138 @@
import argparse
import pickle
from pathlib import Path
from typing import Union

import os
import torch
import torch.nn.functional as F
import torch_geometric
from pytorch_metric_learning import losses

from model.graphcnn import GNNPolicy

class BipartiteNodeData(torch_geometric.data.Data):
    """
    Class Description:
    This class encodes a node bipartite graph observation, as returned by the `ecole.observation.NodeBipartite`
    observation function, in a format understood by the pytorch geometric data handlers.
    """

    def __init__(
        self,
        constraint_features,
        edge_indices,
        edge_features,
        variable_features,
        assignment1,
        assignment2
    ):
        super().__init__()
        self.constraint_features = constraint_features
        self.edge_index = edge_indices
        self.edge_attr = edge_features
        self.variable_features = variable_features
        self.assignment1 = assignment1
        self.assignment2 = assignment2

    def __inc__(self, key, value, store, *args, **kwargs):
        """
        Function Description:
        We overload the pytorch geometric method that tells how to increment indices when concatenating graphs
        for those entries (edge index, candidates) for which this is not obvious.
        """
        if key == "edge_index":
            return torch.tensor(
                [[self.constraint_features.size(0)], [self.variable_features.size(0)]]
            )
        elif key == "candidates":
            return self.variable_features.size(0)
        else:
            return super().__inc__(key, value, *args, **kwargs)


class GraphDataset(torch_geometric.data.Dataset):
    """
    Class Description:
    This class encodes a collection of graphs, as well as a method to load such graphs from the disk.
    It can be used in turn by the data loaders provided by pytorch geometric.
    """

    def __init__(self, sample_files):
        super().__init__(root=None, transform=None, pre_transform=None)
        self.sample_files = sample_files

    def len(self):
        return len(self.sample_files)

    def get(self, index):
        """
        Function Description:
        This method loads a node bipartite graph observation as saved on the disk during data collection.
        """
        with open(self.sample_files[index], "rb") as f:
            [variable_features, constraint_features, edge_indices, edge_features, solution1, solution2] = pickle.load(f)

        graph = BipartiteNodeData(
            torch.FloatTensor(constraint_features),
            torch.LongTensor(edge_indices),
            torch.FloatTensor(edge_features),
            torch.FloatTensor(variable_features),
            torch.LongTensor(solution1),
            torch.LongTensor(solution2),
        )

        # We must tell pytorch geometric how many nodes there are, for indexing purposes
        graph.num_nodes = len(constraint_features) + len(variable_features)
        graph.cons_nodes = len(constraint_features)
        graph.vars_nodes = len(variable_features)

        return graph

def make(number: int,
         model_path: str,
         device: torch.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")):
    """
    Function Description:
    Obtain the encoding information of the decision variables based on the input problem data and package the output.
    """
    policy = GNNPolicy().to(device)
    policy.load_state_dict(torch.load(model_path, map_location=device))
    File = []
    for num in range(number):
        if not os.path.exists('./example/pair' + str(num) + '.pickle'):
            print("No input file!")
            return
        File.append('example/pair' + str(num) + '.pickle')

    data = GraphDataset(File)
    loader = torch_geometric.loader.DataLoader(data, batch_size=1)

    now_site = 0
    for batch in loader:
        batch = batch.to(device)
        # Compute the logits (i.e. pre-softmax activations) according to the policy on the concatenated graphs
        logits = policy(
            batch.constraint_features,
            batch.edge_index,
            batch.edge_attr,
            batch.variable_features,
        )
        print(logits)
        with open('./example/sample' + str(now_site) + '.pickle', "rb") as f:
            solution = pickle.load(f)
        with open('./example/node' + str(now_site) + '.pickle', 'wb') as f:
            pickle.dump([logits.tolist(), solution[4]], f)
        print(now_site)
        now_site += 1

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--number", type=int, default=30)
    parser.add_argument("--model_path", type=str, default="trained_model.pkl")
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu", help="Device to use for inference.")
    return parser.parse_args()

if __name__ == "__main__":
    args = parse_args()
    make(**vars(args))
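
For intuition on the `__inc__` override above: when pytorch geometric concatenates two bipartite graphs into one batch, the second graph's edge indices must be shifted by the first graph's node counts, separately for the constraint row and the variable row. A small sketch with hypothetical sizes:

import torch

# Graph 1: 2 constraints, 3 variables; row 0 indexes constraints, row 1 variables.
g1_edges = torch.tensor([[0, 1], [0, 2]])
inc = torch.tensor([[2], [3]])           # what __inc__ returns for graph 1
g2_edges = torch.tensor([[0, 0], [1, 2]])
batched = torch.cat([g1_edges, g2_edges + inc], dim=1)
print(batched)  # tensor([[0, 1, 2, 2], [0, 2, 4, 5]])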
Code/GNN_train.py ADDED
@@ -0,0 +1,208 @@
import argparse
import pickle
from pathlib import Path
from typing import Union

import torch
import torch.nn.functional as F
import torch_geometric
from pytorch_metric_learning import losses

from model.graphcnn import GNNPolicy

__all__ = ["train"]

class BipartiteNodeData(torch_geometric.data.Data):
    """
    Class Description:
    This class encodes a node bipartite graph observation, as returned by the `ecole.observation.NodeBipartite`
    observation function, in a format understood by the pytorch geometric data handlers.
    """
    def __init__(
        self,
        constraint_features,
        edge_indices,
        edge_features,
        variable_features,
        assignment1,
        assignment2
    ):
        super().__init__()
        self.constraint_features = constraint_features
        self.edge_index = edge_indices
        self.edge_attr = edge_features
        self.variable_features = variable_features
        self.assignment1 = assignment1
        self.assignment2 = assignment2

    def __inc__(self, key, value, store, *args, **kwargs):
        """
        Function Description:
        Overload the pytorch geometric method that tells how to increment indices when concatenating graphs for those entries (edge index, candidates) for which this is not obvious.
        """
        if key == "edge_index":
            return torch.tensor(
                [[self.constraint_features.size(0)], [self.variable_features.size(0)]]
            )
        elif key == "candidates":
            return self.variable_features.size(0)
        else:
            return super().__inc__(key, value, *args, **kwargs)


class GraphDataset(torch_geometric.data.Dataset):
    """
    Class Description:
    This class encodes a collection of graphs, as well as a method to load such graphs from the disk.
    It can be used in turn by the data loaders provided by pytorch geometric.
    """

    def __init__(self, sample_files):
        super().__init__(root=None, transform=None, pre_transform=None)
        self.sample_files = sample_files

    def len(self):
        return len(self.sample_files)

    def get(self, index):
        """
        Function Description:
        This method loads a node bipartite graph observation as saved on the disk during data collection.
        """
        with open(self.sample_files[index], "rb") as f:
            [variable_features, constraint_features, edge_indices, edge_features, solution1, solution2] = pickle.load(f)

        graph = BipartiteNodeData(
            torch.FloatTensor(constraint_features),
            torch.LongTensor(edge_indices),
            torch.FloatTensor(edge_features),
            torch.FloatTensor(variable_features),
            torch.LongTensor(solution1),
            torch.LongTensor(solution2),
        )

        # We must tell pytorch geometric how many nodes there are, for indexing purposes
        graph.num_nodes = len(constraint_features) + len(variable_features)
        graph.cons_nodes = len(constraint_features)
        graph.vars_nodes = len(variable_features)

        return graph


def pad_tensor(input_, pad_sizes, pad_value=-1e8):
    """
    Function Description:
    This utility function splits a tensor and pads each split to make them all the same size, then stacks them.
    """
    max_pad_size = pad_sizes.max()
    output = input_.split(pad_sizes.cpu().numpy().tolist())
    output = torch.stack(
        [
            F.pad(slice_, (0, max_pad_size - slice_.size(0)), "constant", pad_value)
            for slice_ in output
        ],
        dim=0,
    )
    return output

def process(policy, data_loader, device, optimizer=None):
    """
    Function Description:
    This function processes a whole epoch of training or validation, depending on whether an optimizer is provided.
    """
    mean_loss = 0
    mean_acc = 0

    n_samples_processed = 0
    with torch.set_grad_enabled(optimizer is not None):
        for batch in data_loader:
            batch = batch.to(device)
            # Compute the logits (i.e. pre-softmax activations) according to the policy on the concatenated graphs
            logits = policy(
                batch.constraint_features,
                batch.edge_index,
                batch.edge_attr,
                batch.variable_features,
            )
            # Graph-partitioning-related metric loss, where num_classes is the number of partitions in the graph.
            loss_funcA = losses.ProxyAnchorLoss(num_classes=10, embedding_size=16)
            # Metric loss related to the optimal solution. For general integer programming problems, the solution values can be clustered and num_classes modified accordingly.
            loss_funcB = losses.ProxyAnchorLoss(num_classes=2, embedding_size=16)
            loss = loss_funcA(logits, batch.assignment1.to(torch.int64)) + loss_funcB(logits, batch.assignment2.to(torch.int64))

            loss_optimizerA = torch.optim.SGD(loss_funcA.parameters(), lr=0.01)
            loss_optimizerB = torch.optim.SGD(loss_funcB.parameters(), lr=0.01)

            if optimizer is not None:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                loss_optimizerA.step()
                loss_optimizerB.step()

            mean_loss += loss.item() * batch.num_graphs
            n_samples_processed += batch.num_graphs

    mean_loss /= n_samples_processed
    return mean_loss

def train(
    model_save_path: Union[str, Path],
    batch_size: int = 1,
    learning_rate: float = 1e-3,
    num_epochs: int = 20,
    device: torch.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
):
    """
    Function Description:
    This function trains a GNN policy on training data.

    Parameters:
    - model_save_path: Path to save the model.
    - batch_size: Batch size for training.
    - learning_rate: Learning rate for the optimizer.
    - num_epochs: Number of epochs to train for.
    - device: Device to use for training.
    """
    # Load samples from ./example and split them: the first 60% for training, the last 10% for validation.
    sample_files = [str(path) for path in Path('./example').glob("pair*.pickle")]
    print(sample_files)
    train_files = sample_files[: int(0.6 * len(sample_files))]
    valid_files = sample_files[int(0.9 * len(sample_files)) :]

    train_data = GraphDataset(train_files)
    train_loader = torch_geometric.loader.DataLoader(train_data, batch_size=batch_size, shuffle=False)
    valid_data = GraphDataset(valid_files)
    valid_loader = torch_geometric.loader.DataLoader(valid_data, batch_size=batch_size, shuffle=False)

    policy = GNNPolicy().to(device)

    optimizer = torch.optim.Adam(policy.parameters(), lr=learning_rate)
    for epoch in range(num_epochs):
        train_loss = process(policy, train_loader, device, optimizer)
        #valid_loss = process(policy, valid_loader, device, None)
        valid_loss = 0
        print(f"Epoch {epoch+1}: Train Loss: {train_loss:0.3f}, Valid Loss: {valid_loss:0.3f}")

    torch.save(policy.state_dict(), model_save_path)
    print(f"Trained parameters saved to {model_save_path}")

def parse_args():
    """
    Function Description:
    This function parses the command line arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_save_path", type=str, default="trained_model.pkl", help="Path to save the model.")
    parser.add_argument("--batch_size", type=int, default=1, help="Batch size for training.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate for the optimizer.")
    parser.add_argument("--num_epochs", type=int, default=10, help="Number of epochs to train for.")
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu", help="Device to use for training.")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    train(**vars(args))
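
A quick illustration of the pad_tensor utility defined above, with hypothetical values: a flat tensor is split into per-graph slices of sizes 2 and 3, and both slices are padded to length 3 with the default pad value:

import torch

scores = torch.arange(5.0)              # [0., 1., 2., 3., 4.]
sizes = torch.tensor([2, 3])            # per-graph slice sizes
padded = pad_tensor(scores, sizes)
# padded -> tensor([[ 0.,  1., -1e8],
#                   [ 2.,  3.,   4.]])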
Code/data_generation.py ADDED
@@ -0,0 +1,336 @@
import numpy as np
import argparse
import pickle
import random
import time
import os

def generate_IS(N, M):
    '''
    Function Description:
    Generate instances of the maximum independent set problem on a general graph.

    Parameters:
    - N: Number of vertices in the graph.
    - M: Number of edges in the graph.

    Return:
    Relevant parameters of the generated maximum independent set problem.
    '''

    # n represents the number of decision variables; each vertex in the graph corresponds to a decision variable.
    # m represents the number of constraints; each edge in the graph corresponds to a constraint.
    # k[i] represents the number of decision variables in the i-th constraint.
    n = N
    m = M
    k = []

    # site[i][j] represents which decision variable the j-th decision variable in the i-th constraint corresponds to.
    # value[i][j] represents the coefficient of the j-th decision variable in the i-th constraint.
    # constraint[i] represents the right-hand side value of the i-th constraint.
    # constraint_type[i] represents the type of the i-th constraint, where 1 represents <=, 2 represents >=, and 3 represents =.
    # coefficient[i] represents the coefficient of the i-th decision variable in the objective function.
    site = []
    value = []
    for i in range(m):
        site.append([])
        value.append([])
        k.append(0)
    constraint = np.zeros(m)
    constraint_type = np.zeros(m)
    coefficient = {}

    # Add constraints: randomly generate an edge and impose the constraint that the two vertices it connects cannot both be selected.
    for i in range(M):
        x = random.randint(0, N - 1)
        y = random.randint(0, N - 1)
        while x == y:
            x = random.randint(0, N - 1)
            y = random.randint(0, N - 1)
        site[i].append(x)
        value[i].append(1)
        site[i].append(y)
        value[i].append(1)
        constraint[i] = 1
        constraint_type[i] = 1
        k[i] = 2

    # Set the objective coefficients: the decision variable of each vertex gets a random coefficient.
    for i in range(N):
        coefficient[i] = random.random()

    return (n, m, k, site, value, constraint, constraint_type, coefficient)

def generate_MVC(N, M):
    '''
    Function Description:
    Generate instances of the minimum vertex cover problem on a general graph.

    Parameters:
    - N: Number of vertices in the graph.
    - M: Number of edges in the graph.

    Return:
    Relevant parameters of the generated minimum vertex cover problem.
    '''

    # n represents the number of decision variables; each vertex in the graph corresponds to a decision variable.
    # m represents the number of constraints; each edge in the graph corresponds to a constraint.
    # k[i] represents the number of decision variables in the i-th constraint.
    n = N
    m = M
    k = []

    # site[i][j] represents which decision variable the j-th decision variable in the i-th constraint corresponds to.
    # value[i][j] represents the coefficient of the j-th decision variable in the i-th constraint.
    # constraint[i] represents the right-hand side value of the i-th constraint.
    # constraint_type[i] represents the type of the i-th constraint, where 1 represents <=, 2 represents >=, and 3 represents =.
    # coefficient[i] represents the coefficient of the i-th decision variable in the objective function.
    site = []
    value = []
    for i in range(m):
        site.append([])
        value.append([])
        k.append(0)
    constraint = np.zeros(m)
    constraint_type = np.zeros(m)
    coefficient = {}

    # Add constraints: randomly generate an edge and impose the constraint that at least one of the two vertices it connects must be selected.
    for i in range(M):
        x = random.randint(0, N - 1)
        y = random.randint(0, N - 1)
        while x == y:
            x = random.randint(0, N - 1)
            y = random.randint(0, N - 1)
        k[i] = 2
        site[i].append(x)
        value[i].append(1)
        site[i].append(y)
        value[i].append(1)
        constraint[i] = 1
        constraint_type[i] = 2

    # Set the objective coefficients: the decision variable of each vertex gets a random coefficient.
    for i in range(N):
        coefficient[i] = random.random()

    return (n, m, k, site, value, constraint, constraint_type, coefficient)

def generate_SC(N, M):
    '''
    Function Description:
    Generate instances of the set cover problem, where each item is guaranteed to appear in exactly 4 sets.

    Parameters:
    - N: Number of sets.
    - M: Number of items.

    Return:
    Relevant parameters of the generated set cover problem.
    '''

    # n represents the number of decision variables; each set corresponds to a decision variable.
    # m represents the number of constraints; each item corresponds to a constraint.
    # k[i] represents the number of decision variables in the i-th constraint.
    n = N
    m = M
    k = []

    # site[i][j] represents which decision variable the j-th decision variable in the i-th constraint corresponds to.
    # value[i][j] represents the coefficient of the j-th decision variable in the i-th constraint.
    # constraint[i] represents the right-hand side value of the i-th constraint.
    # constraint_type[i] represents the type of the i-th constraint, where 1 represents <=, 2 represents >=, and 3 represents =.
    # coefficient[i] represents the coefficient of the i-th decision variable in the objective function.
    site = []
    value = []
    for i in range(m):
        site.append([])
        value.append([])
        k.append(0)
    constraint = np.zeros(m)
    constraint_type = np.zeros(m)
    coefficient = {}

    # Add constraints: each item appears in exactly 4 distinct sets, at least one of which must be selected.
    for i in range(M):
        vis = {}
        for j in range(4):
            now = random.randint(0, N - 1)
            while now in vis.keys():
                now = random.randint(0, N - 1)
            vis[now] = 1

            site[i].append(now)
            value[i].append(1)
        k[i] = 4
    for i in range(M):
        constraint[i] = 1
        constraint_type[i] = 2

    # Set the objective coefficients: the decision variable of each set gets a random coefficient.
    for i in range(N):
        coefficient[i] = random.random()


    return (n, m, k, site, value, constraint, constraint_type, coefficient)

def generate_CAT(N, M):
    '''
    Function Description:
    Generate instances of the combinatorial auction problem, where each constraint involves exactly 5 decision variables.

    Parameters:
    - N: Number of decision variables (bids).
    - M: Number of constraints (items).

    Return:
    Relevant parameters of the generated combinatorial auction problem.
    '''
    # n represents the number of decision variables.
    # m represents the number of constraints.
    # k[i] represents the number of decision variables in the i-th constraint.
    n = N
    m = M
    k = []

    # site[i][j] represents which decision variable the j-th decision variable in the i-th constraint corresponds to.
    # value[i][j] represents the coefficient of the j-th decision variable in the i-th constraint.
    # constraint[i] represents the right-hand side value of the i-th constraint.
    # constraint_type[i] represents the type of the i-th constraint, where 1 represents <=, 2 represents >=, and 3 represents =.
    # coefficient[i] represents the coefficient of the i-th decision variable in the objective function.

    site = []
    value = []
    for i in range(m):
        site.append([])
        value.append([])
        k.append(0)
    constraint = np.zeros(m)
    constraint_type = np.zeros(m)
    coefficient = {}

    # Add constraints: each constraint covers exactly 5 distinct decision variables, of which at most one can be selected.
    for i in range(M):
        vis = {}
        for j in range(5):
            now = random.randint(0, N - 1)
            while now in vis.keys():
                now = random.randint(0, N - 1)
            vis[now] = 1

            site[i].append(now)
            value[i].append(1)
        k[i] = 5
    for i in range(M):
        constraint[i] = 1
        constraint_type[i] = 1

    # Set the objective coefficients: each decision variable gets a random coefficient.
    for i in range(N):
        coefficient[i] = random.random() * 1000


    return (n, m, k, site, value, constraint, constraint_type, coefficient)

def generate_samples(
    problem_type: str,
    difficulty_mode: str,
    seed: int,
    number: int
):
    '''
    Function Description:
    Generate problem instances based on the provided parameters and package the output as data<i>.pickle files in ./example.

    Parameters:
    - problem_type: Available options are ['IS', 'MVC', 'SC', 'CAT'], representing the maximum independent set problem, minimum vertex cover problem, minimum set cover problem, and combinatorial auction problem, respectively.
    - difficulty_mode: Available options are ['easy', 'medium', 'hard'], representing easy (small-scale), medium (medium-scale), and hard (large-scale) difficulties.
    - seed: Integer value indicating the random seed used for problem generation.
    - number: Integer value indicating the number of instances to generate.

    Return:
    The problem instances are generated and packaged as data<i>.pickle. The function does not have a return value.
    '''
    # Set the random seed.
    random.seed(seed)

    # Check for the output directory and create it if needed.
    dir_name = 'example'
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)

    for i in range(number):
        # Randomly generate instances of the maximum independent set problem and package the output.
        if problem_type == 'IS':
            if difficulty_mode == 'easy':
                N = 10000
                M = 30000
            elif difficulty_mode == 'medium':
                N = 100000
                M = 300000
            else:
                N = 1000000
                M = 3000000
            n, m, k, site, value, constraint, constraint_type, coefficient = generate_IS(N, M)
            with open('./example/data' + str(i) + '.pickle', 'wb') as f:
                pickle.dump(['maximize', n, m, k, site, value, constraint, constraint_type, coefficient], f)

        # Randomly generate instances of the minimum vertex cover problem and package the output.
        if problem_type == 'MVC':
            if difficulty_mode == 'easy':
                N = 10000
                M = 30000
            elif difficulty_mode == 'medium':
                N = 100000
                M = 300000
            else:
                N = 1000000
                M = 3000000
            n, m, k, site, value, constraint, constraint_type, coefficient = generate_MVC(N, M)
            with open('./example/data' + str(i) + '.pickle', 'wb') as f:
                pickle.dump(['minimize', n, m, k, site, value, constraint, constraint_type, coefficient], f)

        # Randomly generate instances of the minimum set cover problem and package the output.
        if problem_type == 'SC':
            if difficulty_mode == 'easy':
                N = 20000
                M = 20000
            elif difficulty_mode == 'medium':
                N = 200000
                M = 200000
            else:
                N = 2000000
                M = 2000000
            n, m, k, site, value, constraint, constraint_type, coefficient = generate_SC(N, M)
            with open('./example/data' + str(i) + '.pickle', 'wb') as f:
                pickle.dump(['minimize', n, m, k, site, value, constraint, constraint_type, coefficient], f)

        # Randomly generate instances of the combinatorial auction problem and package the output.
        if problem_type == 'CAT':
            if difficulty_mode == 'easy':
                N = 10000
                M = 10000
            elif difficulty_mode == 'medium':
                N = 100000
                M = 100000
            else:
                N = 1000000
                M = 1000000
            n, m, k, site, value, constraint, constraint_type, coefficient = generate_CAT(N, M)
            with open('./example/data' + str(i) + '.pickle', 'wb') as f:
                pickle.dump(['maximize', n, m, k, site, value, constraint, constraint_type, coefficient], f)


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--problem_type", choices=['IS', 'MVC', 'SC', 'CAT'], default='SC', help="Problem type selection")
    parser.add_argument("--difficulty_mode", choices=['easy', 'medium', 'hard'], default='easy', help="Difficulty level.")
    parser.add_argument('--seed', type=int, default=0, help='Random generator seed.')
    parser.add_argument("--number", type=int, default=10, help='The number of instances.')
    return parser.parse_args()

if __name__ == '__main__':
    args = parse_args()
    generate_samples(**vars(args))
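
To make the encoding above concrete, consider a toy maximum independent set instance (hypothetical): a graph with 3 vertices and edges (0, 1) and (1, 2) would be encoded as

# n = 3, m = 2
# k = [2, 2]                   # two variables per edge constraint
# site  = [[0, 1], [1, 2]]     # variables appearing in each constraint
# value = [[1, 1], [1, 1]]     # their coefficients
# constraint = [1, 1]          # right-hand sides: x_u + x_v <= 1
# constraint_type = [1, 1]     # 1 means <=
# coefficient = {0: c0, 1: c1, 2: c2}   # random objective weights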
Code/data_partition.py ADDED
@@ -0,0 +1,106 @@
from functools import cmp_to_key
import numpy as np
import argparse
import pickle
import random
import time
import os

class pair:
    def __init__(self):
        self.x = 0
        self.y = 0
        self.val = 0

def cmp(a, b):
    if a.val > b.val:
        return -1
    else:
        return 1

def FENNEL(n, m, k, site):
    '''
    Function Description:
    Use the FENNEL algorithm to partition the bipartite representation of the problem instance.

    Parameters:
    - n: Number of decision variables in the problem instance.
    - m: Number of constraints in the problem instance.
    - k: k[i] represents the number of decision variables in the i-th constraint.
    - site: site[i][j] represents which decision variable the j-th decision variable of the i-th constraint corresponds to.

    Return:
    The result of the graph partitioning.
    '''
    raise NotImplementedError('FENNEL method should be implemented')

def generate_pair(
    number: int
):
    '''
    Function Description:
    Partition the problem based on the given problem instances and generate training data.

    Parameters:
    - number: Number of problem instances.

    Return:
    The training data is generated and packaged as pair<i>.pickle files in ./example. The function does not have a return value.
    '''
    for turn in range(number):
        print("=====", turn)
        # Check that the problem file exists and read it.
        if not os.path.exists('./example/data' + str(turn) + '.pickle'):
            print("No problem file!")
            return
        with open('./example/data' + str(turn) + '.pickle', "rb") as f:
            problem = pickle.load(f)

        # Check that the solution file exists and read it.
        if not os.path.exists('./example/sample' + str(turn) + '.pickle'):
            print("No solution file!")
            return
        with open('./example/sample' + str(turn) + '.pickle', "rb") as f:
            solution = pickle.load(f)

        # obj_type represents the problem type (maximization/minimization).
        # n represents the number of decision variables.
        # m represents the number of constraints.
        # k[i] represents the number of decision variables in the i-th constraint.
        # site[i][j] represents which decision variable the j-th decision variable of the i-th constraint corresponds to.
        # value[i][j] represents the coefficient of the j-th decision variable in the i-th constraint.
        # constraint[i] represents the right-hand side value of the i-th constraint.
        # constraint_type[i] represents the type of the i-th constraint, where 1 represents <=, 2 represents >=, and 3 represents =.
        # coefficient[i] represents the coefficient of the i-th decision variable in the objective function.
        obj_type = problem[0]
        n = problem[1]
        m = problem[2]
        k = problem[3]
        site = problem[4]
        value = problem[5]
        constraint = problem[6]
        constraint_type = problem[7]
        coefficient = problem[8]

        variable_features = solution[0]
        constraint_features = solution[1]
        edge_indices = solution[2]
        edge_features = solution[3]
        optX = solution[4]

        # Get the graph partitioning result.
        new_color = FENNEL(n, m, k, site)

        with open('./example/pair' + str(turn) + '.pickle', 'wb') as f:
            pickle.dump([variable_features, constraint_features, edge_indices, edge_features, new_color, optX], f)


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--number", type=int, default=10, help='The number of instances.')
    return parser.parse_args()

if __name__ == '__main__':
    args = parse_args()
    generate_pair(**vars(args))
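
The FENNEL stub above is left to the reader. For reference, here is a minimal FENNEL-style streaming partitioner (after Tsourakakis et al.) over the variable-interaction graph; this is a sketch under stated assumptions, not the repository's implementation, with num_parts=10 mirroring the num_classes=10 used by the partition loss in GNN_train.py:

import math

def fennel_sketch(n, m, k, site, num_parts=10, gamma=1.5):
    # Variables that appear in the same constraint are treated as neighbors.
    adj = [[] for _ in range(n)]
    num_edges = 0
    for i in range(m):
        for a in range(k[i]):
            for b in range(a + 1, k[i]):
                adj[site[i][a]].append(site[i][b])
                adj[site[i][b]].append(site[i][a])
                num_edges += 1
    alpha = math.sqrt(num_parts) * num_edges / (n ** gamma)
    color = [-1] * n
    part_size = [0] * num_parts
    for v in range(n):
        neigh = [0] * num_parts
        for u in adj[v]:
            if color[u] >= 0:
                neigh[color[u]] += 1
        # Greedy FENNEL objective: attraction to already-placed neighbors
        # minus a balance penalty that grows with the partition size.
        best = max(range(num_parts),
                   key=lambda p: neigh[p] - alpha * gamma * part_size[p] ** (gamma - 1))
        color[v] = best
        part_size[best] += 1
    return color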
Code/data_solution.py ADDED
@@ -0,0 +1,121 @@
import numpy as np
import argparse
import pickle
import random
import time
import os


def get_best_solution(n, m, k, site, value, constraint, constraint_type, coefficient, time_limit, obj_type):
    '''
    Function Description:
    Solve the problem using an optimization solver based on the provided problem instance.

    Parameters:
    - n: Number of decision variables in the problem instance.
    - m: Number of constraints in the problem instance.
    - k: k[i] represents the number of decision variables in the i-th constraint.
    - site: site[i][j] represents which decision variable the j-th decision variable of the i-th constraint corresponds to.
    - value: value[i][j] represents the coefficient of the j-th decision variable in the i-th constraint.
    - constraint: constraint[i] represents the right-hand side value of the i-th constraint.
    - constraint_type: constraint_type[i] represents the type of the i-th constraint, where 1 represents <=, 2 represents >=, and 3 represents =.
    - coefficient: coefficient[i] represents the coefficient of the i-th decision variable in the objective function.
    - time_limit: Maximum solving time.
    - obj_type: Whether the problem is a maximization problem or a minimization problem.

    Return:
    The optimal solution of the problem, represented as a list of values for each decision variable in the optimal solution.
    '''

    raise NotImplementedError('get_best_solution method should be implemented')


def optimize(
    time: int,
    number: int,
):
    '''
    Function Description:
    Based on the specified parameters, invoke the solver to optimize each problem instance stored as data<i>.pickle in ./example.

    Parameters:
    - time: Maximum solving time per instance.
    - number: Integer value indicating the number of instances to solve.

    Return:
    The solutions and bipartite-graph features are packaged as sample<i>.pickle files in ./example. The function does not have a return value.
    '''


    for num in range(number):
        # Check that the problem file exists and read it.
        if not os.path.exists('./example/data' + str(num) + '.pickle'):
            print("No input file!")
            return
        with open('./example/data' + str(num) + '.pickle', "rb") as f:
            data = pickle.load(f)

        # n represents the number of decision variables.
        # m represents the number of constraints.
        # k[i] represents the number of decision variables in the i-th constraint.
        # site[i][j] represents which decision variable the j-th decision variable of the i-th constraint corresponds to.
        # value[i][j] represents the coefficient of the j-th decision variable in the i-th constraint.
        # constraint[i] represents the right-hand side value of the i-th constraint.
        # constraint_type[i] represents the type of the i-th constraint, where 1 represents <=, 2 represents >=, and 3 represents =.
        # coefficient[i] represents the coefficient of the i-th decision variable in the objective function.
        n = data[1]
        m = data[2]
        k = data[3]
        site = data[4]
        value = data[5]
        constraint = data[6]
        constraint_type = data[7]
        coefficient = data[8]
        # IS and CAT are maximization problems.
        # MVC and SC are minimization problems.
        obj_type = data[0]
        optimal_solution = get_best_solution(n, m, k, site, value, constraint, constraint_type, coefficient, time, obj_type)

        # Build the bipartite-graph features of the instance.
        variable_features = []
        constraint_features = []
        edge_indices = [[], []]
        edge_features = []

        for i in range(n):
            now_variable_features = []
            now_variable_features.append(coefficient[i])
            now_variable_features.append(0)
            now_variable_features.append(1)
            now_variable_features.append(1)
            now_variable_features.append(random.random())
            variable_features.append(now_variable_features)

        for i in range(m):
            now_constraint_features = []
            now_constraint_features.append(constraint[i])
            now_constraint_features.append(constraint_type[i])
            now_constraint_features.append(random.random())
            constraint_features.append(now_constraint_features)

        for i in range(m):
            for j in range(k[i]):
                edge_indices[0].append(i)
                edge_indices[1].append(site[i][j])
                edge_features.append([value[i][j]])

        with open('./example/sample' + str(num) + '.pickle', 'wb') as f:
            pickle.dump([variable_features, constraint_features, edge_indices, edge_features, optimal_solution], f)

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--time', type=int, default=10, help='Running wall-clock time.')
    parser.add_argument("--number", type=int, default=10, help='The number of instances.')
    return parser.parse_args()



if __name__ == '__main__':
    args = parse_args()
    optimize(**vars(args))
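
get_best_solution is likewise left unimplemented. One way to fill it in is with gurobipy, which the environment pins at 10.0.0; the following is a minimal sketch for illustration, not the authors' implementation:

import gurobipy as gp
from gurobipy import GRB

def get_best_solution_sketch(n, m, k, site, value, constraint, constraint_type,
                             coefficient, time_limit, obj_type):
    model = gp.Model()
    model.Params.TimeLimit = time_limit
    x = model.addVars(n, vtype=GRB.BINARY)
    model.setObjective(gp.quicksum(coefficient[i] * x[i] for i in range(n)),
                       GRB.MAXIMIZE if obj_type == 'maximize' else GRB.MINIMIZE)
    for i in range(m):
        expr = gp.quicksum(value[i][j] * x[site[i][j]] for j in range(k[i]))
        if constraint_type[i] == 1:
            model.addConstr(expr <= constraint[i])
        elif constraint_type[i] == 2:
            model.addConstr(expr >= constraint[i])
        else:
            model.addConstr(expr == constraint[i])
    model.optimize()
    return [x[i].X for i in range(n)]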
Code/model/gbdt_regressor.py ADDED
@@ -0,0 +1,66 @@
import numpy as np
from numpy import ndarray


class GradientBoostingRegressor():
    '''
    Class Description:
    GBDT class, which stores the trained GBDT.
    '''
    def __init__(self):
        '''
        Function Description:
        Initialize the GBDT.
        '''
        raise NotImplementedError('GradientBoostingRegressor __init__ method should be implemented')

    def fit(self, data: ndarray,
            label: ndarray,
            n_estimators: int,
            learning_rate: float,
            max_depth: int,
            min_samples_split: int,
            subsample=None):
        '''
        Function Description:
        Train the GBDT based on the given decision variable neural encodings and optimal solution values.

        Parameters:
        - data: Neural encoding results of the decision variables.
        - label: Values of the decision variables in the optimal solution.
        - n_estimators: Number of decision trees.
        - learning_rate: Learning rate.
        - max_depth: Maximum depth of the decision trees.
        - min_samples_split: Minimum number of samples required to split a leaf node.
        - subsample: Subsample rate without replacement.

        Return:
        The training results are stored in the class. There is no return value.
        '''
        raise NotImplementedError('GradientBoostingRegressor fit method should be implemented')

    def predict(self, data: ndarray) -> ndarray:
        '''
        Function Description:
        Use the trained GBDT to predict the initial solution based on the given decision variable neural encoding, and return the predicted initial solution.

        Parameters:
        - data: Neural encoding results of the decision variables.

        Return:
        The predicted initial solution.
        '''
        raise NotImplementedError('GradientBoostingRegressor predict method should be implemented')

    def calc(self, data: ndarray) -> ndarray:
        '''
        Function Description:
        Use the trained GBDT to predict the initial solution based on the given decision variable neural encoding, and return the prediction loss.

        Parameters:
        - data: Neural encoding results of the decision variables.

        Return:
        The prediction loss generated when predicting the initial solution for each decision variable.
        '''
        raise NotImplementedError('GradientBoostingRegressor calc method should be implemented')
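
The interface above maps naturally onto scikit-learn, which the environment pins at 1.2.0. A minimal drop-in sketch follows; the calc semantics here (distance of each prediction from its nearest integer, as a rounding-uncertainty proxy) are an assumption, since the repository leaves them open:

import numpy as np
from sklearn.ensemble import GradientBoostingRegressor as SKGBR

class GradientBoostingRegressorSketch:
    def __init__(self):
        self.model = None

    def fit(self, data, label, n_estimators, learning_rate, max_depth,
            min_samples_split, subsample=None):
        # Delegate training to scikit-learn's GBDT implementation.
        self.model = SKGBR(n_estimators=n_estimators, learning_rate=learning_rate,
                           max_depth=max_depth, min_samples_split=min_samples_split,
                           subsample=subsample if subsample is not None else 1.0)
        self.model.fit(data, label)

    def predict(self, data):
        return self.model.predict(data)

    def calc(self, data):
        # Assumed per-variable loss: how far each prediction is from 0/1.
        pred = self.model.predict(data)
        return np.abs(pred - np.round(pred))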
Code/model/graphcnn.py ADDED
@@ -0,0 +1,81 @@
import numpy as np
import torch
import torch_geometric

__all__ = ["GNNPolicy"]

class BipartiteGraphConvolution(torch_geometric.nn.MessagePassing):
    """
    Class Description:
    Based on graph convolution, define the bipartite graph semi-convolution process.
    """

    def __init__(self):
        '''
        Function Description:
        Define the size of the encoding space, and implement the semi-convolution layer and output layer.
        '''
        raise NotImplementedError('BipartiteGraphConvolution __init__ method should be implemented')

    def forward(self, left_features, edge_indices, edge_features, right_features):
        '''
        Function Description:
        Based on the given node and edge features, output the result of forward propagation after semi-convolution.

        Parameters:
        - left_features: Features of the nodes on the left side of the bipartite graph.
        - edge_indices: Edge information.
        - edge_features: Edge features.
        - right_features: Features of the nodes on the right side of the bipartite graph.

        Return: The result after forward propagation.
        '''
        raise NotImplementedError('BipartiteGraphConvolution forward method should be implemented')

    def message(self, node_features_i, node_features_j, edge_features):
        '''
        Function Description:
        This method computes the messages passed along the edges during the semi-convolution.

        Parameters:
        - node_features_i: Features of the nodes on the left side of the bipartite graph.
        - node_features_j: Features of the nodes on the right side of the bipartite graph.
        - edge_features: Edge features.

        Return: The result after the message passing in the semi-convolution.
        '''
        raise NotImplementedError('BipartiteGraphConvolution message method should be implemented')


class GNNPolicy(torch.nn.Module):
    """
    Class Description:
    Based on the semi-convolutional layer, define the entire GNN network structure.
    """
    def __init__(self):
        '''
        Function Description:
        Define the size of the encoding space, and define the layers for decision variable encoding, edge feature encoding, and constraint feature encoding.
        Define two semi-convolutional layers and the final output layer.
        '''
        raise NotImplementedError('GNNPolicy __init__ method should be implemented')

    def forward(
        self, constraint_features, edge_indices, edge_features, variable_features
    ):
        '''
        Function Description:
        Based on the given constraint, edge, and variable features, output the result of forward propagation through the GNN.

        Parameters:
        - constraint_features: Features of the constraint nodes.
        - edge_indices: Edge information.
        - edge_features: Edge features.
        - variable_features: Features of the variable nodes.

        Return: The result after forward propagation.
        '''
        raise NotImplementedError('GNNPolicy forward method should be implemented')
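
For reference, a minimal sketch of what one half-convolution layer could look like with torch_geometric's MessagePassing; emb_size=16 is taken from the embedding_size used by the metric losses in GNN_train.py, and everything else is an assumption rather than the authors' architecture:

import torch
import torch_geometric

class BipartiteGraphConvolutionSketch(torch_geometric.nn.MessagePassing):
    def __init__(self, emb_size=16):
        super().__init__("add")
        self.feature_module = torch.nn.Linear(emb_size, emb_size)
        self.edge_module = torch.nn.Linear(1, emb_size, bias=False)
        self.output_module = torch.nn.Linear(2 * emb_size, emb_size)

    def forward(self, left_features, edge_indices, edge_features, right_features):
        # Aggregate messages from left (sending) nodes into right (receiving) nodes.
        out = self.propagate(
            edge_indices,
            size=(left_features.size(0), right_features.size(0)),
            node_features=(left_features, right_features),
            edge_features=edge_features,
        )
        return self.output_module(torch.cat([out, right_features], dim=-1))

    def message(self, node_features_j, edge_features):
        # node_features_j are the sending endpoints of each edge.
        return torch.relu(self.feature_module(node_features_j) + self.edge_module(edge_features))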
Code/test.py ADDED
@@ -0,0 +1,308 @@
import os
import time
import torch
import random
import pickle
import argparse
import torch_geometric

import numpy as np
import torch.nn.functional as F
from pytorch_metric_learning import losses

from gurobipy import *
from typing import Union
from pathlib import Path
from functools import cmp_to_key
from model.graphcnn import GNNPolicy
from model.gbdt_regressor import GradientBoostingRegressor

def make(constraint_features,
         edge_index,
         edge_attr,
         variable_features,
         model_path: str,
         device: torch.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")):
    '''
    Function Description:
    Use the trained GNN to obtain the neural encoding results of the decision variables based on the given problem instance.

    Parameters:
    - constraint_features: Initial feature encoding of constraint nodes in the bipartite representation of the problem.
    - edge_index: Edges in the bipartite representation of the problem.
    - edge_attr: Edge features in the bipartite representation of the problem.
    - variable_features: Initial feature encoding of decision variable nodes in the bipartite representation of the problem.
    - model_path: Path to the trained GNN model.
    - device: Select the computing device.

    Return:
    The neural encoding results of the decision variables.
    '''
    policy = GNNPolicy().to(device)
    policy.load_state_dict(torch.load(model_path, map_location=device))
    logits = policy(
        torch.FloatTensor(constraint_features).to(device),
        torch.LongTensor(edge_index).to(device),
        torch.FloatTensor(edge_attr).to(device),
        torch.FloatTensor(variable_features).to(device),
    )
    return logits.tolist()

class pair:
    def __init__(self):
        self.site = 0
        self.loss = 0

def cmp(a, b):
    if a.loss < b.loss:
        return -1
    else:
        return 1

def cmp2(a, b):
    if a.loss > b.loss:
        return -1
    else:
        return 1

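# Usage note (illustrative): these comparators are meant for functools.cmp_to_key,
# e.g. sorting a list of pair objects with key=cmp_to_key(cmp) orders them by
# ascending loss, while key=cmp_to_key(cmp2) orders them by descending loss.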
69
+ def get_best_solution(n, m, k, site, value, constraint, constraint_type, coefficient, time_limit, obj_type, now_sol, now_col):
70
+ '''
71
+ Function Description:
72
+ Solve the problem using an optimization solver based on the provided problem instance.
73
+
74
+ Parameters:
75
+ - n: Number of decision variables in the problem instance.
76
+ - m: Number of constraints in the problem instance.
77
+ - k: k[i] represents the number of decision variables in the i-th constraint.
78
+ - site: site[i][j] represents which decision variable the j-th decision variable of the i-th constraint corresponds to.
79
+ - value: value[i][j] represents the coefficient of the j-th decision variable in the i-th constraint.
80
+ - constraint: constraint[i] represents the right-hand side value of the i-th constraint.
81
+ - constraint_type: constraint_type[i] represents the type of the i-th constraint, where 1 represents <=, 2 represents >=, and 3 represents =.
82
+ - coefficient: coefficient[i] represents the coefficient of the i-th decision variable in the objective function.
83
+ - time_limit: Maximum solving time.
84
+ - obj_type: Whether the problem is a maximization problem or a minimization problem.
85
+
86
+ Return:
87
+ The optimal solution of the problem.
88
+ '''
89
+ raise NotImplementedError('get_best_solution method should be implemented')
90
+
91
+ def initial_solution_search(n, m, k, site, value, constraint, constraint_type, coefficient, set_time, obj_type, predict, loss):
92
+ '''
93
+ Function Description:
94
+ Perform an initial solution search using fixed-radius neighborhood search based on the given problem instance and the predicted results from GBDT.
95
+
96
+ Parameters:
97
+ - n: Number of decision variables in the problem instance.
98
+ - m: Number of constraints in the problem instance.
99
+ - k: k[i] represents the number of decision variables in the i-th constraint.
100
+ - site: site[i][j] represents which decision variable the j-th decision variable of the i-th constraint corresponds to.
101
+ - value: value[i][j] represents the coefficient of the j-th decision variable in the i-th constraint.
102
+ - constraint: constraint[i] represents the right-hand side value of the i-th constraint.
103
+ - constraint_type: constraint_type[i] represents the type of the i-th constraint, where 1 represents <=, 2 represents >=, and 3 represents =.
104
+ - coefficient: coefficient[i] represents the coefficient of the i-th decision variable in the objective function.
105
+ - time_limit: Maximum solving time.
106
+ - obj_type: Whether the problem is a maximization problem or a minimization problem.
107
+ - predict: Predicted results from GBDT.
108
+ - loss: Prediction loss from GBDT.
109
+
110
+ Return:
111
+ The initial feasible solution of the problem and its corresponding objective function value.
112
+ '''
113
+ raise NotImplementedError('initial_solution_search method should be implemented')
114
+
115
+ def cross_generate_blocks(n, loss, rate, predict, nowX, GBDT, data):
116
+ '''
117
+ Function Description:
118
+ Obtain the neighborhood partitioning result based on the given problem instance, the predicted results from GBDT, and the current solution.
119
+
120
+ Parameters:
121
+ - n: Number of decision variables in the problem instance.
122
+ - loss: Prediction loss from GBDT.
123
+ - rate: Neighborhood radius.
124
+ - predict: Predicted results from GBDT.
125
+ - nowX: Current solution of the problem instance.
126
+ - GBDT: Trained Gradient Boosting Decision Tree.
127
+ - data: Neural encoding results of the decision variables.
128
+
129
+ Return: A set of partitioning results of the neighborhood. Note that the call site unpacks three return values, of which only the first (the block list) is used.
130
+ '''
131
+ raise NotImplementedError('cross_generate_blocks method should be implemented')
132
+
133
+ def cross(n, m, k, site, value, constraint, constraint_type, coefficient, obj_type, rate, solA, blockA, solB, blockB, set_time):
134
+ '''
135
+ Function Description:
136
+ Obtain the crossover solution of two neighborhoods based on the given problem instance and the neighborhood information and search results of neighborhoods A and B.
137
+
138
+ Parameters:
139
+ - n: Number of decision variables in the problem instance.
140
+ - m: Number of constraints in the problem instance.
141
+ - k: k[i] represents the number of decision variables in the i-th constraint.
142
+ - site: site[i][j] represents which decision variable the j-th decision variable of the i-th constraint corresponds to.
143
+ - value: value[i][j] represents the coefficient of the j-th decision variable in the i-th constraint.
144
+ - constraint: constraint[i] represents the right-hand side value of the i-th constraint.
145
+ - constraint_type: constraint_type[i] represents the type of the i-th constraint, where 1 represents <=, 2 represents >=, and 3 represents =.
146
+ - coefficient: coefficient[i] represents the coefficient of the i-th decision variable in the objective function.
+ - obj_type: Whether the problem is a maximization problem or a minimization problem.
147
+ - rate: Neighborhood radius.
148
+ - solA: Search result of neighborhood A.
149
+ - blockA: Neighborhood information of neighborhood A.
150
+ - solB: Search result of neighborhood B.
151
+ - blockB: Neighborhood information of neighborhood B.
152
+ - set_time: Time budget for the crossover search.
153
+
154
+ Return:
155
+ The crossover solution of the two neighborhoods and their corresponding objective function values.
156
+ '''
157
+ raise NotImplementedError('cross method should be implemented')
158
+
159
+
160
+ def optimize(fix : float,
161
+ set_time : int,
162
+ rate : float,
163
+ model_path : str,
164
+ device : torch.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")):
165
+ begin_time = time.time()
166
+
167
+ if(os.path.exists('./example-IS-h/data0.pickle') == False):
168
+ print("No problem file!")
169
+
170
+ with open('./example-IS-h/data0.pickle', "rb") as f:
171
+ problem = pickle.load(f)
172
+
173
+ obj_type = problem[0]
174
+ n = problem[1]
175
+ m = problem[2]
176
+ k = problem[3]
177
+ site = problem[4]
178
+ value = problem[5]
179
+ constraint = problem[6]
180
+ constraint_type = problem[7]
181
+ coefficient = problem[8]
182
+
183
+
184
+ variable_features = []
185
+ constraint_features = []
186
+ edge_indices = [[], []]
187
+ edge_features = []
188
+
189
+ # Variable node features: objective coefficient, constants (0, 1, 1), and one random feature.
+ for i in range(n):
190
+ now_variable_features = []
191
+ now_variable_features.append(coefficient[i])
192
+ now_variable_features.append(0)
193
+ now_variable_features.append(1)
194
+ now_variable_features.append(1)
195
+ now_variable_features.append(random.random())
196
+ variable_features.append(now_variable_features)
197
+
198
+ # Constraint node features: right-hand side, constraint type, and one random feature.
+ for i in range(m):
199
+ now_constraint_features = []
200
+ now_constraint_features.append(constraint[i])
201
+ now_constraint_features.append(constraint_type[i])
202
+ now_constraint_features.append(random.random())
203
+ constraint_features.append(now_constraint_features)
204
+
205
+ # Bipartite-graph edges: one edge per occurrence of a variable in a constraint,
+ # with the coefficient as the edge feature.
+ for i in range(m):
206
+ for j in range(k[i]):
207
+ edge_indices[0].append(i)
208
+ edge_indices[1].append(site[i][j])
209
+ edge_features.append([value[i][j]])
210
+ # Run the trained GNN to obtain the neural embeddings of the decision variables.
+ data = make(constraint_features, edge_indices, edge_features, variable_features, model_path, device)
211
+
212
+
213
+ if(os.path.exists('./GBDT-IS-h.pickle') == False):
214
+ print("No problem file!")
215
+
216
+ with open('./GBDT-IS-h.pickle', "rb") as f:
217
+ GBDT = pickle.load(f)[0]
218
+ predict = GBDT.predict(np.array(data))  # predicted values of the decision variables
219
+ loss = GBDT.calc(np.array(data))  # per-variable prediction loss
220
+
221
+
222
+ # Initial solution search.
223
+ ansTime = []
224
+ ansVal = []
225
+ nowX, nowVal = initial_solution_search(n, m, k, site, value, constraint, constraint_type, coefficient, set_time, obj_type, predict, loss)
226
+ ansTime.append(time.time() - begin_time)
227
+ ansVal.append(nowVal)
228
+
229
+ while(time.time() - begin_time < set_time):
230
+ turnX = []
231
+ turnVal = []
232
+ block_list, _, _ = cross_generate_blocks(n, loss, rate, predict, nowX, GBDT, data)
233
+ # GBDT-guided neighborhood partitioning and neighborhood search.
234
+ for i in range(4):
235
+ max_time = set_time - (time.time() - begin_time)
236
+ if(max_time <= 0):
237
+ break
238
+ newX, newVal = get_best_solution(n, m, k, site, value, constraint, constraint_type, coefficient, max_time, obj_type, nowX, block_list[i])
239
+ turnX.append(newX)
240
+ turnVal.append(newVal)
241
+
242
+ # First-level crossover between neighborhoods.
243
+ if(len(turnX) == 4):
244
+ max_time = set_time - (time.time() - begin_time)
245
+ if(max_time <= 0):
246
+ break
247
+ newX, newVal = cross(n, m, k, site, value, constraint, constraint_type, coefficient, obj_type, rate, turnX[0], block_list[0], turnX[1], block_list[1], max_time)
248
+ if(newVal != -1):
249
+ turnX.append(newX)
250
+ turnVal.append(newVal)
251
+
252
+ newX, newVal = cross(n, m, k, site, value, constraint, constraint_type, coefficient, obj_type, rate, turnX[2], block_list[2], turnX[3], block_list[3], max_time)
253
+ if(newVal != -1):
254
+ turnX.append(newX)
255
+ turnVal.append(newVal)
256
+
257
+ # Second-level crossover between neighborhoods.
258
+ if(len(turnX) == 6):
259
+ max_time = set_time - (time.time() - begin_time)
260
+ if(max_time <= 0):
261
+ break
262
+
263
+ block_list.append(np.zeros(n, int))
264
+ for i in range(n):
265
+ if(block_list[0][i] == 1 or block_list[1][i] == 1):
266
+ block_list[4][i] = 1
267
+ block_list.append(np.zeros(n, int))
268
+ for i in range(n):
269
+ if(block_list[2][i] == 1 or block_list[3][i] == 1):
270
+ block_list[5][i] = 1
271
+
272
+ newX, newVal = cross(n, m, k, site, value, constraint, constraint_type, coefficient, obj_type, rate, turnX[4], block_list[4], turnX[5], block_list[5], max_time)
273
+ if(newVal != -1):
274
+ turnX.append(newX)
275
+ turnVal.append(newVal)
276
+
277
+ # Update the current solution as the current optimal solution.
278
+ for i in range(len(turnVal)):
279
+ if(obj_type == 'maximize'):
280
+ if(turnVal[i] > nowVal):
281
+ nowVal = turnVal[i]
282
+ for j in range(n):
283
+ nowX[j] = turnX[i][j]
284
+ else:
285
+ if(turnVal[i] < nowVal):
286
+ nowVal = turnVal[i]
287
+ for j in range(n):
288
+ nowX[j] = turnX[i][j]
289
+
290
+ ansTime.append(time.time() - begin_time)
291
+ ansVal.append(nowVal)
292
+ print(ansTime)
293
+ print(ansVal)
294
+
295
+
296
+ def parse_args():
297
+ parser = argparse.ArgumentParser()
298
+ parser.add_argument("--fix", type = float, default = 0.1, help = 'time.')
299
+ parser.add_argument("--set_time", type = int, default = 100, help = 'set_time.')
300
+ parser.add_argument("--rate", type = float, default = 0.2, help = 'sub rate.')
301
+ parser.add_argument("--model_path", type=str, default="trained_model-IS-h.pkl")
302
+ parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu", help="Device to use for training.")
303
+ return parser.parse_args()
304
+
305
+ if __name__ == '__main__':
306
+ args = parse_args()
307
+ #print(vars(args)["model_path"])
308
+ optimize(**vars(args))
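
The four interfaces above (`get_best_solution`, `initial_solution_search`, `cross_generate_blocks`, `cross`) are intentionally left to the user. As a starting point, here is a minimal sketch of `get_best_solution` written against gurobipy (the Gurobi channel is listed in GNN_GBDT.yml). It assumes all decision variables are binary, consistent with the 0/1 bounds used as variable features above, and returns `-1` on failure only to mirror the `newVal != -1` checks in the main loop; neither choice is part of the release.

```
# Hypothetical sketch, not part of the release: a Gurobi-based
# implementation of the get_best_solution interface.
import gurobipy as gp
from gurobipy import GRB

def get_best_solution(n, m, k, site, value, constraint, constraint_type,
                      coefficient, time_limit, obj_type, now_sol, now_col):
    model = gp.Model("neighborhood_search")
    model.Params.OutputFlag = 0
    model.Params.TimeLimit = max(time_limit, 1e-6)
    x = model.addVars(n, vtype=GRB.BINARY, name="x")
    # Fix every variable outside the current neighborhood to its value
    # in the incumbent solution (assumed meaning of now_col).
    for i in range(n):
        if now_col[i] == 0:
            x[i].LB = x[i].UB = round(now_sol[i])
    # Constraint types per the docstrings: 1 -> <=, 2 -> >=, 3 -> =.
    for i in range(m):
        expr = gp.quicksum(value[i][j] * x[site[i][j]] for j in range(k[i]))
        if constraint_type[i] == 1:
            model.addConstr(expr <= constraint[i])
        elif constraint_type[i] == 2:
            model.addConstr(expr >= constraint[i])
        else:
            model.addConstr(expr == constraint[i])
    sense = GRB.MAXIMIZE if obj_type == 'maximize' else GRB.MINIMIZE
    model.setObjective(gp.quicksum(coefficient[i] * x[i] for i in range(n)), sense)
    model.optimize()
    if model.SolCount == 0:  # no feasible point found within the time limit
        return now_sol, -1
    return [round(x[i].X) for i in range(n)], model.ObjVal
```

`initial_solution_search` can then be built on top of it. One plausible reading of the "fixed-radius" search in its docstring is to fix the variables whose GBDT predictions are most confident (smallest loss) and let the solver decide the rest; the 0.8 fixing fraction below is an arbitrary illustration, not a value from the paper.

```
# Hypothetical sketch: trust the most confident GBDT predictions and
# solve for the remaining variables via get_best_solution above.
def initial_solution_search(n, m, k, site, value, constraint, constraint_type,
                            coefficient, set_time, obj_type, predict, loss):
    order = sorted(range(n), key=lambda i: loss[i])  # most confident first
    start = [0] * n
    col = [1] * n                                    # 1 = free to optimize
    for i in order[:int(0.8 * n)]:
        start[i] = round(predict[i])                 # fix to the rounded prediction
        col[i] = 0
    return get_best_solution(n, m, k, site, value, constraint, constraint_type,
                             coefficient, set_time, obj_type, start, col)
```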
Paper/paper.pdf ADDED
Binary file (534 kB).
 
README.md ADDED
@@ -0,0 +1,60 @@
1
+ ## GNN&GBDT-Guided Fast Optimizing Framework for Large-scale Integer Programming
2
+
3
+ ### Overview
4
+
5
+ This release contains the key processes of the GNN&GBDT-Guided Fast Optimizing Framework, as described in the paper. The provided code implements the main components of the approach, covering data generation, training, and inference. We also provide interfaces that are left to be implemented by the user so that the code can be flexibly used in different contexts.
6
+
7
+ The following gives a brief overview of the contents; more detailed documentation is available within each file:
8
+
9
+ * __Code/data_generation.py__: Generate integer programming problems for training and testing.
10
+ * __Code/data_solution.py__: Generate optimal solutions to integer programming problems for training.
11
+ * __Code/data_partition.py__: Generate graph partition results for bipartite graph representations of integer programming problems used for training.
12
+ * __Code/GNN_train.py__: Use the prepared training data to train the GNN that generates neural embeddings of the decision variables.
13
+ * __Code/GNN_inference.py__: Use the trained GNN to generate neural embeddings of the decision variables for the training data.
14
+ * __Code/GBDT_train.py__: Train the GBDT using the training data and the resulting neural embeddings.
15
+ * __Code/test.py__: Run test data to get optimized results.
16
+ * __Code/model/graphcnn.py__: GNN model.
17
+ * __Code/model/gbdt_regressor.py__: GBDT model.
18
+ * __Result/CA__: Running results of the very large-scale version of the Combinatorial Auction problem.
19
+ * __Result/MIS__: Running results of the very large-scale version of the Maximum Independent Set problem.
20
+ * __Result/MVC__: Running results of the very large-scale version of the Minimum Vertex Cover problem.
21
+ * __Result/SC__: Running results of the very large-scale version of the Set Covering problem.
22
+ * __Paper/paper.pdf__: PDF version of the paper.
23
+
24
+ ### Requirements
25
+
26
+ The required environment is shown in GNN_GBDT.yml.
27
+
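+ For example, with conda installed, the environment can be created and activated as follows:
+
+ ```
+ conda env create -f GNN_GBDT.yml
+ conda activate GNN_GBDT
+ ```
+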
28
+ ### Usage
29
+
30
+ 1. Implement each of the unimplemented interfaces.
31
+
32
+ 2. Perform training by running the scripts in the following order:
33
+
34
+ ```
35
+ Code/data_generation.py
36
+ Code/data_solution.py
37
+ Code/data_partition.py
38
+ Code/GNN_train.py
39
+ Code/GNN_inference.py
40
+ Code/GBDT_train.py
41
+ ```
42
+
43
+ 3. Run tests with test.py (see the example below).
44
+
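+ For example, a test run using the argument defaults defined in test.py (the model filename below is the script's current default and may differ per problem) looks like:
+
+ ```
+ python Code/test.py --set_time 100 --rate 0.2 --model_path trained_model-IS-h.pkl
+ ```
+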
45
+ ### Citing this work
46
+
47
+ Paper: [GNN&GBDT-Guided Fast Optimizing Framework for Large-scale Integer Programming](https://openreview.net/pdf?id=tX7ajV69wt)
48
+
49
+ If you use the code here please cite this paper:
50
+
51
+ @inproceedings{ye2023gnn,
52
+ title={GNN\&GBDT-Guided Fast Optimizing Framework for Large-scale Integer Programming},
53
+ author={Ye, Huigen and Xu, Hua and Wang, Hongyan and Wang, Chengming and Jiang, Yu},
54
+ booktitle={ICML},
55
+ year={2023}
56
+ }
57
+
58
+ ### Contact
59
+
60
+ We welcome academic and business collaborations with funding support. For more details, please contact us via email at [xuhua@tsinghua.edu.cn](mailto:xuhua@tsinghua.edu.cn).
Result/CA/2-h1-S-CA2-100.log ADDED
@@ -0,0 +1,37 @@
1
+ nohup: ignoring input
2
+ feasible solution found by trivial heuristic after 5.3 seconds, objective value 1.106511e+06
3
+ presolving:
4
+ (round 1, fast) 6664 del vars, 0 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 0 upgd conss, 0 impls, 1000000 clqs
5
+ (18.0s) running MILP presolver
6
+ (21.5s) MILP presolver (2 rounds): 0 aggregations, 70822 fixings, 0 bound changes
7
+ (round 2, medium) 77486 del vars, 0 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 0 upgd conss, 0 impls, 992975 clqs
8
+ (round 3, fast) 84443 del vars, 13982 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 0 upgd conss, 0 impls, 986018 clqs
9
+ (round 4, exhaustive) 84523 del vars, 13986 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 986014 upgd conss, 0 impls, 986014 clqs
10
+ (round 5, medium) 104575 del vars, 14188 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 19272 chg coeffs, 986014 upgd conss, 0 impls, 985812 clqs
11
+ (round 6, fast) 105397 del vars, 14852 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 19272 chg coeffs, 986014 upgd conss, 0 impls, 985148 clqs
12
+ (44.4s) probing: 51/894550 (0.0%) - 0 fixings, 0 aggregations, 0 implications, 0 bound changes
13
+ (44.4s) probing aborted: 50/50 successive totally useless probings
14
+ (46.5s) symmetry computation started: requiring (bin +, int -, cont +), (fixed: bin -, int +, cont -)
15
+ (47.6s) no symmetry present
16
+ presolving (7 rounds: 7 fast, 4 medium, 2 exhaustive):
17
+ 105450 deleted vars, 14857 deleted constraints, 0 added constraints, 0 tightened bounds, 0 added holes, 0 changed sides, 19306 changed coefficients
18
+ 0 implications, 985143 cliques
19
+ presolved problem has 894550 variables (894550 bin, 0 int, 0 impl, 0 cont) and 985143 constraints
20
+ 985143 constraints of type <setppc>
21
+ Presolving Time: 45.52
22
+
23
+ time | node | left |LP iter|LP it/n|mem/heur|mdpt |vars |cons |rows |cuts |sepa|confs|strbr| dualbound | primalbound | gap | compl.
24
+ t51.0s| 1 | 0 | 0 | - | trivial| 0 | 894k| 985k| 0 | 0 | 0 | 0 | 0 | 4.573383e+08 | 2.006270e+07 |2179.55%| unknown
25
+ p9324s| 1 | 0 | 65810 | - | clique| 0 | 894k| 985k| 985k| 0 | 0 | 0 | 0 | 4.573383e+08 | 1.152777e+08 | 296.73%| unknown
26
+ 1000m| 1 | 0 |619298 | - | 6581M | 0 | 894k| 985k| 985k| 0 | 0 | 0 | 0 | 4.573383e+08 | 1.152777e+08 | 296.73%| unknown
27
+ (node 1) LP solver hit time limit in LP 3 -- using pseudo solution instead
28
+
29
+ SCIP Status : solving was interrupted [time limit reached]
30
+ Solving Time (sec) : 59978.53
31
+ Solving Nodes : 1
32
+ Primal Bound : +1.15277654568253e+08 (4 solutions)
33
+ Dual Bound : +4.57338288629170e+08
34
+ Gap : 296.73 %
35
+ 0.0
36
+ [60105.98654079437]
37
+ [115277654.56825252]
Result/CA/2-h1-S-CA2-20.log ADDED
The diff for this file is too large to render.
 
Result/CA/2-h1-S-CA2-30.log ADDED
The diff for this file is too large to render.
 
Result/CA/2-h1-S-CA2-50.log ADDED
The diff for this file is too large to render.
 
Result/MIS/2-h1-S-MIS2-100.log ADDED
@@ -0,0 +1,36 @@
1
+ nohup: ignoring input
2
+ feasible solution found by trivial heuristic after 16.7 seconds, objective value 3.983191e+02
3
+ presolving:
4
+ (round 1, fast) 17431 del vars, 15004 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 0 upgd conss, 0 impls, 2984987 clqs
5
+ (round 2, fast) 25267 del vars, 59925 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 0 upgd conss, 0 impls, 2940066 clqs
6
+ (54.3s) running MILP presolver
7
+ (64.1s) MILP presolver (2 rounds): 0 aggregations, 195 fixings, 0 bound changes
8
+ (round 3, exhaustive) 25989 del vars, 62494 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 0 upgd conss, 0 impls, 2937506 clqs
9
+ (round 4, exhaustive) 25992 del vars, 62497 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 2937503 upgd conss, 0 impls, 2937503 clqs
10
+ (135.5s) probing: 51/974008 (0.0%) - 0 fixings, 0 aggregations, 0 implications, 0 bound changes
11
+ (135.5s) probing aborted: 50/50 successive totally useless probings
12
+ (141.0s) symmetry computation started: requiring (bin +, int -, cont +), (fixed: bin -, int +, cont -)
13
+ (148.9s) no symmetry present
14
+ presolving (5 rounds: 5 fast, 3 medium, 3 exhaustive):
15
+ 25992 deleted vars, 62497 deleted constraints, 0 added constraints, 0 tightened bounds, 0 added holes, 0 changed sides, 0 changed coefficients
16
+ 0 implications, 2937503 cliques
17
+ presolved problem has 974008 variables (974008 bin, 0 int, 0 impl, 0 cont) and 2937503 constraints
18
+ 2937503 constraints of type <setppc>
19
+ Presolving Time: 141.03
20
+
21
+ time | node | left |LP iter|LP it/n|mem/heur|mdpt |vars |cons |rows |cuts |sepa|confs|strbr| dualbound | primalbound | gap | compl.
22
+ t 156s| 1 | 0 | 0 | - | trivial| 0 | 974k|2937k| 0 | 0 | 0 | 0 | 0 | 4.952963e+05 | 9.025819e+03 |5387.55%| unknown
23
+ p 368m| 1 | 0 | 0 | - | locks| 0 | 974k|2937k|2937k| 0 | 0 | 0 | 0 | 4.952963e+05 | 1.858086e+05 | 166.56%| unknown
24
+ i 384m| 1 | 0 | 1126 | - | oneopt| 0 | 974k|2937k|2937k| 0 | 0 | 0 | 0 | 4.952963e+05 | 1.884205e+05 | 162.87%| unknown
25
+ 999m| 1 | 0 | 1229k| - | 9844M | 0 | 974k|2937k|2937k| 0 | 0 | 0 | 0 | 4.952963e+05 | 1.884205e+05 | 162.87%| unknown
26
+ (node 1) LP solver hit time limit in LP 2 -- using pseudo solution instead
27
+
28
+ SCIP Status : solving was interrupted [time limit reached]
29
+ Solving Time (sec) : 59948.10
30
+ Solving Nodes : 1
31
+ Primal Bound : +1.88420496002484e+05 (5 solutions)
32
+ Dual Bound : +4.95296290213063e+05
33
+ Gap : 162.87 %
34
+ 0.0
35
+ [60134.32302713394]
36
+ [188420.496002484]
Result/MIS/2-h1-S-MIS2-20.log ADDED
The diff for this file is too large to render.
 
Result/MIS/2-h1-S-MIS2-30.log ADDED
The diff for this file is too large to render.
 
Result/MIS/2-h1-S-MIS2-50.log ADDED
The diff for this file is too large to render.
 
Result/MVC/2-h1-S-MVC2-100.log ADDED
@@ -0,0 +1,36 @@
1
+ nohup: ignoring input
2
+ feasible solution found by trivial heuristic after 68.6 seconds, objective value 4.996566e+05
3
+ presolving:
4
+ (round 1, fast) 17431 del vars, 15004 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 0 upgd conss, 0 impls, 2984987 clqs
5
+ (round 2, fast) 25267 del vars, 59925 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 0 upgd conss, 0 impls, 2940066 clqs
6
+ (185.8s) running MILP presolver
7
+ (218.2s) MILP presolver (2 rounds): 0 aggregations, 195 fixings, 0 bound changes
8
+ (round 3, exhaustive) 25989 del vars, 62494 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 0 upgd conss, 0 impls, 2937506 clqs
9
+ (round 4, exhaustive) 25992 del vars, 62497 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 2937503 upgd conss, 0 impls, 2937503 clqs
10
+ (545.8s) probing: 51/974008 (0.0%) - 0 fixings, 0 aggregations, 0 implications, 0 bound changes
11
+ (545.8s) probing aborted: 50/50 successive totally useless probings
12
+ (579.7s) symmetry computation started: requiring (bin +, int -, cont +), (fixed: bin -, int +, cont -)
13
+ (621.0s) no symmetry present
14
+ presolving (5 rounds: 5 fast, 3 medium, 3 exhaustive):
15
+ 25992 deleted vars, 62497 deleted constraints, 0 added constraints, 0 tightened bounds, 0 added holes, 0 changed sides, 0 changed coefficients
16
+ 0 implications, 2937503 cliques
17
+ presolved problem has 974008 variables (974008 bin, 0 int, 0 impl, 0 cont) and 2937503 constraints
18
+ 2937503 constraints of type <setppc>
19
+ Presolving Time: 606.48
20
+
21
+ time | node | left |LP iter|LP it/n|mem/heur|mdpt |vars |cons |rows |cuts |sepa|confs|strbr| dualbound | primalbound | gap | compl.
22
+ t 675s| 1 | 0 | 0 | - | trivial| 0 | 974k|2937k| 0 | 0 | 0 | 0 | 0 | 5.174453e+03 | 4.914449e+05 |9397.52%| unknown
23
+ p 404m| 1 | 0 | 0 | - | locks| 0 | 974k|2937k|2937k| 0 | 0 | 0 | 0 | 5.174453e+03 | 3.146621e+05 |5981.07%| unknown
24
+ i 423m| 1 | 0 | 1126 | - | oneopt| 0 | 974k|2937k|2937k| 0 | 0 | 0 | 0 | 5.174453e+03 | 3.120502e+05 |5930.59%| unknown
25
+ 998m| 1 | 0 | 1274k| - | 10G | 0 | 974k|2937k|2937k| 0 | 0 | 0 | 0 | 5.174453e+03 | 3.120502e+05 |5930.59%| unknown
26
+ (node 1) LP solver hit time limit in LP 2 -- using pseudo solution instead
27
+
28
+ SCIP Status : solving was interrupted [time limit reached]
29
+ Solving Time (sec) : 59892.56
30
+ Solving Nodes : 1
31
+ Primal Bound : +3.12050247079965e+05 (5 solutions)
32
+ Dual Bound : +5.17445286937992e+03
33
+ Gap : 5930.59 %
34
+ 0.0
35
+ [60219.669788360596]
36
+ [312050.24707996455]
Result/MVC/2-h1-S-MVC2-20.log ADDED
The diff for this file is too large to render.
 
Result/MVC/2-h1-S-MVC2-30.log ADDED
The diff for this file is too large to render.
 
Result/MVC/2-h1-S-MVC2-50.log ADDED
The diff for this file is too large to render.
 
Result/SC/2-h1-S-SC2-100.log ADDED
@@ -0,0 +1,82 @@
1
+ nohup: ignoring input
2
+ feasible solution found by trivial heuristic after 26.5 seconds, objective value 3.983191e+02
3
+ presolving:
4
+ (round 1, fast) 17431 del vars, 15004 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 0 upgd conss, 0 impls, 2984987 clqs
5
+ (round 2, fast) 25267 del vars, 59925 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 0 upgd conss, 0 impls, 2940066 clqs
6
+ (77.7s) running MILP presolver
7
+ (90.4s) MILP presolver (2 rounds): 0 aggregations, 195 fixings, 0 bound changes
8
+ (round 3, exhaustive) 25989 del vars, 62494 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 0 upgd conss, 0 impls, 2937506 clqs
9
+ (round 4, exhaustive) 25992 del vars, 62497 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 2937503 upgd conss, 0 impls, 2937503 clqs
10
+ (190.3s) probing: 51/974008 (0.0%) - 0 fixings, 0 aggregations, 0 implications, 0 bound changes
11
+ (190.3s) probing aborted: 50/50 successive totally useless probings
12
+ (199.5s) symmetry computation started: requiring (bin +, int -, cont +), (fixed: bin -, int +, cont -)
13
+ (214.0s) no symmetry present
14
+ presolving (5 rounds: 5 fast, 3 medium, 3 exhaustive):
15
+ 25992 deleted vars, 62497 deleted constraints, 0 added constraints, 0 tightened bounds, 0 added holes, 0 changed sides, 0 changed coefficients
16
+ 0 implications, 2937503 cliques
17
+ presolved problem has 974008 variables (974008 bin, 0 int, 0 impl, 0 cont) and 2937503 constraints
18
+ 2937503 constraints of type <setppc>
19
+ Presolving Time: 205.12
20
+
21
+ time | node | left |LP iter|LP it/n|mem/heur|mdpt |vars |cons |rows |cuts |sepa|confs|strbr| dualbound | primalbound | gap | compl.
22
+ t 228s| 1 | 0 | 0 | - | trivial| 0 | 974k|2937k| 0 | 0 | 0 | 0 | 0 | 4.952963e+05 | 9.025819e+03 |5387.55%| unknown
23
+ p 468m| 1 | 0 | 0 | - | locks| 0 | 974k|2937k|2937k| 0 | 0 | 0 | 0 | 4.952963e+05 | 1.858086e+05 | 166.56%| unknown
24
+ i 489m| 1 | 0 | 1126 | - | oneopt| 0 | 974k|2937k|2937k| 0 | 0 | 0 | 0 | 4.952963e+05 | 1.884205e+05 | 162.87%| unknown
25
+ 999m| 1 | 0 | 1224k| - | 9844M | 0 | 974k|2937k|2937k| 0 | 0 | 0 | 0 | 4.952963e+05 | 1.884205e+05 | 162.87%| unknown
26
+ (node 1) LP solver hit time limit in LP 2 -- using pseudo solution instead
27
+
28
+ SCIP Status : solving was interrupted [time limit reached]
29
+ Solving Time (sec) : 59936.37
30
+ Solving Nodes : 1
31
+ Primal Bound : +1.88420496002484e+05 (5 solutions)
32
+ Dual Bound : +4.95296290213063e+05
33
+ Gap : 162.87 %
34
+ 0.0
35
+ [60132.21912121773]
36
+ [188420.496002484]
37
+ nohup: ignoring input
38
+ nohup: ignoring input
39
+ No problem file!
40
+ Traceback (most recent call last):
41
+ File "/home/yehuigen/GNN_GBDT/08-SC2-SCIP.py", line 481, in <module>
42
+ optimize(**vars(args))
43
+ File "/home/yehuigen/GNN_GBDT/08-SC2-SCIP.py", line 238, in optimize
44
+ with open('./example-SC-h/node1.pickle', "rb") as f:
45
+ FileNotFoundError: [Errno 2] No such file or directory: './example-SC-h/node1.pickle'
46
+ nohup: ignoring input
47
+ feasible solution found by trivial heuristic after 23.7 seconds, objective value 9.876481e+05
48
+ presolving:
49
+ (round 1, fast) 36806 del vars, 0 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 0 upgd conss, 0 impls, 0 clqs
50
+ (61.1s) running MILP presolver
51
+ (69.1s) MILP presolver found nothing
52
+ (round 2, exhaustive) 36806 del vars, 0 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 2000000 upgd conss, 0 impls, 0 clqs
53
+ (round 3, fast) 132962 del vars, 1 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 0 chg coeffs, 2000000 upgd conss, 0 impls, 0 clqs
54
+ (round 4, fast) 134745 del vars, 1 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 96152 chg coeffs, 2000000 upgd conss, 0 impls, 37 clqs
55
+ (round 5, exhaustive) 138451 del vars, 57 del conss, 0 add conss, 0 chg bounds, 0 chg sides, 98058 chg coeffs, 2000000 upgd conss, 0 impls, 1931 clqs
56
+ (164.1s) probing: 51/1861533 (0.0%) - 0 fixings, 0 aggregations, 0 implications, 0 bound changes
57
+ (164.1s) probing aborted: 50/50 successive totally useless probings
58
+ (171.4s) symmetry computation started: requiring (bin +, int -, cont +), (fixed: bin -, int +, cont -)
59
+ (184.4s) no symmetry present
60
+ presolving (6 rounds: 6 fast, 3 medium, 3 exhaustive):
61
+ 138479 deleted vars, 57 deleted constraints, 0 added constraints, 0 tightened bounds, 0 added holes, 0 changed sides, 101639 changed coefficients
62
+ 0 implications, 2028 cliques
63
+ presolved problem has 1861533 variables (1861533 bin, 0 int, 0 impl, 0 cont) and 1999943 constraints
64
+ 1999943 constraints of type <logicor>
65
+ Presolving Time: 180.11
66
+
67
+ time | node | left |LP iter|LP it/n|mem/heur|mdpt |vars |cons |rows |cuts |sepa|confs|strbr| dualbound | primalbound | gap | compl.
68
+ t 202s| 1 | 0 | 0 | - | trivial| 0 |1861k|1999k| 0 | 0 | 0 | 0 | 0 | 2.666364e+00 | 9.193624e+05 | Large | unknown
69
+ p7332s| 1 | 0 | 0 | - | locks| 0 |1861k|1999k|1999k| 0 | 0 | 0 | 0 | 2.666364e+00 | 2.535118e+05 | Large | unknown
70
+ i7338s| 1 | 0 | 0 | - | oneopt| 0 |1861k|1999k|1999k| 0 | 0 | 0 | 0 | 2.666364e+00 | 2.521799e+05 | Large | unknown
71
+ 999m| 1 | 0 |753936 | - | 10G | 0 |1861k|1999k|1999k| 0 | 0 | 0 | 0 | 2.666364e+00 | 2.521799e+05 | Large | unknown
72
+ (node 1) LP solver hit time limit in LP 1 -- using pseudo solution instead
73
+
74
+ SCIP Status : solving was interrupted [time limit reached]
75
+ Solving Time (sec) : 59942.29
76
+ Solving Nodes : 1
77
+ Primal Bound : +2.52179898788591e+05 (5 solutions)
78
+ Dual Bound : +2.66636350119586e+00
79
+ Gap : 9457721.44 %
80
+ 0.0
81
+ [60210.22903895378]
82
+ [252179.89878859147]
Result/SC/2-h1-S-SC2-20.log ADDED
The diff for this file is too large to render.
 
Result/SC/2-h1-S-SC2-30.log ADDED
The diff for this file is too large to render.
 
Result/SC/2-h1-S-SC2-50.log ADDED
The diff for this file is too large to render.