repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
stellargraph | stellargraph-master/tests/layer/test_cluster_models.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from stellargraph.layer import APPNP, GAT, GCN
from stellargraph.mapper import ClusterNodeGenerator
import tensorflow as tf
import numpy as np
from ..test_utils.graphs import example_graph_random
import pytest
@pytest.mark.parametrize("model_type", [APPNP, GAT, GCN])
def test_fullbatch_cluster_models(model_type):
    """Smoke-test fit/evaluate of full-batch models (APPNP/GAT/GCN) driven by a ClusterNodeGenerator."""
    G = example_graph_random(n_nodes=50)
    generator = ClusterNodeGenerator(G, clusters=10)
    # use a strict subset of nodes so head-node selection within clusters is exercised
    nodes = G.nodes()[:40]
    gen = generator.flow(nodes, targets=np.ones(len(nodes)))
    gnn = model_type(
        generator=generator,
        layer_sizes=[16, 16, 1],
        activations=["relu", "relu", "relu"],
    )
    model = tf.keras.Model(*gnn.in_out_tensors())
    model.compile(optimizer="adam", loss="binary_crossentropy")
    history = model.fit(gen, validation_data=gen, epochs=2)
    results = model.evaluate(gen)
    # this doesn't work for any cluster models including ClusterGCN
    # because the model spits out predictions with shapes:
    # [(1, cluster_1_size, feat_size), (1, cluster_2_size, feat_size)...]
    # and attempts to concatenate along axis 0
    # predictions = model.predict(gen)
    x_in, x_out = gnn.in_out_tensors()
    # squeeze away the batch dimension so variable-sized per-cluster outputs concatenate cleanly
    x_out_flat = tf.squeeze(x_out, 0)
    embedding_model = tf.keras.Model(inputs=x_in, outputs=x_out_flat)
    predictions = embedding_model.predict(gen)
    assert predictions.shape == (len(nodes), 1)
| 1,978 | 35.648148 | 74 | py |
stellargraph | stellargraph-master/tests/layer/test_attri2vec.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Attri2Vec tests
"""
from stellargraph.core.graph import StellarGraph
from stellargraph.mapper import Attri2VecNodeGenerator
from stellargraph.layer.attri2vec import *
from tensorflow import keras
import numpy as np
import networkx as nx
import pytest
from ..test_utils.graphs import example_graph
from .. import test_utils
pytestmark = test_utils.ignore_stellargraph_experimental_mark
def test_attri2vec_constructor():
    """Check Attri2Vec construction from explicit dims and from a generator, plus argument validation."""
    attri2vec = Attri2Vec(
        layer_sizes=[4], input_dim=2, node_num=4, multiplicity=2, normalize="l2"
    )
    assert attri2vec.dims == [2, 4]
    assert attri2vec.input_node_num == 4
    assert attri2vec.n_layers == 1
    assert attri2vec.bias == False
    # Check incorrect activation flag
    with pytest.raises(ValueError):
        Attri2Vec(
            layer_sizes=[4],
            input_dim=2,
            node_num=4,
            multiplicity=2,
            activation="unknown",
        )
    # Check incorrect normalization flag
    with pytest.raises(ValueError):
        Attri2Vec(
            layer_sizes=[4],
            input_dim=2,
            node_num=4,
            multiplicity=2,
            normalize=lambda x: x,
        )
    with pytest.raises(ValueError):
        Attri2Vec(
            layer_sizes=[4],
            input_dim=2,
            node_num=4,
            multiplicity=2,
            normalize="unknown",
        )
    # Check requirement for generator or input_dim and node_num & multiplicity
    with pytest.raises(ValueError):
        Attri2Vec(layer_sizes=[4])
    # Construction from generator
    G = example_graph(feature_size=3)
    gen = Attri2VecNodeGenerator(G, batch_size=2)
    attri2vec = Attri2Vec(layer_sizes=[4, 8], generator=gen, bias=True)
    assert attri2vec.dims == [3, 4, 8]
    assert attri2vec.input_node_num == 4
    assert attri2vec.n_layers == 2
    assert attri2vec.bias
def test_attri2vec_apply():
    """Check Attri2Vec forward passes (node and link models) against hand-computed all-ones-weight outputs."""
    attri2vec = Attri2Vec(
        layer_sizes=[2, 2, 2],
        bias=False,
        input_dim=2,
        node_num=4,
        multiplicity=2,
        activation="linear",
        normalize=None,
    )
    # the rest of the test assumes the weights are 1, to get the predictions to be easily computed,
    # so let's build a basic model to set those weights (which are stored statefully in Attri2vec)
    model = keras.Model(*attri2vec.in_out_tensors())
    model.set_weights([np.ones_like(w) for w in model.get_weights()])
    x = np.array([[1, 2]])
    expected = np.array([[12, 12]])
    inp = keras.Input(shape=(2,))
    out = attri2vec(inp)
    model1 = keras.Model(inputs=inp, outputs=out)
    actual = model1.predict(x)
    assert expected == pytest.approx(actual)
    # Use the node model:
    xinp, xout = attri2vec.in_out_tensors(multiplicity=1)
    model2 = keras.Model(inputs=xinp, outputs=xout)
    assert pytest.approx(expected) == model2.predict(x)
    x1 = np.array([[3, 1]])
    x2 = np.array([[2]])
    y1 = np.array([[16, 16]])
    y2 = np.array([[1, 1]])
    # Test the build function:
    xinp, xout = attri2vec.in_out_tensors()
    model3 = keras.Model(inputs=xinp, outputs=xout)
    actual = model3.predict([x1, x2])
    assert pytest.approx(y1) == actual[0]
    assert pytest.approx(y2) == actual[1]
    # Use the link model:
    xinp, xout = attri2vec.in_out_tensors()
    model4 = keras.Model(inputs=xinp, outputs=xout)
    actual = model4.predict([x1, x2])
    assert pytest.approx(y1) == actual[0]
    assert pytest.approx(y2) == actual[1]
def test_attri2vec_serialize():
    """Check that an Attri2Vec model survives a Keras JSON serialisation round trip."""
    attri2vec = Attri2Vec(
        layer_sizes=[4],
        bias=False,
        input_dim=2,
        node_num=4,
        multiplicity=2,
        activation="linear",
        normalize=None,
    )
    inp = keras.Input(shape=(2,))
    out = attri2vec(inp)
    model = keras.Model(inputs=inp, outputs=out)
    # Save model
    model_json = model.to_json()
    # Set all weights to one
    model_weights = [np.ones_like(w) for w in model.get_weights()]
    # Load model from json & set all weights
    model2 = keras.models.model_from_json(model_json)
    model2.set_weights(model_weights)
    # Test loaded model
    x = np.array([[1, 2]])
    expected = np.array([[3, 3, 3, 3]])
    actual = model2.predict(x)
    assert expected == pytest.approx(actual)
def test_attri2vec_save_load(tmpdir):
    """Round-trip an Attri2Vec model through Keras save/load."""
    config = dict(
        layer_sizes=[4],
        bias=True,
        input_dim=2,
        node_num=4,
        multiplicity=2,
        activation="linear",
        normalize=None,
    )
    test_utils.model_save_load(tmpdir, Attri2Vec(**config))
| 5,167 | 27.086957 | 99 | py |
stellargraph | stellargraph-master/tests/layer/test_cluster_gcn.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cluster-GCN tests
"""
from tensorflow.keras import backend as K
from stellargraph.layer.cluster_gcn import *
from stellargraph.mapper import ClusterNodeGenerator
from stellargraph.core.graph import StellarGraph
import pandas as pd
import numpy as np
from tensorflow import keras
import tensorflow as tf
import pytest
from ..test_utils.graphs import create_graph_features
from .. import test_utils
pytestmark = pytest.mark.filterwarnings(
r"ignore:ClusterGCN has been replaced by GCN:DeprecationWarning"
)
def test_ClusterGCN_init():
    """Constructor should record layer sizes, activations and dropout as given."""
    graph, _ = create_graph_features()
    gen = ClusterNodeGenerator(graph)
    model = ClusterGCN(
        layer_sizes=[2], generator=gen, activations=["relu"], dropout=0.5
    )
    assert model.layer_sizes == [2]
    assert model.activations == ["relu"]
    assert model.dropout == 0.5
def test_ClusterGCN_apply():
    """Check the output shape of a ClusterGCN forward pass via the generator flow."""
    G, _ = create_graph_features()
    generator = ClusterNodeGenerator(G)
    cluster_gcn_model = ClusterGCN(
        layer_sizes=[2], generator=generator, activations=["relu"], dropout=0.0
    )
    x_in, x_out = cluster_gcn_model.in_out_tensors()
    model = keras.Model(inputs=x_in, outputs=x_out)
    # Check fit method
    preds_2 = model.predict(generator.flow(["a", "b", "c"]))
    assert preds_2.shape == (1, 3, 2)
def test_ClusterGCN_activations():
    """Check that activations are stored correctly and invalid specifications are rejected."""
    G, _ = create_graph_features()
    generator = ClusterNodeGenerator(G)
    # Test activations are set correctly
    cluster_gcn = ClusterGCN(layer_sizes=[2], generator=generator, activations=["relu"])
    assert cluster_gcn.activations == ["relu"]
    cluster_gcn = ClusterGCN(
        layer_sizes=[2, 2], generator=generator, activations=["relu", "relu"]
    )
    assert cluster_gcn.activations == ["relu", "relu"]
    cluster_gcn = ClusterGCN(
        layer_sizes=[2], generator=generator, activations=["linear"]
    )
    assert cluster_gcn.activations == ["linear"]
    with pytest.raises(TypeError):
        # activations for layers must be specified
        ClusterGCN(layer_sizes=[2], generator=generator)
    with pytest.raises(ValueError):
        # More activations than layers
        ClusterGCN(layer_sizes=[2], generator=generator, activations=["relu", "linear"])
    with pytest.raises(ValueError):
        # Fewer activations than layers
        ClusterGCN(layer_sizes=[2, 2], generator=generator, activations=["relu"])
    with pytest.raises(ValueError):
        # Unknown activation
        ClusterGCN(layer_sizes=[2], generator=generator, activations=["bleach"])
def test_ClusterGCN_regularisers():
    """Check that valid regularisers/initialisers are accepted and unknown names are rejected."""
    G, _ = create_graph_features()
    generator = ClusterNodeGenerator(G)
    cluster_gcn = ClusterGCN(
        layer_sizes=[2],
        activations=["relu"],
        generator=generator,
        kernel_regularizer=keras.regularizers.l2(),
    )
    with pytest.raises(ValueError):
        ClusterGCN(
            layer_sizes=[2],
            activations=["relu"],
            generator=generator,
            kernel_regularizer="fred",
        )
    cluster_gcn = ClusterGCN(
        layer_sizes=[2],
        activations=["relu"],
        generator=generator,
        bias_initializer="zeros",
    )
    cluster_gcn = ClusterGCN(
        layer_sizes=[2],
        activations=["relu"],
        generator=generator,
        bias_initializer=initializers.zeros(),
    )
    with pytest.raises(ValueError):
        ClusterGCN(
            layer_sizes=[2],
            activations=["relu"],
            generator=generator,
            bias_initializer="barney",
        )
def test_kernel_and_bias_defaults():
    """ClusterGCN conv layers default to GlorotUniform kernels, zero biases and no regularisation."""
    graph, _ = create_graph_features()
    cluster_gcn = ClusterGCN(
        layer_sizes=[2, 2],
        activations=["relu", "relu"],
        generator=ClusterNodeGenerator(graph),
    )
    conv_layers = (l for l in cluster_gcn._layers if isinstance(l, GraphConvolution))
    for conv in conv_layers:
        assert isinstance(conv.kernel_initializer, tf.initializers.GlorotUniform)
        assert isinstance(conv.bias_initializer, tf.initializers.Zeros)
        for attr in (
            "kernel_regularizer",
            "bias_regularizer",
            "kernel_constraint",
            "bias_constraint",
        ):
            assert getattr(conv, attr) is None
def test_ClusterGCN_save_load(tmpdir):
    """Round-trip a ClusterGCN model through Keras save/load."""
    graph, _ = create_graph_features()
    model = ClusterGCN(
        layer_sizes=[2, 3],
        generator=ClusterNodeGenerator(graph),
        activations=["relu", "relu"],
    )
    test_utils.model_save_load(tmpdir, model)
| 5,180 | 28.947977 | 88 | py |
stellargraph | stellargraph-master/tests/layer/test_rgcn.py | # -*- coding: utf-8 -*-
#
# Copyright 2019-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from stellargraph.layer.rgcn import RelationalGraphConvolution, RGCN
from stellargraph.mapper.full_batch_generators import RelationalFullBatchNodeGenerator
import pytest
from scipy import sparse as sps
from stellargraph.core.utils import normalize_adj
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input, Lambda
from stellargraph import StellarDiGraph, StellarGraph
from stellargraph.layer.misc import SqueezedSparseConversion
import pandas as pd
from ..test_utils.graphs import (
relational_create_graph_features as create_graph_features,
)
from .. import test_utils
def test_RelationalGraphConvolution_config():
    """get_config should report the constructor arguments plus layer defaults."""
    conf = RelationalGraphConvolution(units=16, num_relationships=5).get_config()
    assert conf["units"] == 16
    assert conf["activation"] == "linear"
    assert conf["num_bases"] == 0
    assert conf["num_relationships"] == 5
    assert conf["use_bias"] == True
    # default initializers for every weight group
    expected_initializers = {
        "kernel_initializer": "GlorotUniform",
        "bias_initializer": "Zeros",
        "basis_initializer": "GlorotUniform",
        "coefficient_initializer": "GlorotUniform",
    }
    for key, class_name in expected_initializers.items():
        assert conf[key]["class_name"] == class_name
    # regularisers and constraints are unset by default
    for prefix in ("kernel", "bias", "basis", "coefficient"):
        assert conf[prefix + "_regularizer"] is None
        assert conf[prefix + "_constraint"] is None
def test_RelationalGraphConvolution_init():
    """Constructor should store units, bias flag, num_bases and activation, with or without bases."""
    for bases in (0, 10):
        layer = RelationalGraphConvolution(
            units=16, num_relationships=5, num_bases=bases, activation="relu"
        )
        assert layer.units == 16
        assert layer.use_bias is True
        assert layer.num_bases == bases
        assert layer.get_config()["activation"] == "relu"
def test_RelationalGraphConvolution_sparse():
    """Check an RGCN layer forward pass fed with sparse per-edge-type adjacency inputs."""
    G, features = create_graph_features()
    n_edge_types = len(G.edge_types)
    # We need to specify the batch shape as one for the GraphConvolutional logic to work
    n_nodes = features.shape[0]
    n_feat = features.shape[1]
    # Inputs for features
    x_t = Input(batch_shape=(1, n_nodes, n_feat))
    # Create inputs for sparse or dense matrices
    # Placeholders for the sparse adjacency matrix
    As_indices = [
        Input(batch_shape=(1, None, 2), dtype="int64") for i in range(n_edge_types)
    ]
    As_values = [Input(batch_shape=(1, None)) for i in range(n_edge_types)]
    A_placeholders = As_indices + As_values
    # convert the (indices, values) pairs into one sparse tensor per edge type
    Ainput = [
        SqueezedSparseConversion(shape=(n_nodes, n_nodes), dtype=As_values[i].dtype)(
            [As_indices[i], As_values[i]]
        )
        for i in range(n_edge_types)
    ]
    x_inp_model = [x_t] + A_placeholders
    x_inp_conv = [x_t] + Ainput
    out = RelationalGraphConvolution(2, num_relationships=n_edge_types)(x_inp_conv)
    # Note we add a batch dimension of 1 to model inputs
    As = [A.tocoo() for A in get_As(G)]
    A_indices = [
        np.expand_dims(np.hstack((A.row[:, None], A.col[:, None])).astype(np.int64), 0)
        for A in As
    ]
    A_values = [np.expand_dims(A.data, 0) for A in As]
    out_indices = np.array([[0, 1]], dtype="int32")
    x = features[None, :, :]
    model = keras.Model(inputs=x_inp_model, outputs=out)
    preds = model.predict([x] + A_indices + A_values, batch_size=1)
    assert preds.shape == (1, 3, 2)
def test_RelationalGraphConvolution_dense():
    """Check an RGCN layer forward pass fed with dense per-edge-type adjacency inputs."""
    G, features = create_graph_features()
    n_edge_types = len(G.edge_types)
    # We need to specify the batch shape as one for the GraphConvolutional logic to work
    n_nodes = features.shape[0]
    n_feat = features.shape[1]
    # Inputs for features & target indices
    x_t = Input(batch_shape=(1, n_nodes, n_feat))
    out_indices_t = Input(batch_shape=(1, None), dtype="int32")
    # Create inputs for sparse or dense matrices
    # Placeholders for the sparse adjacency matrix
    A_placeholders = [
        Input(batch_shape=(1, n_nodes, n_nodes)) for _ in range(n_edge_types)
    ]
    # drop the leading batch dimension before feeding each adjacency to the layer
    A_in = [Lambda(lambda A: K.squeeze(A, 0))(A_p) for A_p in A_placeholders]
    x_inp_model = [x_t] + A_placeholders
    x_inp_conv = [x_t] + A_in
    out = RelationalGraphConvolution(2, num_relationships=n_edge_types)(x_inp_conv)
    As = [np.expand_dims(A.todense(), 0) for A in get_As(G)]
    out_indices = np.array([[0, 1]], dtype="int32")
    x = features[None, :, :]
    model = keras.Model(inputs=x_inp_model, outputs=out)
    preds = model.predict([x] + As, batch_size=1)
    assert preds.shape == (1, 3, 2)
def test_RGCN_init():
    """RGCN model constructor should record its hyperparameters."""
    graph, _ = create_graph_features()
    gen = RelationalFullBatchNodeGenerator(graph)
    model = RGCN([2], gen, num_bases=10, activations=["relu"], dropout=0.5)
    assert model.layer_sizes == [2]
    assert model.activations == ["relu"]
    assert model.dropout == 0.5
    assert model.num_bases == 10
def test_RGCN_apply_sparse():
    """Check RGCN predictions from manual sparse inputs match the generator flow."""
    G, features = create_graph_features(is_directed=True)
    As = get_As(G)
    As = [A.tocoo() for A in As]
    # COO indices/values with a leading batch dimension of 1, one pair per edge type
    A_indices = [
        np.expand_dims(np.hstack((A.row[:, None], A.col[:, None])).astype(np.int64), 0)
        for A in As
    ]
    A_values = [np.expand_dims(A.data, 0) for A in As]
    generator = RelationalFullBatchNodeGenerator(G, sparse=True)
    rgcnModel = RGCN([2], generator, num_bases=10, activations=["relu"], dropout=0.5)
    x_in, x_out = rgcnModel.in_out_tensors()
    model = keras.Model(inputs=x_in, outputs=x_out)
    # Check fit method
    out_indices = np.array([[0, 1]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices] + A_indices + A_values)
    assert preds_1.shape == (1, 2, 2)
    # Check fit method
    preds_2 = model.predict(generator.flow(["a", "b"]))
    assert preds_2.shape == (1, 2, 2)
    assert preds_1 == pytest.approx(preds_2)
def test_RGCN_apply_dense():
    """Check RGCN predictions from manual dense inputs match the generator flow."""
    G, features = create_graph_features(is_directed=True)
    As = get_As(G)
    As = [np.expand_dims(A.todense(), 0) for A in As]
    generator = RelationalFullBatchNodeGenerator(G, sparse=False)
    rgcnModel = RGCN([2], generator, num_bases=10, activations=["relu"], dropout=0.5)
    x_in, x_out = rgcnModel.in_out_tensors()
    model = keras.Model(inputs=x_in, outputs=x_out)
    # Check fit method
    out_indices = np.array([[0, 1]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices] + As)
    assert preds_1.shape == (1, 2, 2)
    # Check fit method
    preds_2 = model.predict(generator.flow(["a", "b"]))
    assert preds_2.shape == (1, 2, 2)
    assert preds_1 == pytest.approx(preds_2)
def test_RGCN_apply_sparse_directed():
    """Check RGCN sparse predictions on a directed graph match the generator flow."""
    G, features = create_graph_features(is_directed=True)
    As = get_As(G)
    As = [A.tocoo() for A in As]
    A_indices = [
        np.expand_dims(np.hstack((A.row[:, None], A.col[:, None])).astype(np.int64), 0)
        for A in As
    ]
    A_values = [np.expand_dims(A.data, 0) for A in As]
    generator = RelationalFullBatchNodeGenerator(G, sparse=True)
    rgcnModel = RGCN([2], generator, num_bases=10, activations=["relu"], dropout=0.5)
    x_in, x_out = rgcnModel.in_out_tensors()
    model = keras.Model(inputs=x_in, outputs=x_out)
    # Check fit method
    out_indices = np.array([[0, 1]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices] + A_indices + A_values)
    assert preds_1.shape == (1, 2, 2)
    # Check fit method
    preds_2 = model.predict(generator.flow(["a", "b"]))
    assert preds_2.shape == (1, 2, 2)
    assert preds_1 == pytest.approx(preds_2)
def test_RGCN_apply_dense_directed():
    """Check RGCN dense predictions on a directed graph match the generator flow."""
    G, features = create_graph_features(is_directed=True)
    As = get_As(G)
    As = [np.expand_dims(A.todense(), 0) for A in As]
    generator = RelationalFullBatchNodeGenerator(G, sparse=False)
    rgcnModel = RGCN([2], generator, num_bases=10, activations=["relu"], dropout=0.5)
    x_in, x_out = rgcnModel.in_out_tensors()
    model = keras.Model(inputs=x_in, outputs=x_out)
    # Check fit method
    out_indices = np.array([[0, 1]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices] + As)
    assert preds_1.shape == (1, 2, 2)
    # Check fit method
    preds_2 = model.predict(generator.flow(["a", "b"]))
    assert preds_2.shape == (1, 2, 2)
    assert preds_1 == pytest.approx(preds_2)
def test_RelationalGraphConvolution_edge_cases():
    """Validate RelationalGraphConvolution constructor argument checking.

    The previous version used ``try/except`` followed by ``assert str(error)``:
    if the expected exception was never raised, the assert either reused a
    stale ``error`` from an earlier case or raised ``NameError`` — both obscure
    the real failure. ``pytest.raises`` makes each expectation explicit.
    """
    # non-integer num_bases must be rejected
    with pytest.raises(TypeError, match="num_bases should be an int"):
        RelationalGraphConvolution(
            units=16, num_relationships=5, num_bases=0.5, activation="relu"
        )
    # a negative num_bases is accepted and disables basis decomposition
    rgcn_layer = RelationalGraphConvolution(
        units=16, num_relationships=5, num_bases=-1, activation="relu"
    )
    rgcn_layer.build(input_shapes=[(1,)])
    assert rgcn_layer.bases is None
    # non-integer / non-positive num_relationships must be rejected
    with pytest.raises(TypeError, match="num_relationships should be an int"):
        RelationalGraphConvolution(
            units=16, num_relationships=0.5, num_bases=2, activation="relu"
        )
    with pytest.raises(ValueError, match="num_relationships should be positive"):
        RelationalGraphConvolution(
            units=16, num_relationships=-1, num_bases=2, activation="relu"
        )
    # non-integer / non-positive units must be rejected
    with pytest.raises(TypeError, match="units should be an int"):
        RelationalGraphConvolution(
            units=0.5, num_relationships=1, num_bases=2, activation="relu"
        )
    with pytest.raises(ValueError, match="units should be positive"):
        RelationalGraphConvolution(
            units=-16, num_relationships=1, num_bases=2, activation="relu"
        )
def get_As(G):
    """Build a row-normalised adjacency matrix (CSR) for each edge type of ``G``.

    Returns a list ordered as ``G.edge_types``; each entry is D^-1 A for that
    edge type, mirroring RGCN's per-relation normalisation.
    """
    As = []
    node_list = list(G.nodes())
    node_index = dict(zip(node_list, range(len(node_list))))
    for edge_type in G.edge_types:
        col_index = [
            node_index[n1]
            for n1, n2, etype in G.edges(include_edge_type=True)
            if etype == edge_type
        ]
        row_index = [
            node_index[n2]
            for n1, n2, etype in G.edges(include_edge_type=True)
            if etype == edge_type
        ]
        data = np.ones(len(col_index), np.float64)
        A = sps.coo_matrix(
            (data, (row_index, col_index)), shape=(len(node_list), len(node_list))
        )
        # row-normalise; the epsilon guards against division by zero on isolated rows
        d = sps.diags(np.float_power(np.array(A.sum(1)) + 1e-9, -1).flatten(), 0)
        A = d.dot(A).tocsr()
        As.append(A)
    return As
def test_kernel_and_bias_defaults():
    """RGCN layers default to GlorotUniform kernels, zero biases and no regularisation."""
    graph, _ = create_graph_features()
    model = RGCN(
        [2, 2], RelationalFullBatchNodeGenerator(graph, sparse=False), num_bases=10
    )
    conv_layers = (
        l for l in model._layers if isinstance(l, RelationalGraphConvolution)
    )
    for conv in conv_layers:
        assert isinstance(conv.kernel_initializer, tf.initializers.GlorotUniform)
        assert isinstance(conv.bias_initializer, tf.initializers.Zeros)
        for attr in (
            "kernel_regularizer",
            "bias_regularizer",
            "kernel_constraint",
            "bias_constraint",
        ):
            assert getattr(conv, attr) is None
@pytest.mark.parametrize("num_bases", [0, 10])
@pytest.mark.parametrize(
    "sparse", [False, pytest.param(True, marks=pytest.mark.xfail(reason="FIXME #1251"))]
)
def test_RGCN_save_load(tmpdir, num_bases, sparse):
    """Round-trip an RGCN model through Keras save/load."""
    graph, _ = create_graph_features()
    gen = RelationalFullBatchNodeGenerator(graph, sparse=sparse)
    test_utils.model_save_load(tmpdir, RGCN([2, 2], gen, num_bases=num_bases))
| 12,811 | 32.020619 | 88 | py |
stellargraph | stellargraph-master/tests/layer/test_appnp.py | # -*- coding: utf-8 -*-
#
# Copyright 2019-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from stellargraph.layer.appnp import *
from stellargraph.mapper import FullBatchNodeGenerator, FullBatchLinkGenerator
from stellargraph import StellarGraph
from stellargraph.core.utils import GCN_Aadj_feats_op
import networkx as nx
import pandas as pd
import numpy as np
from tensorflow import keras
import pytest
from ..test_utils.graphs import create_graph_features
from .. import test_utils
def test_APPNP_edge_cases():
    """Validate APPNP constructor argument checking.

    Fixes two defects in the previous version: the ``try/except``-then-assert
    pattern silently passed (or raised ``NameError`` / asserted a stale
    ``error``) whenever the expected exception was not raised, and the
    ``teleport_probability`` case was duplicated verbatim. ``pytest.raises``
    makes each expectation explicit.
    """
    G, features = create_graph_features()
    generator = FullBatchNodeGenerator(G, sparse=False, method="gcn")
    # mismatched layer_sizes / activations lengths
    with pytest.raises(
        ValueError, match="The number of layers should equal the number of activations"
    ):
        APPNP([2, 2], generator=generator, activations=["relu"], dropout=0.5)
    # generator must be a supported StellarGraph generator instance
    with pytest.raises(TypeError, match="FullBatchNodeGenerator"):
        APPNP([2], generator=[0, 1], activations=["relu"], dropout=0.5)
    # approx_iter must be a positive integer
    with pytest.raises(ValueError, match="approx_iter should be a positive integer"):
        APPNP(
            [2], generator=generator, activations=["relu"], dropout=0.0, approx_iter=-1
        )
    with pytest.raises(ValueError, match="approx_iter should be a positive integer"):
        APPNP(
            [2], generator=generator, activations=["relu"], dropout=0.0, approx_iter=1.2
        )
    # teleport_probability must lie in [0, 1]
    with pytest.raises(
        ValueError,
        match=r"teleport_probability should be between 0 and 1 \(inclusive\)",
    ):
        APPNP(
            [2],
            generator=generator,
            activations=["relu"],
            dropout=0.0,
            teleport_probability=1.2,
        )
def test_APPNP_apply_dense():
    """Check APPNP node predictions from manual dense inputs match the generator flow."""
    G, features = create_graph_features()
    adj = G.to_adjacency_matrix()
    features, adj = GCN_Aadj_feats_op(features, adj)
    adj = np.array(adj.todense()[None, :, :])
    generator = FullBatchNodeGenerator(G, sparse=False, method="gcn")
    appnpModel = APPNP([2], generator=generator, activations=["relu"], dropout=0.5)
    x_in, x_out = appnpModel.in_out_tensors()
    model = keras.Model(inputs=x_in, outputs=x_out)
    # Check fit method
    out_indices = np.array([[0, 1]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices, adj])
    assert preds_1.shape == (1, 2, 2)
    # Check fit method
    preds_2 = model.predict(generator.flow(["a", "b"]))
    assert preds_2.shape == (1, 2, 2)
    assert preds_1 == pytest.approx(preds_2)
def test_APPNP_apply_sparse():
    """Check APPNP node predictions from manual sparse inputs match the generator flow."""
    G, features = create_graph_features()
    adj = G.to_adjacency_matrix()
    features, adj = GCN_Aadj_feats_op(features, adj)
    adj = adj.tocoo()
    # COO indices/values with a leading batch dimension of 1
    A_indices = np.expand_dims(
        np.hstack((adj.row[:, None], adj.col[:, None])).astype(np.int64), 0
    )
    A_values = np.expand_dims(adj.data, 0)
    generator = FullBatchNodeGenerator(G, sparse=True, method="gcn")
    appnpnModel = APPNP([2], generator=generator, activations=["relu"], dropout=0.5)
    x_in, x_out = appnpnModel.in_out_tensors()
    model = keras.Model(inputs=x_in, outputs=x_out)
    # Check fit method
    out_indices = np.array([[0, 1]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices, A_indices, A_values])
    assert preds_1.shape == (1, 2, 2)
    # Check fit method
    preds_2 = model.predict(generator.flow(["a", "b"]))
    assert preds_2.shape == (1, 2, 2)
    assert preds_1 == pytest.approx(preds_2)
def test_APPNP_linkmodel_apply_dense():
    """Check the APPNP link model with dense inputs against the generator flow."""
    G, features = create_graph_features()
    adj = G.to_adjacency_matrix()
    adj = np.array(adj.todense()[None, :, :])
    generator = FullBatchLinkGenerator(G, sparse=False, method="none")
    appnpnModel = APPNP([3], generator, activations=["relu"], dropout=0.5)
    x_in, x_out = appnpnModel.in_out_tensors()
    model = keras.Model(inputs=x_in, outputs=x_out)
    # Check fit method
    out_indices = np.array([[[0, 1], [1, 2]]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices, adj])
    assert preds_1.shape == (1, 2, 2, 3)
    # Check fit method
    preds_2 = model.predict(generator.flow([("a", "b"), ("b", "c")]))
    assert preds_2.shape == (1, 2, 2, 3)
    assert preds_1 == pytest.approx(preds_2)
def test_APPNP_linkmodel_apply_sparse():
    """Check the APPNP link model with sparse inputs against the generator flow."""
    G, features = create_graph_features()
    adj = G.to_adjacency_matrix()
    features, adj = GCN_Aadj_feats_op(features, adj)
    adj = adj.tocoo()
    A_indices = np.expand_dims(
        np.hstack((adj.row[:, None], adj.col[:, None])).astype(np.int64), 0
    )
    A_values = np.expand_dims(adj.data, 0)
    generator = FullBatchLinkGenerator(G, sparse=True, method="gcn")
    appnpnModel = APPNP(
        layer_sizes=[3], activations=["relu"], generator=generator, dropout=0.5
    )
    x_in, x_out = appnpnModel.in_out_tensors()
    model = keras.Model(inputs=x_in, outputs=x_out)
    # Check fit method
    out_indices = np.array([[[0, 1], [1, 2]]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices, A_indices, A_values])
    assert preds_1.shape == (1, 2, 2, 3)
    # Check fit method
    preds_2 = model.predict(generator.flow([("a", "b"), ("b", "c")]))
    assert preds_2.shape == (1, 2, 2, 3)
    assert preds_1 == pytest.approx(preds_2)
def test_APPNP_apply_propagate_model_dense():
    """Check propagate_model wraps an external dense model correctly (dense adjacency)."""
    G, features = create_graph_features()
    adj = G.to_adjacency_matrix()
    features, adj = GCN_Aadj_feats_op(features, adj)
    adj = np.array(adj.todense()[None, :, :])
    generator = FullBatchNodeGenerator(G, sparse=False, method="gcn")
    appnpnModel = APPNP([2], generator=generator, activations=["relu"], dropout=0.5)
    fully_connected_model = keras.Sequential()
    fully_connected_model.add(Dense(2))
    x_in, x_out = appnpnModel.propagate_model(fully_connected_model)
    model = keras.Model(inputs=x_in, outputs=x_out)
    # Check fit method
    out_indices = np.array([[0, 1]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices, adj])
    assert preds_1.shape == (1, 2, 2)
    # Check fit method
    preds_2 = model.predict(generator.flow(["a", "b"]))
    assert preds_2.shape == (1, 2, 2)
    assert preds_1 == pytest.approx(preds_2)
@pytest.mark.parametrize("model_type", ["sequential", "model"])
def test_APPNP_propagate_model_matches_manual(model_type):
    """Check propagate_model output equals manually applying APPNPPropagationLayer 10 times."""
    dense_size = 5
    G, features = create_graph_features()
    adj = G.to_adjacency_matrix()
    features, adj = GCN_Aadj_feats_op(features, adj)
    adj = np.array(adj.todense()[None, :, :])
    out_indices = np.array([[0, 1]], dtype="int32")
    generator = FullBatchNodeGenerator(G, sparse=False, method="gcn")
    appnpnModel = APPNP(
        [dense_size], generator=generator, activations=["relu"], dropout=0.0
    )
    dense = Dense(dense_size)
    # wrap the same Dense layer either as a Sequential or a functional Model
    if model_type == "sequential":
        fully_connected_model = keras.Sequential()
        fully_connected_model.add(dense)
    else:
        inp = keras.Input(shape=features.shape)
        fully_connected_model = keras.Model(inp, dense(inp))
    x_in, x_out = appnpnModel.propagate_model(fully_connected_model)
    end_to_end_model = keras.Model(inputs=x_in, outputs=x_out)
    preds_1 = end_to_end_model.predict([features[None, :, :], out_indices, adj])
    # run the process manually: transform the features, and then propagate
    float_feats = features[None, :, :].astype("float32")
    manual_preds = manual_inp = fully_connected_model.predict(float_feats)
    propagate = APPNPPropagationLayer(dense_size, teleport_probability=0.1)
    for _ in range(10):
        manual_preds = propagate([manual_preds, manual_inp, adj])
    # select the relevant pieces
    manual_preds = manual_preds.numpy()[:, out_indices.ravel(), :]
    np.testing.assert_allclose(preds_1, manual_preds)
def test_APPNP_apply_propagate_model_sparse():
    """Check propagate_model wraps an external dense model correctly (sparse adjacency)."""
    G, features = create_graph_features()
    adj = G.to_adjacency_matrix()
    features, adj = GCN_Aadj_feats_op(features, adj)
    adj = adj.tocoo()
    A_indices = np.expand_dims(
        np.hstack((adj.row[:, None], adj.col[:, None])).astype(np.int64), 0
    )
    A_values = np.expand_dims(adj.data, 0)
    generator = FullBatchNodeGenerator(G, sparse=True, method="gcn")
    appnpnModel = APPNP([2], generator=generator, activations=["relu"], dropout=0.5)
    fully_connected_model = keras.Sequential()
    fully_connected_model.add(Dense(2))
    x_in, x_out = appnpnModel.propagate_model(fully_connected_model)
    model = keras.Model(inputs=x_in, outputs=x_out)
    # Check fit method
    out_indices = np.array([[0, 1]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices, A_indices, A_values])
    assert preds_1.shape == (1, 2, 2)
    # Check fit method
    preds_2 = model.predict(generator.flow(["a", "b"]))
    assert preds_2.shape == (1, 2, 2)
    assert preds_1 == pytest.approx(preds_2)
@pytest.mark.parametrize(
    "sparse",
    [False, pytest.param(True, marks=pytest.mark.xfail(reason="FIXME #1251"))],
)
def test_APPNP_save_load(tmpdir, sparse):
    """Round-trip an APPNP model through Keras save/load."""
    graph, _ = create_graph_features()
    gen = FullBatchNodeGenerator(graph, sparse=sparse)
    test_utils.model_save_load(tmpdir, APPNP([2, 3], gen, ["relu", "relu"]))
| 10,399 | 32.656958 | 88 | py |
stellargraph | stellargraph-master/tests/layer/test_gcn.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GCN tests
"""
import stellargraph as sg
from stellargraph.layer.gcn import *
from stellargraph.mapper import FullBatchNodeGenerator, FullBatchLinkGenerator
from stellargraph.core.graph import StellarGraph
from stellargraph.core.utils import GCN_Aadj_feats_op
import networkx as nx
import pandas as pd
import numpy as np
from tensorflow import keras
import tensorflow as tf
import pytest
from ..test_utils.graphs import create_graph_features
from .. import test_utils
def test_GraphConvolution_config():
    """A default-constructed GraphConvolution serialises the expected config."""
    conf = GraphConvolution(units=16).get_config()
    # plain values can be compared directly
    expected = {
        "units": 16,
        "activation": "linear",
        "use_bias": True,
        "kernel_regularizer": None,
        "bias_regularizer": None,
        "kernel_constraint": None,
        "bias_constraint": None,
    }
    for key, value in expected.items():
        assert conf[key] == value
    # initialisers serialise as nested dicts; check the class names
    assert conf["kernel_initializer"]["class_name"] == "GlorotUniform"
    assert conf["bias_initializer"]["class_name"] == "Zeros"
def test_GraphConvolution_init():
    """Constructor arguments are stored on the layer."""
    layer = GraphConvolution(units=16, activation="relu")
    assert (layer.units, layer.use_bias) == (16, True)
    assert layer.get_config()["activation"] == "relu"
def test_GraphConvolution_dense():
    """GraphConvolution over a dense adjacency matrix, with batch size 1 and
    batch size > 1 (broadcast copies of the same graph)."""
    G, features = create_graph_features()
    # We need to specify the batch shape as one for the GraphConvolutional logic to work
    x_t = Input(batch_shape=(1,) + features.shape, name="X")
    A_t = Input(batch_shape=(1, 3, 3), name="A")
    # Note we add a batch dimension of 1 to model inputs
    adj = G.to_adjacency_matrix().toarray()[None, :, :]
    x = features[None, :, :]
    out = GraphConvolution(2)([x_t, A_t])
    model = keras.Model(inputs=[x_t, A_t], outputs=out)
    preds = model.predict([x, adj], batch_size=1)
    # output shape is (batch, nodes, units)
    assert preds.shape == (1, 3, 2)
    # batch dimension > 1 should work with a dense matrix
    x_t = Input(batch_shape=(10,) + features.shape)
    A_t = Input(batch_shape=(10, 3, 3))
    # every batch element carries the same features and adjacency
    input_data = [np.broadcast_to(x, x_t.shape), np.broadcast_to(adj, A_t.shape)]
    out = GraphConvolution(2)([x_t, A_t])
    model = keras.Model(inputs=[x_t, A_t], outputs=out)
    preds = model.predict(input_data, batch_size=10)
    assert preds.shape == (10, 3, 2)
    for i in range(1, 10):
        # every batch element had the same input data, so the predictions should all be identical
        np.testing.assert_array_equal(preds[i, ...], preds[0, ...])
def test_GraphConvolution_sparse():
    """GraphConvolution over a sparse adjacency (via SqueezedSparseConversion),
    plus validation errors for bad batch dimensions / adjacency rank."""
    G, features = create_graph_features()
    n_nodes = features.shape[0]
    # We need to specify the batch shape as one for the GraphConvolutional logic to work
    x_t = Input(batch_shape=(1,) + features.shape)
    A_ind = Input(batch_shape=(1, None, 2), dtype="int64")
    A_val = Input(batch_shape=(1, None), dtype="float32")
    # converts the batched (indices, values) pair into a single SparseTensor
    A_mat = SqueezedSparseConversion(shape=(n_nodes, n_nodes), dtype=A_val.dtype)(
        [A_ind, A_val]
    )
    out = GraphConvolution(2)([x_t, A_mat])
    # Note we add a batch dimension of 1 to model inputs
    adj = G.to_adjacency_matrix().tocoo()
    A_indices = np.expand_dims(
        np.hstack((adj.row[:, None], adj.col[:, None])).astype(np.int64), 0
    )
    A_values = np.expand_dims(adj.data, 0)
    out_indices = np.array([[0, 1]], dtype="int32")
    x = features[None, :, :]
    model = keras.Model(inputs=[x_t, A_ind, A_val], outputs=out)
    preds = model.predict([x, A_indices, A_values], batch_size=1)
    assert preds.shape == (1, 3, 2)
    # a feature batch dimension other than 1 is rejected
    x_t_10 = Input(batch_shape=(10,) + features.shape)
    with pytest.raises(
        ValueError,
        match="features: expected batch dimension = 1 .* found features batch dimension 10",
    ):
        GraphConvolution(2)([x_t_10, A_mat])
    # a rank-3 (batched) sparse adjacency is rejected
    A_mat = keras.layers.Lambda(lambda x: tf.sparse.expand_dims(x, axis=0))(A_mat)
    with pytest.raises(
        ValueError,
        match="adjacency: expected a single adjacency matrix .* found adjacency tensor of rank 3",
    ):
        GraphConvolution(2)([x_t, A_mat])
def test_GCN_init():
    """GCN stores the layer sizes, activations and dropout it was built with."""
    graph, _ = create_graph_features()
    model = GCN([2], FullBatchNodeGenerator(graph), activations=["relu"], dropout=0.5)
    assert model.layer_sizes == [2]
    assert model.activations == ["relu"]
    assert model.dropout == 0.5
def test_GCN_apply_dense():
    """GCN node model with a dense adjacency: predictions from manually-fed
    tensors must match predictions made through the generator's flow."""
    G, features = create_graph_features()
    # Add a batch dimension of 1, as the full-batch layers require.
    adj = G.to_adjacency_matrix().toarray()[None, :, :]
    generator = FullBatchNodeGenerator(G, sparse=False, method="none")
    gcnModel = GCN([2], generator, activations=["relu"], dropout=0.5)
    x_in, x_out = gcnModel.in_out_tensors()
    model = keras.Model(inputs=x_in, outputs=x_out)
    # Predict with manually constructed inputs
    out_indices = np.array([[0, 1]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices, adj])
    assert preds_1.shape == (1, 2, 2)
    # Predict via the generator; the results must agree
    preds_2 = model.predict(generator.flow(["a", "b"]))
    assert preds_2.shape == (1, 2, 2)
    assert preds_1 == pytest.approx(preds_2)
def test_GCN_apply_sparse():
    """GCN node model with a sparse adjacency: predictions from manually-fed
    tensors must match predictions made through the generator's flow."""
    G, features = create_graph_features()
    adj = G.to_adjacency_matrix()
    # apply the same "gcn" normalisation the generator uses
    features, adj = GCN_Aadj_feats_op(features, adj)
    adj = adj.tocoo()
    # batch dimension of 1 prepended to sparse indices/values
    A_indices = np.expand_dims(
        np.hstack((adj.row[:, None], adj.col[:, None])).astype(np.int64), 0
    )
    A_values = np.expand_dims(adj.data, 0)
    generator = FullBatchNodeGenerator(G, sparse=True, method="gcn")
    gcnModel = GCN(
        layer_sizes=[2], activations=["relu"], generator=generator, dropout=0.5
    )
    x_in, x_out = gcnModel.in_out_tensors()
    model = keras.Model(inputs=x_in, outputs=x_out)
    # Predict with manually constructed inputs
    out_indices = np.array([[0, 1]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices, A_indices, A_values])
    assert preds_1.shape == (1, 2, 2)
    # Predict via the generator; the results must agree
    preds_2 = model.predict(generator.flow(["a", "b"]))
    assert preds_2.shape == (1, 2, 2)
    assert preds_1 == pytest.approx(preds_2)
def test_GCN_linkmodel_apply_dense():
    """GCN link model with a dense adjacency: predictions from manually-fed
    tensors must match predictions made through the link generator's flow."""
    G, features = create_graph_features()
    # Add a batch dimension of 1, as the full-batch layers require.
    adj = G.to_adjacency_matrix().toarray()[None, :, :]
    generator = FullBatchLinkGenerator(G, sparse=False, method="none")
    gcnModel = GCN([3], generator, activations=["relu"], dropout=0.5)
    x_in, x_out = gcnModel.in_out_tensors()
    model = keras.Model(inputs=x_in, outputs=x_out)
    # Predict with manually constructed inputs: two (src, dst) node-index pairs
    out_indices = np.array([[[0, 1], [1, 2]]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices, adj])
    # output shape is (batch, links, 2 endpoints, layer size)
    assert preds_1.shape == (1, 2, 2, 3)
    # Predict via the generator; the results must agree
    preds_2 = model.predict(generator.flow([("a", "b"), ("b", "c")]))
    assert preds_2.shape == (1, 2, 2, 3)
    assert preds_1 == pytest.approx(preds_2)
def test_GCN_linkmodel_apply_sparse():
    """GCN link model with a sparse adjacency: predictions from manually-fed
    tensors must match predictions made through the link generator's flow."""
    G, features = create_graph_features()
    adj = G.to_adjacency_matrix()
    # apply the same "gcn" normalisation the generator uses
    features, adj = GCN_Aadj_feats_op(features, adj)
    adj = adj.tocoo()
    # batch dimension of 1 prepended to sparse indices/values
    A_indices = np.expand_dims(
        np.hstack((adj.row[:, None], adj.col[:, None])).astype(np.int64), 0
    )
    A_values = np.expand_dims(adj.data, 0)
    generator = FullBatchLinkGenerator(G, sparse=True, method="gcn")
    gcnModel = GCN(
        layer_sizes=[3], activations=["relu"], generator=generator, dropout=0.5
    )
    x_in, x_out = gcnModel.in_out_tensors()
    model = keras.Model(inputs=x_in, outputs=x_out)
    # Predict with manually constructed inputs: two (src, dst) node-index pairs
    out_indices = np.array([[[0, 1], [1, 2]]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices, A_indices, A_values])
    # output shape is (batch, links, 2 endpoints, layer size)
    assert preds_1.shape == (1, 2, 2, 3)
    # Predict via the generator; the results must agree
    preds_2 = model.predict(generator.flow([("a", "b"), ("b", "c")]))
    assert preds_2.shape == (1, 2, 2, 3)
    assert preds_1 == pytest.approx(preds_2)
def test_GCN_activations():
    """Default activations and validation of the ``activations`` argument."""
    G, _ = create_graph_features()
    generator = FullBatchNodeGenerator(G, sparse=False, method="none")
    # defaults: one "relu" per layer
    gcn = GCN([2], generator)
    assert gcn.activations == ["relu"]
    gcn = GCN([2, 2], generator)
    assert gcn.activations == ["relu", "relu"]
    # explicit activations are honoured
    gcn = GCN([2], generator, activations=["linear"])
    assert gcn.activations == ["linear"]
    with pytest.raises(ValueError):
        # More activations than layers
        gcn = GCN([2], generator, activations=["relu", "linear"])
    with pytest.raises(ValueError):
        # Fewer activations than layers
        gcn = GCN([2, 2], generator, activations=["relu"])
    with pytest.raises(ValueError):
        # Unknown activation name
        gcn = GCN([2], generator, activations=["bleach"])
def test_GCN_regularisers():
    """Initialisers may be given by name or instance; unknown names raise."""
    G, _ = create_graph_features()
    generator = FullBatchNodeGenerator(G, sparse=False, method="none")
    # defaults construct without error
    gcn = GCN([2], generator)
    # kernel initialiser by name and by instance
    gcn = GCN([2], generator, kernel_initializer="ones")
    gcn = GCN([2], generator, kernel_initializer=initializers.ones())
    with pytest.raises(ValueError):
        gcn = GCN([2], generator, kernel_initializer="fred")
    # bias initialiser by name and by instance
    gcn = GCN([2], generator, bias_initializer="zeros")
    gcn = GCN([2], generator, bias_initializer=initializers.zeros())
    with pytest.raises(ValueError):
        gcn = GCN([2], generator, bias_initializer="barney")
def test_kernel_and_bias_defaults():
    """GraphConvolution sub-layers default to Glorot kernels, zero biases,
    and no regularisers or constraints."""
    graph, _ = create_graph_features()
    generator = FullBatchNodeGenerator(graph, sparse=False, method="none")
    conv_layers = [
        layer
        for layer in GCN([2, 2], generator)._layers
        if isinstance(layer, GraphConvolution)
    ]
    for layer in conv_layers:
        assert isinstance(layer.kernel_initializer, tf.initializers.GlorotUniform)
        assert isinstance(layer.bias_initializer, tf.initializers.Zeros)
        assert layer.kernel_regularizer is None
        assert layer.bias_regularizer is None
        assert layer.kernel_constraint is None
        assert layer.bias_constraint is None
@pytest.mark.parametrize(
    "sparse", [False, pytest.param(True, marks=pytest.mark.xfail(reason="FIXME #1251"))]
)
def test_gcn_save_load(tmpdir, sparse):
    """Round-trip a GCN model through Keras save/load."""
    graph, _ = create_graph_features()
    generator = FullBatchNodeGenerator(graph, sparse=sparse)
    test_utils.model_save_load(tmpdir, GCN([2, 3], generator))
| 11,000 | 32.336364 | 98 | py |
stellargraph | stellargraph-master/tests/layer/test_node2vec.py | # -*- coding: utf-8 -*-
#
# Copyright 2019-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Node2Vec tests
"""
from stellargraph.core.graph import StellarGraph
from stellargraph.mapper import Node2VecNodeGenerator
from stellargraph.layer.node2vec import *
from tensorflow import keras
import numpy as np
import pytest
from ..test_utils.graphs import example_graph
from .. import test_utils
def test_node2vec_constructor():
    """Node2Vec construction from explicit sizes and from a generator."""
    node2vec = Node2Vec(emb_size=4, node_num=4, multiplicity=2)
    assert node2vec.emb_size == 4
    assert node2vec.input_node_num == 4
    assert node2vec.multiplicity == 2
    # Check requirement for generator or node_num & multiplicity
    with pytest.raises(ValueError):
        Node2Vec(emb_size=4)
    with pytest.raises(ValueError):
        Node2Vec(emb_size=4, node_num=4)
    with pytest.raises(ValueError):
        Node2Vec(emb_size=4, multiplicity=2)
    # Construction from generator
    G = example_graph()
    gen = Node2VecNodeGenerator(G, batch_size=2)
    node2vec = Node2Vec(emb_size=4, generator=gen)
    assert node2vec.emb_size == 4
    # node count is taken from the graph behind the generator
    assert node2vec.input_node_num == 4
    # a node generator implies multiplicity 1
    assert node2vec.multiplicity == 1
def test_node2vec_apply():
    """Applying Node2Vec with all-ones weights embeds every node to ones."""
    node2vec = Node2Vec(emb_size=4, node_num=4, multiplicity=2)
    x = np.array([[1]])
    expected = np.array([[1, 1, 1, 1]])
    inp = keras.Input(shape=(1,))
    out = node2vec(inp, "target")
    model1 = keras.Model(inputs=inp, outputs=out)
    # make the output deterministic by setting every weight to one
    model_weights1 = [np.ones_like(w) for w in model1.get_weights()]
    model1.set_weights(model_weights1)
    actual = model1.predict(x)
    assert expected == pytest.approx(actual)
    # two node-id inputs for the (target, context) pair
    x1 = np.array([[0]])
    x2 = np.array([[2]])
    y1 = np.array([[1, 1, 1, 1]])
    y2 = np.array([[1, 1, 1, 1]])
    # Test the in_out_tensors function:
    xinp, xout = node2vec.in_out_tensors()
    model2 = keras.Model(inputs=xinp, outputs=xout)
    model_weights2 = [np.ones_like(w) for w in model2.get_weights()]
    model2.set_weights(model_weights2)
    actual = model2.predict([x1, x2])
    assert pytest.approx(y1) == actual[0]
    assert pytest.approx(y2) == actual[1]
def test_node2vec_serialize():
    """Node2Vec survives a to_json/from_json round trip."""
    node2vec = Node2Vec(emb_size=4, node_num=4, multiplicity=2)
    inp = keras.Input(shape=(1,))
    model = keras.Model(inputs=inp, outputs=node2vec(inp, "target"))
    # Serialise the architecture, rebuild it, and load all-ones weights.
    reloaded = keras.models.model_from_json(model.to_json())
    reloaded.set_weights([np.ones_like(w) for w in model.get_weights()])
    # With unit weights, node 2 embeds to a vector of ones.
    actual = reloaded.predict(np.array([[2]]))
    assert np.array([[1, 1, 1, 1]]) == pytest.approx(actual)
def test_node2vec_save_load(tmpdir):
    """Round-trip a Node2Vec model through Keras save/load."""
    embedding = Node2Vec(emb_size=4, node_num=4, multiplicity=2)
    test_utils.model_save_load(tmpdir, embedding)
| 3,449 | 29.263158 | 74 | py |
stellargraph | stellargraph-master/tests/layer/test_graphsage.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GraphSAGE tests
"""
from tensorflow import keras
from tensorflow.keras import initializers, regularizers
import tensorflow as tf
import numpy as np
import pytest
from stellargraph.mapper import GraphSAGENodeGenerator
from stellargraph.layer.graphsage import (
GraphSAGE,
MeanAggregator,
MaxPoolingAggregator,
MeanPoolingAggregator,
AttentionalAggregator,
)
from ..test_utils.graphs import example_graph
from .. import test_utils
pytestmark = test_utils.ignore_stellargraph_experimental_mark
# Mean aggregator tests
def test_mean_agg_constructor():
    """MeanAggregator defaults: no bias, relu activation."""
    agg = MeanAggregator(2)
    assert agg.output_dim == 2
    assert not agg.has_bias
    # the serialised config mirrors the constructor arguments
    config = agg.get_config()
    assert {k: config[k] for k in ("output_dim", "bias", "act")} == {
        "output_dim": 2,
        "bias": False,
        "act": "relu",
    }
def test_mean_agg_constructor_1():
    """Explicit constructor arguments, including a callable activation, are stored."""
    agg = MeanAggregator(output_dim=4, bias=True, act=lambda x: x + 1)
    assert (agg.output_dim, agg.has_bias) == (4, True)
    assert agg.act(2) == 3
def test_mean_agg_apply():
    """MeanAggregator with all-ones kernels concatenates the self projection
    with the projected mean of the neighbour features."""
    agg = MeanAggregator(5, bias=True, act=lambda x: x, kernel_initializer="ones")
    inp1 = keras.Input(shape=(1, 2))
    inp2 = keras.Input(shape=(1, 2, 2))
    out = agg([inp1, inp2])
    # output_dim=5 is split as 3 dims for self and 2 for the neighbours
    assert agg.weight_dims == [3, 2]
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])
    actual = model.predict([x1, x2])
    # self: 1+1 = 2 in each of 3 dims; neighbours: mean -> [2.5, 2.5] -> 5 in each of 2 dims
    expected = np.array([[[2, 2, 2, 5, 5]]])
    assert expected == pytest.approx(actual)
def test_mean_agg_apply_groups():
    """MeanAggregator over two neighbour groups: the output dims are split
    between self (5), first group (3) and second group (3)."""
    agg = MeanAggregator(11, bias=True, act=lambda x: x, kernel_initializer="ones")
    inp1 = keras.Input(shape=(1, 2))
    inp2 = keras.Input(shape=(1, 2, 2))
    inp3 = keras.Input(shape=(1, 2, 2))
    out = agg([inp1, inp2, inp3])
    assert agg.weight_dims == [5, 3, 3]
    model = keras.Model(inputs=[inp1, inp2, inp3], outputs=out)
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])
    x3 = np.array([[[[5, 5], [4, 4]]]])
    actual = model.predict([x1, x2, x3])
    # self: 1+1 = 2; group 1 mean: 2.5+2.5 = 5; group 2 mean: 4.5+4.5 = 9
    # (a leftover debug print() was removed here)
    expected = np.array([[[2] * 5 + [5] * 3 + [9] * 3]])
    assert expected == pytest.approx(actual)
def test_mean_agg_zero_neighbours():
    """With an empty neighbourhood, only the self-features contribute."""
    agg = MeanAggregator(4, bias=False, act=lambda x: x, kernel_initializer="ones")
    self_inp = keras.Input(shape=(1, 2))
    neigh_inp = keras.Input(shape=(1, 0, 2))
    model = keras.Model(
        inputs=[self_inp, neigh_inp], outputs=agg([self_inp, neigh_inp])
    )
    prediction = model.predict([np.array([[[1, 1]]]), np.zeros((1, 1, 0, 2))])
    # self projection 1+1 = 2 fills every output dim
    assert np.array([[[2, 2, 2, 2]]]) == pytest.approx(prediction)
# MaxPooling aggregator tests
def test_maxpool_agg_constructor():
    """MaxPoolingAggregator defaults: relu activations, hidden dim equal to output."""
    agg = MaxPoolingAggregator(2, bias=False)
    assert (agg.output_dim, agg.hidden_dim) == (2, 2)
    assert not agg.has_bias
    assert agg.act.__name__ == "relu"
    assert agg.hidden_act.__name__ == "relu"
    # the serialised config mirrors the constructor arguments
    config = agg.get_config()
    assert {k: config[k] for k in ("output_dim", "bias", "act")} == {
        "output_dim": 2,
        "bias": False,
        "act": "relu",
    }
def test_maxpool_agg_constructor_1():
    """Explicit arguments, including a callable activation, are stored."""
    agg = MaxPoolingAggregator(output_dim=4, bias=True, act=lambda x: x + 1)
    assert (agg.output_dim, agg.hidden_dim, agg.has_bias) == (4, 4, True)
    assert agg.act(2) == 3
def test_maxpool_agg_apply_hidden_bias():
    """MaxPoolingAggregator with ones kernels and ones hidden bias."""
    # Specifying bias_initializer="ones" initialises all bias terms to ones;
    # using bias=False turns off outer bias but retains hidden bias.
    agg = MaxPoolingAggregator(
        2, bias=False, act="linear", kernel_initializer="ones", bias_initializer="ones"
    )
    assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
    assert agg.get_config()["bias_initializer"]["class_name"] == "Ones"
    # Self features
    inp1 = keras.Input(shape=(1, 2))
    # Neighbour features
    inp2 = keras.Input(shape=(1, 2, 2))
    out = agg([inp1, inp2])
    # Check sizes: one output dim each for the self and neighbour parts
    assert agg.weight_dims == [1, 1]
    # Numerical test values
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])
    # Agg output:
    # neigh_agg = max(relu(x2 · ones(2x2)) + ones(2)), axis=1) = max([[5,5],[7,7]]) = [[7,7]]
    # from_self = K.dot(x1, ones) = [[2]]
    # from_neigh = K.dot(neigh_agg, ones) = [[14]]
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    actual = model.predict([x1, x2])
    expected = np.array([[[2, 14]]])
    assert expected == pytest.approx(actual)
def test_maxpool_agg_apply_no_bias():
    """MaxPoolingAggregator with ones kernels and default (zero) biases."""
    # By default, bias_initializers="zeros", so all bias terms are initialised to zeros.
    agg = MaxPoolingAggregator(2, act="linear", kernel_initializer="ones")
    assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
    assert agg.get_config()["bias_initializer"]["class_name"] == "Zeros"
    # Self features
    inp1 = keras.Input(shape=(1, 2))
    # Neighbour features
    inp2 = keras.Input(shape=(1, 2, 2))
    out = agg([inp1, inp2])
    # Check sizes: one output dim each for the self and neighbour parts
    assert agg.weight_dims == [1, 1]
    # Numerical test values
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])
    # Agg output:
    # neigh_agg = max(relu(x2 · ones(2x2)) + zeros(2)), axis=1) = max([[4,4],[6,6]]) = [[6,6]]
    # from_self = K.dot(x1, ones) = [[2]]
    # from_neigh = K.dot(neigh_agg, ones) = [[12]]
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    actual = model.predict([x1, x2])
    expected = np.array([[[2, 12]]])
    assert expected == pytest.approx(actual)
def test_maxpool_agg_zero_neighbours():
    """With an empty neighbourhood, only the self-features contribute."""
    agg = MaxPoolingAggregator(4, bias=False, act="linear", kernel_initializer="ones")
    self_inp = keras.Input(shape=(1, 2))
    neigh_inp = keras.Input(shape=(1, 0, 2))
    model = keras.Model(
        inputs=[self_inp, neigh_inp], outputs=agg([self_inp, neigh_inp])
    )
    prediction = model.predict([np.array([[[1, 1]]]), np.zeros((1, 1, 0, 2))])
    # self projection 1+1 = 2 fills every output dim
    assert np.array([[[2, 2, 2, 2]]]) == pytest.approx(prediction)
# MeanPooling aggregator tests
def test_meanpool_agg_constructor():
    """MeanPoolingAggregator defaults: relu activations, hidden dim equal to output."""
    agg = MeanPoolingAggregator(2, bias=False)
    assert (agg.output_dim, agg.hidden_dim) == (2, 2)
    assert not agg.has_bias
    assert agg.act.__name__ == "relu"
    assert agg.hidden_act.__name__ == "relu"
    # the serialised config mirrors the constructor arguments
    config = agg.get_config()
    assert {k: config[k] for k in ("output_dim", "bias", "act")} == {
        "output_dim": 2,
        "bias": False,
        "act": "relu",
    }
def test_meanpool_agg_constructor_1():
    """Explicit arguments, including a callable activation, are stored."""
    agg = MeanPoolingAggregator(output_dim=4, bias=True, act=lambda x: x + 1)
    assert (agg.output_dim, agg.hidden_dim, agg.has_bias) == (4, 4, True)
    assert agg.act(2) == 3
def test_meanpool_agg_apply_hidden_bias():
    """MeanPoolingAggregator with ones kernels and ones hidden bias."""
    # Specifying bias_initializer="ones" initialises all bias terms to ones;
    # using bias=False turns off outer bias but retains hidden bias.
    agg = MeanPoolingAggregator(
        2, bias=False, act="linear", kernel_initializer="ones", bias_initializer="ones"
    )
    assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
    assert agg.get_config()["bias_initializer"]["class_name"] == "Ones"
    # Self features
    inp1 = keras.Input(shape=(1, 2))
    # Neighbour features
    inp2 = keras.Input(shape=(1, 2, 2))
    out = agg([inp1, inp2])
    # Check sizes: one output dim each for the self and neighbour parts
    assert agg.weight_dims == [1, 1]
    # Numerical test values
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])
    # Agg output:
    # neigh_agg = mean(relu(x2 · ones(2x2) + ones(2)), axis=1)
    #           = mean([[5,5],[7,7]]) = [[6,6]]
    # from_self = K.dot(x1, ones) = [[2]]
    # from_neigh = K.dot(neigh_agg, ones(2x1)) = [[12]]
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    actual = model.predict([x1, x2])
    expected = np.array([[[2, 12]]])
    assert expected == pytest.approx(actual)
def test_meanpool_agg_apply_no_bias():
    """MeanPoolingAggregator with ones kernels and default (zero) biases."""
    # By default, bias_initializers="zeros", so all bias terms are initialised to zeros.
    agg = MeanPoolingAggregator(2, act="linear", kernel_initializer="ones")
    assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
    assert agg.get_config()["bias_initializer"]["class_name"] == "Zeros"
    # Self features
    inp1 = keras.Input(shape=(1, 2))
    # Neighbour features
    inp2 = keras.Input(shape=(1, 2, 2))
    out = agg([inp1, inp2])
    # Check sizes: one output dim each for the self and neighbour parts
    assert agg.weight_dims == [1, 1]
    # Numerical test values
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])
    # Agg output:
    # neigh_agg = mean(relu(x2 · ones(2x2) + zeros(2)), axis=1)
    #           = mean([[4,4],[6,6]]) = [[5,5]]
    # from_self = K.dot(x1, ones) = [[2]]
    # from_neigh = K.dot(neigh_agg, ones) = [[10]]
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    actual = model.predict([x1, x2])
    expected = np.array([[[2, 10]]])
    assert expected == pytest.approx(actual)
def test_meanpool_agg_zero_neighbours():
    """MeanPoolingAggregator with no neighbours acts as an MLP on self-features."""
    agg = MeanPoolingAggregator(4, bias=False, act="linear", kernel_initializer="ones")
    inp1 = keras.Input(shape=(1, 2))
    inp2 = keras.Input(shape=(1, 0, 2))
    out = agg([inp1, inp2])
    # With a 0-sized neighbour dimension the aggregator degenerates to an MLP
    # on the self-features, so the first (self) group takes all output dims.
    assert agg.weight_dims == [4, 0]
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    x1 = np.array([[[1, 1]]])
    x2 = np.zeros((1, 1, 0, 2))
    actual = model.predict([x1, x2])
    # self projection 1+1 = 2 fills every output dim
    expected = np.array([[[2, 2, 2, 2]]])
    assert expected == pytest.approx(actual)
# Attentional aggregator tests
def test_attn_agg_constructor():
    """AttentionalAggregator defaults: relu activation, no bias."""
    agg = AttentionalAggregator(2, bias=False)
    assert agg.output_dim == 2
    assert not agg.has_bias
    assert agg.act.__name__ == "relu"
    # the serialised config mirrors the constructor arguments
    config = agg.get_config()
    assert {k: config[k] for k in ("output_dim", "bias", "act")} == {
        "output_dim": 2,
        "bias": False,
        "act": "relu",
    }
def test_attn_agg_constructor_1():
    """Explicit arguments, including a callable activation, are stored."""
    agg = AttentionalAggregator(output_dim=4, bias=True, act=lambda x: x + 1)
    assert (agg.output_dim, agg.has_bias) == (4, True)
    assert agg.act(2) == 3
def test_attn_agg_apply():
    """AttentionalAggregator with ones kernels and a linear attention activation."""
    agg = AttentionalAggregator(2, bias=False, act="linear", kernel_initializer="ones")
    # override the attention activation so the softmax input is easy to compute
    agg.attn_act = keras.activations.get("linear")
    # Self features
    inp1 = keras.Input(shape=(1, 2))
    # Neighbour features
    inp2 = keras.Input(shape=(1, 2, 2))
    out = agg([inp1, inp2])
    # The AttentionalAggregator implementation is a hack at the moment, it doesn't
    # assign any dimensions in the output to head-node features.
    assert agg.weight_dims == [0, 2]
    # Numerical test values
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])
    # Agg output:
    # hs = relu(x1 · ones(2x2)) = [2,2]
    # hn = relu(x2 · ones(2x2)) = [[2,2], [4,4], [6,6]]
    # attn_u = ones(2) · hs + ones(2) · hn = [8, 12, 16]
    # attn = softmax(attn_u) = [3.3e-4, 1.8e-4, 9.81e-1]
    # hout = attn · hn = [5.96, 5.96]
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    actual = model.predict([x1, x2])
    expected = np.array([[[5.963, 5.963]]])
    assert expected == pytest.approx(actual, rel=1e-4)
def test_attn_agg_zero_neighbours():
    """With an empty neighbourhood, only the self-features contribute."""
    agg = AttentionalAggregator(4, bias=False, act="linear", kernel_initializer="ones")
    self_inp = keras.Input(shape=(1, 2))
    neigh_inp = keras.Input(shape=(1, 0, 2))
    model = keras.Model(
        inputs=[self_inp, neigh_inp], outputs=agg([self_inp, neigh_inp])
    )
    prediction = model.predict([np.array([[[1, 1]]]), np.zeros((1, 1, 0, 2))])
    # self projection 1+1 = 2 fills every output dim
    assert np.array([[[2, 2, 2, 2]]]) == pytest.approx(prediction)
def test_graphsage_constructor():
    """GraphSAGE construction: explicit sizes, invalid arguments, and
    construction from a node generator."""
    gs = GraphSAGE(
        layer_sizes=[4], n_samples=[2], input_dim=2, normalize="l2", multiplicity=1
    )
    # dims records the input dim followed by each layer size
    assert gs.dims == [2, 4]
    assert gs.n_samples == [2]
    assert gs.max_hops == 1
    assert gs.bias
    assert len(gs._aggs) == 1
    # Check incorrect normalization flag
    with pytest.raises(ValueError):
        GraphSAGE(
            layer_sizes=[4],
            n_samples=[2],
            input_dim=2,
            normalize=lambda x: x,
            multiplicity=1,
        )
    with pytest.raises(ValueError):
        GraphSAGE(
            layer_sizes=[4],
            n_samples=[2],
            input_dim=2,
            normalize="unknown",
            multiplicity=1,
        )
    # Check requirement for generator or n_samples
    with pytest.raises(ValueError):
        GraphSAGE(layer_sizes=[4])
    # Construction from generator
    G = example_graph(feature_size=3)
    gen = GraphSAGENodeGenerator(G, batch_size=2, num_samples=[2, 2])
    gs = GraphSAGE(layer_sizes=[4, 8], generator=gen, bias=True)
    # The GraphSAGE should no longer accept a Sequence
    t_gen = gen.flow([1, 2])
    with pytest.raises(TypeError):
        gs = GraphSAGE(layer_sizes=[4, 8], generator=t_gen, bias=True)
    # the feature size and sample counts are taken from the generator
    assert gs.dims == [3, 4, 8]
    assert gs.n_samples == [2, 2]
    assert gs.max_hops == 2
    assert gs.bias
    assert len(gs._aggs) == 2
def test_graphsage_constructor_passing_aggregator():
    """An aggregator class can be supplied; a non-class value raises TypeError."""
    gs = GraphSAGE(
        layer_sizes=[4],
        n_samples=[2],
        input_dim=2,
        multiplicity=1,
        aggregator=MeanAggregator,
    )
    assert (gs.dims, gs.n_samples, gs.max_hops) == ([2, 4], [2], 1)
    assert gs.bias
    assert len(gs._aggs) == 1
    with pytest.raises(TypeError):
        GraphSAGE(
            layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1, aggregator=1
        )
def test_graphsage_constructor_1():
    """A three-layer construction records dims, sample counts and hop count."""
    gs = GraphSAGE(
        layer_sizes=[4, 6, 8],
        n_samples=[2, 4, 6],
        input_dim=2,
        multiplicity=1,
        bias=True,
        dropout=0.5,
    )
    assert (gs.dims, gs.n_samples, gs.max_hops) == ([2, 4, 6, 8], [2, 4, 6], 3)
    assert gs.bias
    assert len(gs._aggs) == 3
def test_graphsage_apply():
    """A one-layer GraphSAGE applied to head/neighbour inputs.

    Previously this test only checked that the model builds; it now also
    verifies the numeric output. With all-ones kernels, no bias and no
    normalisation, the computation matches test_graphsage_serialize:
    self part = x1 · ones(2x2) = [2, 2]; neighbour part = mean(x2) · ones = [5, 5].
    """
    gs = GraphSAGE(
        layer_sizes=[4],
        n_samples=[2],
        bias=False,
        input_dim=2,
        multiplicity=1,
        normalize=None,
        kernel_initializer="ones",
    )
    inp1 = keras.Input(shape=(1, 2))
    inp2 = keras.Input(shape=(2, 2))
    out = gs([inp1, inp2])
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[2, 2], [3, 3]]])
    expected = np.array([[2, 2, 5, 5]])
    assert expected == pytest.approx(model.predict([x1, x2]))
def test_graphsage_apply_1():
    """Three-hop GraphSAGE with all-ones kernels over a fixed sampled tree."""
    gs = GraphSAGE(
        layer_sizes=[2, 2, 2],
        n_samples=[2, 2, 2],
        bias=False,
        input_dim=2,
        multiplicity=1,
        normalize=None,
        kernel_initializer="ones",
    )
    # one input tensor per hop: 1 head node, then 2, 4 and 8 sampled nodes
    inp = [keras.Input(shape=(i, 2)) for i in [1, 2, 4, 8]]
    out = gs(inp)
    model = keras.Model(inputs=inp, outputs=out)
    x = [
        np.array([[[1, 1]]]),
        np.array([[[2, 2], [2, 2]]]),
        np.array([[[3, 3], [3, 3], [3, 3], [3, 3]]]),
        np.array([[[4, 4], [4, 4], [4, 4], [4, 4], [5, 5], [5, 5], [5, 5], [5, 5]]]),
    ]
    expected = np.array([[16, 25]])
    actual = model.predict(x)
    assert expected == pytest.approx(actual)
    # Use the node model: in_out_tensors must reproduce the same outputs
    xinp, xout = gs.in_out_tensors()
    model2 = keras.Model(inputs=xinp, outputs=xout)
    assert pytest.approx(expected) == model2.predict(x)
def test_graphsage_serialize():
    """A GraphSAGE-built model survives a to_json/from_json round trip."""
    gs = GraphSAGE(
        layer_sizes=[4],
        n_samples=[2],
        bias=False,
        input_dim=2,
        multiplicity=1,
        normalize=None,
    )
    inp1 = keras.Input(shape=(1, 2))
    inp2 = keras.Input(shape=(2, 2))
    out = gs([inp1, inp2])
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    # Save model
    model_json = model.to_json()
    # Set all weights to one
    model_weights = [np.ones_like(w) for w in model.get_weights()]
    # Load model from json & set all weights; the custom aggregator layer
    # must be supplied for deserialisation
    model2 = keras.models.model_from_json(
        model_json, custom_objects={"MeanAggregator": MeanAggregator}
    )
    model2.set_weights(model_weights)
    # Test loaded model: self part = [2, 2], neighbour mean part = [5, 5]
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[2, 2], [3, 3]]])
    expected = np.array([[2, 2, 5, 5]])
    actual = model2.predict([x1, x2])
    assert expected == pytest.approx(actual)
def test_graphsage_zero_neighbours():
    """A two-layer GraphSAGE with no sampled neighbours at either hop."""
    gs = GraphSAGE(
        layer_sizes=[2, 2],
        n_samples=[0, 0],
        bias=False,
        input_dim=2,
        multiplicity=1,
        normalize="none",
        kernel_initializer="ones",
    )
    # zero-sized neighbour inputs at each hop
    inp = [keras.Input(shape=(i, 2)) for i in [1, 0, 0]]
    out = gs(inp)
    model = keras.Model(inputs=inp, outputs=out)
    x = [np.array([[[1.5, 1]]]), np.zeros((1, 0, 2)), np.zeros((1, 0, 2))]
    actual = model.predict(x)
    # only the self-features propagate: (1.5 + 1) doubled through two ones-kernel layers
    expected = np.array([[5, 5]])
    assert actual == pytest.approx(expected)
def test_graphsage_passing_activations():
    """Default activations are relu for hidden layers and linear for the last;
    mismatched or unknown activation lists raise ValueError."""
    gs = GraphSAGE(layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1)
    assert gs.activations == ["linear"]
    gs = GraphSAGE(layer_sizes=[4, 4], n_samples=[2, 2], input_dim=2, multiplicity=1)
    assert gs.activations == ["relu", "linear"]
    gs = GraphSAGE(
        layer_sizes=[4, 4, 4], n_samples=[2, 2, 2], input_dim=2, multiplicity=1
    )
    assert gs.activations == ["relu", "relu", "linear"]
    bad_activation_lists = [
        ["relu"],  # too few for three layers
        ["relu"] * 2,  # still too few
        ["fred", "wilma", "barney"],  # not activation names at all
    ]
    for bad in bad_activation_lists:
        with pytest.raises(ValueError):
            GraphSAGE(
                layer_sizes=[4, 4, 4],
                n_samples=[2, 2, 2],
                input_dim=2,
                multiplicity=1,
                activations=bad,
            )
    # a correctly-sized list of valid names is accepted
    gs = GraphSAGE(
        layer_sizes=[4, 4, 4],
        n_samples=[2, 2, 2],
        input_dim=2,
        multiplicity=1,
        activations=["linear"] * 3,
    )
    assert gs.activations == ["linear"] * 3
def test_graphsage_passing_regularisers():
    """Initialisers/regularisers may be given by name or instance; unknown
    names raise ValueError."""
    common = dict(layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1)
    with pytest.raises(ValueError):
        GraphSAGE(**common, kernel_initializer="fred")
    # valid initialiser by name and by instance
    GraphSAGE(**common, kernel_initializer="ones")
    GraphSAGE(**common, kernel_initializer=initializers.ones())
    # a regulariser instance is accepted
    GraphSAGE(**common, kernel_regularizer=regularizers.l2(0.01))
    with pytest.raises(ValueError):
        GraphSAGE(**common, kernel_regularizer="wilma")
def test_kernel_and_bias_defaults():
    """Aggregators default to Glorot kernels, zero biases and no
    regularisers or constraints."""
    gs = GraphSAGE(layer_sizes=[4, 4], n_samples=[2, 2], input_dim=2, multiplicity=1)
    for agg in gs._aggs:
        assert isinstance(agg.kernel_initializer, tf.initializers.GlorotUniform)
        assert isinstance(agg.bias_initializer, tf.initializers.Zeros)
        assert (agg.kernel_regularizer, agg.bias_regularizer) == (None, None)
        assert (agg.kernel_constraint, agg.bias_constraint) == (None, None)
def test_graphsage_save_load(tmpdir):
    """Round-trip a GraphSAGE model through Keras save/load."""
    model = GraphSAGE(layer_sizes=[4, 4], n_samples=[2, 2], input_dim=2, multiplicity=1)
    test_utils.model_save_load(tmpdir, model)
| 20,315 | 27.776204 | 94 | py |
stellargraph | stellargraph-master/tests/layer/test_graph_attention.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GAT tests
"""
import pytest
import scipy.sparse as sps
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input
from stellargraph.mapper import (
FullBatchNodeGenerator,
FullBatchLinkGenerator,
GraphSAGENodeGenerator,
)
from stellargraph.layer import *
from ..test_utils.graphs import example_graph
from .. import test_utils
pytestmark = test_utils.ignore_stellargraph_experimental_mark
class Test_GraphAttention:
    """
    Tests of GraphAttention layer
    """
    # Fixture dimensions shared by all tests in this class; subclasses override
    # `layer` to test other implementations with the same checks.
    N = 10
    F_in = 5
    F_out = 2
    attn_heads = 8
    activation = "relu"
    layer = GraphAttention
    def get_inputs(self):
        # Keras inputs for the dense layer: node features (1, N, F_in) and a
        # dense adjacency matrix (1, N, N).
        x_inp = [
            Input(batch_shape=(1, self.N, self.F_in)),
            Input(batch_shape=(1, self.N, self.N)),
        ]
        # duplicate input here for Test_GraphAttentionSparse to work
        return x_inp, x_inp
    def get_matrix(self, edges=[]):
        # adjacency matrix with self-loops only
        # (note: `edges` default is mutable but is never mutated here)
        A = np.eye(self.N)
        for e, v in edges:
            A[e[0], e[1]] = v
        return [A[None, :, :]]
    def test_constructor(self):
        # Constructor stores units/heads, derives output_dim from the reduction
        # mode, and rejects unknown attn_heads_reduction values.
        # attn_heads_reduction = "concat":
        layer = self.layer(
            units=self.F_out,
            attn_heads=self.attn_heads,
            attn_heads_reduction="concat",
            activation=self.activation,
        )
        assert layer.units == self.F_out
        assert layer.attn_heads == self.attn_heads
        assert layer.output_dim == self.F_out * self.attn_heads
        assert layer.activation == keras.activations.get(self.activation)
        # attn_heads_reduction = "average":
        layer = self.layer(
            units=self.F_out,
            attn_heads=self.attn_heads,
            attn_heads_reduction="average",
            activation=self.activation,
        )
        assert layer.output_dim == self.F_out
        # attn_heads_reduction = "ave":
        with pytest.raises(ValueError):
            self.layer(
                units=self.F_out,
                attn_heads=self.attn_heads,
                attn_heads_reduction="ave",
                activation=self.activation,
            )
    def test_apply_concat(self):
        # With all-ones kernels, all-ones features and a self-loop-only
        # adjacency, every output entry equals F_in (sum over input features),
        # concatenated across all attention heads.
        gat = self.layer(
            units=self.F_out,
            attn_heads=self.attn_heads,
            attn_heads_reduction="concat",
            activation=self.activation,
            kernel_initializer="ones",
        )
        x_inp, layer_inp = self.get_inputs()
        # Instantiate layer with squeezed matrix
        x_out = gat(layer_inp)
        model = keras.Model(inputs=x_inp, outputs=x_out)
        assert model.output_shape[-1] == self.F_out * self.attn_heads
        As = self.get_matrix()
        X = np.ones((1, self.N, self.F_in))  # features
        expected = np.ones((self.N, self.F_out * self.attn_heads)) * self.F_in
        actual = model.predict([X] + As)
        np.testing.assert_allclose(actual.squeeze(), expected)
    def test_apply_average(self):
        # With averaging heads, zero attention kernels and node features set to
        # distinct per-node values, each node's output is its own feature value
        # scaled by F_in (self-loop-only adjacency).
        gat = self.layer(
            units=self.F_out,
            attn_heads=self.attn_heads,
            attn_heads_reduction="average",
            activation=self.activation,
            kernel_initializer="ones",
            attn_kernel_initializer="zeros",
            bias_initializer="zeros",
        )
        x_inp, layer_inp = self.get_inputs()
        # Instantiate layer with squeezed matrix
        x_out = gat(layer_inp)
        model = keras.Model(inputs=x_inp, outputs=x_out)
        assert model.output_shape[-1] == self.F_out
        X = np.ones((1, self.N, self.F_in))  # features
        for i in range(self.N):
            X[:, i, :] = i + 1
        As = self.get_matrix()
        expected = (X * self.F_in)[..., : self.F_out]
        actual = model.predict([X] + As)
        np.testing.assert_allclose(actual.squeeze(), expected.squeeze())
    def test_apply_average_with_neighbours(self):
        # The saliency-map variant must produce the same predictions as the
        # standard layer when nodes 0 and 1 are connected: both end up with the
        # average of their features, i.e. F_in / 2 in the first two rows.
        gat_saliency = self.layer(
            units=self.F_out,
            attn_heads=self.attn_heads,
            attn_heads_reduction="average",
            activation=self.activation,
            kernel_initializer="ones",
            attn_kernel_initializer="zeros",
            bias_initializer="zeros",
            saliency_map_support=True,
        )
        gat_origin = self.layer(
            units=self.F_out,
            attn_heads=self.attn_heads,
            attn_heads_reduction="average",
            activation=self.activation,
            kernel_initializer="ones",
            attn_kernel_initializer="zeros",
            bias_initializer="zeros",
            saliency_map_support=False,
        )
        x_inp, layer_inp = self.get_inputs()
        # Instantiate layer with squeezed matrix
        x_out_saliency = gat_saliency(layer_inp)
        x_out_origin = gat_origin(layer_inp)
        model_origin = keras.Model(inputs=x_inp, outputs=x_out_origin)
        model_saliency = keras.Model(inputs=x_inp, outputs=x_out_saliency)
        assert model_origin.output_shape[-1] == self.F_out
        assert model_saliency.output_shape[-1] == self.F_out
        X = np.zeros((1, self.N, self.F_in))  # features
        for i in range(self.N):
            X[:, i, :] = i
        As = self.get_matrix([((0, 1), 1), ((1, 0), 1)])
        expected = (X * self.F_in)[..., : self.F_out]
        expected[:, :2] = self.F_in / 2
        actual_origin = model_origin.predict([X] + As)
        actual_saliency = model_saliency.predict([X] + As)
        np.testing.assert_allclose(expected, actual_origin)
        np.testing.assert_allclose(expected, actual_saliency)
    def test_layer_config(self):
        # get_config() must round-trip the constructor arguments and report the
        # Keras defaults for the unspecified initializers/regularizers.
        layer = self.layer(
            units=self.F_out,
            attn_heads=self.attn_heads,
            attn_heads_reduction="concat",
            activation=self.activation,
        )
        conf = layer.get_config()
        assert conf["units"] == self.F_out
        assert conf["attn_heads"] == self.attn_heads
        assert conf["attn_heads_reduction"] == "concat"
        assert conf["activation"] == self.activation
        assert conf["use_bias"] == True
        assert conf["kernel_initializer"]["class_name"] == "GlorotUniform"
        assert conf["bias_initializer"]["class_name"] == "Zeros"
        assert conf["kernel_regularizer"] == None
        assert conf["bias_regularizer"] == None
        assert conf["kernel_constraint"] == None
        assert conf["bias_constraint"] == None
class Test_GraphAttentionSparse(Test_GraphAttention):
    """
    Tests of GraphAttentionSparse layer
    """
    # Same fixture dimensions as the dense tests; only `layer` and the input
    # construction differ, so all inherited tests run against the sparse layer.
    N = 10
    F_in = 5
    F_out = 2
    attn_heads = 8
    activation = "relu"
    layer = GraphAttentionSparse
    def get_inputs(self):
        # Inputs for the sparse layer: node features plus COO-style edge
        # indices and values; SqueezedSparseConversion assembles them into a
        # SparseTensor of shape (N, N) for the layer.
        x_inp = [
            Input(batch_shape=(1, self.N, self.F_in)),
            Input(batch_shape=(1, None, 2), dtype="int64"),
            Input(batch_shape=(1, None), dtype="float32"),
        ]
        A_mat = SqueezedSparseConversion(shape=(self.N, self.N))(x_inp[1:])
        # For dense matrix, remove batch dimension
        layer_inp = x_inp[:1] + [A_mat]
        return x_inp, layer_inp
    def get_matrix(self, edges=[]):
        # adjacency matrix with self-loops + edges
        A_sparse = sps.eye(self.N, format="lil")
        for e, v in edges:
            A_sparse[e[0], e[1]] = v
        # Extract indices & values to feed to tensorflow
        A_sparse = A_sparse.tocoo()
        A_indices = np.expand_dims(
            np.hstack((A_sparse.row[:, None], A_sparse.col[:, None])), 0
        )
        A_values = np.expand_dims(A_sparse.data, 0)
        return [A_indices, A_values]
class Test_GAT:
    """
    Tests of GAT class
    """
    # Fixture dimensions; TestGATsparse overrides `sparse`/`method` to run the
    # same tests against a sparse-adjacency generator.
    N = 10
    F_in = 5
    F_out = 2
    attn_heads = 8
    layer_sizes = [4, 16]
    activations = ["relu", "linear"]
    sparse = False
    method = "gat"
    def test_constructor(self):
        # Constructor defaulting and argument validation.
        G = example_graph(feature_size=self.F_in)
        gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)
        # test default if no activations are passed:
        gat = GAT(layer_sizes=self.layer_sizes, generator=gen, bias=True)
        assert gat.activations == ["elu", "elu"]
        # test error if too many activations:
        with pytest.raises(ValueError):
            gat = GAT(layer_sizes=[10], activations=self.activations, generator=gen)
        # test error if too few activations:
        with pytest.raises(ValueError):
            gat = GAT(layer_sizes=[10, 10], activations=["relu"], generator=gen)
        # test error where layer_sizes is not a list:
        with pytest.raises(TypeError):
            gat = GAT(
                layer_sizes=10,
                activations=self.activations,
                attn_heads=self.attn_heads,
                generator=gen,
                bias=True,
            )
        # test error where layer_sizes values are not valid
        with pytest.raises(ValueError):
            gat = GAT(
                layer_sizes=[4, 0],
                activations=self.activations,
                attn_heads=self.attn_heads,
                generator=gen,
                bias=True,
            )
        # test for incorrect length of att_heads list:
        with pytest.raises(ValueError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=self.activations,
                attn_heads=[8, 8, 1],
                generator=gen,
                bias=True,
            )
        # test for invalid values in att_heads list:
        with pytest.raises(ValueError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=self.activations,
                attn_heads=[8, 0],
                generator=gen,
                bias=True,
            )
        # test for invalid type of att_heads argument:
        with pytest.raises(TypeError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=self.activations,
                attn_heads=8.0,
                generator=gen,
                bias=True,
            )
        # test error where activations is not a list:
        with pytest.raises(TypeError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations="relu",
                generator=gen,
                bias=True,
            )
        # test attn_heads_reduction errors:
        with pytest.raises(TypeError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=self.activations,
                attn_heads=self.attn_heads,
                attn_heads_reduction="concat",
                generator=gen,
                bias=True,
            )
        with pytest.raises(ValueError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=self.activations,
                attn_heads=self.attn_heads,
                attn_heads_reduction=["concat", "concat", "average"],
                generator=gen,
                bias=True,
            )
        with pytest.raises(ValueError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=self.activations,
                attn_heads=self.attn_heads,
                attn_heads_reduction=["concat", "sum"],
                generator=gen,
                bias=True,
            )
        # test error where len(activations) is not equal to len(layer_sizes):
        with pytest.raises(ValueError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=["relu"],
                generator=gen,
                bias=True,
            )
        # Default attention heads reductions:
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            generator=gen,
            bias=True,
        )
        assert gat.activations == self.activations
        assert gat.attn_heads_reduction == ["concat", "average"]
        assert gat.generator == gen
        # User-specified attention heads reductions:
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            attn_heads_reduction=["concat", "concat"],
            generator=gen,
            bias=True,
        )
        assert gat.attn_heads_reduction == ["concat", "concat"]
    def test_gat_build_constructor(self):
        # in_out_tensors() returns (inputs, output) with the expected shapes.
        G = example_graph(feature_size=self.F_in)
        gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            generator=gen,
            bias=True,
        )
        assert len(gat.in_out_tensors()) == 2
        x_in, x_out = gat.in_out_tensors()
        # BUG FIX: previously `len(x_in) == 4 if self.sparse else 3`, which
        # parses as `(len(x_in) == 4) if self.sparse else 3` and so always
        # passed in the dense case; parenthesise to actually check the length.
        assert len(x_in) == (4 if self.sparse else 3)
        assert int(x_in[0].shape[-1]) == self.F_in
        assert K.int_shape(x_in[-1]) == (1, G.number_of_nodes(), G.number_of_nodes())
        assert int(x_out.shape[-1]) == self.layer_sizes[-1]
    def test_gat_build_linkmodel_constructor(self):
        # Same shape checks as above, but for a link-prediction generator.
        G = example_graph(feature_size=self.F_in)
        gen = FullBatchLinkGenerator(G, sparse=self.sparse, method=self.method)
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            generator=gen,
            bias=True,
        )
        assert len(gat.in_out_tensors()) == 2
        x_in, x_out = gat.in_out_tensors()
        # BUG FIX: parenthesised (see test_gat_build_constructor).
        assert len(x_in) == (4 if self.sparse else 3)
        assert int(x_in[0].shape[-1]) == self.F_in
        assert int(x_out.shape[-1]) == self.layer_sizes[-1]
    def test_gat_build_constructor_no_generator(self):
        # GAT can be built without a generator by passing the graph dimensions
        # explicitly; this path defaults to dense adjacency.
        G = example_graph(feature_size=self.F_in)
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            bias=True,
            num_nodes=1000,
            num_features=self.F_in,
            multiplicity=1,
        )
        assert gat.use_sparse == False
        x_in, x_out = gat.in_out_tensors()
        # BUG FIX: parenthesised (see test_gat_build_constructor).
        assert len(x_in) == (4 if self.sparse else 3)
        assert int(x_in[0].shape[-1]) == self.F_in
        assert int(x_out.shape[-1]) == self.layer_sizes[-1]
    def test_gat_build_constructor_wrong_generator(self):
        G = example_graph(feature_size=self.F_in)
        gen = GraphSAGENodeGenerator(G, self.N, [5, 10])
        # test error where generator is of the wrong type for GAT:
        with pytest.raises(TypeError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=self.activations,
                attn_heads=self.attn_heads,
                bias=True,
                generator=gen,
            )
    def test_gat_build_l2norm(self):
        # With all-ones kernels and l2 output normalisation, every row of the
        # prediction is the unit vector 1/num_nodes in each component.
        G = example_graph(feature_size=self.F_in)
        gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            generator=gen,
            bias=True,
            normalize="l2",
            kernel_initializer="ones",
            attn_kernel_initializer="ones",
        )
        x_in, x_out = gat.in_out_tensors()
        model = keras.Model(inputs=x_in, outputs=x_out)
        ng = gen.flow(G.nodes())
        actual = model.predict(ng)
        expected = np.ones((G.number_of_nodes(), self.layer_sizes[-1])) * (
            1.0 / G.number_of_nodes()
        )
        np.testing.assert_allclose(expected, actual[0])
    def test_gat_build_no_norm(self):
        # Without normalisation the all-ones-kernel output is the product of
        # the layer dimensions, head count and the maximum feature value.
        G = example_graph(feature_size=self.F_in)
        gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            generator=gen,
            bias=True,
            normalize=None,
            kernel_initializer="ones",
            attn_kernel_initializer="ones",
        )
        x_in, x_out = gat.in_out_tensors()
        model = keras.Model(inputs=x_in, outputs=x_out)
        ng = gen.flow(G.nodes())
        actual = model.predict(ng)
        expected = np.ones((G.number_of_nodes(), self.layer_sizes[-1])) * (
            self.F_in
            * self.layer_sizes[0]
            * self.attn_heads
            * np.max(G.node_features(G.nodes()))
        )
        np.testing.assert_allclose(expected, actual[0])
    def test_gat_build_wrong_norm(self):
        G = example_graph(feature_size=self.F_in)
        gen = FullBatchNodeGenerator(G)
        with pytest.raises(ValueError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=self.activations,
                attn_heads=self.attn_heads,
                generator=gen,
                bias=True,
                normalize="whatever",
            )
    def test_gat_serialize(self):
        # The model must survive a to_json/model_from_json round trip; after
        # loading and resetting all weights to ones, predictions match the
        # l2-normalised all-ones expectation.
        G = example_graph(feature_size=self.F_in)
        gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            generator=gen,
            bias=True,
            normalize="l2",
        )
        x_in, x_out = gat.in_out_tensors()
        model = keras.Model(inputs=x_in, outputs=x_out)
        ng = gen.flow(G.nodes())
        # Save model
        model_json = model.to_json()
        # Set all weights to one
        model_weights = [np.ones_like(w) for w in model.get_weights()]
        # Load model from json & set all weights
        model2 = keras.models.model_from_json(
            model_json,
            custom_objects={
                "GraphAttention": GraphAttention,
                "GatherIndices": GatherIndices,
            },
        )
        model2.set_weights(model_weights)
        # Test deserialized model
        actual = model2.predict(ng)
        expected = np.ones((G.number_of_nodes(), self.layer_sizes[-1])) * (
            1.0 / G.number_of_nodes()
        )
        np.testing.assert_allclose(expected, actual[0])
    def test_kernel_and_bias_defaults(self):
        # All GraphAttention sub-layers use the Keras defaults when no
        # initializers/regularizers are given.
        graph = example_graph(feature_size=self.F_in)
        gen = FullBatchNodeGenerator(graph, sparse=self.sparse, method=self.method)
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            generator=gen,
        )
        for layer in gat._layers:
            if isinstance(layer, GraphAttention):
                assert isinstance(
                    layer.kernel_initializer, tf.initializers.GlorotUniform
                )
                assert isinstance(layer.bias_initializer, tf.initializers.Zeros)
                assert isinstance(
                    layer.attn_kernel_initializer, tf.initializers.GlorotUniform
                )
                assert layer.kernel_regularizer is None
                assert layer.bias_regularizer is None
                assert layer.attn_kernel_regularizer is None
                assert layer.kernel_constraint is None
                assert layer.bias_constraint is None
                assert layer.attn_kernel_constraint is None
    def test_save_load(self, tmpdir):
        # Round-trip the GAT model through Keras save/load via the shared helper.
        graph = example_graph(feature_size=self.F_in)
        gen = FullBatchNodeGenerator(graph, sparse=self.sparse, method=self.method)
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            generator=gen,
        )
        test_utils.model_save_load(tmpdir, gat)
class TestGATsparse(Test_GAT):
    """Re-run all ``Test_GAT`` tests with a sparse adjacency generator.

    BUG FIX: this was previously declared with ``def`` instead of ``class``,
    making it a plain (never-called) function, so pytest never collected the
    inherited tests in sparse mode.
    """
    sparse = True
    method = "gat"
| 20,638 | 31.86465 | 85 | py |
stellargraph | stellargraph-master/tests/layer/test_link_inference.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for link inference functions
"""
from stellargraph.layer.link_inference import *
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pytest
def make_orthonormal_vectors(dim):
    """Return two random unit vectors of length ``dim`` that are orthogonal.

    Performs one Gram-Schmidt step: draw two random normal vectors, subtract
    from the second its projection onto the first, then normalise both.

    Args:
        dim (int): dimensionality of the vectors.

    Returns:
        tuple of two 1-D numpy arrays of shape ``(dim,)``.
    """
    x_src = np.random.randn(dim)
    x_src /= np.linalg.norm(x_src)  # normalize x_src
    x_dst = np.random.randn(dim)
    x_dst -= x_dst.dot(x_src) * x_src  # make x_dst orthogonal to x_src
    x_dst /= np.linalg.norm(x_dst)  # normalize x_dst
    # Sanity check the construction without depending on pytest: the previous
    # `pytest.approx(0)` used an absolute tolerance of 1e-12, which both
    # couples a pure numeric helper to the test framework and is fragile for
    # accumulated float error at larger `dim`.
    assert abs(float(np.dot(x_src, x_dst))) < 1e-10
    return x_src, x_dst
class Test_LinkEmbedding(object):
    """
    Group of tests for the LinkEmbedding layer
    """
    d = 100  # dimensionality of embedding vector space
    d_out = 10  # dimensionality of link inference output
    def test_ip(self):
        """ Test the 'ip' binary operator on orthogonal vectors"""
        # inner product of orthonormal vectors is 0; of a unit vector with
        # itself, 1; sigmoid(0) = 0.5 and sigmoid(1) ~= 0.7310586.
        x_src, x_dst = make_orthonormal_vectors(self.d)
        x_src = tf.constant(x_src, shape=(1, self.d), dtype="float64")
        x_dst = tf.constant(x_dst, shape=(1, self.d), dtype="float64")
        li = LinkEmbedding(method="ip", activation="linear")([x_src, x_dst])
        print(
            "link inference with 'ip' operator on orthonormal vectors: {}".format(
                li.numpy()
            )
        )
        assert li.numpy() == pytest.approx(0, abs=1.5e-7)
        li = LinkEmbedding(method="ip", activation="linear")([x_src, x_src])
        print("link inference with 'ip' operator on unit vector: ", li.numpy())
        assert li.numpy() == pytest.approx(1, abs=1.5e-7)
        # Test sigmoid activation
        li = LinkEmbedding(method="ip", activation="sigmoid")([x_src, x_dst])
        assert li.numpy() == pytest.approx(0.5, abs=1.5e-7)
        li = LinkEmbedding(method="ip", activation="sigmoid")([x_src, x_src])
        assert li.numpy() == pytest.approx(0.7310586, abs=1.5e-7)
    def test_ip_single_tensor(self):
        """ Test the 'ip' binary operator on orthogonal vectors"""
        # Same expectations as test_ip, but the (src, dst) pair is passed as a
        # single stacked tensor of shape (1, 2, d) rather than a 2-element list.
        x_src, x_dst = make_orthonormal_vectors(self.d)
        x_src = tf.constant(x_src, shape=(1, self.d), dtype="float64")
        x_dst = tf.constant(x_dst, shape=(1, self.d), dtype="float64")
        x_link_sd = tf.stack([x_src, x_dst], axis=1)
        x_link_ss = tf.stack([x_src, x_src], axis=1)
        li = LinkEmbedding(method="ip", activation="linear")(x_link_sd)
        print(
            "link inference with 'ip' operator on orthonormal vectors: {}".format(
                li.numpy()
            )
        )
        assert li.numpy() == pytest.approx(0, abs=1.5e-7)
        li = LinkEmbedding(method="ip", activation="linear")(x_link_ss)
        print("link inference with 'ip' operator on unit vector: ", li.numpy())
        assert li.numpy() == pytest.approx(1, abs=1.5e-7)
        # Test sigmoid activation
        li = LinkEmbedding(method="ip", activation="sigmoid")(x_link_sd)
        assert li.numpy() == pytest.approx(0.5, abs=1.5e-7)
        li = LinkEmbedding(method="ip", activation="sigmoid")(x_link_ss)
        assert li.numpy() == pytest.approx(0.7310586, abs=1.5e-7)
    def test_mul_l1_l2_avg(self):
        """ Test the binary operators: 'mul'/'Hadamard', 'l1', 'l2', 'avg'"""
        # element-wise operators keep the embedding dimension d;
        # 'concat' doubles it to 2*d.
        x_src, x_dst = make_orthonormal_vectors(self.d)
        x_src = x_src.reshape(1, 1, self.d)
        x_dst = x_dst.reshape(1, 1, self.d)
        inp_src = keras.Input(shape=(1, self.d))
        inp_dst = keras.Input(shape=(1, self.d))
        for op in ["mul", "l1", "l2", "avg"]:
            out = LinkEmbedding(method=op)([inp_src, inp_dst])
            li = keras.Model(inputs=[inp_src, inp_dst], outputs=out)
            res = li.predict(x=[x_src, x_dst])
            print("link inference with '{}' operator: {}".format(op, res.flatten()))
            assert res.shape == (1, 1, self.d)
            assert isinstance(res.flatten()[0], np.float32)
        for op in ["concat"]:
            out = LinkEmbedding(method=op)([inp_src, inp_dst])
            li = keras.Model(inputs=[inp_src, inp_dst], outputs=out)
            res = li.predict(x=[x_src, x_dst])
            print("link inference with '{}' operator: {}".format(op, res.flatten()))
            assert res.shape == (1, 1, 2 * self.d)
            assert isinstance(res.flatten()[0], np.float32)
    def test_mul_l1_l2_avg_single_tensor(self):
        """ Test the binary operators: 'mul'/'Hadamard', 'l1', 'l2', 'avg'"""
        # Same as test_mul_l1_l2_avg, but feeding a single stacked (2, d) input.
        x_src, x_dst = make_orthonormal_vectors(self.d)
        x_src = x_src.reshape(1, self.d)
        x_dst = x_dst.reshape(1, self.d)
        x_link_np = np.stack([x_src, x_dst], axis=1)
        x_link = keras.Input(shape=(2, self.d))
        for op in ["mul", "l1", "l2", "avg"]:
            out = LinkEmbedding(method=op)(x_link)
            li = keras.Model(inputs=x_link, outputs=out)
            res = li.predict(x=x_link_np)
            print("link inference with '{}' operator: {}".format(op, res.flatten()))
            assert res.shape == (1, self.d)
            assert isinstance(res.flatten()[0], np.float32)
        for op in ["concat"]:
            out = LinkEmbedding(method=op)(x_link)
            li = keras.Model(inputs=x_link, outputs=out)
            res = li.predict(x=x_link_np)
            print("link inference with '{}' operator: {}".format(op, res.flatten()))
            assert res.shape == (1, 2 * self.d)
            assert isinstance(res.flatten()[0], np.float32)
class Test_Link_Inference(object):
    """
    Group of tests for link_inference() function
    """
    d = 100  # dimensionality of embedding vector space
    d_out = 10  # dimensionality of link inference output
    def test_ip(self):
        """ Test the 'ip' binary operator on orthogonal vectors"""
        x_src, x_dst = make_orthonormal_vectors(self.d)
        x_src = tf.constant(x_src, shape=(1, self.d), dtype="float64")
        x_dst = tf.constant(x_dst, shape=(1, self.d), dtype="float64")
        li = link_inference(edge_embedding_method="ip", output_act="linear")(
            [x_src, x_dst]
        )
        print("link inference with 'ip' operator on orthonormal vectors: {}".format(li))
        assert li.numpy() == pytest.approx(0, abs=1.5e-7)
        li = link_inference(edge_embedding_method="ip", output_act="linear")(
            [x_src, x_src]
        )
        print("link inference with 'ip' operator on unit vector: ", li)
        assert li.numpy() == pytest.approx(1, abs=1.5e-7)
        # Test sigmoid activation
        # NOTE(review): the two calls below use link_classification rather than
        # link_inference — possibly copy-paste from Test_Link_Classification;
        # confirm whether link_inference was intended here.
        li = link_classification(edge_embedding_method="ip", output_act="sigmoid")(
            [x_src, x_dst]
        )
        assert li.numpy() == pytest.approx(0.5, abs=1.5e-7)
        li = link_classification(edge_embedding_method="ip", output_act="sigmoid")(
            [x_src, x_src]
        )
        assert li.numpy() == pytest.approx(0.7310586, abs=1.5e-7)
    def test_mul_l1_l2_avg(self):
        """ Test the binary operators: 'mul'/'Hadamard', 'l1', 'l2', 'avg'"""
        # with output_dim=d_out, every operator (including 'concat') is
        # followed by a dense layer producing a (1, d_out) result.
        x_src, x_dst = make_orthonormal_vectors(self.d)
        x_src = x_src.reshape(1, 1, self.d)
        x_dst = x_dst.reshape(1, 1, self.d)
        inp_src = keras.Input(shape=(1, self.d))
        inp_dst = keras.Input(shape=(1, self.d))
        for op in ["mul", "l1", "l2", "avg", "concat"]:
            out = link_inference(output_dim=self.d_out, edge_embedding_method=op)(
                [inp_src, inp_dst]
            )
            li = keras.Model(inputs=[inp_src, inp_dst], outputs=out)
            print(x_src.shape)
            res = li.predict(x=[x_src, x_dst])
            print("link inference with '{}' operator: {}".format(op, res.flatten()))
            assert res.shape == (1, self.d_out)
            assert isinstance(res.flatten()[0], np.float32)
class Test_Link_Classification(object):
    """
    Group of tests for link_classification() function
    """
    d = 100  # dimensionality of embedding vector space
    d_out = 10  # dimensionality of link classification output
    def test_ip(self):
        """ Test the 'ip' binary operator on orthogonal vectors"""
        # inner product of orthonormal vectors is 0; of a unit vector with
        # itself, 1; sigmoid(0) = 0.5 and sigmoid(1) ~= 0.7310586.
        x_src, x_dst = make_orthonormal_vectors(self.d)
        x_src = tf.constant(x_src, shape=(1, self.d), dtype="float64")
        x_dst = tf.constant(x_dst, shape=(1, self.d), dtype="float64")
        # Test linear activation
        li = link_classification(edge_embedding_method="ip", output_act="linear")(
            [x_src, x_dst]
        )
        assert li.numpy() == pytest.approx(0, abs=1.5e-7)
        li = link_classification(edge_embedding_method="ip", output_act="linear")(
            [x_src, x_src]
        )
        assert li.numpy()[0, 0] == pytest.approx(1, abs=1.5e-7)
        # Test sigmoid activation
        li = link_classification(edge_embedding_method="ip", output_act="sigmoid")(
            [x_src, x_dst]
        )
        assert li.numpy() == pytest.approx(0.5, abs=1.5e-7)
        li = link_classification(edge_embedding_method="ip", output_act="sigmoid")(
            [x_src, x_src]
        )
        assert li.numpy() == pytest.approx(0.7310586, abs=1.5e-7)
    def test_mul_l1_l2_avg(self):
        """ Test the binary operators: 'mul'/'Hadamard', 'l1', 'l2', 'avg'"""
        # classification outputs must lie in [0, 1] for every operator
        # (checked by the final two asserts).
        x_src, x_dst = make_orthonormal_vectors(self.d)
        x_src = x_src.reshape(1, 1, self.d)
        x_dst = x_dst.reshape(1, 1, self.d)
        inp_src = keras.Input(shape=(1, self.d))
        inp_dst = keras.Input(shape=(1, self.d))
        for op in ["mul", "l1", "l2", "avg", "concat"]:
            out = link_classification(output_dim=self.d_out, edge_embedding_method=op)(
                [inp_src, inp_dst]
            )
            li = keras.Model(inputs=[inp_src, inp_dst], outputs=out)
            res = li.predict(x=[x_src, x_dst])
            print(
                "link classification with '{}' operator: {}".format(op, res.flatten())
            )
            assert res.shape == (1, self.d_out)
            assert isinstance(res.flatten()[0], np.float32)
            assert all(res.flatten() >= 0)
            assert all(res.flatten() <= 1)
class Test_Link_Regression(object):
    """
    Group of tests for link_regression() function
    """
    d = 100  # dimensionality of embedding vector space
    d_out = 10  # dimensionality of link classification output
    clip_limits = (0, 1)  # leaky-clip thresholds exercised in test_clip_limits
    def test_ip(self):
        """ Test the 'ip' binary operator on orthogonal vectors"""
        x_src, x_dst = make_orthonormal_vectors(self.d)
        expected = np.dot(x_src, x_dst)
        x_src = tf.constant(x_src, shape=(1, self.d), dtype="float64")
        x_dst = tf.constant(x_dst, shape=(1, self.d), dtype="float64")
        li = link_regression(edge_embedding_method="ip")([x_src, x_dst])
        print(
            "link regression with 'ip' operator on orthonormal vectors: {}, expected: {}".format(
                li, expected
            )
        )
        assert li.numpy() == pytest.approx(0, abs=1.5e-7)
        li = link_regression(edge_embedding_method="ip")([x_src, x_src])
        print("link regression with 'ip' operator on unit vector: ", li)
        assert li.numpy() == pytest.approx(1, abs=1.5e-7)
    def test_mul_l1_l2_avg(self):
        """ Test the binary operators: 'mul'/'Hadamard', 'l1', 'l2', 'avg'"""
        x_src, x_dst = make_orthonormal_vectors(self.d)
        x_src = x_src.reshape(1, 1, self.d)
        x_dst = x_dst.reshape(1, 1, self.d)
        inp_src = keras.Input(shape=(1, self.d))
        inp_dst = keras.Input(shape=(1, self.d))
        for op in ["mul", "l1", "l2", "avg", "concat"]:
            out = link_regression(output_dim=self.d_out, edge_embedding_method=op)(
                [inp_src, inp_dst]
            )
            li = keras.Model(inputs=[inp_src, inp_dst], outputs=out)
            res = li.predict(x=[x_src, x_dst])
            print("link regression with '{}' operator: {}".format(op, res.flatten()))
            assert res.shape == (1, self.d_out)
            assert isinstance(res.flatten()[0], np.float32)
    def test_clip_limits(self):
        """
        Test calling with the leaky clip thresholds
        Not sure what a meaningful test should do (as the LeakyClippedLinear layer provides some advantages at model training),
        so just making sure applying the clip limits doesn't break anything.
        """
        print("\n Testing clip limits...")
        x_src, x_dst = make_orthonormal_vectors(self.d)
        x_src = x_src.reshape(1, 1, self.d)
        x_dst = x_dst.reshape(1, 1, self.d)
        inp_src = keras.Input(shape=(1, self.d))
        inp_dst = keras.Input(shape=(1, self.d))
        for op in ["mul", "l1", "l2", "avg", "concat"]:
            out = link_regression(
                output_dim=self.d_out,
                edge_embedding_method=op,
                clip_limits=self.clip_limits,
            )([inp_src, inp_dst])
            li = keras.Model(inputs=[inp_src, inp_dst], outputs=out)
            res = li.predict(x=[x_src, x_dst])
            print("link regression with '{}' operator: {}".format(op, res.flatten()))
            assert res.shape == (1, self.d_out)
            assert isinstance(res.flatten()[0], np.float32)
stellargraph | stellargraph-master/tests/layer/test_knowledge_graph.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import pytest
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model, initializers, losses as tf_losses, layers
from stellargraph import StellarGraph, StellarDiGraph, losses as sg_losses
from stellargraph.mapper.knowledge_graph import KGTripleGenerator
from stellargraph.layer.knowledge_graph import (
KGModel,
KGScore,
ComplEx,
DistMult,
RotatE,
RotE,
RotH,
_ranks_from_score_columns,
)
from .. import test_utils
from ..test_utils.graphs import knowledge_graph
pytestmark = [
test_utils.ignore_stellargraph_experimental_mark,
pytest.mark.filterwarnings(
r"ignore:ComplEx:stellargraph.core.experimental.ExperimentalWarning"
),
]
def triple_df(*values):
    """Pack ``(source, label, target)`` triples into a knowledge-graph edge frame.

    Each positional argument is one triple; the result is a DataFrame with one
    row per triple and columns ``source``, ``label``, ``target``.
    """
    columns = ["source", "label", "target"]
    return pd.DataFrame(data=values, columns=columns)
@pytest.mark.parametrize("sample_strategy", ["uniform", "self-adversarial"])
def test_complex(knowledge_graph, sample_strategy):
    # this test creates a random untrained model and predicts every possible edge in the graph, and
    # compares that to a direct implementation of the scoring method in the paper
    gen = KGTripleGenerator(knowledge_graph, 3)
    # use a random initializer with a large positive range, so that any differences are obvious
    init = initializers.RandomUniform(-1, 1)
    complex_model = ComplEx(gen, 5, embeddings_initializer=init)
    x_inp, x_out = complex_model.in_out_tensors()
    model = Model(x_inp, x_out)
    # loss choice matches the negative-sampling strategy under test
    if sample_strategy == "uniform":
        loss = tf_losses.BinaryCrossentropy(from_logits=True)
    else:
        loss = sg_losses.SelfAdversarialNegativeSampling()
    model.compile(loss=loss)
    # enumerate every (source, relation, target) combination in the graph
    every_edge = itertools.product(
        knowledge_graph.nodes(),
        knowledge_graph._edges.types.pandas_index,
        knowledge_graph.nodes(),
    )
    df = triple_df(*every_edge)
    # check the model can be trained on a few (uneven) batches
    model.fit(
        gen.flow(df.iloc[:7], negative_samples=2, sample_strategy=sample_strategy),
        validation_data=gen.flow(
            df.iloc[7:14], negative_samples=3, sample_strategy=sample_strategy
        ),
    )
    # compute the exact values based on the model by extracting the embeddings for each element and
    # doing the Re(<e_s, w_r, conj(e_o)>) inner product
    s_idx = knowledge_graph.node_ids_to_ilocs(df.source)
    r_idx = knowledge_graph._edges.types.to_iloc(df.label)
    o_idx = knowledge_graph.node_ids_to_ilocs(df.target)
    nodes, edge_types = complex_model.embeddings()
    # the rows correspond to the embeddings for the given edge, so we can do bulk operations
    e_s = nodes[s_idx, :]
    w_r = edge_types[r_idx, :]
    e_o = nodes[o_idx, :]
    actual = (e_s * w_r * e_o.conj()).sum(axis=1).real
    # predict every edge using the model
    prediction = model.predict(gen.flow(df))
    # (use an absolute tolerance to allow for catastrophic cancellation around very small values)
    np.testing.assert_allclose(prediction[:, 0], actual, rtol=1e-3, atol=1e-6)
    # the model is stateful (i.e. it holds the weights permanently) so the predictions with a second
    # 'build' should be the same as the original one
    model2 = Model(*complex_model.in_out_tensors())
    prediction2 = model2.predict(gen.flow(df))
    np.testing.assert_array_equal(prediction, prediction2)
def test_distmult(knowledge_graph):
    # this test creates a random untrained model and predicts every possible edge in the graph, and
    # compares that to a direct implementation of the scoring method in the paper
    gen = KGTripleGenerator(knowledge_graph, 3)
    # use a random initializer with a large range, so that any differences are obvious
    init = initializers.RandomUniform(-1, 1)
    distmult_model = DistMult(gen, 5, embeddings_initializer=init)
    x_inp, x_out = distmult_model.in_out_tensors()
    model = Model(x_inp, x_out)
    model.compile(loss=tf_losses.BinaryCrossentropy(from_logits=True))
    # enumerate every (source, relation, target) combination in the graph
    every_edge = itertools.product(
        knowledge_graph.nodes(),
        knowledge_graph._edges.types.pandas_index,
        knowledge_graph.nodes(),
    )
    df = triple_df(*every_edge)
    # check the model can be trained on a few (uneven) batches
    model.fit(
        gen.flow(df.iloc[:7], negative_samples=2),
        validation_data=gen.flow(df.iloc[7:14], negative_samples=3),
    )
    # compute the exact values based on the model by extracting the embeddings for each element and
    # doing the y_(e_1)^T M_r y_(e_2) = <e_1, w_r, e_2> inner product
    s_idx = knowledge_graph.node_ids_to_ilocs(df.source)
    r_idx = knowledge_graph._edges.types.to_iloc(df.label)
    o_idx = knowledge_graph.node_ids_to_ilocs(df.target)
    nodes, edge_types = distmult_model.embeddings()
    # the rows correspond to the embeddings for the given edge, so we can do bulk operations
    e_s = nodes[s_idx, :]
    w_r = edge_types[r_idx, :]
    e_o = nodes[o_idx, :]
    actual = (e_s * w_r * e_o).sum(axis=1)
    # predict every edge using the model
    prediction = model.predict(gen.flow(df))
    # (use an absolute tolerance to allow for catastrophic cancellation around very small values)
    np.testing.assert_allclose(prediction[:, 0], actual, rtol=1e-3, atol=1e-14)
    # the model is stateful (i.e. it holds the weights permanently) so the predictions with a second
    # 'build' should be the same as the original one
    model2 = Model(*distmult_model.in_out_tensors())
    prediction2 = model2.predict(gen.flow(df))
    np.testing.assert_array_equal(prediction, prediction2)
@test_utils.flaky_xfail_mark(AssertionError, 1623)
def test_rotate(knowledge_graph):
    """Check RotatE scoring end-to-end.

    Creates a randomly-initialised, untrained model, fits it briefly, then
    verifies the model's prediction for every possible edge in the graph
    against a direct NumPy implementation of the paper's scoring function
    margin - || e_s o w_r - e_o || (o = elementwise complex product).
    """
    margin = 2.34
    norm_order = 1.234
    gen = KGTripleGenerator(knowledge_graph, 3)
    # use a random initializer with a large range, so that any differences are obvious
    init = initializers.RandomUniform(-1, 1)
    rotate_model = RotatE(
        gen, 5, margin=margin, norm_order=norm_order, embeddings_initializer=init
    )
    x_inp, x_out = rotate_model.in_out_tensors()
    model = Model(x_inp, x_out)
    model.compile(loss=tf_losses.BinaryCrossentropy(from_logits=True))
    # enumerate every (subject, relation, object) combination in the graph
    every_edge = itertools.product(
        knowledge_graph.nodes(),
        knowledge_graph._edges.types.pandas_index,
        knowledge_graph.nodes(),
    )
    df = triple_df(*every_edge)
    # check the model can be trained on a few (uneven) batches
    model.fit(
        gen.flow(df.iloc[:7], negative_samples=2),
        validation_data=gen.flow(df.iloc[7:14], negative_samples=3),
    )
    # compute the exact values based on the model by extracting the embeddings for each element
    # and evaluating the RotatE distance score directly
    s_idx = knowledge_graph.node_ids_to_ilocs(df.source)
    r_idx = knowledge_graph._edges.types.to_iloc(df.label)
    o_idx = knowledge_graph.node_ids_to_ilocs(df.target)
    nodes, edge_types = rotate_model.embeddings()
    # the rows correspond to the embeddings for the given edge, so we can do bulk operations
    e_s = nodes[s_idx, :]
    w_r = edge_types[r_idx, :]
    e_o = nodes[o_idx, :]
    # every edge-type embedding should be a unit rotation
    np.testing.assert_allclose(np.abs(w_r), 1)
    actual = margin - np.linalg.norm(e_s * w_r - e_o, ord=norm_order, axis=1)
    # predict every edge using the model
    prediction = model.predict(gen.flow(df))
    # (use an absolute tolerance to allow for catastrophic cancellation around very small values)
    np.testing.assert_allclose(prediction[:, 0], actual, rtol=1e-3, atol=1e-14)
    # the model is stateful (i.e. it holds the weights permanently) so the predictions with a second
    # 'build' should be the same as the original one
    model2 = Model(*rotate_model.in_out_tensors())
    prediction2 = model2.predict(gen.flow(df))
    np.testing.assert_array_equal(prediction, prediction2)
@pytest.mark.parametrize("model_class", [RotE, RotH])
def test_rote_roth(knowledge_graph, model_class):
    """Check RotE/RotH scoring end-to-end.

    Creates a randomly-initialised, untrained model, fits it briefly and
    predicts every possible edge. For RotE the predictions are additionally
    compared against a direct NumPy implementation of the Euclidean rotation
    score; RotH's hyperbolic score is only checked for build-to-build
    consistency.
    """
    gen = KGTripleGenerator(knowledge_graph, 3)
    # use a random initializer with a large range, so that any differences are obvious
    init = initializers.RandomUniform(-1, 1)
    rot_model = model_class(gen, 6, embeddings_initializer=init)
    x_inp, x_out = rot_model.in_out_tensors()
    model = Model(x_inp, x_out)
    model.summary()
    model.compile(loss=tf_losses.BinaryCrossentropy(from_logits=True))
    # enumerate every (subject, relation, object) combination in the graph
    every_edge = itertools.product(
        knowledge_graph.nodes(),
        knowledge_graph._edges.types.pandas_index,
        knowledge_graph.nodes(),
    )
    df = triple_df(*every_edge)
    # check the model can be trained on a few (uneven) batches
    model.fit(
        gen.flow(df.iloc[:7], negative_samples=2),
        validation_data=gen.flow(df.iloc[7:14], negative_samples=3),
    )
    # predict every edge using the model
    prediction = model.predict(gen.flow(df))
    (node_emb, node_bias), (et_emb, et_theta) = rot_model.embedding_arrays()
    if model_class is RotE:
        # compute the exact values based on the model, for RotationE (the RotationH model is too
        # hard to test directly)
        s_idx = knowledge_graph.node_ids_to_ilocs(df.source)
        r_idx = knowledge_graph.edge_type_names_to_ilocs(df.label)
        o_idx = knowledge_graph.node_ids_to_ilocs(df.target)
        # the rows correspond to the embeddings for the given edge, so we can do bulk operations
        e_s = node_emb[s_idx, :]
        b_s = node_bias[s_idx, 0]
        r_r = et_emb[r_idx, :]
        theta_r = et_theta[r_idx, :]
        e_o = node_emb[o_idx, :]
        b_o = node_bias[o_idx, 0]
        # apply the rotation by viewing each consecutive float pair as a complex number
        rot_r = np.cos(theta_r) + 1j * np.sin(theta_r)
        assert e_s.dtype == np.float32
        rotated = (e_s.view(np.complex64) * rot_r).view(np.float32)
        actual = -np.linalg.norm(rotated + r_r - e_o, axis=-1) ** 2 + b_s + b_o
        np.testing.assert_allclose(prediction[:, 0], actual, rtol=1e-3, atol=1e-14)
    # the model is stateful (i.e. it holds the weights permanently) so the predictions with a second
    # 'build' should be the same as the original one
    model2 = Model(*rot_model.in_out_tensors())
    prediction2 = model2.predict(gen.flow(df))
    np.testing.assert_array_equal(prediction, prediction2)
@pytest.mark.parametrize(
    "model_maker",
    [
        ComplEx,
        DistMult,
        pytest.param(RotatE, marks=test_utils.flaky_xfail_mark(AssertionError, 1623)),
        pytest.param(RotH, marks=test_utils.flaky_xfail_mark(AssertionError, 1675)),
        RotE,
    ],
)
def test_model_rankings(model_maker):
    """Check ``rank_edges_against_all_nodes`` raw and filtered rankings.

    Builds three graphs over the same nodes — no edges, a stratified sample of
    edges, and every edge — and verifies that filtering behaves sensibly on
    each, then re-derives every rank naively from the raw model predictions.
    """
    nodes = pd.DataFrame(index=["a", "b", "c", "d"])
    rels = ["W", "X", "Y", "Z"]
    empty = pd.DataFrame(columns=["source", "target"])
    every_edge = itertools.product(nodes.index, rels, nodes.index)
    every_edge_df = triple_df(*every_edge)
    no_edges = StellarDiGraph(nodes, {name: empty for name in rels})
    # the filtering is most interesting when there's a smattering of edges, somewhere between none
    # and all; this does a stratified sample by label, to make sure there's at least one edge from
    # each label.
    one_per_label_df = (
        every_edge_df.groupby("label").apply(lambda df: df.sample(n=1)).droplevel(0)
    )
    others_df = every_edge_df.sample(frac=0.25)
    some_edges_df = pd.concat([one_per_label_df, others_df], ignore_index=True)
    some_edges = StellarDiGraph(
        nodes,
        {name: df.drop(columns="label") for name, df in some_edges_df.groupby("label")},
    )
    all_edges = StellarDiGraph(
        nodes=nodes,
        edges={
            name: df.drop(columns="label")
            for name, df in every_edge_df.groupby("label")
        },
    )
    gen = KGTripleGenerator(all_edges, 3)
    sg_model = model_maker(gen, embedding_dimension=6)
    x_inp, x_out = sg_model.in_out_tensors()
    model = Model(x_inp, x_out)
    raw_some, filtered_some = sg_model.rank_edges_against_all_nodes(
        gen.flow(every_edge_df), some_edges
    )
    # basic check that the ranks are formed correctly
    assert raw_some.dtype == int
    assert np.all(raw_some >= 1)
    # filtered ranks are never greater, and sometimes less
    assert np.all(filtered_some <= raw_some)
    assert np.any(filtered_some < raw_some)
    raw_no, filtered_no = sg_model.rank_edges_against_all_nodes(
        gen.flow(every_edge_df), no_edges
    )
    np.testing.assert_array_equal(raw_no, raw_some)
    # with no edges, filtering does nothing
    np.testing.assert_array_equal(raw_no, filtered_no)
    raw_all, filtered_all = sg_model.rank_edges_against_all_nodes(
        gen.flow(every_edge_df), all_edges
    )
    np.testing.assert_array_equal(raw_all, raw_some)
    # when every edge is known, the filtering should eliminate every possibility
    assert np.all(filtered_all == 1)
    # check the ranks against computing them from the model predictions directly. That is, for each
    # edge, compare the rank against one computed by counting the predictions. This computes the
    # filtered ranks naively too.
    predictions = model.predict(gen.flow(every_edge_df))
    for (source, rel, target), score, raw, filtered in zip(
        every_edge_df.itertuples(index=False), predictions, raw_some, filtered_some
    ):
        # rank for the subset specified by the given selector
        def rank(compare_selector):
            return 1 + (predictions[compare_selector] > score).sum()
        # raw[0]/filtered[0] are modified-object ranks; raw[1]/filtered[1] modified-subject
        same_r = every_edge_df.label == rel
        same_s_r = (every_edge_df.source == source) & same_r
        expected_raw_mod_o_rank = rank(same_s_r)
        assert raw[0] == expected_raw_mod_o_rank
        known_objects = some_edges_df[
            (some_edges_df.source == source) & (some_edges_df.label == rel)
        ]
        object_is_unknown = ~every_edge_df.target.isin(known_objects.target)
        expected_filt_mod_o_rank = rank(same_s_r & object_is_unknown)
        assert filtered[0] == expected_filt_mod_o_rank
        same_r_o = same_r & (every_edge_df.target == target)
        expected_raw_mod_s_rank = rank(same_r_o)
        assert raw[1] == expected_raw_mod_s_rank
        known_subjects = some_edges_df[
            (some_edges_df.label == rel) & (some_edges_df.target == target)
        ]
        subject_is_unknown = ~every_edge_df.source.isin(known_subjects.source)
        expected_filt_mod_s_rank = rank(subject_is_unknown & same_r_o)
        assert filtered[1] == expected_filt_mod_s_rank
@pytest.mark.parametrize("tie_breaking", ["top", "bottom", "random"])
def test_tie_breaking(tie_breaking):
    """Check ``_ranks_from_score_columns`` handles tied scores per policy.

    The scores are constructed with deliberate ties; 'top' must give the
    optimistic rank, 'bottom' the pessimistic one, and 'random' must fall
    between the two while actually varying across repeated calls.
    """
    pred_scores = np.array(
        [
            [1, 5, 8],  # true_modified_node_ilocs:
            [1, 3, 8],  # 1
            [1, 2, 7],  # 2
            [1, 2, 6],  # 3
        ]
    )
    known_edges_graph = StellarDiGraph(
        nodes=pd.DataFrame(index=["a", "b", "c", "d"]),
        edges=pd.DataFrame(
            [
                # preds[0, :]: edge being predicted, checking it's counted properly for 'filtered'
                ("a", "b"),
                # preds[1, :]: the other tied edge, to see the 'bottom' score move up
                ("b", "d"),
            ],
            columns=["source", "target"],
        ),
    )
    # repeat many times so the 'random' policy has a chance to show variation
    copies = 100
    rankings = [
        _ranks_from_score_columns(
            pred_scores,
            true_modified_node_ilocs=np.array([1, 2, 3]),
            unmodified_node_ilocs=np.array([0, 1, 2]),
            true_rel_ilocs=np.array([0, 0, 0]),
            modified_object=True,
            known_edges_graph=known_edges_graph,
            tie_breaking=tie_breaking,
        )
        for _ in range(copies)
    ]
    all_rankings = np.array(rankings)
    # shape: (repeats, raw-or-filtered, query)
    assert all_rankings.shape == (copies, 2, 3)
    top_expected = np.repeat([[[1, 3, 4], [1, 3, 4]]], copies, axis=0)
    bottom_expected = np.repeat([[[4, 4, 4], [4, 3, 4]]], copies, axis=0)
    if tie_breaking == "top":
        np.testing.assert_array_equal(all_rankings, top_expected)
    elif tie_breaking == "bottom":
        np.testing.assert_array_equal(all_rankings, bottom_expected)
    elif tie_breaking == "random":
        assert (all_rankings >= top_expected).all()
        assert (all_rankings <= bottom_expected).all()
        # check both raw and filtered results (independently) have some variation in them
        for i in range(all_rankings.shape[1]):
            raw_or_filtered = all_rankings[:, i, :]
            assert (raw_or_filtered != top_expected[:, i, :]).any()
            assert (raw_or_filtered != bottom_expected[:, i, :]).any()
def test_embedding_validation(knowledge_graph):
    """Check that KGModel rejects malformed ``embeddings()`` return values
    from a scoring layer, with informative error messages."""
    # minimal KGScore stub: embeddings() returns whatever was passed in,
    # letting the test feed KGModel arbitrary (mal)formed values.
    # NOTE(review): __init__ does not call super().__init__() — fine for this
    # validation-only stub since the layer is never built.
    class X(layers.Layer, KGScore):
        def __init__(self, emb):
            self.emb = emb
        def embeddings(self, *args, **kwargs):
            return self.emb
        def bulk_scoring(self, *args, **kwargs):
            raise NotImplementedError()
    gen = KGTripleGenerator(knowledge_graph, 3)
    e = layers.Embedding(5, 4)
    kwargs = {
        "embeddings_initializer": None,
        "embeddings_regularizer": None,
    }
    # a sequence that isn't a pair
    with pytest.raises(ValueError, match="scoring: .* found a sequence of length 3"):
        KGModel(gen, X((1, 2, 3)), 2, **kwargs)
    # a pair where one element isn't an Embedding
    with pytest.raises(
        ValueError, match=r"scoring: .* found a pair with types \(Embedding, int\)"
    ):
        KGModel(gen, X((e, 2)), 2, **kwargs)
    # a pair of lists where one list contains a non-Embedding
    with pytest.raises(
        ValueError,
        match=r"scoring: .* found a pair of lists containing types \(\[Embedding, Embedding\], \[int\]\)",
    ):
        KGModel(gen, X(([e, e], [2])), 2, **kwargs)
    # all good:
    KGModel(gen, X(([e, e], [e, e, e])), 2, **kwargs)
@pytest.mark.parametrize("model_maker", [ComplEx, DistMult, RotatE, RotH, RotE])
def test_save_load(tmpdir, knowledge_graph, model_maker):
    """Each KG scoring model should survive a Keras save/load round trip."""
    triple_gen = KGTripleGenerator(knowledge_graph, 3)
    kg_model = model_maker(triple_gen, embedding_dimension=6)
    test_utils.model_save_load(tmpdir, kg_model)
| 18,872 | 36.005882 | 106 | py |
stellargraph | stellargraph-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import urllib.parse
import docutils
import sphinx
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import re
sys.path.insert(0, os.path.abspath(".."))
version = {}
with open(os.path.abspath("../stellargraph/version.py"), "r") as fh:
exec(fh.read(), version)
# -- Project information -----------------------------------------------------
project = "StellarGraph"
copyright = "2018-2020, Data61, CSIRO"
author = "Data61, CSIRO"
# Get global version
# see: https://packaging.python.org/guides/single-sourcing-package-version/
release = version["__version__"]
# The short X.Y version
m = re.match("^(\d+.\d+)", version["__version__"])
if m is None:
raise RuntimeError("Couldn't parse version")
version = m.group()
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"recommonmark",
"sphinx_markdown_tables",
"nbsphinx",
"nbsphinx_link",
"notfound.extension",
"sphinxcontrib.spelling",
]
# Add mappings
intersphinx_mapping = {
"python": ("http://docs.python.org/3", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [
"_build",
"Thumbs.db",
".DS_Store",
"requirements.txt",
"demos/community_detection/*",
"demos/use-cases/*",
"demos/interpretability/hateful-twitters-interpretability.nblink",
]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# allow the sidebar ToC to be arbitrarily deep, good for demos
"navigation_depth": -1
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
html_logo = "banner.png"
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "StellarGraphdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"StellarGraph.tex",
"StellarGraph Documentation",
"Data61, CSIRO",
"manual",
),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "stellargraph", "StellarGraph Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"StellarGraph",
"StellarGraph Documentation",
author,
"StellarGraph",
"StellarGraph is a graph machine learning library for Python.",
"Miscellaneous",
),
]
# -- Extension configuration -------------------------------------------------
# This is processed by Jinja2 and inserted before each notebook
# We use internal readthedocs variables to get the git version if available. Note that this is undocumented, however it
# is shown in our readthedocs build logs, and is generated from a template:
# https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/templates/doc_builder/conf.py.tmpl
nbsphinx_prolog = r"""
{% if env.config.html_context.github_version is defined and env.config.html_context.current_version != "stable" %}
{% set git_revision = env.config.html_context.github_version %}
{% else %}
{% set git_revision = "master" %}
{% endif %}
.. raw:: html
<div class="admonition info">
<p>
Execute this notebook:
<a href="https://mybinder.org/v2/gh/stellargraph/stellargraph/{{ git_revision }}?urlpath=lab/tree/{{ env.docname }}.ipynb" alt="Open In Binder"><img src="https://mybinder.org/badge_logo.svg"/></a>
<a href="https://colab.research.google.com/github/stellargraph/stellargraph/blob/{{ git_revision }}/{{ env.docname }}.ipynb" alt="Open In Colab"><img src="https://colab.research.google.com/assets/colab-badge.svg"/></a>
<a href="{{ env.docname.rsplit('/', 1).pop() }}.ipynb" class="btn">Download locally</a>
</p>
</div>
"""
nbsphinx_epilog = nbsphinx_prolog # also insert after each notebook
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for spelling extension ------------------------------------------
spelling_lang = "en_AU"
tokenizer_lang = "en_AU"
# -- StellarGraph customisation ----------------------------------------------
class RewriteLinks(docutils.transforms.Transform):
    """Docutils transform that rewrites certain external links into internal
    Sphinx references (README.md links and deep stellargraph.readthedocs.io
    links), so they render as proper cross-references and get validated."""
    # before NBSphinx's link processing
    default_priority = 300
    # matches Read the Docs paths of the form /<lang>/<version>/<rest>
    RTD_PATH_RE = re.compile("^/(?P<lang>[^/]*)/(?P<version>[^/]*)/(?P<rest>.*)$")
    def _rewrite_rtd_link(self, node, refuri, parsed):
        """
        Rewrite deep links to the Read the Docs documentation to point to the relevant page in this
        build.
        Having full URLs is good when viewing the docs without rendering, such as with `help(...)`
        or `?...`, but internal links make for a more consistent experience (no jumping from
        .../1.2/ or .../latest/ to .../stable/) as well as allowing checks for validity. The
        rewriting done here gives the best of both worlds: full URLs without rendering, internal
        links within Sphinx.
        """
        env = self.document.settings.env
        match = self.RTD_PATH_RE.match(parsed.path)
        if not match:
            return
        lang = match["lang"]
        version = match["version"]
        rest = match["rest"]
        fragment = parsed.fragment
        # validate that the links all have the same basic structure:
        if lang != "en" or version != "stable" or not rest.endswith(".html"):
            self.document.reporter.warning(
                f"Links to stellargraph.readthedocs.io should always be to the stable english form <https://stellargraph.readthedocs.io/en/stable/...> and have the path end with a .html file extension, found language {lang!r}, version {version!r} and path ending with {rest[-8:]!r} in <{refuri}>"
            )
            return
        if rest == "api.html" and fragment:
            # a link to a Python element in the API: infer which one from the fragment. Examples:
            # - https://stellargraph.readthedocs.io/en/stable/api.html#module-stellargraph
            # - https://stellargraph.readthedocs.io/en/stable/api.html#stellargraph.StellarGraph
            # - https://stellargraph.readthedocs.io/en/stable/api.html#stellargraph.StellarGraph.to_networkx
            new_domain = "py"
            module_prefix = "module-"
            if fragment.startswith(module_prefix):
                new_type = "mod"
                new_target = fragment[len(module_prefix) :]
            else:
                # not a module: could be a class, method etc., so let Sphinx resolve it
                new_type = "any"
                new_target = fragment
        else:
            # a link to a file (e.g. a demo)
            if fragment:
                self.document.reporter.warning(
                    f"Link <{refuri}> to stellargraph.readthedocs.io has a fragment {fragment!r} after #, which isn't yet supported for rewriting; remove the fragment, or search for this message in conf.py and extend it"
                )
                return
            html_suffix = ".html"
            new_domain = "std"
            new_type = "doc"
            # absolute document path: strip the .html extension, keep the rest
            new_target = "/" + rest[: -len(html_suffix)]
        # replace the external reference with a pending cross-reference that
        # Sphinx resolves (and warns about, if broken) at build time
        xref = sphinx.addnodes.pending_xref(
            refdomain=new_domain,
            reftype=new_type,
            reftarget=new_target,
            refwarn=True,
            refexplicit=True,
            refdoc=env.docname,
        )
        linktext = node.astext()
        xref += docutils.nodes.Text(linktext, linktext)
        node.replace_self(xref)
    def apply(self):
        """Visit every reference node in the document and rewrite it if needed."""
        env = self.document.settings.env
        for node in self.document.traverse(docutils.nodes.reference):
            refuri = node.get("refuri")
            parsed = urllib.parse.urlparse(refuri)
            if parsed.netloc == "" and parsed.path.endswith("README.md"):
                # the notebooks include links to READMEs so that the links work locally and on
                # GitHub, but on Read the Docs, the equivalent files are 'index', not 'README'.
                new_path = parsed.path.replace("README.md", "index.rst")
                new_components = (
                    parsed.scheme,
                    parsed.netloc,
                    new_path,
                    parsed.params,
                    parsed.query,
                    parsed.fragment,
                )
                node["refuri"] = urllib.parse.urlunparse(new_components)
            elif parsed.netloc == "stellargraph.readthedocs.io":
                self._rewrite_rtd_link(node, refuri, parsed)
def setup(app):
    """Sphinx extension entry point: register the link-rewriting transform and
    the project's custom stylesheet.

    Args:
        app: the Sphinx application object.
    """
    app.add_transform(RewriteLinks)
    # add_stylesheet() was deprecated in Sphinx 1.8 and removed in Sphinx 4.0;
    # prefer the replacement add_css_file() when available.
    if hasattr(app, "add_css_file"):
        app.add_css_file("custom.css")
    else:
        app.add_stylesheet("custom.css")
| 12,154 | 33.928161 | 296 | py |
baidu_ultr_dataset | baidu_ultr_dataset-main/pretrain.py | # -*- encoding: utf-8 -*-
'''
@Time : 2022/06/10 15:51:44
@Author : Chu Xiaokai
@Contact : xiaokaichu@gmail.com
'''
import time
import sys
import os
sys.path.append(os.getcwd())
from dataloader import *
from Transformer4Ranking.model import *
import torch
from torch import nn
from torch.utils.data import DataLoader
from metrics import evaluate_all_metric
from args import config
# control seed
# fix all RNG seeds (python / numpy / torch CPU / torch GPU) so that later
# random draws are deterministic and runs are reproducible
random.seed(config.seed)
np.random.seed(config.seed)
torch.manual_seed(config.seed)
torch.cuda.manual_seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
# load dataset: streaming click-log training data plus two validation sets
# (human-annotated relevance labels, and held-out click data)
train_dataset = TrainDataset(config.train_datadir, max_seq_len=config.max_seq_len, buffer_size=config.buffer_size)
train_data_loader = DataLoader(train_dataset, batch_size=config.train_batch_size)
vaild_annotate_dataset = TestDataset(config.valid_annotate_path, max_seq_len=config.max_seq_len, data_type='annotate')
vaild_annotate_loader = DataLoader(vaild_annotate_dataset, batch_size=config.eval_batch_size)
vaild_click_dataset = TestDataset(config.valid_click_path, max_seq_len=config.max_seq_len, data_type='click', buffer_size=100000)
vaild_click_loader = DataLoader(vaild_click_dataset, batch_size=config.eval_batch_size)
# build the Transformer ranking model in pretraining mode (MLM head enabled)
model = TransformerModel(
    ntoken=config.ntokens,
    hidden=config.emb_dim,
    nhead=config.nhead,
    nlayers=config.nlayers,
    dropout=config.dropout,
    mode='pretrain'
)
# wrap with DataParallel only when multiple GPUs are both present and requested
if torch.cuda.device_count() >= config.n_gpus > 1:
    print("Let's use", config.n_gpus, "GPUs!")
    model = nn.DataParallel(model, device_ids = list(range(config.n_gpus)))
model.cuda()
# load pretrained model
if config.init_parameters != "":
    print('load warm up model ', config.init_parameters)
    if config.n_gpus > 1:
        model.load_state_dict(torch.load(config.init_parameters))
    else:
        # strict=False: allow a checkpoint saved under DataParallel to load partially
        model.load_state_dict(torch.load(config.init_parameters), strict=False)
# init optimization
optimizer = torch.optim.Adam(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
scheduler = get_linear_schedule_with_warmup(
    optimizer, \
    num_warmup_steps=config.warmup_steps,\
    num_training_steps=config.max_steps,
)
# ---------------------------------------------------------------------------
# Training loop: joint masked-language-model (MLM) + click-prediction (CTR)
# pretraining objective, with periodic logging, evaluation and checkpointing.
# ---------------------------------------------------------------------------
model.train()  # turn on train mode
log_interval = config.log_interval
total_loss = 0.0
total_ctr_loss = 0.0
total_mlm_loss = 0.0
start_time = time.time()
criterion = nn.BCEWithLogitsLoss()
idx = 0
for src_input, src_segment, src_padding_mask, click_label in train_data_loader:
    model.train()  # re-enable train mode (evaluation below switches it off)
    optimizer.zero_grad()
    # randomly mask input tokens for the MLM objective
    masked_src_input, mask_label = mask_data(src_input)
    score, mlm_loss = model(
        src=masked_src_input,  # masked input ids
        src_segment=src_segment,
        src_padding_mask=src_padding_mask,
        mlm_label=mask_label,
    )
    mlm_loss = torch.mean(mlm_loss)
    click_label = click_label.cuda()
    ctr_loss = criterion(score, click_label)
    loss = mlm_loss + ctr_loss
    loss.backward()
    # BUG FIX: clip AFTER backward() so the *current* step's gradients are
    # clipped; the original code clipped before backward(), which clipped the
    # previous step's (already-applied) gradients and left this step's
    # gradients unclipped.
    torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
    optimizer.step()
    scheduler.step()
    total_ctr_loss += ctr_loss.item()
    total_mlm_loss += mlm_loss.item()
    total_loss += loss.item()
    # periodic console logging of the running loss averages
    if idx % log_interval == 0:
        lr = scheduler.get_last_lr()[0]
        ms_per_batch = (time.time() - start_time) * 1000 / log_interval
        cur_loss = total_loss / log_interval
        cur_ctr_loss = total_ctr_loss / log_interval
        cur_mlmloss = total_mlm_loss / log_interval
        print(
            f'{idx:5d}th step | '
            f'lr {lr:.3e} | ms/batch {ms_per_batch:5.2f} | '
            f'ctr {cur_ctr_loss:5.5f} | mlm {cur_mlmloss:5.5f}')
        total_mlm_loss = 0
        total_ctr_loss = 0
        total_loss = 0
        start_time = time.time()
    # periodic evaluation on both validation sets
    if idx % config.eval_step == 0:
        all_ndcg_list = []
        model.eval()
        with torch.no_grad():  # no autograd graph needed during evaluation
            # ------------ evaluate on annotated data -------------- #
            total_scores = []
            for src_input, src_segment, src_padding_mask in vaild_annotate_loader:
                score = model(src=src_input, src_segment=src_segment, src_padding_mask=src_padding_mask).cpu().detach().numpy().tolist()
                total_scores += score
            result_dict_ann = evaluate_all_metric(
                qid_list=vaild_annotate_dataset.total_qids,
                label_list=vaild_annotate_dataset.total_labels,
                score_list=total_scores,
                freq_list=vaild_annotate_dataset.total_freqs
            )
            print(
                f'{idx}th step valid annotate | '
                f'dcg@10: all {result_dict_ann["all_dcg@10"]:.5f} | '
                f'high {result_dict_ann["high_dcg@10"]:.5f} | '
                f'mid {result_dict_ann["mid_dcg@10"]:.5f} | '
                f'low {result_dict_ann["low_dcg@10"]:.5f} | '
                f'pnr {result_dict_ann["pnr"]:.5f}'
            )
            # ------------ evaluate on click data -------------- #
            total_scores = []
            for src_input, src_segment, src_padding_mask in vaild_click_loader:
                score = model(src=src_input, src_segment=src_segment, src_padding_mask=src_padding_mask).cpu().detach().numpy().tolist()
                total_scores += score
            result_dict_click = evaluate_all_metric(
                qid_list=vaild_click_dataset.total_qids,
                label_list=vaild_click_dataset.total_labels,
                score_list=total_scores,
                freq_list=None
            )
            print(
                f'{idx}th step valid click |'
                f'dcg@3 {result_dict_click["all_dcg@3"]:.5f} | '
                f'dcg@5 {result_dict_click["all_dcg@5"]:.5f} | '
                f'dcg@10 {result_dict_click["all_dcg@10"]:.5f} | '
                f'pnr {result_dict_click["pnr"]:.5f}'
            )
    # periodic checkpointing, named by step and validation PNR scores.
    # NOTE(review): result_dict_ann/result_dict_click only exist after an eval
    # step has run, so config.save_step should be a multiple of config.eval_step.
    if idx % config.save_step == 0 and idx > 0:
        torch.save(model.state_dict(),
                   'save_model/save_steps{}_{:.5f}_{:5f}.model'.format(idx, result_dict_ann['pnr'], result_dict_click['pnr'])
                   )
    idx += 1
baidu_ultr_dataset | baidu_ultr_dataset-main/dataloader.py | # -*- encoding: utf-8 -*-
'''
@Time : 2022/06/10 15:51:44
@Author : Chu Xiaokai
@Contact : xiaokaichu@gmail.com
'''
import math
import torch
import torch.nn.functional as F
import os
import random
from torch.utils.data import Dataset, DataLoader, IterableDataset
import gzip
from functools import reduce
from args import config
# --------------- data process for masked language modeling (MLM) ---------------- #
def prob_mask_like(t, prob):
    """Return a boolean tensor shaped like ``t`` whose entries are independently
    True with probability ``prob``."""
    draws = torch.rand(t.shape, device=t.device)  # uniform samples in [0, 1)
    return draws < prob
def mask_with_tokens(t, token_ids):
    """Return a boolean tensor that is True wherever an entry of ``t`` equals
    any id in ``token_ids`` (used to exclude special tokens from masking)."""
    hit = torch.full_like(t, False, dtype=torch.bool)
    for token_id in token_ids:
        hit |= t == token_id
    return hit
def get_mask_subset_with_prob(mask, prob):
    """Randomly select roughly ``prob`` of the True positions of ``mask``,
    independently per row, returning a boolean tensor of the same shape.

    Only positions that are True in ``mask`` (i.e. maskable tokens) can be
    selected; each row gets about ``ceil(num_maskable * prob)`` picks.
    """
    batch, seq_len, device = *mask.shape, mask.device
    # hard upper bound on picks per row (topk needs a fixed k)
    max_masked = math.ceil(prob * seq_len)
    # number of maskable tokens in each row
    num_tokens = mask.sum(dim=-1, keepdim=True)
    # flags the topk slots that exceed each row's actual quota of
    # ceil(num_tokens * prob) picks, so they can be discarded below
    mask_excess = (mask.cumsum(dim=-1) > (num_tokens * prob).ceil())
    mask_excess = mask_excess[:, :max_masked]
    # random scores; non-maskable positions get -1e9 so topk never selects them
    rand = torch.rand((batch, seq_len), device=device).masked_fill(~mask, -1e9)
    _, sampled_indices = rand.topk(max_masked, dim=-1)
    # shift indices up by one so index 0 can serve as a "discard" slot for the
    # excess picks
    sampled_indices = (sampled_indices + 1).masked_fill_(mask_excess, 0)
    new_mask = torch.zeros((batch, seq_len + 1), device=device)
    new_mask.scatter_(-1, sampled_indices, 1)
    # drop the dummy first column (the discard slot) and return as boolean
    return new_mask[:, 1:].bool()
def mask_data(seq, mask_ignore_token_ids=[config._CLS_, config._SEP_, config._PAD_],
              mask_token_id=config._MASK_,
              mask_prob=0.1,
              pad_token_id=config._PAD_,
              replace_prob=1.0
              ):
    """Apply BERT-style random masking to a batch of token-id sequences.

    Returns ``(masked_seq, labels)`` where ``masked_seq`` is ``seq`` with
    ~``mask_prob`` of the non-special tokens replaced by ``mask_token_id``
    (each selected token is actually replaced with probability
    ``replace_prob``), and ``labels`` holds the original ids at the selected
    positions and ``pad_token_id`` everywhere else (the MLM target).

    NOTE(review): the mutable default list for ``mask_ignore_token_ids`` is
    shared across calls; harmless here since it is never mutated.
    """
    # special tokens ([CLS]/[SEP]/[PAD]) must never be masked
    no_mask = mask_with_tokens(seq, mask_ignore_token_ids)
    mask = get_mask_subset_with_prob(~no_mask, mask_prob)
    masked_seq = seq.clone()
    # labels are built from the full selection, before the replace_prob thinning
    labels = seq.masked_fill(~mask, pad_token_id) # use pad to fill labels
    replace_prob = prob_mask_like(seq, replace_prob)
    mask = mask * replace_prob
    masked_seq = masked_seq.masked_fill(mask, mask_token_id)
    return masked_seq, labels
# ---------------------- DataLoader ----------------------- #
def process_data(query, title, content, max_seq_len):
    """ process [query, title, content] into a tensor
    [CLS] + query + [SEP] + title + [SEP] + content + [SEP] + [PAD]

    ``query``/``title``/``content`` are byte strings of \\x01-separated token
    ids. Returns ``(data, segment, padding_mask)`` LongTensor/LongTensor/
    BoolTensor, each of length ``max_seq_len``; segment id 0 covers the query
    part, 1 the title+content part; padding_mask is True at padded positions.
    """
    data = [config._CLS_]
    segment = [0]
    # token ids are shifted by +10, presumably to reserve ids 0-9 for the
    # special tokens — TODO confirm against the vocabulary/config
    data = data + [int(item) + 10 for item in query.split(b'\x01')] # query
    data = data + [config._SEP_]
    segment = segment + [0] * (len(query.split(b'\x01')) + 1)
    data = data + [int(item) + 10 for item in title.split(b'\x01')] # title
    data = data + [config._SEP_] # [SEP] after title
    segment = segment + [1] * (len(title.split(b'\x01')) + 1)
    data = data + [int(item) + 10 for item in content.split(b'\x01')] # content
    data = data + [config._SEP_]
    segment = segment + [1] * (len(content.split(b'\x01')) + 1)
    # padding
    padding_mask = [False] * len(data)
    if len(data) < max_seq_len:
        padding_mask += [True] * (max_seq_len - len(data))
        data += [config._PAD_] * (max_seq_len - len(data))
    else:
        # truncate (note: a truncated sequence may not end with [SEP])
        padding_mask = padding_mask[:max_seq_len]
        data = data[:max_seq_len]
    # segment id
    if len(segment) < max_seq_len:
        segment += [1] * (max_seq_len-len(segment))
    else:
        segment = segment[:max_seq_len]
    padding_mask = torch.BoolTensor(padding_mask)
    data = torch.LongTensor(data)
    segment = torch.LongTensor(segment)
    return data, segment, padding_mask
class TrainDataset(IterableDataset):
    """Iterable dataset that streams click-log training records from the
    ``*.gz`` files in ``directory_path``.

    Each yielded record is ``[src_input, segment, src_padding_mask,
    click_label]``. Records are accumulated into a buffer of up to
    ``buffer_size`` and shuffled before being yielded, which approximates
    global shuffling while streaming.
    """
    def __init__(self, directory_path, buffer_size=100000, max_seq_len=128):
        self.directory_path = directory_path
        self.buffer_size = buffer_size  # records held for shuffling before yielding
        self.files = os.listdir(self.directory_path)
        random.shuffle(self.files)  # visit files in a random order
        self.cur_query = "#"  # placeholder until the first query row is seen
        self.max_seq_len = max_seq_len
    def __iter__(self):
        buffer = []
        for file in self.files:
            print('load file', file)
            if file[-3:] != '.gz' or file == 'part-00000.gz':  # part-00000.gz is for evaluation
                continue
            with gzip.open(os.path.join(self.directory_path, file), 'rb') as f:
                for line in f.readlines():
                    line_list = line.strip(b'\n').split(b'\t')
                    if len(line_list) == 3:  # new query row
                        self.cur_query = line_list[1]
                    elif len(line_list) > 6:  # result (url) row for the current query
                        position, title, content, click_label = line_list[0], line_list[2], line_list[3], line_list[5]
                        try:
                            src_input, segment, src_padding_mask = process_data(self.cur_query, title, content, self.max_seq_len)
                            buffer.append([src_input, segment, src_padding_mask, float(click_label)])
                        except Exception:
                            # best-effort: skip malformed rows rather than abort
                            pass
                        if len(buffer) >= self.buffer_size:
                            random.shuffle(buffer)
                            for record in buffer:
                                yield record
                            # BUG FIX: reset the buffer after draining it; the
                            # original never cleared it, so every refill
                            # re-yielded all earlier records (duplicates) and
                            # the buffer grew without bound.
                            buffer = []
        # BUG FIX: flush the final partial buffer, which the original dropped.
        random.shuffle(buffer)
        for record in buffer:
            yield record
class TestDataset(Dataset):
    """Map-style dataset for evaluation.

    Two flavours selected by ``data_type``:
      * ``'annotate'`` — TSV file of human relevance judgements; also records
        per-query frequency buckets (0=high, 1=mid, 2=tail).
      * ``'click'`` — gzipped click log in the training-data format; at most
        ``buffer_size`` records are loaded.
    Items are ``[src_input, src_segment, src_padding_mask]``; labels/qids are
    exposed as parallel ``total_*`` lists for the metric computation.
    """
    def __init__(self, fpath, max_seq_len, data_type, buffer_size=300000):
        self.max_seq_len = max_seq_len
        self.buffer_size = buffer_size
        if data_type == 'annotate':
            self.buffer, self.total_qids, self.total_labels, self.total_freqs = self.load_annotate_data(fpath)
        elif data_type == 'click':
            self.buffer, self.total_qids, self.total_labels = self.load_click_data(fpath)
    def __len__(self):
        return len(self.buffer)
    def __getitem__(self, index):
        return self.buffer[index]
    def load_annotate_data(self, fpath):
        """Parse the annotated TSV; returns (records, qids, labels, freq buckets)."""
        print('load annotated data from ', fpath)
        total_qids = []
        buffer = []
        total_labels = []
        total_freqs = []
        for line in open(fpath, 'rb'):
            line_list = line.strip(b'\n').split(b'\t')
            qid, query, title, content, label, freq = line_list
            # bucket the raw frequency rank into high / mid / tail
            if 0 <= int(freq) <= 2: # high freq
                freq = 0
            elif 3 <= int(freq) <= 6: # mid freq
                freq = 1
            elif 7 <= int(freq): # tail
                freq = 2
            total_qids.append(int(qid))
            total_labels.append(int(label))
            total_freqs.append(freq)
            src_input, src_segment, src_padding_mask = process_data(query, title, content, self.max_seq_len)
            buffer.append([src_input, src_segment, src_padding_mask])
        return buffer, total_qids, total_labels, total_freqs
    def load_click_data(self, fpath):
        """Parse the gzipped click log; returns (records, qids, click labels).

        NOTE(review): assumes the file starts with a query row before any url
        rows — otherwise ``self.cur_query`` would be read before assignment.
        """
        print('load logged click data from ', fpath)
        with gzip.open(fpath, 'rb') as f:
            buffer = []
            total_qids = []
            total_labels = []
            cur_qids = 0
            for line in f.readlines():
                line_list = line.strip(b'\n').split(b'\t')
                if len(line_list) == 3: # new query
                    self.cur_query = line_list[1]
                    cur_qids += 1
                elif len(line_list) > 6: # urls
                    position, title, content, click_label = line_list[0], line_list[2], line_list[3], line_list[5]
                    # best-effort: skip malformed rows rather than abort
                    try:
                        src_input, src_segment, src_padding_mask = process_data(self.cur_query, title, content, self.max_seq_len)
                        buffer.append([src_input, src_segment, src_padding_mask])
                        total_qids.append(cur_qids)
                        total_labels.append(int(click_label))
                    except:
                        pass
                if len(buffer) >= self.buffer_size: # we use 300,000 click records for test
                    break
        return buffer, total_qids, total_labels
def build_feed_dict(data_batch):
    """Convert a DataLoader batch into the feed dict consumed by the models.

    Args:
        data_batch: either ``[src, src_segment, src_padding_mask, click_label]``
            (training) or ``[src, src_segment, src_padding_mask]`` (validation).

    Returns:
        dict with 'src', 'src_segment', 'src_padding_mask' and, for training
        batches, per-rank click labels 'label0'..'label9' (the click logs
        always contain 10 ranked results per query).

    Raises:
        ValueError: if ``data_batch`` has an unexpected number of elements
            (previously this surfaced as an UnboundLocalError).
    """
    if len(data_batch) == 4: # for training
        src, src_segment, src_padding_mask, click_label = data_batch
    elif len(data_batch) == 3: # for validation
        src, src_segment, src_padding_mask = data_batch
    else:
        raise ValueError('expected a batch of 3 or 4 tensors, got %d' % len(data_batch))
    feed_dict = {
        'src': src,
        'src_segment': src_segment,
        'src_padding_mask': src_padding_mask,
    }
    if len(data_batch) == 4:
        # Regroup flat (batch,) click labels into one row per rank position.
        click_label = click_label.numpy().reshape(-1, 10).T
        for i in range(10):
            feed_dict['label'+str(i)] = click_label[i]
    return feed_dict
baidu_ultr_dataset | baidu_ultr_dataset-main/unbiased_learning.py | # -*- encoding: utf-8 -*-
'''
@Time : 2022/06/12 14:49:28
@Author : Chu Xiaokai
@Contact : xiaokaichu@gmail.com
'''
from baseline_model.utils.sys_tools import find_class
import torch
import numpy as np
import warnings
import sys
from metrics import *
from Transformer4Ranking.model import *
from dataloader import *
from args import config
# Seed every RNG in use (shard shuffling, numpy, torch CPU/GPU) for reproducibility.
random.seed(config.seed+1)
np.random.seed(config.seed+1)
torch.manual_seed(config.seed)
torch.cuda.manual_seed(config.seed)
warnings.filterwarnings('ignore')
print(config)
exp_settings = config.exp_settings
# Transformer encoder that scores tokenized (query, title, content) inputs.
token_encoder = TransformerModel(
    ntoken=config.ntokens,
    hidden=config.emb_dim,
    nhead=config.nhead,
    nlayers=config.nlayers,
    dropout=config.dropout,
    mode='finetune'
)
# load pretrained model
if config.init_parameters != "":
    print('load warm up model ', config.init_parameters)
    if config.n_gpus > 1:
        token_encoder.load_state_dict(torch.load(config.init_parameters))
    else:
        token_encoder.load_state_dict(torch.load(config.init_parameters), strict=False)
# Wrap the encoder in the selected unbiased learning-to-rank algorithm.
method_str = exp_settings['method_name']
if method_str not in ['IPWrank', 'DLA', 'RegressionEM', 'PairDebias', 'NavieAlgorithm']:
    print("please choose a method in 'IPWrank', 'DLA', 'RegressionEM', 'PairDebias', 'NavieAlgorithm'")
    sys.exit()
model = find_class('baseline_model.learning_algorithm.'+method_str)\
    (exp_settings=exp_settings, encoder_model=token_encoder)
# Training stream plus two validation sets: expert annotations and held-out clicks.
train_dataset = TrainDataset(config.train_datadir, max_seq_len=config.max_seq_len, buffer_size=config.buffer_size)
train_data_loader = DataLoader(train_dataset, batch_size=config.train_batch_size)
vaild_annotate_dataset = TestDataset(config.valid_annotate_path, max_seq_len=config.max_seq_len, data_type='annotate')
vaild_annotate_loader = DataLoader(vaild_annotate_dataset, batch_size=config.eval_batch_size)
vaild_click_dataset = TestDataset(config.valid_click_path, max_seq_len=config.max_seq_len, data_type='click', buffer_size=100000)
vaild_click_loader = DataLoader(vaild_click_dataset, batch_size=config.eval_batch_size)
# Main training loop: one pass over the (iterable) training stream, with
# periodic logging, evaluation and checkpointing.
idx = 0
for train_batch in train_data_loader:
    loss = model.train(build_feed_dict(train_batch))
    if idx % config.log_interval == 0:
        print(f'{idx:5d}th step | loss {loss:5.6f}')
    if idx % config.eval_step == 0:
        # ------------ evaluate on annotated data -------------- #
        total_scores = []
        for test_data_batch in vaild_annotate_loader:
            feed_input = build_feed_dict(test_data_batch)
            score = model.get_scores(feed_input)
            score = score.cpu().detach().numpy().tolist()
            total_scores += score
        result_dict_ann = evaluate_all_metric(
            qid_list=vaild_annotate_dataset.total_qids,
            label_list=vaild_annotate_dataset.total_labels,
            score_list=total_scores,
            freq_list=vaild_annotate_dataset.total_freqs
        )
        print(
            f'{idx}th step valid annotate | '
            f'dcg@10: all {result_dict_ann["all_dcg@10"]:.6f} | '
            f'high {result_dict_ann["high_dcg@10"]:.6f} | '
            f'mid {result_dict_ann["mid_dcg@10"]:.6f} | '
            f'low {result_dict_ann["low_dcg@10"]:.6f} | '
            f'pnr {result_dict_ann["pnr"]:.6f}'
        )
        # ------------ evaluate on click data -------------- #
        total_scores = []
        for test_data_batch in vaild_click_loader:
            feed_input = build_feed_dict(test_data_batch)
            score = model.get_scores(feed_input)
            score = score.cpu().detach().numpy().tolist()
            total_scores += score
        result_dict_click = evaluate_all_metric(
            qid_list=vaild_click_dataset.total_qids,
            label_list=vaild_click_dataset.total_labels,
            score_list=total_scores,
            freq_list=None
        )
        print(
            f'{idx}th step valid click | '
            f'dcg@3 {result_dict_click["all_dcg@3"]:.6f} | '
            f'dcg@5 {result_dict_click["all_dcg@5"]:.6f} | '
            f'dcg@10 {result_dict_click["all_dcg@10"]:.6f} | '
            f'pnr {result_dict_click["pnr"]:.6f}'
        )
    # NOTE(review): the checkpoint name uses the metrics from the most recent
    # eval step; this assumes save_step triggers only after at least one eval
    # (idx == 0 always evaluates, so the names are defined).
    if idx % config.save_step == 0 and idx > 0:
        torch.save(model.model.state_dict(),
            'save_model/save_steps{}_{:.5f}_{:5f}.model'.format(idx, result_dict_ann['pnr'], result_dict_click['pnr'])
        )
    idx += 1
| 4,451 | 37.713043 | 129 | py |
baidu_ultr_dataset | baidu_ultr_dataset-main/submit.py | #!/usr/bin/env python
# coding=utf-8
# File Name: evaluate.py
# Author: Lixin Zou
# Mail: zoulixin15@gmail.com
# Created Time: Tue Sep 13 23:21:03 2022
#### Demo submission to WSDM Cup 2023 #####
# Step 1:
# Get the prediction score of model.
# > ``` python submit.py --emb_dim 768 --nlayer 12 --nhead 12 --dropout 0.1 --eval_batch_size 100 --test_annotate_path ./data/wsdm_round_1/wsdm_test_1.txt```
# To be noticed, the prediction score file must be ended with csv. Otherwise, the evaluation may fail.
# Step 2:
# Compress the prediction score file to zip file and submit the competition page.
from baseline_model.utils.sys_tools import find_class
import torch
import numpy as np
import warnings
import sys
from metrics import *
from Transformer4Ranking.model import *
from dataloader import *
from args import config
warnings.filterwarnings('ignore')
print(config)
exp_settings = config.exp_settings
# Transformer encoder that scores tokenized (query, title, content) inputs.
token_encoder = TransformerModel(
    ntoken=config.ntokens,
    hidden=config.emb_dim,
    nhead=config.nhead,
    nlayers=config.nlayers,
    dropout=config.dropout,
    mode='finetune'
)
# Wrap the encoder in the selected unbiased learning-to-rank algorithm.
method_str = exp_settings['method_name']
if method_str not in ['IPWrank', 'DLA', 'RegressionEM', 'PairDebias', 'NavieAlgorithm']:
    print("please choose a method in 'IPWrank', 'DLA', 'RegressionEM', 'PairDebias', 'NavieAlgorithm'")
    sys.exit()
model = find_class('baseline_model.learning_algorithm.'+method_str)\
    (exp_settings=exp_settings, encoder_model=token_encoder)
if config.evaluate_model_path != "":
    # load model
    model.model.load_state_dict(torch.load(config.evaluate_model_path))
# load dataset
test_annotate_dataset = TestDataset(config.test_annotate_path, max_seq_len=config.max_seq_len, data_type='annotate')
test_annotate_loader = DataLoader(test_annotate_dataset, batch_size=config.eval_batch_size)
# evaluate
total_scores = []
for test_data_batch in test_annotate_loader:
    feed_input = build_feed_dict(test_data_batch)
    score = model.get_scores(feed_input)
    score = score.cpu().detach().numpy().tolist()
    total_scores += score
# Write one prediction score per line; the competition expects a .csv file.
with open(config.result_path, "w") as f:
    f.writelines("\n".join(map(str, total_scores)))
| 2,185 | 32.121212 | 157 | py |
baidu_ultr_dataset | baidu_ultr_dataset-main/baseline_model/learning_algorithm/dla.py | """Training and testing the dual learning algorithm for unbiased learning to rank.
See the following paper for more information on the dual learning algorithm.
* Qingyao Ai, Keping Bi, Cheng Luo, Jiafeng Guo, W. Bruce Croft. 2018. Unbiased Learning to Rank with Unbiased Propensity Estimation. In Proceedings of SIGIR '18
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.nn as nn
import torch
import numpy as np
from baseline_model.learning_algorithm.base_algorithm import BaseAlgorithm
import baseline_model.utils as utils
def sigmoid_prob(logits):
    """Map ranking scores to (0, 1) via a sigmoid centred on the list mean."""
    centered = logits - torch.mean(logits, -1, keepdim=True)
    return torch.sigmoid(centered)
class DenoisingNet(nn.Module):
    """Position-based propensity model.

    For every rank position it feeds a one-hot position indicator (replicated
    per example) through a shared Linear+ELU net and returns the concatenated
    per-position propensity logits, shape ``(batch, list_size)``.
    """

    def __init__(self, input_vec_size):
        super(DenoisingNet, self).__init__()
        self.linear_layer = nn.Linear(input_vec_size, 1)
        self.elu_layer = nn.ELU()
        self.propensity_net = nn.Sequential(self.linear_layer, self.elu_layer)
        self.list_size = input_vec_size

    def forward(self, input_list):
        per_position_outputs = []
        for pos in range(self.list_size):
            # Build a one-hot encoding of this rank position, shaped like the
            # batch: zeros everywhere except a column of ones at `pos`.
            one_hot = [torch.zeros_like(input_list[pos]).unsqueeze(-1)
                       for _ in range(self.list_size)]
            one_hot[pos] = torch.ones_like(input_list[pos]).unsqueeze(-1)
            position_feature = torch.cat(one_hot, 1)
            # Shared simple network predicts the propensity logit.
            per_position_outputs.append(self.propensity_net(position_feature))
        return torch.cat(per_position_outputs, 1)
class DLA(BaseAlgorithm):
    """The Dual Learning Algorithm for unbiased learning to rank.
    This class implements the Dual Learning Algorithm (DLA) based on the input layer
    feed. It jointly trains the ranker and a position-based propensity model,
    each debiasing the other's loss. See the following paper for more information on the algorithm.
    * Qingyao Ai, Keping Bi, Cheng Luo, Jiafeng Guo, W. Bruce Croft. 2018. Unbiased Learning to Rank with Unbiased Propensity Estimation. In Proceedings of SIGIR '18
    """
    def __init__(self, exp_settings, encoder_model):
        """Create the model.
        Args:
            exp_settings: (dictionary) The dictionary containing the model settings.
            encoder_model: the ranking model (Transformer encoder) to be trained.
        """
        print('Build DLA')
        self.hparams = utils.hparams.HParams(
            learning_rate=exp_settings['lr'], # Learning rate.
            max_gradient_norm=0.5, # Clip gradients to this norm.
            loss_func='sigmoid_loss', # Select Loss function
            # the function used to convert logits to probability distributions
            logits_to_prob='sigmoid_loss',
            # The learning rate for ranker (-1 means same with learning_rate).
            propensity_learning_rate=-1.0,
            ranker_loss_weight=1.0, # Set the weight of unbiased ranking loss
            # Set strength for L2 regularization.
            l2_loss=0.0,
            max_propensity_weight=-1, # Set maximum value for propensity weights
            constant_propensity_initialization=False,
            # Set true to initialize propensity with constants.
            grad_strategy='ada', # Select gradient strategy
        )
        self.train_summary = {}
        self.eval_summary = {}
        self.hparams.parse(exp_settings['learning_algorithm_hparams'])
        self.exp_settings = exp_settings
        self.max_candidate_num = exp_settings['max_candidate_num']
        self.feature_size = exp_settings['feature_size']
        # The propensity model covers only the top `selection_bias_cutoff` positions.
        if 'selection_bias_cutoff' in exp_settings.keys():
            self.rank_list_size = self.exp_settings['selection_bias_cutoff']
            self.propensity_model = DenoisingNet(self.rank_list_size)
        # DataParallel
        self.model = encoder_model
        if torch.cuda.device_count() >= exp_settings['n_gpus'] > 1:
            print("Let's use", exp_settings['n_gpus'], "GPUs!")
            self.model = nn.DataParallel(self.model, device_ids = list(range(exp_settings['n_gpus'])))
        self.model.cuda()
        if exp_settings['init_parameters'] != "":
            print('load ', exp_settings['init_parameters'])
            self.model.load_state_dict(torch.load(exp_settings['init_parameters'] ), strict=False)
        self.propensity_model = self.propensity_model.cuda()
        self.letor_features_name = "letor_features"
        self.letor_features = None
        self.labels_name = [] # the labels for the documents (e.g., clicks)
        self.docid_inputs = [] # a list of top documents
        self.labels = [] # the labels for the documents (e.g., clicks)
        for i in range(self.max_candidate_num):
            self.labels_name.append("label{0}".format(i))
        if self.hparams.propensity_learning_rate < 0:
            self.propensity_learning_rate = float(self.hparams.learning_rate)
        else:
            self.propensity_learning_rate = float(self.hparams.propensity_learning_rate)
        self.learning_rate = float(self.hparams.learning_rate)
        self.global_step = 0
        # Select logits to prob function
        self.logits_to_prob = nn.Softmax(dim=-1)
        if self.hparams.logits_to_prob == 'sigmoid':
            self.logits_to_prob = sigmoid_prob
        # Optimizer *class*; instantiated per step in separate_gradient_update().
        self.optimizer_func = torch.optim.Adagrad
        if self.hparams.grad_strategy == 'sgd':
            self.optimizer_func = torch.optim.SGD
        print('Loss Function is ' + self.hparams.loss_func)
        # Select loss function
        self.loss_func = None
        if self.hparams.loss_func == 'sigmoid_loss':
            self.loss_func = self.sigmoid_loss_on_list
        elif self.hparams.loss_func == 'pairwise_loss':
            self.loss_func = self.pairwise_loss_on_list
        else: # softmax loss without weighting
            self.loss_func = self.softmax_loss
    def separate_gradient_update(self):
        """One optimizer step for the ranker and the propensity model, each
        with its own optimizer/learning rate, on the combined loss."""
        denoise_params = self.propensity_model.parameters()
        ranking_model_params = self.model.parameters()
        # Select optimizer
        if self.hparams.l2_loss > 0:
            for p in ranking_model_params:
                self.rank_loss += self.hparams.l2_loss * self.l2_loss(p)
        self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss
        opt_denoise = self.optimizer_func(self.propensity_model.parameters(), self.propensity_learning_rate)
        opt_ranker = self.optimizer_func(self.model.parameters(), self.learning_rate)
        opt_denoise.zero_grad()
        opt_ranker.zero_grad()
        self.loss.backward()
        if self.hparams.max_gradient_norm > 0:
            nn.utils.clip_grad_norm_(self.propensity_model.parameters(), self.hparams.max_gradient_norm)
            nn.utils.clip_grad_norm_(self.model.parameters(), self.hparams.max_gradient_norm)
        opt_denoise.step()
        opt_ranker.step()
    def train(self, input_feed):
        """Run a step of the model feeding the given inputs.
        Args:
            input_feed: (dictionary) A dictionary containing all the input feed data.
        Returns:
            The scalar loss value of this training step.
        """
        # Build model
        self.rank_list_size = self.exp_settings['selection_bias_cutoff']
        self.model.train()
        self.create_input_feed(input_feed, self.rank_list_size)
        # start train
        src = input_feed['src']
        src_segment = input_feed['src_segment']
        src_padding_mask = input_feed['src_padding_mask']
        train_output = self.model(src, src_segment, src_padding_mask)
        train_output = train_output.reshape(-1, self.max_candidate_num)
        self.propensity_model.train()
        propensity_labels = torch.transpose(self.labels,0,1)
        self.propensity = self.propensity_model(
            propensity_labels)
        # Dual updates: ranking loss is weighted by (fixed) propensity weights,
        # examination loss by (fixed) relevance weights — hence the no_grad blocks.
        with torch.no_grad():
            self.propensity_weights = self.get_normalized_weights(
                self.logits_to_prob(self.propensity))
        self.rank_loss = self.loss_func(
            train_output, self.labels, self.propensity_weights)
        # Compute examination loss
        with torch.no_grad():
            self.relevance_weights = self.get_normalized_weights(
                self.logits_to_prob(train_output))
        self.exam_loss = self.loss_func(
            self.propensity,
            self.labels,
            self.relevance_weights
        )
        self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss
        self.separate_gradient_update()
        # NOTE(review): this clips the *gradients* of the label tensor, which
        # normally has none — likely a no-op; confirm intent.
        self.clip_grad_value(self.labels, clip_value_min=0, clip_value_max=1)
        self.global_step+=1
        return self.loss.item()
    def get_scores(self, input_feed):
        """Score the documents in `input_feed` with the ranker (eval mode)."""
        self.model.eval()
        src = input_feed['src']
        src_segment = input_feed['src_segment']
        src_padding_mask = input_feed['src_padding_mask']
        scores = self.model(src, src_segment, src_padding_mask)
        return scores
    def get_normalized_weights(self, propensity):
        """Normalize propensities into inverse-propensity weights.
        Each position's weight is propensity[rank 0] / propensity[rank i].
        Args:
            propensity: (torch.Tensor) of shape (batch, rank_list_size).
        Returns:
            (torch.Tensor) A tensor containing the propensity weights.
        """
        propensity_list = torch.unbind(
            propensity, dim=1) # Compute propensity weights
        pw_list = []
        for i in range(len(propensity_list)):
            pw_i = propensity_list[0] / propensity_list[i]
            pw_list.append(pw_i)
        propensity_weights = torch.stack(pw_list, dim=1)
        if self.hparams.max_propensity_weight > 0:
            self.clip_grad_value(propensity_weights,clip_value_min=0,
                clip_value_max=self.hparams.max_propensity_weight)
        return propensity_weights
    def clip_grad_value(self, parameters, clip_value_min, clip_value_max) -> None:
        """Clips gradient of an iterable of parameters at specified values.
        Gradients are modified in-place; parameters without a gradient are skipped.
        Args:
            parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
                single Tensor whose gradients will be clipped.
            clip_value_min / clip_value_max (float or int): bounds of the
                allowed gradient range.
        """
        if isinstance(parameters, torch.Tensor):
            parameters = [parameters]
        clip_value_min = float(clip_value_min)
        clip_value_max = float(clip_value_max)
        for p in filter(lambda p: p.grad is not None, parameters):
            p.grad.data.clamp_(min=clip_value_min, max=clip_value_max)
baidu_ultr_dataset | baidu_ultr_dataset-main/baseline_model/learning_algorithm/ipw_rank.py | """Training and testing the inverse propensity weighting algorithm for unbiased learning to rank.
See the following paper for more information on the inverse propensity weighting algorithm.
* Xuanhui Wang, Michael Bendersky, Donald Metzler, Marc Najork. 2016. Learning to Rank with Selection Bias in Personal Search. In Proceedings of SIGIR '16
* Thorsten Joachims, Adith Swaminathan, Tobias Schnahel. 2017. Unbiased Learning-to-Rank with Biased Feedback. In Proceedings of WSDM '17
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
import torch
from baseline_model.learning_algorithm.base_algorithm import BaseAlgorithm
import baseline_model.utils as utils
import baseline_model as ultra
def selu(x):
    """SELU activation: scale * (x if x >= 0 else alpha * (exp(x) - 1))."""
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    # F.elu already yields exp(x) - 1 on the negative side.
    negative_branch = alpha * F.elu(x)
    return scale * torch.where(x >= 0.0, x, negative_branch)
class IPWrank(BaseAlgorithm):
    """The Inverse Propensity Weighting algorithm for unbiased learning to rank.
    This class implements the training and testing of the Inverse Propensity Weighting algorithm for unbiased learning to rank. See the following paper for more information on the algorithm.
    * Xuanhui Wang, Michael Bendersky, Donald Metzler, Marc Najork. 2016. Learning to Rank with Selection Bias in Personal Search. In Proceedings of SIGIR '16
    * Thorsten Joachims, Adith Swaminathan, Tobias Schnahel. 2017. Unbiased Learning-to-Rank with Biased Feedback. In Proceedings of WSDM '17
    """
    def __init__(self, exp_settings, encoder_model):
        """Create the model.
        Args:
            exp_settings: (dictionary) The dictionary containing the model settings.
            encoder_model: the ranking model (Transformer encoder) to be trained.
        """
        self.hparams = utils.hparams.HParams(
            propensity_estimator_type='baseline_model.utils.propensity_estimator.RandomizedPropensityEstimator',
            propensity_estimator_json='baseline_model/randomized_pbm_0.1_1.0_4_1.0.json',
            learning_rate=exp_settings['lr'], # Learning rate.
            max_gradient_norm=0.5, # Clip gradients to this norm.
            loss_func='sigmoid_loss',
            # Set strength for L2 regularization.
            l2_loss=0.0,
            grad_strategy='ada', # Select gradient strategy
        )
        self.hparams.parse(exp_settings['learning_algorithm_hparams'])
        self.exp_settings = exp_settings
        if 'selection_bias_cutoff' in self.exp_settings.keys():
            self.rank_list_size = self.exp_settings['selection_bias_cutoff']
        self.letor_features_name = "letor_features"
        self.letor_features = None
        self.feature_size = exp_settings["feature_size"]
        # DataParallel
        self.model = encoder_model
        if torch.cuda.device_count() >= exp_settings['n_gpus'] > 1:
            print("Let's use", exp_settings['n_gpus'], "GPUs!")
            self.model = nn.DataParallel(self.model, device_ids = list(range(exp_settings['n_gpus'])))
        self.model.cuda()
        if exp_settings['init_parameters'] != "":
            print('load ', exp_settings['init_parameters'])
            self.model.load_state_dict(torch.load(exp_settings['init_parameters'] ), strict=False)
        # propensity_estimator: pre-fit position-bias model used to compute
        # the inverse propensity weights for each click list.
        self.propensity_estimator = ultra.utils.find_class(
            self.hparams.propensity_estimator_type)(
            self.hparams.propensity_estimator_json
        )
        self.max_candidate_num = exp_settings['max_candidate_num']
        self.learning_rate = float(self.hparams.learning_rate)
        self.global_step = 0
        # Feeds for inputs.
        self.docid_inputs_name = [] # a list of top documents
        self.labels_name = [] # the labels for the documents (e.g., clicks)
        self.docid_inputs = [] # a list of top documents
        self.labels = [] # the labels for the documents (e.g., clicks)
        for i in range(self.max_candidate_num):
            self.docid_inputs_name.append("docid_input{0}".format(i))
            self.labels_name.append("label{0}".format(i))
        self.optimizer_func = torch.optim.Adagrad(self.model.parameters(), lr=self.learning_rate)
        if self.hparams.grad_strategy == 'sgd':
            self.optimizer_func = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)
    def train(self, input_feed):
        """Run a step of the model feeding the given inputs for training process.
        Args:
            input_feed: (dictionary) A dictionary containing all the input feed data.
        Returns:
            The scalar loss value of this training step.
        """
        # Output feed: depends on whether we do a backward step or not.
        # compute propensity weights for the input data.
        self.global_step += 1
        pw = []
        self.model.train()
        for l in range(self.rank_list_size):
            input_feed["propensity_weights{0}".format(l)] = []
        # One propensity-weight list per query in the batch, derived from its clicks.
        for i in range(len(input_feed[self.labels_name[0]])):
            click_list = [input_feed[self.labels_name[l]][i]
                          for l in range(self.rank_list_size)]
            pw_list = self.propensity_estimator.getPropensityForOneList(
                click_list, use_non_clicked_data=True)
            pw.append(pw_list)
            for l in range(self.rank_list_size):
                input_feed["propensity_weights{0}".format(l)].append(
                    pw_list[l])
        self.propensity_weights = pw
        # Gradients and SGD update operation for training the model.
        # start train
        src = input_feed['src']
        src_segment = input_feed['src_segment']
        src_padding_mask = input_feed['src_padding_mask']
        train_output = self.model(src, src_segment, src_padding_mask)
        train_output = train_output.reshape(-1, self.max_candidate_num)
        # start optimize
        self.create_input_feed(input_feed, self.rank_list_size)
        train_labels = self.labels
        train_pw = torch.as_tensor(self.propensity_weights).cuda()
        self.loss = None
        if self.hparams.loss_func == 'sigmoid_loss':
            self.loss = self.sigmoid_loss_on_list(
                train_output, train_labels, train_pw)
        elif self.hparams.loss_func == 'pairwise_loss':
            self.loss = self.pairwise_loss_on_list(
                train_output, train_labels, train_pw)
        else:
            self.loss = self.softmax_loss(
                train_output, train_labels, train_pw)
        params = self.model.parameters()
        if self.hparams.l2_loss > 0:
            for p in params:
                self.loss += self.hparams.l2_loss * self.l2_loss(p)
        self.opt_step(self.optimizer_func, params)
        # NOTE(review): this clips label *values'* gradients, not the model's —
        # labels normally carry no grad, so this looks like a no-op; confirm.
        nn.utils.clip_grad_value_(train_labels, 1)
        # print(" Loss %f at Global Step %d: " % (self.loss.item(),self.global_step))
        return self.loss.item()
    def get_scores(self, input_feed):
        """Score the documents in `input_feed` with the ranker (eval mode)."""
        self.model.eval()
        src = input_feed['src']
        src_segment = input_feed['src_segment']
        src_padding_mask = input_feed['src_padding_mask']
        scores = self.model(src, src_segment, src_padding_mask)
        return scores
| 7,426 | 41.44 | 190 | py |
baidu_ultr_dataset | baidu_ultr_dataset-main/baseline_model/learning_algorithm/pairwise_debias.py | """Training and testing the Pairwise Debiasing algorithm for unbiased learning to rank.
See the following paper for more information on the Pairwise Debiasing algorithm.
* Hu, Ziniu, Yang Wang, Qu Peng, and Hang Li. "Unbiased LambdaMART: An Unbiased Pairwise Learning-to-Rank Algorithm." In The World Wide Web Conference, pp. 2830-2836. ACM, 2019.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from baseline_model.learning_algorithm.base_algorithm import BaseAlgorithm
import baseline_model.utils as utils
def get_bernoulli_sample(probs):
    """Conduct Bernoulli sampling according to a specific probability distribution.

    Args:
        probs: (torch.Tensor) A tensor in which each element denotes a
            probability of 1 in a Bernoulli distribution.

    Returns:
        A tensor of binary samples (0 or 1) with the same shape as ``probs``.
    """
    # ceil(p - u) with u ~ U[0, 1) equals 1 iff u < p, i.e. a Bernoulli(p) draw.
    # BUG FIX: the uniform noise was always created on CUDA, crashing on CPU
    # tensors / CUDA-less hosts; draw it on the same device as `probs` instead.
    noise = torch.rand(probs.shape, device=probs.device)
    return torch.ceil(probs - noise)
class PairDebias(BaseAlgorithm):
    """The Pairwise Debiasing algorithm for unbiased learning to rank.
    This class implements the Pairwise Debiasing algorithm based on the input layer
    feed. See the following paper for more information on the algorithm.
    * Hu, Ziniu, Yang Wang, Qu Peng, and Hang Li. "Unbiased LambdaMART: An Unbiased Pairwise Learning-to-Rank Algorithm." In The World Wide Web Conference, pp. 2830-2836. ACM, 2019.
    """
    def __init__(self, exp_settings, encoder_model):
        """Create the model.
        Args:
            exp_settings: (dictionary) The dictionary containing the model settings.
            encoder_model: the ranking model (Transformer encoder) to be trained.
        """
        print('Build Pairwise Debiasing algorithm.')
        self.hparams = utils.hparams.HParams(
            EM_step_size=exp_settings['lr']*10, # Step size for EM algorithm.
            learning_rate=exp_settings['lr'], # Learning rate.
            max_gradient_norm=5.0, # Clip gradients to this norm.
            # An int specify the regularization term.
            regulation_p=1,
            # Set strength for L2 regularization.
            l2_loss=0.0,
            grad_strategy='ada', # Select gradient strategy
        )
        self.hparams.parse(exp_settings['learning_algorithm_hparams'])
        self.exp_settings = exp_settings
        if 'selection_bias_cutoff' in self.exp_settings.keys():
            self.rank_list_size = self.exp_settings['selection_bias_cutoff']
        self.feature_size = exp_settings['feature_size']
        # DataParallel
        self.model = encoder_model
        if torch.cuda.device_count() >= exp_settings['n_gpus'] > 1:
            print("Let's use", exp_settings['n_gpus'], "GPUs!")
            self.model = nn.DataParallel(self.model, device_ids = list(range(exp_settings['n_gpus'])))
        self.model.cuda()
        self.max_candidate_num = exp_settings['max_candidate_num']
        self.learning_rate = float(self.hparams.learning_rate)
        # Feeds for inputs.
        self.docid_inputs_name = [] # a list of top documents
        self.labels_name = [] # the labels for the documents (e.g., clicks)
        self.docid_inputs = [] # a list of top documents
        self.labels = [] # the labels for the documents (e.g., clicks)
        for i in range(self.max_candidate_num):
            self.labels_name.append("label{0}".format(i))
        self.global_step = 0
        # t_plus / t_minus are the position-bias ratios updated by the EM step;
        # they are buffers (no gradient), one per rank position.
        if 'selection_bias_cutoff' in self.exp_settings:
            self.rank_list_size = self.exp_settings['selection_bias_cutoff']
            self.t_plus = torch.ones([1, self.rank_list_size])
            self.t_minus = torch.ones([1, self.rank_list_size])
            self.t_plus = torch.ones([1, self.rank_list_size]).cuda()
            self.t_minus = torch.ones([1, self.rank_list_size]).cuda()
            self.t_plus.requires_grad = False
            self.t_minus.requires_grad = False
        # Select optimizer
        self.optimizer_func = torch.optim.Adagrad(self.model.parameters(), lr=self.hparams.learning_rate)
        if self.hparams.grad_strategy == 'sgd':
            self.optimizer_func = torch.optim.SGD(self.model.parameters(), lr=self.hparams.learning_rate)
    def train(self, input_feed):
        """Run a step of the model feeding the given inputs for training process.
        Args:
            input_feed: (dictionary) A dictionary containing all the input feed data.
        Returns:
            The scalar loss value of this training step.
        """
        self.labels = []
        self.model.train()
        for i in range(self.rank_list_size):
            self.labels.append(input_feed[self.labels_name[i]])
        self.labels = torch.as_tensor(self.labels).cuda()
        src = input_feed['src']
        src_segment = input_feed['src_segment']
        src_padding_mask = input_feed['src_padding_mask']
        train_output = self.model(src, src_segment, src_padding_mask)
        train_output = train_output.reshape(-1, self.max_candidate_num)
        self.splitted_t_plus = torch.split(
            self.t_plus, 1, dim=1)
        self.splitted_t_minus = torch.split(
            self.t_minus, 1, dim=1)
        split_size = int(train_output.shape[1] / self.rank_list_size)
        output_list = torch.split(train_output, split_size, dim=1)
        t_plus_loss_list = [0.0 for _ in range(self.rank_list_size)]
        t_minus_loss_list = [0.0 for _ in range(self.rank_list_size)]
        self.loss = 0.0
        # Pairwise loss over all position pairs (i clicked, j not clicked),
        # debiased by dividing by the current t_plus / t_minus estimates.
        for i in range(self.rank_list_size):
            for j in range(self.rank_list_size):
                if i == j:
                    continue
                valid_pair_mask = torch.minimum(
                    torch.ones_like(
                        self.labels[i]), F.relu(self.labels[i] - self.labels[j]))
                pair_loss = torch.sum(
                    valid_pair_mask *
                    self.pairwise_cross_entropy_loss(
                        output_list[i], output_list[j])
                )
                t_plus_loss_list[i] += pair_loss / self.splitted_t_minus[j]
                t_minus_loss_list[j] += pair_loss / self.splitted_t_plus[i]
                self.loss += pair_loss / \
                    self.splitted_t_plus[i] / self.splitted_t_minus[j]
        # EM-style update of the position-bias ratios (normalized by rank 0).
        with torch.no_grad():
            self.t_plus = (1 - self.hparams.EM_step_size) * self.t_plus + self.hparams.EM_step_size * torch.pow(
                torch.cat(t_plus_loss_list, dim=1) / (t_plus_loss_list[0]+10e-9), 1 / (self.hparams.regulation_p + 1))
            self.t_minus = (1 - self.hparams.EM_step_size) * self.t_minus + self.hparams.EM_step_size * torch.pow(torch.cat(
                t_minus_loss_list, dim=1) / (t_minus_loss_list[0]+10e-9), 1 / (self.hparams.regulation_p + 1))
        # Add l2 loss
        params = self.model.parameters()
        if self.hparams.l2_loss > 0:
            for p in params:
                self.loss += self.hparams.l2_loss * self.l2_loss(p)
        self.opt_step(self.optimizer_func, params)
        self.global_step+=1
        return self.loss.item()
    def get_scores(self, input_feed):
        """Score the documents in `input_feed` with the ranker (eval mode)."""
        self.model.eval()
        src = input_feed['src']
        src_segment = input_feed['src_segment']
        src_padding_mask = input_feed['src_padding_mask']
        scores = self.model(src, src_segment, src_padding_mask)
        return scores
| 7,675 | 41.644444 | 181 | py |
baidu_ultr_dataset | baidu_ultr_dataset-main/baseline_model/learning_algorithm/base_algorithm.py | """The basic class that contains all the API needed for the implementation of an unbiased learning to rank algorithm.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from matplotlib.cbook import print_cycles
import torch.nn.functional as F
import torch
import numpy as np
from abc import ABC, abstractmethod
import baseline_model.utils as utils
def softmax_cross_entropy_with_logits(logits, labels):
    """Computes softmax cross entropy between logits and labels.

    Args:
        logits: A tensor with shape [batch_size, list_size]. Each value is
            the ranking score of the corresponding example.
        labels: A tensor of the same shape as `logits`. A value >= 1 means a
            relevant example.

    Returns:
        A [batch_size] tensor with the per-example loss.
    """
    log_probs = F.log_softmax(logits, -1)
    return torch.sum(-labels * log_probs, -1)
class BaseAlgorithm(ABC):
"""The basic class that contains all the API needed for the
implementation of an unbiased learning to rank algorithm.
"""
PADDING_SCORE = -100000
@abstractmethod
def __init__(self, exp_settings, encoder_model):
"""Create the model.
Args:
data_set: (Raw_data) The dataset used to build the input layer.
exp_settings: (dictionary) The dictionary containing the model settings.
"""
self.is_training = None
self.docid_inputs = None # a list of top documents
self.letor_features = None # the letor features for the documents
self.labels = None # the labels for the documents (e.g., clicks)
self.output = None # the ranking scores of the inputs
# the number of documents considered in each rank list.
self.rank_list_size = None
# the maximum number of candidates for each query.
self.max_candidate_num = None
self.optimizer_func = torch.optim.adagrad()
    @abstractmethod
    def train(self, input_feed):
        """Run a step of the model feeding the given inputs for training.

        Abstract stub: concrete algorithms (e.g. DLA, IPWrank, PairDebias)
        implement the actual parameter update.

        Args:
            input_feed: (dictionary) A dictionary containing all the input feed data.
        Returns:
            A triple consisting of the loss, outputs (None if we do backward),
            and a summary containing related information about the step.
            (NOTE(review): the concrete implementations in this file actually
            return the scalar loss value only.)
        """
        pass
def create_input_feed(self, input_feed, list_size):
self.labels = []
for i in range(list_size):
self.labels.append(input_feed[self.labels_name[i]])
self.labels = np.transpose(self.labels)
self.labels = torch.FloatTensor(self.labels).cuda()
def opt_step(self, opt, params):
""" Perform an optimization step
Args:
opt: Optimization Function to use
params: Model's parameters
Returns
The ranking model that will be used to computer the ranking score.
"""
opt.zero_grad()
self.loss.backward()
if self.hparams.max_gradient_norm > 0:
self.clipped_gradient = torch.nn.utils.clip_grad_norm_(
params, self.hparams.max_gradient_norm)
opt.step()
def pairwise_cross_entropy_loss(
self, pos_scores, neg_scores, propensity_weights=None):
"""Computes pairwise softmax loss without propensity weighting.
Args:
pos_scores: (torch.Tensor) A tensor with shape [batch_size, 1]. Each value is
the ranking score of a positive example.
neg_scores: (torch.Tensor) A tensor with shape [batch_size, 1]. Each value is
the ranking score of a negative example.
propensity_weights: (torch.Tensor) A tensor of the same shape as `output` containing the weight of each element.
Returns:
(torch.Tensor) A single value tensor containing the loss.
"""
if propensity_weights is None:
propensity_weights = torch.ones_like(pos_scores)
label_dis = torch.cat(
[torch.ones_like(pos_scores), torch.zeros_like(neg_scores)], dim=1)
loss = softmax_cross_entropy_with_logits(
logits = torch.cat([pos_scores, neg_scores], dim=1), labels = label_dis)* propensity_weights
return loss
def sigmoid_loss_on_list(self, output, labels,
propensity_weights=None):
"""Computes pointwise sigmoid loss without propensity weighting.
Args:
output: (torch.Tensor) A tensor with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
labels: (torch.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
relevant example.
propensity_weights: (torch.Tensor) A tensor of the same shape as `output` containing the weight of each element.
Returns:
(torch.Tensor) A single value tensor containing the loss.
"""
if propensity_weights is None:
propensity_weights = torch.ones_like(labels)
criterion = torch.nn.BCEWithLogitsLoss(reduction="none")
loss = criterion(output, labels) * propensity_weights
return torch.mean(torch.sum(loss, dim=1))
def pairwise_loss_on_list(self, output, labels,
propensity_weights=None):
"""Computes pairwise entropy loss.
Args:
output: (torch.Tensor) A tensor with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
labels: (torch.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
relevant example.
propensity_weights: (torch.Tensor) A tensor of the same shape as `output` containing the weight of each element.
Returns:
(torch.Tensor) A single value tensor containing the loss.
"""
if propensity_weights is None:
propensity_weights = torch.ones_like(labels)
loss = None
sliced_output = torch.unbind(output, dim=1)
sliced_label = torch.unbind(labels, dim=1)
sliced_propensity = torch.unbind(propensity_weights, dim=1)
for i in range(len(sliced_output)):
for j in range(i + 1, len(sliced_output)):
cur_label_weight = torch.sign(
sliced_label[i] - sliced_label[j])
cur_propensity = sliced_propensity[i] * \
sliced_label[i] + \
sliced_propensity[j] * sliced_label[j]
cur_pair_loss = - \
torch.exp(
sliced_output[i]) / (torch.exp(sliced_output[i]) + torch.exp(sliced_output[j]))
if loss is None:
loss = cur_label_weight * cur_pair_loss
loss += cur_label_weight * cur_pair_loss * cur_propensity
batch_size = labels.size()[0]
return torch.sum(loss) / batch_size.type(torch.float32)
def softmax_loss(self, output, labels, propensity_weights=None):
"""Computes listwise softmax loss without propensity weighting.
Args:
output: (torch.Tensor) A tensor with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
labels: (torch.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
relevant example.
propensity_weights: (torch.Tensor) A tensor of the same shape as `output` containing the weight of each element.
Returns:
(torch.Tensor) A single value tensor containing the loss.
"""
if propensity_weights is None:
propensity_weights = torch.ones_like(labels)
weighted_labels = (labels + 0.0000001) * propensity_weights
label_dis = weighted_labels / \
torch.sum(weighted_labels, 1, keepdim=True)
label_dis = torch.nan_to_num(label_dis)
loss = softmax_cross_entropy_with_logits(
logits = output, labels = label_dis)* torch.sum(weighted_labels, 1)
return torch.sum(loss) / torch.sum(weighted_labels)
def l2_loss(self, input):
return torch.sum(input ** 2)/2
| 8,278 | 38.802885 | 124 | py |
"""Training and testing the regression-based EM algorithm for unbiased learning to rank.
See the following paper for more information on the regression-based EM algorithm.
* Wang, Xuanhui, Nadav Golbandi, Michael Bendersky, Donald Metzler, and Marc Najork. "Position bias estimation for unbiased learning to rank in personal search." In Proceedings of the Eleventh ACM International Conference on Web Search and Data Mining, pp. 610-618. ACM, 2018.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.nn as nn
import torch
from baseline_model.learning_algorithm.base_algorithm import BaseAlgorithm
import baseline_model.utils as utils
def get_bernoulli_sample(probs):
    """Draw Bernoulli samples according to the given probabilities.

    Args:
        probs: (torch.Tensor) each element is the probability of drawing a 1.

    Returns:
        A tensor of binary samples (0 or 1) with the same shape as ``probs``.
    """
    if torch.cuda.is_available():
        noise = torch.rand(probs.shape, device=torch.device('cuda'))
    else:
        noise = torch.rand(probs.shape)
    # probs - noise is positive with probability `probs`; ceil maps that to 1
    return torch.ceil(probs - noise)
class RegressionEM(BaseAlgorithm):
    """The regression-based EM algorithm for unbiased learning to rank.
    This class implements the regression-based EM algorithm based on the input layer
    feed. See the following paper for more information.
    * Wang, Xuanhui, Nadav Golbandi, Michael Bendersky, Donald Metzler, and Marc Najork. "Position bias estimation for unbiased learning to rank in personal search." In Proceedings of the Eleventh ACM International Conference on Web Search and Data Mining, pp. 610-618. ACM, 2018.
    In particular, we use the online EM algorithm for the parameter estimations:
    * Cappé, Olivier, and Eric Moulines. "Online expectation–maximization algorithm for latent data models." Journal of the Royal Statistical Society: Series B (Statistical Methodology) 71.3 (2009): 593-613.
    """
    def __init__(self, exp_settings, encoder_model):
        """Create the model.
        Args:
            exp_settings: (dictionary) The dictionary containing the model settings.
            encoder_model: the neural ranking model that scores (query, doc) inputs.
        """
        print('Build Regression-based EM algorithm.')
        self.hparams = utils.hparams.HParams(
            EM_step_size=exp_settings['lr']*10., # Step size for EM algorithm.
            learning_rate=exp_settings['lr'], # Learning rate.
            max_gradient_norm=0.5, # Clip gradients to this norm.
            # Set strength for L2 regularization.
            l2_loss=0.0,
            grad_strategy='ada', # Select gradient strategy
        )
        self.hparams.parse(exp_settings['learning_algorithm_hparams'])
        self.exp_settings = exp_settings
        if 'selection_bias_cutoff' in self.exp_settings.keys():
            # only the top `selection_bias_cutoff` ranks are modeled for position bias
            self.rank_list_size = self.exp_settings['selection_bias_cutoff']
        self.max_candidate_num = exp_settings['max_candidate_num']
        self.feature_size = exp_settings['feature_size']
        # DataParallel
        self.model = encoder_model
        if torch.cuda.device_count() >= exp_settings['n_gpus'] > 1:
            print("Let's use", exp_settings['n_gpus'], "GPUs!")
            self.model = nn.DataParallel(self.model, device_ids = list(range(exp_settings['n_gpus'])))
        # NOTE(review): CUDA is required unconditionally here and throughout train().
        self.model.cuda()
        self.letor_features_name = "letor_features"
        self.letor_features = None
        self.docid_inputs_name = [] # a list of top documents
        self.labels_name = [] # the labels for the documents (e.g., clicks)
        self.docid_inputs = [] # a list of top documents
        self.labels = [] # the labels for the documents (e.g., clicks)
        for i in range(self.max_candidate_num):
            self.docid_inputs_name.append("docid_input{0}".format(i))
            self.labels_name.append("label{0}".format(i))
        with torch.no_grad():
            # examination propensity per rank position, initialized to 0.9 everywhere
            self.propensity = (torch.ones([1, self.rank_list_size]) * 0.9)
            self.propensity = self.propensity.cuda()
        self.learning_rate = float(self.hparams.learning_rate)
        self.global_step = 0
        # scalar bias added to every relevance logit (starts at 0)
        self.sigmoid_prob_b = (torch.ones([1]) - 1.0)
        self.sigmoid_prob_b = self.sigmoid_prob_b.cuda()
        # self.sigmoid_prob_b = self.sigmoid_prob_b.to(device=self.cuda)
        # Select optimizer
        self.optimizer_func = torch.optim.Adagrad(self.model.parameters(), lr=self.hparams.learning_rate)
        # tf.train.AdagradOptimizer
        if self.hparams.grad_strategy == 'sgd':
            self.optimizer_func = torch.optim.SGD(self.model.parameters(), lr=self.hparams.learning_rate)
    def train(self, input_feed):
        """Run a step of the model feeding the given inputs for training process.
        Performs one E-step (compute relevance posteriors under the current
        propensity estimates), one gradient step on Bernoulli-sampled relevance
        labels, and one online M-step updating the propensity estimates.
        Args:
            input_feed: (dictionary) A dictionary containing all the input feed data.
        Returns:
            The scalar training loss of this step.
        """
        self.model.train()
        self.create_input_feed(input_feed, self.rank_list_size)
        src = input_feed['src']
        src_segment = input_feed['src_segment']
        src_padding_mask = input_feed['src_padding_mask']
        train_output = self.model(src, src_segment, src_padding_mask)
        # one score per (query, doc) pair; regroup into [batch, max_candidate_num]
        train_output = train_output.reshape(-1, self.max_candidate_num)
        train_output = train_output + self.sigmoid_prob_b
        # Conduct estimation step.
        gamma = torch.sigmoid(train_output)  # model's current P(relevant)
        # reshape from [rank_list_size, ?] to [?, rank_list_size]
        reshaped_train_labels = self.labels
        # posterior P(examined=1, relevant=0 | click=0)
        p_e1_r0_c0 = self.propensity * \
            (1 - gamma) / (1 - self.propensity * gamma)
        # posterior P(examined=0, relevant=1 | click=0)
        p_e0_r1_c0 = (1 - self.propensity) * gamma / \
            (1 - self.propensity * gamma)
        # posterior P(relevant=1): clicked docs are treated as relevant
        p_r1 = reshaped_train_labels + \
            (1 - reshaped_train_labels) * p_e0_r1_c0
        # Get Bernoulli samples and compute rank loss
        self.ranker_labels = get_bernoulli_sample(p_r1).cuda()
        criterion = torch.nn.BCEWithLogitsLoss()
        self.loss = criterion(train_output,self.ranker_labels)
        params = self.model.parameters()
        if self.hparams.l2_loss > 0:
            for p in params:
                self.loss += self.hparams.l2_loss * self.l2_loss(p)
        opt = self.optimizer_func
        opt.zero_grad(set_to_none=True)
        self.loss.backward()
        if self.loss == 0:
            # debugging aid: dump all parameters when the loss collapses to zero
            for name, param in self.model.named_parameters():
                print(name, param)
        if self.hparams.max_gradient_norm > 0:
            self.clipped_gradient = nn.utils.clip_grad_norm_(
                params, self.hparams.max_gradient_norm)
        opt.step()
        nn.utils.clip_grad_value_(reshaped_train_labels, 1)
        # Conduct maximization step
        with torch.no_grad():
            # online EM update of the per-rank propensity estimates
            self.propensity = (1 - self.hparams.EM_step_size) * self.propensity + self.hparams.EM_step_size * torch.mean(
                reshaped_train_labels + (1 - reshaped_train_labels) * p_e1_r0_c0, dim=0, keepdim=True)
            self.update_propensity_op = self.propensity
            # inverse-propensity weights derived from the current estimates
            self.propensity_weights = 1.0 / self.propensity
        self.global_step += 1
        return self.loss.item()
    def get_scores(self, input_feed):
        """Score the documents in ``input_feed`` with the trained model (eval mode)."""
        self.model.eval()
        src = input_feed['src']
        src_segment = input_feed['src_segment']
        src_padding_mask = input_feed['src_padding_mask']
        scores = self.model(src, src_segment, src_padding_mask)
        return scores
| 7,819 | 42.687151 | 280 | py |
"""The navie algorithm that directly trains ranking models with clicks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
from baseline_model.learning_algorithm.base_algorithm import BaseAlgorithm
import baseline_model.utils as utils
class NavieAlgorithm(BaseAlgorithm):
    """The navie algorithm that directly trains ranking models with input labels.
    """
    def __init__(self, exp_settings, encoder_model):
        """Create the model.
        Args:
            exp_settings: (dictionary) The dictionary containing the model settings.
            encoder_model: the neural ranking model that scores (query, doc) inputs.
        """
        print('Build NavieAlgorithm')
        self.hparams = utils.hparams.HParams(
            learning_rate=exp_settings['lr'], # Learning rate.
            max_gradient_norm=0.5, # Clip gradients to this norm.
            loss_func='sigmoid_loss', # Select Loss function
            # Set strength for L2 regularization.
            l2_loss=0.0,
            grad_strategy='ada', # Select gradient strategy
        )
        self.train_summary = {}
        self.eval_summary = {}
        self.is_training = "is_train"
        print(exp_settings['learning_algorithm_hparams'])
        self.hparams.parse(exp_settings['learning_algorithm_hparams'])
        self.exp_settings = exp_settings
        if 'selection_bias_cutoff' in self.exp_settings.keys():
            # only the top `selection_bias_cutoff` ranks are used for training
            self.rank_list_size = self.exp_settings['selection_bias_cutoff']
        self.feature_size = exp_settings['feature_size']
        # DataParallel
        self.model = encoder_model
        if torch.cuda.device_count() >= exp_settings['n_gpus'] > 1:
            print("Let's use", exp_settings['n_gpus'], "GPUs!")
            self.model = nn.DataParallel(self.model, device_ids = list(range(exp_settings['n_gpus'])))
        # NOTE(review): CUDA is required unconditionally.
        self.model.cuda()
        if exp_settings['init_parameters'] != "":
            # warm-start from a pretrained checkpoint (non-strict load)
            print('load ', exp_settings['init_parameters'])
            self.model.load_state_dict(torch.load(exp_settings['init_parameters'] ), strict=False)
        self.max_candidate_num = exp_settings['max_candidate_num']
        self.learning_rate = float(self.hparams.learning_rate)
        self.global_step = 0
        # Feeds for inputs.
        self.letor_features_name = "letor_features"
        self.letor_features = None
        self.labels_name = [] # the labels for the documents (e.g., clicks)
        self.docid_inputs = [] # a list of top documents
        self.labels = [] # the labels for the documents (e.g., clicks)
        for i in range(self.max_candidate_num):
            self.labels_name.append("label{0}".format(i))
        self.optimizer_func = torch.optim.Adagrad(self.model.parameters(), lr=self.learning_rate)
        if self.hparams.grad_strategy == 'sgd':
            self.optimizer_func = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)
    def train(self, input_feed):
        """Run a step of the model feeding the given inputs for training process.
        Args:
            input_feed: (dictionary) A dictionary containing all the input feed data.
        Returns:
            The scalar training loss of this step.
        """
        self.global_step += 1
        self.model.train()
        self.create_input_feed(input_feed, self.rank_list_size)
        # Gradients and SGD update operation for training the model.
        src = input_feed['src']
        src_segment = input_feed['src_segment']
        src_padding_mask = input_feed['src_padding_mask']
        train_output = self.model(src, src_segment, src_padding_mask)
        # one score per (query, doc) pair; regroup into [batch, max_candidate_num]
        train_output = train_output.reshape(-1, self.max_candidate_num)
        train_labels = self.labels
        self.loss = None
        # pick the configured loss: pointwise sigmoid / pairwise / listwise softmax
        if self.hparams.loss_func == 'sigmoid_loss':
            self.loss = self.sigmoid_loss_on_list(
                train_output, train_labels)
        elif self.hparams.loss_func == 'pairwise_loss':
            self.loss = self.pairwise_loss_on_list(
                train_output, train_labels)
        else:
            self.loss = self.softmax_loss(
                train_output, train_labels)
        params = self.model.parameters()
        if self.hparams.l2_loss > 0:
            loss_l2 = 0.0
            for p in params:
                loss_l2 += self.l2_loss(p)
            self.loss += self.hparams.l2_loss * loss_l2
        self.opt_step(self.optimizer_func, params)
        nn.utils.clip_grad_value_(train_labels, 1)
        return self.loss.item()
    def get_scores(self, input_feed):
        """Score the documents in ``input_feed`` with the trained model (eval mode)."""
        self.model.eval()
        src = input_feed['src']
        src_segment = input_feed['src_segment']
        src_padding_mask = input_feed['src_padding_mask']
        scores = self.model(src, src_segment, src_padding_mask)
        return scores
| 4,950 | 37.084615 | 102 | py |
# -*- encoding: utf-8 -*-
'''
@Time : 2022/06/10 15:51:44
@Author : Chu Xiaokai
@Contact : xiaokaichu@gmail.com
'''
import math
import torch
from torch import nn, Tensor
import torch.nn.functional as F
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from torch.optim.lr_scheduler import LambdaLR
from args import config
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Build a LambdaLR schedule with linear warmup then linear decay.

    The LR multiplier rises linearly from 0 to 1 over ``num_warmup_steps``,
    decays linearly back towards 0 until ``num_training_steps``, and is held
    at 0.04 afterwards (i.e. end learning rate = 0.04 * base LR).
    """
    def scale_at(step):
        if step < num_warmup_steps:
            return step / max(1, num_warmup_steps)
        if step >= num_training_steps:  # end_learning_rate=8e-8
            return 0.04
        remaining = num_training_steps - step
        return remaining / max(1, num_training_steps - num_warmup_steps)
    return LambdaLR(optimizer, scale_at, last_epoch)
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encodings.

    Note: ``forward`` uses its input only for the sequence length and returns
    the (dropout-regularized) encoding table slice; the caller is expected to
    add it to its own embeddings.
    """
    def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 513):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        positions = torch.arange(max_len).unsqueeze(1)
        # geometric frequency progression, as in "Attention Is All You Need"
        freqs = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, 1, d_model)
        table[:, 0, 0::2] = torch.sin(positions * freqs)
        table[:, 0, 1::2] = torch.cos(positions * freqs)
        self.register_buffer('pe', table)

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x: Tensor, shape [seq_len, batch_size, embedding_dim]

        Returns:
            Positional encodings of shape [seq_len, 1, embedding_dim],
            passed through dropout (active only in training mode).
        """
        seq_len = x.size(0)
        return self.dropout(self.pe[:seq_len])
class TransformerModel(nn.Module):
    """Transformer encoder used either for pretraining (MLM head + linear
    scorer) or finetuning (4-layer MLP scorer), selected by ``mode``.
    """
    def __init__(self, ntoken, hidden, nhead, nlayers, dropout, mode='finetune'):
        """
        Args:
            ntoken: vocabulary size.
            hidden: model width (token/segment/positional embedding size).
            nhead: number of attention heads per encoder layer.
            nlayers: number of Transformer encoder layers.
            dropout: dropout probability used throughout.
            mode: 'pretrain' (MLM + linear score head) or 'finetune' (MLP head).
        """
        super().__init__()
        print('Transformer is used for {}'.format(mode))
        self.pos_encoder = PositionalEncoding(hidden, dropout)
        encoder_layers = TransformerEncoderLayer(hidden, nhead, hidden, dropout, activation='gelu')
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        self.token_encoder = nn.Embedding(ntoken, hidden)
        # two segment ids (0/1) — presumably query vs. document; confirm with caller
        self.segment_encoder = nn.Embedding(2, hidden)
        self.norm_layer = nn.LayerNorm(hidden)
        self.hidden = hidden
        self.mode = mode
        self.dropout = nn.Dropout(dropout)
        if mode == 'pretrain':
            self.to_logics = nn.Linear(hidden, ntoken)  # MLM projection to vocab
            self.decoder = nn.Linear(hidden, 1)  # linear ranking score head
        elif mode == 'finetune':
            self.act = nn.ELU(alpha=1.0)
            self.fc1 = nn.Linear(hidden, 512)
            self.fc2 = nn.Linear(512, 256)
            self.fc3 = nn.Linear(256, 128)
            self.fc4 = nn.Linear(128, 1)
    def forward(self, src, src_segment, src_padding_mask=None, mlm_label=None):
        """Encode a batch and return ranking scores (plus MLM loss when
        pretraining in training mode).
        Args:
            src: token ids, shape [batch, seq_len] (transposed internally).
            src_segment: segment ids, same shape as ``src``.
            src_padding_mask: padding mask of shape [batch, seq_len].
            mlm_label: masked-LM targets; used only when mode='pretrain' and
                the module is in training mode.
        NOTE(review): inputs are moved to CUDA unconditionally; a GPU is required.
        """
        src = src.t().contiguous().cuda()  # -> [seq_len, batch]
        src_segment = src_segment.t().contiguous().cuda()
        src_padding_mask = src_padding_mask.cuda()
        # transformer input
        pos_emb = self.pos_encoder(src) # get position embedding
        token_emb = self.token_encoder(src) # get token embedding
        seg_emb = self.segment_encoder(src_segment) # get segment embedding
        x = token_emb + pos_emb + seg_emb
        x = self.norm_layer(x)
        x = self.dropout(x)
        output = self.transformer_encoder(
            src=x, \
            mask=None,
            src_key_padding_mask=src_padding_mask,
        )
        X = output[0, :, :] # first-token (CLS-style) state, shape [batch, hidden]
        X = self.dropout(X)
        if self.mode == 'pretrain': # for train
            scores = self.decoder(X)
            scores = torch.squeeze(scores, dim=-1)
            if self.training:
                # MLM objective on top of the ranking score during pretraining
                mlm_label = mlm_label.cuda()
                output = output.transpose(0, 1)
                logits = self.to_logics(output) # shape = [bs, seq_len, num_tokens]
                mlm_loss = F.cross_entropy(logits.transpose(1,2), # shape=[bs, num_class, seq_len]\
                                           mlm_label,\
                                           ignore_index=config._PAD_ # padding positions do not contribute
                                           )
                return scores, mlm_loss
            else:
                return scores
        elif self.mode == 'finetune':
            # 4-layer ELU MLP head producing one score per sequence
            h1 = self.act(self.fc1(X))
            h1 = self.dropout(h1)
            h2 = self.act(self.fc2(h1))
            h2 = self.dropout(h2)
            h3 = self.act(self.fc3(h2))
            h3 = self.dropout(h3)
            scores = self.fc4(h3)
            scores = torch.squeeze(scores, dim=-1)
            return scores
| 4,708 | 37.284553 | 114 | py |
# Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
import argparse
import os
import ruamel.yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_ve import DaVinciVE
from models.resnet import interpolate_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset import create_dataset, create_sampler, create_loader
from scheduler import create_scheduler
from optim import create_optimizer
from apex import amp
def train(model, data_loader, optimizer, tokenizer, epoch, warmup_epochs, device, scheduler, config):
    """Run one VE training epoch and return the averaged metrics as strings.
    Args:
        model: model returning a scalar loss when called with train=True.
        data_loader: yields (images, text, targets) batches.
        optimizer: apex-amp-initialized optimizer.
        tokenizer: tokenizer used to encode the raw text batch.
        epoch: current epoch index (used only in the log header).
        warmup_epochs: unused here; kept for a uniform train() signature.
        device: torch device the batches are moved to.
        scheduler: LR scheduler stepped once per batch.
        config: unused here; kept for a uniform train() signature.
    """
    model.train()
    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.8f}'))
    metric_logger.add_meter('loss', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    header = 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50
    step_size = 100  # NOTE(review): unused
    for i,(images, text, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        images, targets = images.to(device,non_blocking=True), targets.to(device,non_blocking=True)
        text_inputs = tokenizer(text, padding='longest', return_tensors="pt").to(device)
        loss = model(images, text_inputs, targets=targets, train=True)
        optimizer.zero_grad()
        # apex AMP: scale the loss before backprop to avoid fp16 underflow
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        # loss.backward()
        optimizer.step()
        scheduler.step()  # per-step (not per-epoch) LR schedule
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        metric_logger.update(loss=loss.item())
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Train Averaged stats:", metric_logger.global_avg())
    return {k: "{:.4f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(model, data_loader, tokenizer, device, config, info="None"):
    """Evaluate the model on a labelled loader and track classification accuracy.

    Returns a dict mapping each tracked metric name to its global average,
    formatted as a 4-decimal string.
    """
    model.eval()
    logger = utils.MetricLogger(delimiter=" ")
    log_header = f'{info} Evaluation:'
    for batch_images, batch_text, batch_targets in logger.log_every(data_loader, 50, log_header):
        batch_images = batch_images.to(device, non_blocking=True)
        batch_targets = batch_targets.to(device, non_blocking=True)
        encoded_text = tokenizer(batch_text, padding='longest', return_tensors="pt").to(device)
        logits = model(batch_images, encoded_text, targets=batch_targets, train=False)
        predicted = logits.max(1)[1]
        batch_acc = (batch_targets == predicted).sum() / batch_targets.size(0)
        logger.meters['acc'].update(batch_acc.item(), n=batch_images.size(0))
    # gather the stats from all processes
    logger.synchronize_between_processes()
    print(f"{info} Averaged stats:", logger.global_avg())
    return {name: "{:.4f}".format(meter.global_avg) for name, meter in logger.meters.items()}
def main(args, config):
    """Set up distributed training, build the VE dataset/model, and run the
    train/validate/test loop, logging and checkpointing on the main process."""
    utils.init_distributed_mode(args)
    device = torch.device(args.device)
    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True
    start_epoch = 0
    max_epoch = config['schedular']['epochs']
    warmup_epochs = config['schedular']['warmup_epochs']
    #### Dataset ####
    print("Creating VE dataset")
    datasets = create_dataset('ve', config)
    if args.distributed:
        # shard only the training split; val/test are evaluated on every rank
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        samplers = create_sampler(datasets, [True, False, False], num_tasks, global_rank)
    else:
        samplers = [None, None, None]
    train_loader, val_loader, test_loader = create_loader(datasets,samplers,
                                                          batch_size=[config['batch_size_train']]+[config['batch_size_test']]*2,
                                                          num_workers=[4,4,4],is_trains=[True,False,False],
                                                          collate_fns=[None,None,None])
    tokenizer = BertTokenizer.from_pretrained(args.encoder, bos_token='[CLS]', eos_token='[SEP]', add_single_sep=False)
    #### Model ####
    print("Creating model")
    model = DaVinciVE(config=config, encoder=args.encoder, tokenizer=tokenizer)
    model = model.to(device)
    arg_opt = utils.AttrDict(config['optimizer'])
    optimizer = create_optimizer(arg_opt, model)
    arg_sche = utils.AttrDict(config['schedular'])
    # convert epoch-based schedule settings into per-step counts
    step_per_epoch = len(train_loader)
    arg_sche['num_warmup_steps'] = arg_sche['warmup_epochs'] * step_per_epoch
    arg_sche['num_training_steps'] = arg_sche['epochs'] * step_per_epoch
    lr_scheduler, _ = create_scheduler(arg_sche, optimizer)
    if args.checkpoint:
        checkpoint = torch.load(args.checkpoint, map_location='cpu')
        state_dict = checkpoint['model']
        # pretrained weights are stored without the 'davinci.' prefix; rename keys
        for key in list(state_dict.keys())[:]:
            new_key = 'davinci.'+key
            state_dict[new_key] = state_dict[key]
            del state_dict[key]
        # reshape positional embedding to accomodate for image resolution change
        pos_embed_reshaped = interpolate_pos_embed(state_dict['davinci.visual_encoder.pos_embed'],model.davinci.visual_encoder)
        state_dict['davinci.visual_encoder.pos_embed'] = pos_embed_reshaped
        msg = model.load_state_dict(state_dict,strict=False)
        print('load checkpoint from %s'%args.checkpoint)
        print(msg)
    model_without_ddp = model
    model, optimizer = amp.initialize(model, optimizer, opt_level="O1")  # opt_level is the letter "O" followed by one, not "zero-one"
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
        model_without_ddp = model.module
    best = 0
    best_epoch = 0
    print("Start training")
    start_time = time.time()
    test_acc_dict = {}
    max_acc = 0
    for epoch in range(start_epoch, max_epoch):
        if not args.evaluate:
            if args.distributed:
                train_loader.sampler.set_epoch(epoch)
            train_stats = train(model, train_loader, optimizer, tokenizer, epoch, warmup_epochs, device, lr_scheduler, config)
        val_stats = evaluate(model, val_loader, tokenizer, device, config, info="Validation")
        test_stats = evaluate(model, test_loader, tokenizer, device, config, info="Test")
        test_acc_dict.update({epoch: test_stats['acc']})
        max_acc = max(max_acc, float(test_stats['acc']))
        if utils.is_main_process():
            if args.evaluate:
                log_stats = {**{f'val_{k}': v for k, v in val_stats.items()},
                             **{f'test_{k}': v for k, v in test_stats.items()},
                             'epoch': epoch,
                            }
                with open(os.path.join(args.output_dir, "log.txt"),"a") as f:
                    f.write(json.dumps(log_stats) + "\n")
            else:
                log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                             **{f'val_{k}': v for k, v in val_stats.items()},
                             **{f'test_{k}': v for k, v in test_stats.items()},
                             'epoch': epoch,
                            }
                with open(os.path.join(args.output_dir, "log.txt"),"a") as f:
                    f.write(json.dumps(log_stats) + "\n")
                # NOTE(review): the checkpoint and `best` are overwritten every
                # epoch without comparing to the previous best, and `best_epoch`
                # is never updated — confirm whether a comparison was intended.
                save_obj = {
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'config': config,
                    'epoch': epoch,
                }
                torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_best.pth'))
                best = float(val_stats['acc'])
        if args.evaluate:
            break
        dist.barrier()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
    print("All test acc: ", test_acc_dict)
    print("Best acc: ", max_acc)
    if utils.is_main_process():
        with open(os.path.join(args.output_dir, "log.txt"),"a") as f:
            f.write("best epoch: %d"%best_epoch)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/VE.yaml')
    parser.add_argument('--checkpoint', default='')
    parser.add_argument('--output_dir', default='output/VE')
    parser.add_argument('--encoder', default='bert-base-uncased')
    parser.add_argument('--text_decoder', default='bert-base-uncased')
    parser.add_argument('--evaluate', action='store_true')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=12, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--distributed', default=True, type=bool)
    parser.add_argument('--override_cfg', default="", type=str, help="Use ; to separate keys")
    args = parser.parse_args()
    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
    if args.override_cfg != "":
        # turn "key:val;key2:val2" into a YAML document and merge it
        # (one level deep) into the loaded config
        override_cfg_str = args.override_cfg.replace(";", "\n").replace(":", ": ")
        override_cfg = yaml.load(override_cfg_str, Loader=yaml.Loader)
        for k, v in override_cfg.items():
            if type(v) == dict:
                for kk, vv in v.items():
                    config[k][kk] = vv
            else:
                config[k] = v
    args.result_dir = os.path.join(args.output_dir, 'result')
    Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    Path(args.result_dir).mkdir(parents=True, exist_ok=True)
    # persist the effective (possibly overridden) config next to the outputs
    yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))
    main(args, config)
# Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
import argparse
import os
import ruamel.yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_nlvr import DaVinciNLVR
from models.resnet import interpolate_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset import create_dataset, create_sampler, create_loader
from scheduler import create_scheduler
from optim import create_optimizer
from apex import amp
def train(model, data_loader, optimizer, tokenizer, epoch, warmup_epochs, device, scheduler, config):
    """Run one NLVR training epoch and return the averaged metrics as strings."""
    model.train()
    logger = utils.MetricLogger(delimiter=" ")
    logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.8f}'))
    logger.add_meter('loss', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    log_header = f'Train Epoch: [{epoch}]'
    for left_image, right_image, text, labels in logger.log_every(data_loader, 50, log_header):
        left_image = left_image.to(device)
        right_image = right_image.to(device)
        labels = labels.to(device)
        encoded_text = tokenizer(text, padding='longest', return_tensors="pt").to(device)
        loss = model(left_image, right_image, encoded_text, targets=labels, train=True)
        optimizer.zero_grad()
        # apex AMP: scale the loss before backprop to avoid fp16 underflow
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        optimizer.step()
        scheduler.step()
        logger.update(lr=optimizer.param_groups[0]["lr"])
        logger.update(loss=loss.item())
    # gather the stats from all processes
    logger.synchronize_between_processes()
    print("Averaged stats:", logger.global_avg())
    return {name: "{:.4f}".format(meter.global_avg) for name, meter in logger.meters.items()}
@torch.no_grad()
def evaluate(model, data_loader, tokenizer, device, config, info="None"):
    """Evaluate the NLVR model on a labelled loader and track accuracy.
    Args:
        model: model returning class logits when called with train=False.
        data_loader: yields (image0, image1, text, targets) batches.
        tokenizer: tokenizer used to encode the raw text batch.
        device: torch device the batches are moved to.
        config: unused here; kept for a uniform evaluate() signature.
        info: label printed in the log header (e.g. "Validation" / "Test").
    Returns:
        Dict mapping metric name -> global average formatted as a 4-decimal string.
    """
    model.eval()
    metric_logger = utils.MetricLogger(delimiter=" ")
    header = f'{info} Evaluation:'
    print_freq = 50
    for image0, image1, text, targets in metric_logger.log_every(data_loader, print_freq, header):
        image0, image1, targets = image0.to(device), image1.to(device), targets.to(device)
        text_inputs = tokenizer(text, padding='longest', return_tensors="pt").to(device)
        prediction = model(image0, image1, text_inputs, targets=targets, train=False)
        # argmax over classes; compare against gold labels for batch accuracy
        _, pred_class = prediction.max(1)
        accuracy = (targets==pred_class).sum() / targets.size(0)
        metric_logger.meters['acc'].update(accuracy.item(), n=image0.size(0))
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print(f"{info} Averaged stats:", metric_logger.global_avg())
    return {k: "{:.4f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
def main(args, config):
    """Fine-tune DaVinci on NLVR2.

    Sets up (optionally distributed) training, builds data loaders, model,
    optimizer and scheduler, then trains/evaluates for
    ``config['schedular']['epochs']`` epochs. The main process writes one
    checkpoint and one JSON log line per epoch into ``args.output_dir``.
    """
    utils.init_distributed_mode(args)
    device = torch.device(args.device)

    # fix the seed for reproducibility (offset by rank so workers differ)
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    start_epoch = 0
    max_epoch = config['schedular']['epochs']
    warmup_epochs = config['schedular']['warmup_epochs']

    #### Dataset ####
    print("Creating dataset")
    datasets = create_dataset('nlvr', config)

    if args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        samplers = create_sampler(datasets, [True, False, False], num_tasks, global_rank)
    else:
        samplers = [None, None, None]

    train_loader, val_loader, test_loader = create_loader(
        datasets, samplers,
        batch_size=[config['batch_size_train']] * 3,
        num_workers=[4, 4, 4], is_trains=[True, False, False],
        collate_fns=[None, None, None])

    tokenizer = BertTokenizer.from_pretrained(args.encoder, bos_token='[CLS]', eos_token='[SEP]', add_single_sep=False)

    #### Model ####
    print("Creating model")
    model = DaVinciNLVR(config=config, encoder=args.encoder, text_decoder=args.text_decoder, tokenizer=tokenizer)
    model = model.to(device)

    arg_opt = utils.AttrDict(config['optimizer'])
    optimizer = create_optimizer(arg_opt, model)
    arg_sche = utils.AttrDict(config['schedular'])
    step_per_epoch = len(train_loader)
    arg_sche['num_warmup_steps'] = arg_sche['warmup_epochs'] * step_per_epoch
    arg_sche['num_training_steps'] = arg_sche['epochs'] * step_per_epoch
    lr_scheduler, _ = create_scheduler(arg_sche, optimizer)

    if args.checkpoint:
        checkpoint = torch.load(args.checkpoint, map_location='cpu')
        state_dict = checkpoint['model']
        # pretrained weights are stored without the 'davinci.' prefix used by
        # the fine-tuning wrapper; rename every key accordingly
        for key in list(state_dict.keys())[:]:
            new_key = 'davinci.' + key
            state_dict[new_key] = state_dict[key]
            del state_dict[key]
        # reshape positional embedding to accomodate for image resolution change
        pos_embed_reshaped = interpolate_pos_embed(state_dict['davinci.visual_encoder.pos_embed'], model.davinci.visual_encoder)
        state_dict['davinci.visual_encoder.pos_embed'] = pos_embed_reshaped
        msg = model.load_state_dict(state_dict, strict=False)
        print('load checkpoint from %s' % args.checkpoint)
        print(msg)

    model_without_ddp = model
    model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
        model_without_ddp = model.module

    print("Start training")
    start_time = time.time()
    best = 0
    best_epoch = 0
    test_acc_dict = {}
    max_acc = 0

    for epoch in range(start_epoch, max_epoch):
        if args.distributed:
            train_loader.sampler.set_epoch(epoch)
        train_stats = train(model, train_loader, optimizer, tokenizer, epoch, warmup_epochs, device, lr_scheduler, config)
        val_stats = evaluate(model, val_loader, tokenizer, device, config, info="Validation")
        test_stats = evaluate(model, test_loader, tokenizer, device, config, info="Test")
        test_acc_dict.update({epoch: test_stats['acc']})
        max_acc = max(max_acc, float(test_stats['acc']))

        if utils.is_main_process():
            log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                         **{f'val_{k}': v for k, v in val_stats.items()},
                         **{f'test_{k}': v for k, v in test_stats.items()},
                         'epoch': epoch,
                         }
            save_obj = {
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'config': config,
                'epoch': epoch,
            }
            torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_%02d.pth' % epoch))

            # BUG FIX: previously `best`/`best_epoch` were overwritten on every
            # epoch, so the "best epoch" written at the end was always the last
            # epoch; only update them when validation accuracy improves.
            if float(val_stats['acc']) > best:
                best = float(val_stats['acc'])
                best_epoch = epoch

            with open(os.path.join(args.output_dir, "log.txt"), "a") as f:
                f.write(json.dumps(log_stats) + "\n")
        dist.barrier()

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
    print("All test acc: ", test_acc_dict)
    print("Best acc: ", max_acc)

    if utils.is_main_process():
        with open(os.path.join(args.output_dir, "log.txt"), "a") as f:
            f.write("best epoch: %d" % best_epoch)
if __name__ == '__main__':
    # Script entry point: parse CLI flags, load/override the YAML config,
    # snapshot the effective config into the output dir, then run main().
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/NLVR.yaml')
    parser.add_argument('--checkpoint', default='')
    parser.add_argument('--output_dir', default='output/NLVR')
    parser.add_argument('--encoder', default='bert-base-uncased')
    parser.add_argument('--text_decoder', default='bert-base-uncased')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    # BUG FIX: `type=bool` converts any non-empty string (including "False")
    # to True; parse the flag text explicitly so `--distributed False` works.
    parser.add_argument('--distributed', default=True,
                        type=lambda s: s.lower() in ('true', '1', 'yes'))
    parser.add_argument('--override_cfg', default="", type=str, help="Use ; to separate keys")
    args = parser.parse_args()

    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
    if args.override_cfg != "":
        # overrides arrive as "key:val;key2:sub:val"; turning ";" into
        # newlines (and ":" into ": ") makes the string parseable as YAML
        override_cfg_str = args.override_cfg.replace(";", "\n").replace(":", ": ")
        override_cfg = yaml.load(override_cfg_str, Loader=yaml.Loader)
        for k, v in override_cfg.items():
            if type(v) == dict:
                for kk, vv in v.items():
                    config[k][kk] = vv
            else:
                config[k] = v

    Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))
    main(args, config)
| 9,857 | 39.904564 | 136 | py |
DaVinci | DaVinci-main/glue.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning a 🤗 Transformers model for sequence classification on GLUE."""
import argparse
import json
import logging
import math
import os
import random
from pathlib import Path
import ruamel.yaml as yaml
import datasets
import torch
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from models.model_glue import DaVinciGLUE
from models.resnet import interpolate_pos_embed
from models.tokenization_bert import BertTokenizer
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from huggingface_hub import Repository
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
PretrainedConfig,
SchedulerType,
default_data_collator,
get_scheduler,
)
from transformers.utils.versions import require_version
logger = get_logger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
# Maps each GLUE task to the dataset column name(s) holding its input
# sentence(s); single-sentence tasks use None as the second key.
task_to_keys = {
    "cola": ("sentence", None),
    "mnli": ("premise", "hypothesis"),
    "mrpc": ("sentence1", "sentence2"),
    "qnli": ("question", "sentence"),
    "qqp": ("question1", "question2"),
    "rte": ("sentence1", "sentence2"),
    "sst2": ("sentence", None),
    "stsb": ("sentence1", "sentence2"),
    "wnli": ("sentence1", "sentence2"),
}
def parse_args():
    """Parse the command-line arguments for GLUE fine-tuning.

    Requires either --task_name or at least one of --train_file /
    --validation_file; raises ValueError otherwise. Returns the parsed
    argparse.Namespace.
    """
    parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task")
    parser.add_argument(
        "--task_name",
        type=str,
        default=None,
        help="The name of the glue task to train on.",
        choices=list(task_to_keys.keys()),
    )
    parser.add_argument(
        "--train_file", type=str, default=None, help="A csv or a json file containing the training data."
    )
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=128,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
            # BUG FIX: help text said "--pad_to_max_lengh" (typo)
            " sequences shorter will be padded if `--pad_to_max_length` is passed."
        ),
    )
    parser.add_argument(
        "--pad_to_max_length",
        action="store_true",
        help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--use_slow_tokenizer",
        action="store_true",
        help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
    )
    parser.add_argument(
        "--per_device_train_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the training dataloader.",
    )
    parser.add_argument(
        "--per_device_eval_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the evaluation dataloader.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=5e-5,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
    parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--eval_steps",
        type=int,
        default=20000,
        # BUG FIX: help text was copy-pasted from --max_train_steps
        help="Run evaluation on the validation set every this many training steps.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--lr_scheduler_type",
        type=SchedulerType,
        default="linear",
        help="The scheduler type to use.",
        choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
    )
    parser.add_argument(
        "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument(
        "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
    )
    parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to enable experiment trackers for logging.",
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="all",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
            ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
            "Only applicable when `--with_tracking` is passed."
        ),
    )
    parser.add_argument(
        "--ignore_mismatched_sizes",
        action="store_true",
        help="Whether or not to enable to load a pretrained model whose head dimensions are different.",
    )
    args = parser.parse_args()
    print("args: ", args)
    # Sanity checks
    if args.task_name is None and args.train_file is None and args.validation_file is None:
        raise ValueError("Need either a task name or a training/validation file.")
    else:
        if args.train_file is not None:
            extension = args.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if args.validation_file is not None:
            extension = args.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
    if args.push_to_hub:
        assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."
    return args
def main():
    """Fine-tune a DaVinci text encoder on a GLUE task with 🤗 Accelerate.

    Flow: parse args -> load GLUE (or local csv/json) data -> build
    DaVinciGLUE + tokenizer (optionally loading a pretrained checkpoint) ->
    tokenize -> train with periodic in-epoch evaluation -> save the final
    model; for MNLI also evaluate on the mismatched validation split.
    """
    args = parse_args()
    # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
    # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
    # in the environment
    accelerator = (
        Accelerator(log_with=args.report_to, logging_dir=args.output_dir) if args.with_tracking else Accelerator()
    )
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)
    # only the local main process logs at full verbosity
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)
    # Handle the repository creation
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
    accelerator.wait_for_everyone()
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
    # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
    # label if at least two columns are provided.
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if args.task_name is not None:
        raw_datasets = datasets.load_dataset("glue", args.task_name)
    else:
        # Loading the dataset from local csv or json file.
        data_files = {}
        if args.train_file is not None:
            data_files["train"] = args.train_file
        if args.validation_file is not None:
            data_files["validation"] = args.validation_file
        extension = (args.train_file if args.train_file is not None else args.validation_file).split(".")[-1]
        raw_datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    if args.task_name is not None:
        # stsb is the only regression task in GLUE
        is_regression = args.task_name == "stsb"
        if not is_regression:
            label_list = raw_datasets["train"].features["label"].names
            num_labels = len(label_list)
        else:
            num_labels = 1
    else:
        # Trying to have good defaults here, don't hesitate to tweak to your needs.
        is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"]
        if is_regression:
            num_labels = 1
        else:
            # A useful fast method:
            # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
            label_list = raw_datasets["train"].unique("label")
            label_list.sort()  # Let's sort it for determinism
            num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = yaml.load(open('configs/GLUE.yaml', 'r'), Loader=yaml.Loader)
    # config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', bos_token='[CLS]', eos_token='[SEP]', add_single_sep=False)
    model = DaVinciGLUE(config=config, encoder='bert-base-uncased', tokenizer=tokenizer,num_labels=num_labels)
    if args.model_name_or_path:
        checkpoint = torch.load(args.model_name_or_path, map_location='cpu')
        state_dict = checkpoint['model']
        # pretrained keys lack the 'davinci.' prefix used by the wrapper model
        for key in list(state_dict.keys())[:]:
            new_key = 'davinci.'+key
            state_dict[new_key] = state_dict[key]
            del state_dict[key]
        # reshape positional embedding to accomodate for image resolution change
        pos_embed_reshaped = interpolate_pos_embed(state_dict['davinci.visual_encoder.pos_embed'],model.davinci.visual_encoder)
        state_dict['davinci.visual_encoder.pos_embed'] = pos_embed_reshaped
        msg = model.load_state_dict(state_dict,strict=False)
        print('load checkpoint from %s'%args.model_name_or_path)
        print(msg)
    # Preprocessing the datasets
    if args.task_name is not None:
        sentence1_key, sentence2_key = task_to_keys[args.task_name]
    else:
        # Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
        non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"]
        if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
            sentence1_key, sentence2_key = "sentence1", "sentence2"
        else:
            if len(non_label_column_names) >= 2:
                sentence1_key, sentence2_key = non_label_column_names[:2]
            else:
                sentence1_key, sentence2_key = non_label_column_names[0], None
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = None
    padding = "max_length" if args.pad_to_max_length else False
    def preprocess_function(examples):
        # Tokenize the texts
        texts = (
            (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
        )
        result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True)
        if "label" in examples:
            if label_to_id is not None:
                # Map labels to IDs (not necessary for GLUE tasks)
                # NOTE(review): `labels` is computed but never stored in
                # `result`, and `label_to_id` is always None in this script,
                # so this branch is dead — confirm before using custom maps.
                labels = [label_to_id[l] for l in examples["label"]]
            else:
                # In all cases, rename the column to labels because the model will expect that.
                result["labels"] = examples["label"]
        return result
    with accelerator.main_process_first():
        processed_datasets = raw_datasets.map(
            preprocess_function,
            batched=False,
            remove_columns=raw_datasets["train"].column_names,
            desc="Running tokenizer on dataset",
        )
    train_dataset = processed_datasets["train"]
    eval_dataset = processed_datasets["validation_matched" if args.task_name == "mnli" else "validation"]
    # Log a few random samples from the training set:
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    # DataLoaders creation:
    if args.pad_to_max_length:
        # If padding was already done ot max length, we use the default data collator that will just convert everything
        # to tensors.
        data_collator = default_data_collator
    else:
        # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
        # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
        # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))
    train_dataloader = DataLoader(
        train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size
    )
    eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
    # Optimizer
    # Split weights in two groups, one with weight decay and the other not.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    # Scheduler and math around the number of training steps.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    else:
        args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
    lr_scheduler = get_scheduler(
        name=args.lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=args.num_warmup_steps,
        num_training_steps=args.max_train_steps,
    )
    # Prepare everything with our `accelerator`.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to recalculate our total training steps as the size of the training dataloader may have changed.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # Figure out how many steps we should save the Accelerator states
    if hasattr(args.checkpointing_steps, "isdigit"):
        checkpointing_steps = args.checkpointing_steps
        if args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration.
    # We initialize the trackers only on main process because `accelerator.log`
    # only logs on main process and we don't want empty logs/runs on other processes.
    if args.with_tracking:
        if accelerator.is_main_process:
            experiment_config = vars(args)
            # TensorBoard cannot log Enums, need the raw value
            experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value
            accelerator.init_trackers("glue_no_trainer", experiment_config)
    # Get the metric function
    if args.task_name is not None:
        metric = load_metric("glue", args.task_name)
    else:
        metric = load_metric("accuracy")
    # Train!
    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.per_device_train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")
    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
    completed_steps = 0
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        # NOTE(review): this condition is always True when
        # args.resume_from_checkpoint is truthy, so the "most recent
        # checkpoint" fallback below is unreachable — confirm intent.
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    for epoch in range(starting_epoch, args.num_train_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        for step, data in enumerate(train_dataloader):
            # We need to skip steps until we reach the resumed step
            if args.resume_from_checkpoint and epoch == starting_epoch:
                if resume_step is not None and step < resume_step:
                    completed_steps += 1
                    continue
            loss = model(data,targets=data['labels'])
            #loss = outputs.loss
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / args.gradient_accumulation_steps
            accelerator.backward(loss)
            if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                progress_bar.update(1)
                completed_steps += 1
            if isinstance(checkpointing_steps, int):
                if completed_steps % checkpointing_steps == 0:
                    output_dir = f"step_{completed_steps }"
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
            if completed_steps >= args.max_train_steps:
                break
            # Periodic in-epoch evaluation on the validation split.
            if step % args.eval_steps == 0 or step == len(train_dataloader) - 1:
                real_step = step
                # NOTE(review): model.train() is never restored after this
                # mid-epoch evaluation, so the remainder of the epoch runs in
                # eval mode (dropout off) — confirm whether that is intended.
                model.eval()
                samples_seen = 0
                # inner `step` shadows the training-loop `step` until the
                # next outer iteration; `real_step` preserves it for logging
                for step, data in enumerate(eval_dataloader):
                    with torch.no_grad():
                        preds = model(data,targets=data['labels'],train=False)
                    if not is_regression:
                        _, predictions = preds.max(1)
                    else:
                        predictions = preds.squeeze()
                    #predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze()
                    predictions, references = accelerator.gather((predictions, data['labels']))
                    # If we are in a multiprocess environment, the last batch has duplicates
                    if accelerator.num_processes > 1:
                        if step == len(eval_dataloader) - 1:
                            predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                            references = references[: len(eval_dataloader.dataset) - samples_seen]
                        else:
                            samples_seen += references.shape[0]
                    metric.add_batch(
                        predictions=predictions,
                        references=references,
                    )
                eval_metric = metric.compute()
                logger.info(f"epoch {epoch}, step {real_step}: {eval_metric}")
                if args.with_tracking:
                    accelerator.log(
                        {
                            "accuracy" if args.task_name is not None else "glue": eval_metric,
                            "train_loss": total_loss.item() / len(train_dataloader),
                            "epoch": epoch,
                            "step": completed_steps,
                        },
                        step=completed_steps,
                    )
        if args.checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    # Save the final (unwrapped) model weights on the main process.
    if args.output_dir is not None:
        accelerator.wait_for_everyone()
        unwrapped_model = accelerator.unwrap_model(model)
        save_obj = {
            'model': unwrapped_model.state_dict(),
        }
        torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_best.pth'))
    if args.task_name == "mnli":
        # Final evaluation on mismatched validation set
        eval_dataset = processed_datasets["validation_mismatched"]
        eval_dataloader = DataLoader(
            eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
        )
        eval_dataloader = accelerator.prepare(eval_dataloader)
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            outputs = model(batch,targets=batch['labels'],train=False)
            _, predictions = outputs.max(1)
            metric.add_batch(
                predictions=accelerator.gather(predictions),
                references=accelerator.gather(batch["labels"]),
            )
        eval_metric = metric.compute()
        logger.info(f"mnli-mm: {eval_metric}")
if __name__ == "__main__":
    # Script entry point.
    main()
DaVinci | DaVinci-main/utils.py | import numpy as np
import io
import os
import time
from collections import defaultdict, deque
import datetime
import torch
import torch.distributed as dist
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """
    def __init__(self, window_size=20, fmt=None):
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        # default display: windowed median plus running global average
        self.fmt = "{median:.4f} ({global_avg:.4f})" if fmt is None else fmt
    def update(self, value, n=1):
        # the window stores the raw value once; the running totals weight it by n
        self.deque.append(value)
        self.count += n
        self.total += value * n
    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        count, total = stats.tolist()
        self.count = int(count)
        self.total = total
    @property
    def median(self):
        # median of the current window
        return torch.tensor(list(self.deque)).median().item()
    @property
    def avg(self):
        # mean of the current window
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()
    @property
    def global_avg(self):
        # mean over every value ever seen (not just the window)
        return self.total / self.count
    @property
    def max(self):
        return max(self.deque)
    @property
    def value(self):
        return self.deque[-1]
    def __str__(self):
        return self.fmt.format(median=self.median,
                               avg=self.avg,
                               global_avg=self.global_avg,
                               max=self.max,
                               value=self.value)
class MetricLogger(object):
    """Collects named SmoothedValue meters and pretty-prints them while
    iterating over a data loader via log_every()."""
    def __init__(self, delimiter="\t"):
        # unknown meter names are auto-created with default SmoothedValue
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        """Feed one scalar per keyword into the meter of the same name."""
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def __getattr__(self, attr):
        # expose meters as attributes, e.g. logger.loss
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))
    def __str__(self):
        # "name: <windowed stats>" for every meter, joined by the delimiter
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)
    def global_avg(self):
        """Return "name: global_avg" for every meter, joined by the delimiter."""
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {:.4f}".format(name, meter.global_avg)
            )
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        """All-reduce every meter's count/total across ranks."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()
    def add_meter(self, name, meter):
        """Register a meter with a custom format string."""
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None, dataset_len=None, epoch_info=None):
        """Generator: yield items from `iterable` while printing progress,
        ETA, meter values, per-iteration timing and (if CUDA) peak memory
        every `print_freq` steps.

        dataset_len overrides len(iterable); epoch_info=(start_epoch,
        end_epoch) makes it iterate multiple epochs' worth of steps and
        derive the header from the current epoch.
        """
        i = 0
        if not header:
            header = ''
        if not dataset_len:
            dataset_len = len(iterable)
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # right-align the step counter to the width of dataset_len
        space_fmt = ':' + str(len(str(dataset_len))) + 'd'
        _msg = [
            '[{0' + space_fmt + '}/{1}]',
            'eta: {eta}',
            '{meters}',
            'time: {time}',
            'data: {data}'
        ]
        if torch.cuda.is_available():
            _msg.append('max mem: {memory:.0f}')
        _msg = self.delimiter.join(_msg)
        MB = 1024.0 * 1024.0
        iterable = iter(iterable)
        train_steps = dataset_len
        if epoch_info:
            start_epoch, end_epoch = epoch_info
            train_steps = (end_epoch - start_epoch) * dataset_len
        for i in range(train_steps):
            obj = next(iterable)
            # time spent waiting for data vs. total time per iteration
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if epoch_info:
                header = int(i / dataset_len) + start_epoch
                header = 'Train step: [{}]'.format(header)
            log_msg = header + " " + _msg
            if (i % dataset_len) % print_freq == 0 or i == dataset_len - 1:
                # ETA estimated from the smoothed per-iteration time
                eta_seconds = iter_time.global_avg * (dataset_len - i % dataset_len)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i % dataset_len, dataset_len, eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i % dataset_len, dataset_len, eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            # NOTE(review): this increment is immediately overwritten by the
            # for-loop's next assignment to `i`, so it has no effect
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / dataset_len))
class AttrDict(dict):
    """A dict whose entries can also be read and written as attributes."""
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # alias the attribute namespace to the mapping itself so that
        # d.key and d['key'] always stay in sync
        self.__dict__ = self
def compute_acc(logits, label, reduction='mean'):
    """Accuracy of argmax(logits) against `label`.

    reduction='mean' returns a Python float; reduction='none' returns a
    detached per-sample 0/1 float tensor.
    """
    hits = (torch.argmax(logits, dim=1) == label).float()
    if reduction == 'none':
        return hits.detach()
    if reduction == 'mean':
        return hits.mean().item()
def compute_n_params(model, return_str=True):
    """Count the parameters of `model`.

    Returns a human-readable string like '1.2M' / '3.4K' by default, or the
    raw integer count when return_str is False.
    """
    total = 0
    for param in model.parameters():
        total += param.numel()
    if not return_str:
        return total
    if total >= 1e6:
        return '{:.1f}M'.format(total / 1e6)
    return '{:.1f}K'.format(total / 1e3)
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        # any rank can still force output with print(..., force=True)
        force = kwargs.pop('force', False)
        if force or is_master:
            builtin_print(*args, **kwargs)

    __builtin__.print = print
def is_dist_avail_and_initialized():
    """Return True only when torch.distributed is both compiled in AND a
    process group has already been initialized."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Total number of processes in the group (1 when not distributed)."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """Rank of this process in the group (0 when not distributed)."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    """True exactly on the rank-0 (master) process."""
    rank = get_rank()
    return rank == 0
def save_on_master(*args, **kwargs):
    """``torch.save`` wrapper that is a no-op on non-master ranks, so only
    one process writes each checkpoint."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Initialise torch.distributed from launcher environment variables.

    Mutates *args* in place: sets ``rank``, ``world_size``, ``gpu``,
    ``distributed`` and ``dist_backend``.  Supports torchrun-style
    launchers (RANK/WORLD_SIZE/LOCAL_RANK) and SLURM (SLURM_PROCID);
    falls back to single-process mode otherwise.
    """
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        # Launched by torchrun / torch.distributed.launch.
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        # SLURM: derive the local GPU index from the global rank.
        # NOTE(review): args.world_size is assumed to be set elsewhere on
        # this path — confirm against the SLURM launch script.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    # Bind this process to its GPU *before* creating the process group.
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    # Synchronize all ranks before continuing.
    torch.distributed.barrier()
    # Silence print() on every rank except 0.
    setup_for_distributed(args.rank == 0)
DaVinci | DaVinci-main/image_finetune.py | # Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
import argparse
import builtins
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from optim.lars import LARS
import math
import PIL
from timm.data import create_transform
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.data.mixup import Mixup
from timm.utils import accuracy as timm_accuracy
from models.model_imageft import DaVinciImageFT
import ruamel.yaml as yaml
from models.tokenization_bert import BertTokenizer
from models.resnet import interpolate_pos_embed
import sys
from pathlib import Path
# Lowercase dataset name -> torchvision dataset class, used to build
# train/val splits for the non-ImageNet finetuning path.
DATASETS = {
    "celeba": datasets.CelebA,
    "cifar10": datasets.CIFAR10,
    "cifar100": datasets.CIFAR100,
    "emnist": datasets.EMNIST,
    "fakedata": datasets.FakeData,
    "fashionmnist": datasets.FashionMNIST,
    "flickr8k": datasets.Flickr8k,
    "flickr30k": datasets.Flickr30k,
    "inaturalist": datasets.INaturalist,
    "kmnist": datasets.KMNIST,
    "lfwpeople": datasets.LFWPeople,
    "lsun": datasets.LSUN,
    "mnist": datasets.MNIST,
    "omniglot": datasets.Omniglot,
    "places365": datasets.Places365,
    "qmnist": datasets.QMNIST,
    "semeion": datasets.SEMEION,
    "sbu": datasets.SBU,
    "stl10": datasets.STL10,
    "svhn": datasets.SVHN,
    "usps": datasets.USPS,
}
# Make the repo root and models/ importable regardless of the CWD.
root_dir = Path(__file__).parent.absolute()
model_dir = root_dir / 'models'
sys.path.insert(0, str(root_dir))
sys.path.insert(0, str(model_dir))
# All lowercase callable names in torchvision.models, for --arch choices.
model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models.__dict__[name]))
# Command-line interface (MoCo/DeiT-style image finetuning script).
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
                    choices=model_names,
                    help='model architecture: ' +
                        ' | '.join(model_names) +
                        ' (default: resnet50)')
parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
                    help='number of data loading workers (default: 32)')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--blr', type=float, default=0.1, metavar='LR',
                    help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
parser.add_argument('--min_lr', type=float, default=0., metavar='LR',
                    help='lower lr bound for cyclic schedulers that hit 0')
parser.add_argument('--warmup_epochs', type=int, default=10, metavar='N',
                    help='epochs to warmup LR')
parser.add_argument('--schedule', default=[60, 80], nargs='*', type=int,
                    help='learning rate schedule (when to drop lr by a ratio)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=0., type=float,
                    metavar='W', help='weight decay (default: 0.)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=100, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
# Distributed-training options.
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')
# DaVinci checkpoint / config options.
parser.add_argument('--pretrained', default='./pretrain_coco_vg_6490601_20220429-004728/model_state_epoch_38.th', type=str,
                    help='path to moco pretrained checkpoint')
parser.add_argument('--encoder', default='bert-base-uncased')
parser.add_argument('--text_decoder', default='bert-base-uncased')
parser.add_argument('--config', default='./configs/image_ft.yaml')
parser.add_argument('--override_cfg', default="", type=str, help="Use ; to separate keys")
# Augmentation parameters
parser.add_argument('--color_jitter', type=float, default=None, metavar='PCT',
                    help='Color jitter factor (enabled only when not using Auto/RandAug)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                    help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m9-mstd0.5-inc1)'),
parser.add_argument('--smoothing', type=float, default=0.1,
                    help='Label smoothing (default: 0.1)')
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
                    help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
                    help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
                    help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
                    help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
                    help='mixup alpha, mixup enabled if > 0.')
parser.add_argument('--cutmix', type=float, default=1.0,
                    help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
                    help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup_prob', type=float, default=1.0,
                    help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
                    help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup_mode', type=str, default='batch',
                    help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
parser.add_argument('--nb_classes', default=1000, type=int,
                    help='number of the classification types')
# Best top-1 validation accuracy seen so far (updated in main_worker).
best_acc1 = 0
def build_transform(is_train, args):
    """Build the image transform pipeline.

    Train: timm's ``create_transform`` (RandAugment + random erasing,
    DeiT/MAE recipe).  Eval: resize keeping the 224/256 crop ratio,
    center-crop, tensor conversion and ImageNet normalization.
    """
    mean = IMAGENET_DEFAULT_MEAN
    std = IMAGENET_DEFAULT_STD

    if is_train:
        # This should always dispatch to transforms_imagenet_train.
        return create_transform(
            input_size=args.input_size,
            is_training=True,
            color_jitter=args.color_jitter,
            auto_augment=args.aa,
            interpolation='bicubic',
            re_prob=args.reprob,
            re_mode=args.remode,
            re_count=args.recount,
            mean=mean,
            std=std,
        )

    # Eval transform: maintain the same resize/crop ratio w.r.t. 224 images.
    crop_pct = 224 / 256 if args.input_size <= 224 else 1.0
    resize_size = int(args.input_size / crop_pct)
    eval_ops = [
        transforms.Resize(resize_size, interpolation=PIL.Image.BICUBIC),
        transforms.CenterCrop(args.input_size),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ]
    return transforms.Compose(eval_ops)
def main():
    """Parse CLI args, merge the YAML config into *args*, and launch
    training — either spawning one ``main_worker`` per GPU
    (``--multiprocessing-distributed``) or calling it in-process."""
    args = parser.parse_args()
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()
    # currently support the override of params at max depth 2
    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
    if args.override_cfg != "":
        # "k:v;k2:v2" -> YAML text, then merged key-by-key into config.
        override_cfg_str = args.override_cfg.replace(";", "\n").replace(":", ": ")
        override_cfg = yaml.load(override_cfg_str, Loader=yaml.Loader)
        for k, v in override_cfg.items():
            if type(v) == dict:
                for kk, vv in v.items():
                    config[k][kk] = vv
            else:
                config[k] = v
    # YAML config wins over the CLI defaults for these hyper-parameters.
    args.blr = config['lr']
    args.epochs = config['epochs']
    args.batch_size = config['batch_size_train']
    # NOTE(review): effective batch size hard-codes 8 GPUs — confirm.
    eff_batch_size = args.batch_size * 8 # 8GPUs
    args.input_size = config['image_res']
    # Linear lr scaling rule: absolute_lr = base_lr * eff_batch_size / 256.
    args.lr = args.blr * eff_batch_size / 256
    print("base lr: %6.6f" % (args.lr * 256 / eff_batch_size))
    print("actual lr: %6.6f" % args.lr)
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args, config))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args, config)
def main_worker(gpu, ngpus_per_node, args, config):
    """Per-process training entry point.

    Builds the DaVinci finetuning model, loads the pretraining checkpoint,
    wraps the model for (distributed) data parallelism, sets up mixup /
    criterion / optimizer and the data loaders, then runs the
    train-validate epoch loop, checkpointing on rank 0.
    """
    global best_acc1
    args.gpu = gpu
    # suppress printing if not master
    if args.multiprocessing_distributed and args.gpu != 0:
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model
    tokenizer = BertTokenizer.from_pretrained(args.encoder, bos_token='[CLS]', eos_token='[SEP]', add_single_sep=False)
    model = DaVinciImageFT(config=config, encoder=args.encoder, text_decoder=args.text_decoder, tokenizer=tokenizer)
    # load from pre-trained, before DistributedDataParallel constructor
    if args.pretrained:
        if os.path.isfile(args.pretrained):
            print("=> loading checkpoint '{}'".format(args.pretrained))
            checkpoint = torch.load(args.pretrained, map_location='cpu')
            state_dict = checkpoint['model']
            # Re-key the pretraining weights under the 'davinci.' wrapper.
            for key in list(state_dict.keys())[:]:
                new_key = 'davinci.'+key
                state_dict[new_key] = state_dict[key]
                del state_dict[key]
            # reshape positional embedding to accomodate for image resolution change
            pos_embed_reshaped = interpolate_pos_embed(state_dict['davinci.visual_encoder.pos_embed'],model.davinci.visual_encoder)
            state_dict['davinci.visual_encoder.pos_embed'] = pos_embed_reshaped
            msg = model.load_state_dict(state_dict,strict=False)
            print('loaded checkpoint from %s'%args.pretrained)
            print(msg)
            # assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}
        else:
            print("=> no checkpoint found at '{}'".format(args.pretrained))
    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    # Mixup/CutMix augmentation (timm), enabled when any alpha is set.
    mixup_fn = None
    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
    if mixup_active:
        print("Mixup is activated!")
        mixup_fn = Mixup(
            mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
            prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
            label_smoothing=args.smoothing, num_classes=args.nb_classes)
    if mixup_fn is not None:
        # smoothing is handled with mixup label transform
        criterion = SoftTargetCrossEntropy().cuda(args.gpu)
    elif args.smoothing > 0.:
        criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda(args.gpu)
    else:
        criterion = torch.nn.CrossEntropyLoss().cuda(args.gpu)
    print("criterion = %s" % str(criterion))
    # optimize only the linear classifier
    parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
    print("config['optimizer']", config['optimizer'])
    if config['optimizer'] == 'lars':
        optimizer = LARS(parameters, lr=args.lr, weight_decay=args.weight_decay)
    elif config['optimizer'] == 'adamw':
        optimizer = torch.optim.AdamW(parameters, lr=args.lr)
    else:
        optimizer = torch.optim.SGD(parameters, args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    print("optimizer = ", optimizer)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Data loading code
    data_path = config['root_dir'] + config['dataset']
    traindir = os.path.join(data_path, 'train')
    valdir = os.path.join(data_path, 'val')
    if config['dataset'] == 'imagenet':
        train_transform = build_transform(True, args)
        train_dataset = datasets.ImageFolder(traindir, transform=train_transform)
        val_transform = build_transform(False, args)
        val_dataset = datasets.ImageFolder(valdir, transform=val_transform)
    else:
        # Non-ImageNet datasets use a simpler fixed augmentation pipeline.
        normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                         std=[0.5, 0.5, 0.5])
        train_transform = transforms.Compose([
            transforms.RandomResizedCrop(config['image_res'], interpolation=3),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        test_transform = transforms.Compose([
            transforms.Resize(288, interpolation=3),
            transforms.CenterCrop(config['image_res']),
            transforms.ToTensor(),
            normalize,
            ])
        train_dataset = DATASETS[config['dataset']](
            traindir, train=True, download=True, transform=train_transform)
        val_dataset = DATASETS[config['dataset']](
            valdir, train=False, download=True, transform=test_transform)
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        validate(val_loader, model, criterion, args, tokenizer)
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args, tokenizer, mixup_fn)
        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args, tokenizer)
        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        if not args.multiprocessing_distributed or (args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer' : optimizer.state_dict(),
            }, is_best)
    print("best_acc1 = ", best_acc1)
def train(train_loader, model, criterion, optimizer, epoch, args, tokenizer, mixup_fn):
    """Run one finetuning epoch over *train_loader*, updating the model
    in place.  The lr follows a per-iteration cosine schedule.

    Note: *tokenizer* is accepted for interface symmetry with validate()
    but is not used here; the top1/top5 meters are created but never
    updated in this loop (mixup targets are soft).
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.8f')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    lr_log = AverageMeter('lr', ':.8f')
    progress = ProgressMeter(
        len(train_loader),
        [lr_log, batch_time, data_time, losses, top1, top5],
        prefix="Epoch: [{}]".format(epoch))
    #Switch to train mode
    model.train()
    end = time.time()
    for i, (images, target) in enumerate(train_loader): # images: [4096, 3, 224, 224], target: [4096]
        # measure data loading time
        data_time.update(time.time() - end)
        # FROM MAE: we use a per iteration (instead of per epoch) lr scheduler
        adjust_learning_rate(optimizer, i / len(train_loader) + epoch, args)
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
        target = target.cuda(args.gpu, non_blocking=True)
        if mixup_fn is not None:
            # Mixup/CutMix turns hard labels into soft targets.
            images, target = mixup_fn(images, target)
        # compute output
        output = model(images, train=True) # output: [4096, 1000]
        loss = criterion(output, target)
        # measure accuracy and record loss
        losses.update(loss.item(), images.size(0))
        lr_log.update(optimizer.param_groups[0]["lr"], images.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.display(i)
def validate(val_loader, model, criterion, args, tokenizer):
    """Evaluate on *val_loader* and return the average top-1 accuracy.

    Always uses a plain CrossEntropyLoss for the reported loss; the
    *criterion* and *tokenizer* arguments are accepted but unused here.
    """
    eval_criterion = torch.nn.CrossEntropyLoss()
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(val_loader),
        [batch_time, losses, top1, top5],
        prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader): # images [512, 3, 256, 256] target [512]
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
            # compute output
            output = model(images, train=False)
            loss = eval_criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                progress.display(i)
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize *state* to *filename*; when *is_best* is set, mirror the
    file to 'model_best.pth.tar' in the working directory."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
def sanity_check(state_dict, pretrained_weights):
    """
    Linear classifier should not change any weights other than the linear layer.
    This sanity check asserts nothing wrong happens (e.g., BN stats updated).

    Args:
        state_dict: the finetuned model's state dict; keys are expected to
            carry the 'module.davinci.' DDP+wrapper prefix.
        pretrained_weights: path to the pretraining checkpoint, with the
            tensors stored under its 'model' key.
    """
    print("=> loading '{}' for sanity check".format(pretrained_weights))
    checkpoint = torch.load(pretrained_weights, map_location="cpu")
    # state_dict_pre = checkpoint['state_dict']
    state_dict_pre = checkpoint['model']
    for k in list(state_dict.keys()):
        # only ignore fc layer — the classifier head is allowed to change
        if 'fc.weight' in k or 'fc.bias' in k:
            continue
        # name in pretrained model (strip the DDP + wrapper prefix)
        k_pre = k[len('module.davinci.'):]
        # BUG FIX: the original used `.all()`, which only flagged a tensor
        # when EVERY element changed; `.any()` flags it as soon as a single
        # element drifts from the pretrained weights.
        if (state_dict[k].cpu() != state_dict_pre[k_pre]).any():
            print(f"{k} is changed in linear classifier training.")
    print("=> sanity check passed.")
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Pretty-prints a '[batch/total]' counter plus a list of meters."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print one tab-separated progress line for *batch*."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # Counter field width = number of digits in the batch total.
        num_digits = len(str(num_batches // 1))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Decay the learning rate with half-cycle cosine after warmup"""
    if epoch < args.warmup_epochs:
        # Linear warmup from 0 to args.lr over warmup_epochs.
        lr = args.lr * epoch / args.warmup_epochs
    else:
        # Half-cycle cosine from args.lr down to args.min_lr.
        progress = (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs)
        lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * (1. + math.cos(math.pi * progress))
    for param_group in optimizer.param_groups:
        # Honor per-group scaling (e.g. layer-wise lr decay) when present.
        if "lr_scale" in param_group:
            param_group["lr"] = lr * param_group["lr_scale"]
        else:
            param_group["lr"] = lr
    return lr
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # (batch, maxk) top-prediction indices, transposed to (maxk, batch)
        # so row j holds every sample's (j+1)-th guess.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        results = []
        for k in topk:
            hits_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            results.append(hits_k.mul_(100.0 / batch_size))
        return results
# Script entry point: parse args and launch (possibly multi-process) training.
if __name__ == '__main__':
    main()
| 27,204 | 39.665172 | 140 | py |
DaVinci | DaVinci-main/image_sampling.py | # Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
import argparse
import os
import sys
import ruamel.yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_image_sampling import DaVinci
from models.tokenization_bert import BertTokenizer
import utils
from dataset import create_dataset, create_sampler, create_loader
from scheduler import create_scheduler
from optim import create_optimizer
from torch.optim import Optimizer
from torch import optim
from torch.distributed.elastic.multiprocessing.errors import record
from util.checkpointer import Checkpointer
from util.hdfs_io import hmkdir, hcopy
from accelerators.apex_ddp_accelerator import ApexDDPAccelerator
# for dall_e import module, we need to add root path
root_dir = Path(__file__).parent.absolute()
model_dir = root_dir / 'models'
# Prepend repo root and models/ so the dall_e modules resolve from any CWD.
sys.path.insert(0, str(root_dir))
sys.path.insert(0, str(model_dir))
# Maximum caption length (in tokenizer tokens) fed to the model.
MAX_TOKENS = 25
@torch.no_grad()
def train(model, pair_data_loader, optimizer, epoch_info, device, scheduler, config, accelerator, checkpointer, tokenizer):
    """Despite the name, this runs image *generation/evaluation*: the model
    is put in eval mode and the whole loop executes under ``torch.no_grad``
    (no optimizer step is ever taken).  Logs loss/lr via MetricLogger and
    appends per-checkpoint stats to ./log.txt on the main process.

    Returns a dict of formatted global-average meter values.
    """
    # eval - image generation
    model.eval()
    start_epoch, _ = epoch_info
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(
        window_size=50, fmt='{value:.8f}'))
    metric_logger.add_meter('loss', utils.SmoothedValue(
        window_size=50, fmt='{value:.4f}'))
    header = 'Train Epoch: [{}]'.format(start_epoch)
    print_freq = 50
    world_size = int(os.environ.get('WORLD_SIZE', 1))
    step_per_epoch = math.ceil(
        config['train_dataset_size'] / (config['batch_size']*world_size))
    current_step = start_epoch * step_per_epoch
    global_step = current_step + 1
    for i, (image, visual_token_image, org_texts, fname) in enumerate(metric_logger.log_every(pair_data_loader, print_freq, header, step_per_epoch, epoch_info)):
        current_epoch = int(global_step/step_per_epoch)
        image = image.to(device, non_blocking=True)
        visual_token_image = visual_token_image.to(device,non_blocking=True)
        prefix_image = None
        prefix_image_small = None
        suffix_image_small = visual_token_image
        # Repeat the caption list num_images times — presumably to sample
        # several images per caption; TODO confirm against the dataset code.
        org_texts = org_texts * config["num_images"]
        text_full = tokenizer(org_texts, padding='max_length', truncation=True, max_length=MAX_TOKENS, return_tensors="pt").to(device)
        loss, logits = model(image, context=None, gen_text=None, text_full=text_full, prefix_image=prefix_image, suffix_image=suffix_image_small,
                            prefix_image_small=prefix_image_small, visual_token_image=None, use_dalle=True, train=True, decode=False, raw_caption=org_texts, captionindex=i)
        metric_logger.update(loss=loss.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        global_step += 1
        train_stats = {k: "{:.3f}".format(
            meter.global_avg) for k, meter in metric_logger.meters.items()}
        # Periodically dump averaged stats to log.txt on the master rank.
        if global_step % step_per_epoch == 0 or global_step % config['checkpoint_frequent'] == 0:
            if utils.is_main_process():
                log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                             'epoch': current_epoch,
                             }
                with open("./log.txt", "a") as f:
                    f.write(json.dumps(log_stats) + "\n")
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger.global_avg())
    return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
@record
def main(args, config):
    """Set up distributed mode, data loaders, the DaVinci model, optimizer
    and scheduler, optionally load a checkpoint, then run the (no-grad)
    image-generation pass in ``train`` and copy logs to *args.output_dir*."""
    utils.init_distributed_mode(args)
    device = torch.device(args.device)
    if utils.is_main_process():
        print(f"### val_file: {config['val_file']}")
        sys.stdout.flush()
        # Persist the effective config next to the run outputs.
        yaml.dump(config, open('./config.yaml', 'w'))
        hcopy('./config.yaml', args.output_dir)
    # fix the seed for reproducibility (rank offset keeps ranks distinct)
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True
    start_epoch = 0
    max_epoch = config['schedular']['epochs']
    #### Dataset ####
    print("Creating dataset")
    pair_dataset = [create_dataset('dalle_gen', config)]
    if args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        samplers = create_sampler(
            pair_dataset, [False], num_tasks, global_rank)
    else:
        samplers = [None]
    pair_data_loader = create_loader(pair_dataset, samplers, batch_size=[
        config['batch_size']], num_workers=[4], is_trains=[False], collate_fns=[None])[0]
    tokenizer = BertTokenizer.from_pretrained(
        args.encoder, bos_token='[CLS]', eos_token='[SEP]', add_single_sep=False)
    #### Model ####
    print("Creating model")
    model = DaVinci(config=config, encoder=args.encoder, text_decoder=args.text_decoder,
                    tokenizer=tokenizer, init_deit=True, init_dalle=True, device=device)
    model = model.to(device)
    print("DAVINCI have {} paramerters in total".format(
        sum(x.numel() for x in model.parameters())))
    world_size = int(os.environ.get('WORLD_SIZE', 1))
    rank = int(os.environ.get('RANK', 0))
    local_rank = int(os.environ.get('LOCAL_RANK', 0))
    arg_opt = utils.AttrDict(config['optimizer'])
    optimizer = create_optimizer(arg_opt, model)
    arg_sche = utils.AttrDict(config['schedular'])
    # Warmup/total step counts are derived from dataset size, global batch
    # size and gradient-accumulation steps.
    update_steps_per_epoch = math.ceil(config['train_dataset_size'] / (
        config['batch_size']*world_size) / int(config['accelerator']['GRAD_ACCUMULATE_STEPS']))
    arg_sche['num_warmup_steps'] = arg_sche['warmup_epochs'] * \
        update_steps_per_epoch
    arg_sche['num_training_steps'] = arg_sche['epochs'] * \
        update_steps_per_epoch
    lr_scheduler, _ = create_scheduler(arg_sche, optimizer)
    arg_acc = utils.AttrDict(config['accelerator'])
    accelerator = ApexDDPAccelerator(arg_acc, logger=None)
    if args.checkpoint:
        checkpoint = torch.load(args.checkpoint, map_location='cpu')
        state_dict = checkpoint['model']
        start_epoch = checkpoint['epoch']+1
        model.load_state_dict(state_dict, strict=False) # for clip model
        print('load checkpoint from %s' % args.checkpoint)
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu])
        model_without_ddp = model.module
    checkpointer = Checkpointer(args.output_dir)
    print("Start training")
    start_time = time.time()
    epoch_info = (start_epoch, max_epoch)
    train(model, pair_data_loader, optimizer, epoch_info, device, lr_scheduler, config,
          accelerator, checkpointer, tokenizer)
    dist.barrier()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
    if utils.is_main_process():
        hcopy('./log.txt', args.output_dir)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/Pretrain_davinci.yaml')
    parser.add_argument('--checkpoint', default='')
    # NOTE(review): argparse `type=bool` treats ANY non-empty string as True
    # (e.g. --resume False is truthy) — a known argparse pitfall.
    parser.add_argument('--resume', default=False, type=bool)
    parser.add_argument('--output_dir', default='Pretrain/')
    parser.add_argument('--encoder', default='bert-base-uncased')
    parser.add_argument('--text_decoder', default='bert-base-uncased')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://',
                        help='url used to set up distributed training')
    parser.add_argument('--distributed', default=True, type=bool)
    parser.add_argument('--override_cfg', default="",
                        type=str, help="Use ; to separate keys")
    args = parser.parse_args()
    # currently support the override of params at max depth 2
    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
    if args.override_cfg != "":
        # "k:v;k2:v2" -> YAML text, merged key-by-key into config.
        override_cfg_str = args.override_cfg.replace(
            ";", "\n").replace(":", ": ")
        override_cfg = yaml.load(override_cfg_str, Loader=yaml.Loader)
        for k, v in override_cfg.items():
            if type(v) == dict:
                for kk, vv in v.items():
                    config[k][kk] = vv
            else:
                config[k] = v
    # Local paths get a regular mkdir; hmkdir handles the HDFS case.
    if not args.output_dir.startswith('hdfs'):
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    hmkdir(args.output_dir)
    print("args.output_dir: ", args.output_dir)
    main(args, config)
| 9,275 | 38.305085 | 172 | py |
DaVinci | DaVinci-main/gen_coco.py | # Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
import argparse
import os, sys
import ruamel.yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.davinci_pretrain import DaVinci
from models.resnet import interpolate_pos_embed
from models.tokenization_bert import BertTokenizer
from dataset.utils import save_result
import utils
from dataset import create_dataset, create_sampler, create_loader
from scheduler import create_scheduler
from optim import create_optimizer
from apex import amp
# Make the repo root and models/ importable regardless of the launch
# directory (the models package imports dall_e modules by bare name).
root_dir = Path(__file__).parent.absolute()  # for dall_e import module, we need to add root path
model_dir = root_dir / 'models'
sys.path.insert(0, str(root_dir))
sys.path.insert(0, str(model_dir))

# Caption truncation length (in tokenizer tokens) used throughout this script.
MAX_TOKENS = 25
def train(model, data_loader, optimizer, tokenizer, epoch, warmup_epochs, device, scheduler, config):
    """Run one fine-tuning epoch of image-conditioned caption generation.

    Returns a dict mapping each tracked metric name to its formatted
    global average over the epoch.
    """
    model.train()

    logger = utils.MetricLogger(delimiter="  ")
    logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.8f}'))
    logger.add_meter('loss', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    header = f'Train Epoch: [{epoch}]'
    print_freq = 50

    # make the distributed sampler reshuffle differently every epoch
    if args.distributed:
        data_loader.sampler.set_epoch(epoch)

    for images, texts in logger.log_every(data_loader, print_freq, header):
        optimizer.zero_grad()
        images = images.to(device, non_blocking=True)

        # the encoder prompt is empty: generation conditions on the image only
        text_input = tokenizer([""] * images.size(0), return_tensors="pt").to(device)
        text_target = tokenizer(
            texts, padding='longest', truncation=True,
            max_length=MAX_TOKENS, return_tensors="pt").to(device)

        loss, logits = model(images, text_input, text_target, train=True, decode=False)

        # apex mixed-precision backward pass
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        optimizer.step()
        scheduler.step()

        logger.update(loss=loss.item())
        logger.update(lr=optimizer.param_groups[0]["lr"])

    # gather the stats from all processes
    logger.synchronize_between_processes()
    print("Train Averaged stats:", logger.global_avg())
    return {name: "{:.3f}".format(meter.global_avg) for name, meter in logger.meters.items()}
@torch.no_grad()
def evaluate(model, data_loader, tokenizer, device, config):
    """Decode captions for every batch and collect them as result records.

    Returns a list of dicts: ``{"image_id", "caption"}`` for nocaps-style
    test files, otherwise ``{"images", "generated", "beam_id"}``.
    """
    model.eval()

    logger = utils.MetricLogger(delimiter="  ")
    logger.add_meter('loss', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    header = 'Evaluation:'
    print_freq = 50

    result = []
    for images, texts, fnames in logger.log_every(data_loader, print_freq, header):
        images = images.to(device, non_blocking=True)

        # empty prompt: condition on the image alone
        text_input = tokenizer([""] * images.size(0), return_tensors="pt").to(device)
        text_target = tokenizer(
            texts, padding='longest', truncation=True,
            max_length=MAX_TOKENS, return_tensors="pt").to(device)

        # teacher-forced pass for the validation loss ...
        loss, logits = model(images, text_input, text_target, train=True, decode=False)
        # ... and a free-running decode for the generated captions
        decoded = model(images, text_input, text_target, train=False, decode=True)
        logger.update(loss=loss.item())

        decoded_seqs = tokenizer.batch_decode(decoded, skip_special_tokens=True)
        for fname, seq in zip(fnames, decoded_seqs):
            caption = seq[len(config['prompt']):].strip()
            if "nocaps" in config["test_file"]:
                result.append({"image_id": int(fname), "caption": caption})
            else:
                result.append({"images": fname, "generated": caption, "beam_id": 0})

    # gather the stats from all processes
    logger.synchronize_between_processes()
    print("Valid Averaged stats:", logger.global_avg())
    return result
def main(args, config):
    """Entry point: build data/model/optimizer, train, and dump generated captions.

    args: parsed CLI namespace (checkpoint path, distributed settings, ...).
    config: YAML config dict (schedule, batch sizes, dataset files, ...).
    """
    utils.init_distributed_mode(args)
    device = torch.device(args.device)

    # fix the seed for reproducibility (offset by rank so workers differ)
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    start_epoch = 0
    max_epoch = config['schedular']['epochs']
    warmup_epochs = config['schedular']['warmup_epochs']

    #### Dataset ####
    print("Creating dataset")
    datasets = create_dataset('gen', config)  # (train, val, test) splits

    if args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        # only the train split is shuffled/sharded across processes
        samplers = create_sampler(datasets, [True, False, False], num_tasks, global_rank)
    else:
        samplers = [None, None, None]

    train_loader, val_loader, test_loader = create_loader(datasets, samplers,
                                                          batch_size=[config['batch_size_train'], config['batch_size_test'], config['batch_size_test']],
                                                          num_workers=[4, 4, 4],
                                                          is_trains=[True, False, False],
                                                          collate_fns=[None, None, None])

    tokenizer = BertTokenizer.from_pretrained(args.encoder, bos_token='[CLS]', eos_token='[SEP]', add_single_sep=False)

    #### Model ####
    print("Creating model")
    model = DaVinci(config=config, encoder=args.encoder, text_decoder=args.text_decoder, tokenizer=tokenizer, init_deit=True, init_dalle=True)
    model = model.to(device)

    arg_opt = utils.AttrDict(config['optimizer'])
    optimizer = create_optimizer(arg_opt, model)
    arg_sche = utils.AttrDict(config['schedular'])
    # scheduler lengths are expressed in optimizer steps, not epochs
    step_per_epoch = len(train_loader)
    arg_sche['num_warmup_steps'] = arg_sche['warmup_epochs'] * step_per_epoch
    arg_sche['num_training_steps'] = arg_sche['epochs'] * step_per_epoch
    lr_scheduler, _ = create_scheduler(arg_sche, optimizer)

    if args.checkpoint:
        checkpoint = torch.load(args.checkpoint, map_location='cpu')
        state_dict = checkpoint['model']
        # reshape positional embedding to accomodate for image resolution change
        pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'], model.visual_encoder)
        state_dict['visual_encoder.pos_embed'] = pos_embed_reshaped
        msg = model.load_state_dict(state_dict, strict=False)
        print('load checkpoint from %s' % args.checkpoint)
        print(msg)

    model_without_ddp = model
    # apex mixed precision; opt_level is "O1" (letter O followed by one, not zero-one)
    model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
        model_without_ddp = model.module

    print("Start training")
    start_time = time.time()

    # save the results of only_pt (zero-shot eval before any fine-tuning)
    gen_result = evaluate(model, val_loader, tokenizer, device, config)
    result_file = save_result(gen_result, args.output_dir, 'gen_val_result_epoch-1')

    for epoch in range(start_epoch, max_epoch):
        train_stats = train(model, train_loader, optimizer, tokenizer, epoch, warmup_epochs, device, lr_scheduler, config)
        gen_result = evaluate(model, val_loader, tokenizer, device, config)
        result_file = save_result(gen_result, args.output_dir, 'gen_val_result_epoch%d' % epoch)
        if utils.is_main_process():
            # rank 0 logs stats and checkpoints; other ranks wait at the barrier
            log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                         'epoch': epoch,
                         }
            save_obj = {
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'config': config,
                'epoch': epoch,
            }
            torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_%02d.pth' % epoch))
            with open(os.path.join(args.output_dir, "log.txt"), "a") as f:
                f.write(json.dumps(log_stats) + "\n")
        dist.barrier()
        gen_result = evaluate(model, test_loader, tokenizer, device, config)
        result_file = save_result(gen_result, args.output_dir, 'gen_test_result_epoch%d' % epoch)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Total time {}'.format(total_time_str))
def str2bool(v):
    """Parse a boolean CLI value.

    argparse's ``type=bool`` is a known trap: ``bool("False")`` is True
    because any non-empty string is truthy, so ``--resume False`` would
    silently enable resuming.  This converter interprets the usual textual
    spellings explicitly; anything else is treated as False.
    """
    if isinstance(v, bool):
        return v
    return str(v).strip().lower() in ('true', '1', 'yes', 'y')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/gen_coco.yaml')
    parser.add_argument('--checkpoint', default='')
    # was ``type=bool``: that made "--resume False" parse as True
    parser.add_argument('--resume', default=False, type=str2bool)
    parser.add_argument('--output_dir', default='gen_coco/')
    parser.add_argument('--encoder', default='bert-base-uncased')
    parser.add_argument('--text_decoder', default='bert-base-uncased')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    # was ``type=bool``: that made "--distributed False" parse as True
    parser.add_argument('--distributed', default=True, type=str2bool)
    parser.add_argument('--override_cfg', default="", type=str, help="Use ; to separate keys")
    args = parser.parse_args()

    # currently support the override of params at max depth 2
    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
    if args.override_cfg != "":
        override_cfg_str = args.override_cfg.replace(";", "\n").replace(":", ": ")
        override_cfg = yaml.load(override_cfg_str, Loader=yaml.Loader)
        for k, v in override_cfg.items():
            # dict values merge into the existing sub-dict; scalars replace
            if type(v) == dict:
                for kk, vv in v.items():
                    config[k][kk] = vv
            else:
                config[k] = v

    Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    # snapshot the effective (post-override) config next to the results
    yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))
    main(args, config)
DaVinci | DaVinci-main/Pretrain.py | # Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
import argparse
import os
import sys
import ruamel.yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import math
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.davinci_pretrain import DaVinci
from models.resnet import interpolate_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset import create_dataset
from scheduler import create_scheduler
from optim import create_optimizer
from torch.distributed.elastic.multiprocessing.errors import record
from util.checkpointer import Checkpointer
from util.hdfs_io import hmkdir, hcopy
from accelerators.apex_ddp_accelerator import ApexDDPAccelerator
# Make the repo root and models/ importable regardless of the launch
# directory (the models package imports dall_e modules by bare name).
root_dir = Path(__file__).parent.absolute()
model_dir = root_dir / 'models'
sys.path.insert(0, str(root_dir))
sys.path.insert(0, str(model_dir))

# Caption truncation length (in tokenizer tokens) for the image-text pair stream.
MAX_TOKENS = 30
def train(model, pair_data_loader, c4_data_loader, optimizer, epoch_info, device, scheduler, config, accelerator, checkpointer, tokenizer):
    """Run pretraining from ``epoch_info[0]`` onwards over two zipped streams:

    * ``pair_data_loader`` -- image-text pairs (prefix LM + image generation
      and, optionally, masked-image-modeling losses),
    * ``c4_data_loader``  -- text-only C4 documents (prefix LM loss).

    Checkpoints periodically via ``checkpointer`` and returns the formatted
    global averages of the tracked metrics.
    """
    model.train()
    start_epoch, _ = epoch_info
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.8f}'))
    metric_logger.add_meter('loss', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    metric_logger.add_meter('loss_pair', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
    metric_logger.add_meter('loss_image_generation', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
    metric_logger.add_meter('loss_c4', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
    metric_logger.add_meter('loss_mim', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
    header = 'Train Epoch: [{}]'.format(start_epoch)
    print_freq = 50

    accelerator_gradient_accumulate_steps = int(config['accelerator']['GRAD_ACCUMULATE_STEPS'])
    accelerator_clip_grad_norm = float(config['accelerator']['CLIP_GRAD_NORM'])

    world_size = int(os.environ.get('WORLD_SIZE', 1))
    # the step counter is global across epochs (single long loop, not one loop per epoch)
    step_per_epoch = math.ceil(config['train_dataset_size'] / (config['batch_size'] * world_size))
    current_step = start_epoch * step_per_epoch
    global_step = current_step + 1

    for i, ((image, visual_token_image, org_texts), (null_image, c4_texts)) in enumerate(metric_logger.log_every(zip(pair_data_loader, c4_data_loader), print_freq, header, step_per_epoch, epoch_info)):
        current_epoch = int(global_step / step_per_epoch)

        # -----------image-text-pair-------------
        image = torch.stack(image)
        image = image.to(device, non_blocking=True)
        visual_token_image = torch.stack(visual_token_image)
        visual_token_image = visual_token_image.to(device, non_blocking=True)

        # pick how many pixel rows (multiples of 16) are fed as a visual prefix
        if config["prefix_image"] == "static" or current_epoch > config["max_prefix_image_epoch"]:
            prefix_image_length = 0
        else:
            prefix_image_length = 16 * np.random.randint(0, config["image_res"] // 16 - 1)
        if "loss_mim_alpha" in config and config["loss_mim_alpha"] > 0:
            # masked-image modeling requires a non-empty prefix
            prefix_image_length = 16 * np.random.randint(1, config["image_res"] // 16 - 1)

        if prefix_image_length == 0:
            prefix_image = None
        else:
            prefix_image = image[:, :, :prefix_image_length, :]
        if config["dalle_goal"] == "mask":
            # predict only the (masked) suffix rows of the visual-token image
            suffix_image = visual_token_image[:, :, prefix_image_length:, :]
        elif config["dalle_goal"] == "full":
            suffix_image = visual_token_image

        # split each caption at a random word index: prefix -> encoder input,
        # remainder -> decoder target
        pre_texts, gen_texts = [], []
        for i, text in enumerate(org_texts):
            # NOTE(review): this ``i`` shadows the outer batch index; harmless
            # because the outer ``i`` is never read, but worth renaming.
            wds = text.split(" ")
            pre_len = min(np.random.randint(0, len(wds)), MAX_TOKENS)
            pre_texts.append(" ".join(wds[:pre_len]))
            gen_texts.append(" ".join(wds[pre_len:]))
        text_input = tokenizer(pre_texts, padding='longest', truncation=True, max_length=MAX_TOKENS, return_tensors="pt").to(device)
        text_target = tokenizer(gen_texts, padding='longest', truncation=True, max_length=MAX_TOKENS, return_tensors="pt").to(device)
        text_full = tokenizer(org_texts, padding='longest', truncation=True, max_length=MAX_TOKENS, return_tensors="pt").to(device)
        loss_pair, loss_image_generation, loss_mim, logits = model(image, text_input, text_target, text_full=text_full, prefix_image=prefix_image, suffix_image=suffix_image, use_dalle=True, train=True, decode=False)

        # -----------c4-text-only-------------
        # same random prefix/suffix split, but with text-only length limits
        pre_texts, gen_texts = [], []
        for text in c4_texts:
            wds = text.split(" ")
            pre_len = min(np.random.randint(0, len(wds)), config['enc_max_words'])
            pre_texts.append(" ".join(wds[:pre_len]))
            gen_texts.append(" ".join(wds[pre_len:]))
        text_input = tokenizer(pre_texts, padding='longest', truncation=True, max_length=config['enc_max_tokens'], return_tensors="pt").to(device)
        text_target = tokenizer(gen_texts, padding='longest', truncation=True, max_length=config['dec_max_tokens'], return_tensors="pt").to(device)
        loss_c4, logits = model(None, text_input, text_target, train=True, decode=False)

        # weighted sum of the four pretraining objectives
        loss = config['loss_pair_alpha'] * loss_pair + config['loss_image_generation_alpha'] * loss_image_generation + config['c4_alpha'] * loss_c4 + config['loss_mim_alpha'] * loss_mim
        if accelerator_gradient_accumulate_steps > 1:
            # average (rather than sum) gradients over accumulation steps
            loss = loss / accelerator_gradient_accumulate_steps

        # Backward
        accelerator.backward_step(loss, optimizer)

        # Optimizer: only step once every GRAD_ACCUMULATE_STEPS micro-batches
        if global_step % accelerator_gradient_accumulate_steps == 0:
            if accelerator_clip_grad_norm > 0:
                accelerator.optimizer_step(optimizer, model, accelerator_clip_grad_norm)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()

        metric_logger.update(loss=loss.item())
        metric_logger.update(loss_pair=loss_pair.item())
        metric_logger.update(loss_image_generation=loss_image_generation.item())
        metric_logger.update(loss_c4=loss_c4.item())
        metric_logger.update(loss_mim=loss_mim.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        global_step += 1

        train_stats = {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
        # checkpoint at epoch boundaries and at the configured step frequency
        if global_step % step_per_epoch == 0 or global_step % config['checkpoint_frequent'] == 0:
            if utils.is_main_process():
                log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                             'epoch': current_epoch,
                             }
                model_without_ddp = model
                if hasattr(model, 'module'):
                    model_without_ddp = model.module
                best_so_for = False
                save_obj = {
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': scheduler.state_dict(),
                    'config': config,
                    'epoch': current_epoch,
                }
                checkpointer.save_checkpoint(model_state=save_obj,
                                             epoch=current_epoch,
                                             training_states=optimizer.state_dict(),
                                             is_best_so_far=best_so_for)
                with open("./log.txt", "a") as f:
                    f.write(json.dumps(log_stats) + "\n")

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger.global_avg())
    return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
@record
def main(args, config):
    """Distributed pretraining entry point.

    Builds the paired and C4 dataloaders, the DaVinci model, optimizer and
    scheduler, optionally restores a checkpoint, wraps everything in the
    Apex DDP accelerator, and runs :func:`train`.
    """
    utils.init_distributed_mode(args)
    device = torch.device(args.device)

    # dataset file lists come in as YAML lists; downstream expects comma-joined strings
    config['train_file'] = ','.join(config['train_file'])
    config['c4_train_file'] = ','.join(config['c4_train_file'])
    if utils.is_main_process():
        print(f"### train_file: {config['train_file']}")
        print(f"### c4_train_file: {config['c4_train_file']}")
        sys.stdout.flush()
        # snapshot the effective config to the (possibly hdfs) output dir
        yaml.dump(config, open('./config.yaml', 'w'))
        hcopy('./config.yaml', args.output_dir)

    # fix the seed for reproducibility (config value wins over the CLI flag)
    if 'seed' in config:
        args.seed = config['seed']
    print("args.seed", args.seed)
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    start_epoch = 0
    max_epoch = config['schedular']['epochs']

    #### Dataset ####
    print("Creating dataset")
    pair_dataset, c4_dataset = create_dataset('pretrain', config)

    pair_data_loader = torch.utils.data.DataLoader(pair_dataset, batch_size=config['batch_size'],
                                                   num_workers=4,
                                                   pin_memory=True,
                                                   drop_last=False,
                                                   collate_fn=pair_dataset.collate_fn
                                                   )
    c4_data_loader = torch.utils.data.DataLoader(c4_dataset, batch_size=config['batch_size_c4'],
                                                 num_workers=4,
                                                 pin_memory=True,
                                                 drop_last=False,
                                                 collate_fn=c4_dataset.collate_fn
                                                 )

    tokenizer = BertTokenizer.from_pretrained(args.encoder, bos_token='[CLS]', eos_token='[SEP]', add_single_sep=False)

    #### Model ####
    print("Creating model")
    model = DaVinci(config=config, encoder=args.encoder, text_decoder=args.text_decoder, tokenizer=tokenizer, init_deit=True, init_dalle=True, device=device)
    model = model.to(device)
    print("DAVINCI have {} paramerters in total".format(sum(x.numel() for x in model.parameters())))

    world_size = int(os.environ.get('WORLD_SIZE', 1))
    rank = int(os.environ.get('RANK', 0))
    local_rank = int(os.environ.get('LOCAL_RANK', 0))

    arg_opt = utils.AttrDict(config['optimizer'])
    optimizer = create_optimizer(arg_opt, model)
    arg_sche = utils.AttrDict(config['schedular'])
    # schedule lengths are in optimizer updates, accounting for gradient accumulation
    update_steps_per_epoch = math.ceil(config['train_dataset_size'] / (config['batch_size'] * world_size) / int(config['accelerator']['GRAD_ACCUMULATE_STEPS']))
    arg_sche['num_warmup_steps'] = arg_sche['warmup_epochs'] * update_steps_per_epoch
    arg_sche['num_training_steps'] = arg_sche['epochs'] * update_steps_per_epoch
    lr_scheduler, _ = create_scheduler(arg_sche, optimizer)

    arg_acc = utils.AttrDict(config['accelerator'])
    accelerator = ApexDDPAccelerator(arg_acc, logger=None)

    if args.checkpoint:
        checkpoint = torch.load(args.checkpoint, map_location='cpu')
        state_dict = checkpoint['model']
        if args.resume:
            # full resume: restore optimizer/scheduler state and continue training
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            start_epoch = checkpoint['epoch'] + 1
        else:
            # warm start: adapt positional embeddings to the configured resolution
            pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'], model.visual_encoder)
            state_dict['visual_encoder.pos_embed'] = pos_embed_reshaped
        msg = model.load_state_dict(state_dict)
        print('load checkpoint from %s' % args.checkpoint)
        print(msg)

    model, optimizer, lr_scheduler = accelerator.set_up(model, optimizer, lr_scheduler, local_rank, world_size, rank)
    checkpointer = Checkpointer(args.output_dir)

    print("Start training")
    start_time = time.time()
    epoch_info = (start_epoch, max_epoch)
    train(model, pair_data_loader, c4_data_loader, optimizer, epoch_info, device, lr_scheduler, config,
          accelerator, checkpointer, tokenizer)
    dist.barrier()

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
    if utils.is_main_process():
        hcopy('./log.txt', args.output_dir)
def str2bool(v):
    """Parse a boolean CLI value.

    argparse's ``type=bool`` is a known trap: ``bool("False")`` is True
    because any non-empty string is truthy, so ``--resume False`` would
    silently enable resuming.  This converter interprets the usual textual
    spellings explicitly; anything else is treated as False.
    """
    if isinstance(v, bool):
        return v
    return str(v).strip().lower() in ('true', '1', 'yes', 'y')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/Pretrain_davinci.yaml')
    parser.add_argument('--checkpoint', default='')
    # was ``type=bool``: that made "--resume False" parse as True
    parser.add_argument('--resume', default=False, type=str2bool)
    parser.add_argument('--output_dir', default='Pretrain/')
    parser.add_argument('--encoder', default='bert-base-uncased')
    parser.add_argument('--text_decoder', default='bert-base-uncased')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    # was ``type=bool``: that made "--distributed False" parse as True
    parser.add_argument('--distributed', default=True, type=str2bool)
    parser.add_argument('--override_cfg', default="", type=str, help="Use ; to separate keys")
    args = parser.parse_args()

    # currently support the override of params at max depth 2
    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
    if args.override_cfg != "":
        override_cfg_str = args.override_cfg.replace(";", "\n").replace(":", ": ")
        override_cfg = yaml.load(override_cfg_str, Loader=yaml.Loader)
        for k, v in override_cfg.items():
            # dict values merge into the existing sub-dict; scalars replace
            if type(v) == dict:
                for kk, vv in v.items():
                    config[k][kk] = vv
            else:
                config[k] = v

    # hdfs paths are created remotely via hmkdir; only local paths use pathlib
    if not args.output_dir.startswith('hdfs'):
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    hmkdir(args.output_dir)
    print("args.output_dir: ", args.output_dir)
    main(args, config)
DaVinci | DaVinci-main/image_linprobe.py | # Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
#!/usr/bin/env python
import argparse
import builtins
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from optim.lars import LARS
import math
# from models.model_linearprobe import DaVinciLinearProbe
from models.model_linearprobe import DaVinciLinearProbe
import ruamel.yaml as yaml
from models.tokenization_bert import BertTokenizer
from models.resnet import interpolate_pos_embed
import sys
from pathlib import Path
import utils
from optim import create_optimizer
from scheduler import create_scheduler
from PIL import Image
# Registry mapping a lowercase dataset name to its torchvision dataset class;
# used to instantiate the linear-probe splits by the name given in the config.
DATASETS = {
    "celeba": datasets.CelebA,
    "cifar10": datasets.CIFAR10,
    "cifar100": datasets.CIFAR100,
    "emnist": datasets.EMNIST,
    "fakedata": datasets.FakeData,
    "fashionmnist": datasets.FashionMNIST,
    "flickr8k": datasets.Flickr8k,
    "flickr30k": datasets.Flickr30k,
    "inaturalist": datasets.INaturalist,
    "kmnist": datasets.KMNIST,
    "lfwpeople": datasets.LFWPeople,
    "lsun": datasets.LSUN,
    "mnist": datasets.MNIST,
    "omniglot": datasets.Omniglot,
    "places365": datasets.Places365,
    "qmnist": datasets.QMNIST,
    "semeion": datasets.SEMEION,
    "sbu": datasets.SBU,
    "stl10": datasets.STL10,
    "svhn": datasets.SVHN,
    "usps": datasets.USPS,
    #----below are only supported by torch1.11 + torchvision0.12
    "sun397": datasets.SUN397,
    "country211": datasets.Country211,
    "dtd": datasets.DTD,
    "caltech101": datasets.Caltech101,
    "caltech256": datasets.Caltech256,
    "stanfordcars": datasets.StanfordCars,
    "renderedsst2": datasets.RenderedSST2,
    "pcam": datasets.PCAM,
    "oxfordiiitpet": datasets.OxfordIIITPet,
    "flowers102": datasets.Flowers102,
    "food101": datasets.Food101,
    "gtsrb": datasets.GTSRB,
    "fer2013": datasets.FER2013,
    "fgvcaircraft": datasets.FGVCAircraft,
    "eurosat": datasets.EuroSAT,
    "kitti": datasets.Kitti,
}
# Number of target classes per supported linear-probe dataset; this drives
# the output size of the classification head (``fc``) built in main_worker.
dataset2nlabels = {
    'imagenet': 1000,
    'food101': 101,
    'cifar10': 10,
    'cifar100': 100,
    'stanfordcars': 196,
    'fgvcaircraft': 102,
    'dtd': 47,
    'oxfordiiitpet': 37,
    'flowers102': 103,  # flowers 1 - 102
    'mnist': 10,
    'stl10': 10,
    # 'gtsrb': 43, # data unavailable
    # 'kitti': unclear structure
    'country211': 211,
}
# Make the repo root and models/ importable regardless of the launch directory.
root_dir = Path(__file__).parent.absolute()
model_dir = root_dir / 'models'
sys.path.insert(0, str(root_dir))
sys.path.insert(0, str(model_dir))

# All lowercase, callable torchvision model builders; used as the valid
# choices for the --arch command-line option.
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))
# Command-line interface for linear-probe evaluation (adapted from the
# standard PyTorch ImageNet / MoCo linear-classification scripts).
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# parser.add_argument('data', metavar='DIR',
#                     help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
                    choices=model_names,
                    help='model architecture: ' +
                         ' | '.join(model_names) +
                         ' (default: resnet50)')
parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
                    help='number of data loading workers (default: 32)')
# parser.add_argument('--epochs', default=100, type=int, metavar='N',
#                     help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--blr', type=float, default=0.1, metavar='LR',
                    help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
parser.add_argument('--min_lr', type=float, default=0., metavar='LR',
                    help='lower lr bound for cyclic schedulers that hit 0')
parser.add_argument('--warmup_epochs', type=int, default=10, metavar='N',
                    help='epochs to warmup LR')
parser.add_argument('--schedule', default=[60, 80], nargs='*', type=int,
                    help='learning rate schedule (when to drop lr by a ratio)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=0., type=float,
                    metavar='W', help='weight decay (default: 0.)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=100, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')
parser.add_argument('--pretrained', default='./pretrain_coco_vg_6490601_20220429-004728/model_state_epoch_38.th', type=str,
                    help='path to moco pretrained checkpoint')
parser.add_argument('--encoder', default='bert-base-uncased')
parser.add_argument('--text_decoder', default='bert-base-uncased')
parser.add_argument('--config', default='./configs/linear_probe.yaml')
parser.add_argument('--override_cfg', default="", type=str, help="Use ; to separate keys")

# Best top-1 accuracy observed so far; module-level because main_worker
# updates it via ``global best_acc1``.
best_acc1 = 0
def main():
    """Parse CLI args, merge YAML config overrides, and launch training.

    Spawns one worker process per GPU when --multiprocessing-distributed is
    set; otherwise runs a single worker in the current process.
    """
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()

    # Merge --override_cfg into the YAML config (depth 2 at most).
    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
    if args.override_cfg != "":
        override_text = args.override_cfg.replace(";", "\n").replace(":", ": ")
        overrides = yaml.load(override_text, Loader=yaml.Loader)
        for key, value in overrides.items():
            if type(value) == dict:
                for sub_key, sub_value in value.items():
                    config[key][sub_key] = sub_value
            else:
                config[key] = value

    args.epochs = config['schedular']['epochs']
    args.batch_size = config['batch_size_train']
    print(f"actual lr: {config['optimizer']['lr']}")

    if args.multiprocessing_distributed:
        # One process per GPU: scale world_size accordingly and fan out.
        args.world_size = ngpus_per_node * args.world_size
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args, config))
    else:
        # Simply call main_worker in this process.
        main_worker(args.gpu, ngpus_per_node, args, config)
def main_worker(gpu, ngpus_per_node, args, config):
    """Per-process worker: builds the frozen DaVinci backbone + trainable linear
    head, loads the pre-trained weights, sets up (distributed) data loading and
    runs the linear-probe train/validate loop.

    Args:
        gpu: local GPU index for this process (None for CPU/DataParallel paths).
        ngpus_per_node: GPU count on this node, used for rank/worker math.
        args: parsed CLI namespace (mutated in place: gpu, rank, workers, ...).
        config: merged YAML configuration dict.
    """
    global best_acc1
    args.gpu = gpu
    # suppress printing if not master
    if args.multiprocessing_distributed and args.gpu != 0:
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model
    # print("=> creating model '{}'".format(args.arch))
    # model = models.__dict__[args.arch]()
    tokenizer = BertTokenizer.from_pretrained(args.encoder, bos_token='[CLS]', eos_token='[SEP]', add_single_sep=False)
    N_LABELS = dataset2nlabels[config['dataset']]
    model = DaVinciLinearProbe(config=config, encoder=args.encoder, text_decoder=args.text_decoder, tokenizer=tokenizer, n_labels=N_LABELS)
    # freeze all layers but the last fc
    for name, param in model.named_parameters():
        if name not in ['fc.weight', 'fc.bias']:
            param.requires_grad = False
    # init the fc layer
    model.fc.weight.data.normal_(mean=0.0, std=0.01)
    model.fc.bias.data.zero_()
    # load from pre-trained, before DistributedDataParallel constructor
    if args.pretrained:
        if os.path.isfile(args.pretrained):
            print("=> loading checkpoint '{}'".format(args.pretrained))
            checkpoint = torch.load(args.pretrained, map_location='cpu')
            state_dict = checkpoint['model']
            # state_dict = checkpoint['state_dict']
            # Checkpoint keys lack the 'davinci.' wrapper prefix used by the
            # probe model, so rewrite every key in place.
            for key in list(state_dict.keys())[:]:
                new_key = 'davinci.'+key
                state_dict[new_key] = state_dict[key]
                del state_dict[key]
            # reshape positional embedding to accomodate for image resolution change
            pos_embed_reshaped = interpolate_pos_embed(state_dict['davinci.visual_encoder.pos_embed'],model.davinci.visual_encoder)
            state_dict['davinci.visual_encoder.pos_embed'] = pos_embed_reshaped
            msg = model.load_state_dict(state_dict,strict=False)
            print('loaded checkpoint from %s'%args.pretrained)
            print(msg)
            # Only the freshly-initialized head may be missing from the checkpoint.
            assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}
        else:
            print("=> no checkpoint found at '{}'".format(args.pretrained))
    # Feature-normalizing BN (affine=False => no extra trainable params) in
    # front of the linear head; the wrap happens AFTER the checkpoint load so
    # key names above stay valid.
    model.fc = torch.nn.Sequential(torch.nn.BatchNorm1d(model.fc.in_features, affine=False, eps=1e-6), model.fc)
    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            # args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        # NOTE(review): args.arch is not among the arguments added in the visible
        # parser setup — this fallback path may raise AttributeError; confirm.
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    # optimize only the linear classifier
    parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
    assert len(parameters) == 2  # fc.weight, fc.bias
    print("config['optimizer']:", config['optimizer'])
    print("args.world_size: ", args.world_size)
    print("args: ", args)
    arg_opt = utils.AttrDict(config['optimizer'])
    optimizer = create_optimizer(arg_opt, model)
    arg_sche = utils.AttrDict(config['schedular'])
    # Scheduler steps per optimizer update; accounts for world size and
    # gradient accumulation so warmup/total steps match actual updates.
    update_steps_per_epoch = math.ceil(config['train_dataset_size'] / (config['batch_size_train']*args.world_size) / int(config['accelerator']['GRAD_ACCUMULATE_STEPS']))
    arg_sche['num_warmup_steps'] = arg_sche['warmup_epochs'] * update_steps_per_epoch
    arg_sche['num_training_steps'] = arg_sche['epochs'] * update_steps_per_epoch
    lr_scheduler, _ = create_scheduler(arg_sche, optimizer)
    max_epoch = config['schedular']['epochs']
    warmup_steps = config['schedular']['warmup_epochs']
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            # scheduler.load_state_dict(checkpoint['scheduler'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Data loading code
    data_path = config['root_dir'] + config['dataset']
    traindir = os.path.join(data_path, 'train')
    valdir = os.path.join(data_path, 'val')
    if config['dataset'] == 'imagenet':
        # CLIP-style normalization constants for ImageNet.
        normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
    else:
        normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                         std=[0.5, 0.5, 0.5])
    if config['dataset'] == 'mnist':
        # MNIST is grayscale: replicate to 3 channels to match the backbone.
        train_transform = transforms.Compose([
            transforms.Resize(config['image_res'], interpolation=3),
            transforms.Grayscale(3),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        test_transform = transforms.Compose([
            transforms.Resize(config['image_res'], interpolation=3),
            transforms.Grayscale(3),
            transforms.ToTensor(),
            normalize,
        ])
    else:
        train_transform = transforms.Compose([
            transforms.Resize((config['image_res'],config['image_res']),interpolation=Image.BICUBIC),
            transforms.ToTensor(),
            normalize,
        ])
        test_transform = transforms.Compose([
            transforms.Resize((config['image_res'],config['image_res']),interpolation=Image.BICUBIC),
            transforms.ToTensor(),
            normalize,
        ])
    # Dataset constructors differ in their split-selection keyword, hence the
    # per-family branches below.
    if config['dataset'] == 'imagenet':
        train_dataset = datasets.ImageFolder(traindir, train_transform)
        val_dataset = datasets.ImageFolder(valdir, test_transform)
    else:
        if config['dataset'] in ['mnist', 'cifar10', 'cifar100', 'kitti']:
            train_dataset = DATASETS[config['dataset']](
                traindir, train=True, download=True, transform=train_transform)
            val_dataset = DATASETS[config['dataset']](
                valdir, train=False, download=True, transform=test_transform)
        elif config['dataset'] in ['dtd', 'fgvcaircraft', 'food101', 'stanfordcars']:
            train_dataset = DATASETS[config['dataset']](
                traindir, split='train', download=True, transform=train_transform)
            val_dataset = DATASETS[config['dataset']](
                valdir, split='test', download=True, transform=test_transform)
        elif config['dataset'] in ['oxfordiiitpet']:
            train_dataset = DATASETS[config['dataset']](
                traindir, split='trainval', download=True, transform=train_transform)
            val_dataset = DATASETS[config['dataset']](
                valdir, split='test', download=True, transform=test_transform)
        else:
            train_dataset = DATASETS[config['dataset']](
                traindir, split='train', download=True, transform=train_transform)
            val_dataset = DATASETS[config['dataset']](
                valdir, split='test', download=True, transform=test_transform)
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        validate(val_loader, model, criterion, args, tokenizer)
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Re-seed the sampler so each epoch sees a different shard ordering.
            train_sampler.set_epoch(epoch)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, lr_scheduler, epoch, args, tokenizer)
        # scheduler.step()
        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args, tokenizer)
        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        # Only the (per-node) rank-0 process writes checkpoints.
        if not args.multiprocessing_distributed or (args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer' : optimizer.state_dict(),
                # 'scheduler': scheduler.state_dict(),
            }, is_best)
    print("best_acc1 = ", best_acc1)
def train(train_loader, model, criterion, optimizer, scheduler, epoch, args, tokenizer):
    """Run one linear-probe training epoch: forward the frozen backbone, compute
    cross-entropy on the linear head, and step optimizer + LR scheduler per batch.

    Note: tokenizer is accepted for interface symmetry with validate() but is
    not used inside this function.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.8f')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    lr_log = AverageMeter('lr', ':.8f')
    progress = ProgressMeter(
        len(train_loader),
        [lr_log, batch_time, data_time, losses, top1, top5],
        prefix="Epoch: [{}]".format(epoch))
    """
    Switch to eval mode:
    Under the protocol of linear classification on frozen features/models,
    it is not legitimate to change any part of the pre-trained model.
    BatchNorm in train mode may revise running mean/std (even if it receives
    no gradient), which are part of the model parameters too.
    """
    model.eval()
    end = time.time()
    for i, (images, target) in enumerate(train_loader):  # images: [4096, 3, 224, 224], target: [4096]
        # measure data loading time
        data_time.update(time.time() - end)
        # FROM MAE: we use a per iteration (instead of per epoch) lr scheduler
        # adjust_learning_rate(optimizer, i / len(train_loader) + epoch, args)
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
        target = target.cuda(args.gpu, non_blocking=True)
        # compute output
        output = model(images, train=True)  # output: [4096, 1000]
        loss = criterion(output, target)
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        lr_log.update(optimizer.param_groups[0]["lr"], images.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # per-iteration LR schedule (see MAE note above)
        scheduler.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.display(i)
def validate(val_loader, model, criterion, args, tokenizer):
    """Evaluate the model on the validation loader and return average top-1
    accuracy. Gradients are disabled for the whole pass.

    Note: tokenizer is accepted for interface symmetry with train() but is not
    used inside this function.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(val_loader),
        [batch_time, losses, top1, top5],
        prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
            # compute output
            output = model(images, train=False)
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                progress.display(i)
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize *state* to *filename*; when *is_best*, also mirror the file
    to 'model_best.pth.tar' in the working directory."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Drop all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Prints a '[batch/total]' counter followed by a tab-joined list of meters.

    Args:
        num_batches: total number of batches, used to size the counter field.
        meters: iterable of objects whose str() renders a stat (e.g. AverageMeter).
        prefix: string printed before the counter (e.g. "Epoch: [3]").
    """

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print one progress line for the given batch index."""
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        """Build e.g. '[{:3d}/100]' so the counter column has a fixed width."""
        # Fixed: the original computed len(str(num_batches // 1)); `// 1` is a
        # no-op on ints, so use num_batches directly.
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        k_max = max(topk)
        n_samples = target.size(0)
        # Indices of the k_max highest-scoring classes per sample: [B, k_max].
        _, top_idx = output.topk(k_max, 1, True, True)
        # Transpose to [k_max, B] so row r holds every sample's rank-r guess,
        # then compare each row against the targets.
        hit_mask = top_idx.t().eq(target.view(1, -1).expand(k_max, n_samples))
        scale = 100.0 / n_samples
        return [
            hit_mask[:k].reshape(-1).float().sum(0, keepdim=True) * scale
            for k in topk
        ]
# Script entry point: parse CLI args and launch (possibly distributed) training.
if __name__ == '__main__':
    main()
| 25,791 | 39.489796 | 169 | py |
DaVinci | DaVinci-main/VQA.py | # Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
import argparse
import os, sys
import ruamel.yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_vqa import DaVinciVQA
from models.resnet import interpolate_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset.utils import save_result
from dataset import create_dataset, create_sampler, create_loader, vqa_collate_fn
from scheduler import create_scheduler
from optim import create_optimizer
from apex import amp
root_dir = Path(__file__).parent.absolute()
model_dir = root_dir / 'models'
sys.path.insert(0, str(root_dir))
sys.path.insert(0, str(model_dir))
def train(model, data_loader, optimizer, tokenizer, epoch, warmup_epochs, device, scheduler, config):
    """Run one VQA fine-tuning epoch with apex AMP loss scaling.

    Steps the optimizer and LR scheduler once per batch and returns a dict of
    epoch-averaged stats formatted as strings ('loss', 'lr').
    """
    model.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.8f}'))
    metric_logger.add_meter('loss', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
    header = 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50
    for i,(image, question, answer, weights, question_id, n) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        image, weights = image.to(device,non_blocking=True), weights.to(device,non_blocking=True)
        # Questions are truncated to 25 tokens; answers are padded to the batch max.
        question_input = tokenizer(question, padding='longest', truncation=True, max_length=25, return_tensors="pt").to(device)
        answer_input = tokenizer(answer, padding='longest', return_tensors="pt").to(device)
        # k=n: number of candidate answers per question; weights: per-answer weights.
        loss = model(image, question_input, answer_input, train=True, k=n, weights=weights)
        optimizer.zero_grad()
        # apex AMP: scale the loss before backward to avoid fp16 underflow.
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        # loss.backward()
        optimizer.step()
        scheduler.step()
        metric_logger.update(loss=loss.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Train Averaged stats:", metric_logger.global_avg())
    return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def test(model, data_loader, tokenizer, device, config) :
    """Generate VQA answers by ranking the dataset's closed answer list.

    Returns a list of {'question_id': int, 'answer': str} records, one per
    test question, suitable for save_result().
    """
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Generate VQA test result:'
    print_freq = 50
    result = []
    # Append the EOS marker so candidate answers match the decoder's targets.
    answer_list = [answer+config['eos'] for answer in data_loader.dataset.answer_list]
    answer_input = tokenizer(answer_list, padding='longest', return_tensors='pt').to(device)
    for n, (image, question, question_id) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        image = image.to(device,non_blocking=True)
        question_input = tokenizer(question, padding='longest', return_tensors="pt").to(device)
        # Model returns the top-k candidate indices and their probabilities.
        topk_ids, topk_probs = model(image, question_input, answer_input, train=False, k=config['k_test'])
        for ques_id, topk_id, topk_prob in zip(question_id, topk_ids, topk_probs):
            ques_id = int(ques_id.item())
            # Pick the highest-probability candidate among the top-k.
            _, pred = topk_prob.max(dim=0)
            result.append({"question_id":ques_id, "answer":data_loader.dataset.answer_list[topk_id[pred]]})
    return result
def main(args, config):
    """VQA fine-tuning driver: seeds RNGs, builds data loaders/model/optimizer,
    optionally loads a checkpoint, wraps with apex AMP (+DDP), then runs the
    train / generate-results loop, checkpointing on the main process."""
    utils.init_distributed_mode(args)
    device = torch.device(args.device)
    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True
    start_epoch = 0
    max_epoch = config['schedular']['epochs']
    warmup_epochs = config['schedular']['warmup_epochs']
    #### Dataset ####
    print("Creating vqa datasets")
    datasets = create_dataset('vqa', config)
    if args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        # Only the train split is shuffled/sharded per rank.
        samplers = create_sampler(datasets, [True, False, False], num_tasks, global_rank)
    else:
        samplers = [None, None, None]
    train_loader, val_loader, test_loader = create_loader(datasets,samplers,
                                                          batch_size=[config['batch_size_train'],config['batch_size_test'],config['batch_size_test']],
                                                          num_workers=[4,4,4],is_trains=[True, False, False],
                                                          collate_fns=[vqa_collate_fn,vqa_collate_fn,None])
    tokenizer = BertTokenizer.from_pretrained(args.encoder, bos_token='[CLS]', eos_token='[SEP]', add_single_sep=False)
    #### Model ####
    print("Creating model")
    model = DaVinciVQA(config=config, encoder=args.encoder, text_decoder=args.text_decoder, tokenizer=tokenizer)
    model = model.to(device)
    arg_opt = utils.AttrDict(config['optimizer'])
    optimizer = create_optimizer(arg_opt, model)
    arg_sche = utils.AttrDict(config['schedular'])
    # Warmup/total steps are expressed in optimizer updates (one per batch).
    step_per_epoch = len(train_loader)
    arg_sche['num_warmup_steps'] = arg_sche['warmup_epochs'] * step_per_epoch
    arg_sche['num_training_steps'] = arg_sche['epochs'] * step_per_epoch
    lr_scheduler, _ = create_scheduler(arg_sche, optimizer)
    if args.checkpoint:
        if args.evaluate:
            # Evaluation: checkpoint is a fine-tuned VQA model; load as-is.
            checkpoint = torch.load(args.checkpoint, map_location='cpu')
            state_dict = checkpoint['model']
            msg = model.load_state_dict(state_dict,strict=False)
            print('load checkpoint from %s'%args.checkpoint)
            print(msg)
        else:
            # Training: checkpoint is a pre-trained DaVinci backbone; its keys
            # need the 'davinci.' prefix and a resized positional embedding.
            checkpoint = torch.load(args.checkpoint, map_location='cpu')
            state_dict = checkpoint['model']
            for key in list(state_dict.keys())[:]:
                new_key = 'davinci.'+key
                state_dict[new_key] = state_dict[key]
                del state_dict[key]
            # reshape positional embedding to accomodate for image resolution change
            pos_embed_reshaped = interpolate_pos_embed(state_dict['davinci.visual_encoder.pos_embed'],model.davinci.visual_encoder)
            state_dict['davinci.visual_encoder.pos_embed'] = pos_embed_reshaped
            msg = model.load_state_dict(state_dict,strict=False)
            print('load checkpoint from %s'%args.checkpoint)
            print(msg)
    model_without_ddp = model
    model, optimizer = amp.initialize(model, optimizer, opt_level="O1")  # opt level "O1" is the letter O followed by one, not "zero-one"
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
        # Keep an unwrapped handle so checkpoints are saved without the DDP prefix.
        model_without_ddp = model.module
    print("Start training")
    start_time = time.time()
    for epoch in range(start_epoch, max_epoch):
        if not args.evaluate:
            if args.distributed:
                train_loader.sampler.set_epoch(epoch)
            train_stats = train(model, train_loader, optimizer, tokenizer, epoch, warmup_epochs, device, lr_scheduler, config)
        # Generate and persist test-set answers every epoch (and once in eval mode).
        vqa_test_result = test(model, test_loader, tokenizer, device, config)
        result_file = save_result(vqa_test_result, args.result_dir, 'vqa_test_result_epoch%d'%epoch)
        if args.evaluate:
            break
        if utils.is_main_process():
            log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                         'epoch': epoch,
                        }
            with open(os.path.join(args.output_dir, "log.txt"),"a") as f:
                f.write(json.dumps(log_stats) + "\n")
            save_obj = {
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'config': config,
                'epoch': epoch,
            }
            torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_%02d.pth'%epoch))
        dist.barrier()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
# Script entry point: parse CLI args, load the YAML config, apply optional
# "key:val;key:sub:val" overrides (max depth 2), create output dirs, snapshot
# the resolved config, then run main().
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/VQA.yaml')
    parser.add_argument('--checkpoint', default='')
    parser.add_argument('--output_dir', default='output/vqa')
    parser.add_argument('--evaluate', action='store_true')
    parser.add_argument('--encoder', default='bert-base-uncased')
    parser.add_argument('--text_decoder', default='bert-base-uncased')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--distributed', default=True, type=bool)
    parser.add_argument('--override_cfg', default="", type=str, help="Use ; to separate keys")
    args = parser.parse_args()
    # currently support the override of params at max depth 2
    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
    if args.override_cfg != "":
        override_cfg_str = args.override_cfg.replace(";", "\n").replace(":", ": ")
        override_cfg = yaml.load(override_cfg_str, Loader=yaml.Loader)
        for k, v in override_cfg.items():
            if type(v) == dict:
                # nested section: merge key-by-key instead of replacing the dict
                for kk, vv in v.items():
                    config[k][kk] = vv
            else:
                config[k] = v
    args.result_dir = os.path.join(args.output_dir, 'result')
    Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    Path(args.result_dir).mkdir(parents=True, exist_ok=True)
    # Persist the fully-resolved config alongside the outputs for reproducibility.
    yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))
    main(args, config)
DaVinci | DaVinci-main/scheduler/plateau_lr.py | """ Plateau Scheduler
Adapts PyTorch plateau scheduler and allows application of noise, warmup.
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from .scheduler import Scheduler
class PlateauLRScheduler(Scheduler):
    """Decay the LR by a factor every time the validation loss plateaus.

    Wraps torch.optim.lr_scheduler.ReduceLROnPlateau and adds linear warmup
    plus optional per-epoch LR noise on top of the base Scheduler machinery.
    """
    def __init__(self,
                 optimizer,
                 decay_rate=0.1,
                 patience_t=10,
                 verbose=True,
                 threshold=1e-4,
                 cooldown_t=0,
                 warmup_t=0,
                 warmup_lr_init=0,
                 lr_min=0,
                 mode='max',
                 noise_range_t=None,
                 noise_type='normal',
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=None,
                 initialize=True,
                 ):
        super().__init__(optimizer, 'lr', initialize=initialize)

        # Delegate the plateau detection/decay to torch's built-in scheduler.
        self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer,
            patience=patience_t,
            factor=decay_rate,
            verbose=verbose,
            threshold=threshold,
            cooldown=cooldown_t,
            mode=mode,
            min_lr=lr_min
        )

        self.noise_range = noise_range_t
        self.noise_pct = noise_pct
        self.noise_type = noise_type
        self.noise_std = noise_std
        self.noise_seed = noise_seed if noise_seed is not None else 42
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        if self.warmup_t:
            # Per-group linear warmup increment; start all groups at warmup_lr_init.
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]
        # LR values saved before the last noise perturbation (see _apply_noise).
        self.restore_lr = None

    def state_dict(self):
        """Minimal state needed to resume the wrapped plateau scheduler."""
        return {
            'best': self.lr_scheduler.best,
            'last_epoch': self.lr_scheduler.last_epoch,
        }

    def load_state_dict(self, state_dict):
        self.lr_scheduler.best = state_dict['best']
        if 'last_epoch' in state_dict:
            self.lr_scheduler.last_epoch = state_dict['last_epoch']

    # override the base class step fn completely
    def step(self, epoch, metric=None):
        """Advance one epoch: linear warmup first, then plateau stepping on
        *metric*, then optional LR noise within the configured epoch range."""
        if epoch <= self.warmup_t:
            lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps]
            super().update_groups(lrs)
        else:
            if self.restore_lr is not None:
                # restore actual LR from before our last noise perturbation before stepping base
                for i, param_group in enumerate(self.optimizer.param_groups):
                    param_group['lr'] = self.restore_lr[i]
                self.restore_lr = None

            self.lr_scheduler.step(metric, epoch)  # step the base scheduler

            if self.noise_range is not None:
                if isinstance(self.noise_range, (list, tuple)):
                    apply_noise = self.noise_range[0] <= epoch < self.noise_range[1]
                else:
                    apply_noise = epoch >= self.noise_range
                if apply_noise:
                    self._apply_noise(epoch)

    def _apply_noise(self, epoch):
        """Perturb every group's LR multiplicatively with seeded random noise,
        remembering the pre-noise values in self.restore_lr."""
        g = torch.Generator()
        g.manual_seed(self.noise_seed + epoch)
        if self.noise_type == 'normal':
            while True:
                # resample if noise out of percent limit, brute force but shouldn't spin much
                noise = torch.randn(1, generator=g).item()
                if abs(noise) < self.noise_pct:
                    break
        else:
            # uniform noise in (-noise_pct, noise_pct)
            noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct

        # apply the noise on top of previous LR, cache the old value so we can restore for normal
        # stepping of base scheduler
        restore_lr = []
        for i, param_group in enumerate(self.optimizer.param_groups):
            old_lr = float(param_group['lr'])
            restore_lr.append(old_lr)
            new_lr = old_lr + old_lr * noise
            param_group['lr'] = new_lr
        self.restore_lr = restore_lr
| 4,140 | 35.324561 | 97 | py |
DaVinci | DaVinci-main/scheduler/tanh_lr.py | """ TanH Scheduler
TanH schedule with warmup, cycle/restarts, noise.
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
import math
import numpy as np
import torch
from .scheduler import Scheduler
_logger = logging.getLogger(__name__)
class TanhLRScheduler(Scheduler):
    """
    Hyberbolic-Tangent decay with restarts.
    This is described in the paper https://arxiv.org/abs/1806.01593

    The LR follows a tanh curve between lb and ub over each cycle of length
    t_initial (scaled by t_mul per restart), with linear warmup and optional
    per-cycle decay and a cycle limit.
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 t_initial: int,
                 lb: float = -6.,
                 ub: float = 4.,
                 t_mul: float = 1.,
                 lr_min: float = 0.,
                 decay_rate: float = 1.,
                 warmup_t=0,
                 warmup_lr_init=0,
                 warmup_prefix=False,
                 cycle_limit=0,
                 t_in_epochs=True,
                 noise_range_t=None,
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=42,
                 initialize=True) -> None:
        super().__init__(
            optimizer, param_group_field="lr",
            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
            initialize=initialize)

        assert t_initial > 0
        assert lr_min >= 0
        assert lb < ub
        assert cycle_limit >= 0
        assert warmup_t >= 0
        assert warmup_lr_init >= 0
        self.lb = lb
        self.ub = ub
        self.t_initial = t_initial
        self.t_mul = t_mul
        self.lr_min = lr_min
        self.decay_rate = decay_rate
        self.cycle_limit = cycle_limit
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.warmup_prefix = warmup_prefix
        self.t_in_epochs = t_in_epochs
        if self.warmup_t:
            # Warm up toward either the base LRs (prefix mode) or the schedule
            # value at t = warmup_t, so there is no jump when warmup ends.
            t_v = self.base_values if self.warmup_prefix else self._get_lr(self.warmup_t)
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in t_v]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]

    def _get_lr(self, t):
        """Return the list of per-group LRs at step/epoch t."""
        if t < self.warmup_t:
            lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
        else:
            if self.warmup_prefix:
                t = t - self.warmup_t

            if self.t_mul != 1:
                # Geometric cycle lengths: find current cycle i, its length t_i,
                # and the offset t_curr within that cycle.
                i = math.floor(math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul))
                t_i = self.t_mul ** i * self.t_initial
                t_curr = t - (1 - self.t_mul ** i) / (1 - self.t_mul) * self.t_initial
            else:
                i = t // self.t_initial
                t_i = self.t_initial
                t_curr = t - (self.t_initial * i)

            if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit):
                # Per-cycle decay of both the min and max LR.
                gamma = self.decay_rate ** i
                lr_min = self.lr_min * gamma
                lr_max_values = [v * gamma for v in self.base_values]

                tr = t_curr / t_i
                # tanh interpolation from lr_max (tr=0) down to lr_min (tr=1).
                lrs = [
                    lr_min + 0.5 * (lr_max - lr_min) * (1 - math.tanh(self.lb * (1. - tr) + self.ub * tr))
                    for lr_max in lr_max_values
                ]
            else:
                # Past the cycle limit: hold at the fully-decayed minimum.
                lrs = [self.lr_min * (self.decay_rate ** self.cycle_limit) for _ in self.base_values]
        return lrs

    def get_epoch_values(self, epoch: int):
        if self.t_in_epochs:
            return self._get_lr(epoch)
        else:
            return None

    def get_update_values(self, num_updates: int):
        if not self.t_in_epochs:
            return self._get_lr(num_updates)
        else:
            return None

    def get_cycle_length(self, cycles=0):
        """Total number of steps covered by the given number of cycles
        (defaults to cycle_limit, minimum 1)."""
        if not cycles:
            cycles = self.cycle_limit
        cycles = max(1, cycles)
        if self.t_mul == 1.0:
            return self.t_initial * cycles
        else:
            # Sum of the geometric series of cycle lengths.
            return int(math.floor(-self.t_initial * (self.t_mul ** cycles - 1) / (1 - self.t_mul)))
| 4,045 | 32.438017 | 106 | py |
DaVinci | DaVinci-main/scheduler/cosine_lr.py | """ Cosine Scheduler
Cosine LR schedule with warmup, cycle/restarts, noise.
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
import math
import numpy as np
import torch
from .scheduler import Scheduler
from pdb import set_trace as breakpoint
_logger = logging.getLogger(__name__)
class CosineLRScheduler(Scheduler):
    """
    Cosine decay with restarts.
    This is described in the paper https://arxiv.org/abs/1608.03983.

    Inspiration from
    https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py

    The LR follows a half-cosine from the (per-cycle decayed) max down to
    lr_min over each cycle of length t_initial (scaled by t_mul per restart),
    with linear warmup and an optional cycle limit.
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 t_initial: int,
                 t_mul: float = 1.,
                 lr_min: float = 0.,
                 decay_rate: float = 1.,
                 warmup_t=0,
                 warmup_lr_init=0,
                 warmup_prefix=True,
                 cycle_limit=0,
                 t_in_epochs=True,
                 noise_range_t=None,
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=42,
                 initialize=True) -> None:
        super().__init__(
            optimizer, param_group_field="lr",
            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
            initialize=initialize)

        assert t_initial > 0
        assert lr_min >= 0
        if t_initial == 1 and t_mul == 1 and decay_rate == 1:
            _logger.warning("Cosine annealing scheduler will have no effect on the learning "
                            "rate since t_initial = t_mul = eta_mul = 1.")
        self.t_initial = t_initial
        self.t_mul = t_mul
        self.lr_min = lr_min
        self.decay_rate = decay_rate
        self.cycle_limit = cycle_limit
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.warmup_prefix = warmup_prefix
        self.t_in_epochs = t_in_epochs
        if self.warmup_t:
            # Per-group linear warmup increment; start all groups at warmup_lr_init.
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]

    def _get_lr(self, t):
        """Return the list of per-group LRs at step/epoch t."""
        if t < self.warmup_t:
            lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
        else:
            if self.warmup_prefix:
                # Warmup time does not count toward the cosine cycle clock.
                t = t - self.warmup_t

            if self.t_mul != 1:
                # Geometric cycle lengths: find current cycle i, its length t_i,
                # and the offset t_curr within that cycle.
                i = math.floor(math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul))
                t_i = self.t_mul ** i * self.t_initial
                t_curr = t - (1 - self.t_mul ** i) / (1 - self.t_mul) * self.t_initial
            else:
                i = t // self.t_initial
                t_i = self.t_initial
                t_curr = t - (self.t_initial * i)

            # Per-cycle decay of both the min and max LR.
            gamma = self.decay_rate ** i
            lr_min = self.lr_min * gamma
            lr_max_values = [v * gamma for v in self.base_values]

            if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit):
                lrs = [
                    lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos(math.pi * t_curr / t_i)) for lr_max in lr_max_values
                ]
            else:
                # Past the cycle limit: hold at lr_min.
                lrs = [self.lr_min for _ in self.base_values]

        return lrs

    def get_epoch_values(self, epoch: int):
        if self.t_in_epochs:
            return self._get_lr(epoch)
        else:
            return None

    def get_update_values(self, num_updates: int):
        if not self.t_in_epochs:
            return self._get_lr(num_updates)
        else:
            return None

    def get_cycle_length(self, cycles=0):
        """Total number of steps covered by the given number of cycles
        (defaults to cycle_limit, minimum 1)."""
        if not cycles:
            cycles = self.cycle_limit
        cycles = max(1, cycles)
        if self.t_mul == 1.0:
            return self.t_initial * cycles
        else:
            # Sum of the geometric series of cycle lengths.
            return int(math.floor(-self.t_initial * (self.t_mul ** cycles - 1) / (1 - self.t_mul)))
| 4,027 | 33.135593 | 121 | py |
DaVinci | DaVinci-main/scheduler/scheduler.py | from typing import Dict, Any
import torch
class Scheduler:
    """ Parameter Scheduler Base Class
    A scheduler base class that can be used to schedule any optimizer parameter groups.
    Unlike the builtin PyTorch schedulers, this is intended to be consistently called
    * At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value
    * At the END of each optimizer update, after incrementing the update count, to calculate next update's value
    The schedulers built on this should try to remain as stateless as possible (for simplicity).
    This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch'
    and -1 values for special behaviour. All epoch and update counts must be tracked in the training
    code and explicitly passed in to the schedulers on the corresponding step or step_update call.
    Based on ideas from:
     * https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler
     * https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers
    """
    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 param_group_field: str,
                 noise_range_t=None,
                 noise_type='normal',
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=None,
                 initialize: bool = True) -> None:
        self.optimizer = optimizer
        # Name of the optimizer param-group entry being scheduled (e.g. "lr").
        self.param_group_field = param_group_field
        self._initial_param_group_field = f"initial_{param_group_field}"
        if initialize:
            # Snapshot each group's current value under the "initial_*" key.
            for i, group in enumerate(self.optimizer.param_groups):
                if param_group_field not in group:
                    raise KeyError(f"{param_group_field} missing from param_groups[{i}]")
                group.setdefault(self._initial_param_group_field, group[param_group_field])
        else:
            # When resuming, the "initial_*" snapshot must already exist.
            for i, group in enumerate(self.optimizer.param_groups):
                if self._initial_param_group_field not in group:
                    raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]")
        # Base (initial) value per param group; subclasses schedule relative to these.
        self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups]
        self.metric = None  # any point to having this for all?
        self.noise_range_t = noise_range_t
        self.noise_pct = noise_pct
        self.noise_type = noise_type
        self.noise_std = noise_std
        self.noise_seed = noise_seed if noise_seed is not None else 42
        # Push the base values into the optimizer immediately.
        self.update_groups(self.base_values)
    def state_dict(self) -> Dict[str, Any]:
        """Serializable scheduler state (everything except the optimizer itself)."""
        return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Restore state produced by :meth:`state_dict`."""
        self.__dict__.update(state_dict)
    def get_epoch_values(self, epoch: int):
        # Subclasses stepping per epoch override this; None means "no update".
        return None
    def get_update_values(self, num_updates: int):
        # Subclasses stepping per optimizer update override this.
        return None
    def step(self, epoch: int, metric: float = None) -> None:
        """Apply the schedule for the given epoch (call at end of each epoch)."""
        self.metric = metric
        values = self.get_epoch_values(epoch)
        if values is not None:
            values = self._add_noise(values, epoch)
            self.update_groups(values)
    def step_update(self, num_updates: int, metric: float = None):
        """Apply the schedule for the given update count (call after each optimizer step)."""
        self.metric = metric
        values = self.get_update_values(num_updates)
        if values is not None:
            values = self._add_noise(values, num_updates)
            self.update_groups(values)
    def update_groups(self, values):
        """Write scheduled value(s) into every optimizer param group."""
        if not isinstance(values, (list, tuple)):
            # Broadcast a scalar to all param groups.
            values = [values] * len(self.optimizer.param_groups)
        for param_group, value in zip(self.optimizer.param_groups, values):
            param_group[self.param_group_field] = value
    def _add_noise(self, lrs, t):
        """Optionally perturb scheduled values with seeded, step-dependent noise."""
        if self.noise_range_t is not None:
            # noise_range_t may be a (start, end) window or a single start step.
            if isinstance(self.noise_range_t, (list, tuple)):
                apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1]
            else:
                apply_noise = t >= self.noise_range_t
            if apply_noise:
                # Seed per step so noise is reproducible across restarts.
                g = torch.Generator()
                g.manual_seed(self.noise_seed + t)
                if self.noise_type == 'normal':
                    while True:
                        # resample if noise out of percent limit, brute force but shouldn't spin much
                        noise = torch.randn(1, generator=g).item()
                        if abs(noise) < self.noise_pct:
                            break
                else:
                    # Uniform noise in [-noise_pct, +noise_pct].
                    noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct
                # Noise is relative (multiplicative) to each scheduled value.
                lrs = [v + v * noise for v in lrs]
        return lrs
| 4,750 | 43.820755 | 112 | py |
DaVinci | DaVinci-main/scheduler/step_lr.py | """ Step Scheduler
Basic step LR schedule with warmup, noise.
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import torch
from .scheduler import Scheduler
class StepLRScheduler(Scheduler):
    """Step LR schedule: decay the LR by ``decay_rate`` every ``decay_t`` steps,
    with an optional linear warmup from ``warmup_lr_init``.
    """
    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 decay_t: float,
                 decay_rate: float = 1.,
                 warmup_t=0,
                 warmup_lr_init=0,
                 t_in_epochs=True,
                 noise_range_t=None,
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=42,
                 initialize=True,
                 ) -> None:
        super().__init__(
            optimizer, param_group_field="lr",
            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
            initialize=initialize)
        self.decay_t = decay_t          # interval (epochs or updates) between decays
        self.decay_rate = decay_rate    # multiplicative decay applied per interval
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.t_in_epochs = t_in_epochs  # True: step per epoch; False: per update
        if self.warmup_t:
            # Per-group linear warmup slope from warmup_lr_init to the base LR.
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]
    def _get_lr(self, t):
        """LR values at step ``t``: linear warmup, then stepwise exponential decay."""
        if t < self.warmup_t:
            lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
        else:
            # Integer division gives the number of completed decay intervals.
            lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values]
        return lrs
    def get_epoch_values(self, epoch: int):
        # Only active when stepping per epoch.
        if self.t_in_epochs:
            return self._get_lr(epoch)
        else:
            return None
    def get_update_values(self, num_updates: int):
        # Only active when stepping per optimizer update.
        if not self.t_in_epochs:
            return self._get_lr(num_updates)
        else:
            return None
| 1,902 | 28.734375 | 105 | py |
DaVinci | DaVinci-main/scheduler/scheduler_factory.py | """ Scheduler Factory
Hacked together by / Copyright 2020 Ross Wightman
"""
from .cosine_lr import CosineLRScheduler
from .tanh_lr import TanhLRScheduler
from .step_lr import StepLRScheduler
from .plateau_lr import PlateauLRScheduler
from torch.optim.lr_scheduler import LambdaLR
def create_scheduler(args, optimizer):
    """Build an LR scheduler from ``args.sched`` for the given optimizer.

    Returns a ``(lr_scheduler, num_epochs)`` pair; ``num_epochs`` may be
    extended beyond ``args.epochs`` for cyclic schedules (cosine/tanh) to
    include cooldown epochs.
    """
    num_epochs = args.epochs
    # Optional LR noise window: a fraction (or [start, end] fractions) of training.
    if getattr(args, 'lr_noise', None) is not None:
        lr_noise = getattr(args, 'lr_noise')
        if isinstance(lr_noise, (list, tuple)):
            # Convert fractional positions into absolute epoch numbers.
            noise_range = [n * num_epochs for n in lr_noise]
            if len(noise_range) == 1:
                noise_range = noise_range[0]
        else:
            noise_range = lr_noise * num_epochs
    else:
        noise_range = None
    lr_scheduler = None
    if args.sched == 'cosine':
        # Cosine annealing with warmup, optional restarts and noise.
        lr_scheduler = CosineLRScheduler(
            optimizer,
            t_initial=num_epochs,
            t_mul=getattr(args, 'lr_cycle_mul', 1.),
            lr_min=args.min_lr,
            decay_rate=args.decay_rate,
            warmup_lr_init=args.warmup_lr,
            warmup_t=args.warmup_epochs,
            cycle_limit=getattr(args, 'lr_cycle_limit', 1),
            t_in_epochs=True,
            noise_range_t=noise_range,
            noise_pct=getattr(args, 'lr_noise_pct', 0.67),
            noise_std=getattr(args, 'lr_noise_std', 1.),
            noise_seed=getattr(args, 'seed', 42),
        )
        # Extend training to cover full cycles plus cooldown.
        num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs
    elif args.sched == 'tanh':
        # Hyperbolic-tangent decay with warmup.
        lr_scheduler = TanhLRScheduler(
            optimizer,
            t_initial=num_epochs,
            t_mul=getattr(args, 'lr_cycle_mul', 1.),
            lr_min=args.min_lr,
            warmup_lr_init=args.warmup_lr,
            warmup_t=args.warmup_epochs,
            cycle_limit=getattr(args, 'lr_cycle_limit', 1),
            t_in_epochs=True,
            noise_range_t=noise_range,
            noise_pct=getattr(args, 'lr_noise_pct', 0.67),
            noise_std=getattr(args, 'lr_noise_std', 1.),
            noise_seed=getattr(args, 'seed', 42),
        )
        num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs
    elif args.sched == 'step':
        # Stepwise decay every `decay_epochs`.
        lr_scheduler = StepLRScheduler(
            optimizer,
            decay_t=args.decay_epochs,
            decay_rate=args.decay_rate,
            warmup_lr_init=args.warmup_lr,
            warmup_t=args.warmup_epochs,
            noise_range_t=noise_range,
            noise_pct=getattr(args, 'lr_noise_pct', 0.67),
            noise_std=getattr(args, 'lr_noise_std', 1.),
            noise_seed=getattr(args, 'seed', 42),
        )
    elif args.sched == 'linear':
        """
        Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after
        a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.
        Args:
            optimizer (:class:`~torch.optim.Optimizer`):
                The optimizer for which to schedule the learning rate.
            num_warmup_steps (:obj:`int`):
                The number of steps for the warmup phase.
            num_training_steps (:obj:`int`):
                The total number of training steps.
            last_epoch (:obj:`int`, `optional`, defaults to -1):
                The index of the last epoch when resuming training.
        Return:
            :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
        """
        def lr_lambda(current_step: int):
            # Ramp up linearly during warmup, then decay linearly to 0.
            if current_step < args.num_warmup_steps:
                return float(current_step) / float(max(1, args.num_warmup_steps))
            return max(
                0.0, float(args.num_training_steps - current_step) / float(max(1, args.num_training_steps - args.num_warmup_steps))
            )
        lr_scheduler = LambdaLR(optimizer, lr_lambda, args.last_epoch)
    elif args.sched == 'plateau':
        # Reduce-on-plateau; direction depends on whether the eval metric is a loss.
        mode = 'min' if 'loss' in getattr(args, 'eval_metric', '') else 'max'
        lr_scheduler = PlateauLRScheduler(
            optimizer,
            decay_rate=args.decay_rate,
            patience_t=args.patience_epochs,
            lr_min=args.min_lr,
            mode=mode,
            warmup_lr_init=args.warmup_lr,
            warmup_t=args.warmup_epochs,
            cooldown_t=0,
            noise_range_t=noise_range,
            noise_pct=getattr(args, 'lr_noise_pct', 0.67),
            noise_std=getattr(args, 'lr_noise_std', 1.),
            noise_seed=getattr(args, 'seed', 42),
        )
    return lr_scheduler, num_epochs
| 4,603 | 39.034783 | 131 | py |
DaVinci | DaVinci-main/dataset/dalle_transforms.py | # --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# --------------------------------------------------------'
import torch
import torchvision.transforms.functional as F
from PIL import Image
import warnings
import math
import random
import numpy as np
class ToNumpy:
    """Convert a PIL image (or any array-like) into a CHW uint8 numpy array."""

    def __call__(self, pil_img):
        arr = np.array(pil_img, dtype=np.uint8)
        if arr.ndim < 3:
            # Grayscale input: append a trailing channel axis first.
            arr = np.expand_dims(arr, axis=-1)
        # HWC -> CHW
        return np.rollaxis(arr, 2)
class ToTensor:
    """Convert a PIL image (or any array-like) into a CHW torch tensor of ``dtype``."""

    def __init__(self, dtype=torch.float32):
        self.dtype = dtype

    def __call__(self, pil_img):
        arr = np.array(pil_img, dtype=np.uint8)
        if arr.ndim < 3:
            # Grayscale input: append a trailing channel axis first.
            arr = np.expand_dims(arr, axis=-1)
        arr = np.rollaxis(arr, 2)  # HWC -> CHW
        return torch.from_numpy(arr).to(dtype=self.dtype)
# Map PIL resampling constants to printable names (used by __repr__ below).
_pil_interpolation_to_str = {
    Image.NEAREST: 'PIL.Image.NEAREST',
    Image.BILINEAR: 'PIL.Image.BILINEAR',
    Image.BICUBIC: 'PIL.Image.BICUBIC',
    Image.LANCZOS: 'PIL.Image.LANCZOS',
    Image.HAMMING: 'PIL.Image.HAMMING',
    Image.BOX: 'PIL.Image.BOX',
}
def _pil_interp(method):
    """Translate an interpolation name into the matching PIL resampling constant.

    Unrecognized names (including 'bilinear') fall back to bilinear.
    """
    named = {
        'bicubic': Image.BICUBIC,
        'lanczos': Image.LANCZOS,
        'hamming': Image.HAMMING,
    }
    # default bilinear, do we want to allow nearest?
    return named.get(method, Image.BILINEAR)
# Candidate filters used when interpolation='random' is requested.
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
class RandomResizedCropAndInterpolationWithTwoPic:
    """Crop the given PIL Image to random size and aspect ratio with random interpolation.
    A crop of random size (default: of 0.08 to 1.0) of the original size and a random
    aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
    is finally resized to given size.
    This is popularly used to train the Inception networks.
    When ``second_size`` is given, __call__ returns two resized versions of the
    SAME crop (e.g. one for the patch encoder and one for a visual tokenizer).
    Args:
        size: expected output size of each edge
        second_size: optional second output size; enables the two-image return
        scale: range of size of the origin size cropped
        ratio: range of aspect ratio of the origin aspect ratio cropped
        interpolation: Default: PIL.Image.BILINEAR ('random' picks per call)
        second_interpolation: filter used for the second output
    """
    def __init__(self, size, second_size=None, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),
                 interpolation='bilinear', second_interpolation='lanczos'):
        if isinstance(size, tuple):
            self.size = size
        else:
            self.size = (size, size)
        if second_size is not None:
            if isinstance(second_size, tuple):
                self.second_size = second_size
            else:
                self.second_size = (second_size, second_size)
        else:
            self.second_size = None
        if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
            warnings.warn("range should be of kind (min, max)")
        if interpolation == 'random':
            # A filter will be drawn from this tuple on every call.
            self.interpolation = _RANDOM_INTERPOLATION
        else:
            self.interpolation = _pil_interp(interpolation)
        self.second_interpolation = _pil_interp(second_interpolation)
        self.scale = scale
        self.ratio = ratio
    @staticmethod
    def get_params(img, scale, ratio):
        """Get parameters for ``crop`` for a random sized crop.
        Args:
            img (PIL Image): Image to be cropped.
            scale (tuple): range of size of the origin size cropped
            ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for a random
                sized crop.
        """
        area = img.size[0] * img.size[1] # PIL .size is (width, height); total pixel area
        # Rejection-sample up to 10 crops with the target area/aspect ratio.
        for attempt in range(10):
            target_area = random.uniform(*scale) * area
            # Sample aspect ratio in log space so the distribution is symmetric.
            log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
            aspect_ratio = math.exp(random.uniform(*log_ratio))
            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))
            if w <= img.size[0] and h <= img.size[1]:
                i = random.randint(0, img.size[1] - h)
                j = random.randint(0, img.size[0] - w)
                return i, j, h, w
        # Fallback to central crop
        in_ratio = img.size[0] / img.size[1]
        if in_ratio < min(ratio):
            # Too tall: full width, clamp height to the min aspect ratio.
            w = img.size[0]
            h = int(round(w / min(ratio)))
        elif in_ratio > max(ratio):
            # Too wide: full height, clamp width to the max aspect ratio.
            h = img.size[1]
            w = int(round(h * max(ratio)))
        else: # whole image
            w = img.size[0]
            h = img.size[1]
        i = (img.size[1] - h) // 2
        j = (img.size[0] - w) // 2
        return i, j, h, w
    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be cropped and resized.
        Returns:
            PIL Image: Randomly cropped and resized image (a pair of images
            when ``second_size`` is configured — both from the same crop).
        """
        i, j, h, w = self.get_params(img, self.scale, self.ratio)
        if isinstance(self.interpolation, (tuple, list)):
            # 'random' mode: pick a filter per call.
            interpolation = random.choice(self.interpolation)
        else:
            interpolation = self.interpolation
        if self.second_size is None:
            return F.resized_crop(img, i, j, h, w, self.size, interpolation)
        else:
            return F.resized_crop(img, i, j, h, w, self.size, interpolation), \
                   F.resized_crop(img, i, j, h, w, self.second_size, self.second_interpolation)
    def __repr__(self):
        if isinstance(self.interpolation, (tuple, list)):
            interpolate_str = ' '.join([_pil_interpolation_to_str[x] for x in self.interpolation])
        else:
            interpolate_str = _pil_interpolation_to_str[self.interpolation]
        format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
        format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
        format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
        format_string += ', interpolation={0}'.format(interpolate_str)
        if self.second_size is not None:
            format_string += ', second_size={0}'.format(self.second_size)
            format_string += ', second_interpolation={0}'.format(_pil_interpolation_to_str[self.second_interpolation])
        format_string += ')'
        return format_string
| 6,719 | 35.923077 | 118 | py |
DaVinci | DaVinci-main/dataset/nlvr_dataset.py | import json
import os
from torch.utils.data import Dataset
from PIL import Image
from dataset.utils import pre_caption
class nlvr_dataset(Dataset):
    """NLVR2 dataset: each example pairs two images with one sentence and a binary label."""

    def __init__(self, ann_file, transform, image_root):
        self.ann = []
        for path in ann_file:
            self.ann += json.load(open(path, 'r'))
        self.transform = transform
        self.image_root = image_root
        self.max_words = 30

    def __len__(self):
        return len(self.ann)

    def _load_image(self, rel_path):
        # Read one image from disk and apply the shared transform.
        img = Image.open(os.path.join(self.image_root, rel_path)).convert('RGB')
        return self.transform(img)

    def __getitem__(self, index):
        ann = self.ann[index]
        image0 = self._load_image(ann['images'][0])
        image1 = self._load_image(ann['images'][1])
        sentence = pre_caption(ann['sentence'], self.max_words)
        # 'True'/'False' string label -> 1/0
        label = 1 if ann['label'] == 'True' else 0
        return image0, image1, sentence, label
DaVinci | DaVinci-main/dataset/utils.py | import re
def pre_question(question, max_ques_words):
    """Normalize a VQA question: lowercase, strip punctuation, cap word count."""
    cleaned = re.sub(
        r"([,.'!?\"()*#:;~])",
        '',
        question.lower(),
    )
    cleaned = cleaned.replace('-', ' ').replace('/', ' ')
    cleaned = cleaned.rstrip(' ')
    # truncate to at most max_ques_words space-separated tokens
    words = cleaned.split(' ')
    if len(words) > max_ques_words:
        cleaned = ' '.join(words[:max_ques_words])
    return cleaned
def pre_caption(caption, max_words):
    """Normalize a caption: lowercase, strip punctuation, collapse whitespace, cap word count."""
    cleaned = re.sub(
        r"([,.'!?\"()*#:;~])",
        '',
        caption.lower(),
    )
    cleaned = cleaned.replace('-', ' ').replace('/', ' ').replace('<person>', 'person')
    # squeeze runs of whitespace down to single spaces
    cleaned = re.sub(
        r"\s{2,}",
        ' ',
        cleaned,
    )
    cleaned = cleaned.rstrip('\n').strip(' ')
    # truncate to at most max_words space-separated tokens
    words = cleaned.split(' ')
    if len(words) > max_words:
        cleaned = ' '.join(words[:max_words])
    return cleaned
# from vqaTools.vqaEval import VQAEval
# from refTools.evaluation.refEvaluation import RefEvaluation
import json
import os
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
import utils
from tqdm import tqdm
# def vqa_eval(vqa, result_file, test_ques_path):
# vqaRes = vqa.loadRes(result_file, test_ques_path)
# # create vqaEval object by taking vqa and vqaRes
# vqaEval = VQAEval(vqa, vqaRes, n=2) # n is precision of accuracy (number of places after decimal), default is 2
# # evaluate results
# vqaEval.evaluate()
# # print accuracies
# print("\n")
# print("Overall Accuracy is: %.02f\n" % (vqaEval.accuracy['overall']))
# print("Per Answer Type Accuracy is the following:")
# for ansType in vqaEval.accuracy['perAnswerType']:
# print("%s : %.02f" % (ansType, vqaEval.accuracy['perAnswerType'][ansType]))
# print("\n")
# return vqaEval
def collect_result(result, result_dir, filename, is_json=True, is_list=True):
    """Gather per-rank results into one object on the main process.

    Each rank first writes its own shard (JSON or torch .pth), then after a
    barrier the main process reads every rank's shard and merges them
    (list concatenation when ``is_list`` else dict update). Non-main ranks
    return None.
    """
    if is_json:
        result_file = os.path.join(result_dir, '%s_rank%d.json'%(filename,utils.get_rank()))
        final_result_file = os.path.join(result_dir, '%s.json'%filename)
        json.dump(result,open(result_file,'w'))
    else:
        result_file = os.path.join(result_dir, '%s_rank%d.pth'%(filename,utils.get_rank()))
        final_result_file = os.path.join(result_dir, '%s.pth'%filename)
        torch.save(result,result_file)
    # Wait until every rank has written its shard before reading.
    dist.barrier()
    result = None
    if utils.is_main_process():
        # combine results from all processes
        if is_list:
            result = []
        else:
            result = {}
        for rank in range(utils.get_world_size()):
            if is_json:
                result_file = os.path.join(result_dir, '%s_rank%d.json'%(filename,rank))
                res = json.load(open(result_file,'r'))
            else:
                result_file = os.path.join(result_dir, '%s_rank%d.pth'%(filename,rank))
                res = torch.load(result_file)
            if is_list:
                result += res
            else:
                result.update(res)
    return result
def save_result(result, result_dir, filename, is_json=True, is_list=True):
    """Gather per-rank results and persist the merged object to disk.

    Like ``collect_result`` but the main process also writes the combined
    result to ``<result_dir>/<filename>.json``/``.pth``. Returns the final
    file path (on every rank, after a closing barrier).
    """
    if is_json:
        result_file = os.path.join(result_dir, '%s_rank%d.json'%(filename,utils.get_rank()))
        final_result_file = os.path.join(result_dir, '%s.json'%filename)
        json.dump(result,open(result_file,'w'))
    else:
        result_file = os.path.join(result_dir, '%s_rank%d.pth'%(filename,utils.get_rank()))
        final_result_file = os.path.join(result_dir, '%s.pth'%filename)
        torch.save(result,result_file)
    # Wait until every rank has written its shard before merging.
    dist.barrier()
    if utils.is_main_process():
        # combine results from all processes
        if is_list:
            result = []
        else:
            result = {}
        for rank in range(utils.get_world_size()):
            if is_json:
                result_file = os.path.join(result_dir, '%s_rank%d.json'%(filename,rank))
                res = json.load(open(result_file,'r'))
            else:
                result_file = os.path.join(result_dir, '%s_rank%d.pth'%(filename,rank))
                res = torch.load(result_file)
            if is_list:
                result += res
            else:
                result.update(res)
        if is_json:
            json.dump(result,open(final_result_file,'w'))
        else:
            torch.save(result,final_result_file)
        print('result file saved to %s'%final_result_file)
    # Second barrier so non-main ranks don't race ahead of the final write.
    dist.barrier()
    return final_result_file
# IoU function
def computeIoU(box1, box2):
    """Intersection-over-union of two boxes given as [x1, y1, w, h].

    Coordinates are treated as inclusive pixel indices, matching the
    ``x1 + w - 1`` right-edge convention used below.
    """
    # each box is of [x1, y1, w, h]
    inter_x1 = max(box1[0], box2[0])
    inter_y1 = max(box1[1], box2[1])
    inter_x2 = min(box1[0]+box1[2]-1, box2[0]+box2[2]-1)
    inter_y2 = min(box1[1]+box1[3]-1, box2[1]+box2[3]-1)
    # Fix: use <= so boxes overlapping in exactly one pixel row/column
    # (inter_x1 == inter_x2) count the 1-pixel intersection instead of 0,
    # consistent with the inclusive +1 width/height formula below.
    if inter_x1 <= inter_x2 and inter_y1 <= inter_y2:
        inter = (inter_x2-inter_x1+1)*(inter_y2-inter_y1+1)
    else:
        inter = 0
    union = box1[2]*box1[3] + box2[2]*box2[3] - inter
    if union == 0:
        # Both boxes degenerate (zero area): define IoU as 0 instead of dividing by zero.
        return 0.0
    return float(inter)/union
| 5,225 | 30.865854 | 118 | py |
DaVinci | DaVinci-main/dataset/vqa_dataset.py | import os
import json
import random
from PIL import Image
from torch.utils.data import Dataset
from dataset.utils import pre_question
from collections import Counter
class vqa_dataset(Dataset):
    """VQA/Visual-Genome question-answering dataset.

    Returns (image, question, question_id) for the test split, and
    (image, question, answers, weights, question_id) for train/val.
    """
    def __init__(self, ann_file, transform, vqa_root, vg_root, eos='[SEP]', split="train", max_ques_words=30, answer_list='./vqaTools/answer_list.json'):
        self.split = split
        self.ann = []
        for f in ann_file:
            self.ann += json.load(open(f,'r'))
        self.transform = transform
        self.vqa_root = vqa_root  # image root for records with dataset == 'vqa'
        self.vg_root = vg_root    # image root for records with dataset == 'vg'
        self.max_ques_words = max_ques_words
        self.eos = eos
        # self.max_ques_words = 50
        # self.answer_list = ["oov"]+json.load(open(answer_list,'r'))
        # self.answer2id = {ans:i for i, ans in enumerate(self.answer_list)}
        if split=='test':
            self.max_ques_words = 50 # do not limit question length during test
            self.answer_list = json.load(open(answer_list,'r'))
    def __len__(self):
        return len(self.ann)
    def __getitem__(self, index):
        ann = self.ann[index]
        # Pick the image root matching the record's source dataset.
        if ann['dataset']=='vqa':
            image_path = os.path.join(self.vqa_root,ann['image'])
        elif ann['dataset']=='vg':
            image_path = os.path.join(self.vg_root,ann['image'])
        image = Image.open(image_path).convert('RGB')
        image = self.transform(image)
        if self.split == 'test':
            question = pre_question(ann['question'],self.max_ques_words)
            question_id = ann['question_id']
            return image, question, question_id
        elif self.split=='train' or self.split=='val':
            question = pre_question(ann['question'],self.max_ques_words)
            question_id = ann['question_id']
            if ann['dataset']=='vqa':
                # VQA has 10 human answers; weight each distinct answer by its frequency.
                answer_weight = {}
                for answer in ann['answer']:
                    if answer in answer_weight.keys():
                        answer_weight[answer] += 1/len(ann['answer'])
                    else:
                        answer_weight[answer] = 1/len(ann['answer'])
                answers = list(answer_weight.keys())
                weights = list(answer_weight.values())
            elif ann['dataset']=='vg':
                # VG has a single answer; fixed weight 0.5.
                answers = [ann['answer']]
                weights = [0.5]
            # answers = [answer+self.eos for answer in answers]
            return image, question, answers, weights, question_id
DaVinci | DaVinci-main/dataset/dist_dataset.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from typing import List, Any
import warnings
import random
from itertools import cycle
import torch
from torch.utils.data import IterableDataset
from util.hdfs_io import hopen, hlist_files
class DistLineReadingDataset(IterableDataset):  # pylint: disable=W0223
    """
    iterate a set of folders.

    Streams text lines from a list of files (local or HDFS), sharding the
    file list first across distributed ranks and then across DataLoader
    workers, so every line is read by exactly one worker.
    """
    def __init__(self,
                 data_path: str,
                 rank: int = 0,
                 world_size: int = 1,
                 shuffle: bool = False,
                 repeat: bool = False):
        super().__init__()
        self.shuffle = shuffle
        self.rank = rank
        self.world_size = world_size
        # data_path may be a comma-separated list of directories/files.
        self.files = hlist_files(data_path.split(','))
        # Drop Hadoop _SUCCESS marker files.
        self.files = [f for f in self.files if f.find('_SUCCESS') < 0]
        self.is_hdfs = data_path.startswith('hdfs')
        self.repeat = repeat
        print('[DATA]--all dataset containing {} files.'.format(len(self.files)))
        if len(self.files) % self.world_size != 0:
            # Uneven split: ranks may see slightly different numbers of files.
            print('[DATA]--Whole dataset file num %s cannot split to worldsize %s ' %
                  (len(self.files), self.world_size))
        sys.stdout.flush()
    def generate(self):
        """Yield raw text lines from this rank/worker's shard of files.

        Loops forever when ``repeat`` is True (reshuffling per pass when
        ``shuffle`` is set).
        """
        # First level of sharding: by distributed rank.
        if self.world_size == 1 or len(self.files) == 1:
            cur_dataloader_files = self.files
        else:
            cur_dataloader_files = split_shard(
                self.files, self.rank, self.world_size)
        while True:
            if self.shuffle:
                random.shuffle(cur_dataloader_files)
            # Second level of sharding: by DataLoader worker within this rank.
            worker_info = torch.utils.data.get_worker_info()
            if worker_info is not None:
                if len(cur_dataloader_files) % worker_info.num_workers != 0:
                    print('[DATA]--current dataloader %s file num %s cannot split to worker_num %s ' %
                          (self.rank, len(cur_dataloader_files), worker_info.num_workers))
                cur_worker_files = split_shard(
                    cur_dataloader_files, worker_info.id, worker_info.num_workers)
                if worker_info.id == 0:
                    print("[DataLoader] --> Rank:{} Workers:[{} ~ {}][{}] Size of process file:{} ...".format(
                        self.rank, 0, worker_info.num_workers - 1, worker_info.id, len(cur_dataloader_files)))
            else:
                cur_worker_files = cur_dataloader_files
            if self.shuffle:
                random.shuffle(cur_worker_files)
            for filepath in cur_worker_files:
                if self.is_hdfs:
                    # HDFS streams yield bytes; decode to str before yielding.
                    with hopen(filepath, 'r') as reader:
                        for line in reader:
                            yield line.decode()
                    continue
                with open(filepath, 'r') as reader:
                    for line in reader:
                        yield line
            if not self.repeat:
                break
    def __iter__(self):
        return self.generate()
def split_shard(data: List[Any], shard_idx: int, shard_size: int):
    """Return the contiguous slice of ``data`` belonging to shard ``shard_idx``
    out of ``shard_size`` near-equal shards.

    Raises RuntimeError when there are fewer items than shards.
    """
    total = len(data)
    if total < shard_size:
        raise RuntimeError("num:{} < shard size:{}".format(total, shard_size))
    lo = (total * shard_idx) // shard_size
    hi = (total * (shard_idx + 1)) // shard_size
    return data[lo:hi]
| 3,304 | 35.318681 | 113 | py |
DaVinci | DaVinci-main/dataset/ve_dataset.py | import json
import os
from torch.utils.data import Dataset
from PIL import Image
from dataset.utils import pre_caption
class ve_dataset(Dataset):
    """SNLI-VE visual-entailment dataset yielding (image, sentence, label-id) triples."""

    def __init__(self, ann_file, transform, image_root, max_words=30):
        self.ann = json.load(open(ann_file, 'r'))
        self.transform = transform
        self.image_root = image_root
        self.max_words = max_words
        # textual entailment labels -> class ids
        self.labels = {'entailment': 2, 'neutral': 1, 'contradiction': 0}

    def __len__(self):
        return len(self.ann)

    def __getitem__(self, index):
        ann = self.ann[index]
        path = os.path.join(self.image_root, '%s.jpg' % ann['image'])
        image = self.transform(Image.open(path).convert('RGB'))
        sentence = pre_caption(ann['sentence'], self.max_words)
        return image, sentence, self.labels[ann['label']]
| 919 | 28.677419 | 80 | py |
DaVinci | DaVinci-main/dataset/caption_dataset.py | import json
import os
import random
from torch.utils.data import Dataset
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
Image.MAX_IMAGE_PIXELS = None
from dataset.utils import pre_caption
from dataset.dist_dataset import DistLineReadingDataset
import traceback
from base64 import b64decode
import io
import sys
import torch
class re_train_dataset(Dataset):
    """Retrieval training set: yields (image, caption, dense image-id index)."""

    def __init__(self, ann_file, transform, image_root, max_words=30):
        self.ann = []
        for path in ann_file:
            self.ann += json.load(open(path, 'r'))
        self.transform = transform
        self.image_root = image_root
        self.max_words = max_words
        # map each distinct image_id to a dense index, in first-seen order
        self.img_ids = {}
        next_idx = 0
        for ann in self.ann:
            img_id = ann['image_id']
            if img_id not in self.img_ids:
                self.img_ids[img_id] = next_idx
                next_idx += 1

    def __len__(self):
        return len(self.ann)

    def __getitem__(self, index):
        ann = self.ann[index]
        image = Image.open(os.path.join(self.image_root, ann['image'])).convert('RGB')
        image = self.transform(image)
        caption = pre_caption(ann['caption'], self.max_words)
        return image, caption, self.img_ids[ann['image_id']]
class re_eval_dataset(Dataset):
    """Retrieval eval set: one item per image, with text<->image index maps
    (img2txt / txt2img) built over all captions for recall evaluation."""

    def __init__(self, ann_file, transform, image_root, max_words=30):
        self.ann = json.load(open(ann_file, 'r'))
        self.transform = transform
        self.image_root = image_root
        self.max_words = max_words

        self.text = []
        self.image = []
        self.txt2img = {}
        self.img2txt = {}

        txt_id = 0
        for img_id, ann in enumerate(self.ann):
            self.image.append(ann['image'])
            self.img2txt[img_id] = []
            for caption in ann['caption']:
                self.text.append(pre_caption(caption, self.max_words))
                self.img2txt[img_id].append(txt_id)
                self.txt2img[txt_id] = img_id
                txt_id += 1

    def __len__(self):
        return len(self.image)

    def __getitem__(self, index):
        path = os.path.join(self.image_root, self.ann[index]['image'])
        image = self.transform(Image.open(path).convert('RGB'))
        return image, index
class pretrain_dataset(DistLineReadingDataset):
    """Streaming image-text pretraining dataset over (possibly HDFS) JSON lines.

    Each yielded record is a dict with a patch-transformed image, a
    visual-token-transformed image (both views of the same random crop) and a
    cleaned caption. Broken records are logged and skipped.
    """
    def __init__(self, config, data_path, rank=0, world_size=1, shuffle=True, repeat=True, transform=None, common_transform=None,
                 patch_transform=None, visual_token_transform=None, max_words=30):
        super().__init__(data_path, rank, world_size, shuffle, repeat)
        self.config = config
        self.transform = transform
        self.common_transform = common_transform              # shared crop/flip producing two views
        self.patch_transform = patch_transform                # view for the patch encoder
        self.visual_token_transform = visual_token_transform  # view for the visual tokenizer
        self.max_words = max_words

    def __iter__(self):
        for example in self.generate():
            try:
                ann = json.loads(example)

                # Fix: a record with neither caption key previously left
                # `caption` unbound and relied on the broad except below
                # (NameError + traceback spam) to skip it; skip it explicitly.
                if self.config['caption_name'] in ann:
                    caption = ann[self.config['caption_name']]
                elif "TEXT" in ann:
                    caption = ann["TEXT"]
                else:
                    continue
                if isinstance(caption, list):
                    caption = random.choice(caption)

                # Image payload may live under either key; skip if absent.
                if "b64_resized_binary" in ann:
                    image_str = b64decode(ann["b64_resized_binary"])
                elif self.config['image_name'] in ann:
                    image_str = b64decode(ann[self.config['image_name']])
                else:
                    continue
                try:
                    image = Image.open(io.BytesIO(image_str)).convert("RGB")
                except Exception as e:
                    print("ERROR: encounter broken data, image reading error", e)
                    sys.stdout.flush()
                    continue

                # Two augmented views of the same crop: one for patches,
                # one for the discrete visual tokenizer.
                for_patches, for_visual_tokens = self.common_transform(image)
                res = {
                    'image': self.patch_transform(for_patches),
                    'visual_token_image': self.visual_token_transform(for_visual_tokens),
                    'caption': pre_caption(caption, self.max_words),
                }
                yield res
            except Exception as e:
                # Last-resort guard so one malformed line never kills the stream.
                print(traceback.format_exc())
                print('encounter broken data: %s' % e)
                print('-'*20)
                sys.stdout.flush()

    def collate_fn(self, data):
        """Split per-sample dicts into parallel lists (images, visual-token images, captions)."""
        images = []
        captions = []
        visual_token_images = []
        for sample in data:
            images.append(sample["image"])
            visual_token_images.append(sample["visual_token_image"])
            captions.append(sample["caption"])
        return (images, visual_token_images, captions)
class pretrain_dataset_c4(DistLineReadingDataset):
    """Streaming text-only (C4) pretraining dataset; the image field is left empty."""

    def __init__(self, config, data_path, rank=0, world_size=1, shuffle=True, repeat=True, transform=None, max_words=30):
        super().__init__(data_path, rank, world_size, shuffle, repeat)
        self.config = config
        self.transform = transform
        self.max_words = max_words

    def __iter__(self):
        for example in self.generate():
            try:
                ann = json.loads(example)
                caption = ann[self.config['caption_name']]
                if isinstance(caption, list):
                    caption = random.choice(caption)
                yield {
                    'image': "",
                    'caption': pre_caption(caption, self.max_words),
                }
            except Exception as e:
                # Guard so one malformed line never kills the stream.
                print(traceback.format_exc())
                print('encounter broken data: %s' % e)
                print('-'*20)
                sys.stdout.flush()

    def collate_fn(self, data):
        """Split per-sample dicts into parallel lists (images, captions)."""
        images = []
        captions = []
        for sample in data:
            images.append(sample["image"])
            captions.append(sample["caption"])
        return (images, captions)
# class pretrain_dataset(Dataset):
# def __init__(self, ann_file, transform, max_words=30):
# self.ann = []
# for f in ann_file:
# self.ann += json.load(open(f,'r'))
# self.transform = transform
# self.max_words = max_words
#
#
# def __len__(self):
# return len(self.ann)
#
#
# def __getitem__(self, index):
#
# ann = self.ann[index]
#
# if type(ann['caption']) == list:
# caption = pre_caption(random.choice(ann['caption']), self.max_words)
# else:
# caption = pre_caption(ann['caption'], self.max_words)
#
# image = Image.open(ann['image']).convert('RGB')
# image = self.transform(image)
#
# return image, caption
#
| 8,210 | 31.713147 | 129 | py |
DaVinci | DaVinci-main/dataset/__init__.py | import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from dataset.dalle_transforms import RandomResizedCropAndInterpolationWithTwoPic
from PIL import Image
from timm.data.constants import \
IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from dataset.caption_dataset import re_train_dataset, re_eval_dataset, pretrain_dataset, pretrain_dataset_c4
from dataset.nlvr_dataset import nlvr_dataset
from dataset.ve_dataset import ve_dataset
from dataset.vqa_dataset import vqa_dataset
from dataset.gen_dataset import gen_dataset
from dataset.randaugment import RandomAugment
import os
# Epsilon of the logit-Laplace distribution used by the DALL-E dVAE input mapping.
logit_laplace_eps: float = 0.1


def map_pixels(x: torch.Tensor) -> torch.Tensor:
    """Squeeze pixel values from [0, 1] into [eps, 1 - eps] for the dVAE input."""
    if x.dtype != torch.float:
        raise ValueError('expected input to have type float')
    return x * (1 - 2 * logit_laplace_eps) + logit_laplace_eps
def create_dataset(dataset, config):
    """Build the dataset object(s) for a given task.

    Args:
        dataset: task name — one of 'pretrain', 'dalle_gen', 're', 'vqa',
            'nlvr', 've', 'gen'.
        config: configuration mapping with preprocessing options (image_res,
            second_input_size, discrete_vae_type, ...) and annotation paths.

    Returns:
        'pretrain'  -> (pair_dataset, c4_dataset);
        'dalle_gen' -> a single validation dataset;
        other tasks -> (train_dataset, val_dataset, test_dataset).

    Raises:
        ValueError: if `config['discrete_vae_type']` or `dataset` is not
            recognized (previously these fell through to a NameError or an
            implicit None return).
    """
    # CLIP normalization statistics.
    normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))

    # Shared geometric augmentation producing two crops: one at image_res for
    # the patch encoder and one at second_input_size for the visual tokenizer.
    common_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        RandomResizedCropAndInterpolationWithTwoPic(
            size=config["image_res"], second_size=config["second_input_size"],
            interpolation="bicubic", second_interpolation="lanczos",
        ),
    ])

    patch_transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    vae_type = config['discrete_vae_type']
    if vae_type == 'dall-e':
        # DALL-E's dVAE expects pixels mapped away from the {0, 1} boundaries.
        visual_token_transform = transforms.Compose([
            transforms.ToTensor(),
            map_pixels,
        ])
    elif vae_type == 'vqgan':
        visual_token_transform = transforms.Compose([
            transforms.ToTensor(),
        ])
    else:
        # Fail fast instead of leaving visual_token_transform undefined.
        raise ValueError(f"unknown discrete_vae_type: {vae_type!r}")

    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(config['image_res'], scale=(0.5, 1.0), interpolation=Image.BICUBIC),
        transforms.RandomHorizontalFlip(),
        RandomAugment(2, 7, isPIL=True, augs=['Identity', 'AutoContrast', 'Equalize', 'Brightness', 'Sharpness',
                                              'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']),
        transforms.ToTensor(),
        normalize,
    ])
    test_transform = transforms.Compose([
        transforms.Resize((config['image_res'], config['image_res']), interpolation=Image.BICUBIC),
        transforms.ToTensor(),
        normalize,
    ])

    if dataset == 'pretrain':
        # Image-text pair stream plus a text-only C4 stream.
        # NOTE(review): max_words=30 is hard-coded here while the C4 stream
        # reads config["enc_dec_max_words"] — confirm this asymmetry is intended.
        pair_dataset = pretrain_dataset(config, config['train_file'], rank=int(os.environ.get('RANK') or 0),
                                        world_size=int(os.environ.get('WORLD_SIZE') or 1), shuffle=True,
                                        repeat=True,
                                        common_transform=common_transform,
                                        patch_transform=patch_transform,
                                        visual_token_transform=visual_token_transform,
                                        max_words=30)
        c4_dataset = pretrain_dataset_c4(config, config['c4_train_file'], rank=int(os.environ.get('RANK') or 0),
                                         world_size=int(os.environ.get('WORLD_SIZE') or 1), shuffle=True,
                                         repeat=True,
                                         transform=None,
                                         max_words=config["enc_dec_max_words"])
        return pair_dataset, c4_dataset
    elif dataset == 'dalle_gen':
        # NOTE(review): gen_dataset's visible signature
        # (ann_file, transform, image_root, split, max_words, prompt) does not
        # accept these transform kwargs — confirm against dataset/gen_dataset.py.
        val_dataset = gen_dataset(config['val_file'], test_transform, config['image_root'], 'val',
                                  common_transform=common_transform,
                                  patch_transform=patch_transform,
                                  visual_token_transform=visual_token_transform)
        return val_dataset
    elif dataset == 're':
        train_dataset = re_train_dataset(config['train_file'], train_transform, config['image_root'])
        val_dataset = re_eval_dataset(config['val_file'], test_transform, config['image_root'])
        test_dataset = re_eval_dataset(config['test_file'], test_transform, config['image_root'])
        return train_dataset, val_dataset, test_dataset
    elif dataset == 'vqa':
        train_dataset = vqa_dataset(config['train_file'], train_transform, config['vqa_root'], config['vg_root'], split='train', answer_list=config['answer_list'])
        val_dataset = vqa_dataset(config['val_file'], test_transform, config['vqa_root'], config['vg_root'], split='val', answer_list=config['answer_list'])
        test_dataset = vqa_dataset(config['test_file'], test_transform, config['vqa_root'], config['vg_root'], split='test', answer_list=config['answer_list'])
        return train_dataset, val_dataset, test_dataset
    elif dataset == 'nlvr':
        train_dataset = nlvr_dataset(config['train_file'], train_transform, config['image_root'])
        val_dataset = nlvr_dataset(config['val_file'], test_transform, config['image_root'])
        test_dataset = nlvr_dataset(config['test_file'], test_transform, config['image_root'])
        return train_dataset, val_dataset, test_dataset
    elif dataset == 've':
        train_dataset = ve_dataset(config['train_file'], train_transform, config['image_root'])
        val_dataset = ve_dataset(config['val_file'], test_transform, config['image_root'])
        test_dataset = ve_dataset(config['test_file'], test_transform, config['image_root'])
        return train_dataset, val_dataset, test_dataset
    elif dataset == 'gen':
        train_dataset = gen_dataset(config['train_file'], train_transform, config['image_root'], 'train', prompt=config['prompt'])
        val_dataset = gen_dataset(config['val_file'], test_transform, config['image_root'], 'val', prompt=config['prompt'])
        test_dataset = gen_dataset(config['test_file'], test_transform, config['image_root'], 'test', prompt=config['prompt'])
        return train_dataset, val_dataset, test_dataset
    else:
        # Fail fast instead of silently returning None.
        raise ValueError(f"unknown dataset: {dataset!r}")
def vqa_collate_fn(batch):
    """Collate VQA samples, flattening the per-question answer/weight lists.

    Each element of `batch` is (image, question, answers, weights, question_id).
    Returns the stacked image tensor, the question and question-id lists, the
    flattened answer and weight lists, and `n` — the number of answers per
    question, needed to un-flatten the answers downstream.
    """
    images = [sample[0] for sample in batch]
    questions = [sample[1] for sample in batch]
    question_ids = [sample[4] for sample in batch]
    answers = [ans for sample in batch for ans in sample[2]]
    weights = [w for sample in batch for w in sample[3]]
    n = [len(sample[2]) for sample in batch]
    return torch.stack(images, dim=0), questions, answers, torch.Tensor(weights), question_ids, n
def create_sampler(datasets, shuffles, num_tasks, global_rank):
    """Create one DistributedSampler per dataset.

    Args:
        datasets: sequence of datasets.
        shuffles: parallel sequence of booleans, one per dataset.
        num_tasks: total number of distributed workers (replicas).
        global_rank: rank of the current worker.

    Returns:
        List of `torch.utils.data.DistributedSampler`, one per dataset.
    """
    return [
        torch.utils.data.DistributedSampler(
            ds, num_replicas=num_tasks, rank=global_rank, shuffle=do_shuffle
        )
        for ds, do_shuffle in zip(datasets, shuffles)
    ]
def create_loader(datasets, samplers, batch_size, num_workers, is_trains, collate_fns):
    """Wrap each dataset in a DataLoader with per-dataset settings.

    All arguments are parallel sequences. Training loaders drop the last
    incomplete batch and shuffle only when no sampler is supplied (a sampler
    already defines the ordering).
    """
    loaders = []
    for ds, sampler, bs, workers, training, collate in zip(
            datasets, samplers, batch_size, num_workers, is_trains, collate_fns):
        do_shuffle = training and sampler is None
        loaders.append(DataLoader(
            ds,
            batch_size=bs,
            num_workers=workers,
            pin_memory=True,
            sampler=sampler,
            shuffle=do_shuffle,
            collate_fn=collate,
            drop_last=training,
        ))
    return loaders
DaVinci | DaVinci-main/dataset/gen_dataset.py | import json
import os
from torch.utils.data import Dataset
from PIL import Image
from dataset.utils import pre_caption
class gen_dataset(Dataset):
    """Image-captioning dataset for generation tasks.

    In the 'train' split, items are (image, prompt + caption). In other
    splits, items are (image, prompt + first reference caption, file name);
    the file name (or annotation id for nocaps images) lets predictions be
    matched back to their images.
    """

    def __init__(self, ann_file, transform, image_root, split='train', max_words=30, prompt=''):
        self.ann = json.load(open(ann_file, 'r'))
        self.transform = transform
        self.image_root = image_root
        self.max_words = max_words
        # Non-train annotations store a list of reference captions per image.
        self.split = split
        self.prompt = prompt

    def __len__(self):
        return len(self.ann)

    def __getitem__(self, index):
        ann = self.ann[index]
        image_path = os.path.join(self.image_root, ann['image'])
        image = self.transform(Image.open(image_path).convert('RGB'))

        if self.split == 'train':
            return image, self.prompt + pre_caption(ann['caption'], self.max_words)

        # nocaps annotations are keyed by id rather than file name.
        fname = ann["id"] if "nocaps" in image_path else ann['image']
        caption = self.prompt + pre_caption(ann['caption'][0], self.max_words)
        return image, caption, fname
| 1,228 | 31.342105 | 97 | py |
DaVinci | DaVinci-main/models/bert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig
from einops import rearrange
from einops.layers.torch import Rearrange
logger = logging.get_logger(__name__)
# Identifiers consumed by the shared HuggingFace docstring decorators below.
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
# Model-hub identifiers for which pretrained BERT weights are published.
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "bert-base-uncased",
    "bert-large-uncased",
    "bert-base-cased",
    "bert-large-cased",
    "bert-base-multilingual-uncased",
    "bert-base-multilingual-cased",
    "bert-base-chinese",
    "bert-base-german-cased",
    "bert-large-uncased-whole-word-masking",
    "bert-large-cased-whole-word-masking",
    "bert-large-uncased-whole-word-masking-finetuned-squad",
    "bert-large-cased-whole-word-masking-finetuned-squad",
    "bert-base-cased-finetuned-mrpc",
    "bert-base-german-dbmdz-cased",
    "bert-base-german-dbmdz-uncased",
    "cl-tohoku/bert-base-japanese",
    "cl-tohoku/bert-base-japanese-whole-word-masking",
    "cl-tohoku/bert-base-japanese-char",
    "cl-tohoku/bert-base-japanese-char-whole-word-masking",
    "TurkuNLP/bert-base-finnish-cased-v1",
    "TurkuNLP/bert-base-finnish-uncased-v1",
    "wietsedv/bert-base-dutch-cased",
    # See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Walks every variable in the TensorFlow checkpoint at `tf_checkpoint_path`,
    maps its slash-separated scope name onto the attribute hierarchy of
    `model`, and copies the weights in place. Optimizer-only variables
    (Adam moments, global step) are skipped. Returns the mutated `model`.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        pointer = model
        # Descend through the scope components, mapping TF naming conventions
        # (kernel/gamma -> weight, output_bias/beta -> bias, "layer_3" -> layer[3])
        # onto the corresponding PyTorch submodule/parameter.
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    # NOTE(review): this `continue` skips only the current scope
                    # component, not the whole variable — matches upstream
                    # transformers behavior.
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to torch.nn.Linear.
            array = np.transpose(array)
        try:
            if pointer.shape != array.shape:
                raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except AssertionError as e:
            # NOTE(review): the raise above is a ValueError, so this handler
            # looks unreachable; kept verbatim from upstream — confirm.
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        if version.parse(torch.__version__) > version.parse("1.6.0"):
            # Non-persistent buffer: lets traced models run without an explicit
            # token_type_ids input (persistent=False needs torch > 1.6).
            self.register_buffer(
                "token_type_ids",
                torch.zeros(self.position_ids.size(), dtype=torch.long),
                persistent=False,
            )
    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        """Sum word + segment (+ absolute position) embeddings, then LayerNorm/dropout.

        `past_key_values_length` offsets the position ids so that incremental
        decoding with a key/value cache keeps absolute positions consistent.
        """
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
        # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
        # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
        # issue #5664
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        # Position embeddings are added only in "absolute" mode; relative modes
        # inject position information inside self-attention instead.
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(nn.Module):
    """Multi-head (self or cross) attention with optional relative positions.

    Modified from stock HuggingFace BERT: the 1/sqrt(head_size) scaling is
    distributed over the individual score terms (query, relative-position
    scores, and the additive attention mask) instead of dividing the summed
    scores once at the end.
    """
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.is_decoder = config.is_decoder
        # For 2D-relative attention
        # NOTE(review): the 16x16 relative-bias machinery below is built and
        # registered but never referenced in forward() — dead parameters unless
        # consumed elsewhere in the project; confirm before removing.
        self.ih = 16
        self.iw = 16
        heads = self.num_attention_heads
        self.heads = heads
        # parameter table of relative position bias
        self.relative_bias_table = nn.Parameter(
            torch.zeros((2 * self.ih - 1) * (2 * self.iw - 1), heads))
        coords = torch.meshgrid((torch.arange(self.ih), torch.arange(self.iw)))
        coords = torch.flatten(torch.stack(coords), 1)
        relative_coords = coords[:, :, None] - coords[:, None, :]
        relative_coords[0] += self.ih - 1
        relative_coords[1] += self.iw - 1
        relative_coords[0] *= 2 * self.iw - 1
        relative_coords = rearrange(relative_coords, 'c h w -> h w c')
        relative_index = relative_coords.sum(-1).flatten().unsqueeze(1)
        self.register_buffer("relative_index", relative_index)
    def transpose_for_scores(self, x):
        # (batch, seq, all_head_size) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        # attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = torch.matmul(query_layer/math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility
            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores/math.sqrt(self.attention_head_size)
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                # attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
                attention_scores = attention_scores + (relative_position_scores_query + relative_position_scores_key)/math.sqrt(self.attention_head_size)
        # attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            # attention_scores = attention_scores + attention_mask
            # NOTE(review): unlike stock BERT the additive mask is also divided
            # by sqrt(head_size); large negative mask values still mask
            # effectively, but confirm this scaling is intentional.
            attention_scores = attention_scores + attention_mask/math.sqrt(self.attention_head_size)
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
class BertSelfOutput(nn.Module):
    """Output projection after self-attention: dense + dropout, then a
    residual connection followed by LayerNorm (post-LN)."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual add with the attention input, then normalize.
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Bundles BertSelfAttention with its output projection and supports
    pruning attention heads."""
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self = BertSelfAttention(config, position_embedding_type=position_embedding_type)
        self.output = BertSelfOutput(config)
        # Indices of heads pruned so far (needed to remap later prune calls).
        self.pruned_heads = set()
    def prune_heads(self, heads):
        """Remove the given attention heads from query/key/value/output projections."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # Run attention, then project + residual-normalize its context output.
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
class BertIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size + activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act may be either a registry key or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Feed-forward contraction: intermediate_size -> hidden_size, with
    dropout, a residual connection and LayerNorm (post-LN)."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual add with the feed-forward input, then normalize.
        return self.LayerNorm(projected + input_tensor)
class BertLayer(nn.Module):
    """One transformer block: self-attention, optional cross-attention
    (decoder only), and a chunked feed-forward sublayer.

    Cache layout: `past_key_value` holds self-attention key/value at indices
    0,1 and (when cross-attention is used) cross-attention key/value at 2,3.
    """
    def __init__(self, config):
        super().__init__()
        # Feed-forward can be applied in chunks along the sequence to save memory.
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BertAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = BertAttention(config, position_embedding_type="absolute")
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]
        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
                )
            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights
            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)
        return outputs
    def feed_forward_chunk(self, attention_output):
        # Applied per sequence-chunk by apply_chunking_to_forward above.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class BertEncoder(nn.Module):
    """Stack of BertLayer blocks with optional gradient checkpointing,
    key/value caching, and collection of hidden states / attentions."""
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """Run all layers; returns a BaseModelOutputWithPastAndCrossAttentions
        (or a tuple of its non-None fields when return_dict is False)."""
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            if self.gradient_checkpointing and self.training:
                # Checkpointing recomputes activations in backward, which is
                # incompatible with returning a key/value cache.
                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False
                def create_custom_forward(module):
                    # Closure binds past_key_value/output_attentions, since
                    # torch.utils.checkpoint only forwards tensor arguments.
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
class BertPooler(nn.Module):
    """Pools a sequence by transforming the first ([CLS]) token's hidden
    state through a dense layer and tanh."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" here means simply taking the first token's hidden state.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the LM decoder head."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # config.hidden_act may be either a registry key or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class BertLMPredictionHead(nn.Module):
    """Transform + vocabulary projection head for masked language modeling."""

    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The projection weights are tied to the input embeddings by the
        # surrounding model, but each token keeps its own output bias.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Link the two so `resize_token_embeddings` resizes the bias together
        # with the decoder.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
class BertOnlyMLMHead(nn.Module):
    """Wraps the LM prediction head as the sole pretraining head."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        # Per-token vocabulary logits.
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Binary next-sentence-prediction classifier over the pooled output."""

    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        # Two logits: is-next vs. not-next.
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Joint pretraining heads: MLM logits plus NSP classification."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        # MLM logits from per-token states; NSP logits from the pooled state.
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class BertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BertConfig
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"
    supports_gradient_checkpointing = True
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights of a single submodule."""
        std = self.config.initializer_range
        if isinstance(module, nn.LayerNorm):
            # LayerNorm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Linear):
            # Plain normal init; the TF version uses truncated_normal instead
            # (cf https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # The padding embedding must stay zero.
                module.weight.data[module.padding_idx].zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        # Gradient checkpointing is toggled only on the encoder stack.
        if isinstance(module, BertEncoder):
            module.gradient_checkpointing = value
@dataclass
class BertForPreTrainingOutput(ModelOutput):
    """
    Output type of :class:`~transformers.BertForPreTraining`.
    Args:
        loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """
    # Sum of MLM loss and NSP loss; only set when both label kinds are given.
    loss: Optional[torch.FloatTensor] = None
    # Pre-softmax vocabulary logits from the MLM head.
    prediction_logits: torch.FloatTensor = None
    # Pre-softmax binary logits from the NSP head.
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
BERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
    """
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        # The pooler is optional; heads that do not use the pooled output
        # construct the model with add_pooling_layer=False.
        self.pooler = BertPooler(config) if add_pooling_layer else None
        # Initialize weights and apply final processing
        # self.post_init()
        self.init_weights()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Caching key/value states is only meaningful in decoder mode.
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        # Inputs are expected to be 2-D: (batch_size, seq_length).
        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # past_key_values_length
        # Number of positions already cached (dim 2 of a cached key tensor);
        # 0 when no cache was passed.
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
        if attention_mask is None:
            # Default mask: attend over the full (cached + current) sequence.
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
        if token_type_ids is None:
            # Prefer the registered buffer (keeps dtype/device); fall back to
            # zeros for embeddings without the buffer.
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            # Legacy tuple output: (sequence_output, pooled_output, *extras).
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
@add_start_docstrings(
    """
    Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
    sentence prediction (classification)` head.
    """,
    BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config)
        # Initialize weights and apply final processing
        self.init_weights()

    def get_output_embeddings(self):
        # MLM decoder projection (tied with input embeddings).
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertForPreTraining
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.prediction_logits
            >>> seq_relationship_logits = outputs.seq_relationship_logits
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict
        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = encoder_outputs[1]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        # The total pre-training loss is the plain sum of MLM and NSP losses;
        # it is only computed when both label kinds are supplied.
        total_loss = None
        if labels is not None and next_sentence_label is not None:
            ce = CrossEntropyLoss()
            masked_lm_loss = ce(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = ce(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
        if return_dict:
            return BertForPreTrainingOutput(
                loss=total_loss,
                prediction_logits=prediction_scores,
                seq_relationship_logits=seq_relationship_score,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )
        tail = (prediction_scores, seq_relationship_score) + encoder_outputs[2:]
        return ((total_loss,) + tail) if total_loss is not None else tail
@add_start_docstrings(
    """Bert Model with a `language modeling` head on top for CLM fine-tuning. """, BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
    # Checkpoint keys tolerated as unexpected/missing when loading weights.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    def __init__(self, config, label_smoothing=0.0):
        # NOTE: `label_smoothing` is a local extension over the upstream
        # signature; it is forwarded to CrossEntropyLoss in `forward`.
        super().__init__(config)
        if not config.is_decoder:
            logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")
        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)
        self.label_smoothing = label_smoothing
        # Initialize weights and apply final processing
        self.init_weights()
    def get_output_embeddings(self):
        # MLM decoder projection (tied with input embeddings).
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        reduction="mean",
    ):
        r"""
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]``
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
            >>> config = BertConfig.from_pretrained("bert-base-cased")
            >>> config.is_decoder = True
            >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Training with labels is incompatible with incremental decoding.
            use_cache = False
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]  # (batch, seq_len, hidden_size)
        prediction_scores = self.cls(sequence_output)  # (batch, seq_len, vocab_size)
        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss(label_smoothing=self.label_smoothing, reduction=reduction)
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            if reduction == 'none':
                # Unreduced loss comes back flattened over all tokens; reshape
                # to (batch, seq) and sum per sequence so callers receive one
                # loss per sample.
                lm_loss = lm_loss.view(prediction_scores.size(0),-1).sum(1)
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output
        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            # NOTE(review): upstream returns `outputs.hidden_states` here; this
            # deliberately exposes the last hidden state instead — confirm
            # downstream consumers depend on it before changing.
            hidden_states=outputs.last_hidden_state, # adjusted for our usage
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )
    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        """Assemble the model inputs for one decoding step of `generate`."""
        input_shape = input_ids.shape
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)
        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]
        # return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "past_key_values": past,
            # Cross-attention inputs are threaded through for encoder-decoder use.
            "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
            "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
            # "is_decoder": True,
        }
    def _reorder_cache(self, past, beam_idx):
        """Reorder cached key/value states to follow beam-search reordering."""
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
    # Checkpoint keys tolerated as unexpected/missing when loading weights.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    def __init__(self, config):
        super().__init__(config)
        if config.is_decoder:
            logger.warning(
                "If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )
        # MLM does not use the pooled output.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)
        # Initialize weights and apply final processing
        self.init_weights()
    def get_output_embeddings(self):
        # MLM decoder projection (tied with input embeddings).
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        """Append a dummy PAD token (and a masked-out mask column) for MLM-style generation."""
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]
        # add a dummy token
        if self.config.pad_token_id is None:
            raise ValueError("The PAD token should be defined for generation")
        # Fix: callers (e.g. `generate`) may invoke this without an attention
        # mask; default to attending over all current tokens instead of
        # crashing on `None` (mirrors BertLMHeadModel.prepare_inputs_for_generation).
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)
        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full(
            (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, dummy_token], dim=1)
        return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
    """Bert Model with a `next sentence prediction (classification)` head on top. """,
    BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)
        # Initialize weights and apply final processing
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see ``input_ids`` docstring). Indices should be in ``[0, 1]``:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertForNextSentencePrediction
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
            >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
            >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
            >>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')
            >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
            >>> logits = outputs.logits
            >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
        """
        # Legacy keyword support: `next_sentence_label` was renamed to `labels`.
        if "next_sentence_label" in kwargs:
            warnings.warn(
                "The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("next_sentence_label")
        if return_dict is None:
            return_dict = self.config.use_return_dict
        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # NSP classifies the pooled ([CLS]-derived) representation.
        seq_relationship_scores = self.cls(encoder_outputs[1])
        next_sentence_loss = None
        if labels is not None:
            next_sentence_loss = CrossEntropyLoss()(
                seq_relationship_scores.view(-1, 2), labels.view(-1)
            )
        if return_dict:
            return NextSentencePredictorOutput(
                loss=next_sentence_loss,
                logits=seq_relationship_scores,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )
        tail = (seq_relationship_scores,) + encoder_outputs[2:]
        return ((next_sentence_loss,) + tail) if next_sentence_loss is not None else tail
@add_start_docstrings(
    """
    Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.bert = BertModel(config)
        # Fall back to the hidden-dropout probability when no classifier
        # dropout is configured.
        dropout_p = (
            config.hidden_dropout_prob
            if config.classifier_dropout is None
            else config.classifier_dropout
        )
        self.dropout = nn.Dropout(dropout_p)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict
        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Classify from the pooled ([CLS]-derived) representation.
        logits = self.classifier(self.dropout(encoder_outputs[1]))
        loss = None
        if labels is not None:
            problem_type = self.config.problem_type
            if problem_type is None:
                # Infer the problem type once and cache it on the config so
                # subsequent calls take the same branch.
                if self.num_labels == 1:
                    problem_type = "regression"
                elif self.num_labels > 1 and labels.dtype in (torch.long, torch.int):
                    problem_type = "single_label_classification"
                else:
                    problem_type = "multi_label_classification"
                self.config.problem_type = problem_type
            if problem_type == "regression":
                mse = MSELoss()
                if self.num_labels == 1:
                    loss = mse(logits.squeeze(), labels.squeeze())
                else:
                    loss = mse(logits, labels)
            elif problem_type == "single_label_classification":
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
            elif problem_type == "multi_label_classification":
                loss = BCEWithLogitsLoss()(logits, labels)
        if return_dict:
            return SequenceClassifierOutput(
                loss=loss,
                logits=logits,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )
        tail = (logits,) + encoder_outputs[2:]
        return ((loss,) + tail) if loss is not None else tail
@add_start_docstrings(
    """
    Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        # Prefer the dedicated classifier dropout when configured; otherwise
        # fall back to the encoder's hidden-layer dropout probability.
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        # One scalar score per (example, choice) pair; the softmax over the
        # num_choices scores is applied implicitly by CrossEntropyLoss below.
        self.classifier = nn.Linear(config.hidden_size, 1)
        # Initialize weights and apply final processing
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
            :obj:`input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Inputs arrive as (batch_size, num_choices, seq_len); read num_choices
        # from whichever of input_ids / inputs_embeds was supplied.
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        # Fold the choice dimension into the batch dimension so the encoder
        # sees an ordinary (batch_size * num_choices, seq_len) batch.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # outputs[1] is the pooled representation, one row per flattened choice.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Unfold the per-choice scores back to (batch_size, num_choices).
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            # Legacy tuple output: (loss?, reshaped_logits, *extra encoder outputs).
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
    """BERT encoder followed by a per-token linear classification head."""

    # Pretrained checkpoints contain pooler weights that this task never uses;
    # suppress the "unexpected keys" warning for them.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        # Token-level tasks only need per-token hidden states, so skip the pooler.
        self.bert = BertModel(config, add_pooling_layer=False)
        # Use the dedicated classifier dropout when set, else the hidden dropout.
        drop_prob = (
            config.hidden_dropout_prob
            if config.classifier_dropout is None
            else config.classifier_dropout
        )
        self.dropout = nn.Dropout(drop_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict
        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Per-token hidden states -> dropout -> label logits.
        hidden = self.dropout(encoder_outputs[0])
        logits = self.classifier(hidden)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            flat_logits = logits.view(-1, self.num_labels)
            flat_labels = labels.view(-1)
            if attention_mask is None:
                loss = loss_fct(flat_logits, flat_labels)
            else:
                # Only positions where attention_mask == 1 should contribute;
                # remap all other labels to the loss function's ignore_index.
                keep = attention_mask.view(-1) == 1
                masked_labels = torch.where(
                    keep, flat_labels, torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(flat_logits, masked_labels)
        if not return_dict:
            # Legacy tuple output: (loss?, logits, *extra encoder outputs).
            output = (logits,) + encoder_outputs[2:]
            return output if loss is None else ((loss,) + output)
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
    # Pretrained checkpoints contain pooler weights this task never uses;
    # suppress the "unexpected keys" warning for them.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        # Span prediction works on per-token hidden states, so skip the pooler.
        self.bert = BertModel(config, add_pooling_layer=False)
        # Projects each token's hidden state to its logits.
        # NOTE(review): the start/end split below assumes config.num_labels == 2.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        # Split the last dimension into separate start- and end-position logits,
        # each of shape (batch_size, seq_len) after the squeeze.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # Clamping maps out-of-range targets onto ignored_index (== seq_len),
            # which CrossEntropyLoss is told to skip below.
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Final loss is the mean of the two span-boundary losses.
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            # Legacy tuple output: (loss?, start_logits, end_logits, *extras).
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
DaVinci | DaVinci-main/models/model_vqa.py | # Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
from models.davinci_pretrain import DaVinci
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
class DaVinciVQA(nn.Module):
    """Thin VQA wrapper around the DaVinci pretraining model.

    NOTE(review): the ``quesiton`` parameter name is a typo, but it is part of
    the public signature (keyword callers may rely on it), so it is kept.
    """

    def __init__(self, encoder=None, text_decoder=None, tokenizer=None, config=None):
        super().__init__()
        self.tokenizer = tokenizer
        # init_dalle=True / init_deit=False: presumably initializes the DALL-E
        # image tokenizer but not DeiT weights — confirm in DaVinci.__init__.
        self.davinci = DaVinci(encoder, text_decoder, tokenizer, config, init_deit=False, init_dalle=True)

    def forward(self, image, quesiton, answer=None, alpha=0, k=None, weights=None, train=True):
        """Training: return the answer loss.  Inference: return (topk_ids, topk_probs)."""
        if not train:
            topk_ids, topk_probs = self.davinci(
                image, quesiton, answer,
                is_vqa=True, k=k, train=False, decode=False,
            )
            return topk_ids, topk_probs
        loss, _logits = self.davinci(
            image, quesiton, answer,
            is_vqa=True, k=k, train=True, decode=False, weights=weights,
        )
        return loss
| 1,496 | 33.813953 | 114 | py |
DaVinci | DaVinci-main/models/resnet.py | from typing import Type, Any, Callable, Union, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from timm.models.layers import trunc_normal_, DropPath
# from .._internally_replaced_utils import load_state_dict_from_url
try:
from torch.hub import load_state_dict_from_url # noqa: 401
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url # noqa: 401
# Public API of this module: the torchvision-style classifiers plus the
# "emb" variants that emit token-sequence embeddings instead of class logits.
__all__ = [
    "ResNet",
    "resnet18",
    "resnet34",
    "resnet50",
    "resnet101",
    "resnet152",
    "resnext50_32x4d",
    "resnext101_32x8d",
    "wide_resnet50_2",
    "wide_resnet101_2",
    "resnet18emb",
    "resnet101emb",
]
# Download locations of the ImageNet-pretrained torchvision checkpoints,
# keyed by the ``arch`` string handed to ``_resnet``.  The "emb" variants
# have no entries here (their weight loading is disabled).
model_urls = {
    "resnet18": "https://download.pytorch.org/models/resnet18-f37072fd.pth",
    "resnet34": "https://download.pytorch.org/models/resnet34-b627a593.pth",
    "resnet50": "https://download.pytorch.org/models/resnet50-0676ba61.pth",
    "resnet101": "https://download.pytorch.org/models/resnet101-63fe2227.pth",
    "resnet152": "https://download.pytorch.org/models/resnet152-394f9c45.pth",
    "resnext50_32x4d": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
    "resnext101_32x8d": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
    "wide_resnet50_2": "https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth",
    "wide_resnet101_2": "https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth",
}
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """Bias-free 3x3 convolution; padding equals dilation so spatial size is
    preserved when stride is 1."""
    return nn.Conv2d(
        in_planes, out_planes,
        kernel_size=3, stride=stride,
        padding=dilation, dilation=dilation,
        groups=groups, bias=False,
    )
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """Bias-free 1x1 (pointwise) convolution."""
    return nn.Conv2d(in_planes, out_planes, stride=stride, kernel_size=1, bias=False)
class BasicBlock(nn.Module):
    """Two 3x3 convolutions with a residual shortcut (ResNet-18/34 block)."""

    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError("BasicBlock only supports groups=1 and base_width=64")
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (together with the optional downsample on the shortcut) is
        # where any spatial downsampling happens.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        # Project the shortcut when shapes differ, otherwise pass x through.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block; output channels are
    ``planes * expansion`` (ResNet-50/101/152 block)."""
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion: int = 4
    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Inner width scales with base_width and groups (ResNeXt / wide variants).
        width = int(planes * (base_width / 64.0)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x: Tensor) -> Tensor:
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        # Project the shortcut when channel count or stride changed.
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """Torchvision-style ResNet classifier: 7x7 stem, four residual stages,
    global average pooling, and a final fully-connected head."""
    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                f"or a 3-element tuple, got {replace_stride_with_dilation}"
            )
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; layers[i] is the number of blocks per stage.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]
    def _make_layer(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        planes: int,
        blocks: int,
        stride: int = 1,
        dilate: bool = False,
    ) -> nn.Sequential:
        """Build one residual stage of ``blocks`` blocks; the first block
        carries the stride (or extra dilation) and the shortcut projection."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(
            block(
                self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer
            )
        )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    norm_layer=norm_layer,
                )
            )
        return nn.Sequential(*layers)
    def _forward_impl(self, x: Tensor) -> Tensor:  # x: [64, 3, 224, 224]
        # See note [TorchScript super()]
        # Shape comments below assume a 224x224 input with BasicBlock (ResNet-18).
        x = self.conv1(x) # [64, 64, 112, 112]
        x = self.bn1(x) # [64, 64, 112, 112]
        x = self.relu(x) # [64, 64, 112, 112]
        x = self.maxpool(x) # # [64, 64, 56, 56]
        x = self.layer1(x) # [64, 64, 56, 56]
        x = self.layer2(x) # [64, 128, 28, 28]
        x = self.layer3(x) # [64, 256, 14, 14]
        x = self.layer4(x) # [64, 512, 7, 7]
        x = self.avgpool(x) # [64, 512, 1, 1]
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
    def forward(self, x: Tensor) -> Tensor:
        """Return class logits of shape (batch, num_classes)."""
        return self._forward_impl(x)
class ResNetEmb(nn.Module):
    """ResNet trunk truncated after layer3 that emits a ViT-style token sequence.

    The layer3 feature map is flattened into patch tokens, a learned [CLS]
    token is prepended, and learned position embeddings are added, giving an
    output of shape (B, 1 + n_patches, channels).
    NOTE(review): the addition with ``pos_embed`` requires ``embed_dim`` to
    match layer3's channel count (e.g. 1024 for Bottleneck) — the default of
    256 only fits some configurations; confirm at the call site.
    """
    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        embed_dim: int = 256,
        num_patches: int = 256,
        drop_rate: float = 0.,
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                f"or a 3-element tuple, got {replace_stride_with_dilation}"
            )
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Only the first three residual stages are used; layer4/avgpool/fc of
        # the classifier variant are intentionally omitted (see comments below).
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.flatten = nn.Flatten(start_dim=2)
        # self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        # self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) # [1, 1, 256]
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        self.num_patches = num_patches
        # ViT-style truncated-normal init for the learned tokens/positions.
        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]
    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameters the optimizer should exclude from weight decay.
        return {'pos_embed', 'cls_token'}
    def _make_layer(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        planes: int,
        blocks: int,
        stride: int = 1,
        dilate: bool = False,
    ) -> nn.Sequential:
        """Build one residual stage of ``blocks`` blocks; the first block
        carries the stride (or extra dilation) and the shortcut projection."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(
            block(
                self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer
            )
        )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    norm_layer=norm_layer,
                )
            )
        return nn.Sequential(*layers)
    def _forward_impl(self, x: Tensor) -> Tensor: # x: [48, 3, 256, 256]
        # See note [TorchScript super()]
        # Shape comments assume a 256x256 input; pairs show resnet18 -> resnet101.
        x = self.conv1(x) # [48, 64, 128, 128]
        x = self.bn1(x) # [48, 64, 128, 128]
        x = self.relu(x) # [48, 64, 128, 128]
        x = self.maxpool(x) # # [48, 64, 64, 64]
        x = self.layer1(x) # resnet18 [48, 64, 64, 64] -> resnet 101 [48, 256, 64, 64]
        x = self.layer2(x) # [48, 128, 32, 32] -> [48, 512, 32, 32]
        x = self.layer3(x) # [48, 256, 16, 16] -> [48, 1024, 16, 16]
        x = self.flatten(x) # [48, 1024, 256]
        # Move channels last so each spatial position becomes one token.
        x = x.transpose(2,1) # [48, 256, 1024]
        # x = self.layer4(x) # [48, 512, 7, 7]
        # x = self.avgpool(x) # [48, 512, 1, 1]
        # x = torch.flatten(x, 1)
        # x = self.fc(x)
        return x
    def forward(self, x: Tensor) -> Tensor:
        """Return (B, 1 + n_patches, dim): [CLS] token + patch tokens with
        position embeddings and dropout applied."""
        B = x.shape[0] # x = [32, 3, 256, 256] B = 32 batch size
        x = self._forward_impl(x) # resnet18 [32, 256, 256] -> resnet101 [32, 256, 1024] [bsz, seq_len, hid_dim]
        cls_tokens = self.cls_token.expand(B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks [32, 1, 256]
        x = torch.cat((cls_tokens, x), dim=1) # [32, 257, 256]
        # Slice pos_embed in case the sequence is shorter than num_patches + 1.
        x = x + self.pos_embed[:,:x.size(1),:]
        x = self.pos_drop(x) # [12, 257, 256]
        return x
def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
    """Resize a checkpoint's patch position embedding to fit ``visual_encoder``.

    Extra (non-patch) tokens such as [CLS] are kept unchanged; the square grid
    of patch positions is bicubically interpolated to the encoder's grid size.
    Returns the checkpoint tensor itself when no resizing is needed.
    """
    embed_dim = pos_embed_checkpoint.shape[-1]
    target_patches = visual_encoder.num_patches
    n_extra = visual_encoder.pos_embed.shape[-2] - target_patches
    # Side lengths of the source / target square patch grids.
    src_size = int((pos_embed_checkpoint.shape[-2] - n_extra) ** 0.5)
    dst_size = int(target_patches ** 0.5)
    if src_size == dst_size:
        print('Do not need to reshape position embedding from %d to %d'%(src_size ** 2, dst_size ** 2))
        return pos_embed_checkpoint
    # class_token / dist_token stay as-is; only grid positions are resampled.
    special_tokens = pos_embed_checkpoint[:, :n_extra]
    grid = pos_embed_checkpoint[:, n_extra:]
    grid = grid.reshape(-1, src_size, src_size, embed_dim).permute(0, 3, 1, 2)
    grid = torch.nn.functional.interpolate(
        grid, size=(dst_size, dst_size), mode='bicubic', align_corners=False)
    grid = grid.permute(0, 2, 3, 1).flatten(1, 2)
    print('reshape position embedding from %d to %d'%(src_size ** 2, dst_size ** 2))
    return torch.cat((special_tokens, grid), dim=1)
def _resnet(
    arch: str,
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    pretrained: bool,
    progress: bool,
    **kwargs: Any,
) -> ResNet:
    """Build a :class:`ResNet` and optionally load the torchvision ImageNet
    checkpoint registered under ``model_urls[arch]``."""
    model = ResNet(block, layers, **kwargs)
    if not pretrained:
        return model
    weights = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(weights)
    return model
def _resnetemb(
    arch: str,
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    pretrained: bool,
    progress: bool,
    **kwargs: Any,
) -> ResNetEmb:
    """Build a :class:`ResNetEmb`.

    ``arch``, ``pretrained`` and ``progress`` are accepted only for signature
    parity with ``_resnet``; checkpoint loading is deliberately disabled for
    the embedding variants (there are no matching pretrained weights).
    """
    return ResNetEmb(block, layers, **kwargs)
def resnet18emb(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNetEmb:
    r"""ResNet-18 embedding backbone (first three stages, token-sequence output),
    based on `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (currently ignored: weight loading is disabled in ``_resnetemb``)
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnetemb("resnet18emb", BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)


def resnet101emb(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNetEmb:
    r"""ResNet-101 embedding backbone (first three stages, token-sequence output),
    based on `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Fixed: the docstring previously mislabelled this model as ResNet-18
    (copy-paste error); the layer configuration [3, 4, 23, 3] is ResNet-101.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (currently ignored: weight loading is disabled in ``_resnetemb``)
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnetemb("resnet101emb", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-18, `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    spec = ("resnet18", BasicBlock, [2, 2, 2, 2])
    return _resnet(*spec, pretrained, progress, **kwargs)


def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-34, `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    spec = ("resnet34", BasicBlock, [3, 4, 6, 3])
    return _resnet(*spec, pretrained, progress, **kwargs)


def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-50, `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    spec = ("resnet50", Bottleneck, [3, 4, 6, 3])
    return _resnet(*spec, pretrained, progress, **kwargs)


def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-101, `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    spec = ("resnet101", Bottleneck, [3, 4, 23, 3])
    return _resnet(*spec, pretrained, progress, **kwargs)


def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-152, `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    spec = ("resnet152", Bottleneck, [3, 8, 36, 3])
    return _resnet(*spec, pretrained, progress, **kwargs)


def resnet152_emb(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNetEmb:
    r"""ResNet-152 embedding backbone, `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    spec = ("resnet152_emb", Bottleneck, [3, 8, 36, 3])
    return _resnetemb(*spec, pretrained, progress, **kwargs)
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNeXt-50 32x4d, `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Cardinality 32, bottleneck width 4 per group.
    kwargs.update(groups=32, width_per_group=4)
    return _resnet("resnext50_32x4d", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)


def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNeXt-101 32x8d, `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Cardinality 32, bottleneck width 8 per group.
    kwargs.update(groups=32, width_per_group=8)
    return _resnet("resnext101_32x8d", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["width_per_group"] = 64 * 2
return _resnet("wide_resnet50_2", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["width_per_group"] = 64 * 2
return _resnet("wide_resnet101_2", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def wide_resnet101_2_emb(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNetEmb:
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Doubling width_per_group (64 * 2) is what makes this the "wide" variant.
    kwargs["width_per_group"] = 64 * 2
    # NOTE(review): `_resnetemb` presumably builds the embedding-returning
    # variant (ResNetEmb) — confirm against its definition elsewhere in the file.
    return _resnetemb("wide_resnet101_2_emb", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs) | 25975 | 39.148377 | 118 | py |
DaVinci | DaVinci-main/models/xbert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import Tensor, device, dtype, nn
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig
from einops import rearrange
from einops.layers.torch import Rearrange
import transformers
transformers.logging.set_verbosity_error()
logger = logging.get_logger(__name__)
# Names of the config / tokenizer classes substituted into generated docstrings.
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
# Hugging Face Hub checkpoint identifiers that this architecture can load.
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "bert-base-uncased",
    "bert-large-uncased",
    "bert-base-cased",
    "bert-large-cased",
    "bert-base-multilingual-uncased",
    "bert-base-multilingual-cased",
    "bert-base-chinese",
    "bert-base-german-cased",
    "bert-large-uncased-whole-word-masking",
    "bert-large-cased-whole-word-masking",
    "bert-large-uncased-whole-word-masking-finetuned-squad",
    "bert-large-cased-whole-word-masking-finetuned-squad",
    "bert-base-cased-finetuned-mrpc",
    "bert-base-german-dbmdz-cased",
    "bert-base-german-dbmdz-uncased",
    "cl-tohoku/bert-base-japanese",
    "cl-tohoku/bert-base-japanese-whole-word-masking",
    "cl-tohoku/bert-base-japanese-char",
    "cl-tohoku/bert-base-japanese-char-whole-word-masking",
    "TurkuNLP/bert-base-finnish-cased-v1",
    "TurkuNLP/bert-base-finnish-uncased-v1",
    "wietsedv/bert-base-dutch-cased",
    # See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model."""
    # Walks every variable in the TF checkpoint, maps its slash-separated scope
    # path onto `model`'s attribute tree, and copies the array into the matching
    # parameter in place. Returns the mutated `model`.
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        # Descend the module tree one scope segment at a time, translating TF
        # names (kernel/gamma/beta/...) into PyTorch attribute names.
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                # e.g. "layer_3" splits into ["layer", "3", ""] -> indexed access below.
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    # NOTE(review): this `continue` skips only the current path
                    # segment (inner loop), not the whole variable — this
                    # matches upstream Hugging Face behavior.
                    logger.info("Skipping {}".format("/".join(name)))
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to torch Linear weights.
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") #absolute
        self.config = config
    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        """Sum word + token-type (+ absolute position) embeddings, then LayerNorm and dropout.

        One of `input_ids` / `inputs_embeds` must be provided.
        `past_key_values_length` shifts the default position ids so tokens
        appended after a cached prefix receive the correct positions.
        """
        if input_ids is not None:
            input_shape = input_ids.size() # [12, 1]
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1] # 1
        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] #[[0]]
        if token_type_ids is None:
            # Default: every token belongs to segment 0.
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            # Relative position variants add their contribution inside attention instead.
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(nn.Module):
    """Multi-head attention supporting self- and cross-attention, cached decoding,
    1D relative position scores, and an additive 2D relative bias for image patches."""
    def __init__(self, config, is_cross_attention):
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        if is_cross_attention:
            # Keys/values project from the encoder's width, which may differ
            # from this model's hidden size.
            self.key = nn.Linear(config.encoder_width, self.all_head_size)
            self.value = nn.Linear(config.encoder_width, self.all_head_size)
        else:
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.save_attention = False
        # For 2D-relative attention
        # Fixed 16x16 patch grid; `relative_index` maps each (query, key) patch
        # pair to a row of the learned bias table.
        self.ih = 16
        self.iw = 16
        heads = self.num_attention_heads
        self.heads = heads
        # parameter table of relative position bias
        self.relative_bias_table = nn.Parameter(
            torch.zeros((2 * self.ih - 1) * (2 * self.iw - 1), heads))
        # NOTE(review): torch.meshgrid without `indexing=` warns on newer torch;
        # the computation below relies on the default "ij" layout.
        coords = torch.meshgrid((torch.arange(self.ih), torch.arange(self.iw)))
        coords = torch.flatten(torch.stack(coords), 1)
        relative_coords = coords[:, :, None] - coords[:, None, :]
        relative_coords[0] += self.ih - 1
        relative_coords[1] += self.iw - 1
        relative_coords[0] *= 2 * self.iw - 1
        relative_coords = rearrange(relative_coords, 'c h w -> h w c')
        relative_index = relative_coords.sum(-1).flatten().unsqueeze(1)
        self.register_buffer("relative_index", relative_index)
    def save_attn_gradients(self, attn_gradients):
        # Stored for attention-visualization / attribution when save_attention is on.
        self.attn_gradients = attn_gradients
    def get_attn_gradients(self):
        return self.attn_gradients
    def save_attention_map(self, attention_map):
        self.attention_map = attention_map
    def get_attention_map(self):
        return self.attention_map
    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        twoD_relative=False,
        prefix_image=False,
    ):
        mixed_query_layer = self.query(hidden_states)
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None
        if is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask # [12, 1, 1, 258]
        elif past_key_value is not None:
            # Incremental decoding: append this step's keys/values to the cache.
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        past_key_value = (key_layer, value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        # [bs, num_heads, seq_length, seq_length]
        # attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) # [12, 12, 15, 258]
        # Queries are pre-scaled by 1/sqrt(head_size) instead of dividing the product.
        attention_scores = torch.matmul(query_layer/math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            # [seq_length, seq_length], 0 on diagonal
            distance = position_ids_l - position_ids_r
            # [seq_length, seq_length, attention_head_size]
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores/math.sqrt(self.attention_head_size)
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + (relative_position_scores_query + relative_position_scores_key)/math.sqrt(self.attention_head_size)
        # attention_scores = attention_scores / math.sqrt(self.attention_head_size) # [24, 12, 270, 270]
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            # attention_scores = attention_scores + attention_mask # attention_scores [48, 12, 2, 3], attention_mask [48, 1, 2, 2]
            # NOTE(review): the additive mask is divided by the same sqrt(d) as
            # the scores — deliberate deviation from stock BERT, presumably to
            # keep the masking magnitude consistent with pre-scaled logits;
            # confirm the mask fill value stays large enough after the division.
            attention_scores = attention_scores + attention_mask/math.sqrt(self.attention_head_size)
        # twoD_relative
        if twoD_relative:
            # Use "gather" for more efficiency on GPUs
            relative_bias = self.relative_bias_table.gather(
                0, self.relative_index.repeat(1, self.heads)) # [65536, 12]
            relative_bias = rearrange(
                relative_bias, '(h w) c -> 1 c h w', h=self.ih*self.iw, w=self.ih*self.iw) # [1, 12, 256, 256]
            if prefix_image:
                # prefix image
                # NOTE(review): self.prefix_ih / self.prefix_iw are not set in
                # __init__; they must be assigned externally before calling
                # with prefix_image=True, otherwise this raises AttributeError.
                image_length = self.prefix_iw * self.prefix_ih + 1 # 129
            else:
                # full image
                image_length = self.iw * self.ih + 1
            # Bias is added only to patch-to-patch positions; index 0 is assumed
            # to be a non-patch (e.g. [CLS]-like) token and is left untouched.
            attention_scores[:,:,1:image_length,1:image_length] = attention_scores[:,:,1:image_length,1:image_length] + relative_bias[:, :, :image_length-1, :image_length-1]
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        if is_cross_attention and self.save_attention:
            self.save_attention_map(attention_probs)
            attention_probs.register_hook(self.save_attn_gradients)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs_dropped = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs_dropped = attention_probs_dropped * head_mask
        context_layer = torch.matmul(attention_probs_dropped, value_layer)
        # (batch, heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        # The present key/value cache is always appended as the last element.
        outputs = outputs + (past_key_value,)
        return outputs
class BertSelfOutput(nn.Module):
    """Projects attention output and applies dropout, residual add, and LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # LayerNorm(dropout(W·h) + residual)
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Attention block: BertSelfAttention followed by the residual output projection."""
    def __init__(self, config, is_cross_attention=False):
        super().__init__()
        self.self = BertSelfAttention(config, is_cross_attention)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()
    def prune_heads(self, heads):
        """Remove the given attention heads by slicing the q/k/v and output projections."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        twoD_relative=False,
        prefix_image=False
    ):
        # Run attention, then fold the context back into the residual stream.
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
            twoD_relative,
            prefix_image
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
class BertIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size with activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        act = config.hidden_act
        # String names are resolved via the ACT2FN registry; callables are used as-is.
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Feed-forward contraction back to hidden_size, with residual add + LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # LayerNorm(dropout(W·h) + residual)
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertLayer(nn.Module):
    """One transformer layer: self-attention, optional cross-attention, feed-forward."""
    def __init__(self, config, layer_num, has_cross_attention=True):
        super().__init__()
        self.config = config
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BertAttention(config)
        # self.has_cross_attention = (layer_num >= config.fusion_layer)
        self.has_cross_attention = has_cross_attention
        if self.has_cross_attention:
            # layer_num is only stored on cross-attention layers, where it
            # selects which encoder stream to attend to (see forward).
            self.layer_num = layer_num
            self.crossattention = BertAttention(config, is_cross_attention=True)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        twoD_relative=False,
        prefix_image=False,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
            twoD_relative=twoD_relative,
            prefix_image=prefix_image,
        )
        attention_output = self_attention_outputs[0]
        # Middle elements are (optional) attention maps; last is the KV cache.
        outputs = self_attention_outputs[1:-1]
        present_key_value = self_attention_outputs[-1]
        if self.has_cross_attention:
            assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers"
            if type(encoder_hidden_states) == list:
                # Multiple encoder streams: layers round-robin over the list,
                # offset by config.fusion_layer.
                cross_attention_outputs = self.crossattention(
                    attention_output,
                    attention_mask,
                    head_mask,
                    encoder_hidden_states[(self.layer_num-self.config.fusion_layer)%len(encoder_hidden_states)],
                    encoder_attention_mask[(self.layer_num-self.config.fusion_layer)%len(encoder_hidden_states)],
                    output_attentions=output_attentions,
                )
                attention_output = cross_attention_outputs[0]
                outputs = outputs + cross_attention_outputs[1:-1]
            else:
                cross_attention_outputs = self.crossattention(
                    attention_output,
                    attention_mask,
                    head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    output_attentions=output_attentions,
                )
                attention_output = cross_attention_outputs[0]
                outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        outputs = outputs + (present_key_value,)
        return outputs
    def feed_forward_chunk(self, attention_output):
        # FFN applied in chunks along the sequence dim to bound peak memory.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class BertEncoder(nn.Module):
    """Stack of BertLayer modules with optional gradient checkpointing and KV caching."""
    def __init__(self, config, has_cross_attention=True):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([BertLayer(config, i, has_cross_attention) for i in range(config.num_hidden_layers)])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        twoD_relative=False,
        prefix_image=False,
        mode='multi_modal',
    ):
        # NOTE(review): `mode` is currently unused — the fusion-layer slicing
        # below is commented out, so all layers always run.
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if use_cache else None
        start_layer = 0
        output_layer = self.config.num_hidden_layers
        # if mode=='text':
        #     start_layer = 0
        #     output_layer = self.config.fusion_layer
        # elif mode=='fusion':
        #     start_layer = self.config.fusion_layer
        #     output_layer = self.config.num_hidden_layers
        # elif mode=='multi_modal':
        #     start_layer = 0
        #     output_layer = self.config.num_hidden_layers
        for i in range(start_layer, output_layer):
            layer_module = self.layer[i]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            if getattr(self.config, "gradient_checkpointing", False) and self.training:
                if use_cache:
                    logger.warn(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)
                    return custom_forward
                # NOTE(review): this checkpointed path does not forward
                # `twoD_relative` / `prefix_image`, so they silently fall back
                # to False whenever gradient checkpointing is enabled — confirm
                # this is intended.
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                    twoD_relative,
                    prefix_image,
                )
            hidden_states = layer_outputs[0]
            if use_cache:
                # KV cache is always the last element of a layer's outputs.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
class BertPooler(nn.Module):
    """Pools the sequence by transforming the first token's hidden state."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" is simply dense + tanh over the first ([CLS]) token.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the MLM decoder."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        act = config.hidden_act
        # String names are resolved via the ACT2FN registry; callables are used as-is.
        self.transform_act_fn = ACT2FN[act] if isinstance(act, str) else act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class BertLMPredictionHead(nn.Module):
    """Masked-LM head: transform followed by a vocab-size decoder with its own bias."""

    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The decoder weight is typically tied to the input embeddings elsewhere;
        # only the per-token output bias is owned here.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Link the bias onto the decoder so `resize_token_embeddings` resizes it too.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
class BertOnlyMLMHead(nn.Module):
    """Wraps the MLM prediction head for models that only do masked-LM."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        # Per-token vocabulary logits.
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: binary classifier over the pooled output."""

    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        # Two logits: is-next vs. not-next.
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Joint pre-training heads: MLM token scores plus next-sentence classification."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        return (
            self.predictions(sequence_output),
            self.seq_relationship(pooled_output),
        )
class BertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = BertConfig
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"
    # `position_ids` is a registered buffer rebuilt in __init__, so it is fine
    # for it to be absent from a checkpoint.
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    def _init_weights(self, module):
        """ Initialize the weights """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        # Linear biases are zeroed regardless of which branch ran above.
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
@dataclass
class BertForPreTrainingOutput(ModelOutput):
    """
    Output type of :class:`~transformers.BertForPreTraining`.
    Args:
        loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """
    # NOTE(review): ModelOutput subclasses are presumably tuple-indexable in
    # declaration order — keep the field order stable.
    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
BERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as a decoder the model needs to be called with ``is_decoder=True`` (here a ``forward``
    argument) and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
    """

    def __init__(self, config, add_pooling_layer=True, has_cross_attention=True):
        super().__init__(config)
        self.config = config
        # Embedding layer (word/position/token-type), transformer stack, and an
        # optional [CLS] pooler; ``has_cross_attention`` is forwarded to the encoder.
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config, has_cross_attention)
        self.pooler = BertPooler(config) if add_pooling_layer else None
        self.init_weights()

    def get_input_embeddings(self):
        """Return the word-embedding lookup table."""
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        """Replace the word-embedding lookup table."""
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    # NOTE(review): these docstring decorators conventionally wrap ``forward``; here they
    # are attached to ``get_extended_attention_mask`` — confirm this is intentional.
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (:obj:`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (:obj:`Tuple[int]`):
                The shape of the input to the model.
            device: (:obj:`torch.device`):
                The device of the input to the model.
            is_decoder (:obj:`bool`):
                If :obj:`True`, additionally apply a causal (lower-triangular) mask.

        Returns:
            :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
        """
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if is_decoder:
                batch_size, seq_length = input_shape
                seq_ids = torch.arange(seq_length, device=device)
                # Lower-triangular boolean mask: position i may attend to positions <= i.
                causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
                # in case past_key_values are used we need to add a prefix ones mask to the causal mask
                # causal and attention masks must have same type with pytorch version < 1.3
                causal_mask = causal_mask.to(attention_mask.dtype)
                if causal_mask.shape[1] < attention_mask.shape[1]:
                    # The cached prefix is always attendable, so pad the causal mask with ones.
                    prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
                    causal_mask = torch.cat(
                        [
                            torch.ones(
                                (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
                            ),
                            causal_mask,
                        ],
                        axis=-1,
                    )
                extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
                    input_shape, attention_mask.shape
                )
            )
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=False,
        mode='multi_modal',
    ):
        r"""
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder. May also be a list of such tensors.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        encoder_embeds (:obj:`torch.FloatTensor`, `optional`):
            Pre-computed embedding output; when given, the embedding layer is bypassed entirely.
        is_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
            If :obj:`True`, a causal mask is applied and key/value caching may be enabled.
        mode (:obj:`str`, `optional`, defaults to :obj:`'multi_modal'`):
            Forwarded to the encoder; presumably selects which (text / fusion) layers run —
            confirm against the ``BertEncoder`` implementation.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Caching is only meaningful when decoding autoregressively.
        if is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        # Exactly one of input_ids / inputs_embeds / encoder_embeds determines the
        # batch shape and device.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = inputs_embeds.device
        elif encoder_embeds is not None:
            input_shape = encoder_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = encoder_embeds.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        # Default masks cover the full (current + cached) sequence.
        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        # NOTE(review): ``attention_mask.shape`` (which includes any past key/value length)
        # is passed as ``input_shape`` rather than the ``input_shape`` computed above —
        # verify this is intended for the cached-decoding path.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, attention_mask.shape,
                                                                                 device, is_decoder)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_hidden_states is not None:
            # Encoder states (and their masks) may arrive as a list, e.g. one entry per modality.
            if type(encoder_hidden_states) == list:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
            else:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)

            if type(encoder_attention_mask) == list:
                encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
            elif encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
            else:
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        # Skip the embedding layer entirely when pre-computed embeddings are supplied.
        if encoder_embeds is None:
            embedding_output = self.embeddings(
                input_ids=input_ids,
                position_ids=position_ids,
                token_type_ids=token_type_ids,
                inputs_embeds=inputs_embeds,
                past_key_values_length=past_key_values_length,
            )
        else:
            embedding_output = encoder_embeds

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            mode=mode,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
@add_start_docstrings(
    """
    Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
    sentence prediction (classification)` head.
    """,
    BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
    """BERT backbone with the joint MLM + NSP pre-training heads."""

    def __init__(self, config):
        super().__init__(config)
        # Shared encoder plus the combined prediction heads.
        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config)
        self.init_weights()

    def get_output_embeddings(self):
        """Return the decoder layer of the MLM prediction head."""
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        """Swap in a new decoder layer for the MLM prediction head."""
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.

        Returns:

        Example::
            >>> from transformers import BertTokenizer, BertForPreTraining
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.prediction_logits
            >>> seq_relationship_logits = outputs.seq_relationship_logits
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = encoder_outputs[0]
        pooled_output = encoder_outputs[1]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        # The combined pre-training loss needs both label tensors.
        total_loss = None
        if labels is not None and next_sentence_label is not None:
            cross_entropy = CrossEntropyLoss()
            masked_lm_loss = cross_entropy(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = cross_entropy(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss

        if not return_dict:
            head_outputs = (prediction_scores, seq_relationship_score) + encoder_outputs[2:]
            return head_outputs if total_loss is None else (total_loss,) + head_outputs

        return BertForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    """Bert Model with a `language modeling` head on top for CLM fine-tuning. """, BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
    """Causal (left-to-right) BERT decoder with an MLM-style output head.

    Built on a poolerless :class:`BertModel` with cross-attention enabled so the
    decoder can attend to external encoder states (e.g. image features).
    """

    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config, add_pooling_layer=False, has_cross_attention=True)
        self.cls = BertOnlyMLMHead(config)
        self.init_weights()

    def get_output_embeddings(self):
        """Return the decoder layer of the LM prediction head."""
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        """Replace the decoder layer of the LM prediction head."""
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=True,
        reduction='mean',
        mode='multi_modal',
        soft_labels=None,
        alpha=0,
        return_logits=False,
    ):
        r"""
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]``
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        reduction (:obj:`str`, `optional`, defaults to :obj:`'mean'`):
            Reduction for the LM loss; with ``'none'`` a per-sample summed loss is returned.
        soft_labels (:obj:`torch.FloatTensor`, `optional`):
            Teacher distribution over the vocabulary for distillation, blended with weight ``alpha``.
        return_logits (:obj:`bool`, `optional`, defaults to :obj:`False`):
            If :obj:`True`, return only the shifted next-token logits and skip loss computation.

        Returns:

        Example::
            >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
            >>> config = BertConfig.from_pretrained("bert-base-cased")
            >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Full-sequence loss computation is incompatible with cached decoding.
            use_cache = False

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            is_decoder=is_decoder,
            mode=mode,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        if return_logits:
            # Shifted logits only (position t predicts token t+1).
            return prediction_scores[:, :-1, :].contiguous()

        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss(reduction=reduction)
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            if reduction == 'none':
                # Per-sample loss: regroup the flat per-token losses by batch element and sum.
                # BUGFIX: this reshape was previously unconditional, which fails on the
                # 0-dim scalar produced by reduction='mean' when batch size > 1.
                lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
            if soft_labels is not None:
                # Distillation: cross-entropy against the teacher distribution.
                # BUGFIX: log_softmax must normalize over the vocabulary axis (dim=-1),
                # matching the subsequent sum over dim=-1, not the sequence axis (dim=1).
                loss_distill = -torch.sum(F.log_softmax(shifted_prediction_scores, dim=-1) * soft_labels, dim=-1)
                loss_distill = (loss_distill * (labels != -100)).sum(1)
                lm_loss = (1 - alpha) * lm_loss + alpha * loss_distill

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.last_hidden_state,  # adjusted for our usage
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        """Assemble the kwargs for one decoding step during generation."""
        input_shape = input_ids.shape
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]

        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "past_key_values": past,
            "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
            "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
            "is_decoder": True,
        }

    def _reorder_cache(self, past, beam_idx):
        """Reorder cached key/value states to follow beam-search reordering."""
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
    """Poolerless BERT with a masked-language-modeling head, supporting optional
    cross-attention inputs (``encoder_hidden_states``) and soft-label distillation."""

    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)
        self.init_weights()

    def get_output_embeddings(self):
        """Return the decoder layer of the MLM prediction head."""
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        """Replace the decoder layer of the MLM prediction head."""
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=False,
        mode='multi_modal',
        soft_labels=None,
        alpha=0,
        return_logits=False,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        soft_labels (:obj:`torch.FloatTensor`, `optional`):
            Teacher distribution over the vocabulary for distillation, blended with weight ``alpha``.
        return_logits (:obj:`bool`, `optional`, defaults to :obj:`False`):
            If :obj:`True`, return only the raw prediction logits and skip loss computation.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_embeds=encoder_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            is_decoder=is_decoder,
            mode=mode,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        if return_logits:
            return prediction_scores

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            if soft_labels is not None:
                # Distillation: cross-entropy against the teacher distribution, averaged
                # over the positions that carry a real label.
                # BUGFIX: log_softmax must normalize over the vocabulary axis (dim=-1),
                # matching the subsequent sum over dim=-1, not the sequence axis (dim=1).
                loss_distill = -torch.sum(F.log_softmax(prediction_scores, dim=-1) * soft_labels, dim=-1)
                loss_distill = loss_distill[labels != -100].mean()
                masked_lm_loss = (1 - alpha) * masked_lm_loss + alpha * loss_distill

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        """Append a dummy PAD token (and matching mask slot) for MLM-style generation."""
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]

        # add a dummy token
        assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full(
            (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, dummy_token], dim=1)

        return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
    """Bert Model with a `next sentence prediction (classification)` head on top. """,
    BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
    """BERT with the binary next-sentence-prediction head on the pooled output."""

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see ``input_ids`` docstring). Indices should be in ``[0, 1]``:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.

        Returns:

        Example::
            >>> from transformers import BertTokenizer, BertForNextSentencePrediction
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
            >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
            >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
            >>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')
            >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
            >>> logits = outputs.logits
            >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
        """
        # Accept the deprecated ``next_sentence_label`` keyword as an alias for ``labels``.
        if "next_sentence_label" in kwargs:
            warnings.warn(
                "The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("next_sentence_label")

        if return_dict is None:
            return_dict = self.config.use_return_dict

        bert_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # NSP is scored from the pooled [CLS] representation.
        seq_relationship_scores = self.cls(bert_outputs[1])

        next_sentence_loss = None
        if labels is not None:
            next_sentence_loss = CrossEntropyLoss()(seq_relationship_scores.view(-1, 2), labels.view(-1))

        if not return_dict:
            head_outputs = (seq_relationship_scores,) + bert_outputs[2:]
            return head_outputs if next_sentence_loss is None else (next_sentence_loss,) + head_outputs

        return NextSentencePredictorOutput(
            loss=next_sentence_loss,
            logits=seq_relationship_scores,
            hidden_states=bert_outputs.hidden_states,
            attentions=bert_outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
    """BERT with a dropout + linear classifier on the pooled [CLS] output."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        bert_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Classify from the pooled [CLS] representation.
        pooled = self.dropout(bert_outputs[1])
        logits = self.classifier(pooled)

        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # Single output unit -> regression with mean-squared error.
                loss = MSELoss()(logits.view(-1), labels.view(-1))
            else:
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            head_outputs = (logits,) + bert_outputs[2:]
            return head_outputs if loss is None else (loss,) + head_outputs

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=bert_outputs.hidden_states,
            attentions=bert_outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
    """BERT with a single-logit head scored per choice; choices compete via softmax."""

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
            :obj:`input_ids` above)
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        if input_ids is not None:
            num_choices = input_ids.shape[1]
        else:
            num_choices = inputs_embeds.shape[1]

        def merge_choice_dim(tensor):
            # Collapse (batch, num_choices, seq) into (batch * num_choices, seq).
            return None if tensor is None else tensor.view(-1, tensor.size(-1))

        flat_input_ids = merge_choice_dim(input_ids)
        flat_attention_mask = merge_choice_dim(attention_mask)
        flat_token_type_ids = merge_choice_dim(token_type_ids)
        flat_position_ids = merge_choice_dim(position_ids)
        # Embeddings keep their trailing hidden dimension when flattened.
        flat_inputs_embeds = (
            None
            if inputs_embeds is None
            else inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
        )

        bert_outputs = self.bert(
            flat_input_ids,
            attention_mask=flat_attention_mask,
            token_type_ids=flat_token_type_ids,
            position_ids=flat_position_ids,
            head_mask=head_mask,
            inputs_embeds=flat_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # One logit per (example, choice), then regroup the choices per example.
        pooled = self.dropout(bert_outputs[1])
        choice_logits = self.classifier(pooled)
        reshaped_logits = choice_logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss = CrossEntropyLoss()(reshaped_logits, labels)

        if not return_dict:
            head_outputs = (reshaped_logits,) + bert_outputs[2:]
            return head_outputs if loss is None else (loss,) + head_outputs

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=bert_outputs.hidden_states,
            attentions=bert_outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
    """BERT encoder followed by a per-token linear classifier (e.g. for NER)."""

    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        # Token-level head only needs per-token hidden states, so no pooler.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Per-token logits from the dropout-regularized final hidden states.
        token_states = self.dropout(encoder_outputs[0])
        logits = self.classifier(token_states)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            flat_logits = logits.view(-1, self.num_labels)
            flat_labels = labels.view(-1)
            if attention_mask is None:
                loss = loss_fct(flat_logits, flat_labels)
            else:
                # Replace labels at padded positions with ignore_index so that
                # only attended tokens contribute to the loss.
                keep = attention_mask.view(-1) == 1
                masked_labels = torch.where(
                    keep, flat_labels, torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(flat_logits, masked_labels)

        if not return_dict:
            output = (logits,) + encoder_outputs[2:]
            return output if loss is None else ((loss,) + output)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
    """BERT encoder with a linear span head producing start/end logits."""

    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config, add_pooling_layer=False)
        # One linear layer emits both the start and the end logit per token.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Split the head output into one start-logit and one end-logit per token.
        span_logits = self.qa_outputs(encoder_outputs[0])
        start_logits, end_logits = span_logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # Multi-GPU gather may add a trailing dimension; drop it.
            if start_positions.dim() > 1:
                start_positions = start_positions.squeeze(-1)
            if end_positions.dim() > 1:
                end_positions = end_positions.squeeze(-1)
            # Targets outside the model's input are clamped to an index the
            # loss is told to ignore.
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + encoder_outputs[2:]
            return output if total_loss is None else ((total_loss,) + output)

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    "The Bert Model that also accepts image embeddings at the beginning",
    BERT_START_DOCSTRING,
)
class BertModelImage(BertPreTrainedModel):
    """
    BERT encoder variant that can prepend a sequence of image patch embeddings
    (``input_v_embs``) to the text token embeddings, so one transformer stack
    attends jointly over image and text positions.
    """
    def __init__(self, config, add_pooling_layer=True, has_cross_attention=False):
        super().__init__(config)
        self.config = config
        # # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        # self.register_buffer("img_position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        # self.img_pos_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config, has_cross_attention)
        # Pooler is optional: sequence-level heads need it, token-level ones don't.
        self.pooler = BertPooler(config) if add_pooling_layer else None
        self.init_weights()
    def get_input_embeddings(self):
        # Returns the word-embedding table (used by weight tying / resizing).
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    # NOTE(review): these two docstring decorators appear intended for `forward`
    # but are attached to `get_extended_attention_mask`, which directly follows.
    # Harmless at runtime (they only edit __doc__), but worth confirming.
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
        Arguments:
            attention_mask (:obj:`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (:obj:`Tuple[int]`):
                The shape of the input to the model.
            device: (:obj:`torch.device`):
                The device of the input to the model.
            is_decoder (:obj:`bool`):
                Whether a causal (autoregressive) mask should be combined with the padding mask.
        Returns:
            :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`.
        """
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if is_decoder:
                batch_size, seq_length = input_shape
                seq_ids = torch.arange(seq_length, device=device)
                # Lower-triangular mask: position i may attend to positions <= i.
                causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
                # in case past_key_values are used we need to add a prefix ones mask to the causal mask
                # causal and attention masks must have same type with pytorch version < 1.3
                causal_mask = causal_mask.to(attention_mask.dtype)
                if causal_mask.shape[1] < attention_mask.shape[1]:
                    # Prefix positions (cached keys / prepended embeddings) are
                    # fully visible to every query position.
                    prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
                    causal_mask = torch.cat(
                        [
                            torch.ones(
                                (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
                            ),
                            causal_mask,
                        ],
                        axis=-1,
                    )
                extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
                    input_shape, attention_mask.shape
                )
            )
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask
    def forward(
        self,
        input_ids=None,
        input_v_embs=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=False,
        prefix_image=False,
        # mode='multi_modal',
    ):
        r"""
        input_v_embs (:obj:`torch.FloatTensor`, `optional`):
            Image patch embeddings that are concatenated in front of the text
            embeddings before the encoder runs (presumably shape
            ``(batch_size, num_patches, hidden_size)`` — confirm against callers).
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions  # typically False here
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )  # typically False here
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict  # typically True here
        if is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            # Encoders never cache key/value states.
            use_cache = False
        # Exactly one of input_ids / inputs_embeds / encoder_embeds determines
        # the (text) batch size, sequence length and device.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()  # e.g. [12, 1] during decoding
            batch_size, seq_length = input_shape
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = inputs_embeds.device
        elif encoder_embeds is not None:
            input_shape = encoder_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = encoder_embeds.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")
        if input_v_embs is not None:
            # Total attended length = image patches + text tokens (e.g. 258 = 257 + 1).
            seq_length += input_v_embs.shape[1]
        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0  # 0 when no cache is passed
        if attention_mask is None:
            # Default mask covers image + text + any cached prefix positions.
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
        if token_type_ids is None:
            # Note: token_type_ids only cover the text positions (input_shape).
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,
                                                                                 device, is_decoder)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_hidden_states is not None:
            if type(encoder_hidden_states) == list:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
            else:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if type(encoder_attention_mask) == list:
                encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
            elif encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
            else:
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        if encoder_embeds is None:
            embedding_output = self.embeddings(
                input_ids=input_ids,
                position_ids=position_ids,
                token_type_ids=token_type_ids,
                inputs_embeds=inputs_embeds,
                past_key_values_length=past_key_values_length,
            )
        else:
            # Caller already computed the embeddings; use them directly.
            embedding_output = encoder_embeds
        if input_v_embs is not None:
            seq_length = input_v_embs.shape[1]  # number of image positions, e.g. 257
            # img_pos_ids = self.img_position_ids[:, : seq_length]  # self.img_position_ids is [1, 512] (values 0..511); img_pos_ids is [1, 257] (values 0..256)
            # img_pos_embeddings = self.img_pos_embeddings(img_pos_ids)
            # input_v_embs += img_pos_embeddings
            # Image patches go first, text tokens after, e.g. [12, 258, 768].
            embedding_output = torch.cat([input_v_embs, embedding_output], dim=1)
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            # 2-D relative position handling only applies when image patches are
            # interleaved with text (not when the image is a cached prefix).
            twoD_relative=True if (input_v_embs is not None and prefix_image is False) else False,
            prefix_image=prefix_image,
            # mode=mode,
        )  # ModelOutput with last_hidden_state, past_key_values, hidden_states, attentions, cross_attentions
        # NOTE(review): this attribute access assumes the encoder returns a
        # ModelOutput even when return_dict is False — confirm the encoder's
        # contract before calling with return_dict=False.
        sequence_output = encoder_outputs.last_hidden_state
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
| 99,022 | 43.029791 | 213 | py |
DaVinci | DaVinci-main/models/vit.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import _cfg, PatchEmbed
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_, DropPath
class Mlp(nn.Module):
    """Two-layer feed-forward block (Linear → activation → Linear) with dropout,
    as used in Vision Transformer / MLP-Mixer style networks.
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Hidden and output widths default to the input width.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # fc1 → activation → dropout → fc2 → dropout
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """Multi-head self-attention with optional hooks that capture the attention
    map and its gradients (for attention-based visualization)."""
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        # Filled in by forward(register_hook=True) for later inspection.
        self.attn_gradients = None
        self.attention_map = None

    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def forward(self, x, register_hook=False):
        batch, tokens, channels = x.shape
        head_dim = channels // self.num_heads
        # Project to q/k/v in one matmul, then split heads:
        # (B, N, 3C) -> (3, B, heads, N, head_dim)
        qkv = (
            self.qkv(x)
            .reshape(batch, tokens, 3, self.num_heads, head_dim)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = qkv[0], qkv[1], qkv[2]  # unpack explicitly (TorchScript-friendly)

        scores = torch.matmul(q, k.transpose(-2, -1)) * self.scale
        weights = self.attn_drop(scores.softmax(dim=-1))

        if register_hook:
            # Stash the (post-dropout) attention map and hook its gradient.
            self.save_attention_map(weights)
            weights.register_hook(self.save_attn_gradients)

        out = torch.matmul(weights, v).transpose(1, 2).reshape(batch, tokens, channels)
        out = self.proj(out)
        return self.proj_drop(out)
class Block(nn.Module):
    """Standard pre-norm Transformer encoder block: self-attention then MLP,
    each behind LayerNorm, a residual connection, and optional stochastic depth."""
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # Stochastic depth: randomly drop the residual branch during training.
        if drop_path > 0.:
            self.drop_path = DropPath(drop_path)
        else:
            self.drop_path = nn.Identity()
        self.norm2 = norm_layer(dim)
        hidden = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=hidden, act_layer=act_layer, drop=drop)

    def forward(self, x, register_hook=False):
        # Residual attention sub-block, then residual MLP sub-block.
        x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook))
        return x + self.drop_path(self.mlp(self.norm2(x)))
class VisionTransformer(nn.Module):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`  -
        https://arxiv.org/abs/2010.11929

    NOTE: in this variant the transformer blocks and final norm are commented
    out — forward() only produces patch embeddings with a class token and
    position embeddings (presumably the transformer stack is applied by another
    module downstream — confirm against the callers).
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer: (nn.Module): normalization layer
        """
        super().__init__()
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        # Learnable class token and (num_patches + 1)-long position table.
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        # Transformer blocks intentionally disabled in this variant:
        # dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        # self.blocks = nn.ModuleList([
        #     Block(
        #         dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
        #         drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
        #     for i in range(depth)])
        # self.norm = norm_layer(embed_dim)
        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Truncated-normal linear weights, zero biases, unit LayerNorm scales.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    @torch.jit.ignore
    def no_weight_decay(self):
        # These parameters are excluded from weight decay by the optimizer setup.
        return {'pos_embed', 'cls_token'}
    def forward(self, x, register_blk=-1):
        # x: image batch, e.g. [B, 3, 256, 256]
        B = x.shape[0]
        x = self.patch_embed(x)  # e.g. [B, 256, 768]
        cls_tokens = self.cls_token.expand(B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks; [B, 1, 768]
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed[:,:x.size(1),:]
        x = self.pos_drop(x)  # e.g. [B, 257, 768]
        # Transformer stack disabled here (see __init__):
        # for i,blk in enumerate(self.blocks):
        #     x = blk(x, register_blk==i)
        # x = self.norm(x)
        return x
def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
    """Resize a checkpoint's position-embedding table to the target patch grid.

    Extra (non-patch) tokens such as the class token are kept unchanged; the
    patch-position tokens are laid out on their square grid and bicubically
    interpolated to the new grid size. If the grids already match, the
    checkpoint tensor is returned untouched.

    Args:
        pos_embed_checkpoint: embeddings from the checkpoint, shape
            ``[1, num_extra_tokens + old_grid**2, embed_dim]``.
        visual_encoder: target model exposing ``patch_embed.num_patches`` and
            ``pos_embed`` (used to infer the number of extra tokens).

    Returns:
        The (possibly interpolated) position-embedding tensor.
    """
    embed_dim = pos_embed_checkpoint.shape[-1]
    num_patches = visual_encoder.patch_embed.num_patches
    num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches

    # Side length of the (square) old and new patch grids.
    old_grid = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
    new_grid = int(num_patches ** 0.5)

    if old_grid == new_grid:
        return pos_embed_checkpoint

    # Keep class/dist tokens as-is; interpolate only the grid positions.
    extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
    patch_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
    patch_tokens = patch_tokens.reshape(-1, old_grid, old_grid, embed_dim).permute(0, 3, 1, 2)
    patch_tokens = torch.nn.functional.interpolate(
        patch_tokens, size=(new_grid, new_grid), mode='bicubic', align_corners=False)
    patch_tokens = patch_tokens.permute(0, 2, 3, 1).flatten(1, 2)
    print('reshape position embedding from %d to %d' % (old_grid ** 2, new_grid ** 2))
    return torch.cat((extra_tokens, patch_tokens), dim=1)
| 8,772 | 42.216749 | 120 | py |
DaVinci | DaVinci-main/models/model_nlvr.py | # Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
from models.xbert import BertConfig, BertModel
from models.davinci_pretrain import DaVinci
import torch
from torch import nn
import torch.nn.functional as F
class DaVinciNLVR(nn.Module):
    """DaVinci backbone adapted for NLVR2: encode the statement against each of
    the two images independently, then classify true/false from the
    concatenation of the two image-conditioned representations."""

    def __init__(self,
                 encoder = None,
                 text_decoder = None,
                 tokenizer = None,
                 config = None,
                 ):
        super().__init__()
        self.last_hidden_id_shift = config['last_hidden_id_shift']
        self.tokenizer = tokenizer
        self.davinci = DaVinci(encoder, text_decoder, tokenizer, config, init_deit=False, init_dalle=True)
        bert_config = BertConfig.from_json_file(config['bert_config'])
        # Binary head over the concatenated per-image hidden states.
        hidden = bert_config.hidden_size
        self.cls_head = nn.Sequential(
            nn.Linear(hidden * 2, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 2)
        )

    def forward(self, image0, image1, text, targets, alpha=0, train=True):
        # Empty prompt as encoder context; the statement is the decoder target.
        dummy_input = self.tokenizer([""] * image0.size(0), return_tensors='pt').to(image0.device)
        # Index of the last meaningful decoder state in each sequence.
        last_state_ids = text.attention_mask.sum(1) - self.last_hidden_id_shift

        per_image_states = []
        for image in (image0, image1):
            state = self.davinci(image,
                                 dummy_input,
                                 text,
                                 last_state_ids=last_state_ids,
                                 is_nlvr=True,
                                 train=train, decode=False)
            per_image_states.append(state)

        prediction = self.cls_head(torch.cat(per_image_states, dim=1))
        if train:
            return F.cross_entropy(prediction, targets)
        return prediction
| 2,167 | 38.418182 | 114 | py |
DaVinci | DaVinci-main/models/davinci_pretrain.py | # Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
from models.xbert import BertConfig, BertModelImage
from models.bert import BertLMHeadModel
from models.resnet import resnet101emb, resnet101, wide_resnet101_2_emb, wide_resnet101_2, interpolate_pos_embed
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import torch
# from dalle_pytorch import OpenAIDiscreteVAE, DALLE
import models.dalle_utils as dalle_utils
def tile(x, dim, n_tile):
    """Repeat each slice of ``x`` along ``dim`` ``n_tile`` times, interleaved.

    For ``x = [a, b]`` and ``n_tile = 2`` on dim 0 the result is
    ``[a, a, b, b]`` (unlike ``Tensor.repeat``, which yields ``[a, b, a, b]``).
    """
    size = x.size(dim)
    reps = [1] * x.dim()
    reps[dim] = n_tile
    tiled = x.repeat(*reps)
    # Gather order that interleaves the copies: for slice i take positions
    # i, i + size, i + 2*size, ... from the tiled tensor.
    order = (torch.arange(size).unsqueeze(1) + size * torch.arange(n_tile)).flatten()
    return torch.index_select(tiled, dim, order.to(x.device))
class DaVinci(nn.Module):
    def __init__(self,
                 encoder = None,
                 text_decoder = None,
                 tokenizer = None,
                 config = None,
                 init_deit = False,
                 init_dalle = False,
                 device = "cuda",
                 imagenet = False,
                 ):
        """Build the DaVinci model: a CNN visual encoder, a BERT-style
        multimodal encoder, an optional discrete VAE (for image generation),
        and a BERT LM head decoder with 3-way weight tying.

        Args:
            encoder: name/path of a pretrained encoder checkpoint (used only
                when ``config["init_encoder"]`` is truthy).
            text_decoder: name/path of a pretrained decoder checkpoint (used
                only when ``config["init_decoder"]`` is truthy).
            tokenizer: text tokenizer; also used to measure the prompt length.
            config: dict of hyper-parameters (generation settings, model
                sizes, checkpoint paths, ...).
            init_deit: if True, load ImageNet-pretrained weights into the
                visual encoder.
            init_dalle: if True, extend the decoder vocabulary with visual
                tokens and (unless ``imagenet``) build the discrete VAE.
            device: device passed to the discrete VAE constructor.
            imagenet: if True, skip building the discrete VAE.
        """
        super().__init__()
        # Generation hyper-parameters, read straight from the config dict.
        self.num_beams = config["num_beams"]
        self.max_length = config["max_length"]
        self.temperature = config["temperature"]
        self.length_penalty = config["length_penalty"]
        self.early_stopping=config["early_stopping"]
        self.num_return_sequences=config["num_return_sequences"]
        self.repetition_penalty = config["repetition_penalty"]
        self.top_k = config["top_k"]
        self.top_p = config["top_p"]
        self.min_length = 5
        self.tokenizer = tokenizer
        self.IMG_BOS = 2  # token id prepended before visual codebook ids (see forward)
        # Optional config entries with conservative defaults.
        if "label_smoothing" in config:
            self.label_smoothing = config["label_smoothing"]
        else:
            self.label_smoothing = 0.0
        if "prompt" in config:
            self.prompt = config["prompt"]
            # Prompt length in tokens, excluding one special token.
            self.prompt_length = len(tokenizer(config["prompt"]).input_ids) - 1
        else:
            self.prompt_length = 0
        if "loss_mim_alpha" in config and config["loss_mim_alpha"] > 0:
            self.do_mim = True
        else:
            self.do_mim = False
        # One patch per 16x16 block of the input image.
        num_patches = int((config["image_res"] / 16) ** 2)
        if 'huge' in config['bert_config']:
            # huge model size with wide_resnet101_2
            self.visual_encoder = wide_resnet101_2_emb(embed_dim=1024, num_patches=num_patches, drop_rate=0.0)
        else:
            # base model size with resnet101
            self.visual_encoder = resnet101emb(embed_dim=1024, num_patches=num_patches, drop_rate=0.0)
        if init_deit:
            # Copy matching ImageNet-pretrained weights into the visual encoder.
            print("initializing resnet...")
            if 'huge' in config['bert_config']:
                pretrained_model = wide_resnet101_2(pretrained=True)
            else:
                pretrained_model = resnet101(pretrained=True)
            model_dict = self.visual_encoder.state_dict()
            pretrained_dict = {k: v for k, v in pretrained_model.state_dict().items() if k in model_dict}
            model_dict.update(pretrained_dict)
            msg = self.visual_encoder.load_state_dict(model_dict)
            print(msg)
        # Encoder vocabulary is text tokens plus visual codebook tokens.
        config_encoder = BertConfig.from_json_file(config['bert_config'])
        config_encoder.vocab_size += config_encoder.visual_vocab_size
        if config["init_encoder"]:
            self.encoder = BertModelImage.from_pretrained(encoder, config=config_encoder, add_pooling_layer=False)
        else:
            self.encoder = BertModelImage(config=config_encoder, add_pooling_layer=False)
        # Project CNN features into the transformer's hidden size.
        vision_width = config['vision_width']
        emb_dim = config_encoder.hidden_size
        self.vision_proj = nn.Linear(vision_width, emb_dim)
        # Decoder shares the architecture but runs causally with cross-attention.
        self.config_decoder = BertConfig.from_json_file(config['bert_config'])
        self.config_decoder.is_decoder=True
        self.config_decoder.add_cross_attention=True
        self.config_decoder.is_encoder_decoder=False
        if init_dalle:
            self.config_decoder.vocab_size += self.config_decoder.visual_vocab_size
            if not imagenet:
                # Discrete VAE turns images into codebook indices for generation.
                self.d_vae = dalle_utils.create_d_vae(
                    weight_path=config["discrete_vae_weight_path"], d_vae_type=config["discrete_vae_type"],
                    device=device, image_size=config["second_input_size"])
        if config["init_decoder"]:
            self.text_decoder = BertLMHeadModel.from_pretrained(text_decoder, config=self.config_decoder, label_smoothing=self.label_smoothing)
        else:
            self.text_decoder = BertLMHeadModel(config=self.config_decoder, label_smoothing=self.label_smoothing)
        # 3-way weight tying: encoder input embeddings, decoder input
        # embeddings, and the decoder's output projection share one matrix.
        self.text_decoder.cls.predictions.decoder.weight = self.encoder.embeddings.word_embeddings.weight
        self.text_decoder.bert.embeddings.word_embeddings.weight = self.encoder.embeddings.word_embeddings.weight
def forward(self, image, context, gen_text=None, last_state_ids=None, train=True, decode=False, num_keep_best=1, do_sample=False, text_full=None, prefix_image=None, suffix_image=None, use_dalle=False, imagenet=False, is_ve=False, is_nlvr=False, is_vqa=False, k=None, weights=None, *args, **kwargs):
if image is not None:
# image-text-pair
image_embeds = self.visual_encoder(image)
image_embeds = self.vision_proj(image_embeds)
context_atts = context.attention_mask
image_atts = torch.ones(((image_embeds.shape[0], image_embeds.shape[1])), device=image_embeds.device)
encoder_attns = torch.cat([image_atts, context_atts], dim=1)
else:
# c4 text only
image_embeds = None
encoder_attns = context.attention_mask
encoder_output = self.encoder(context.input_ids,
input_v_embs = image_embeds,
attention_mask=encoder_attns,
# output_hidden_states = True,
return_dict = True)
encoder_states = encoder_output.last_hidden_state
if use_dalle:
# calculate loss_suffix_text_generation
loss, logits = self.decode_forward(gen_text.input_ids, encoder_states, encoder_attns, gen_text.attention_mask, train, *args, **kwargs)
# producing text embeddings for full caption
vae_context_attns = text_full.attention_mask
if prefix_image is not None:
prefix_image_embeds = self.visual_encoder(prefix_image)
prefix_image_embeds = self.vision_proj(prefix_image_embeds)
prefix_image_atts = torch.ones(((prefix_image_embeds.shape[0], prefix_image_embeds.shape[1])), device=prefix_image_embeds.device)
vae_encoder_attns = torch.cat([prefix_image_atts, vae_context_attns], dim=1)
else:
prefix_image_embeds = None
vae_encoder_attns = vae_context_attns
vae_encoder_output = self.encoder(text_full.input_ids,
input_v_embs = prefix_image_embeds,
attention_mask=vae_encoder_attns,
return_dict = True,
prefix_image = True,)
masked_image_ids = self.d_vae.get_codebook_indices(suffix_image).flatten(1)
offsetted_masked_images_ids = masked_image_ids + self.config_decoder.text_vocab_size
# added <img_bos>
offsetted_masked_images_ids = torch.cat([torch.ones((offsetted_masked_images_ids.shape[0], 1), device=offsetted_masked_images_ids.device)*self.IMG_BOS, offsetted_masked_images_ids], dim=1).long()
loss_image_generation, logits = self.decode_forward(offsetted_masked_images_ids, vae_encoder_output.last_hidden_state, vae_encoder_attns, torch.ones_like(offsetted_masked_images_ids), train, *args, **kwargs)
if self.do_mim and prefix_image_embeds is not None:
dummy_text_input = self.tokenizer([""] * image.size(0), return_tensors="pt").to(image.device)
mim_encoder_attns = torch.cat([prefix_image_atts, dummy_text_input.attention_mask], dim=1)
mim_encoder_output = self.encoder(dummy_text_input.input_ids,
input_v_embs = prefix_image_embeds,
attention_mask=mim_encoder_attns,
return_dict = True,
prefix_image = True,)
mim_masked_image_ids = self.d_vae.get_codebook_indices(suffix_image).flatten(1)
mim_offsetted_masked_images_ids = mim_masked_image_ids + self.config_decoder.text_vocab_size
mim_offsetted_masked_images_ids = torch.cat([torch.ones((mim_offsetted_masked_images_ids.shape[0], 1), device=offsetted_masked_images_ids.device)*self.IMG_BOS, offsetted_masked_images_ids], dim=1).long() # [64, 161]
loss_mim, logits = self.decode_forward(mim_offsetted_masked_images_ids, mim_encoder_output.last_hidden_state, mim_encoder_attns, torch.ones_like(mim_offsetted_masked_images_ids), train, *args, **kwargs)
return loss, loss_image_generation, loss_mim, logits
return loss, loss_image_generation, torch.Tensor([0]).to(image.device), logits
if imagenet == True:
image_features = torch.mean(encoder_states[:, 1:-1, :], 1)
image_cls_features = encoder_states[:, 0, :]
decoder_features = self.task_forward(gen_text.input_ids, encoder_states, encoder_attns, last_state_ids, gen_text.attention_mask)
return torch.cat([image_cls_features, image_features, decoder_features], 1)
if is_ve == True or is_nlvr == True:
return self.task_forward(gen_text.input_ids, encoder_states, encoder_attns, last_state_ids, gen_text.attention_mask)
if is_vqa == True:
if train:
question_states = []
question_atts = []
for b, n in enumerate(k):
question_states += [encoder_output.last_hidden_state[b]]*n
question_atts += [encoder_attns[b]]*n
question_states = torch.stack(question_states,0) #[32,912,768]
question_atts = torch.stack(question_atts,0) #[32,912]
gen_text_targets = gen_text.input_ids.masked_fill(gen_text.input_ids == self.tokenizer.pad_token_id, -100)
gen_text_output = self.text_decoder(gen_text.input_ids,
attention_mask = gen_text.attention_mask,
encoder_hidden_states = question_states,
encoder_attention_mask = question_atts,
labels = gen_text_targets,
return_dict = True,
reduction = 'none',
)
loss = weights * gen_text_output.loss
loss = loss.sum()/image.size(0)
logits = gen_text_output.logits
return loss, logits
else:
topk_ids, topk_probs = self.rank_answer(encoder_output.last_hidden_state, encoder_attns,
gen_text.input_ids, gen_text.attention_mask, k)
return topk_ids, topk_probs
if not decode:
return self.decode_forward(gen_text.input_ids, encoder_states, encoder_attns, gen_text.attention_mask, train, *args, **kwargs)
else:
# -----------------generation method1-------------------
# return self.generate(None, encoder_states, encoder_attns, num_keep_best, do_sample)
# -----------------generation method2-------------------
BSZ = encoder_states.shape[0] # batch_size 12
num_beams = self.num_beams # 2
# # define decoder start token ids
# input_ids = torch.ones((BSZ, 1), device=self.text_decoder.device, dtype=torch.long)
# input_ids = input_ids * self.tokenizer.bos_token_id
# input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(image.device)
# input_ids = input_ids[:, :-1]
prompt = [self.prompt] * BSZ
image_embeds = encoder_states # [bsz, 578, 768]
if num_beams > 1:
assert (do_sample is False) and (self.num_return_sequences == 1)
image_embeds = image_embeds.repeat_interleave(num_beams, dim=0) # [bsz*2, 578, 768]
if self.num_return_sequences > 1:
assert (do_sample is True) and (num_beams == 1)
image_embeds = image_embeds.repeat_interleave(self.num_return_sequences, dim=0)
prompt = [self.prompt] * image_embeds.size(0)
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device) # [bsz*2, 578]
model_kwargs = {"encoder_hidden_states": image_embeds, "encoder_attention_mask": image_atts}
input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(image.device)
input_ids = input_ids[:, :-1] # [12, 4]
return self.text_decoder.generate(
input_ids = input_ids, # [12, 4]
# encoder_hidden_states=encoder_states, # [6, 578, 768]
# encoder_attention_mask=encoder_attns, # [6, 578]
max_length=self.max_length,
min_length=self.min_length,
# return_dict_in_generate=True,
top_k=self.top_k,
num_beams=self.num_beams,
temperature=self.temperature,
length_penalty=self.length_penalty,
early_stopping=self.early_stopping,
num_return_sequences=self.num_return_sequences,
repetition_penalty=self.repetition_penalty,
do_sample=do_sample,
bos_token_id=self.tokenizer.cls_token_id,
pad_token_id=self.tokenizer.pad_token_id,
eos_token_id=self.tokenizer.sep_token_id,
**model_kwargs
)
def task_forward(self, input_ids, encoder_states, encoder_atts, last_state_ids, attention_mask=None):
gen_text_output = self.text_decoder(input_ids, # [1024, 5]
attention_mask = attention_mask, #[1024, 5]
encoder_hidden_states = encoder_states, # [128, 258, 768]
encoder_attention_mask = encoder_atts, # [128, 258]
return_dict = True,
)
decoder_states = gen_text_output.hidden_states # decoder_states [bsz, 20, 768]
last_states = decoder_states[range(len(last_state_ids)), last_state_ids] # [bsz, 768]
return last_states
def decode_forward(self, input_ids, encoder_states, encoder_atts, attention_mask=None, train=True):
if not train:
gen_text_output = self.text_decoder(input_ids,
attention_mask = attention_mask,
encoder_hidden_states = encoder_states,
encoder_attention_mask = encoder_atts,
return_dict = True,
)
return gen_text_output.logits
else:
gen_text_targets = input_ids.masked_fill(input_ids == self.tokenizer.pad_token_id, -100)
gen_text_targets[:, :self.prompt_length] = -100
gen_text_output = self.text_decoder(input_ids,
attention_mask = attention_mask,
encoder_hidden_states = encoder_states,
encoder_attention_mask = encoder_atts,
labels = gen_text_targets,
return_dict = True
)
loss = gen_text_output.loss.mean()
logits = gen_text_output.logits
return loss, logits
    def rank_answer(self, question_states, question_atts, answer_ids, answer_atts, k):
        """Rank candidate answers for each question (VQA inference).

        Two-stage ranking: (1) score every answer's first token from a single
        decoder step on the BOS token and keep the top-k candidates; (2) run the
        full decoder on those k candidates per question and re-rank them by the
        chain-rule sequence log-probability.

        Returns (topk_ids, topk_probs), each of shape (num_questions, k):
        indices into ``answer_ids`` and their softmax-normalized scores.
        NOTE(review): relies on a module-level ``tile`` helper defined elsewhere
        in this file.
        """
        num_ques = question_states.size(0)
        start_ids = answer_ids[0,0].repeat(num_ques,1) # bos token
        start_output = self.text_decoder(start_ids,
                                         encoder_hidden_states = question_states,
                                         encoder_attention_mask = question_atts,
                                         return_dict = True,
                                         reduction = 'none'
                                        )
        logits = start_output.logits[:,0,:] # first token's logit
        # topk_probs: top-k probability
        # topk_ids: [num_question, k]
        answer_first_token = answer_ids[:,1]
        prob_first_token = F.softmax(logits,dim=1).index_select(dim=1, index=answer_first_token)
        topk_probs, topk_ids = prob_first_token.topk(k,dim=1)
        # answer input: [num_question*k, answer_len]
        # gather the full token sequences of the k surviving candidates
        input_ids = []
        input_atts = []
        for b, topk_id in enumerate(topk_ids):
            input_ids.append(answer_ids.index_select(dim=0, index=topk_id))
            input_atts.append(answer_atts.index_select(dim=0, index=topk_id))
        input_ids = torch.cat(input_ids,dim=0) # [1024, 9]
        input_atts = torch.cat(input_atts,dim=0) # [1024, 9]
        # pad tokens are excluded from the loss via the -100 ignore index
        targets_ids = input_ids.masked_fill(input_ids == self.tokenizer.pad_token_id, -100)
        # repeat encoder's output for top-k answers
        question_states = tile(question_states, 0, k)
        question_atts = tile(question_atts, 0, k)
        output = self.text_decoder(input_ids,
                                   attention_mask = input_atts,
                                   encoder_hidden_states = question_states,
                                   encoder_attention_mask = question_atts,
                                   labels = targets_ids,
                                   return_dict = True,
                                   reduction = 'none'
                                  )
        # per-token negative log-likelihood of each candidate answer
        answer_loss = output.loss
        answer_loss = answer_loss.view(input_ids.size(0),-1)
        # topk_prob: first token probability
        topk_probs = topk_probs.view(-1,1)
        log_probs = torch.cat([topk_probs.log(), -answer_loss],dim=1)
        # re-calculate log probabilities for the answer sequences using chain rule
        log_probs_sum = log_probs.sum(1)
        log_probs_sum = log_probs_sum.view(num_ques,k)
        topk_probs = F.softmax(log_probs_sum, dim=-1)
        # get top-k after re-ranking
        topk_probs, rerank_id = topk_probs.topk(k,dim=1)
        topk_ids = torch.gather(topk_ids, 1, rerank_id)
        return topk_ids, topk_probs
def decode_visual_forward(self, input_ids, encoder_states, encoder_atts, attention_mask=None, train=True):
if not train:
gen_text_output = self.visual_decoder(input_ids,
attention_mask = attention_mask,
encoder_hidden_states = encoder_states,
encoder_attention_mask = encoder_atts,
return_dict = True,
)
return gen_text_output.logits
else:
gen_text_targets = input_ids.masked_fill(input_ids == self.tokenizer.pad_token_id, -100)
gen_text_output = self.visual_decoder(input_ids,
attention_mask = attention_mask,
encoder_hidden_states = encoder_states,
encoder_attention_mask = encoder_atts,
labels = gen_text_targets,
return_dict = True
)
loss = gen_text_output.loss.mean()
logits = gen_text_output.logits
return loss, logits
def generate(self, input_ids, encoder_states, encoder_atts, num_keep_best=1, do_sample=False):
self.num_keep_best = num_keep_best
batch_size = encoder_states.shape[0]
if input_ids is None:
input_ids = torch.full(
(batch_size, 1), self.tokenizer.bos_token_id, dtype=torch.long, device=encoder_states.device
)
else:
assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
assert input_ids.shape[0] == batch_size, "Input batch size must match image features"
cur_len = input_ids.shape[1]
num_expand = self.num_beams
# input_ids = self._expand_for_beams(input_ids, num_expand)
encoder_states = self._expand_for_beams(encoder_states, num_expand)
encoder_atts = self._expand_for_beams(encoder_atts, num_expand)
if self.num_beams > 1:
output = self._generate_beam_search(
input_ids,
encoder_states,
encoder_atts,
cur_len,
self.max_length,
do_sample,
self.temperature,
self.top_k,
self.top_p,
self.repetition_penalty,
self.tokenizer.pad_token_id,
self.tokenizer.eos_token_id,
batch_size,
self.length_penalty,
self.num_beams,
self.tokenizer.vocab_size,
)
else:
output = self._generate_no_beam_search(
input_ids,
encoder_states,
encoder_atts,
cur_len,
self.max_length,
do_sample,
self.temperature,
self.top_k,
self.top_p,
self.repetition_penalty,
self.tokenizer.pad_token_id,
self.tokenizer.eos_token_id,
batch_size,
)
return output
def _expand_for_beams(self, x, num_expand):
if x is None or num_expand == 1:
return x
input_shape = list(x.shape)
expanded_shape = input_shape[:1] + [num_expand] + input_shape[1:]
x = x.unsqueeze(1).expand(expanded_shape)
# (batch_size * num_expand, ...)
x = x.contiguous().view([input_shape[0] * num_expand] + input_shape[1:])
return x
def prepare_inputs_for_generation(self, curr_ids, **kwargs):
# do not consider past history here, as we use a separate decoder
mask_token_id = self.tokenizer.mask_token_id
batch_size = curr_ids.shape[0]
mask_ids = torch.full(
(batch_size, 1), mask_token_id, dtype=torch.long, device=curr_ids.device
)
input_ids = torch.cat([curr_ids, mask_ids], dim=1)
# other params are default, like attention_mask
return {"input_ids": input_ids}
    def _generate_no_beam_search(
        self,
        input_ids,
        encoder_states,
        encoder_atts,
        cur_len,
        max_length,
        do_sample,
        temperature,
        top_k,
        top_p,
        repetition_penalty,
        pad_token_id,
        eos_token_ids,
        batch_size,
    ):
        """ Generate sequences for each example without beam search (num_beams == 1).
            All returned sequence are generated independantly.

            Greedy decoding when do_sample is False, otherwise temperature +
            top-k/top-p sampling.  Finished sequences are frozen via the
            cur_unfinished mask (their future tokens become pad_token_id).
            Returns (ids, logprobs) shaped (batch_size, 1, max_length) and
            (batch_size, 1) to match the beam-search output format.
        """
        if type(eos_token_ids) != list:
            eos_token_ids = [eos_token_ids]
        assert self.num_keep_best == 1, 'cannot generate >1 sentences in greedy search'
        # current position / max lengths / length of generated sentences / unfinished sentences
        unfinished_sents = []
        # 1 while a sequence has not yet emitted an EOS token, 0 afterwards
        cur_unfinished = input_ids.new(batch_size).fill_(1)
        # log of scores for each sentence in the batch
        logprobs = []
        while cur_len < max_length:
            # model_inputs = self.prepare_inputs_for_generation(input_ids)
            logits = self.decode_forward(input_ids, encoder_states, encoder_atts, attention_mask=None, train=False)
            # the logit predicting position cur_len lives at index cur_len - 1
            next_token_idx = cur_len - 1
            next_token_logits = logits[:, next_token_idx, :]
            # repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)
            if repetition_penalty != 1.0:
                for i in range(batch_size):
                    for previous_token in set(input_ids[i].tolist()):
                        # if score < 0 then repetition penalty has to multiplied to reduce the previous token probability
                        if next_token_logits[i, previous_token] < 0:
                            next_token_logits[i, previous_token] *= repetition_penalty
                        else:
                            next_token_logits[i, previous_token] /= repetition_penalty
            if do_sample:
                # Temperature (higher temperature => more likely to sample low probability tokens)
                if temperature != 1.0:
                    next_token_logits = next_token_logits / temperature
                # Top-p/top-k filtering
                next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
                # Sample
                next_token = torch.multinomial(F.softmax(next_token_logits, dim=-1), num_samples=1).squeeze(1)
            else:
                # Greedy decoding
                next_token = torch.argmax(next_token_logits, dim=-1)
            # Compute scores
            _scores = F.log_softmax(next_token_logits, dim=-1)  # (batch_size, vocab_size)
            _scores = torch.gather(_scores, -1, next_token.unsqueeze(-1))  # (batch_size, 1)
            logprobs.append(_scores)  # (batch_size, 1)
            unfinished_sents.append(cur_unfinished)
            # update generations and finished sentences
            # finished rows keep receiving pad_token_id instead of new tokens
            tokens_to_add = next_token * cur_unfinished + pad_token_id * (1 - cur_unfinished)
            input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
            #for t in input_ids:
                #print(self.tokenizer.convert_ids_to_tokens(t.tolist()))
            for eos_token_id in eos_token_ids:
                cur_unfinished = cur_unfinished.mul(tokens_to_add.ne(eos_token_id).long())
            cur_len = cur_len + 1
            # stop when there is a </s> in each sentence, or if we exceed the maximul length
            if cur_unfinished.max() == 0:
                break
        # add eos_token_ids to unfinished sentences
        if cur_len == max_length:
            input_ids[:, -1].masked_fill_(cur_unfinished.to(dtype=torch.bool), eos_token_ids[0])
        logprobs = torch.cat(logprobs, dim=1)
        unfinished_sents = torch.stack(unfinished_sents, dim=1).float()
        # average per-token logprob over the tokens generated before EOS
        sum_logprobs = (logprobs * unfinished_sents).sum(dim=1)
        # return logprobs to keep consistent with beam search output
        logprobs = sum_logprobs / unfinished_sents.sum(dim=1)
        # pad to the same length, otherwise DataParallel will give error
        pad_len = max_length - input_ids.shape[1]
        if pad_len > 0:
            padding_ids = input_ids.new(batch_size, pad_len).fill_(pad_token_id)
            input_ids = torch.cat([input_ids, padding_ids], dim=1)
        # (batch_size, n_best, max_len), (batch_size, n_best)
        return input_ids.unsqueeze(1), logprobs.unsqueeze(1)
    def _generate_beam_search(
        self,
        input_ids,
        encoder_states,
        encoder_atts,
        cur_len,
        max_length,
        do_sample,
        temperature,
        top_k,
        top_p,
        repetition_penalty,
        pad_token_id,
        eos_token_ids,
        batch_size,
        length_penalty,
        num_beams,
        vocab_size,
    ):
        """ Generate sequences for each example with beam search.

            encoder_states/encoder_atts are expected to already be expanded to
            (batch_size * num_beams, ...) by the caller; input_ids are expanded
            here.  Sampled beam search is used when do_sample is True.  Returns
            (decoded, logprobs) shaped (batch_size, num_keep_best, max_length)
            and (batch_size, num_keep_best).
        """
        if type(eos_token_ids) != list:
            eos_token_ids = [eos_token_ids]
        # Expand input to num beams
        input_ids = input_ids.unsqueeze(1).expand(batch_size, num_beams, cur_len)
        input_ids = input_ids.contiguous().view(batch_size * num_beams, cur_len)  # (batch_size * num_beams, cur_len)
        # generated hypotheses
        num_keep_best = self.num_keep_best
        generated_hyps = [
            BeamHypotheses(num_keep_best, max_length, length_penalty, early_stopping=False) for _ in range(batch_size)
        ]
        # NOTE: Expand >1 words to leave some spare tokens to keep the
        # beam size, because some sentences may end here and cannot expand
        # in the next level
        TOPN_PER_BEAM = 2
        # scores for each sentence in the beam
        beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
        # only beam 0 starts "live"; the rest get -inf-like scores so the first
        # step does not pick num_beams copies of the same prefix
        beam_scores[:, 1:] = -1e9
        beam_scores = beam_scores.view(-1)  # shape (batch_size * num_beams,)
        # cache compute states
        past = None
        # done sentences
        done = [False for _ in range(batch_size)]
        while cur_len < max_length:
            logits = self.decode_forward(input_ids, encoder_states, encoder_atts, attention_mask=None, train=False)
            # the logit predicting position cur_len lives at index cur_len - 1
            next_token_idx = cur_len - 1
            scores = logits[:, next_token_idx, :]
            # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
            if repetition_penalty != 1.0:
                for i in range(batch_size * num_beams):
                    for previous_token in set(input_ids[i].tolist()):
                        # if score < 0 then repetition penalty has to multiplied to reduce the previous token probability
                        if scores[i, previous_token] < 0:
                            scores[i, previous_token] *= repetition_penalty
                        else:
                            scores[i, previous_token] /= repetition_penalty
            if do_sample:
                # Temperature (higher temperature => more likely to sample low probability tokens)
                if temperature != 1.0:
                    scores = scores / temperature
                # Top-p/top-k filtering
                scores = top_k_top_p_filtering(
                    scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
                )  # (batch_size * num_beams, vocab_size)
                # Sample [TOPN_PER_BEAM] next words for each beam (so we have some spare tokens and match output of greedy beam search)
                next_words = torch.multinomial(F.softmax(scores, dim=-1),
                                               num_samples=TOPN_PER_BEAM)  # (batch_size * num_beams, TOPN_PER_BEAM)
                # Compute next scores
                _scores = F.log_softmax(scores, dim=-1)  # (batch_size * num_beams, vocab_size)
                _scores = torch.gather(_scores, -1, next_words)  # (batch_size * num_beams, TOPN_PER_BEAM)
                next_scores = _scores + beam_scores[:, None].expand_as(_scores)  # (batch_size * num_beams, TOPN_PER_BEAM)
                # Match shape of greedy beam search
                # encode (beam_id, word_id) pairs as beam_id * vocab_size + word_id
                beam_indices = torch.arange(num_beams) * vocab_size
                beam_indices = beam_indices.repeat(batch_size, TOPN_PER_BEAM).to(next_words.device)
                next_words = next_words.view(batch_size, TOPN_PER_BEAM * num_beams)  # (batch_size, TOPN_PER_BEAM * num_beams)
                next_words = next_words + beam_indices
                next_scores = next_scores.view(batch_size, TOPN_PER_BEAM * num_beams)  # (batch_size, TOPN_PER_BEAM * num_beams)
            else:
                # do greedy beam search
                scores = F.log_softmax(scores, dim=-1)  # (batch_size * num_beams, vocab_size)
                assert scores.size() == (batch_size * num_beams, vocab_size)
                # Add the log prob of the new beams to the log prob of the beginning of the sequence (sum of logs == log of the product)
                _scores = scores + beam_scores[:, None].expand_as(scores)  # (batch_size * num_beams, vocab_size)
                # re-organize to group the beam together (we are keeping top hypothesis accross beams)
                _scores = _scores.view(batch_size, num_beams * vocab_size)  # (batch_size, num_beams * vocab_size)
                next_scores, next_words = torch.topk(_scores, TOPN_PER_BEAM * num_beams, dim=1, largest=True, sorted=True)
            assert next_scores.size() == next_words.size() == (batch_size, TOPN_PER_BEAM * num_beams)
            # next batch beam content
            # list of (batch_size * num_beams) tuple(next hypothesis score, next word, current position in the batch)
            next_batch_beam = []
            # for each sentence
            for batch_ex in range(batch_size):
                # if we are done with this sentence
                done[batch_ex] = done[batch_ex] or generated_hyps[batch_ex].is_done(next_scores[batch_ex].max().item())
                if done[batch_ex]:
                    next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams)  # pad the batch
                    continue
                # next sentence beam content
                next_sent_beam = []
                # next words for this sentence
                for idx, score in zip(next_words[batch_ex], next_scores[batch_ex]):
                    # get beam and word IDs
                    beam_id = idx // vocab_size
                    word_id = idx % vocab_size
                    # end of sentence, or next word
                    if word_id.item() in eos_token_ids or cur_len + 1 == max_length:
                        generated_hyps[batch_ex].add(
                            input_ids[batch_ex * num_beams + beam_id, :cur_len].clone(), score.item()
                        )
                    else:
                        next_sent_beam.append((score, word_id, batch_ex * num_beams + beam_id))
                    # the beam for next step is full
                    if len(next_sent_beam) == num_beams:
                        break
                # update next beam content
                if cur_len + 1 == max_length:
                    assert len(next_sent_beam) == 0
                else:
                    assert len(next_sent_beam) == num_beams
                if len(next_sent_beam) == 0:
                    next_sent_beam = [(0, pad_token_id, 0)] * num_beams  # pad the batch
                next_batch_beam.extend(next_sent_beam)
                assert len(next_batch_beam) == num_beams * (batch_ex + 1)
            # sanity check / prepare next batch
            assert len(next_batch_beam) == batch_size * num_beams
            beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
            beam_words = input_ids.new([x[1] for x in next_batch_beam])
            beam_idx = input_ids.new([x[2] for x in next_batch_beam])
            # re-order batch
            input_ids = input_ids[beam_idx, :]
            input_ids = torch.cat([input_ids, beam_words.unsqueeze(1)], dim=-1)
            # re-order internal states
            if past:
                reordered_past = []
                for layer_past in past:
                    # get the correct batch idx from layer past batch dim
                    # batch dim of `past` and `mems` is at 1st position
                    reordered_layer_past = [layer_past[i].unsqueeze(0).clone().detach() for i in beam_idx]
                    reordered_layer_past = torch.cat(reordered_layer_past, dim=0)
                    # check that shape matches
                    assert reordered_layer_past.shape == layer_past.shape
                    reordered_past.append(reordered_layer_past)
                past = tuple(reordered_past)
            # update current length
            cur_len = cur_len + 1
            # stop when we are done with each sentence
            if all(done):
                break
        # visualize hypotheses
        # print([len(x) for x in generated_hyps], cur_len)
        # globals().update( locals() );
        # !import code; code.interact(local=vars())
        # for ii in range(batch_size):
        #     for ss, ww in sorted(generated_hyps[ii].hyp, key=lambda x: x[0], reverse=True):
        #         print("%.3f " % ss + " ".join(self.dico[x] for x in ww.tolist()))
        #     print("")
        # select the best hypotheses
        tgt_len = torch.ones(batch_size, num_keep_best, dtype=torch.long)
        logprobs = torch.zeros(batch_size, num_keep_best,
                               dtype=torch.float).fill_(-1e5).to(input_ids.device)
        all_best = []
        for i, hypotheses in enumerate(generated_hyps):
            best = []
            hyp_scores = torch.tensor([x[0] for x in hypotheses.hyp])
            _, best_indices = torch.topk(hyp_scores,
                                         min(num_keep_best, len(hyp_scores)), largest=True)
            for best_idx, hyp_idx in enumerate(best_indices):
                conf, best_hyp = hypotheses.hyp[hyp_idx]
                best.append(best_hyp)
                logprobs[i, best_idx] = conf
                tgt_len[i, best_idx] = len(best_hyp) + 1  # +1 for the <EOS> symbol
            all_best.append(best)
        # generate target batch, pad to the same length
        decoded = input_ids.new(batch_size, num_keep_best, max_length).fill_(pad_token_id)
        for batch_idx, best in enumerate(all_best):
            for best_idx, hypo in enumerate(best):
                decoded[batch_idx, best_idx, : tgt_len[batch_idx, best_idx] - 1] = hypo
                decoded[batch_idx, best_idx, tgt_len[batch_idx, best_idx] - 1] = eos_token_ids[0]
        return decoded, logprobs
def top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1):
    """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
        Args:
            logits: logits distribution shape (batch size, vocabulary size)
            if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
            if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
            Make sure we keep at least min_tokens_to_keep per batch example in the output
        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317

        Note: modifies *logits* in place and also returns it.
    """
    if top_k > 0:
        top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1))  # Safety check
        # everything strictly below the k-th best logit is filtered out
        kth_best = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_best] = filter_value
    if top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Remove tokens with cumulative probability above the threshold (token with 0 are kept)
        remove_sorted = cumulative_probs > top_p
        if min_tokens_to_keep > 1:
            # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
            remove_sorted[..., :min_tokens_to_keep] = 0
        # Shift the indices to the right to keep also the first token above the threshold
        remove_sorted[..., 1:] = remove_sorted[..., :-1].clone()
        remove_sorted[..., 0] = 0
        # scatter sorted tensors to original indexing
        indices_to_remove = remove_sorted.scatter(1, sorted_indices, remove_sorted)
        logits[indices_to_remove] = filter_value
    return logits
class BeamHypotheses(object):
    """Fixed-capacity n-best list of completed beam-search hypotheses."""

    def __init__(self, n_hyp, max_length, length_penalty, early_stopping):
        """
        Initialize n-best list of hypotheses.
        """
        self.max_length = max_length - 1  # ignoring bos_token
        self.length_penalty = length_penalty
        self.early_stopping = early_stopping
        self.n_hyp = n_hyp
        self.hyp = []
        self.worst_score = 1e9

    def __len__(self):
        """
        Number of hypotheses in the list.
        """
        return len(self.hyp)

    def add(self, hyp, sum_logprobs):
        """
        Add a new hypothesis, evicting the current worst when over capacity.
        Scores are length-normalized: sum_logprobs / len(hyp) ** length_penalty.
        """
        score = sum_logprobs / len(hyp) ** self.length_penalty
        if len(self) >= self.n_hyp and score <= self.worst_score:
            return  # not competitive with anything stored
        self.hyp.append((score, hyp))
        if len(self) > self.n_hyp:
            ranked = sorted((s, idx) for idx, (s, _) in enumerate(self.hyp))
            del self.hyp[ranked[0][1]]
            self.worst_score = ranked[1][0]
        else:
            self.worst_score = min(score, self.worst_score)

    def is_done(self, best_sum_logprobs):
        """
        If there are enough hypotheses and that none of the hypotheses being generated
        can become better than the worst one in the heap, then we are done with this sentence.
        """
        if len(self) < self.n_hyp:
            return False
        if self.early_stopping:
            return True
        # best achievable normalized score for the remaining candidates
        return self.worst_score >= best_sum_logprobs / self.max_length ** self.length_penalty
| 42,974 | 48.453395 | 302 | py |
DaVinci | DaVinci-main/models/model_linearprobe.py | # Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
from models.davinci_pretrain import DaVinci
from torch import nn
class DaVinciLinearProbe(nn.Module):
    """Linear-probe classification head on top of a DaVinci backbone.

    The backbone (called with ``imagenet=True``) returns concatenated
    [CLS | mean-patch | decoder-state] features of size 3 * hidden_size,
    which a single linear layer maps to ``n_labels`` classes.
    """
    def __init__(self,
                 encoder = None,
                 text_decoder = None,
                 tokenizer = None,
                 config = None,
                 n_labels = None,
                 ):
        super().__init__()
        # offset from the prompt length to the index of the last decoder state
        self.last_hidden_id_shift = 1
        self.tokenizer = tokenizer
        self.davinci = DaVinci(encoder, text_decoder, tokenizer, config, init_deit=False, init_dalle=True)
        emb_dim = self.davinci.config_decoder.hidden_size
        # classifier over the concatenated 3 * emb_dim feature vector
        self.fc = nn.Linear(emb_dim * 3, n_labels)

    def forward(self, image, text=None, train=True):
        """Return classification logits of shape (batch, n_labels)."""
        batch = image.size(0)
        # the backbone gets an empty encoder text and a fixed decoder prompt
        dummy_text = self.tokenizer([""] * batch, return_tensors='pt').to(image.device)
        prompts = self.tokenizer(["a picture of "] * batch, return_tensors="pt").to(image.device)
        # index of the last real (non-pad) decoder token per example
        last_state_ids = prompts.attention_mask.sum(1) - self.last_hidden_id_shift
        features = self.davinci(image,
                                dummy_text,
                                prompts,
                                last_state_ids = last_state_ids,
                                imagenet=True,
                                train=train, decode=False)
        return self.fc(features)
| 1,617 | 43.944444 | 114 | py |
DaVinci | DaVinci-main/models/model_imageft.py | # Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
from models.davinci_pretrain import DaVinci
from torch import nn
class DaVinciImageFT(nn.Module):
    """Image-classification fine-tuning head on top of a DaVinci backbone.

    The backbone (called with ``imagenet=True``) returns concatenated
    [CLS | mean-patch | decoder-state] features (3 * hidden_size); a small
    two-layer MLP maps them to ``n_labels`` classes.
    """
    def __init__(self,
                 encoder = None,
                 text_decoder = None,
                 tokenizer = None,
                 config = None,
                 n_labels = 1000,
                 ):
        """
        Args:
            encoder / text_decoder: pretrained-weight identifiers forwarded to DaVinci.
            tokenizer: tokenizer shared with the backbone.
            config: model configuration dict.
            n_labels: number of output classes.  Defaults to 1000 (the previous
                hard-coded ImageNet value), so existing callers are unaffected.
        """
        super().__init__()
        # offset from the prompt length to the index of the last decoder state
        self.last_hidden_id_shift = 1
        self.tokenizer = tokenizer
        self.davinci = DaVinci(encoder, text_decoder, tokenizer, config, init_deit=False, init_dalle=True, imagenet=True)
        emb_dim = self.davinci.config_decoder.hidden_size
        # two-layer MLP classifier over the concatenated 3 * emb_dim features
        self.fc = nn.Sequential(
            nn.Linear(3*emb_dim, emb_dim),
            nn.ReLU(),
            nn.Linear(emb_dim, n_labels)
        )
    def forward(self, image, text=None, train=True):
        """Return classification logits of shape (batch, n_labels)."""
        # the backbone consumes an empty encoder text and a fixed decoder prompt
        dummy_text = self.tokenizer([""] * image.size(0), return_tensors='pt').to(image.device)
        text_inputs = self.tokenizer(["a picture of "]*image.size(0), return_tensors="pt").to(image.device)
        # index of the last real (non-pad) decoder token per example
        last_state_ids = text_inputs.attention_mask.sum(1) - self.last_hidden_id_shift
        hidden_states = self.davinci(image,
                            dummy_text,
                            text_inputs,
                            last_state_ids = last_state_ids,
                            imagenet=True,
                            train=train, decode=False)
        logits = self.fc(hidden_states)
        return logits
| 1,713 | 44.105263 | 121 | py |
DaVinci | DaVinci-main/models/dalle_utils.py | # --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import io
import os
import math
import time
import json
from collections import defaultdict, deque
import datetime
import numpy as np
from timm.utils import get_state_dict
from pathlib import Path
import torch
import torch.distributed as dist
from torch._six import inf
from .modeling_discrete_vae import Dalle_VAE, DiscreteVAE
from .dalle_pytorch import VQGanVAE # OpenAIDiscreteVAE, DiscreteVAE, DALLE
from tensorboardX import SummaryWriter
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        self.deque = deque(maxlen=window_size)  # only the most recent values
        self.total = 0.0                        # running sum of every value seen
        self.count = 0                          # number of values seen
        self.fmt = fmt if fmt is not None else "{median:.4f} ({global_avg:.4f})"

    def update(self, value, n=1):
        """Record *value*, counted *n* times toward the global statistics."""
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        count, total = stats.tolist()
        self.count = int(count)
        self.total = total

    @property
    def median(self):
        """Median of the windowed values."""
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        """Mean of the windowed values."""
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        """Mean over every value ever recorded (not just the window)."""
        return self.total / self.count

    @property
    def max(self):
        """Largest value currently in the window."""
        return max(self.deque)

    @property
    def value(self):
        """Most recently recorded value."""
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value,
        )
class MetricLogger(object):
    """Collect named SmoothedValue meters and pretty-print them during training."""
    def __init__(self, delimiter="\t"):
        # unknown meter names are created on first use with default settings
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        """Update one meter per keyword argument; None values are skipped."""
        for k, v in kwargs.items():
            if v is None:
                continue
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def __getattr__(self, attr):
        # allow `logger.loss` as a shortcut for `logger.meters['loss']`
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))
    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        # all-reduce every meter's global totals across ranks
        for meter in self.meters.values():
            meter.synchronize_between_processes()
    def add_meter(self, name, meter):
        # register a meter with custom settings (window size / format)
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None):
        """Generator wrapping `iterable`: yields its items and prints progress
        (ETA, meters, per-iter time, data-loading time) every `print_freq` steps.
        Requires `iterable` to support len().
        """
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # pad the iteration counter to the width of len(iterable)
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        log_msg = [
            header,
            '[{0' + space_fmt + '}/{1}]',
            'eta: {eta}',
            '{meters}',
            'time: {time}',
            'data: {data}'
        ]
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # time between yields ≈ the caller's processing time + data loading
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
class TensorboardLogger(object):
    """Thin wrapper around tensorboardX.SummaryWriter with an internal step counter."""
    def __init__(self, log_dir):
        self.writer = SummaryWriter(logdir=log_dir)
        self.step = 0
    def set_step(self, step=None):
        """Set the global step explicitly, or advance it by one when step is None."""
        if step is not None:
            self.step = step
        else:
            self.step += 1
    def update(self, head='scalar', step=None, **kwargs):
        """Log each keyword argument as scalar `head/<name>`; None values skipped.

        Uses the internal counter unless an explicit `step` is given.
        """
        for k, v in kwargs.items():
            if v is None:
                continue
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.writer.add_scalar(head + "/" + k, v, self.step if step is None else step)
    def flush(self):
        self.writer.flush()
def _load_checkpoint_for_ema(model_ema, checkpoint):
"""
Workaround for ModelEma._load_checkpoint to accept an already-loaded object
"""
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file)
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    # Monkeypatches builtins.print globally: on non-master ranks output is
    # suppressed unless the caller passes force=True.
    import builtins as __builtin__
    builtin_print = __builtin__.print
    def print(*args, **kwargs):
        # `force=True` bypasses the rank filter (popped so the real print
        # never sees it)
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)
    __builtin__.print = print
def is_dist_avail_and_initialized():
    """Return True iff torch.distributed is compiled in AND a process group
    has been initialized (i.e. distributed calls are safe to make)."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Number of processes in the default group; 1 when not distributed."""
    if is_dist_avail_and_initialized():
        return dist.get_world_size()
    return 1
def get_rank():
    """Rank of this process in the default group; 0 when not distributed."""
    if is_dist_avail_and_initialized():
        return dist.get_rank()
    return 0
def is_main_process():
    # rank 0 is treated as the single "main" process (logging, checkpointing)
    return get_rank() == 0
def save_on_master(*args, **kwargs):
    # torch.save pass-through that is a no-op on non-master ranks, so only
    # one process writes each checkpoint file
    if is_main_process():
        torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Initialize torch.distributed from whichever launcher set the environment.

    Mutates `args` in place (rank, world_size, gpu, distributed, dist_backend)
    and, when distributed, pins the CUDA device, creates the NCCL process
    group, and silences print on non-master ranks.
    """
    if args.dist_on_itp:
        # OpenMPI-style launcher (ITP): derive everything from OMPI_* vars
        args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
        args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
        args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
        args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
        # mirror into the torch-style variables for downstream consumers
        os.environ['LOCAL_RANK'] = str(args.gpu)
        os.environ['RANK'] = str(args.rank)
        os.environ['WORLD_SIZE'] = str(args.world_size)
        # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
    elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        # torchrun / torch.distributed.launch
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        # SLURM: one process per task, GPUs assigned round-robin per node
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}, gpu {}'.format(
        args.rank, args.dist_url, args.gpu), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"):
    """Non-strict state_dict load that reports (instead of raising on) key
    mismatches, suppressing warnings for keys matching any '|'-separated
    substring in `ignore_missing`.
    """
    missing_keys = []
    unexpected_keys = []
    error_msgs = []
    # copy state_dict so _load_from_state_dict can modify it
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata
    def load(module, prefix=''):
        # recursively walk the module tree, mirroring nn.Module.load_state_dict
        local_metadata = {} if metadata is None else metadata.get(
            prefix[:-1], {})
        module._load_from_state_dict(
            state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + '.')
    load(model, prefix=prefix)
    # split missing keys into ones worth warning about vs. deliberately ignored
    warn_missing_keys = []
    ignore_missing_keys = []
    for key in missing_keys:
        keep_flag = True
        for ignore_key in ignore_missing.split('|'):
            if ignore_key in key:
                keep_flag = False
                break
        if keep_flag:
            warn_missing_keys.append(key)
        else:
            ignore_missing_keys.append(key)
    missing_keys = warn_missing_keys
    if len(missing_keys) > 0:
        print("Weights of {} not initialized from pretrained model: {}".format(
            model.__class__.__name__, missing_keys))
    if len(unexpected_keys) > 0:
        print("Weights from pretrained model not used in {}: {}".format(
            model.__class__.__name__, unexpected_keys))
    if len(ignore_missing_keys) > 0:
        print("Ignored weights of {} not initialized from pretrained model: {}".format(
            model.__class__.__name__, ignore_missing_keys))
    if len(error_msgs) > 0:
        print('\n'.join(error_msgs))
class NativeScalerWithGradNormCount:
    """AMP loss scaler that also returns the pre-clip gradient norm.

    Call it like a function once per step; pass update_grad=False for
    gradient-accumulation sub-steps (backward only, no optimizer step).
    """
    state_dict_key = "amp_scaler"
    def __init__(self):
        self._scaler = torch.cuda.amp.GradScaler()
    def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
        self._scaler.scale(loss).backward(create_graph=create_graph)
        if update_grad:
            if clip_grad is not None:
                assert parameters is not None
                self._scaler.unscale_(optimizer)  # unscale the gradients of optimizer's assigned params in-place
                norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
            else:
                # still unscale so the reported norm is in true (unscaled) units
                self._scaler.unscale_(optimizer)
                norm = get_grad_norm_(parameters)
            self._scaler.step(optimizer)
            self._scaler.update()
        else:
            # accumulation step: no optimizer update, no norm available
            norm = None
        return norm
    def state_dict(self):
        return self._scaler.state_dict()
    def load_state_dict(self, state_dict):
        self._scaler.load_state_dict(state_dict)
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
    """Total gradient norm over `parameters` (a tensor or an iterable of tensors).

    Parameters without gradients are skipped; returns a CPU scalar tensor 0.0
    when nothing has a gradient. `norm_type` may be any p-norm, including
    float("inf") for the max-abs norm.

    Fix: the original compared against `inf` imported from `torch._six`, a
    private module removed in PyTorch >= 1.13; compare against float("inf")
    (numerically identical) so the function no longer depends on it.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = [p for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if len(parameters) == 0:
        return torch.tensor(0.)
    device = parameters[0].grad.device
    if norm_type == float("inf"):
        # infinity norm: largest absolute gradient entry across all params
        total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
    else:
        # p-norm of the vector of per-parameter p-norms == p-norm of all grads
        total_norm = torch.norm(
            torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]),
            norm_type)
    return total_norm
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0,
                     start_warmup_value=0, warmup_steps=-1):
    """Per-iteration schedule: linear warmup to `base_value`, then cosine
    decay to `final_value`, with exactly epochs * niter_per_ep entries.

    `warmup_steps > 0` overrides the warmup length derived from
    `warmup_epochs` (but the warmup segment itself is only emitted when
    `warmup_epochs > 0`).
    """
    warmup_iters = warmup_epochs * niter_per_ep
    if warmup_steps > 0:
        warmup_iters = warmup_steps
    print("Set warmup steps = %d" % warmup_iters)
    if warmup_epochs > 0:
        warmup_part = np.linspace(start_warmup_value, base_value, warmup_iters)
    else:
        warmup_part = np.array([])
    # cosine half-period over the remaining iterations
    decay_steps = epochs * niter_per_ep - warmup_iters
    step_ids = np.arange(decay_steps)
    cosine_part = final_value + 0.5 * (base_value - final_value) * (
        1 + np.cos(np.pi * step_ids / decay_steps))
    schedule = np.concatenate((warmup_part, cosine_part))
    assert len(schedule) == epochs * niter_per_ep
    return schedule
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
    """Write a checkpoint for `epoch` into args.output_dir.

    With a loss_scaler (native AMP path) a single .pth is written on the
    master rank; with loss_scaler=None the deepspeed engine's own
    save_checkpoint is used instead.
    """
    output_dir = Path(args.output_dir)
    epoch_name = str(epoch)
    if loss_scaler is not None:
        checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
        for checkpoint_path in checkpoint_paths:
            to_save = {
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch,
                'scaler': loss_scaler.state_dict(),
                'args': args,
            }
            if model_ema is not None:
                to_save['model_ema'] = get_state_dict(model_ema)
            # only rank 0 actually writes the file
            save_on_master(to_save, checkpoint_path)
    else:
        # deepspeed path: the engine handles sharding/IO; we only pass metadata
        client_state = {'epoch': epoch}
        if model_ema is not None:
            client_state['model_ema'] = get_state_dict(model_ema)
        model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state)
def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
    """Resume training state from args.resume, or auto-discover the newest
    'checkpoint-<epoch>' in args.output_dir when args.auto_resume is set.

    Restores model weights, and (when present) optimizer, epoch counter,
    EMA weights and the AMP scaler. The deepspeed branch (loss_scaler is
    None) supports auto-resume only.
    """
    output_dir = Path(args.output_dir)
    if loss_scaler is not None:
        # torch.amp
        if args.auto_resume and len(args.resume) == 0:
            import glob
            all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth'))
            latest_ckpt = -1
            for ckpt in all_checkpoints:
                # parse the epoch number out of 'checkpoint-<N>.pth'
                t = ckpt.split('-')[-1].split('.')[0]
                if t.isdigit():
                    latest_ckpt = max(int(t), latest_ckpt)
            if latest_ckpt >= 0:
                args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt)
            print("Auto resume checkpoint: %s" % args.resume)
        if args.resume:
            if args.resume.startswith('https'):
                checkpoint = torch.hub.load_state_dict_from_url(
                    args.resume, map_location='cpu', check_hash=True)
            else:
                checkpoint = torch.load(args.resume, map_location='cpu')
            model_without_ddp.load_state_dict(checkpoint['model'])
            print("Resume checkpoint %s" % args.resume)
            if 'optimizer' in checkpoint and 'epoch' in checkpoint:
                optimizer.load_state_dict(checkpoint['optimizer'])
                args.start_epoch = checkpoint['epoch'] + 1
                if hasattr(args, 'model_ema') and args.model_ema:
                    _load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
                if 'scaler' in checkpoint:
                    loss_scaler.load_state_dict(checkpoint['scaler'])
                print("With optim & sched!")
    else:
        # deepspeed, only support '--auto_resume'.
        if args.auto_resume:
            import glob
            all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*'))
            latest_ckpt = -1
            for ckpt in all_checkpoints:
                t = ckpt.split('-')[-1].split('.')[0]
                if t.isdigit():
                    latest_ckpt = max(int(t), latest_ckpt)
            if latest_ckpt >= 0:
                args.resume = os.path.join(output_dir, 'checkpoint-%d' % latest_ckpt)
                print("Auto resume checkpoint: %d" % latest_ckpt)
                _, client_states = model.load_checkpoint(args.output_dir, tag='checkpoint-%d' % latest_ckpt)
                args.start_epoch = client_states['epoch'] + 1
                if model_ema is not None:
                    if args.model_ema:
                        _load_checkpoint_for_ema(model_ema, client_states['model_ema'])
def create_d_vae(weight_path, d_vae_type, image_size, device):
    """Factory for the discrete image tokenizer selected by `d_vae_type`
    ('dall-e', 'vqgan' or 'customized'); raises NotImplementedError for
    anything else."""
    if d_vae_type == "dall-e":
        return get_dalle_vae(weight_path, image_size, device)
    if d_vae_type == "vqgan":
        return get_vqgan_vae(weight_path, device)
    if d_vae_type == "customized":
        return get_d_vae(weight_path, image_size, device)
    raise NotImplementedError()
def get_dalle_vae(weight_path, image_size, device):
    # OpenAI DALL-E tokenizer: loads encoder.pkl / decoder.pkl from weight_path
    vae = Dalle_VAE(image_size)
    vae.load_model(model_dir=weight_path, device=device)
    return vae
def get_vqgan_vae(weight_path, device):
    # VQGAN tokenizer; expects vqgan.1024.model.ckpt + vqgan.1024.config.yml
    # inside weight_path
    vae = VQGanVAE(weight_path+"/vqgan.1024.model.ckpt", weight_path+"/vqgan.1024.config.yml").to(device)
    return vae
def get_d_vae(weight_path, image_size, device):
    """Load the in-house DiscreteVAE tokenizer from weight_path/pytorch_model.bin."""
    # architecture hyper-parameters fixed to match the released checkpoint
    NUM_TOKENS = 8192
    NUM_LAYERS = 3
    EMB_DIM = 512
    HID_DIM = 256
    # checkpoint stores the state_dict under the "weights" key
    state_dict = torch.load(os.path.join(weight_path, "pytorch_model.bin"), map_location="cpu")["weights"]
    model = DiscreteVAE(
        image_size=image_size,
        num_layers=NUM_LAYERS,
        num_tokens=NUM_TOKENS,
        codebook_dim=EMB_DIM,
        hidden_dim=HID_DIM,
    ).to(device)
    model.load_state_dict(state_dict)
    return model
def create_ds_config(args):
    """Write a deepspeed JSON config derived from `args` into args.output_dir
    and point args.deepspeed_config at it."""
    args.deepspeed_config = os.path.join(args.output_dir, "deepspeed_config.json")
    with open(args.deepspeed_config, mode="w") as writer:
        ds_config = {
            # global batch = per-gpu batch * grad-accumulation * world size
            "train_batch_size": args.batch_size * args.update_freq * get_world_size(),
            "train_micro_batch_size_per_gpu": args.batch_size,
            "steps_per_print": 1000,
            "optimizer": {
                "type": "Adam",
                "adam_w_mode": True,
                "params": {
                    "lr": args.lr,
                    "weight_decay": args.weight_decay,
                    "bias_correction": True,
                    "betas": [
                        0.9,
                        0.999
                    ],
                    "eps": 1e-8
                }
            },
            "fp16": {
                # dynamic loss scaling starting at 2**7
                "enabled": True,
                "loss_scale": 0,
                "initial_scale_power": 7,
                "loss_scale_window": 128
            }
        }
        writer.write(json.dumps(ds_config, indent=2))
| 19,190 | 33.829401 | 128 | py |
DaVinci | DaVinci-main/models/model_glue.py | # Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
from models.xbert import BertConfig
from models.davinci_pretrain import DaVinci
from torch import nn
import torch.nn.functional as F
class DaVinciGLUE(nn.Module):
    """DaVinci backbone plus a 2-layer MLP classification head for GLUE tasks.

    num_labels == 1 selects regression (MSE, e.g. STS-B); otherwise
    classification with cross-entropy.
    """
    def __init__(self,
                 encoder = None,
                 text_decoder = None,
                 tokenizer = None,
                 config = None,
                 num_labels = None,
                 ):
        super().__init__()
        # offset (from the end of the non-padded sequence) of the decoder
        # state used as the sentence representation
        self.last_hidden_id_shift = config['last_hidden_id_shift']
        self.tokenizer = tokenizer
        self.davinci = DaVinci(encoder, text_decoder, tokenizer, config, init_deit=False, init_dalle=True)
        bert_config = BertConfig.from_json_file(config['bert_config'])
        self.num_labels = num_labels
        self.cls_head = nn.Sequential(
                  nn.Linear(bert_config.hidden_size, bert_config.hidden_size),
                  nn.ReLU(),
                  nn.Linear(bert_config.hidden_size, num_labels)
                )
    def forward(self, text, targets, alpha=0, train=True):
        """Return the scalar loss when train=True, else the raw head logits.

        `alpha` is accepted for interface compatibility but unused here.
        """
        # index of the representation token within each (padded) sequence
        last_state_ids = text.attention_mask.sum(1) - self.last_hidden_id_shift
        output = self.davinci(image=None,
                              context=text,
                              gen_text=text,
                              last_state_ids = last_state_ids,
                              is_ve = True,
                              train=train, decode=False)
        prediction = self.cls_head(output)
        if train:
            if self.num_labels == 1:
                # regression task (single scalar output)
                loss = F.mse_loss(prediction.squeeze(), targets.squeeze())
            else:
                loss = F.cross_entropy(prediction, targets)
            return loss
        else:
            return prediction
| 1,967 | 39.163265 | 114 | py |
DaVinci | DaVinci-main/models/modeling_discrete_vae.py | # --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on OpenAI DALL-E and lucidrains' DALLE-pytorch code bases
# https://github.com/openai/DALL-E
# https://github.com/lucidrains/DALLE-pytorch
# --------------------------------------------------------'
from math import sqrt
import os
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
def top_k(logits, thres = 0.5):
    """Keep only the top-k entries of `logits` along the last dim, setting the
    rest to -inf (standard top-k filtering before sampling).

    k = max(int((1 - thres) * vocab), 1), so higher `thres` keeps fewer logits.

    Fix: the original scattered along dim=1 while torch.topk operates on the
    last dim, which only agreed for 2-D inputs; scattering along dim=-1 keeps
    2-D behavior identical and makes the function correct for any rank.
    """
    num_logits = logits.shape[-1]
    k = max(int((1 - thres) * num_logits), 1)
    val, ind = torch.topk(logits, k)
    filtered = torch.full_like(logits, float('-inf'))
    filtered.scatter_(-1, ind, val)
    return filtered
def exists(val):
    """Return True when *val* carries a value (i.e. is not None)."""
    return not (val is None)
def default(val, d):
    """Return *val* unless it is None, in which case fall back to *d*."""
    if val is None:
        return d
    return val
def eval_decorator(fn):
    """Decorator: run *fn* with the model switched to eval mode, then restore
    the model's previous train/eval state before returning the result."""
    def wrapped(model, *args, **kwargs):
        prev_mode = model.training
        model.eval()
        result = fn(model, *args, **kwargs)
        model.train(prev_mode)
        return result
    return wrapped
class BasicVAE(nn.Module):
    """Abstract interface for discrete image tokenizers (DiscreteVAE, Dalle_VAE)."""
    def get_codebook_indices(self, images):
        # images -> hard codebook token ids
        raise NotImplementedError()
    def decode(self, img_seq):
        # codebook token ids -> reconstructed images
        raise NotImplementedError()
    def get_codebook_probs(self, img_seq):
        # images -> per-position probability distribution over the codebook
        raise NotImplementedError()
    def get_image_tokens_size(self):
        # side length of the token grid; optional for subclasses
        pass
    def get_image_size(self):
        # pixel side length expected by the tokenizer; optional for subclasses
        pass
class ResBlock(nn.Module):
    """Residual block: 3x3 conv -> ReLU -> 3x3 conv -> ReLU -> 1x1 conv,
    added back onto the input (shapes must match for the skip connection)."""
    def __init__(self, chan_in, hidden_size, chan_out):
        super().__init__()
        layers = [
            nn.Conv2d(chan_in, hidden_size, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(hidden_size, hidden_size, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(hidden_size, chan_out, 1),
        ]
        # keep the attribute name `net` so checkpoint state_dict keys match
        self.net = nn.Sequential(*layers)
    def forward(self, x):
        residual = x
        return self.net(x) + residual
class DiscreteVAE(BasicVAE):
    """dVAE image tokenizer: a conv encoder maps images to a grid of logits
    over `num_tokens` codebook entries; Gumbel-softmax samples soft codes that
    are decoded back to pixels. Each of the `num_layers` stride-2 stages halves
    the spatial resolution.
    """
    def __init__(
        self,
        image_size = 256,
        num_tokens = 512,
        codebook_dim = 512,
        num_layers = 3,
        hidden_dim = 64,
        channels = 3,
        smooth_l1_loss = False,
        temperature = 0.9,
        straight_through = False,
        kl_div_loss_weight = 0.
    ):
        super().__init__()
        # assert log2(image_size).is_integer(), 'image size must be a power of 2'
        assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
        self.image_size = image_size
        self.num_tokens = num_tokens
        self.num_layers = num_layers
        self.temperature = temperature
        self.straight_through = straight_through
        self.codebook = nn.Embedding(num_tokens, codebook_dim)
        enc_layers = []
        dec_layers = []
        enc_in = channels
        dec_in = codebook_dim
        # mirrored encoder (stride-2 conv) / decoder (stride-2 transposed conv)
        # stacks, each stage followed by a residual block
        for layer_id in range(num_layers):
            enc_layers.append(nn.Sequential(nn.Conv2d(enc_in, hidden_dim, 4, stride=2, padding=1), nn.ReLU()))
            enc_layers.append(ResBlock(chan_in=hidden_dim, hidden_size=hidden_dim, chan_out=hidden_dim))
            enc_in = hidden_dim
            dec_layers.append(nn.Sequential(nn.ConvTranspose2d(dec_in, hidden_dim, 4, stride=2, padding=1), nn.ReLU()))
            dec_layers.append(ResBlock(chan_in=hidden_dim, hidden_size=hidden_dim, chan_out=hidden_dim))
            dec_in = hidden_dim
        # 1x1 heads: encoder -> per-position codebook logits, decoder -> pixels
        enc_layers.append(nn.Conv2d(hidden_dim, num_tokens, 1))
        dec_layers.append(nn.Conv2d(hidden_dim, channels, 1))
        self.encoder = nn.Sequential(*enc_layers)
        self.decoder = nn.Sequential(*dec_layers)
        self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
        self.kl_div_loss_weight = kl_div_loss_weight
    def get_image_size(self):
        return self.image_size
    def get_image_tokens_size(self):
        # NOTE(review): hard-codes a factor of 8 (i.e. assumes num_layers == 3
        # stride-2 stages) — confirm if used with other depths
        return self.image_size // 8
    @torch.no_grad()
    @eval_decorator
    def get_codebook_indices(self, images):
        # hard token ids = argmax over the codebook logits channel
        logits = self.forward(images, return_logits = True)
        codebook_indices = logits.argmax(dim = 1)
        return codebook_indices
    @torch.no_grad()
    @eval_decorator
    def get_codebook_probs(self, images):
        logits = self.forward(images, return_logits = True)
        return nn.Softmax(dim=1)(logits)
    def decode(
        self,
        img_seq
    ):
        """Decode a flattened (b, n) sequence of token ids back to images;
        n must be a perfect square (the token grid is assumed square)."""
        image_embeds = self.codebook(img_seq)
        b, n, d = image_embeds.shape
        h = w = int(sqrt(n))
        image_embeds = rearrange(image_embeds, 'b (h w) d -> b d h w', h = h, w = w)
        images = self.decoder(image_embeds)
        return images
    def forward(
        self,
        img,
        return_loss = False,
        return_recons = False,
        return_logits = False,
        temp = None
    ):
        """Encode `img`; by default return the reconstruction. With
        return_logits return raw codebook logits; with return_loss return the
        training loss (recon + weighted KL-to-uniform), optionally with the
        reconstruction when return_recons is also set.
        """
        device, num_tokens, image_size, kl_div_loss_weight = img.device, self.num_tokens, self.image_size, self.kl_div_loss_weight
        assert img.shape[-1] == image_size and img.shape[-2] == image_size, f'input must have the correct image size {image_size}'
        logits = self.encoder(img)
        if return_logits:
            return logits # return logits for getting hard image indices for DALL-E training
        temp = default(temp, self.temperature)
        # differentiable sampling of codebook entries (one-hot if straight_through)
        soft_one_hot = F.gumbel_softmax(logits, tau = temp, dim = 1, hard = self.straight_through)
        sampled = einsum('b n h w, n d -> b d h w', soft_one_hot, self.codebook.weight)
        out = self.decoder(sampled)
        if not return_loss:
            return out
        # reconstruction loss
        recon_loss = self.loss_fn(img, out)
        # kl divergence
        logits = rearrange(logits, 'b n h w -> b (h w) n')
        qy = F.softmax(logits, dim = -1)
        log_qy = torch.log(qy + 1e-10)
        log_uniform = torch.log(torch.tensor([1. / num_tokens], device = device))
        # KL(uniform || q) with log-space inputs, averaged over the batch
        kl_div = F.kl_div(log_uniform, log_qy, None, None, 'batchmean', log_target = True)
        loss = recon_loss + (kl_div * kl_div_loss_weight)
        if not return_recons:
            return loss
        return loss, out
from models.dall_e import load_model
class Dalle_VAE(BasicVAE):
    """Wrapper around OpenAI's released DALL-E encoder/decoder pickles.

    Token grid side length is image_size // 8 (fixed by the released model).
    Call load_model() before using any other method.
    """
    def __init__(self, image_size):
        super().__init__()
        self.encoder = None  # set by load_model
        self.decoder = None  # set by load_model
        self.image_size = image_size
    def load_model(self, model_dir, device):
        # loads the official encoder.pkl / decoder.pkl checkpoints
        self.encoder = load_model(os.path.join(model_dir, "encoder.pkl"), device)
        self.decoder = load_model(os.path.join(model_dir, "decoder.pkl"), device)
    def decode(self, img_seq):
        """Decode a flattened (bsz, n) sequence of token ids to images."""
        bsz = img_seq.size()[0]
        img_seq = img_seq.view(bsz, self.image_size // 8, self.image_size // 8) # [10, 16, 16]
        z = F.one_hot(img_seq, num_classes=self.encoder.vocab_size).permute(0, 3, 1, 2).float() # [10, 8192, 16, 16]
        return self.decoder(z).float()
    def get_codebook_indices(self, images):
        # hard token ids from the encoder's per-position logits
        z_logits = self.encoder(images)
        return torch.argmax(z_logits, axis=1)
    def get_codebook_probs(self, images):
        z_logits = self.encoder(images)
        return nn.Softmax(dim=1)(z_logits)
    def forward(self, img_seq_prob, no_process=False):
        """Decode soft token distributions; with no_process=True the input is
        passed to the decoder as-is, otherwise it is reshaped from
        (bsz, seq_len, vocab) to the decoder's NCHW layout."""
        if no_process:
            return self.decoder(img_seq_prob.float()).float()
        else:
            bsz, seq_len, num_class = img_seq_prob.size()
            z = img_seq_prob.view(bsz, self.image_size // 8, self.image_size // 8, self.encoder.vocab_size)
            return self.decoder(z.permute(0, 3, 1, 2).float()).float()
| 7,573 | 30.823529 | 130 | py |
DaVinci | DaVinci-main/models/model_image_sampling.py | # Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
from models.xbert import BertConfig, BertModelImage
from models.bert import BertLMHeadModel
from models.resnet import resnet101emb, resnet101
import torch
from torch import nn
import utils
import torchvision
import torch.nn.functional as F
import numpy as np
from transformers import (
LogitsProcessorList,
MinLengthLogitsProcessor,
BeamSearchScorer,
)
import torch
# from dalle_pytorch import OpenAIDiscreteVAE, DALLE
import models.dalle_utils as dalle_utils
import torchvision.transforms as T
import os
from models.dall_e.utils import unmap_pixels
from transformers import (
AutoTokenizer,
AutoModelForSeq2SeqLM,
LogitsProcessorList,
MinLengthLogitsProcessor,
BeamSearchScorer,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
StoppingCriteriaList,
MaxLengthCriteria
)
from PIL import Image
from torchvision import transforms
import clip
# Module-level torchvision converters shared by the image helpers below.
loader = transforms.Compose([transforms.ToTensor()])  # PIL image -> tensor
unloader = transforms.ToPILImage()  # tensor -> PIL image
def tensor2pil_img(src_img):
    """Convert an image tensor (optionally with a leading batch dim of 1) to a
    PIL image via the module-level `unloader` transform. The input tensor is
    cloned, so it is never mutated."""
    assert isinstance(src_img, torch.Tensor), 'the img type is {}, but torch.Tensor expected'.format(type(src_img))
    img = src_img.cpu().clone()
    img = img.squeeze(0)
    return unloader(img)
def exists(val):
    """True when *val* is anything other than None."""
    return not (val is None)
def save_img(original_image, original_small_image, reconstructed_image, generated_image, d_vae_type, raw_caption, captionindex, imageround, image_per_round, clip_model, clip_preprocess):
    """Score each generated image against its caption with CLIP and save only
    the best-scoring one to disk.

    NOTE(review): the output directory is hard-coded; `original_image`,
    `original_small_image`, `imageround` and `image_per_round` are currently
    unused by the body.
    """
    save_path = f"/opt/tiger/seq2seq_vlm/seq2seq_vlm_new/dalle_images"
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if d_vae_type == 'dall-e': # for vqvae
        # DALL-E decoder outputs are in its mapped pixel space; undo the mapping
        reconstructed_image = unmap_pixels(torch.sigmoid(reconstructed_image[:, :3]))
        generated_image = unmap_pixels(torch.sigmoid(generated_image[:, :3]))
    bsz = generated_image.shape[0]
    image2score = {}
    for i in range(bsz):
        # CLIP image/text similarity for image i vs caption i
        image = clip_preprocess(tensor2pil_img(generated_image[i])).unsqueeze(0).to(generated_image.device)
        text = clip.tokenize(raw_caption[i]).to(generated_image.device)
        with torch.no_grad():
            logits_per_image, logits_per_text = clip_model(image, text)
        image2score[i] = float(logits_per_image)
    # rank by CLIP score, descending
    sorted_image2score = sorted(image2score.items(),key = lambda x:x[1],reverse = True)
    for ranking, (imageindex, clipscore) in enumerate(sorted_image2score):
        # only the top-1 image is written; the loop exits after the first entry
        if ranking > 0: break
        torchvision.utils.save_image(
            generated_image[imageindex].cpu().data,
            f"{save_path}/generate_gpu{utils.get_rank()}_captionindex{captionindex}_ranking{ranking}_clipscore{clipscore}_{raw_caption[ranking]}.png",
            normalize=True,
            nrow=1,
            # range=(-0.5, 0.5),
        )
class DaVinci(nn.Module):
    def __init__(self,
                 encoder = None,
                 text_decoder = None,
                 tokenizer = None,
                 config = None,
                 init_deit = True,
                 init_dalle = False,
                 device = "cuda",
                 ):
        """Build the DaVinci sampling model: ResNet-101 visual encoder, BERT
        multimodal encoder, BERT LM-head decoder, CLIP for reranking, and
        (when init_dalle) a discrete VAE image tokenizer.

        `encoder` / `text_decoder` are pretrained-weight identifiers used only
        when config["init_encoder"] / config["init_decoder"] are set.
        """
        super().__init__()
        # generation hyper-parameters, all sourced from config
        self.num_beams = config["num_beams"]
        self.max_length = config["max_length"]
        self.min_length = 10
        self.temperature = config["temperature"]
        self.length_penalty = config["length_penalty"]
        self.early_stopping=config["early_stopping"]
        self.num_return_sequences=config["num_return_sequences"]
        self.repetition_penalty = config["repetition_penalty"]
        self.top_k = config["top_k"]
        self.top_p = config["top_p"]
        self.tokenizer = tokenizer
        self.IMG_BOS = 2  # decoder start token id for image-token generation
        self.num_images = config["num_images"]
        self.image_per_round = config["image_per_round"]
        # one visual token per 16x16 patch
        num_patches = int((config["image_res"] / 16) ** 2)
        self.visual_encoder = resnet101emb(embed_dim=1024, num_patches=num_patches, drop_rate=0.0)
        # CLIP is used to rerank generated images against captions
        self.clip_model, self.clip_preprocess = clip.load("ViT-B/32", device=device)
        if init_deit:
            # warm-start the visual encoder from torchvision's ImageNet ResNet-101,
            # copying only the weights whose names/shapes match
            pretrained_model = resnet101(pretrained=True)
            model_dict = self.visual_encoder.state_dict()
            pretrained_dict = {k: v for k, v in pretrained_model.state_dict().items() if k in model_dict}
            model_dict.update(pretrained_dict)
            msg = self.visual_encoder.load_state_dict(model_dict)
            print(msg)
        config_encoder = BertConfig.from_json_file(config['bert_config'])
        # extend the text vocab with visual-token ids
        config_encoder.vocab_size += config_encoder.visual_vocab_size
        if config["init_encoder"]:
            self.encoder = BertModelImage.from_pretrained(encoder, config=config_encoder, add_pooling_layer=False)
        else:
            self.encoder = BertModelImage(config=config_encoder, add_pooling_layer=False)
        vision_width = config['vision_width']
        emb_dim = config_encoder.hidden_size
        # project visual features into the encoder's embedding space
        self.vision_proj = nn.Linear(vision_width, emb_dim)
        self.config_decoder = BertConfig.from_json_file(config['bert_config'])
        self.config_decoder.is_decoder=True
        self.config_decoder.add_cross_attention=True
        self.config_decoder.is_encoder_decoder=True
        if init_dalle:
            # decoder also emits visual tokens, so widen its vocab and attach
            # the discrete VAE used to turn token ids into pixels
            self.config_decoder.vocab_size += self.config_decoder.visual_vocab_size
            self.d_vae = dalle_utils.create_d_vae(
                weight_path=config["discrete_vae_weight_path"], d_vae_type=config["discrete_vae_type"],
                device=device, image_size=config["second_input_size"])
            self.d_vae_type = config["discrete_vae_type"]
        if config["init_decoder"]:
            self.text_decoder = BertLMHeadModel.from_pretrained(text_decoder, config=self.config_decoder)
        else:
            self.text_decoder = BertLMHeadModel(config=self.config_decoder)
        # tie input/output embeddings between encoder and decoder
        self.text_decoder.cls.predictions.decoder.weight = self.encoder.embeddings.word_embeddings.weight
        self.text_decoder.bert.embeddings.word_embeddings.weight = self.encoder.embeddings.word_embeddings.weight
    def forward(self, image, context, gen_text=None, last_state_ids=None, train=True, decode=False, num_keep_best=1, do_sample=False, text_full=None, prefix_image=None, suffix_image=None, prefix_image_small=None, visual_token_image=None, use_dalle=False, raw_caption=None, captionindex=None, *args, **kwargs):
        """Multi-purpose forward.

        use_dalle=True: encode caption (+ optional prefix image), sample
        visual tokens from the decoder, decode them with the d-VAE and save
        the CLIP-best image; returns two dummy zero tensors.
        Otherwise dispatches to task_forward (last_state_ids given),
        decode_forward (decode=False) or beam-search text generation.

        NOTE(review): in the non-use_dalle paths below, `encoder_states`,
        `encoder_attns` and `encoder_output` are referenced but never assigned
        in this method — those paths look broken as written; confirm against
        the full pretraining model this file was derived from.
        """
        if use_dalle:
            # producing text embeddings for full caption
            vae_context_attns = text_full.attention_mask #[20, 15] [bsz, text_seq_len]
            if prefix_image is not None:
                # encode the image prefix and prepend its attention mask
                prefix_image_embeds = self.visual_encoder(prefix_image) # prefix_image [20, 3, 128, 256] prefix_image_embeds [20, 129, 1024]
                prefix_image_embeds = self.vision_proj(prefix_image_embeds) # [20, 257, 768] 257 = 256 + 1 (CLS) [20, 129, 768]
                prefix_image_atts = torch.ones(((prefix_image_embeds.shape[0], prefix_image_embeds.shape[1])), device=prefix_image_embeds.device) # [20, 129]
                vae_encoder_attns = torch.cat([prefix_image_atts, vae_context_attns], dim=1) #[20, 272] [bsz, image_seq_length + text_seq_length] -> [20, 144]
            else:
                prefix_image_embeds = None
                vae_encoder_attns = vae_context_attns
            vae_encoder_output = self.encoder(text_full.input_ids, # [22, 19]
                                        input_v_embs = prefix_image_embeds, # None
                                        attention_mask=vae_encoder_attns, # [22, 19]
                                        return_dict = True,
                                        prefix_image = True,) # text_emb [20, 272, 768]
            # ground-truth tokens for the image suffix (computed but unused here)
            masked_image_ids = self.d_vae.get_codebook_indices(suffix_image).flatten(1) # visual tokens / labels [bsz, 256]
            # for imageround in range(int(self.num_images/self.image_per_round)):
            if True:
                BSZ = text_full.attention_mask.shape[0] # batch_size 32
                num_beams = self.num_beams
                # define decoder start token ids
                input_ids = torch.ones((num_beams * BSZ, 1), device=self.text_decoder.device, dtype=torch.long) # [32,1]
                input_ids = input_ids * self.IMG_BOS # <img_bos>
                # add encoder_outputs to model keyword arguments
                model_kwargs = {
                    "encoder_outputs": vae_encoder_output,
                    "encoder_hidden_states": vae_encoder_output.last_hidden_state.repeat_interleave(num_beams, dim=0) # [12*4, 258, 768]
                }
                # generation mode is hard-wired to sampling; the greedy and
                # beam-sample branches below are kept but disabled
                is_greedy_gen_mode = False
                is_sample_gen_mode = True
                is_beam_sample_gen_mode = False
                stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=self.max_length)])
                # instantiate logits processors
                logits_processor = LogitsProcessorList([
                    MinLengthLogitsProcessor(self.min_length, eos_token_id=self.tokenizer.eos_token_id),
                ])
                if is_greedy_gen_mode:
                    if self.num_return_sequences > 1:
                        raise ValueError(
                            f"num_return_sequences has to be 1, but is {self.num_return_sequences} when doing greedy search."
                        )
                    # greedy search
                    outputs = self.text_decoder.greedy_search(
                        input_ids,
                        logits_processor=logits_processor,
                        stopping_criteria=stopping_criteria,
                        # pad_token_id=pad_token_id,
                        # eos_token_id=eos_token_id,
                        # output_scores=output_scores,
                        # return_dict_in_generate=return_dict_in_generate,
                        # synced_gpus=synced_gpus,
                        **model_kwargs,
                    )
                elif is_sample_gen_mode:
                    # instantiate logits processors
                    logits_warper = LogitsProcessorList([
                        TemperatureLogitsWarper(self.temperature),
                        TopKLogitsWarper(top_k=self.top_k, min_tokens_to_keep=(2 if num_beams > 1 else 1)),
                        # TopPLogitsWarper(top_p=self.top_p, min_tokens_to_keep=(2 if num_beams > 1 else 1))
                    ])
                    # sample
                    outputs = self.text_decoder.sample(
                        input_ids,
                        logits_processor=logits_processor,
                        logits_warper=logits_warper,
                        stopping_criteria=stopping_criteria,
                        # pad_token_id=pad_token_id,
                        # eos_token_id=eos_token_id,
                        # output_scores=output_scores,
                        # return_dict_in_generate=return_dict_in_generate,
                        # synced_gpus=synced_gpus,
                        **model_kwargs,
                    )
                elif is_beam_sample_gen_mode:
                    # instantiate beam scorer
                    beam_scorer = BeamSearchScorer(
                        batch_size=BSZ,
                        num_beams=num_beams,
                        device=self.text_decoder.device,
                        length_penalty=self.length_penalty,
                        do_early_stopping=self.early_stopping,
                        num_beam_hyps_to_keep=self.num_return_sequences,
                    )
                    outputs = self.text_decoder.beam_search(input_ids,
                                            beam_scorer,
                                            logits_processor=logits_processor,
                                            stopping_criteria=stopping_criteria,
                                            max_length=self.max_length,
                                            **model_kwargs) # outputs: tensor[2,7] [bsz, length]
                # ---------------generate image---------------
                # drop the <img_bos> and shift generated ids from the joint
                # (text + visual) vocabulary back into pure visual-token ids
                offsetted_visual_tokens = outputs[:, 1:] # [bsz, 256]
                # offsetted_visual_tokens = offsetted_samples[:, gen_text.input_ids.shape[1]-1:] # [10, 127]
                visual_tokens = offsetted_visual_tokens - self.config_decoder.text_vocab_size # [bsz. 256]
                generated_image_ids = visual_tokens
                if generated_image_ids.min() < 0:
                    # sampler emitted a text-vocab token; can't decode as image
                    print("Error, skip")
                    return torch.tensor(0), torch.tensor(0)
                reconstructed_image = None
                generated_image = self.d_vae.decode(generated_image_ids)
                save_img(image, suffix_image, reconstructed_image, generated_image, self.d_vae_type, raw_caption, captionindex, None, self.image_per_round, self.clip_model, self.clip_preprocess)
            return torch.tensor(0), torch.tensor(0)
            # return loss, loss_image_generation, logits
        if last_state_ids is not None:
            # we need to extract hidden state of the last text id for downstream tasks
            return self.task_forward(gen_text.input_ids, encoder_states, encoder_attns, last_state_ids, gen_text.attention_mask)
        if not decode:
            return self.decode_forward(gen_text.input_ids, encoder_states, encoder_attns, gen_text.attention_mask, train, *args, **kwargs)
        else:
            # beam-search text generation conditioned on the encoder states
            BSZ = encoder_states.shape[0] # batch_size 12
            num_beams = self.num_beams
            # define decoder start token ids
            input_ids = torch.ones((num_beams * BSZ, 1), device=self.text_decoder.device, dtype=torch.long)
            input_ids = input_ids * self.tokenizer.bos_token_id
            # add encoder_outputs to model keyword arguments
            model_kwargs = {
                "encoder_outputs": encoder_output,
                "encoder_hidden_states": encoder_states.repeat_interleave(num_beams, dim=0) # [12*4, 258, 768]
            }
            # instantiate beam scorer
            beam_scorer = BeamSearchScorer(
                batch_size=BSZ,
                num_beams=num_beams,
                device=self.text_decoder.device,
                length_penalty=self.length_penalty,
                do_early_stopping=self.early_stopping,
                num_beam_hyps_to_keep=self.num_return_sequences,
            )
            # instantiate logits processors
            logits_processor = LogitsProcessorList([
                MinLengthLogitsProcessor(5, eos_token_id=self.tokenizer.eos_token_id),
            ])
            outputs = self.text_decoder.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, max_length=self.max_length, **model_kwargs) # outputs: tensor[2,7] [bsz, length]
            return outputs
def task_forward(self, input_ids, encoder_states, encoder_atts, last_state_ids, attention_mask=None):
gen_text_output = self.text_decoder(input_ids,
attention_mask = attention_mask,
encoder_hidden_states = encoder_states,
encoder_attention_mask = encoder_atts,
return_dict = True,
)
decoder_states = gen_text_output.hidden_states
last_states = decoder_states[range(len(last_state_ids)), last_state_ids]
return last_states
def decode_forward(self, input_ids, encoder_states, encoder_atts, attention_mask=None, train=True):
if not train:
gen_text_output = self.text_decoder(input_ids,
attention_mask = attention_mask,
encoder_hidden_states = encoder_states,
encoder_attention_mask = encoder_atts,
return_dict = True,
)
return gen_text_output.logits
else:
gen_text_targets = input_ids.masked_fill(input_ids == self.tokenizer.pad_token_id, -100)
gen_text_output = self.text_decoder(input_ids,
attention_mask = attention_mask,
encoder_hidden_states = encoder_states,
encoder_attention_mask = encoder_atts,
labels = gen_text_targets,
return_dict = True
)
loss = gen_text_output.loss.mean()
logits = gen_text_output.logits
return loss, logits
def decode_visual_forward(self, input_ids, encoder_states, encoder_atts, attention_mask=None, train=True):
if not train:
gen_text_output = self.visual_decoder(input_ids,
attention_mask = attention_mask,
encoder_hidden_states = encoder_states,
encoder_attention_mask = encoder_atts,
return_dict = True,
)
return gen_text_output.logits
else:
gen_text_targets = input_ids.masked_fill(input_ids == self.tokenizer.pad_token_id, -100)
gen_text_output = self.visual_decoder(input_ids,
attention_mask = attention_mask,
encoder_hidden_states = encoder_states,
encoder_attention_mask = encoder_atts,
labels = gen_text_targets,
return_dict = True
)
loss = gen_text_output.loss.mean()
logits = gen_text_output.logits
return loss, logits
def generate(self, input_ids, encoder_states, encoder_atts, num_keep_best=1, do_sample=False):
self.num_keep_best = num_keep_best
batch_size = encoder_states.shape[0]
if input_ids is None:
input_ids = torch.full(
(batch_size, 1), self.tokenizer.bos_token_id, dtype=torch.long, device=encoder_states.device
)
else:
assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
assert input_ids.shape[0] == batch_size, "Input batch size must match image features"
cur_len = input_ids.shape[1]
num_expand = self.num_beams
# input_ids = self._expand_for_beams(input_ids, num_expand)
encoder_states = self._expand_for_beams(encoder_states, num_expand)
encoder_atts = self._expand_for_beams(encoder_atts, num_expand)
if self.num_beams > 1:
output = self._generate_beam_search(
input_ids,
encoder_states,
encoder_atts,
cur_len,
self.max_length,
do_sample,
self.temperature,
self.top_k,
self.top_p,
self.repetition_penalty,
self.tokenizer.pad_token_id,
self.tokenizer.eos_token_id,
batch_size,
self.length_penalty,
self.num_beams,
self.tokenizer.vocab_size,
)
else:
output = self._generate_no_beam_search(
input_ids,
encoder_states,
encoder_atts,
cur_len,
self.max_length,
do_sample,
self.temperature,
self.top_k,
self.top_p,
self.repetition_penalty,
self.tokenizer.pad_token_id,
self.tokenizer.eos_token_id,
batch_size,
)
return output
def _expand_for_beams(self, x, num_expand):
if x is None or num_expand == 1:
return x
input_shape = list(x.shape)
expanded_shape = input_shape[:1] + [num_expand] + input_shape[1:]
x = x.unsqueeze(1).expand(expanded_shape)
# (batch_size * num_expand, ...)
x = x.contiguous().view([input_shape[0] * num_expand] + input_shape[1:])
return x
def prepare_inputs_for_generation(self, curr_ids, **kwargs):
# do not consider past history here, as we use a separate decoder
mask_token_id = self.tokenizer.mask_token_id
batch_size = curr_ids.shape[0]
mask_ids = torch.full(
(batch_size, 1), mask_token_id, dtype=torch.long, device=curr_ids.device
)
input_ids = torch.cat([curr_ids, mask_ids], dim=1)
# other params are default, like attention_mask
return {"input_ids": input_ids}
    def _generate_no_beam_search(
        self,
        input_ids,
        encoder_states,
        encoder_atts,
        cur_len,
        max_length,
        do_sample,
        temperature,
        top_k,
        top_p,
        repetition_penalty,
        pad_token_id,
        eos_token_ids,
        batch_size,
    ):
        """Generate one sequence per example without beam search (num_beams == 1).

        Sequences are extended token by token, greedily or by sampling
        (``do_sample``), until every example emitted an EOS token or
        ``max_length`` is reached.

        Returns:
            (input_ids, logprobs): ids of shape (batch_size, 1, max_length)
            padded with ``pad_token_id``, and per-sequence mean token
            log-probabilities of shape (batch_size, 1).
        """
        if type(eos_token_ids) != list:
            eos_token_ids = [eos_token_ids]
        assert self.num_keep_best == 1, 'cannot generate >1 sentences in greedy search'
        # per-step record of which sentences were still unfinished
        unfinished_sents = []
        # 1 while a sentence is still being generated, 0 once it emitted EOS
        cur_unfinished = input_ids.new(batch_size).fill_(1)
        # log of scores for each sentence in the batch
        logprobs = []
        while cur_len < max_length:
            # model_inputs = self.prepare_inputs_for_generation(input_ids)
            logits = self.decode_forward(input_ids, encoder_states, encoder_atts, attention_mask=None, train=False)
            # the logit predicting position cur_len lives at index cur_len - 1
            next_token_idx = cur_len - 1
            next_token_logits = logits[:, next_token_idx, :]
            # repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)
            if repetition_penalty != 1.0:
                for i in range(batch_size):
                    for previous_token in set(input_ids[i].tolist()):
                        # if score < 0 then repetition penalty has to multiplied to reduce the previous token probability
                        if next_token_logits[i, previous_token] < 0:
                            next_token_logits[i, previous_token] *= repetition_penalty
                        else:
                            next_token_logits[i, previous_token] /= repetition_penalty
            if do_sample:
                # Temperature (higher temperature => more likely to sample low probability tokens)
                if temperature != 1.0:
                    next_token_logits = next_token_logits / temperature
                # Top-p/top-k filtering
                next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
                # Sample from the filtered distribution
                next_token = torch.multinomial(F.softmax(next_token_logits, dim=-1), num_samples=1).squeeze(1)
            else:
                # Greedy decoding
                next_token = torch.argmax(next_token_logits, dim=-1)
            # log-probability of the chosen token, for the returned scores
            _scores = F.log_softmax(next_token_logits, dim=-1)  # (batch_size, vocab_size)
            _scores = torch.gather(_scores, -1, next_token.unsqueeze(-1))  # (batch_size, 1)
            logprobs.append(_scores)  # (batch_size, 1)
            unfinished_sents.append(cur_unfinished)
            # finished sentences get pad tokens instead of new tokens
            tokens_to_add = next_token * cur_unfinished + pad_token_id * (1 - cur_unfinished)
            input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
            #for t in input_ids:
                #print(self.tokenizer.convert_ids_to_tokens(t.tolist()))
            # mark sentences that just emitted an EOS token as finished
            for eos_token_id in eos_token_ids:
                cur_unfinished = cur_unfinished.mul(tokens_to_add.ne(eos_token_id).long())
            cur_len = cur_len + 1
            # stop when there is a </s> in each sentence, or if we exceed the maximul length
            if cur_unfinished.max() == 0:
                break
        # force an EOS on sentences that ran out of length budget
        if cur_len == max_length:
            input_ids[:, -1].masked_fill_(cur_unfinished.to(dtype=torch.bool), eos_token_ids[0])
        logprobs = torch.cat(logprobs, dim=1)
        unfinished_sents = torch.stack(unfinished_sents, dim=1).float()
        # only score positions generated while the sentence was unfinished
        sum_logprobs = (logprobs * unfinished_sents).sum(dim=1)
        # return mean logprobs to keep consistent with beam search output
        logprobs = sum_logprobs / unfinished_sents.sum(dim=1)
        # pad to the same length, otherwise DataParallel will give error
        pad_len = max_length - input_ids.shape[1]
        if pad_len > 0:
            padding_ids = input_ids.new(batch_size, pad_len).fill_(pad_token_id)
            input_ids = torch.cat([input_ids, padding_ids], dim=1)
        # (batch_size, n_best, max_len), (batch_size, n_best)
        return input_ids.unsqueeze(1), logprobs.unsqueeze(1)
    def _generate_beam_search(
        self,
        input_ids,
        encoder_states,
        encoder_atts,
        cur_len,
        max_length,
        do_sample,
        temperature,
        top_k,
        top_p,
        repetition_penalty,
        pad_token_id,
        eos_token_ids,
        batch_size,
        length_penalty,
        num_beams,
        vocab_size,
    ):
        """Generate sequences for each example with beam search.

        Maintains ``num_beams`` live beams per example; finished beams are
        collected into per-example BeamHypotheses and the
        ``self.num_keep_best`` best ones are returned.

        Returns:
            (decoded, logprobs): ids of shape
            (batch_size, num_keep_best, max_length) padded with
            ``pad_token_id``, and length-normalized scores of shape
            (batch_size, num_keep_best).
        """
        if type(eos_token_ids) != list:
            eos_token_ids = [eos_token_ids]
        # Expand input to num beams: each example's prompt is replicated once
        # per beam and beams are laid out contiguously along dim 0
        input_ids = input_ids.unsqueeze(1).expand(batch_size, num_beams, cur_len)
        input_ids = input_ids.contiguous().view(batch_size * num_beams, cur_len)  # (batch_size * num_beams, cur_len)
        # generated hypotheses
        num_keep_best = self.num_keep_best
        generated_hyps = [
            BeamHypotheses(num_keep_best, max_length, length_penalty, early_stopping=False) for _ in range(batch_size)
        ]
        # NOTE: Expand >1 words to leave some spare tokens to keep the
        # beam size, because some sentences may end here and cannot expand
        # in the next level
        TOPN_PER_BEAM = 2
        # scores for each sentence in the beam
        beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
        # all beams start from the same prompt, so only beam 0 is viable at
        # step one; -1e9 keeps the duplicates from being selected
        beam_scores[:, 1:] = -1e9
        beam_scores = beam_scores.view(-1)  # shape (batch_size * num_beams,)
        # cache compute states
        past = None
        # done sentences
        done = [False for _ in range(batch_size)]
        while cur_len < max_length:
            logits = self.decode_forward(input_ids, encoder_states, encoder_atts, attention_mask=None, train=False)
            # the logit predicting position cur_len lives at index cur_len - 1
            next_token_idx = cur_len - 1
            scores = logits[:, next_token_idx, :]
            # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
            if repetition_penalty != 1.0:
                for i in range(batch_size * num_beams):
                    for previous_token in set(input_ids[i].tolist()):
                        # if score < 0 then repetition penalty has to multiplied to reduce the previous token probability
                        if scores[i, previous_token] < 0:
                            scores[i, previous_token] *= repetition_penalty
                        else:
                            scores[i, previous_token] /= repetition_penalty
            if do_sample:
                # Temperature (higher temperature => more likely to sample low probability tokens)
                if temperature != 1.0:
                    scores = scores / temperature
                # Top-p/top-k filtering
                scores = top_k_top_p_filtering(
                    scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
                )  # (batch_size * num_beams, vocab_size)
                # Sample [TOPN_PER_BEAM] next words for each beam (so we have some spare tokens and match output of greedy beam search)
                next_words = torch.multinomial(F.softmax(scores, dim=-1),
                    num_samples=TOPN_PER_BEAM)  # (batch_size * num_beams, TOPN_PER_BEAM)
                # Compute next scores
                _scores = F.log_softmax(scores, dim=-1)  # (batch_size * num_beams, vocab_size)
                _scores = torch.gather(_scores, -1, next_words)  # (batch_size * num_beams, TOPN_PER_BEAM)
                next_scores = _scores + beam_scores[:, None].expand_as(_scores)  # (batch_size * num_beams, TOPN_PER_BEAM)
                # Match shape of greedy beam search: fold (beam_id, word_id)
                # into a single index over num_beams * vocab_size
                beam_indices = torch.arange(num_beams) * vocab_size
                beam_indices = beam_indices.repeat(batch_size, TOPN_PER_BEAM).to(next_words.device)
                next_words = next_words.view(batch_size, TOPN_PER_BEAM * num_beams)  # (batch_size, TOPN_PER_BEAM * num_beams)
                next_words = next_words + beam_indices
                next_scores = next_scores.view(batch_size, TOPN_PER_BEAM * num_beams)  # (batch_size, TOPN_PER_BEAM * num_beams)
            else:
                # do greedy beam search
                scores = F.log_softmax(scores, dim=-1)  # (batch_size * num_beams, vocab_size)
                assert scores.size() == (batch_size * num_beams, vocab_size)
                # Add the log prob of the new beams to the log prob of the beginning of the sequence (sum of logs == log of the product)
                _scores = scores + beam_scores[:, None].expand_as(scores)  # (batch_size * num_beams, vocab_size)
                # re-organize to group the beam together (we are keeping top hypothesis accross beams)
                _scores = _scores.view(batch_size, num_beams * vocab_size)  # (batch_size, num_beams * vocab_size)
                next_scores, next_words = torch.topk(_scores, TOPN_PER_BEAM * num_beams, dim=1, largest=True, sorted=True)
            assert next_scores.size() == next_words.size() == (batch_size, TOPN_PER_BEAM * num_beams)
            # next batch beam content
            # list of (batch_size * num_beams) tuple(next hypothesis score, next word, current position in the batch)
            next_batch_beam = []
            # for each sentence
            for batch_ex in range(batch_size):
                # if we are done with this sentence
                done[batch_ex] = done[batch_ex] or generated_hyps[batch_ex].is_done(next_scores[batch_ex].max().item())
                if done[batch_ex]:
                    next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams)  # pad the batch
                    continue
                # next sentence beam content
                next_sent_beam = []
                # next words for this sentence
                for idx, score in zip(next_words[batch_ex], next_scores[batch_ex]):
                    # decompose the flattened index back into beam and word IDs
                    beam_id = idx // vocab_size
                    word_id = idx % vocab_size
                    # end of sentence, or next word
                    if word_id.item() in eos_token_ids or cur_len + 1 == max_length:
                        generated_hyps[batch_ex].add(
                            input_ids[batch_ex * num_beams + beam_id, :cur_len].clone(), score.item()
                        )
                    else:
                        next_sent_beam.append((score, word_id, batch_ex * num_beams + beam_id))
                    # the beam for next step is full
                    if len(next_sent_beam) == num_beams:
                        break
                # update next beam content
                if cur_len + 1 == max_length:
                    assert len(next_sent_beam) == 0
                else:
                    assert len(next_sent_beam) == num_beams
                if len(next_sent_beam) == 0:
                    next_sent_beam = [(0, pad_token_id, 0)] * num_beams  # pad the batch
                next_batch_beam.extend(next_sent_beam)
                assert len(next_batch_beam) == num_beams * (batch_ex + 1)
            # sanity check / prepare next batch
            assert len(next_batch_beam) == batch_size * num_beams
            beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
            beam_words = input_ids.new([x[1] for x in next_batch_beam])
            beam_idx = input_ids.new([x[2] for x in next_batch_beam])
            # re-order batch so each surviving beam continues its chosen parent
            input_ids = input_ids[beam_idx, :]
            input_ids = torch.cat([input_ids, beam_words.unsqueeze(1)], dim=-1)
            # re-order internal states (past is never set in this code path,
            # but the reordering is kept for cache-enabled decoders)
            if past:
                reordered_past = []
                for layer_past in past:
                    # get the correct batch idx from layer past batch dim
                    # batch dim of `past` and `mems` is at 1st position
                    reordered_layer_past = [layer_past[i].unsqueeze(0).clone().detach() for i in beam_idx]
                    reordered_layer_past = torch.cat(reordered_layer_past, dim=0)
                    # check that shape matches
                    assert reordered_layer_past.shape == layer_past.shape
                    reordered_past.append(reordered_layer_past)
                past = tuple(reordered_past)
            # update current length
            cur_len = cur_len + 1
            # stop when we are done with each sentence
            if all(done):
                break
        # visualize hypotheses
        # print([len(x) for x in generated_hyps], cur_len)
        # globals().update( locals() );
        # !import code; code.interact(local=vars())
        # for ii in range(batch_size):
        #     for ss, ww in sorted(generated_hyps[ii].hyp, key=lambda x: x[0], reverse=True):
        #         print("%.3f " % ss + " ".join(self.dico[x] for x in ww.tolist()))
        #     print("")
        # select the best hypotheses
        tgt_len = torch.ones(batch_size, num_keep_best, dtype=torch.long)
        logprobs = torch.zeros(batch_size, num_keep_best,
                dtype=torch.float).fill_(-1e5).to(input_ids.device)
        all_best = []
        for i, hypotheses in enumerate(generated_hyps):
            best = []
            hyp_scores = torch.tensor([x[0] for x in hypotheses.hyp])
            _, best_indices = torch.topk(hyp_scores,
                    min(num_keep_best, len(hyp_scores)), largest=True)
            for best_idx, hyp_idx in enumerate(best_indices):
                conf, best_hyp = hypotheses.hyp[hyp_idx]
                best.append(best_hyp)
                logprobs[i, best_idx] = conf
                tgt_len[i, best_idx] = len(best_hyp) + 1  # +1 for the <EOS> symbol
            all_best.append(best)
        # generate target batch, pad to the same length
        decoded = input_ids.new(batch_size, num_keep_best, max_length).fill_(pad_token_id)
        for batch_idx, best in enumerate(all_best):
            for best_idx, hypo in enumerate(best):
                decoded[batch_idx, best_idx, : tgt_len[batch_idx, best_idx] - 1] = hypo
                decoded[batch_idx, best_idx, tgt_len[batch_idx, best_idx] - 1] = eos_token_ids[0]
        return decoded, logprobs
def top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1):
    """Mask logits outside the top-k and/or nucleus (top-p) candidate set.

    Args:
        logits: distribution of shape (batch size, vocabulary size).
        top_k: if > 0, keep only the k highest-probability tokens.
        top_p: if < 1.0, keep the smallest set of tokens whose cumulative
            probability reaches top_p (nucleus filtering, Holtzman et al.,
            http://arxiv.org/abs/1904.09751).
        filter_value: value written into masked-out positions.
        min_tokens_to_keep: lower bound on surviving tokens per example.

    Note: ``logits`` is modified in place and also returned.
    From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    if top_k > 0:
        # safety clamp: at least min_tokens_to_keep, at most the vocab size
        k = min(max(top_k, min_tokens_to_keep), logits.size(-1))
        # everything strictly below the k-th best logit is filtered out
        kth_best = torch.topk(logits, k)[0][..., -1, None]
        logits.masked_fill_(logits < kth_best, filter_value)
    if top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # tokens past the nucleus boundary get removed
        remove_sorted = cumulative_probs > top_p
        if min_tokens_to_keep > 1:
            # keep at least min_tokens_to_keep (the first slot is re-enabled below anyway)
            remove_sorted[..., :min_tokens_to_keep] = 0
        # shift right so the first token crossing the threshold is kept too
        remove_sorted[..., 1:] = remove_sorted[..., :-1].clone()
        remove_sorted[..., 0] = 0
        # scatter the sorted mask back to original vocabulary order
        indices_to_remove = remove_sorted.scatter(1, sorted_indices, remove_sorted)
        logits.masked_fill_(indices_to_remove, filter_value)
    return logits
class BeamHypotheses(object):
    """Bookkeeping for the n best finished hypotheses of one sentence."""

    def __init__(self, n_hyp, max_length, length_penalty, early_stopping):
        """Initialize an (initially empty) n-best list of hypotheses."""
        self.max_length = max_length - 1  # ignoring bos_token
        self.length_penalty = length_penalty
        self.early_stopping = early_stopping
        self.n_hyp = n_hyp
        self.hyp = []
        self.worst_score = 1e9

    def __len__(self):
        """Number of hypotheses currently stored."""
        return len(self.hyp)

    def add(self, hyp, sum_logprobs):
        """Insert a hypothesis, evicting the current worst one when full."""
        # length-normalized score
        score = sum_logprobs / len(hyp) ** self.length_penalty
        if len(self) >= self.n_hyp and score <= self.worst_score:
            return  # not competitive with the stored hypotheses
        self.hyp.append((score, hyp))
        if len(self) > self.n_hyp:
            # drop the lowest-scoring entry; the next-lowest becomes the worst
            ranked = sorted((s, idx) for idx, (s, _) in enumerate(self.hyp))
            del self.hyp[ranked[0][1]]
            self.worst_score = ranked[1][0]
        else:
            self.worst_score = min(score, self.worst_score)

    def is_done(self, best_sum_logprobs):
        """
        If there are enough hypotheses and none of the in-progress ones can
        become better than the worst stored one, this sentence is finished.
        """
        if len(self) < self.n_hyp:
            return False
        if self.early_stopping:
            return True
        # best achievable normalized score for a max-length continuation
        return self.worst_score >= best_sum_logprobs / self.max_length ** self.length_penalty
| 39,775 | 46.522103 | 309 | py |
DaVinci | DaVinci-main/models/model_ve.py | # Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
from models.xbert import BertConfig, BertModel
from models.davinci_pretrain import DaVinci
from torch import nn
import torch.nn.functional as F
class DaVinciVE(nn.Module):
    """Visual-entailment head on top of a pre-trained DaVinci backbone.

    Classifies an (image, hypothesis) pair into 3 classes via a small MLP on
    the decoder hidden state at each example's last real token.
    """
    def __init__(self,
                 encoder = None,
                 text_decoder = None,
                 tokenizer = None,
                 config = None,
                 ):
        super().__init__()
        # offset (from the end of the attention mask) of the state fed to the head
        self.last_hidden_id_shift = config['last_hidden_id_shift']
        self.tokenizer = tokenizer
        self.davinci = DaVinci(encoder, text_decoder, tokenizer, config, init_deit=False, init_dalle=True)
        hidden = BertConfig.from_json_file(config['bert_config']).hidden_size
        # 3-way classifier (entailment / neutral / contradiction, per SNLI-VE)
        self.cls_head = nn.Sequential(
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 3),
        )

    def forward(self, image, text, targets, alpha=0, train=True):
        """Return the cross-entropy loss in training mode, logits otherwise."""
        # empty caption as dummy encoder-side text input
        dummy_text = self.tokenizer([""] * image.size(0), return_tensors='pt').to(image.device)
        # index of the last real token for every example
        last_state_ids = text.attention_mask.sum(1) - self.last_hidden_id_shift
        last_states = self.davinci(
            image,
            dummy_text,
            text,
            last_state_ids=last_state_ids,
            is_ve=True,
            train=train,
            decode=False,
        )
        logits = self.cls_head(last_states)
        if not train:
            return logits
        return F.cross_entropy(logits, targets)
| 1,853 | 39.304348 | 114 | py |
DaVinci | DaVinci-main/models/dall_e/utils.py | import attr
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
logit_laplace_eps: float = 0.1
@attr.s(eq=False)
class Conv2d(nn.Module):
    """2D convolution with explicitly managed weight/bias parameters.

    Configuration fields are declared with `attr`; the actual parameters are
    created in `__attrs_post_init__`. On CUDA devices an optional float16
    fast path is used.
    """
    n_in: int = attr.ib(validator=lambda i, a, x: x >= 1)
    n_out: int = attr.ib(validator=lambda i, a, x: x >= 1)
    # kernel width must be odd so 'same' padding works out exactly
    kw: int = attr.ib(validator=lambda i, a, x: x >= 1 and x % 2 == 1)
    use_float16: bool = attr.ib(default=True)
    device: torch.device = attr.ib(default=torch.device('cpu'))
    requires_grad: bool = attr.ib(default=False)
    def __attrs_post_init__(self) -> None:
        super().__init__()
        # weight init: normal with std scaled by fan-in (n_in * kernel area)
        w = torch.empty((self.n_out, self.n_in, self.kw, self.kw), dtype=torch.float32,
                device=self.device, requires_grad=self.requires_grad)
        w.normal_(std=1 / math.sqrt(self.n_in * self.kw ** 2))
        b = torch.zeros((self.n_out,), dtype=torch.float32, device=self.device,
                requires_grad=self.requires_grad)
        self.w, self.b = nn.Parameter(w), nn.Parameter(b)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.use_float16 and 'cuda' in self.w.device.type:
            # half-precision path: cast input and parameters to float16
            if x.dtype != torch.float16:
                x = x.half()
            w, b = self.w.half(), self.b.half()
        else:
            if x.dtype != torch.float32:
                x = x.float()
            w, b = self.w, self.b
        # 'same' spatial padding for the odd kernel size kw
        return F.conv2d(x, w, b, padding=(self.kw - 1) // 2)
def map_pixels(x: torch.Tensor) -> torch.Tensor:
    """Affinely squash pixel values from [0, 1] into
    [logit_laplace_eps, 1 - logit_laplace_eps] (inverse of unmap_pixels)."""
    if x.dtype != torch.float:
        raise ValueError('expected input to have type float')
    scale = 1 - 2 * logit_laplace_eps
    return scale * x + logit_laplace_eps
def unmap_pixels(x: torch.Tensor) -> torch.Tensor:
    """Invert map_pixels on a 4D tensor and clamp the result to [0, 1]."""
    if len(x.shape) != 4:
        raise ValueError('expected input to be 4d')
    if x.dtype != torch.float:
        raise ValueError('expected input to have type float')
    scale = 1 - 2 * logit_laplace_eps
    return torch.clamp((x - logit_laplace_eps) / scale, 0, 1)
| 1,771 | 29.551724 | 81 | py |
DaVinci | DaVinci-main/models/dall_e/encoder.py | import attr
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from functools import partial
from models.dall_e.utils import Conv2d
@attr.s(eq=False, repr=False)
class EncoderBlock(nn.Module):
    """Residual bottleneck block of the dVAE encoder: identity (or 1x1 conv)
    shortcut plus a scaled relu/conv stack with hidden width n_out // 4."""
    n_in: int = attr.ib(validator=lambda i, a, x: x >= 1)
    n_out: int = attr.ib(validator=lambda i, a, x: x >= 1 and x % 4 ==0)
    n_layers: int = attr.ib(validator=lambda i, a, x: x >= 1)
    device: torch.device = attr.ib(default=None)
    requires_grad: bool = attr.ib(default=False)
    def __attrs_post_init__(self) -> None:
        super().__init__()
        self.n_hid = self.n_out // 4
        # residual branch is down-weighted by 1 / n_layers^2
        self.post_gain = 1 / (self.n_layers ** 2)
        make_conv = partial(Conv2d, device=self.device, requires_grad=self.requires_grad)
        # 1x1 projection only when the channel count changes
        self.id_path = make_conv(self.n_in, self.n_out, 1) if self.n_in != self.n_out else nn.Identity()
        self.res_path = nn.Sequential(OrderedDict([
                ('relu_1', nn.ReLU()),
                ('conv_1', make_conv(self.n_in, self.n_hid, 3)),
                ('relu_2', nn.ReLU()),
                ('conv_2', make_conv(self.n_hid, self.n_hid, 3)),
                ('relu_3', nn.ReLU()),
                ('conv_3', make_conv(self.n_hid, self.n_hid, 3)),
                ('relu_4', nn.ReLU()),
                ('conv_4', make_conv(self.n_hid, self.n_out, 1)),]))
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.id_path(x) + self.post_gain * self.res_path(x)
@attr.s(eq=False, repr=False)
class Encoder(nn.Module):
    """dVAE encoder: 4 groups of EncoderBlocks with 2x max-pool downsampling
    between groups, mapping an image to per-position vocab_size logits."""
    group_count: int = 4
    n_hid: int = attr.ib(default=256, validator=lambda i, a, x: x >= 64)
    n_blk_per_group: int = attr.ib(default=2, validator=lambda i, a, x: x >= 1)
    input_channels: int = attr.ib(default=3, validator=lambda i, a, x: x >= 1)
    vocab_size: int = attr.ib(default=8192, validator=lambda i, a, x: x >= 512)
    device: torch.device = attr.ib(default=torch.device('cpu'))
    requires_grad: bool = attr.ib(default=False)
    use_mixed_precision: bool = attr.ib(default=True)
    def __attrs_post_init__(self) -> None:
        super().__init__()
        blk_range = range(self.n_blk_per_group)
        n_layers = self.group_count * self.n_blk_per_group
        make_conv = partial(Conv2d, device=self.device, requires_grad=self.requires_grad)
        make_blk = partial(EncoderBlock, n_layers=n_layers, device=self.device,
                requires_grad=self.requires_grad)
        # channel widths double per group: n_hid -> 2n -> 4n -> 8n;
        # each 'pool' halves the spatial resolution (8x total downsampling)
        self.blocks = nn.Sequential(OrderedDict([
            ('input', make_conv(self.input_channels, 1 * self.n_hid, 7)),
            ('group_1', nn.Sequential(OrderedDict([
                *[(f'block_{i + 1}', make_blk(1 * self.n_hid, 1 * self.n_hid)) for i in blk_range],
                ('pool', nn.MaxPool2d(kernel_size=2)),
            ]))),
            ('group_2', nn.Sequential(OrderedDict([
                *[(f'block_{i + 1}', make_blk(1 * self.n_hid if i == 0 else 2 * self.n_hid, 2 * self.n_hid)) for i in blk_range],
                ('pool', nn.MaxPool2d(kernel_size=2)),
            ]))),
            ('group_3', nn.Sequential(OrderedDict([
                *[(f'block_{i + 1}', make_blk(2 * self.n_hid if i == 0 else 4 * self.n_hid, 4 * self.n_hid)) for i in blk_range],
                ('pool', nn.MaxPool2d(kernel_size=2)),
            ]))),
            ('group_4', nn.Sequential(OrderedDict([
                *[(f'block_{i + 1}', make_blk(4 * self.n_hid if i == 0 else 8 * self.n_hid, 8 * self.n_hid)) for i in blk_range],
            ]))),
            ('output', nn.Sequential(OrderedDict([
                ('relu', nn.ReLU()),
                # logits head stays in float32 even on the float16 path
                ('conv', make_conv(8 * self.n_hid, self.vocab_size, 1, use_float16=False)),
            ]))),
        ]))
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if len(x.shape) != 4:
            raise ValueError(f'input shape {x.shape} is not 4d')
        if x.shape[1] != self.input_channels:
            raise ValueError(f'input has {x.shape[1]} channels but model built for {self.input_channels}')
        if x.dtype != torch.float32:
            raise ValueError('input must have dtype torch.float32')
        return self.blocks(x)
| 3,782 | 39.244681 | 117 | py |
DaVinci | DaVinci-main/models/dall_e/decoder.py | import attr
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from functools import partial
from models.dall_e.utils import Conv2d
@attr.s(eq=False, repr=False)
class DecoderBlock(nn.Module):
    """Residual bottleneck block of the dVAE decoder: identity (or 1x1 conv)
    shortcut plus a scaled relu/conv stack with hidden width n_out // 4."""
    n_in: int = attr.ib(validator=lambda i, a, x: x >= 1)
    n_out: int = attr.ib(validator=lambda i, a, x: x >= 1 and x % 4 ==0)
    n_layers: int = attr.ib(validator=lambda i, a, x: x >= 1)
    device: torch.device = attr.ib(default=None)
    requires_grad: bool = attr.ib(default=False)
    def __attrs_post_init__(self) -> None:
        super().__init__()
        self.n_hid = self.n_out // 4
        # residual branch is down-weighted by 1 / n_layers^2
        self.post_gain = 1 / (self.n_layers ** 2)
        make_conv = partial(Conv2d, device=self.device, requires_grad=self.requires_grad)
        # 1x1 projection only when the channel count changes
        self.id_path = make_conv(self.n_in, self.n_out, 1) if self.n_in != self.n_out else nn.Identity()
        # NOTE: kernel layout differs from EncoderBlock (1,3,3,3 vs 3,3,3,1)
        self.res_path = nn.Sequential(OrderedDict([
                ('relu_1', nn.ReLU()),
                ('conv_1', make_conv(self.n_in, self.n_hid, 1)),
                ('relu_2', nn.ReLU()),
                ('conv_2', make_conv(self.n_hid, self.n_hid, 3)),
                ('relu_3', nn.ReLU()),
                ('conv_3', make_conv(self.n_hid, self.n_hid, 3)),
                ('relu_4', nn.ReLU()),
                ('conv_4', make_conv(self.n_hid, self.n_out, 3)),]))
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.id_path(x) + self.post_gain * self.res_path(x)
@attr.s(eq=False, repr=False)
class Decoder(nn.Module):
    """dVAE decoder: 4 groups of DecoderBlocks with 2x nearest-neighbor
    upsampling between groups, mapping one-hot/soft token maps of depth
    vocab_size back to 2 * output_channels value maps."""
    group_count: int = 4
    n_init: int = attr.ib(default=128, validator=lambda i, a, x: x >= 8)
    n_hid: int = attr.ib(default=256, validator=lambda i, a, x: x >= 64)
    n_blk_per_group: int = attr.ib(default=2, validator=lambda i, a, x: x >= 1)
    output_channels: int = attr.ib(default=3, validator=lambda i, a, x: x >= 1)
    vocab_size: int = attr.ib(default=8192, validator=lambda i, a, x: x >= 512)
    device: torch.device = attr.ib(default=torch.device('cpu'))
    requires_grad: bool = attr.ib(default=False)
    use_mixed_precision: bool = attr.ib(default=True)
    def __attrs_post_init__(self) -> None:
        super().__init__()
        blk_range = range(self.n_blk_per_group)
        n_layers = self.group_count * self.n_blk_per_group
        make_conv = partial(Conv2d, device=self.device, requires_grad=self.requires_grad)
        make_blk = partial(DecoderBlock, n_layers=n_layers, device=self.device,
                requires_grad=self.requires_grad)
        # channel widths halve per group: 8n -> 4n -> 2n -> n;
        # each 'upsample' doubles the spatial resolution (8x total upsampling)
        self.blocks = nn.Sequential(OrderedDict([
            ('input', make_conv(self.vocab_size, self.n_init, 1, use_float16=False)),
            ('group_1', nn.Sequential(OrderedDict([
                *[(f'block_{i + 1}', make_blk(self.n_init if i == 0 else 8 * self.n_hid, 8 * self.n_hid)) for i in blk_range],
                ('upsample', nn.Upsample(scale_factor=2, mode='nearest')),
            ]))),
            ('group_2', nn.Sequential(OrderedDict([
                *[(f'block_{i + 1}', make_blk(8 * self.n_hid if i == 0 else 4 * self.n_hid, 4 * self.n_hid)) for i in blk_range],
                ('upsample', nn.Upsample(scale_factor=2, mode='nearest')),
            ]))),
            ('group_3', nn.Sequential(OrderedDict([
                *[(f'block_{i + 1}', make_blk(4 * self.n_hid if i == 0 else 2 * self.n_hid, 2 * self.n_hid)) for i in blk_range],
                ('upsample', nn.Upsample(scale_factor=2, mode='nearest')),
            ]))),
            ('group_4', nn.Sequential(OrderedDict([
                *[(f'block_{i + 1}', make_blk(2 * self.n_hid if i == 0 else 1 * self.n_hid, 1 * self.n_hid)) for i in blk_range],
            ]))),
            ('output', nn.Sequential(OrderedDict([
                ('relu', nn.ReLU()),
                # two output maps per channel (logit-Laplace mu and log-b)
                ('conv', make_conv(1 * self.n_hid, 2 * self.output_channels, 1)),
            ]))),
        ]))
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if len(x.shape) != 4:
            raise ValueError(f'input shape {x.shape} is not 4d')
        if x.shape[1] != self.vocab_size:
            raise ValueError(f'input has {x.shape[1]} channels but model built for {self.vocab_size}')
        if x.dtype != torch.float32:
            raise ValueError('input must have dtype torch.float32')
        return self.blocks(x)
| 3,943 | 40.515789 | 117 | py |
DaVinci | DaVinci-main/models/dall_e/__init__.py | import io, requests
import torch
import torch.nn as nn
from models.dall_e.encoder import Encoder
from models.dall_e.decoder import Decoder
from models.dall_e.utils import map_pixels, unmap_pixels
def load_model(path: str, device: torch.device = None) -> nn.Module:
    """Load a serialized dVAE model from a local path or an HTTP(S) URL.

    NOTE(security): torch.load unpickles arbitrary Python objects — only load
    checkpoints from trusted sources.
    """
    if path.startswith(('http://', 'https://')):
        resp = requests.get(path)
        resp.raise_for_status()
        with io.BytesIO(resp.content) as buf:
            return torch.load(buf, map_location=device)
    with open(path, 'rb') as f:
        return torch.load(f, map_location=device)
| 616 | 31.473684 | 68 | py |
DaVinci | DaVinci-main/models/dalle_pytorch/reversible.py | import torch
import torch.nn as nn
from operator import itemgetter
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# for routing arguments into the functions of the reversible layer
def route_args(router, args, depth):
    """Route keyword arguments to the (f, g) halves of `depth` layers.

    Args:
        router: dict mapping an argument name to a per-layer sequence of
            (to_f, to_g) boolean pairs of length `depth`.
        args: keyword arguments supplied to the sequence's forward pass.
        depth: number of layers in the sequence.

    Returns:
        A list of `depth` (f_kwargs, g_kwargs) dict pairs; arguments absent
        from `router` are dropped.
    """
    routed_args = [(dict(), dict()) for _ in range(depth)]
    matched_keys = [key for key in args.keys() if key in router]
    for key in matched_keys:
        val = args[key]
        # NOTE: this loop variable previously shadowed the `depth` parameter;
        # renamed to keep the parameter intact for readers and future edits.
        for layer_idx, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])):
            new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes)
            routed_args[layer_idx] = ({**f_args, **new_f_args}, {**g_args, **new_g_args})
    return routed_args
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
    """Wraps a module so its forward pass can be replayed deterministically.

    On a recorded call the CPU (and, when CUDA is initialized, per-GPU) RNG
    states are stashed; a later call with set_rng=True restores those states
    inside a forked RNG context so stochastic ops (e.g. dropout) reproduce the
    exact same results. Used by reversible blocks to recompute activations in
    the backward pass.
    """
    def __init__(self, net):
        super().__init__()
        self.net = net
        self.cpu_state = None
        self.cuda_in_fwd = None
        self.gpu_devices = None
        self.gpu_states = None
    def record_rng(self, *args):
        # snapshot CPU RNG; also snapshot the RNG of every GPU holding *args
        self.cpu_state = torch.get_rng_state()
        # NOTE: relies on the private torch.cuda._initialized flag
        if torch.cuda._initialized:
            self.cuda_in_fwd = True
            self.gpu_devices, self.gpu_states = get_device_states(*args)
    def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
        if record_rng:
            self.record_rng(*args)
        if not set_rng:
            return self.net(*args, **kwargs)
        # replay path: restore the recorded RNG states, run, then let
        # fork_rng restore the caller's RNG on exit
        rng_devices = []
        if self.cuda_in_fwd:
            rng_devices = self.gpu_devices
        with torch.random.fork_rng(devices=rng_devices, enabled=True):
            torch.set_rng_state(self.cpu_state)
            if self.cuda_in_fwd:
                set_device_states(self.gpu_devices, self.gpu_states)
            return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
    """Reversible residual block: y1 = x1 + f(x2), y2 = x2 + g(y1).

    Forward runs under no_grad; activations are reconstructed from the
    outputs in backward_pass, so they need not be stored. f and g are wrapped
    in Deterministic so their RNG (e.g. dropout) replays identically.
    """
    def __init__(self, f, g):
        super().__init__()
        self.f = Deterministic(f)
        self.g = Deterministic(g)
    def forward(self, x, f_args = {}, g_args = {}):
        # split channels into the two residual streams
        x1, x2 = torch.chunk(x, 2, dim=2)
        y1, y2 = None, None
        with torch.no_grad():
            # record RNG only in training so backward_pass can replay it
            y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
            y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
        return torch.cat([y1, y2], dim=2)
    def backward_pass(self, y, dy, f_args = {}, g_args = {}):
        # invert the block: recover (x1, x2) from (y1, y2) while accumulating
        # gradients; intermediate tensors are freed eagerly with del
        y1, y2 = torch.chunk(y, 2, dim=2)
        del y
        dy1, dy2 = torch.chunk(dy, 2, dim=2)
        del dy
        with torch.enable_grad():
            y1.requires_grad = True
            gy1 = self.g(y1, set_rng=True, **g_args)
            torch.autograd.backward(gy1, dy2)
        with torch.no_grad():
            # x2 = y2 - g(y1); dx1 = dy1 + d(g)/d(y1) applied to dy2
            x2 = y2 - gy1
            del y2, gy1
            dx1 = dy1 + y1.grad
            del dy1
            y1.grad = None
        with torch.enable_grad():
            x2.requires_grad = True
            fx2 = self.f(x2, set_rng=True, **f_args)
            torch.autograd.backward(fx2, dx1, retain_graph=True)
        with torch.no_grad():
            # x1 = y1 - f(x2); dx2 = dy2 + d(f)/d(x2) applied to dx1
            x1 = y1 - fx2
            del y1, fx2
            dx2 = dy2 + x2.grad
            del dy2
            x2.grad = None
            x = torch.cat([x1, x2.detach()], dim=2)
            dx = torch.cat([dx1, dx2], dim=2)
        return x, dx
class _ReversibleFunction(Function):
    """Custom autograd Function driving a stack of ReversibleBlocks.

    Saves only the final (detached) activation plus per-block kwargs, then
    reconstructs intermediate activations block-by-block in backward.
    """
    @staticmethod
    def forward(ctx, x, blocks, args):
        ctx.args = args
        for block, kwarg in zip(blocks, args):
            x = block(x, **kwarg)
        # keep only the detached output; inputs are recomputed in backward
        ctx.y = x.detach()
        ctx.blocks = blocks
        return x
    @staticmethod
    def backward(ctx, dy):
        y = ctx.y
        args = ctx.args
        # walk the blocks in reverse, inverting each one
        for block, kwargs in zip(ctx.blocks[::-1], args[::-1]):
            y, dy = block.backward_pass(y, dy, **kwargs)
        # gradient only w.r.t. x; blocks/args are non-tensor inputs
        return dy, None, None
class SequentialSequence(nn.Module):
    """Plain (non-reversible) residual stack of (attention, feed-forward) pairs.

    ``args_route`` maps keyword names to per-layer routing masks, consumed by
    the module-level ``route_args`` helper.
    NOTE(review): ``layer_dropout`` is stored but never applied in ``forward``.
    """
    def __init__(self, layers, args_route = {}, layer_dropout = 0.):
        super().__init__()
        depth = len(layers)
        assert all(len(route) == depth for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers'
        self.layers = layers
        self.args_route = args_route
        self.layer_dropout = layer_dropout

    def forward(self, x, **kwargs):
        routed = route_args(self.args_route, kwargs, len(self.layers))
        for (attn, ff), (attn_kwargs, ff_kwargs) in zip(self.layers, routed):
            x = x + attn(x, **attn_kwargs)
            x = x + ff(x, **ff_kwargs)
        return x
class ReversibleSequence(nn.Module):
    """Memory-efficient (reversible) counterpart of SequentialSequence."""
    def __init__(self, blocks, args_route = {}):
        super().__init__()
        self.args_route = args_route
        self.blocks = nn.ModuleList([ReversibleBlock(f=f, g=g) for f, g in blocks])
    def forward(self, x, **kwargs):
        # duplicate the features channel-wise to form the (x1, x2) streams
        x = torch.cat([x, x], dim=-1)
        blocks = self.blocks
        args = route_args(self.args_route, kwargs, len(blocks))
        args = list(map(lambda x: {'f_args': x[0], 'g_args': x[1]}, args))
        out = _ReversibleFunction.apply(x, blocks, args)
        # average the two streams back into a single feature tensor
        return torch.stack(out.chunk(2, dim=-1)).mean(dim=0)
| 5,390 | 33.120253 | 165 | py |
DaVinci | DaVinci-main/models/dalle_pytorch/dalle_pytorch.py | from math import log2, sqrt
import torch
from torch import nn, einsum
import torch.nn.functional as F
import numpy as np
from axial_positional_embedding import AxialPositionalEmbedding
from einops import rearrange
from models.dalle_pytorch import distributed_utils
from models.dalle_pytorch.vae import OpenAIDiscreteVAE, VQGanVAE
from models.dalle_pytorch.transformer import Transformer, DivideMax
# helpers
def exists(val):
    """True when *val* is anything other than None."""
    return not (val is None)
def default(val, d):
    """Return *val* when it is set (non-None), otherwise the fallback *d*."""
    if exists(val):
        return val
    return d
class always():
    """A constant callable: ``always(v)(anything...) == v``."""
    def __init__(self, val):
        self.val = val
    def __call__(self, x, *args, **kwargs):
        # every argument is deliberately ignored
        return self.val
def is_empty(t):
    """True when tensor *t* holds zero elements."""
    return not t.nelement()
def masked_mean(t, mask, dim = 1):
    """Mean of *t* over the sequence axis, counting only True mask positions.

    Expects ``t`` of shape (batch, seq, feat) and a boolean ``mask`` of shape
    (batch, seq).  NOTE(review): the ``dim`` parameter is accepted but the
    reduction is hard-coded to dim=1, matching the original behaviour.
    """
    zeroed = t.masked_fill(~mask[:, :, None], 0.)
    return zeroed.sum(dim = 1) / mask.sum(dim = 1)[..., None]
def prob_mask_like(shape, prob, device):
    """Boolean tensor of *shape*; each entry is True with probability *prob*."""
    rand = torch.zeros(shape, device = device).float().uniform_(0, 1)
    return rand < prob
def set_requires_grad(model, value):
    """Enable or disable gradient tracking for every parameter of *model*."""
    for p in model.parameters():
        p.requires_grad_(value)
def eval_decorator(fn):
    """Decorator: run *fn(model, ...)* in eval mode, then restore the mode."""
    def inner(model, *args, **kwargs):
        previous_mode = model.training
        model.eval()
        result = fn(model, *args, **kwargs)
        # put the model back the way we found it
        model.train(previous_mode)
        return result
    return inner
# sampling helpers
def log(t, eps = 1e-20):
    """Numerically safe log (adds *eps* so log(0) never occurs)."""
    return torch.log(t + eps)
def gumbel_noise(t):
    """Gumbel(0, 1) noise with the same shape as *t*."""
    u = torch.zeros_like(t).uniform_(0, 1)
    return -log(-log(u))
def gumbel_sample(t, temperature = 1., dim = -1):
    """Sample indices from logits *t* using the Gumbel-max trick."""
    return ((t / temperature) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.5):
    """Keep the top ``(1 - thres)`` fraction of logits; the rest become -inf.

    NOTE(review): scatter_ uses dim=1, so this assumes 2-D (batch, vocab)
    logits.
    """
    vocab = logits.shape[-1]
    k = max(int((1 - thres) * vocab), 1)
    val, ind = torch.topk(logits, k)
    out = torch.full_like(logits, float('-inf'))
    out.scatter_(1, ind, val)
    return out
# discrete vae class
class ResBlock(nn.Module):
    """3x3 -> 3x3 -> 1x1 convolutional residual block with a skip connection."""
    def __init__(self, chan):
        super().__init__()
        # module order matches the original so checkpoints stay compatible
        self.net = nn.Sequential(
            nn.Conv2d(chan, chan, 3, padding = 1),
            nn.ReLU(),
            nn.Conv2d(chan, chan, 3, padding = 1),
            nn.ReLU(),
            nn.Conv2d(chan, chan, 1)
        )

    def forward(self, x):
        residual = x
        return self.net(x) + residual
class DiscreteVAE(nn.Module):
    """Discrete VAE trained with Gumbel-softmax over a learned codebook.

    Encodes images into a grid of ``num_tokens``-way categorical logits,
    samples (soft) one-hot codes, embeds them with the codebook and decodes
    back to pixels.  Serves as the image tokenizer for DALL-E training.
    """
    def __init__(
        self,
        image_size = 256,
        num_tokens = 512,
        codebook_dim = 512,
        num_layers = 3,
        num_resnet_blocks = 0,
        hidden_dim = 64,
        channels = 3,
        smooth_l1_loss = False,
        temperature = 0.9,
        straight_through = False,
        kl_div_loss_weight = 0.,
        normalization = ((0.5,) * 3, (0.5,) * 3)
    ):
        super().__init__()
        assert log2(image_size).is_integer(), 'image size must be a power of 2'
        assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
        has_resblocks = num_resnet_blocks > 0
        self.image_size = image_size
        self.num_tokens = num_tokens
        self.num_layers = num_layers
        self.temperature = temperature
        self.straight_through = straight_through
        self.codebook = nn.Embedding(num_tokens, codebook_dim)
        hdim = hidden_dim  # NOTE(review): unused local
        # each encoder layer halves the spatial resolution (stride-2 conv);
        # the decoder mirrors it with stride-2 transposed convs
        enc_chans = [hidden_dim] * num_layers
        dec_chans = list(reversed(enc_chans))
        enc_chans = [channels, *enc_chans]
        # decoder input is the codebook embedding unless resblocks insert a
        # 1x1 projection first (see below)
        dec_init_chan = codebook_dim if not has_resblocks else dec_chans[0]
        dec_chans = [dec_init_chan, *dec_chans]
        enc_chans_io, dec_chans_io = map(lambda t: list(zip(t[:-1], t[1:])), (enc_chans, dec_chans))
        enc_layers = []
        dec_layers = []
        for (enc_in, enc_out), (dec_in, dec_out) in zip(enc_chans_io, dec_chans_io):
            enc_layers.append(nn.Sequential(nn.Conv2d(enc_in, enc_out, 4, stride = 2, padding = 1), nn.ReLU()))
            dec_layers.append(nn.Sequential(nn.ConvTranspose2d(dec_in, dec_out, 4, stride = 2, padding = 1), nn.ReLU()))
        # resblocks go at the end of the encoder and the start of the decoder
        for _ in range(num_resnet_blocks):
            dec_layers.insert(0, ResBlock(dec_chans[1]))
            enc_layers.append(ResBlock(enc_chans[-1]))
        if num_resnet_blocks > 0:
            dec_layers.insert(0, nn.Conv2d(codebook_dim, dec_chans[1], 1))
        # final 1x1 convs: encoder emits per-pixel token logits, decoder
        # emits image channels
        enc_layers.append(nn.Conv2d(enc_chans[-1], num_tokens, 1))
        dec_layers.append(nn.Conv2d(dec_chans[-1], channels, 1))
        self.encoder = nn.Sequential(*enc_layers)
        self.decoder = nn.Sequential(*dec_layers)
        self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
        self.kl_div_loss_weight = kl_div_loss_weight
        # take care of normalization within class
        self.normalization = normalization
        self._register_external_parameters()
    def _register_external_parameters(self):
        """Register external parameters for DeepSpeed partitioning."""
        if (
            not distributed_utils.is_distributed
            or not distributed_utils.using_backend(
                distributed_utils.DeepSpeedBackend)
        ):
            return
        deepspeed = distributed_utils.backend.backend_module
        deepspeed.zero.register_external_parameter(self, self.codebook.weight)
    def norm(self, images):
        # channel-wise (mean, std) normalization; no-op when normalization
        # was set to None
        if not exists(self.normalization):
            return images
        means, stds = map(lambda t: torch.as_tensor(t).to(images), self.normalization)
        means, stds = map(lambda t: rearrange(t, 'c -> () c () ()'), (means, stds))
        # clone so the caller's tensor is not mutated by the in-place ops
        images = images.clone()
        images.sub_(means).div_(stds)
        return images
    @torch.no_grad()
    @eval_decorator
    def get_codebook_indices(self, images):
        # hard (argmax) token ids, flattened to (batch, h*w)
        logits = self(images, return_logits = True)
        codebook_indices = logits.argmax(dim = 1).flatten(1)
        return codebook_indices
    def decode(
        self,
        img_seq
    ):
        # img_seq: (batch, n) token ids where n is a perfect square
        image_embeds = self.codebook(img_seq)
        b, n, d = image_embeds.shape
        h = w = int(sqrt(n))
        image_embeds = rearrange(image_embeds, 'b (h w) d -> b d h w', h = h, w = w)
        images = self.decoder(image_embeds)
        return images
    def forward(
        self,
        img,
        return_loss = False,
        return_recons = False,
        return_logits = False,
        temp = None
    ):
        device, num_tokens, image_size, kl_div_loss_weight = img.device, self.num_tokens, self.image_size, self.kl_div_loss_weight
        assert img.shape[-1] == image_size and img.shape[-2] == image_size, f'input must have the correct image size {image_size}'
        img = self.norm(img)
        logits = self.encoder(img)
        if return_logits:
            return logits # return logits for getting hard image indices for DALL-E training
        temp = default(temp, self.temperature)
        # differentiable (soft or straight-through) sampling of codebook ids
        soft_one_hot = F.gumbel_softmax(logits, tau = temp, dim = 1, hard = self.straight_through)
        sampled = einsum('b n h w, n d -> b d h w', soft_one_hot, self.codebook.weight)
        out = self.decoder(sampled)
        if not return_loss:
            return out
        # reconstruction loss
        recon_loss = self.loss_fn(img, out)
        # kl divergence
        logits = rearrange(logits, 'b n h w -> b (h w) n')
        log_qy = F.log_softmax(logits, dim = -1)
        log_uniform = torch.log(torch.tensor([1. / num_tokens], device = device))
        # NOTE(review): F.kl_div takes (input, target); here the uniform
        # distribution is passed as input and q as target — confirm this
        # matches the intended direction of the KL term.
        kl_div = F.kl_div(log_uniform, log_qy, None, None, 'batchmean', log_target = True)
        loss = recon_loss + (kl_div * kl_div_loss_weight)
        if not return_recons:
            return loss
        return loss, out
# main classes
class CLIP(nn.Module):
    """Contrastive dual-encoder (text tower + ViT-style image tower).

    Used to score text/image pairs, e.g. to rerank DALL-E samples.
    NOTE(review): ``num_visual_tokens`` is accepted but never used.
    """
    def __init__(
        self,
        *,
        dim_text = 512,
        dim_image = 512,
        dim_latent = 512,
        num_text_tokens = 10000,
        text_enc_depth = 6,
        text_seq_len = 256,
        text_heads = 8,
        num_visual_tokens = 512,
        visual_enc_depth = 6,
        visual_heads = 8,
        visual_image_size = 256,
        visual_patch_size = 32,
        channels = 3
    ):
        super().__init__()
        self.text_emb = nn.Embedding(num_text_tokens, dim_text)
        self.text_pos_emb = nn.Embedding(text_seq_len, dim_text)
        self.text_transformer = Transformer(causal = False, seq_len = text_seq_len, dim = dim_text, depth = text_enc_depth, heads = text_heads, rotary_emb = False)
        self.to_text_latent = nn.Linear(dim_text, dim_latent, bias = False)
        assert visual_image_size % visual_patch_size == 0, 'Image dimensions must be divisible by the patch size.'
        num_patches = (visual_image_size // visual_patch_size) ** 2
        patch_dim = channels * visual_patch_size ** 2
        self.visual_patch_size = visual_patch_size
        self.to_visual_embedding = nn.Linear(patch_dim, dim_image)
        self.visual_pos_emb = nn.Embedding(num_patches, dim_image)
        self.visual_transformer = Transformer(causal = False, seq_len = num_patches, dim = dim_image, depth = visual_enc_depth, heads = visual_heads, rotary_emb = False)
        self.to_visual_latent = nn.Linear(dim_image, dim_latent, bias = False)
        # learnable similarity temperature, applied through exp() in forward
        self.temperature = nn.Parameter(torch.tensor(1.))
    def forward(
        self,
        text,
        image,
        text_mask = None,
        return_loss = False
    ):
        b, device, p = text.shape[0], text.device, self.visual_patch_size
        text_emb = self.text_emb(text)
        text_emb += self.text_pos_emb(torch.arange(text.shape[1], device = device))
        # ViT-style patching: flatten each p x p patch into a vector
        image_patches = rearrange(image, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
        image_emb = self.to_visual_embedding(image_patches)
        image_emb += self.visual_pos_emb(torch.arange(image_emb.shape[1], device = device))
        enc_text = self.text_transformer(text_emb, mask = text_mask)
        enc_image = self.visual_transformer(image_emb)
        # pool each tower to a single vector (masked mean for padded text)
        if exists(text_mask):
            text_latents = masked_mean(enc_text, text_mask, dim = 1)
        else:
            text_latents = enc_text.mean(dim = 1)
        image_latents = enc_image.mean(dim = 1)
        text_latents = self.to_text_latent(text_latents)
        image_latents = self.to_visual_latent(image_latents)
        text_latents, image_latents = map(lambda t: F.normalize(t, p = 2, dim = -1), (text_latents, image_latents))
        temp = self.temperature.exp()
        if not return_loss:
            # per-example similarity of the paired (text, image) inputs
            sim = einsum('n d, n d -> n', text_latents, image_latents) * temp
            return sim
        # full similarity matrix + symmetric cross-entropy (InfoNCE) loss
        sim = einsum('i d, j d -> i j', text_latents, image_latents) * temp
        labels = torch.arange(b, device = device)
        loss = (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2
        return loss
# main DALL-E class
class DALLE(nn.Module):
    """Autoregressive text-to-image transformer over a shared vocabulary.

    The sequence is ``text tokens (text_seq_len) ++ image tokens
    (image_seq_len)``; logits index a combined space of
    ``num_text_tokens + num_image_tokens``, masked so text positions can only
    predict text tokens and image positions only image tokens.  The VAE is
    frozen and used purely as an image (de)tokenizer.
    """
    def __init__(
        self,
        *,
        dim,
        vae,
        num_text_tokens = 10000,
        text_seq_len = 256,
        depth,
        heads = 8,
        dim_head = 64,
        reversible = False,
        attn_dropout = 0.,
        ff_dropout = 0,
        sparse_attn = False,
        attn_types = None,
        loss_img_weight = 7,
        stable = False,
        sandwich_norm = False,
        shift_tokens = True,
        rotary_emb = True
    ):
        super().__init__()
        assert isinstance(vae, (DiscreteVAE, OpenAIDiscreteVAE, VQGanVAE)), 'vae must be an instance of DiscreteVAE'
        image_size = vae.image_size
        num_image_tokens = vae.num_tokens
        # each VAE layer halves the resolution, so the token grid is
        # image_size / 2**num_layers on a side
        image_fmap_size = (vae.image_size // (2 ** vae.num_layers))
        image_seq_len = image_fmap_size ** 2
        num_text_tokens = num_text_tokens + text_seq_len # reserve unique padding tokens for each position (text seq len)
        self.text_emb = nn.Embedding(num_text_tokens, dim)
        self.image_emb = nn.Embedding(num_image_tokens, dim)
        self.text_pos_emb = nn.Embedding(text_seq_len + 1, dim) if not rotary_emb else always(0) # +1 for <bos>
        self.image_pos_emb = AxialPositionalEmbedding(dim, axial_shape = (image_fmap_size, image_fmap_size)) if not rotary_emb else always(0)
        self.num_text_tokens = num_text_tokens # for offsetting logits index and calculating cross entropy loss
        self.num_image_tokens = num_image_tokens
        self.text_seq_len = text_seq_len
        self.image_seq_len = image_seq_len
        seq_len = text_seq_len + image_seq_len
        total_tokens = num_text_tokens + num_image_tokens
        self.total_tokens = total_tokens
        self.total_seq_len = seq_len
        self.vae = vae
        set_requires_grad(self.vae, False) # freeze VAE from being trained
        self.transformer = Transformer(
            dim = dim,
            causal = True,
            seq_len = seq_len,
            depth = depth,
            heads = heads,
            dim_head = dim_head,
            reversible = reversible,
            attn_dropout = attn_dropout,
            ff_dropout = ff_dropout,
            attn_types = attn_types,
            image_fmap_size = image_fmap_size,
            sparse_attn = sparse_attn,
            stable = stable,
            sandwich_norm = sandwich_norm,
            shift_tokens = shift_tokens,
            rotary_emb = rotary_emb
        )
        self.stable = stable
        if stable:
            self.norm_by_max = DivideMax(dim = -1)
        self.to_logits = nn.Sequential(
            nn.LayerNorm(dim),
            nn.Linear(dim, self.total_tokens),
        )
        # boolean mask over (position, vocab-slot): True marks forbidden
        # combinations (text position -> image token and vice versa)
        seq_range = torch.arange(seq_len)
        logits_range = torch.arange(total_tokens)
        seq_range = rearrange(seq_range, 'n -> () n ()')
        logits_range = rearrange(logits_range, 'd -> () () d')
        logits_mask = (
            ((seq_range >= text_seq_len) & (logits_range < num_text_tokens)) |
            ((seq_range < text_seq_len) & (logits_range >= num_text_tokens))
        )
        self.register_buffer('logits_mask', logits_mask, persistent=False)
        self.loss_img_weight = loss_img_weight
    @torch.no_grad()
    @eval_decorator
    def generate_texts(
        self,
        tokenizer,
        text = None,
        *,
        filter_thres = 0.5,
        temperature = 1.
    ):
        """Autoregressively sample text tokens, optionally primed by *text*.

        NOTE(review): tensors are created with .cuda(), so this method
        assumes a CUDA device is available.
        """
        text_seq_len = self.text_seq_len
        if text is None or text == "":
            text_tokens = torch.tensor([[0]]).cuda()
        else:
            text_tokens = torch.tensor(tokenizer.tokenizer.encode(text)).cuda().unsqueeze(0)
        for _ in range(text_tokens.shape[1], text_seq_len):
            device = text_tokens.device
            tokens = self.text_emb(text_tokens)
            tokens += self.text_pos_emb(torch.arange(text_tokens.shape[1], device = device))
            seq_len = tokens.shape[1]
            output_transf = self.transformer(tokens)
            if self.stable:
                output_transf = self.norm_by_max(output_transf)
            logits = self.to_logits(output_transf)
            # mask logits to make sure text predicts text (except last token), and image predicts image
            logits_mask = self.logits_mask[:, :seq_len]
            max_neg_value = -torch.finfo(logits.dtype).max
            logits.masked_fill_(logits_mask, max_neg_value)
            # sample the next token from the filtered distribution
            logits = logits[:, -1, :]
            filtered_logits = top_k(logits, thres = filter_thres)
            sample = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
            text_tokens = torch.cat((text_tokens, sample[:, None]), dim=-1)
        padding_tokens = set(np.arange(self.text_seq_len) + (self.num_text_tokens - self.text_seq_len))
        texts = [tokenizer.tokenizer.decode(text_token, pad_tokens=padding_tokens) for text_token in text_tokens]
        return text_tokens, texts
    @torch.no_grad()
    @eval_decorator
    def generate_images(
        self,
        text,
        *,
        clip = None,
        filter_thres = 0.5,
        temperature = 1.,
        img = None,
        num_init_img_tokens = None,
        cond_scale = 1.
    ):
        """Sample image tokens conditioned on *text* and decode with the VAE.

        ``img`` may prime the sampling with the first VAE tokens of a real
        image; ``clip`` (optional) returns similarity scores alongside the
        generated images; ``cond_scale`` applies classifier-free guidance.
        """
        vae, text_seq_len, image_seq_len, num_text_tokens = self.vae, self.text_seq_len, self.image_seq_len, self.num_text_tokens
        total_len = text_seq_len + image_seq_len
        text = text[:, :text_seq_len] # make sure text is within bounds
        out = text
        if exists(img):
            image_size = vae.image_size
            assert img.shape[1] == 3 and img.shape[2] == image_size and img.shape[3] == image_size, f'input image must have the correct image size {image_size}'
            indices = vae.get_codebook_indices(img)
            num_img_tokens = default(num_init_img_tokens, int(0.4375 * image_seq_len)) # OpenAI used 14 * 32 initial tokens to prime
            assert num_img_tokens < image_seq_len, 'number of initial image tokens for priming must be less than the total image token sequence length'
            indices = indices[:, :num_img_tokens]
            out = torch.cat((out, indices), dim = -1)
        for cur_len in range(out.shape[1], total_len):
            is_image = cur_len >= text_seq_len
            text, image = out[:, :text_seq_len], out[:, text_seq_len:]
            logits = self(text, image)
            if cond_scale != 1:
                # discovery by Katherine Crowson
                # https://twitter.com/RiversHaveWings/status/1478093658716966912
                null_cond_logits = self(text, image, null_cond_prob = 1.)
                logits = null_cond_logits + (logits - null_cond_logits) * cond_scale
            logits = logits[:, -1, :]
            filtered_logits = top_k(logits, thres = filter_thres)
            sample = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
            sample -= (num_text_tokens if is_image else 0) # offset sampled token if it is an image token, since logit space is composed of text and then image tokens
            out = torch.cat((out, sample[:, None]), dim=-1)
        text_seq = out[:, :text_seq_len]
        img_seq = out[:, -image_seq_len:]
        images = vae.decode(img_seq)
        if exists(clip):
            scores = clip(text_seq, images, return_loss = False)
            return images, scores
        return images
    def forward(
        self,
        text,
        image = None,
        return_loss = False,
        null_cond_prob = 0.
    ):
        """Compute logits (or the weighted text+image loss) for a batch.

        ``image`` may be raw pixels (tokenized on the fly via the VAE) or
        pre-computed codebook indices.
        """
        assert text.shape[-1] == self.text_seq_len, f'the length {text.shape[-1]} of the text tokens you passed in does not have the correct length ({self.text_seq_len})'
        batch, device, total_seq_len = text.shape[0], text.device, self.total_seq_len
        # randomly remove text condition with <null_cond_prob> probability
        if null_cond_prob > 0:
            null_mask = prob_mask_like((batch,), null_cond_prob, device = device)
            # NOTE(review): in-place *= mutates the caller's text tensor
            text *= rearrange(~null_mask, 'b -> b 1')
        # make sure padding in text tokens get unique padding token id
        text_range = torch.arange(self.text_seq_len, device = device) + (self.num_text_tokens - self.text_seq_len)
        text = torch.where(text == 0, text_range, text)
        # add <bos>
        text = F.pad(text, (1, 0), value = 0)
        tokens = self.text_emb(text)
        tokens += self.text_pos_emb(torch.arange(text.shape[1], device = device))
        seq_len = tokens.shape[1]
        if exists(image) and not is_empty(image):
            is_raw_image = len(image.shape) == 4
            if is_raw_image:
                image_size = self.vae.image_size
                assert tuple(image.shape[1:]) == (3, image_size, image_size), f'invalid image of dimensions {image.shape} passed in during training'
                image = self.vae.get_codebook_indices(image)
            image_len = image.shape[1]
            image_emb = self.image_emb(image)
            image_emb += self.image_pos_emb(image_emb)
            tokens = torch.cat((tokens, image_emb), dim = 1)
            seq_len += image_len
        # when training, if the length exceeds the total text + image length
        # remove the last token, since it needs not to be trained
        if tokens.shape[1] > total_seq_len:
            seq_len -= 1
            tokens = tokens[:, :-1]
        if self.stable:
            alpha = 0.1
            tokens = tokens * alpha + tokens.detach() * (1 - alpha)
        out = self.transformer(tokens)
        if self.stable:
            out = self.norm_by_max(out)
        logits = self.to_logits(out)
        # mask logits to make sure text predicts text (except last token), and image predicts image
        logits_mask = self.logits_mask[:, :seq_len]
        max_neg_value = -torch.finfo(logits.dtype).max
        logits.masked_fill_(logits_mask, max_neg_value)
        if not return_loss:
            return logits
        assert exists(image), 'when training, image must be supplied'
        # image labels live after the text vocabulary in the combined space
        offsetted_image = image + self.num_text_tokens
        labels = torch.cat((text[:, 1:], offsetted_image), dim = 1)
        logits = rearrange(logits, 'b n c -> b c n')
        loss_text = F.cross_entropy(logits[:, :, :self.text_seq_len], labels[:, :self.text_seq_len])
        loss_img = F.cross_entropy(logits[:, :, self.text_seq_len:], labels[:, self.text_seq_len:])
        loss = (loss_text + self.loss_img_weight * loss_img) / (self.loss_img_weight + 1)
        return loss
| 21,183 | 33.501629 | 170 | py |
DaVinci | DaVinci-main/models/dalle_pytorch/vae.py | import io
import sys
import os
import requests
import PIL
import warnings
import hashlib
import urllib
import yaml
from pathlib import Path
from tqdm import tqdm
from math import sqrt, log
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel, GumbelVQ
import importlib
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
from models.dalle_pytorch import distributed_utils
# constants
CACHE_PATH = os.path.expanduser("~/.cache/dalle")
OPENAI_VAE_ENCODER_PATH = 'https://cdn.openai.com/dall-e/encoder.pkl'
OPENAI_VAE_DECODER_PATH = 'https://cdn.openai.com/dall-e/decoder.pkl'
VQGAN_VAE_PATH = 'https://heibox.uni-heidelberg.de/f/140747ba53464f49b476/?dl=1'
VQGAN_VAE_CONFIG_PATH = 'https://heibox.uni-heidelberg.de/f/6ecf2af6c658432c8298/?dl=1'
# helpers methods
def exists(val):
    """True when *val* is anything other than None."""
    return not (val is None)
def default(val, d):
    """Return *val* when it is set (non-None), otherwise the fallback *d*."""
    if exists(val):
        return val
    return d
def load_model(path):
    """Deserialize a torch checkpoint from *path*, mapped onto the CPU."""
    with open(path, 'rb') as checkpoint_file:
        return torch.load(checkpoint_file, map_location = torch.device('cpu'))
def map_pixels(x, eps = 0.1):
    """Squeeze pixel values from [0, 1] into [eps, 1 - eps] (dVAE input range)."""
    return x * (1 - 2 * eps) + eps
def unmap_pixels(x, eps = 0.1):
    """Inverse of map_pixels, clamped back into [0, 1]."""
    return torch.clamp((x - eps) / (1 - 2 * eps), 0, 1)
def download(url, filename = None, root = CACHE_PATH):
    """Download *url* into *root* once, returning the local file path.

    In distributed runs only the local root worker downloads; the other
    workers wait on a barrier and then reuse the cached file.  The file is
    first written to a temp name and atomically renamed on completion.
    """
    if (
        not distributed_utils.is_distributed
        or distributed_utils.backend.is_local_root_worker()
    ):
        os.makedirs(root, exist_ok = True)
    filename = default(filename, os.path.basename(url))
    download_target = os.path.join(root, filename)
    # fix: the temp name had degenerated into the constant 'tmp.(unknown)'
    # (mangled f-string literal), so concurrent downloads of *different*
    # files would clobber each other's partial data; derive it from the
    # real filename instead
    download_target_tmp = os.path.join(root, f'tmp.{filename}')
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if (
        distributed_utils.is_distributed
        and not distributed_utils.backend.is_local_root_worker()
        and not os.path.isfile(download_target)
    ):
        # If the file doesn't exist yet, wait until it's downloaded by the root worker.
        distributed_utils.backend.local_barrier()
    if os.path.isfile(download_target):
        return download_target
    # stream the body in 8 KiB chunks with a progress bar
    with urllib.request.urlopen(url) as source, open(download_target_tmp, "wb") as output:
        with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    os.rename(download_target_tmp, download_target)
    if (
        distributed_utils.is_distributed
        and distributed_utils.backend.is_local_root_worker()
    ):
        distributed_utils.backend.local_barrier()
    return download_target
def make_contiguous(module):
    """Force every parameter of *module* into contiguous storage, in place."""
    with torch.no_grad():
        for p in module.parameters():
            p.set_(p.contiguous())
# pretrained Discrete VAE from OpenAI
class OpenAIDiscreteVAE(nn.Module):
    """Wrapper around OpenAI's released discrete VAE checkpoints.

    Downloads the pretrained encoder/decoder on construction; images are
    256x256, quantised over a vocabulary of 8192 codes.
    """
    def __init__(self):
        super().__init__()
        self.enc = load_model(download(OPENAI_VAE_ENCODER_PATH))
        self.dec = load_model(download(OPENAI_VAE_DECODER_PATH))
        make_contiguous(self)
        self.num_layers = 3
        self.image_size = 256
        self.num_tokens = 8192

    @torch.no_grad()
    def get_codebook_indices(self, img):
        """Quantise a batch of images into flat codebook-index sequences."""
        img = map_pixels(img)
        z_logits = self.enc.blocks(img)
        z = torch.argmax(z_logits, dim = 1)
        return rearrange(z, 'b h w -> b (h w)')

    def decode(self, img_seq):
        """Decode flat codebook-index sequences back into images in [0, 1]."""
        b, n = img_seq.shape
        img_seq = rearrange(img_seq, 'b (h w) -> b h w', h = int(sqrt(n)))
        z = F.one_hot(img_seq, num_classes = self.num_tokens)
        z = rearrange(z, 'b h w c -> b c h w').float()
        x_stats = self.dec(z).float()
        x_rec = unmap_pixels(torch.sigmoid(x_stats[:, :3]))
        return x_rec

    def forward(self, img):
        # fix: `raise NotImplemented` raised the NotImplemented *constant*,
        # which is itself a TypeError at runtime; NotImplementedError is the
        # exception class intended here
        raise NotImplementedError
# VQGAN from Taming Transformers paper
# https://arxiv.org/abs/2012.09841
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like 'pkg.mod.Class' to the named attribute."""
    module, cls = string.rsplit(".", 1)
    if reload:
        # force a re-execution of the module before resolving the attribute
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    """Build the object named by config['target'] with config['params'] kwargs."""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", dict()))
class VQGanVAE(nn.Module):
    """Wrapper around a Taming-Transformers VQGAN used as image tokenizer.

    When no paths are given, the pretrained f=16, 1024-codebook VQGAN is
    downloaded into the cache directory.  Supports both the standard VQModel
    and the GumbelVQ variant (different quantizer attribute names).
    """
    def __init__(self, vqgan_model_path=None, vqgan_config_path=None):
        super().__init__()
        if vqgan_model_path is None:
            model_filename = 'vqgan.1024.model.ckpt'
            config_filename = 'vqgan.1024.config.yml'
            download(VQGAN_VAE_CONFIG_PATH, config_filename)
            download(VQGAN_VAE_PATH, model_filename)
            config_path = str(Path(CACHE_PATH) / config_filename)
            model_path = str(Path(CACHE_PATH) / model_filename)
        else:
            model_path = vqgan_model_path
            config_path = vqgan_config_path
        config = OmegaConf.load(config_path)
        model = instantiate_from_config(config["model"])
        state = torch.load(model_path, map_location = 'cpu')['state_dict']
        model.load_state_dict(state, strict = False)
        print(f"Loaded VQGAN from {model_path} and {config_path}")
        self.model = model
        # f as used in https://github.com/CompVis/taming-transformers#overview-of-pretrained-models
        f = config.model.params.ddconfig.resolution / config.model.params.ddconfig.attn_resolutions[0]
        self.num_layers = int(log(f)/log(2))
        self.image_size = 256
        self.num_tokens = config.model.params.n_embed
        self.is_gumbel = isinstance(self.model, GumbelVQ)
        self._register_external_parameters()

    def _register_external_parameters(self):
        """Register external parameters for DeepSpeed partitioning."""
        if (
            not distributed_utils.is_distributed
            or not distributed_utils.using_backend(
                distributed_utils.DeepSpeedBackend)
        ):
            return
        deepspeed = distributed_utils.backend.backend_module
        deepspeed.zero.register_external_parameter(
            self, self.model.quantize.embed.weight if self.is_gumbel else self.model.quantize.embedding.weight)

    @torch.no_grad()
    def get_codebook_indices(self, img):
        """Quantise a batch of images (in [0, 1]) into flat index sequences."""
        b = img.shape[0]
        img = (2 * img) - 1
        _, _, [_, _, indices] = self.model.encode(img)
        if self.is_gumbel:
            return rearrange(indices, 'b h w -> b (h w)', b=b)
        return rearrange(indices, '(b n) -> b n', b = b)

    def decode(self, img_seq):
        """Decode flat codebook-index sequences back into images in [0, 1]."""
        b, n = img_seq.shape
        one_hot_indices = F.one_hot(img_seq, num_classes = self.num_tokens).float()
        z = one_hot_indices @ self.model.quantize.embed.weight if self.is_gumbel \
            else (one_hot_indices @ self.model.quantize.embedding.weight)
        z = rearrange(z, 'b (h w) c -> b c h w', h = int(sqrt(n)))
        img = self.model.decode(z)
        img = (img.clamp(-1., 1.) + 1) * 0.5
        return img

    def forward(self, img):
        # fix: `raise NotImplemented` raised the NotImplemented *constant*,
        # which is itself a TypeError at runtime; NotImplementedError is the
        # exception class intended here
        raise NotImplementedError
| 7,302 | 32.045249 | 111 | py |
DaVinci | DaVinci-main/models/dalle_pytorch/distributed_utils.py | """
Utility functions for optional distributed execution.
To use,
1. set the `BACKENDS` to the ones you want to make available,
2. in the script, wrap the argument parser with `wrap_arg_parser`,
3. in the script, set and use the backend by calling
`set_backend_from_args`.
You can check whether a backend is in use with the `using_backend`
function.
"""
from models.dalle_pytorch.distributed_backends import \
DeepSpeedBackend, \
DummyBackend, \
HorovodBackend
_DEFAULT_BACKEND = DummyBackend()
"""Which backend to use by default. Assumed to be _not_ distributed."""
BACKENDS = [
_DEFAULT_BACKEND,
DeepSpeedBackend(),
HorovodBackend(),
]
is_distributed = None
"""Whether we are distributed."""
backend = None
"""Backend in usage."""
def wrap_arg_parser(parser):
    """Attach the generic backend-selection flag plus each backend's own flags."""
    parser.add_argument(
        '--distributed_backend',
        '--distr_backend',
        default=None,
        type=str,
        help='which distributed backend to use. Do not distribute by default',
    )
    # let every known backend contribute its own CLI arguments
    for candidate in BACKENDS:
        parser = candidate.wrap_arg_parser(parser)
    return parser
def set_backend_from_args(args):
    """Set and return the backend based on the given `args`.

    Side effect: mutates the module-level `is_distributed` and `backend`
    globals (and, for the deepspeed shim, `args.distributed_backend`).
    NOTE(review): assumes `args` has `deepspeed` and `distributed_backend`
    attributes — i.e. the parser was wrapped via `wrap_arg_parser`.
    """
    global is_distributed, backend
    # Handle this specially for backwards compatibility.
    if args.deepspeed:
        args.distributed_backend = DeepSpeedBackend.BACKEND_NAME
    # no backend requested: fall back to the non-distributed dummy backend
    if not args.distributed_backend:
        is_distributed = False
        backend = _DEFAULT_BACKEND
        return backend
    backend_name = args.distributed_backend.lower()
    for distr_backend in BACKENDS:
        if distr_backend.BACKEND_NAME.lower() == backend_name:
            backend = distr_backend
            if not backend.has_backend():
                raise ModuleNotFoundError(
                    f'{backend.BACKEND_NAME} backend selected but '
                    'module not available'
                )
            print(f'Using {backend.BACKEND_NAME} for distributed execution')
            is_distributed = True
            return backend
    raise ValueError(
        'unknown backend; please check `distributed_utils.BACKENDS`')
def require_set_backend():
    """Raise an `AssertionError` when the backend has not been set."""
    # fix: the original used a bare `assert`, which is stripped when Python
    # runs with -O, silently disabling this guard; raise explicitly instead
    # (same exception type, so callers are unaffected)
    if backend is None:
        raise AssertionError(
            'distributed backend is not set. Please call '
            '`distributed_utils.set_backend_from_args` at the start of your script'
        )
def using_backend(test_backend):
    """Return whether the backend is set to `test_backend`.

    `test_backend` may be a string of the name of the backend or
    its class.
    """
    require_set_backend()
    return (
        backend.BACKEND_NAME == test_backend
        if isinstance(test_backend, str)
        else isinstance(backend, test_backend)
    )
| 2,846 | 28.350515 | 79 | py |
DaVinci | DaVinci-main/models/dalle_pytorch/transformer.py | from functools import partial
from itertools import islice, cycle
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from models.dalle_pytorch.reversible import ReversibleSequence, SequentialSequence
from models.dalle_pytorch.attention import Attention, SparseAttention, SparseConvCausalAttention, SparseAxialCausalAttention
from rotary_embedding_torch import RotaryEmbedding, broadcat
from g_mlp_pytorch import gMLPBlock
# helpers
def exists(val):
    """True when *val* is anything other than None."""
    return not (val is None)
def default(val, d):
    """Return *val* when it is set (non-None), otherwise the fallback *d*."""
    if exists(val):
        return val
    return d
def cast_tuple(val, depth = 1):
    """Coerce *val* into a tuple; non-sequence scalars are repeated *depth* times."""
    if isinstance(val, list):
        val = tuple(val)
    if isinstance(val, tuple):
        return val
    return (val,) * depth
# classes
class DivideMax(nn.Module):
    """Normalise activations by their (detached) maximum along *dim*."""
    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        # detach so no gradient flows through the normalising constant
        denom = x.amax(dim = self.dim, keepdim = True).detach()
        return x / denom
# https://arxiv.org/abs/2103.17239
class LayerScale(nn.Module):
    """Scale a residual branch by a learnable per-channel gain (CaiT, arXiv:2103.17239)."""
    def __init__(self, dim, depth, fn):
        super().__init__()
        # deeper networks start with a smaller residual contribution
        if depth <= 18:
            init_eps = 0.1
        elif depth <= 24:
            init_eps = 1e-5
        else:
            init_eps = 1e-6
        self.scale = nn.Parameter(torch.full((1, 1, dim), init_eps))
        self.fn = fn

    def forward(self, x, **kwargs):
        out = self.fn(x, **kwargs)
        return out * self.scale
# layer norm
class PreNorm(nn.Module):
    """Apply LayerNorm before *fn* (and after it too when sandwich=True)."""
    def __init__(self, dim, fn, sandwich = False):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        # sandwich norm adds a second LayerNorm on the output
        self.norm_out = nn.LayerNorm(dim) if sandwich else nn.Identity()
        self.fn = fn

    def forward(self, x, **kwargs):
        out = self.fn(self.norm(x), **kwargs)
        return self.norm_out(out)
# feed forward
class GEGLU(nn.Module):
    """Gated GELU: split the last dim in half, gate one half by GELU of the other."""
    def forward(self, x):
        x, gates = x.chunk(2, dim = -1)
        return x * F.gelu(gates)

class FeedForward(nn.Module):
    """Position-wise feed-forward block with a GEGLU gate.

    The first projection is doubled in width because GEGLU halves it again.
    """
    def __init__(self, dim, dropout = 0., mult = 4.):
        super().__init__()
        # fix: the default `mult` is a float (4.), so `dim * mult * 2` was a
        # float, and modern torch rejects non-integer sizes in nn.Linear;
        # cast the inner width to int (a no-op for integral values)
        inner_dim = int(dim * mult)
        self.net = nn.Sequential(
            nn.Linear(dim, inner_dim * 2),
            GEGLU(),
            nn.Dropout(dropout),
            nn.Linear(inner_dim, dim)
        )

    def forward(self, x):
        return self.net(x)
# token shift classes
class PreShiftToken(nn.Module):
    """Token-shift wrapper: before calling *fn*, mixes a slice of each
    embedding with its neighbour's — the previous token for text, and the
    top/left neighbours for image-grid tokens.
    """
    def __init__(self, fn, image_size, seq_len):
        super().__init__()
        self.fn = fn
        self.image_size = image_size
        self.seq_len = seq_len
    def forward(self, x, **kwargs):
        n = x.shape[1]
        seq_len, image_size = self.seq_len, self.image_size
        img_seq_len = image_size ** 2
        # +1 accounts for the <bos> token prepended to the text segment
        text_len = seq_len - img_seq_len + 1
        padding = seq_len - n + 1
        # get text and image tokens
        x_text, x_img = x[:, :text_len], x[:, text_len:]
        # pad the (possibly partial, during generation) image segment up to a
        # full grid so it can be reshaped to (h, w)
        x_img = F.pad(x_img, (0, 0, 0, padding))
        x_img = rearrange(x_img, 'b (h w) d -> b h w d', h = image_size)
        # shift 1 from the left for text tokens
        x_text_shift, x_text_pass = x_text.chunk(2, dim = -1)
        x_text_shift = F.pad(x_text_shift, (0, 0, 1, -1))
        x_text = torch.cat((x_text_shift, x_text_pass), dim = -1)
        # shift from top, left for image tokens
        x_img_shift_top, x_img_shift_left, *x_img_pass = x_img.chunk(4, dim = -1)
        x_img_shift_left = F.pad(x_img_shift_left, (0, 0, 1, -1))
        x_img_shift_top = F.pad(x_img_shift_top, (0, 0, 0, 0, 1, -1))
        x_img = torch.cat((x_img_shift_top, x_img_shift_left, *x_img_pass), dim = -1)
        # merge text and image sequence back together
        x_img = rearrange(x_img, 'b h w d -> b (h w) d')
        # drop the grid padding added above before handing off to fn
        x = torch.cat((x_text, x_img[:, :-padding]), dim = 1)
        return self.fn(x, **kwargs)
# main transformer class
class Transformer(nn.Module):
    """Stack of `depth` LayerScale(PreNorm(attention)) + LayerScale(PreNorm(ff))
    residual layer pairs used by DALL-E.

    The attention flavor of each layer is chosen by cycling through
    `attn_types` ('full', 'sparse', 'axial_row', 'axial_col', 'conv_like' or
    'mlp'). When `rotary_emb` is on, a combined text+image rotary position
    embedding is precomputed once and registered as the `pos_emb` buffer.
    """
    def __init__(
        self,
        *,
        dim,
        depth,
        seq_len,
        reversible = False,
        causal = True,
        heads = 8,
        dim_head = 64,
        ff_mult = 4,
        attn_dropout = 0.,
        ff_dropout = 0.,
        attn_types = None,
        image_fmap_size = None,
        sparse_attn = False,
        stable = False,
        sandwich_norm = False,
        shift_tokens = False,
        rotary_emb = True
    ):
        super().__init__()
        layers = nn.ModuleList([])
        sparse_layer = cast_tuple(sparse_attn, depth)
        # cycle the configured attention types across the depth of the network
        attn_types = default(attn_types, ('full',))
        attn_types = cast_tuple(attn_types)
        attn_type_layer = islice(cycle(attn_types), depth)
        for ind, sparse_attn, attn_type in zip(range(depth), sparse_layer, attn_type_layer):
            if attn_type == 'full':
                attn_class = partial(Attention, stable = stable)
            elif attn_type == 'sparse':
                attn_class = SparseAttention
            elif attn_type == 'axial_row':
                attn_class = partial(SparseAxialCausalAttention, seq_len = seq_len, axis = 0, image_size = image_fmap_size, stable = stable)
            elif attn_type == 'axial_col':
                attn_class = partial(SparseAxialCausalAttention, seq_len = seq_len, axis = 1, image_size = image_fmap_size, stable = stable)
            elif attn_type == 'conv_like':
                attn_class = partial(SparseConvCausalAttention, seq_len = seq_len, image_size = image_fmap_size, stable = stable)
            elif attn_type == 'mlp':
                attn_class = partial(gMLPBlock, seq_len = seq_len)
            else:
                raise ValueError(f'attention type "{attn_type}" is not valid')
            # gMLP blocks take a different constructor signature than attention
            if attn_type != 'mlp':
                attn = attn_class(dim, causal = causal, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout)
            else:
                attn = attn_class(dim = dim, causal = causal, dim_ff = dim * 4)
            ff = FeedForward(dim, mult = ff_mult, dropout = ff_dropout)
            # optionally wrap both residual branches with token shifting
            if shift_tokens:
                attn, ff = map(lambda t: PreShiftToken(t, image_size = image_fmap_size, seq_len = seq_len), (attn, ff))
            layers.append(nn.ModuleList([
                LayerScale(dim, ind + 1, PreNorm(dim, attn, sandwich = sandwich_norm)),
                LayerScale(dim, ind + 1, PreNorm(dim, ff, sandwich = sandwich_norm))
            ]))
        execute_type = ReversibleSequence if reversible else SequentialSequence
        # route 'mask' / 'rotary_pos_emb' kwargs only to the attention branch
        route_attn = ((True, False),) * depth
        attn_route_map = {'mask': route_attn, 'rotary_pos_emb': route_attn}
        self.layers = execute_type(layers, args_route = attn_route_map)
        # generate positional embeddings for rotary
        pos_emb = None
        if rotary_emb:
            assert 'mlp' not in attn_types, 'you cannot use gMLPs if rotary embedding is turned on'
            # dim_head is split into three rotary groups: text position,
            # image row and image column
            rot_dim = dim_head // 3
            img_seq_len = (image_fmap_size ** 2)
            text_len = seq_len - img_seq_len + 1
            text_pos_emb = RotaryEmbedding(dim = rot_dim)
            img_axial_pos_emb = RotaryEmbedding(dim = rot_dim, freqs_for = 'pixel')
            text_freqs = text_pos_emb(torch.arange(text_len))
            img_to_text_freqs = text_pos_emb(torch.full((img_seq_len,), 8192)) # image is given a position far away from text
            text_freqs = torch.cat((text_freqs, img_to_text_freqs), dim = 0)
            img_freqs_axial = img_axial_pos_emb(torch.linspace(-1, 1, steps = image_fmap_size))
            # combine the row/column axial frequencies into a per-pixel table
            img_freqs = broadcat((rearrange(img_freqs_axial, 'i d -> i () d'), rearrange(img_freqs_axial, 'j d -> () j d')), dim = -1)
            img_freqs = rearrange(img_freqs, 'h w d -> (h w) d')
            text_axial_freqs = img_axial_pos_emb(torch.full((text_len,), -10.)) # text is given a position of -10 apart from the image axial positions, which is from range [-1, 1]
            text_axial_freqs = torch.cat((text_axial_freqs, text_axial_freqs), dim = -1)
            img_freqs = torch.cat((text_axial_freqs, img_freqs), dim = 0)
            pos_emb = torch.cat((text_freqs, img_freqs), dim = -1)
            pos_emb = rearrange(pos_emb, 'n d -> () n d')
        # registered even when None, so forward can always reference it
        self.register_buffer('pos_emb', pos_emb)
    def forward(self, x, **kwargs):
        # x: (batch, seq, dim); extra kwargs (e.g. mask) routed per attn_route_map
        return self.layers(x, rotary_pos_emb = self.pos_emb, **kwargs)
| 8,295 | 34.758621 | 180 | py |
DaVinci | DaVinci-main/models/dalle_pytorch/tokenizer.py | # take from https://github.com/openai/CLIP/blob/main/clip/simple_tokenizer.py
# to give users a quick easy start to training DALL-E without doing BPE
import torch
import youtokentome as yttm
from tokenizers import Tokenizer
from tokenizers.processors import ByteLevel
from transformers import BertTokenizer
import html
import os
from functools import lru_cache
from pathlib import Path
import ftfy
import regex as re
# OpenAI simple tokenizer
@lru_cache()
def default_bpe():
    """Return the absolute path of the bundled BPE vocabulary file (memoized)."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, "data/bpe_simple_vocab_16e6.txt")
@lru_cache()
def bytes_to_unicode():
    """Reversibly map every byte 0..255 to a printable unicode character.

    Printable ASCII/latin bytes map to themselves; the rest are assigned
    code points starting at 256 in ascending byte order. Insertion order
    (printable bytes first) is preserved because downstream vocab building
    iterates the values.
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    mapping = {b: chr(b) for b in printable}
    offset = 0
    for b in range(2 ** 8):
        if b not in mapping:
            mapping[b] = chr(2 ** 8 + offset)
            offset += 1
    return mapping
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a sequence of symbols).

    Returns an empty set for sequences shorter than two symbols (the previous
    implementation raised IndexError on empty input via `word[0]`).
    """
    return set(zip(word, word[1:]))
def basic_clean(text):
    """Repair mojibake with ftfy, undo (possibly double) HTML escaping, and trim."""
    fixed = ftfy.fix_text(text)
    unescaped = html.unescape(html.unescape(fixed))
    return unescaped.strip()
def whitespace_clean(text):
    """Collapse runs of whitespace into single spaces and strip the ends."""
    return re.sub(r'\s+', ' ', text).strip()
class SimpleTokenizer(object):
    """OpenAI CLIP-style byte-level BPE tokenizer.

    Vocabulary layout: 256 byte tokens + 256 byte+'</w>' tokens + 48894
    merges + '<|startoftext|>' (id 49406) + '<|endoftext|>' (id 49407),
    for a total of 49408 entries.
    """
    # NOTE: the default is evaluated once at import time, so the bundled
    # vocab path is resolved when the module loads.
    def __init__(self, bpe_path = default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        # first line of the merges file is a header; keep exactly 48894 merges
        merges = Path(bpe_path).read_text(encoding='utf8').split('\n')
        merges = merges[1:49152 - 256 - 2 + 1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v + '</w>' for v in vocab]
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.vocab_size = 49408
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        # lower rank = earlier (higher priority) merge
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        self.pat = re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            re.IGNORECASE)
    def bpe(self, token):
        """Apply BPE merges to a single pre-tokenized word; returns the
        space-joined merged symbols (last symbol carries '</w>')."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + '</w>',)
        pairs = get_pairs(word)
        if not pairs:
            return token + '</w>'
        while True:
            # repeatedly apply the lowest-ranked (earliest learned) merge
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def encode(self, text):
        """Tokenize `text` into a list of BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens
    def decode(self, tokens, remove_start_end = True, pad_tokens = {}):
        """Decode token ids back to text, optionally dropping the
        start/end/pad special tokens."""
        if torch.is_tensor(tokens):
            tokens = tokens.tolist()
        if remove_start_end:
            # 49406 = <|startoftext|>, 49407 = <|endoftext|>, 0 = padding
            # (fixed: previously filtered 40407, a typo, leaving end tokens in)
            tokens = [token for token in tokens if token not in (49406, 49407, 0)]
        text = ''.join([self.decoder[token] for token in tokens if token not in pad_tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
        return text
    def tokenize(self, texts, context_length = 256, truncate_text = False):
        """Encode one string or a list of strings into a zero-padded
        (len(texts), context_length) LongTensor."""
        if isinstance(texts, str):
            texts = [texts]
        all_tokens = [self.encode(text) for text in texts]
        result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
        for i, tokens in enumerate(all_tokens):
            if len(tokens) > context_length:
                if truncate_text:
                    tokens = tokens[:context_length]
                else:
                    raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
            result[i, :len(tokens)] = torch.tensor(tokens)
        return result
# module-level default tokenizer shared by callers that don't supply their own
# (constructing it reads the bundled BPE vocab from disk at import time)
tokenizer = SimpleTokenizer()
# huggingface tokenizer
class HugTokenizer:
    """Wrapper around a trained huggingface `tokenizers` BPE model file."""
    def __init__(self, bpe_path = None):
        bpe_path = Path(bpe_path)
        assert bpe_path.exists(), f'BPE json path {str(bpe_path)} does not exist'
        tokenizer = Tokenizer.from_file(str(bpe_path))
        tokenizer.post_processor = ByteLevel(trim_offsets = True)
        self.tokenizer = tokenizer
        self.vocab_size = tokenizer.get_vocab_size()
    # pad_tokens must default to a set: the previous default `{}` was an
    # empty *dict*, so `pad_tokens.union(...)` raised AttributeError.
    # (set() is never mutated here, so the shared default is safe.)
    def decode(self, tokens, pad_tokens = set()):
        """Decode ids to text, ignoring pad ids and id 0."""
        if torch.is_tensor(tokens):
            tokens = tokens.tolist()
        ignore_ids = pad_tokens.union({0})
        kept = [token for token in tokens if token not in ignore_ids]
        return self.tokenizer.decode(kept, skip_special_tokens = True)
    def encode(self, text):
        """Encode one string to a list of token ids."""
        return self.tokenizer.encode(text).ids
    def tokenize(self, texts, context_length = 256, truncate_text = False):
        """Encode string(s) into a zero-padded (batch, context_length) LongTensor."""
        if isinstance(texts, str):
            texts = [texts]
        all_tokens = [self.encode(text) for text in texts]
        result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
        for i, tokens in enumerate(all_tokens):
            if len(tokens) > context_length:
                if not truncate_text:
                    raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
                tokens = tokens[:context_length]
            result[i, :len(tokens)] = torch.tensor(tokens)
        return result
# chinese tokenizer
class ChineseTokenizer:
    """Wraps the pretrained `bert-base-chinese` WordPiece tokenizer."""
    def __init__(self):
        tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
        self.tokenizer = tokenizer
        self.vocab_size = tokenizer.vocab_size
    # pad_tokens must default to a set: the previous default `{}` was an
    # empty *dict*, so `pad_tokens.union(...)` raised AttributeError.
    # (set() is never mutated here, so the shared default is safe.)
    def decode(self, tokens, pad_tokens = set()):
        """Decode ids to text, ignoring pad ids and id 0."""
        if torch.is_tensor(tokens):
            tokens = tokens.tolist()
        ignore_ids = pad_tokens.union({0})
        kept = [token for token in tokens if token not in ignore_ids]
        return self.tokenizer.decode(kept)
    def encode(self, text):
        """Encode one string to a 1-D LongTensor of ids (no special tokens)."""
        ids = self.tokenizer.encode(text, add_special_tokens = False)
        return torch.tensor(ids)
    def tokenize(self, texts, context_length = 256, truncate_text = False):
        """Encode string(s) into a zero-padded (batch, context_length) LongTensor."""
        if isinstance(texts, str):
            texts = [texts]
        all_tokens = [self.encode(text) for text in texts]
        result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
        for i, tokens in enumerate(all_tokens):
            if len(tokens) > context_length:
                if not truncate_text:
                    raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
                tokens = tokens[:context_length]
            result[i, :len(tokens)] = torch.tensor(tokens)
        return result
# yttm tokenizer
class YttmTokenizer:
    """Wrapper around a trained YouTokenToMe BPE model."""
    def __init__(self, bpe_path = None):
        bpe_path = Path(bpe_path)
        assert bpe_path.exists(), f'BPE json path {str(bpe_path)} does not exist'
        tokenizer = yttm.BPE(model = str(bpe_path))
        self.tokenizer = tokenizer
        self.vocab_size = tokenizer.vocab_size()
    # pad_tokens must default to a set: the previous default `{}` was an
    # empty *dict*, so `pad_tokens.union(...)` raised AttributeError.
    # (set() is never mutated here, so the shared default is safe.)
    def decode(self, tokens, pad_tokens = set()):
        """Decode ids to text, ignoring pad ids and id 0."""
        if torch.is_tensor(tokens):
            tokens = tokens.tolist()
        return self.tokenizer.decode(tokens, ignore_ids = pad_tokens.union({0}))
    def encode(self, texts):
        """Batch-encode a list of strings into a list of 1-D LongTensors."""
        encoded = self.tokenizer.encode(texts, output_type = yttm.OutputType.ID)
        return [torch.tensor(ids) for ids in encoded]
    def tokenize(self, texts, context_length = 256, truncate_text = False):
        """Encode string(s) into a zero-padded (batch, context_length) LongTensor."""
        if isinstance(texts, str):
            texts = [texts]
        all_tokens = self.encode(texts)
        result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
        for i, tokens in enumerate(all_tokens):
            if len(tokens) > context_length:
                if not truncate_text:
                    raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
                tokens = tokens[:context_length]
            result[i, :len(tokens)] = torch.tensor(tokens)
        return result
| 9,420 | 34.284644 | 120 | py |
DaVinci | DaVinci-main/models/dalle_pytorch/__init__.py | from models.dalle_pytorch.dalle_pytorch import DALLE, CLIP, DiscreteVAE
from models.dalle_pytorch.vae import OpenAIDiscreteVAE, VQGanVAE
from pkg_resources import get_distribution
# __version__ = get_distribution('dalle_pytorch').version
| 239 | 39 | 71 | py |
DaVinci | DaVinci-main/models/dalle_pytorch/attention.py | from inspect import isfunction
from math import ceil
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from rotary_embedding_torch import apply_rotary_emb
# helpers
def exists(val):
    """Return True when `val` is anything other than None."""
    return not (val is None)
def uniq(arr):
    """Return a keys view over the distinct elements of `arr`, first-seen order."""
    return dict.fromkeys(arr).keys()
def default(val, d):
    """Return `val` unless it is None; otherwise return `d`, calling it first
    when it is a plain Python function (lazy default)."""
    if val is not None:
        return val
    return d() if isfunction(d) else d
def max_neg_value(t):
    """Most negative finite value for `t`'s dtype (stand-in for -inf in masks)."""
    finfo = torch.finfo(t.dtype)
    return -finfo.max
def stable_softmax(t, dim = -1, alpha = 32 ** 2):
    """Numerically stabler softmax: scale down by `alpha`, subtract the
    (detached) per-slice max, scale back up, then softmax. Mathematically
    identical to softmax(t) by shift invariance."""
    scaled = t / alpha
    scaled = scaled - scaled.amax(dim = dim, keepdim = True).detach()
    return (scaled * alpha).softmax(dim = dim)
def apply_pos_emb(pos_emb, qkv):
    """Trim `pos_emb` to the q/k/v sequence length and apply rotary embedding
    to each tensor; returns a tuple in the same order."""
    seq_len = qkv[0].shape[-2]
    trimmed = pos_emb[..., :seq_len, :]
    return tuple(apply_rotary_emb(trimmed, t) for t in qkv)
# classes
class Attention(nn.Module):
    """Standard multi-head (optionally causal) self-attention.

    Input and output are (batch, seq, dim); q/k/v come from one bias-free
    linear projection. With `stable`, the stabilized softmax above is used
    instead of `torch.softmax`.
    """
    def __init__(self, dim, seq_len, causal = True, heads = 8, dim_head = 64, dropout = 0., stable = False):
        super().__init__()
        inner_dim = dim_head * heads
        self.heads = heads
        self.seq_len = seq_len
        # queries are scaled by 1/sqrt(dim_head)
        self.scale = dim_head ** -0.5
        self.stable = stable
        self.causal = causal
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        )
    def forward(self, x, mask = None, rotary_pos_emb = None):
        # x: (b, n, dim); mask: (b, n) key mask where True means "attend"
        b, n, _, h, device = *x.shape, self.heads, x.device
        softmax = torch.softmax if not self.stable else stable_softmax
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
        if exists(rotary_pos_emb):
            q, k, v = apply_pos_emb(rotary_pos_emb, (q, k, v))
        q = q * self.scale
        dots = torch.einsum('b h i d, b h j d -> b h i j', q, k)
        mask_value = max_neg_value(dots)
        if exists(mask):
            # broadcast key mask over heads and query positions
            mask = rearrange(mask, 'b j -> b () () j')
            dots.masked_fill_(~mask, mask_value)
            del mask
        if self.causal:
            # mask the strict upper triangle so position i only sees j <= i
            i, j = dots.shape[-2:]
            mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
            dots.masked_fill_(mask, mask_value)
        attn = softmax(dots, dim=-1)
        out = torch.einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)
        return out
# sparse attention with convolutional pattern, as mentioned in the blog post. customizable kernel size and dilation
class SparseConvCausalAttention(nn.Module):
    """Causal attention with a convolution-like sparse pattern over image tokens.

    The sequence is split into a text prefix and an image grid. Text tokens
    attend causally to text; every image token attends to all text tokens plus
    a local causal (kernel_size x kernel_size, optionally dilated) image
    neighborhood, gathered via F.unfold.
    """
    def __init__(self, dim, seq_len, image_size = 32, kernel_size = 5, dilation = 1, heads = 8, dim_head = 64, dropout = 0., stable = False, **kwargs):
        super().__init__()
        assert kernel_size % 2 == 1, 'kernel size must be odd'
        inner_dim = dim_head * heads
        self.seq_len = seq_len
        self.heads = heads
        self.scale = dim_head ** -0.5
        self.image_size = image_size
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.stable = stable
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        )
    def forward(self, x, mask = None, rotary_pos_emb = None):
        b, n, _, h, img_size, kernel_size, dilation, seq_len, device = *x.shape, self.heads, self.image_size, self.kernel_size, self.dilation, self.seq_len, x.device
        softmax = torch.softmax if not self.stable else stable_softmax
        img_seq_len = img_size ** 2
        text_len = seq_len + 1 - img_seq_len
        # pad the sequence out to the full length; mask covers the text prefix
        padding = seq_len - n + 1
        mask = default(mask, lambda: torch.ones(b, text_len, device = device).bool())
        x = F.pad(x, (0, 0, 0, padding), value = 0)
        mask = mask[:, :text_len]
        # derive query / keys / values (heads folded into the batch dim)
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), qkv)
        if exists(rotary_pos_emb):
            q, k, v = apply_pos_emb(rotary_pos_emb, (q, k, v))
        q *= self.scale
        ((q_text, q_img), (k_text, k_img), (v_text, v_img)) = map(lambda t: (t[:, :-img_seq_len], t[:, -img_seq_len:]), (q, k, v))
        # text attention (dense, causal)
        dots_text = einsum('b i d, b j d -> b i j', q_text, k_text)
        mask_value = max_neg_value(dots_text)
        i, j = dots_text.shape[-2:]
        text_causal_mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
        dots_text.masked_fill_(text_causal_mask, mask_value)
        attn_text = softmax(dots_text, dim = -1)
        out_text = einsum('b i j, b j d -> b i d', attn_text, v_text)
        # image attention: unfold keys/values into per-pixel local windows
        effective_kernel_size = (kernel_size - 1) * dilation + 1
        padding = effective_kernel_size // 2
        k_img, v_img = map(lambda t: rearrange(t, 'b (h w) c -> b c h w', h = img_size), (k_img, v_img))
        k_img, v_img = map(lambda t: F.unfold(t, kernel_size, padding = padding, dilation = dilation), (k_img, v_img))
        k_img, v_img = map(lambda t: rearrange(t, 'b (d j) i -> b i j d', j = kernel_size ** 2), (k_img, v_img))
        # let image attend to all of text
        dots_image = einsum('b i d, b i j d -> b i j', q_img, k_img)
        dots_image_to_text = einsum('b i d, b j d -> b i j', q_img, k_text)
        # calculate causal attention for local convolution: unfold the flat
        # pixel indices the same way to know which neighbor each slot holds
        i, j = dots_image.shape[-2:]
        img_seq = torch.arange(img_seq_len, device = device)
        k_img_indices = rearrange(img_seq.float(), '(h w) -> () () h w', h = img_size)
        k_img_indices = F.pad(k_img_indices, (padding,) * 4, value = img_seq_len) # padding set to be max, so it is never attended to
        k_img_indices = F.unfold(k_img_indices, kernel_size, dilation = dilation)
        k_img_indices = rearrange(k_img_indices, 'b j i -> b i j')
        # mask image attention: a pixel may only see neighbors at lower indices
        q_img_indices = rearrange(img_seq, 'i -> () i ()')
        causal_mask = q_img_indices < k_img_indices
        # concat text mask with image causal mask
        causal_mask = repeat(causal_mask, '() i j -> b i j', b = b * h)
        mask = repeat(mask, 'b j -> (b h) i j', i = i, h = h)
        mask = torch.cat((~mask, causal_mask), dim = -1)
        # image can attend to all of text
        dots = torch.cat((dots_image_to_text, dots_image), dim = -1)
        dots.masked_fill_(mask, mask_value)
        attn = softmax(dots, dim = -1)
        # aggregate text and local-image contributions separately, then sum
        attn_image_to_text, attn_image = attn[..., :text_len], attn[..., text_len:]
        out_image_to_image = einsum('b i j, b i j d -> b i d', attn_image, v_img)
        out_image_to_text = einsum('b i j, b j d -> b i d', attn_image_to_text, v_text)
        out_image = out_image_to_image + out_image_to_text
        # combine attended values for both text and image
        out = torch.cat((out_text, out_image), dim = 1)
        out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
        out = self.to_out(out)
        return out[:, :n]
# sparse axial causal attention
class SparseAxialCausalAttention(nn.Module):
    """Causal attention restricted to one image axis (rows or columns).

    Text tokens attend causally to text; each image token attends to all text
    plus, causally, to the image tokens that share its row (axis=0) or
    column (axis=1).
    """
    def __init__(self, dim, seq_len, image_size = 32, axis = 0, heads = 8, dim_head = 64, dropout = 0., stable = False, **kwargs):
        super().__init__()
        assert axis in {0, 1}, 'axis must be either 0 (along height) or 1 (along width)'
        self.axis = axis
        inner_dim = dim_head * heads
        self.seq_len = seq_len
        self.heads = heads
        self.scale = dim_head ** -0.5
        self.image_size = image_size
        self.stable = stable
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        )
    def forward(self, x, mask = None, rotary_pos_emb = None):
        b, n, _, h, img_size, axis, seq_len, device = *x.shape, self.heads, self.image_size, self.axis, self.seq_len, x.device
        softmax = torch.softmax if not self.stable else stable_softmax
        img_seq_len = img_size ** 2
        text_len = seq_len + 1 - img_seq_len
        # pad the sequence out to the full length; mask covers the text prefix
        padding = seq_len - n + 1
        mask = default(mask, lambda: torch.ones(b, text_len, device = device).bool())
        x = F.pad(x, (0, 0, 0, padding), value = 0)
        mask = mask[:, :text_len]
        # derive queries / keys / values (heads folded into the batch dim)
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), qkv)
        if exists(rotary_pos_emb):
            q, k, v = apply_pos_emb(rotary_pos_emb, (q, k, v))
        q *= self.scale
        ((q_text, q_img), (k_text, k_img), (v_text, v_img)) = map(lambda t: (t[:, :-img_seq_len], t[:, -img_seq_len:]), (q, k, v))
        # text attention (dense, causal)
        dots_text = einsum('b i d, b j d -> b i j', q_text, k_text)
        mask_value = max_neg_value(dots_text)
        i, j = dots_text.shape[-2:]
        text_causal_mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
        dots_text.masked_fill_(text_causal_mask, mask_value)
        attn_text = softmax(dots_text, dim = -1)
        out_text = einsum('b i j, b j d -> b i d', attn_text, v_text)
        # image attention: fold the chosen axis into a batch-like dim 'x'
        split_axis_einops = 'b (h w) c -> b h w c' if axis == 0 else 'b (h w) c -> b w h c'
        merge_axis_einops = 'b x n d -> b (x n) d' if axis == 0 else 'b x n d -> b (n x) d'
        # split out axis
        q_img, k_img, v_img = map(lambda t: rearrange(t, split_axis_einops, h = img_size), (q_img, k_img, v_img))
        # similarity within the axis, and image-to-text
        dots_image_to_image = einsum('b x i d, b x j d -> b x i j', q_img, k_img)
        dots_image_to_text = einsum('b x i d, b j d -> b x i j', q_img, k_text)
        dots = torch.cat((dots_image_to_text, dots_image_to_image), dim = -1)
        # mask so image has full attention to text, but causal along axis
        bh, x, i, j = dots.shape
        causal_mask = torch.ones(i, img_size, device = device).triu_(img_size - i + 1).bool()
        causal_mask = repeat(causal_mask, 'i j -> b x i j', b = bh, x = x)
        mask = repeat(mask, 'b j -> (b h) x i j', h = h, x = x, i = i)
        mask = torch.cat((~mask, causal_mask), dim = -1)
        dots.masked_fill_(mask, mask_value)
        # attention.
        attn = softmax(dots, dim = -1)
        # aggregate text and axial-image contributions separately, then sum
        attn_image_to_text, attn_image_to_image = attn[..., :text_len], attn[..., text_len:]
        out_image_to_image = einsum('b x i j, b x j d -> b x i d', attn_image_to_image, v_img)
        out_image_to_text = einsum('b x i j, b j d -> b x i d', attn_image_to_text, v_text)
        out_image = out_image_to_image + out_image_to_text
        # merge back axis
        out_image = rearrange(out_image, merge_axis_einops, x = img_size)
        # combine attended values for both text and image
        out = torch.cat((out_text, out_image), dim = 1)
        out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
        out = self.to_out(out)
        return out[:, :n]
# microsoft sparse attention CUDA kernel
class SparseAttention(Attention):
    """Attention backed by Microsoft DeepSpeed's sparse-attention CUDA kernel.

    Reuses the projections from `Attention` but delegates the actual
    attention computation to `SparseSelfAttention` with a variable sparsity
    config; text blocks (the first `text_seq_len` positions) are global.
    """
    def __init__(
        self,
        *args,
        block_size = 16,
        text_seq_len = 256,
        num_random_blocks = None,
        **kwargs
    ):
        super().__init__(*args, **kwargs)
        # imported lazily so deepspeed is only required when this class is used
        from deepspeed.ops.sparse_attention import SparseSelfAttention, VariableSparsityConfig
        self.block_size = block_size
        num_random_blocks = default(num_random_blocks, self.seq_len // block_size // 4)
        global_block_indices = list(range(ceil(text_seq_len / block_size)))
        self.attn_fn = SparseSelfAttention(
            sparsity_config = VariableSparsityConfig(
                num_heads = self.heads,
                block = self.block_size,
                num_random_blocks = num_random_blocks,
                global_block_indices = global_block_indices,
                attention = 'unidirectional' if self.causal else 'bidirectional'
            ),
            max_seq_length = self.seq_len,
            attn_mask_mode = 'add'
        )
    def forward(self, x, mask = None, rotary_pos_emb = None):
        b, n, _, h, device = *x.shape, self.heads, x.device
        # the kernel requires the sequence length to be a multiple of block_size
        remainder = n % self.block_size
        mask = default(mask, lambda: torch.ones(b, n, device = device).bool())
        if remainder > 0:
            padding = self.block_size - remainder
            x = F.pad(x, (0, 0, 0, padding), value = 0)
            mask = F.pad(mask, (0, padding), value = False)
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
        if exists(rotary_pos_emb):
            q, k, v = apply_pos_emb(rotary_pos_emb, (q, k, v))
        key_pad_mask = None
        if exists(mask):
            # kernel expects True = padded, opposite of our mask convention
            key_pad_mask = ~mask
        attn_mask = None
        if self.causal:
            # additive mask ('add' mode): large negative value on j > i
            i, j = q.shape[-2], k.shape[-2]
            mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
            attn_mask = torch.zeros(i, j, device = device).to(q)
            mask_value = max_neg_value(q) / 2
            attn_mask.masked_fill_(mask, mask_value)
        out = self.attn_fn(q, k, v, attn_mask = attn_mask, key_padding_mask = key_pad_mask)
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)
        return out[:, :n]
| 13,544 | 34.181818 | 165 | py |
DaVinci | DaVinci-main/models/dalle_pytorch/loader.py | from pathlib import Path
from random import randint, choice
import PIL
from torch.utils.data import Dataset
from torchvision import transforms as T
class TextImageDataset(Dataset):
    """Dataset pairing images with caption .txt files matched by filename stem.

    Each item yields (tokenized_caption, image_tensor). Unreadable images or
    caption files without a non-empty line are skipped by resampling another
    index instead of raising.
    """
    def __init__(self,
                 folder,
                 text_len=256,
                 image_size=128,
                 truncate_captions=False,
                 resize_ratio=0.75,
                 tokenizer=None,
                 shuffle=False
                 ):
        """
        @param folder: Folder containing images and text files matched by their paths' respective "stem"
        @param truncate_captions: Rather than throw an exception, captions which are too long will be truncated.
        """
        super().__init__()
        self.shuffle = shuffle
        path = Path(folder)
        text_files = [*path.glob('**/*.txt')]
        image_files = [
            *path.glob('**/*.png'), *path.glob('**/*.jpg'),
            *path.glob('**/*.jpeg'), *path.glob('**/*.bmp')
        ]
        text_files = {text_file.stem: text_file for text_file in text_files}
        image_files = {image_file.stem: image_file for image_file in image_files}
        # keep only stems that have both an image and a caption file
        keys = (image_files.keys() & text_files.keys())
        self.keys = list(keys)
        self.text_files = {k: v for k, v in text_files.items() if k in keys}
        self.image_files = {k: v for k, v in image_files.items() if k in keys}
        self.text_len = text_len
        self.truncate_captions = truncate_captions
        self.resize_ratio = resize_ratio
        self.tokenizer = tokenizer
        # force RGB, random crop (no aspect distortion), then to tensor
        self.image_transform = T.Compose([
            T.Lambda(lambda img: img.convert('RGB')
                     if img.mode != 'RGB' else img),
            T.RandomResizedCrop(image_size,
                                scale=(self.resize_ratio, 1.),
                                ratio=(1., 1.)),
            T.ToTensor()
        ])
    def __len__(self):
        return len(self.keys)
    def random_sample(self):
        # uniformly pick any item (used to replace a bad sample when shuffling)
        return self.__getitem__(randint(0, self.__len__() - 1))
    def sequential_sample(self, ind):
        # next item, wrapping back to index 0 at the end
        if ind >= self.__len__() - 1:
            return self.__getitem__(0)
        return self.__getitem__(ind + 1)
    def skip_sample(self, ind):
        # fall back to another sample when the current one is unusable
        if self.shuffle:
            return self.random_sample()
        return self.sequential_sample(ind=ind)
    def __getitem__(self, ind):
        key = self.keys[ind]
        text_file = self.text_files[key]
        image_file = self.image_files[key]
        descriptions = text_file.read_text().split('\n')
        descriptions = list(filter(lambda t: len(t) > 0, descriptions))
        try:
            # pick one caption line at random; choice raises on an empty list
            description = choice(descriptions)
        except IndexError as zero_captions_in_file_ex:
            print(f"An exception occurred trying to load file {text_file}.")
            print(f"Skipping index {ind}")
            return self.skip_sample(ind)
        tokenized_text = self.tokenizer.tokenize(
            description,
            self.text_len,
            truncate_text=self.truncate_captions
        ).squeeze(0)
        try:
            image_tensor = self.image_transform(PIL.Image.open(image_file))
        except (PIL.UnidentifiedImageError, OSError) as corrupt_image_exceptions:
            print(f"An exception occurred trying to load file {image_file}.")
            print(f"Skipping index {ind}")
            return self.skip_sample(ind)
        # Success
        return tokenized_text, image_tensor
| 3,456 | 33.57 | 112 | py |
DaVinci | DaVinci-main/models/dalle_pytorch/distributed_backends/deepspeed_backend.py | import json
import os
import torch
from .distributed_backend import DistributedBackend
class DeepSpeedBackend(DistributedBackend):
    """Distributed backend using the DeepSpeed engine."""
    BACKEND_MODULE_NAME = 'deepspeed'
    BACKEND_NAME = 'DeepSpeed'
    def wrap_arg_parser(self, parser):
        # When deepspeed is not importable, still accept (and ignore) the
        # --deepspeed flag so shared launch scripts keep working.
        if not self.has_backend():
            parser.add_argument(
                '--deepspeed',
                type=lambda _: False,
                help=(
                    'whether to use DeepSpeed '
                    "(ignored since it's not available)"
                ),
            )
        else:
            parser = self.backend_module.add_config_arguments(parser)
        parser.add_argument(
            '--local_rank',
            type=int,
            default=-1,
            help='local rank passed from distributed launcher',
        )
        return parser
    def _initialize(self):
        # set up torch.distributed via deepspeed and pin this process's GPU
        self.backend_module.init_distributed()
        if torch.cuda.is_available():
            torch.cuda.set_device(self._get_local_rank())
    @staticmethod
    def _require_torch_distributed_init():
        """Raise an error when `torch.distributed` has not been
        initialized yet.
        """
        assert torch.distributed.is_initialized(), \
            ('`torch.distributed` is not initialized; please call '
             '`DeepSpeedBackend.initialize` at the start of your script')
    def _get_world_size(self):
        self._require_torch_distributed_init()
        return torch.distributed.get_world_size()
    def _get_rank(self):
        self._require_torch_distributed_init()
        return torch.distributed.get_rank()
    def _get_local_rank(self):
        # local rank comes from the launcher's environment, not a collective
        self._require_torch_distributed_init()
        return int(os.environ['LOCAL_RANK'])
    def _local_barrier(self):
        self._require_torch_distributed_init()
        torch.distributed.barrier()
    def _check_args(self, args, optimizer, lr_scheduler, kwargs):
        """Return an appropriate optimizer and learning rate scheduler
        after checking the values passed to `distribute`.
        """
        self._check_argvs(args, optimizer, lr_scheduler, kwargs)
        (optimizer, lr_scheduler) = self._check_config(
            args, optimizer, lr_scheduler, kwargs)
        return (optimizer, lr_scheduler)
    def _check_argvs(self, args, optimizer, lr_scheduler, kwargs):
        """Apply several sanity checks to the given command
        line arguments.
        """
        has_json_config = (hasattr(args, 'deepspeed_config')
                           and args.deepspeed_config is not None)
        has_dict_config = 'config_params' in kwargs
        if (
                # No config given
                (not has_json_config and not has_dict_config)
                # JSON config file does not exist
                or (not has_dict_config
                    and not os.path.isfile(args.deepspeed_config))
        ):
            # Let DeepSpeed handle these argument errors.
            return
        if not args.deepspeed:
            print(
                'WARNING: DeepSpeed backend was selected; setting '
                '`args.deepspeed = True`'
            )
            args.deepspeed = True
        if has_json_config and has_dict_config:
            print(
                'WARNING: DeepSpeed config was given as both JSON file and '
                'Python dictionary. Python dictionary takes precedence.'
            )
    def _check_config(self, args, optimizer, lr_scheduler, kwargs):
        """Return an appropriate optimizer and learning rate scheduler
        for the DeepSpeed configuration.

        Values configured inside the DeepSpeed config take precedence over
        the ones passed as keyword arguments, which are dropped (set to
        None) with a warning.
        """
        if 'config_params' in kwargs:
            config = kwargs['config_params']
        else:
            with open(args.deepspeed_config, 'r') as json_config_file:
                config = json.load(json_config_file)
        if 'optimizer' in config and optimizer is not None:
            print(
                'WARNING: Optimizer encountered in both DeepSpeed config and '
                'keyword arguments. Optimizer in DeepSpeed config '
                'takes precedence.'
            )
            optimizer = None
        if 'scheduler' in config and lr_scheduler is not None:
            print(
                'WARNING: Learning rate scheduler encountered in both '
                'DeepSpeed config and keyword arguments. Learning rate '
                'scheduler in DeepSpeed config takes precedence.'
            )
            # For the LR scheduler, the JSON config already has
            # precedence. We do this for forward compatibility.
            lr_scheduler = None
        return (optimizer, lr_scheduler)
    def _distribute(
            self,
            args=None,
            model=None,
            optimizer=None,
            model_parameters=None,
            training_data=None,
            lr_scheduler=None,
            **kwargs,
    ):
        """Return a distributed model engine, optimizer, dataloader, and
        learning rate scheduler. These are obtained by wrapping the
        given values with the backend.
        For the other or other possible arguments,
        see `deepspeed.initialize`.
        """
        (optimizer, lr_scheduler) = self._check_args(
            args, optimizer, lr_scheduler, kwargs)
        return self.backend_module.initialize(
            args=args,
            model=model,
            optimizer=optimizer,
            model_parameters=model_parameters,
            training_data=training_data,
            lr_scheduler=lr_scheduler,
            **kwargs,
        )
    def _average_all(self, tensor):
        # Average `tensor` across all ranks and return the result.
        self._require_torch_distributed_init()
        # We copy because modification happens in-place
        averaged = tensor.detach().clone()
        # We use `all_reduce` because it is better supported than `reduce`
        torch.distributed.all_reduce(averaged, torch.distributed.ReduceOp.SUM)
        return averaged / self.get_world_size()
| 5,987 | 33.813953 | 78 | py |
DaVinci | DaVinci-main/models/dalle_pytorch/distributed_backends/horovod_backend.py | import torch
from .distributed_backend import DistributedBackend
class HorovodBackend(DistributedBackend):
    """Distributed backend using Horovod."""
    BACKEND_MODULE_NAME = 'horovod.torch'
    BACKEND_NAME = 'Horovod'
    def wrap_arg_parser(self, parser):
        # Horovod requires no extra command line arguments
        return parser
    def check_batch_size(self, batch_size):
        # Horovod uses the local batch size to determine the effective
        # batch size.
        pass
    def _initialize(self):
        self.backend_module.init()
        # pin this process to its local GPU, if any
        if torch.cuda.is_available():
            torch.cuda.set_device(self._get_local_rank())
    def _get_world_size(self):
        return self.backend_module.size()
    def _get_rank(self):
        return self.backend_module.rank()
    def _get_local_rank(self):
        return self.backend_module.local_rank()
    def _local_barrier(self):
        # Actually a global barrier but works for our purposes.
        self.backend_module.join()
    def _distribute(
            self,
            _args=None,
            model=None,
            optimizer=None,
            _model_parameters=None,
            training_data=None,
            lr_scheduler=None,
            **_kwargs,
    ):
        # wrap the optimizer for allreduce gradient averaging, then make all
        # workers start from the root rank's parameters and optimizer state
        optimizer = self.backend_module.DistributedOptimizer(optimizer)
        self.backend_module.broadcast_parameters(
            model.state_dict(), root_rank=self.ROOT_RANK)
        self.backend_module.broadcast_optimizer_state(
            optimizer, root_rank=self.ROOT_RANK)
        return (model, optimizer, training_data, lr_scheduler)
    def _average_all(self, tensor):
        # Reduce op is average by default
        averaged = self.backend_module.allreduce(tensor)
        return averaged
| 1,703 | 27.881356 | 71 | py |
DaVinci | DaVinci-main/models/DALLE-pytorch/train_dalle.py | import argparse
from pathlib import Path
import time
from glob import glob
import os
import shutil
import torch
import wandb # Quit early if user doesn't have wandb installed.
from torch.nn.utils import clip_grad_norm_
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from dalle_pytorch import OpenAIDiscreteVAE, VQGanVAE, DiscreteVAE, DALLE
from dalle_pytorch import distributed_utils
from dalle_pytorch.loader import TextImageDataset
from dalle_pytorch.tokenizer import tokenizer, HugTokenizer, ChineseTokenizer, YttmTokenizer
# libraries needed for webdataset support
import webdataset as wds
from torchvision import transforms as T
from PIL import Image
from io import BytesIO
# argument parsing
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--vae_path', type=str,
help='path to your trained discrete VAE')
group.add_argument('--dalle_path', type=str,
help='path to your partially trained DALL-E')
parser.add_argument('--vqgan_model_path', type=str, default = None,
help='path to your trained VQGAN weights. This should be a .ckpt file. (only valid when taming option is enabled)')
parser.add_argument('--vqgan_config_path', type=str, default = None,
help='path to your trained VQGAN config. This should be a .yaml file. (only valid when taming option is enabled)')
parser.add_argument('--image_text_folder', type=str, required=True,
help='path to your folder of images and text for learning the DALL-E')
parser.add_argument('--wds', type = str, default='',
help = 'Comma separated list of WebDataset (1) image and (2) text column names. Must contain 2 values, e.g. img,cap.')
parser.add_argument('--truncate_captions', dest='truncate_captions', action='store_true',
help='Captions passed in which exceed the max token length will be truncated if this is set.')
parser.add_argument('--random_resize_crop_lower_ratio', dest='resize_ratio', type=float, default=0.75,
help='Random resized crop lower ratio')
parser.add_argument('--chinese', dest='chinese', action='store_true')
parser.add_argument('--taming', dest='taming', action='store_true')
parser.add_argument('--hug', dest='hug', action='store_true')
parser.add_argument('--bpe_path', type=str,
help='path to your BPE json file')
parser.add_argument('--dalle_output_file_name', type=str, default = "dalle",
help='output_file_name')
parser.add_argument('--fp16', action='store_true',
help='(experimental) - Enable DeepSpeed 16 bit precision. Reduces VRAM.')
parser.add_argument('--amp', action='store_true',
help='Apex "O1" automatic mixed precision. More stable than 16 bit precision. Can\'t be used in conjunction with deepspeed zero stages 1-3.')
parser.add_argument('--wandb_name', default='dalle_train_transformer',
help='Name W&B will use when saving results.\ne.g. `--wandb_name "coco2017-full-sparse"`')
parser.add_argument('--wandb_entity', default=None,
help='(optional) Name of W&B team/entity to log to.')
parser.add_argument('--stable_softmax', dest='stable_softmax', action='store_true',
help='Prevent values from becoming too large during softmax. Helps with stability in fp16 and Mixture of Quantization training.')
parser = distributed_utils.wrap_arg_parser(parser)
train_group = parser.add_argument_group('Training settings')
train_group.add_argument('--flops_profiler', dest = 'flops_profiler', action='store_true', help = 'Exits after printing detailed flops/runtime analysis of forward/backward')
train_group.add_argument('--epochs', default = 20, type = int, help = 'Number of epochs')
train_group.add_argument('--save_every_n_steps', default = 1000, type = int, help = 'Save a checkpoint every n steps')
train_group.add_argument('--keep_n_checkpoints', default = None, type = int, help = '(Careful) Deletes old deepspeed checkpoints if there are more than n')
train_group.add_argument('--batch_size', default = 4, type = int, help = 'Batch size')
train_group.add_argument('--ga_steps', default = 1, type = int, help = 'Number of steps to accumulate gradients across per each iteration. DeepSpeed only.')
train_group.add_argument('--learning_rate', default = 3e-4, type = float, help = 'Learning rate')
train_group.add_argument('--clip_grad_norm', default = 0.5, type = float, help = 'Clip gradient norm')
train_group.add_argument('--lr_decay', dest = 'lr_decay', action = 'store_true')
model_group = parser.add_argument_group('Model settings')
model_group.add_argument('--dim', default = 512, type = int, help = 'Model dimension')
model_group.add_argument('--text_seq_len', default = 256, type = int, help = 'Text sequence length')
model_group.add_argument('--depth', default = 2, type = int, help = 'Model depth')
model_group.add_argument('--heads', default = 8, type = int, help = 'Model number of heads')
model_group.add_argument('--dim_head', default = 64, type = int, help = 'Model head dimension')
train_group.add_argument('--ff_dropout', default = 0.0, type = float, help = 'Feed forward dropout.')
train_group.add_argument('--attn_dropout', default = 0.0, type = float, help = 'Feed forward dropout.')
model_group.add_argument('--reversible', dest = 'reversible', action='store_true')
model_group.add_argument('--loss_img_weight', default = 7, type = int, help = 'Image loss weight')
model_group.add_argument('--attn_types', default = 'full', type = str, help = 'comma separated list of attention types. attention type can be: full or sparse or axial_row or axial_col or conv_like.')
model_group.add_argument('--shift_tokens', help = 'Use the shift tokens feature', action = 'store_true')
model_group.add_argument('--rotary_emb', help = 'Use rotary embeddings', action = 'store_true')
args = parser.parse_args()
# helpers
def exists(val):
    """Predicate: has *val* been provided (i.e. is it not None)?"""
    missing = val is None
    return not missing
def get_trainable_params(model):
    """Collect the parameters of *model* that still receive gradients."""
    trainable = []
    for param in model.parameters():
        if param.requires_grad:
            trainable.append(param)
    return trainable
def get_pkg_version(package='dalle_pytorch'):
    """Return the installed version string of *package*.

    Defaults to ``dalle_pytorch`` so existing zero-argument callers are
    unaffected.  Uses the standard-library ``importlib.metadata`` instead
    of the deprecated ``pkg_resources`` API (which also has a heavy
    import-time cost).

    Raises:
        importlib.metadata.PackageNotFoundError: if *package* is not
            installed.
    """
    from importlib.metadata import version
    return version(package)
def cp_path_to_dir(cp_path, tag):
    """Convert a checkpoint path to a directory path with *tag* inserted.

    ``model.pt`` + tag ``ds`` becomes ``model-ds-cp``.  If *cp_path* is
    already a directory it is returned unchanged.
    """
    cp_path = cp_path if isinstance(cp_path, Path) else Path(cp_path)
    if cp_path.is_dir():
        return cp_path
    stem_path = cp_path.parent / cp_path.stem
    return Path(f'{stem_path}-{tag}-cp')
# constants
WEBDATASET_IMAGE_TEXT_COLUMNS = tuple(args.wds.split(','))
ENABLE_WEBDATASET = True if len(WEBDATASET_IMAGE_TEXT_COLUMNS) == 2 else False
DALLE_OUTPUT_FILE_NAME = args.dalle_output_file_name + ".pt"
VAE_PATH = args.vae_path
VQGAN_MODEL_PATH = args.vqgan_model_path
VQGAN_CONFIG_PATH = args.vqgan_config_path
DALLE_PATH = args.dalle_path
RESUME = exists(DALLE_PATH)
EPOCHS = args.epochs
BATCH_SIZE = args.batch_size
LEARNING_RATE = args.learning_rate
GRAD_CLIP_NORM = args.clip_grad_norm
LR_DECAY = args.lr_decay
SAVE_EVERY_N_STEPS = args.save_every_n_steps
KEEP_N_CHECKPOINTS = args.keep_n_checkpoints
MODEL_DIM = args.dim
TEXT_SEQ_LEN = args.text_seq_len
DEPTH = args.depth
HEADS = args.heads
DIM_HEAD = args.dim_head
REVERSIBLE = args.reversible
LOSS_IMG_WEIGHT = args.loss_img_weight
FF_DROPOUT = args.ff_dropout
ATTN_DROPOUT = args.attn_dropout
STABLE = args.stable_softmax
SHIFT_TOKENS = args.shift_tokens
ROTARY_EMB = args.rotary_emb
ATTN_TYPES = tuple(args.attn_types.split(','))
DEEPSPEED_CP_AUX_FILENAME = 'auxiliary.pt'
if not ENABLE_WEBDATASET:
# quit early if you used the wrong folder name
assert Path(args.image_text_folder).exists(), f'The path {args.image_text_folder} was not found.'
else:
# quit early if no tar files were found
if Path(args.image_text_folder).is_dir():
DATASET = [str(p) for p in Path(args.image_text_folder).glob("**/*") if ".tar" in str(p).lower()] # .name
assert len(DATASET) > 0, 'The directory ({}) does not contain any WebDataset/.tar files.'.format(args.image_text_folder)
print('Found {} WebDataset .tar(.gz) file(s) under given path {}!'.format(len(DATASET), args.image_text_folder))
elif ('http://' in args.image_text_folder.lower()) | ('https://' in args.image_text_folder.lower()):
DATASET = f"pipe:curl -L -s {args.image_text_folder} || true"
print('Found {} http(s) link under given path!'.format(len(DATASET), args.image_text_folder))
elif 'gs://' in args.image_text_folder.lower():
DATASET = f"pipe:gsutil cat {args.image_text_folder} || true"
print('Found {} GCS link under given path!'.format(len(DATASET), args.image_text_folder))
elif '.tar' in args.image_text_folder:
DATASET = args.image_text_folder
print('Found WebDataset .tar(.gz) file under given path {}!'.format(args.image_text_folder))
else:
raise Exception('No folder, no .tar(.gz) and no url pointing to tar files provided under {}.'.format(args.image_text_folder))
# initialize distributed backend
distr_backend = distributed_utils.set_backend_from_args(args)
distr_backend.initialize()
using_deepspeed = \
distributed_utils.using_backend(distributed_utils.DeepSpeedBackend)
is_root = distr_backend.is_root_worker()
# tokenizer
if exists(args.bpe_path):
klass = HugTokenizer if args.hug else YttmTokenizer
tokenizer = klass(args.bpe_path)
elif args.chinese:
tokenizer = ChineseTokenizer()
# reconstitute vae
if RESUME:
dalle_path = Path(DALLE_PATH)
if using_deepspeed:
cp_dir = cp_path_to_dir(dalle_path, 'ds')
assert cp_dir.is_dir(), \
f'DeepSpeed checkpoint directory {cp_dir} not found'
dalle_path = cp_dir / DEEPSPEED_CP_AUX_FILENAME
else:
assert dalle_path.exists(), 'DALL-E model file does not exist'
loaded_obj = torch.load(str(dalle_path), map_location='cpu')
dalle_params, vae_params, weights = loaded_obj['hparams'], loaded_obj['vae_params'], loaded_obj['weights']
opt_state = loaded_obj.get('opt_state')
scheduler_state = loaded_obj.get('scheduler_state')
if vae_params is not None:
vae = DiscreteVAE(**vae_params)
elif args.taming:
vae = VQGanVAE(VQGAN_MODEL_PATH, VQGAN_CONFIG_PATH)
else:
vae = OpenAIDiscreteVAE()
IMAGE_SIZE = vae.image_size
resume_epoch = loaded_obj.get('epoch', 0)
else:
if exists(VAE_PATH):
vae_path = Path(VAE_PATH)
assert vae_path.exists(), 'VAE model file does not exist'
assert not vae_path.is_dir(), \
('Cannot load VAE model from directory; please use a '
'standard *.pt checkpoint. '
'Currently, merging a DeepSpeed-partitioned VAE into a DALLE '
'model is not supported.')
loaded_obj = torch.load(str(vae_path))
vae_params, weights = loaded_obj['hparams'], loaded_obj['weights']
vae = DiscreteVAE(**vae_params)
vae.load_state_dict(weights)
else:
if is_root:
print('using pretrained VAE for encoding images to tokens')
vae_params = None
if args.taming:
vae = VQGanVAE(VQGAN_MODEL_PATH, VQGAN_CONFIG_PATH)
else:
vae = OpenAIDiscreteVAE()
IMAGE_SIZE = vae.image_size
dalle_params = dict(
num_text_tokens=tokenizer.vocab_size,
text_seq_len=TEXT_SEQ_LEN,
dim=MODEL_DIM,
depth=DEPTH,
heads=HEADS,
dim_head=DIM_HEAD,
reversible=REVERSIBLE,
loss_img_weight=LOSS_IMG_WEIGHT,
attn_types=ATTN_TYPES,
ff_dropout=FF_DROPOUT,
attn_dropout=ATTN_DROPOUT,
stable=STABLE,
shift_tokens=SHIFT_TOKENS,
rotary_emb=ROTARY_EMB,
)
resume_epoch = 0
# configure OpenAI VAE for float16s
if isinstance(vae, OpenAIDiscreteVAE) and args.fp16:
vae.enc.blocks.output.conv.use_float16 = True
# helpers
def group_weight(model):
    """Partition *model*'s parameters into optimizer groups.

    Transformer biases and normalisation weights are exempt from weight
    decay; every other parameter goes into the decaying group.  The assert
    guarantees the two groups jointly cover every parameter.
    """
    group_decay, group_no_decay = [], []
    for name, param in model.named_parameters():
        if 'transformer' in name and ('bias' in name or 'norm' in name):
            group_no_decay.append(param)
        else:
            group_decay.append(param)
    assert len(list(model.parameters())) == len(group_decay) + len(group_no_decay)
    return [
        dict(params=group_decay),
        dict(params=group_no_decay, weight_decay=.0),
    ]
# create dataset and dataloader
is_shuffle = not distributed_utils.using_backend(distributed_utils.HorovodBackend)
imagepreproc = T.Compose([
T.Lambda(lambda img: img.convert('RGB')
if img.mode != 'RGB' else img),
T.RandomResizedCrop(IMAGE_SIZE,
scale=(args.resize_ratio, 1.),
ratio=(1., 1.)),
T.ToTensor(),
])
def imagetransform(b):
    """Decode raw image bytes from a WebDataset sample into a PIL image."""
    buffer = BytesIO(b)
    return Image.open(buffer)
def tokenize(s):
    """Tokenize a UTF-8 caption byte string into a 1-D token tensor of
    length TEXT_SEQ_LEN (truncating long captions when configured)."""
    text = s.decode('utf-8')
    tokens = tokenizer.tokenize(
        text,
        TEXT_SEQ_LEN,
        truncate_text=args.truncate_captions,
    )
    return tokens.squeeze(0)
if ENABLE_WEBDATASET:
DATASET_SIZE = int(1e9) # You need to set a nominal length for the Dataset in order to avoid warnings from DataLoader
myimg, mycap = WEBDATASET_IMAGE_TEXT_COLUMNS
image_text_mapping = {
myimg: imagetransform,
mycap: tokenize
}
image_mapping = {
myimg: imagepreproc
}
def filter_dataset(item):
    """Keep only WebDataset samples that carry both a caption and an
    image key (e.g. C@H rarely ships without a caption)."""
    return mycap in item and myimg in item
w_dataset = wds.WebDataset(DATASET, handler=wds.warn_and_continue)
filtered_dataset = w_dataset.select(filter_dataset)
ds = filtered_dataset.map_dict(**image_text_mapping).map_dict(**image_mapping).to_tuple(mycap, myimg).batched(BATCH_SIZE / distr_backend.get_world_size(), partial=True)
else:
ds = TextImageDataset(
args.image_text_folder,
text_len=TEXT_SEQ_LEN,
image_size=IMAGE_SIZE,
resize_ratio=args.resize_ratio,
truncate_captions=args.truncate_captions,
tokenizer=tokenizer,
shuffle=is_shuffle,
)
assert len(ds) > 0, 'dataset is empty'
if is_root:
if not ENABLE_WEBDATASET:
print(f'{len(ds)} image-text pairs found for training')
# data sampler
data_sampler = None
if not is_shuffle:
data_sampler = torch.utils.data.distributed.DistributedSampler(
ds,
num_replicas=distr_backend.get_world_size(),
rank=distr_backend.get_rank()
)
# WebLoader for WebDataset and DeepSpeed compatibility
if ENABLE_WEBDATASET:
dl = wds.WebLoader(ds, batch_size=None, shuffle=False, num_workers=4) # optionally add num_workers=2 (n) argument
number_of_batches = DATASET_SIZE // (BATCH_SIZE * distr_backend.get_world_size())
dl = dl.slice(number_of_batches)
dl.length = number_of_batches
else:
# Regular DataLoader for image-text-folder datasets
dl = DataLoader(ds, batch_size=BATCH_SIZE, shuffle=is_shuffle, drop_last=True, sampler=data_sampler)
# initialize DALL-E
dalle = DALLE(vae=vae, **dalle_params)
if not using_deepspeed:
if args.fp16:
dalle = dalle.half()
dalle = dalle.cuda()
if RESUME and not using_deepspeed:
dalle.load_state_dict(weights)
# optimizer
opt = Adam(get_trainable_params(dalle), lr=LEARNING_RATE)
if RESUME and opt_state:
opt.load_state_dict(opt_state)
# scheduler
scheduler = None
if LR_DECAY:
scheduler = ReduceLROnPlateau(
opt,
mode="min",
factor=0.5,
patience=10,
cooldown=10,
min_lr=1e-6,
verbose=True,
)
if RESUME and scheduler_state:
scheduler.load_state_dict(scheduler_state)
# experiment tracker
if is_root:
model_config = dict(
depth=DEPTH,
heads=HEADS,
dim_head=DIM_HEAD
)
run = wandb.init(
project=args.wandb_name,
entity=args.wandb_entity,
resume=False,
config=model_config,
)
# distribute
distr_backend.check_batch_size(BATCH_SIZE)
deepspeed_config = {
'train_batch_size': BATCH_SIZE,
'gradient_accumulation_steps': args.ga_steps,
'gradient_clipping': GRAD_CLIP_NORM,
'fp16': {
'enabled': args.fp16,
},
'amp': {
'enabled': args.amp,
'opt_level': 'O1',
},
"flops_profiler": {
"enabled": args.flops_profiler,
"profile_step": 200,
"module_depth": -1,
"top_modules": 1,
"detailed": True,
"output_file": None # TODO Can't get this to work.
},
}
if deepspeed_config.get('zero_optimization', {}).get('stage', 0) >= 2:
print(f"Checkpoints made with DeepSpeed ZeRO Stages 2 and 3 will be stored in deepspeed checkpoint folder")
print(f"As such, they will require DeepSpeed as a dependency in order to resume from or generate with.")
print("See the deespeed conversion script for details on how to convert your ZeRO stage 2/3 checkpoint to a single file.")
print("If using a single GPU, consider running with apex automatic mixed precision instead for a similar speedup to ZeRO.")
time.sleep(2)
(distr_dalle, distr_opt, distr_dl, distr_scheduler) = distr_backend.distribute(
args=args,
model=dalle,
optimizer=opt,
model_parameters=get_trainable_params(dalle),
training_data=(
(None if ENABLE_WEBDATASET else ds)
if using_deepspeed
else dl
),
# Do not pass the LR scheduler to DeepSpeed so we can manually
# advance it.
lr_scheduler=scheduler if LR_DECAY and not using_deepspeed else None,
config_params=deepspeed_config,
)
# Prefer scheduler in `deepspeed_config`.
if LR_DECAY and distr_scheduler is None:
distr_scheduler = scheduler
avoid_model_calls = using_deepspeed and args.fp16
if RESUME and using_deepspeed:
distr_dalle.load_checkpoint(str(cp_dir))
def save_model(path, epoch=0):
    """Persist the DALL-E checkpoint plus optimizer/scheduler state.

    Under DeepSpeed, the partitioned checkpoint is written to a sibling
    ``-ds-cp`` directory (pruning old ones when KEEP_N_CHECKPOINTS is set)
    and the root worker writes an auxiliary metadata file there; for ZeRO
    stage >= 2 no standard single-file checkpoint can be produced.
    Otherwise the root worker writes a normal ``.pt`` file at *path*.
    """
    save_obj = {
        'hparams': dalle_params,
        'vae_params': vae_params,
        'epoch': epoch,
        'version': get_pkg_version(),
        'vae_class_name': vae.__class__.__name__
    }

    if using_deepspeed:
        cp_dir = cp_path_to_dir(path, 'ds')

        # (Careful) prune the oldest DeepSpeed checkpoints beyond the cap.
        if KEEP_N_CHECKPOINTS is not None and is_root:
            checkpoints = sorted(glob(str(cp_dir / "global*")), key=os.path.getmtime, reverse=True)
            for checkpoint in checkpoints[KEEP_N_CHECKPOINTS:]:
                shutil.rmtree(checkpoint)

        distr_dalle.save_checkpoint(cp_dir, client_state=save_obj)

        if not is_root:
            return

        # Save auxiliary values so we can reuse the standard routine
        # for loading.
        save_obj = {
            **save_obj,
            # Save a nonsense value that directs the user to
            # further help.
            'weights': (
                'To get a working standard checkpoint, '
                'look into consolidating DeepSpeed checkpoints.'
            ),
        }
        torch.save(save_obj, str(cp_dir / DEEPSPEED_CP_AUX_FILENAME))

        # see https://github.com/lucidrains/DALLE-pytorch/wiki/DeepSpeed-Checkpoints
        if deepspeed_config.get('zero_optimization', {}).get('stage', 0) >= 2:
            return

    if not is_root:
        return

    save_obj = {
        **save_obj,
        'weights': dalle.state_dict(),
        'opt_state': opt.state_dict(),
        'scheduler_state': (scheduler.state_dict() if scheduler else None)
    }
    torch.save(save_obj, path)
def save_artifact(model_config, model_path, name='trained-dalle'):
    """Upload the checkpoint file at *model_path* to W&B as a versioned
    model artifact annotated with *model_config*."""
    artifact = wandb.Artifact(name, type='model', metadata=dict(model_config))
    artifact.add_file(model_path)
    run.log_artifact(artifact)
# training
# Saves a checkpoint before training begins to fail early when mis-configured.
# See https://github.com/lucidrains/DALLE-pytorch/wiki/DeepSpeed-Checkpoints
save_model(DALLE_OUTPUT_FILE_NAME, epoch=resume_epoch)
for epoch in range(resume_epoch, EPOCHS):
if data_sampler:
data_sampler.set_epoch(epoch)
for i, (text, images) in enumerate((dl if ENABLE_WEBDATASET else distr_dl)):
if i % 10 == 0 and is_root:
t = time.time()
if args.fp16:
images = images.half()
text, images = map(lambda t: t.cuda(), (text, images))
loss = distr_dalle(text, images, return_loss=True)
if using_deepspeed:
distr_dalle.backward(loss)
distr_dalle.step()
# Gradients are automatically zeroed after the step
else:
loss.backward()
clip_grad_norm_(distr_dalle.parameters(), GRAD_CLIP_NORM)
distr_opt.step()
distr_opt.zero_grad()
# Collective loss, averaged
avg_loss = distr_backend.average_all(loss)
log = {}
if i % 10 == 0 and is_root:
print(epoch, i, f'loss - {avg_loss.item()}')
log = {
**log,
'epoch': epoch,
'iter': i,
'loss': avg_loss.item()
}
if i % SAVE_EVERY_N_STEPS == 0:
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if i % 100 == 0 and is_root:
sample_text = text[:1]
token_list = sample_text.masked_select(sample_text != 0).tolist()
decoded_text = tokenizer.decode(token_list)
if not avoid_model_calls:
# CUDA index errors when we don't guard this
image = dalle.generate_images(text[:1], filter_thres=0.9) # topk sampling at 0.9
if not avoid_model_calls:
log['image'] = wandb.Image(image, caption=decoded_text)
if i % 10 == 9 and is_root:
sample_per_sec = BATCH_SIZE * 10 / (time.time() - t)
log["sample_per_sec"] = sample_per_sec
print(epoch, i, f'sample_per_sec - {sample_per_sec}')
if i == 201 and args.flops_profiler:
raise StopIteration("Profiler has finished running. Stopping training early.")
if is_root:
wandb.log(log)
if LR_DECAY:
distr_scheduler.step(avg_loss)
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if is_root:
# save trained model to wandb as an artifact every epoch's end
save_artifact(model_config, DALLE_OUTPUT_FILE_NAME)
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if is_root:
wandb.save(DALLE_OUTPUT_FILE_NAME)
save_artifact(model_config, DALLE_OUTPUT_FILE_NAME)
wandb.finish()
| 22,831 | 33.333835 | 199 | py |
DaVinci | DaVinci-main/models/DALLE-pytorch/setup.py | from setuptools import setup, find_packages
# Packaging metadata for the `dalle-pytorch` distribution (legacy
# setup.py style).  `version` must be bumped here for each release.
setup(
name = 'dalle-pytorch',
packages = find_packages(),
include_package_data = True,
version = '1.2.1',
license='MIT',
description = 'DALL-E - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/dalle-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers',
'text-to-image'
],
# Runtime dependencies; pinned only where a known minimum is required.
install_requires=[
'axial_positional_embedding',
'DALL-E',
'einops>=0.3.2',
'ftfy',
'g-mlp-pytorch',
'pillow',
'regex',
'rotary-embedding-torch',
'taming-transformers-rom1504',
'tokenizers',
'torch>=1.6',
'torchvision',
'transformers',
'tqdm',
'youtokentome',
'WebDataset'
],
# Trove classifiers for PyPI browsing/filtering.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| 1,052 | 22.4 | 65 | py |
DaVinci | DaVinci-main/models/DALLE-pytorch/generate.py | import argparse
from pathlib import Path
from tqdm import tqdm
# torch
import torch
from einops import repeat
# vision imports
from PIL import Image
from torchvision.utils import make_grid, save_image
# dalle related classes and utils
from dalle_pytorch import DiscreteVAE, OpenAIDiscreteVAE, VQGanVAE, DALLE
from dalle_pytorch.tokenizer import tokenizer, HugTokenizer, YttmTokenizer, ChineseTokenizer
# argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('--dalle_path', type = str, required = True,
help='path to your trained DALL-E')
parser.add_argument('--vqgan_model_path', type=str, default = None,
help='path to your trained VQGAN weights. This should be a .ckpt file. (only valid when taming option is enabled)')
parser.add_argument('--vqgan_config_path', type=str, default = None,
help='path to your trained VQGAN config. This should be a .yaml file. (only valid when taming option is enabled)')
parser.add_argument('--text', type = str, required = True,
help='your text prompt')
parser.add_argument('--num_images', type = int, default = 128, required = False,
help='number of images')
parser.add_argument('--batch_size', type = int, default = 4, required = False,
help='batch size')
parser.add_argument('--top_k', type = float, default = 0.9, required = False,
help='top k filter threshold')
parser.add_argument('--outputs_dir', type = str, default = './outputs', required = False,
help='output directory')
parser.add_argument('--bpe_path', type = str,
help='path to your huggingface BPE json file')
parser.add_argument('--hug', dest='hug', action = 'store_true')
parser.add_argument('--chinese', dest='chinese', action = 'store_true')
parser.add_argument('--taming', dest='taming', action='store_true')
parser.add_argument('--gentxt', dest='gentxt', action='store_true')
args = parser.parse_args()
# helper fns
def exists(val):
    """Report whether an optional value was supplied (is not None)."""
    is_missing = val is None
    return not is_missing
# tokenizer
if exists(args.bpe_path):
klass = HugTokenizer if args.hug else YttmTokenizer
tokenizer = klass(args.bpe_path)
elif args.chinese:
tokenizer = ChineseTokenizer()
# load DALL-E
dalle_path = Path(args.dalle_path)
assert dalle_path.exists(), 'trained DALL-E must exist'
load_obj = torch.load(str(dalle_path))
dalle_params, vae_params, weights, vae_class_name, version = load_obj.pop('hparams'), load_obj.pop('vae_params'), load_obj.pop('weights'), load_obj.pop('vae_class_name', None), load_obj.pop('version', None)
# friendly print
if exists(version):
print(f'Loading a model trained with DALLE-pytorch version {version}')
else:
print('You are loading a model trained on an older version of DALL-E pytorch - it may not be compatible with the most recent version')
# load VAE
if args.taming:
vae = VQGanVAE(args.vqgan_model_path, args.vqgan_config_path)
elif vae_params is not None:
vae = DiscreteVAE(**vae_params)
else:
vae = OpenAIDiscreteVAE()
assert not (exists(vae_class_name) and vae.__class__.__name__ != vae_class_name), f'you trained DALL-E using {vae_class_name} but are trying to generate with {vae.__class__.__name__} - please make sure you are passing in the correct paths and settings for the VAE to use for generation'
# reconstitute DALL-E
dalle = DALLE(vae = vae, **dalle_params).cuda()
dalle.load_state_dict(weights)
# generate images
image_size = vae.image_size
texts = args.text.split('|')
for j, text in tqdm(enumerate(texts)):
if args.gentxt:
text_tokens, gen_texts = dalle.generate_texts(tokenizer, text=text, filter_thres = args.top_k)
text = gen_texts[0]
else:
text_tokens = tokenizer.tokenize([text], dalle.text_seq_len).cuda()
text_tokens = repeat(text_tokens, '() n -> b n', b = args.num_images)
outputs = []
for text_chunk in tqdm(text_tokens.split(args.batch_size), desc = f'generating images for - {text}'):
output = dalle.generate_images(text_chunk, filter_thres = args.top_k)
outputs.append(output)
outputs = torch.cat(outputs)
# save all images
file_name = text
outputs_dir = Path(args.outputs_dir) / file_name.replace(' ', '_')[:(100)]
outputs_dir.mkdir(parents = True, exist_ok = True)
for i, image in tqdm(enumerate(outputs), desc = 'saving images'):
save_image(image, outputs_dir / f'{i}.jpg', normalize=True)
with open(outputs_dir / 'caption.txt', 'w') as f:
f.write(file_name)
print(f'created {args.num_images} images at "{str(outputs_dir)}"')
| 4,657 | 31.573427 | 286 | py |
DaVinci | DaVinci-main/models/DALLE-pytorch/train_vae.py | import math
from math import sqrt
import argparse
from pathlib import Path
# torch
import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import ExponentialLR
# vision imports
from torchvision import transforms as T
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision.utils import make_grid, save_image
# dalle classes and utils
from dalle_pytorch import distributed_utils
from dalle_pytorch import DiscreteVAE
# argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('--image_folder', type = str, required = True,
help='path to your folder of images for learning the discrete VAE and its codebook')
parser.add_argument('--image_size', type = int, required = False, default = 128,
help='image size')
parser = distributed_utils.wrap_arg_parser(parser)
train_group = parser.add_argument_group('Training settings')
train_group.add_argument('--epochs', type = int, default = 20, help = 'number of epochs')
train_group.add_argument('--batch_size', type = int, default = 8, help = 'batch size')
train_group.add_argument('--learning_rate', type = float, default = 1e-3, help = 'learning rate')
train_group.add_argument('--lr_decay_rate', type = float, default = 0.98, help = 'learning rate decay')
train_group.add_argument('--starting_temp', type = float, default = 1., help = 'starting temperature')
train_group.add_argument('--temp_min', type = float, default = 0.5, help = 'minimum temperature to anneal to')
train_group.add_argument('--anneal_rate', type = float, default = 1e-6, help = 'temperature annealing rate')
train_group.add_argument('--num_images_save', type = int, default = 4, help = 'number of images to save')
model_group = parser.add_argument_group('Model settings')
model_group.add_argument('--num_tokens', type = int, default = 8192, help = 'number of image tokens')
model_group.add_argument('--num_layers', type = int, default = 3, help = 'number of layers (should be 3 or above)')
model_group.add_argument('--num_resnet_blocks', type = int, default = 2, help = 'number of residual net blocks')
model_group.add_argument('--smooth_l1_loss', dest = 'smooth_l1_loss', action = 'store_true')
model_group.add_argument('--emb_dim', type = int, default = 512, help = 'embedding dimension')
model_group.add_argument('--hidden_dim', type = int, default = 256, help = 'hidden dimension')
model_group.add_argument('--kl_loss_weight', type = float, default = 0., help = 'KL loss weight')
args = parser.parse_args()
# constants
IMAGE_SIZE = args.image_size
IMAGE_PATH = args.image_folder
EPOCHS = args.epochs
BATCH_SIZE = args.batch_size
LEARNING_RATE = args.learning_rate
LR_DECAY_RATE = args.lr_decay_rate
NUM_TOKENS = args.num_tokens
NUM_LAYERS = args.num_layers
NUM_RESNET_BLOCKS = args.num_resnet_blocks
SMOOTH_L1_LOSS = args.smooth_l1_loss
EMB_DIM = args.emb_dim
HIDDEN_DIM = args.hidden_dim
KL_LOSS_WEIGHT = args.kl_loss_weight
STARTING_TEMP = args.starting_temp
TEMP_MIN = args.temp_min
ANNEAL_RATE = args.anneal_rate
NUM_IMAGES_SAVE = args.num_images_save
# initialize distributed backend
distr_backend = distributed_utils.set_backend_from_args(args)
distr_backend.initialize()
using_deepspeed = \
distributed_utils.using_backend(distributed_utils.DeepSpeedBackend)
# data
ds = ImageFolder(
IMAGE_PATH,
T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.Resize(IMAGE_SIZE),
T.CenterCrop(IMAGE_SIZE),
T.ToTensor()
])
)
if distributed_utils.using_backend(distributed_utils.HorovodBackend):
data_sampler = torch.utils.data.distributed.DistributedSampler(
ds, num_replicas=distr_backend.get_world_size(),
rank=distr_backend.get_rank())
else:
data_sampler = None
dl = DataLoader(ds, BATCH_SIZE, shuffle = not data_sampler, sampler=data_sampler)
vae_params = dict(
image_size = IMAGE_SIZE,
num_layers = NUM_LAYERS,
num_tokens = NUM_TOKENS,
codebook_dim = EMB_DIM,
hidden_dim = HIDDEN_DIM,
num_resnet_blocks = NUM_RESNET_BLOCKS
)
vae = DiscreteVAE(
**vae_params,
smooth_l1_loss = SMOOTH_L1_LOSS,
kl_div_loss_weight = KL_LOSS_WEIGHT
)
if not using_deepspeed:
vae = vae.cuda()
assert len(ds) > 0, 'folder does not contain any images'
if distr_backend.is_root_worker():
print(f'{len(ds)} images found for training')
# optimizer
opt = Adam(vae.parameters(), lr = LEARNING_RATE)
sched = ExponentialLR(optimizer = opt, gamma = LR_DECAY_RATE)
if distr_backend.is_root_worker():
# weights & biases experiment tracking
import wandb
model_config = dict(
num_tokens = NUM_TOKENS,
smooth_l1_loss = SMOOTH_L1_LOSS,
num_resnet_blocks = NUM_RESNET_BLOCKS,
kl_loss_weight = KL_LOSS_WEIGHT
)
run = wandb.init(
project = 'dalle_train_vae',
job_type = 'train_model',
config = model_config
)
# distribute
distr_backend.check_batch_size(BATCH_SIZE)
deepspeed_config = {'train_batch_size': BATCH_SIZE}
(distr_vae, distr_opt, distr_dl, distr_sched) = distr_backend.distribute(
args=args,
model=vae,
optimizer=opt,
model_parameters=vae.parameters(),
training_data=ds if using_deepspeed else dl,
lr_scheduler=sched if not using_deepspeed else None,
config_params=deepspeed_config,
)
using_deepspeed_sched = False
# Prefer scheduler in `deepspeed_config`.
if distr_sched is None:
distr_sched = sched
elif using_deepspeed:
# We are using a DeepSpeed LR scheduler and want to let DeepSpeed
# handle its scheduling.
using_deepspeed_sched = True
def save_model(path):
    """Persist the VAE hyper-parameters — and, on the root worker, the
    weights — to `path`. Under DeepSpeed a sharded DeepSpeed checkpoint
    directory is written next to `path` as well."""
    checkpoint = {'hparams': vae_params}
    if using_deepspeed:
        # DeepSpeed writes its own checkpoint directory, named after `path`
        # with a '-ds-cp' suffix (extension stripped).
        target = Path(path)
        ds_dir = str(target.parent / target.stem) + '-ds-cp'
        distr_vae.save_checkpoint(ds_dir, client_state=checkpoint)
        # Deliberately fall through so a "normal" torch checkpoint exists too.
    if distr_backend.is_root_worker():
        checkpoint = {**checkpoint, 'weights': vae.state_dict()}
        torch.save(checkpoint, path)
# starting temperature
global_step = 0
temp = STARTING_TEMP
for epoch in range(EPOCHS):
    for i, (images, _) in enumerate(distr_dl):
        images = images.cuda()
        # Forward pass returns (loss, reconstructions) since both
        # return_loss and return_recons are requested.
        loss, recons = distr_vae(
            images,
            return_loss = True,
            return_recons = True,
            temp = temp
        )
        if using_deepspeed:
            # Gradients are automatically zeroed after the step
            distr_vae.backward(loss)
            distr_vae.step()
        else:
            distr_opt.zero_grad()
            loss.backward()
            distr_opt.step()
        logs = {}
        # Every 100 iterations: log sample reconstructions, checkpoint the
        # model, anneal the gumbel temperature and decay the learning rate.
        if i % 100 == 0:
            if distr_backend.is_root_worker():
                k = NUM_IMAGES_SAVE
                with torch.no_grad():
                    # "Hard" reconstructions decode the discrete codebook
                    # indices directly (no gumbel sampling).
                    codes = vae.get_codebook_indices(images[:k])
                    hard_recons = vae.decode(codes)
                images, recons = map(lambda t: t[:k], (images, recons))
                images, recons, hard_recons, codes = map(lambda t: t.detach().cpu(), (images, recons, hard_recons, codes))
                images, recons, hard_recons = map(lambda t: make_grid(t.float(), nrow = int(sqrt(k)), normalize = True, range = (-1, 1)), (images, recons, hard_recons))
                logs = {
                    **logs,
                    'sample images': wandb.Image(images, caption = 'original images'),
                    'reconstructions': wandb.Image(recons, caption = 'reconstructions'),
                    'hard reconstructions': wandb.Image(hard_recons, caption = 'hard reconstructions'),
                    'codebook_indices': wandb.Histogram(codes),
                    'temperature': temp
                }
                # NOTE(review): wandb.save is called before save_model writes
                # ./vae.pt just below — on the very first pass the file may not
                # exist yet; confirm this ordering is intended.
                wandb.save('./vae.pt')
            save_model(f'./vae.pt')
            # temperature anneal
            temp = max(temp * math.exp(-ANNEAL_RATE * global_step), TEMP_MIN)
            # lr decay
            # Do not advance schedulers from `deepspeed_config`.
            if not using_deepspeed_sched:
                distr_sched.step()
        # Collective loss, averaged
        avg_loss = distr_backend.average_all(loss)
        if distr_backend.is_root_worker():
            if i % 10 == 0:
                lr = distr_sched.get_last_lr()[0]
                print(epoch, i, f'lr - {lr:6f} loss - {avg_loss.item()}')
                logs = {
                    **logs,
                    'epoch': epoch,
                    'iter': i,
                    'loss': avg_loss.item(),
                    'lr': lr
                }
            wandb.log(logs)
        global_step += 1
    if distr_backend.is_root_worker():
        # save trained model to wandb as an artifact every epoch's end
        model_artifact = wandb.Artifact('trained-vae', type = 'model', metadata = dict(model_config))
        model_artifact.add_file('vae.pt')
        run.log_artifact(model_artifact)
if distr_backend.is_root_worker():
    # save final vae and cleanup
    save_model('./vae-final.pt')
    wandb.save('./vae-final.pt')
    model_artifact = wandb.Artifact('trained-vae', type = 'model', metadata = dict(model_config))
    model_artifact.add_file('vae-final.pt')
    run.log_artifact(model_artifact)
    wandb.finish()
| 9,491 | 29.037975 | 168 | py |
DaVinci | DaVinci-main/util/checkpointer.py | # Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
from typing import Union, Dict, List, Tuple, Any, Callable
import logging
import os
import re
import time
import torch
from .hdfs_io import hexists, hmkdir, hcopy
from .torch_io import save as hdfs_torch_save
logger = logging.getLogger(__name__)
class Checkpointer:
    """
    Persists training checkpoints and trainer state, primarily to HDFS.
    """
    def __init__(self,
                 serialization_dir: str = ".output",
                 keep_serialized_model_every_num_seconds: int = None,
                 num_serialized_models_to_keep: int = 20) -> None:
        # Directory (possibly an hdfs:// path) all checkpoints are written to.
        self._serialization_dir = serialization_dir
        # Minimum age (seconds) after which a rotated-out checkpoint is kept permanently.
        self._keep_serialized_model_every_num_seconds = keep_serialized_model_every_num_seconds
        # How many recent (model, training-state) pairs to track before rotating.
        self._num_serialized_models_to_keep = num_serialized_models_to_keep
        if not hexists(self._serialization_dir):
            hmkdir(self._serialization_dir)
        self._last_permanent_saved_checkpoint_time = time.time()
        # Bookkeeping of (model_path, training_path) pairs, oldest first.
        self._serialized_paths: List[Tuple[str, str]] = []
    def save_checkpoint(self,
                        epoch: Union[int, str],
                        model_state: Dict[str, Any],
                        training_states: Dict[str, Any],
                        is_best_so_far: bool = False) -> None:
        """
        Save a checkpoint locally and to remote HDFS.

        Args:
            epoch: current training epoch (used in the checkpoint filename).
            model_state: parameters of the model being trained.
            training_states: current trainer state (optimizer state, counters, ...).
            is_best_so_far: whether this checkpoint is the best seen so far;
                if True it is also copied to `<serialization_dir>/best.th`.
        """
        if self._serialization_dir is not None:
            model_path = os.path.join(
                self._serialization_dir, "model_state_epoch_{}.th".format(epoch))
            # NOTE: the training state is always written to the same
            # "latest" file, not a per-epoch file.
            training_path = os.path.join(self._serialization_dir,
                                         "training_state_latest.th")
            hdfs_torch_save(model_state, model_path)
            hdfs_torch_save({**training_states, "epoch": epoch}, training_path)
        if is_best_so_far:
            logger.info("Best validation performance so far. "
                        "Copying weights to '%s/best.th'.", self._serialization_dir)
            hcopy(model_path, os.path.join(
                self._serialization_dir, "best.th"))
        if self._num_serialized_models_to_keep and self._num_serialized_models_to_keep >= 0:
            self._serialized_paths.append((model_path, training_path))
            if len(self._serialized_paths) > self._num_serialized_models_to_keep:
                paths_to_remove = self._serialized_paths.pop(0)
                # Check to see if we should keep this checkpoint, if it has been longer
                # then self._keep_serialized_model_every_num_seconds since the last
                # kept checkpoint.
                # NOTE(review): `paths_to_remove[0]` is a *path string*, not a
                # timestamp — the subtraction below would raise TypeError when
                # keep_serialized_model_every_num_seconds is set. Also nothing
                # ever deletes the popped files, so `remove_path` is unused.
                # Upstream (AllenNLP) stored (save_time, paths) tuples here.
                remove_path = True
                if self._keep_serialized_model_every_num_seconds is not None:
                    save_time = paths_to_remove[0]
                    time_since_checkpoint_kept = save_time - \
                        self._last_permanent_saved_checkpoint_time
                    if time_since_checkpoint_kept > self._keep_serialized_model_every_num_seconds:
                        # We want to keep this checkpoint.
                        remove_path = False
                        self._last_permanent_saved_checkpoint_time = save_time
    def find_latest_checkpoint(self) -> Tuple[str, str]:
        """
        Return the location of the latest model and training state files.
        If there isn't a valid checkpoint then return None.
        """
        have_checkpoint = (self._serialization_dir is not None and
                           any("model_state_epoch_" in x for x in os.listdir(self._serialization_dir)))
        if not have_checkpoint:
            return None
        serialization_files = os.listdir(self._serialization_dir)
        model_checkpoints = [
            x for x in serialization_files if "model_state_epoch" in x]
        # Get the last checkpoint file. Epochs are specified as either an
        # int (for end of epoch files) or with epoch and timestamp for
        # within epoch checkpoints, e.g. 5.2018-02-02-15-33-42
        found_epochs = [
            re.search(r"model_state_epoch_([0-9\.\-]+)\.th", x).group(1)
            for x in model_checkpoints
        ]
        int_epochs: Any = []
        # NOTE(review): leftover debug print — consider logger.debug instead.
        print(found_epochs)
        for pieces in found_epochs:
            # The timestamp part is always collapsed to '0' here, so only the
            # integer epoch is really used for ordering.
            int_epochs.append([int(float(pieces)), '0'])
        last_epoch = sorted(int_epochs, reverse=True)[0]
        if last_epoch[1] == '0':
            epoch_to_load = str(last_epoch[0])
        else:
            epoch_to_load = '{0}.{1}'.format(last_epoch[0], last_epoch[1])
        model_path = os.path.join(self._serialization_dir,
                                  "model_state_epoch_{}.th".format(epoch_to_load))
        # NOTE(review): save_checkpoint writes "training_state_latest.th", but
        # this expects "training_state_epoch_{}.th" — the returned training
        # path may not exist; verify against the save path convention.
        training_state_path = os.path.join(self._serialization_dir,
                                           "training_state_epoch_{}.th".format(epoch_to_load))
        return (model_path, training_state_path)
    def restore_checkpoint(self) -> Tuple[Dict[str, Any], Dict[str, Any]]:
        """
        Restores a model from a serialization_dir to the last saved checkpoint.
        This includes a training state (typically consisting of an epoch count and optimizer state),
        which is serialized separately from model parameters. This function should only be used to
        continue training - if you wish to load a model for inference/load parts of a model into a new
        computation graph, you should use the native Pytorch functions:
        `` model.load_state_dict(torch.load("/path/to/model/weights.th"))``
        If ``self._serialization_dir`` does not exist or does not contain any checkpointed weights,
        this function will do nothing and return empty dicts.
        Returns
        -------
        states: Tuple[Dict[str, Any], Dict[str, Any]]
            The model state and the training state.
        """
        # NOTE(review): the implementation below is entirely commented out, so
        # this method currently returns None — contradicting the documented
        # tuple return. Callers must not rely on it until it is restored.
        # latest_checkpoint = self.find_latest_checkpoint()
        # if latest_checkpoint is None:
        #     # No checkpoint to restore, start at 0
        #     return {}, {}
        # model_path, training_state_path = latest_checkpoint
        # # Load the parameters onto CPU, then transfer to GPU.
        # # This avoids potential OOM on GPU for large models that
        # # load parameters onto GPU then make a new GPU copy into the parameter
        # # buffer. The GPU transfer happens implicitly in load_state_dict.
        # model_state = torch.load(model_path, map_location=device_mapping(-1))
        # training_state = torch.load(
        #     training_state_path, map_location=device_mapping(-1))
        # return model_state, training_state
    def best_model_state(self) -> Dict[str, Any]:
        """
        Load the parameters of the best model (``best.th``), or ``{}`` when no
        serialization directory is configured.
        """
        if self._serialization_dir:
            logger.info("loading best weights")
            best_model_state_path = os.path.join(
                self._serialization_dir, 'best.th')
            return torch.load(best_model_state_path)
        else:
            logger.info("cannot load best weights without `serialization_dir`, "
                        "so you're just getting the last weights")
            return {}
| 7,516 | 44.283133 | 114 | py |
DaVinci | DaVinci-main/util/torch_io.py | # Write and Paint: Generative Vision-Language Models are Unified Modal Learners (https://arxiv.org/abs/2206.07699)
# Github: https://github.com/shizhediao/DaVinci
# Copyright (c) 2023, ByteDance Inc.
# All rights reserved.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
''' torch model hdfs io warpper '''
import io
import torch
from .hdfs_io import hopen
def load(filepath: str, **kwargs):
    """Load a torch-serialized object from a local path or an hdfs:// URI."""
    if filepath.startswith("hdfs://"):
        # Pull the remote bytes fully into memory, then let torch deserialize.
        with hopen(filepath, "rb") as reader:
            buffer = io.BytesIO(reader.read())
        result = torch.load(buffer, **kwargs)
        del buffer
        return result
    return torch.load(filepath, **kwargs)
def save(obj, filepath: str, **kwargs):
    """Serialize `obj` with torch.save to a local path or an hdfs:// URI."""
    if not filepath.startswith("hdfs://"):
        torch.save(obj, filepath, **kwargs)
        return
    # Stream the serialized bytes straight into the HDFS writer handle.
    with hopen(filepath, "wb") as writer:
        torch.save(obj, writer, **kwargs)
| 943 | 26.764706 | 114 | py |
DaVinci | DaVinci-main/optim/adahessian.py | """ AdaHessian Optimizer
Lifted from https://github.com/davda54/ada-hessian/blob/master/ada_hessian.py
Originally licensed MIT, Copyright 2020, David Samuel
"""
import torch
class Adahessian(torch.optim.Optimizer):
    """
    Implements the AdaHessian algorithm from "ADAHESSIAN: An Adaptive Second Order Optimizer for Machine Learning"
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups
        lr (float, optional): learning rate (default: 0.1)
        betas ((float, float), optional): coefficients used for computing running averages of gradient and the
            squared hessian trace (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0)
        hessian_power (float, optional): exponent of the hessian trace (default: 1.0)
        update_each (int, optional): compute the hessian trace approximation only after *this* number of steps
            (to save time) (default: 1)
        n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1)
    """
    def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0,
                 hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= hessian_power <= 1.0:
            raise ValueError(f"Invalid Hessian power value: {hessian_power}")
        self.n_samples = n_samples
        self.update_each = update_each
        self.avg_conv_kernel = avg_conv_kernel
        # use a separate generator that deterministically generates the same `z`s across all GPUs in case of distributed training
        self.seed = 2147483647
        self.generator = torch.Generator().manual_seed(self.seed)
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power)
        super(Adahessian, self).__init__(defaults) if False else super(Adahessian, self).__init__(params, defaults)
        for p in self.get_params():
            # p.hess accumulates the Hutchinson estimate of the Hessian diagonal.
            p.hess = 0.0
            self.state[p]["hessian step"] = 0
    @property
    def is_second_order(self):
        # Signals to training loops that backward() must be called with
        # create_graph=True so second derivatives are available.
        return True
    def get_params(self):
        """
        Gets all parameters in all param_groups with gradients
        """
        return (p for group in self.param_groups for p in group['params'] if p.requires_grad)
    def zero_hessian(self):
        """
        Zeros out the accumulated hessian traces.
        """
        for p in self.get_params():
            if not isinstance(p.hess, float) and self.state[p]["hessian step"] % self.update_each == 0:
                p.hess.zero_()
    @torch.no_grad()
    def set_hessian(self):
        """
        Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter.
        """
        params = []
        for p in filter(lambda p: p.grad is not None, self.get_params()):
            if self.state[p]["hessian step"] % self.update_each == 0: # compute the trace only each `update_each` step
                params.append(p)
            self.state[p]["hessian step"] += 1
        if len(params) == 0:
            return
        if self.generator.device != params[0].device: # hackish way of casting the generator to the right device
            self.generator = torch.Generator(params[0].device).manual_seed(self.seed)
        grads = [p.grad for p in params]
        for i in range(self.n_samples):
            # Rademacher distribution {-1.0, 1.0}
            zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params]
            # Hessian-vector product via double backprop; keep the graph alive
            # only while more samples remain.
            h_zs = torch.autograd.grad(
                grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1)
            for h_z, z, p in zip(h_zs, zs, params):
                p.hess += h_z * z / self.n_samples # approximate the expected values of z*(H@z)
    @torch.no_grad()
    def step(self, closure=None):
        """
        Performs a single optimization step.
        Arguments:
            closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None)
        """
        loss = None
        if closure is not None:
            loss = closure()
        self.zero_hessian()
        self.set_hessian()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None or p.hess is None:
                    continue
                if self.avg_conv_kernel and p.dim() == 4:
                    # Average the Hessian estimate over conv kernel spatial dims.
                    p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone()
                # Perform correct stepweight decay as in AdamW
                p.mul_(1 - group['lr'] * group['weight_decay'])
                state = self.state[p]
                # State initialization
                # (== 1 because __init__ already seeded the 'hessian step' key.)
                if len(state) == 1:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of Hessian diagonal square values
                    state['exp_hessian_diag_sq'] = torch.zeros_like(p)
                exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1)
                exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2)
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                k = group['hessian_power']
                denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps'])
                # make update
                step_size = group['lr'] / bias_correction1
                p.addcdiv_(exp_avg, denom, value=-step_size)
        return loss
| 6,535 | 40.630573 | 129 | py |
DaVinci | DaVinci-main/optim/radam.py | """RAdam Optimizer.
Implementation lifted from: https://github.com/LiyuanLucasLiu/RAdam
Paper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265
"""
import math
import torch
from torch.optim.optimizer import Optimizer, required
class RAdam(Optimizer):
    """Rectified Adam (RAdam) optimizer.

    Implements `On the Variance of the Adaptive Learning Rate and Beyond`
    (https://arxiv.org/abs/1908.03265). While the variance estimate of the
    adaptive learning rate is unreliable (N_sma < 5), the update falls back
    to SGD-with-momentum; afterwards a variance-rectified Adam step is used.

    Arguments:
        params: iterable of parameters to optimize or dicts defining parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients for the running
            averages of the gradient and its square (default: (0.9, 0.999))
        eps (float, optional): denominator term for numerical stability (default: 1e-8)
        weight_decay (float, optional): L2 penalty (default: 0)
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        # Cache of (step, N_sma, step_size) shared by all parameters; keyed by
        # step % 10 since the rectification term depends only on the step count.
        self.buffer = [[None, None, None] for ind in range(10)]
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): re-evaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')

                # Accumulate in fp32 even for half-precision parameters.
                p_data_fp32 = p.data.float()

                state = self.state[p]

                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # Keyword forms: the positional (Number, Tensor) overloads of
                # add_/addcmul_ are deprecated in PyTorch.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                state['step'] += 1
                buffered = self.buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma

                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        step_size = group['lr'] * math.sqrt(
                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
                                N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        step_size = group['lr'] / (1 - beta1 ** state['step'])
                    buffered[2] = step_size

                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])

                # more conservative since it's an approximated value
                if N_sma >= 5:
                    # Variance-rectified Adam update.
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    # SGD-with-momentum fallback.
                    p_data_fp32.add_(exp_avg, alpha=-step_size)

                p.data.copy_(p_data_fp32)

        return loss
class PlainRAdam(Optimizer):
    """RAdam without the cached per-step rectification buffer.

    Same update rule as :class:`RAdam`, but the rectification term and step
    size are recomputed on every step instead of being memoized.

    Arguments:
        params: iterable of parameters to optimize or dicts defining parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): running-average coefficients (default: (0.9, 0.999))
        eps (float, optional): denominator term for numerical stability (default: 1e-8)
        weight_decay (float, optional): L2 penalty (default: 0)
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(PlainRAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(PlainRAdam, self).__setstate__(state)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): re-evaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')

                # Accumulate in fp32 even for half-precision parameters.
                p_data_fp32 = p.data.float()

                state = self.state[p]

                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # Keyword forms: positional (Number, Tensor) overloads are deprecated.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                state['step'] += 1
                beta2_t = beta2 ** state['step']
                N_sma_max = 2 / (1 - beta2) - 1
                N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)

                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])

                # more conservative since it's an approximated value
                if N_sma >= 5:
                    # Variance-rectified Adam update.
                    step_size = group['lr'] * math.sqrt(
                        (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
                            N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    # SGD-with-momentum fallback while the variance estimate is unreliable.
                    step_size = group['lr'] / (1 - beta1 ** state['step'])
                    p_data_fp32.add_(exp_avg, alpha=-step_size)

                p.data.copy_(p_data_fp32)

        return loss
| 5,924 | 37.72549 | 111 | py |
DaVinci | DaVinci-main/optim/nvnovograd.py | """ Nvidia NovoGrad Optimizer.
Original impl by Nvidia from Jasper example:
- https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechRecognition/Jasper
Paper: `Stochastic Gradient Methods with Layer-wise Adaptive Moments for Training of Deep Networks`
- https://arxiv.org/abs/1905.11286
"""
import torch
from torch.optim.optimizer import Optimizer
import math
class NvNovoGrad(Optimizer):
    """
    Implements Novograd algorithm.

    Unlike Adam, NovoGrad keeps a single scalar second-moment estimate per
    layer (the running average of the squared gradient norm) instead of a
    per-element one.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.95, 0.98))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        grad_averaging: gradient averaging
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
    """

    def __init__(self, params, lr=1e-3, betas=(0.95, 0.98), eps=1e-8,
                 weight_decay=0, grad_averaging=False, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay,
                        grad_averaging=grad_averaging,
                        amsgrad=amsgrad)

        super(NvNovoGrad, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(NvNovoGrad, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Sparse gradients are not supported.')
                amsgrad = group['amsgrad']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Scalar: exponential moving average of the squared grad norm.
                    state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Layer-wise squared gradient norm.
                norm = torch.sum(torch.pow(grad, 2))

                if exp_avg_sq == 0:
                    # First step: seed the running average with the exact norm.
                    exp_avg_sq.copy_(norm)
                else:
                    # Keyword form: the positional (Number, Tensor) overload of
                    # add_ is deprecated in PyTorch.
                    exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2)

                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                # Normalize the gradient by the layer norm, then apply weight
                # decay and momentum on the normalized gradient.
                grad.div_(denom)
                if group['weight_decay'] != 0:
                    grad.add_(p.data, alpha=group['weight_decay'])
                if group['grad_averaging']:
                    grad.mul_(1 - beta1)
                exp_avg.mul_(beta1).add_(grad)

                p.data.add_(exp_avg, alpha=-group['lr'])

        return loss
| 4,795 | 39.302521 | 99 | py |
DaVinci | DaVinci-main/optim/adamp.py | """
AdamP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/adamp.py
Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217
Code: https://github.com/clovaai/AdamP
Copyright (c) 2020-present NAVER Corp.
MIT license
"""
import torch
import torch.nn as nn
from torch.optim.optimizer import Optimizer, required
import math
class AdamP(Optimizer):
    """AdamP optimizer.

    Adam variant that projects out the radial (norm-increasing) component of
    the update for scale-invariant weights, slowing down weight-norm growth
    caused by momentum.

    Paper: `Slowing Down the Weight Norm Increase in Momentum-based
    Optimizers` - https://arxiv.org/abs/2006.08217

    Arguments:
        params: iterable of parameters to optimize or dicts defining parameter groups
        lr / betas / eps / weight_decay: as in Adam.
        delta (float): cosine-similarity threshold below which the projection triggers.
        wd_ratio (float): weight-decay multiplier applied when the projection triggers.
        nesterov (bool): enables Nesterov-style momentum.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        delta=delta, wd_ratio=wd_ratio, nesterov=nesterov)
        super(AdamP, self).__init__(params, defaults)

    def _channel_view(self, x):
        # Flatten per output channel (one row per channel).
        return x.view(x.size(0), -1)

    def _layer_view(self, x):
        # Flatten the whole tensor into a single row.
        return x.view(1, -1)

    def _cosine_similarity(self, x, y, eps, view_func):
        # |cos| similarity between x and y under the given flattening.
        x = view_func(x)
        y = view_func(y)

        x_norm = x.norm(dim=1).add_(eps)
        y_norm = y.norm(dim=1).add_(eps)
        dot = (x * y).sum(dim=1)

        return dot.abs() / x_norm / y_norm

    def _projection(self, p, grad, perturb, delta, wd_ratio, eps):
        # If the gradient is (near-)orthogonal to the weights, the weight is
        # treated as scale-invariant: remove the component of `perturb`
        # parallel to `p` and shrink the weight decay.
        wd = 1
        expand_size = [-1] + [1] * (len(p.shape) - 1)
        for view_func in [self._channel_view, self._layer_view]:
            cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)

            if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):
                p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)
                perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)
                wd = wd_ratio
                return perturb, wd

        return perturb, wd

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): re-evaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                grad = p.grad.data
                beta1, beta2 = group['betas']
                nesterov = group['nesterov']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                # Adam moments — keyword forms: the positional (Number, Tensor)
                # overloads of add_/addcmul_ are deprecated in PyTorch.
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / bias_correction1

                if nesterov:
                    perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom
                else:
                    perturb = exp_avg / denom

                # Projection (only meaningful for >= 2D weights).
                wd_ratio = 1
                if len(p.shape) > 1:
                    perturb, wd_ratio = self._projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps'])

                # Weight decay
                if group['weight_decay'] > 0:
                    p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio)

                # Step
                p.data.add_(perturb, alpha=-step_size)

        return loss
| 3,689 | 33.166667 | 123 | py |
DaVinci | DaVinci-main/optim/nadam.py | import torch
from torch.optim import Optimizer
class Nadam(Optimizer):
    """Implements Nadam algorithm (a variant of Adam based on Nesterov momentum).

    It has been proposed in `Incorporating Nesterov Momentum into Adam`__.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        schedule_decay (float, optional): momentum schedule decay (default: 4e-3)

    __ http://cs229.stanford.edu/proj2015/054_report.pdf
    __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf

    Originally taken from: https://github.com/pytorch/pytorch/pull/1408
    NOTE: Has potential issues but does work well on some problems.
    """

    def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, schedule_decay=4e-3):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, schedule_decay=schedule_decay)
        super(Nadam, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['m_schedule'] = 1.
                    # zeros_like replaces the deprecated new().resize_as_().zero_() idiom.
                    state['exp_avg'] = torch.zeros_like(grad)
                    state['exp_avg_sq'] = torch.zeros_like(grad)

                # Warming momentum schedule
                m_schedule = state['m_schedule']
                schedule_decay = group['schedule_decay']
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                eps = group['eps']
                state['step'] += 1
                t = state['step']

                if group['weight_decay'] != 0:
                    grad = grad.add(p.data, alpha=group['weight_decay'])

                momentum_cache_t = beta1 * \
                    (1. - 0.5 * (0.96 ** (t * schedule_decay)))
                momentum_cache_t_1 = beta1 * \
                    (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay)))
                m_schedule_new = m_schedule * momentum_cache_t
                m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1
                state['m_schedule'] = m_schedule_new

                # Decay the first and second moment running average coefficient.
                # Keyword forms: positional (Number, Tensor) overloads are deprecated.
                exp_avg.mul_(beta1).add_(grad, alpha=1. - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1. - beta2)
                exp_avg_sq_prime = exp_avg_sq / (1. - beta2 ** t)
                denom = exp_avg_sq_prime.sqrt_().add_(eps)

                # Nesterov update: blend the current gradient with the
                # look-ahead momentum term, each with its own schedule weight.
                p.data.addcdiv_(grad, denom, value=-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new))
                p.data.addcdiv_(exp_avg, denom, value=-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next))

        return loss
| 3,758 | 41.235955 | 108 | py |
DaVinci | DaVinci-main/optim/adamw.py | """ AdamW Optimizer
Impl copied from PyTorch master
"""
import math
import torch
from torch.optim.optimizer import Optimizer
class AdamW(Optimizer):
    r"""Implements AdamW algorithm.

    The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
    The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_:
    weight decay is applied directly to the parameters, decoupled from the
    adaptive gradient update.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=1e-2, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(AdamW, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdamW, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                # Perform stepweight decay (decoupled from the gradient update).
                p.data.mul_(1 - group['lr'] * group['weight_decay'])

                # Perform optimization step
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                # Decay the first and second moment running average coefficient.
                # Keyword forms: the positional (Number, Tensor) overloads of
                # add_/addcmul_/addcdiv_ are deprecated in PyTorch.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])

                step_size = group['lr'] / bias_correction1

                p.data.addcdiv_(exp_avg, denom, value=-step_size)

        return loss
| 4,965 | 41.084746 | 116 | py |
DaVinci | DaVinci-main/optim/adafactor.py | """ Adafactor Optimizer
Lifted from https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py
Original header/copyright below.
"""
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import math
class Adafactor(torch.optim.Optimizer):
    """Implements Adafactor algorithm.
    This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`
    (see https://arxiv.org/abs/1804.04235)
    Note that this optimizer internally adjusts the learning rate depending on the
    *scale_parameter*, *relative_step* and *warmup_init* options.
    To use a manual (external) learning rate schedule you should set `scale_parameter=False` and
    pass an explicit `lr` (relative stepping is enabled automatically when `lr` is None).
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups
        lr (float, optional): external learning rate; None enables relative-step sizing (default: None)
        eps (float): regularization constant for the squared gradient (default: 1e-30)
        eps_scale (float): regularization constant for parameter scale (default: 1e-3)
        clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0)
        decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8)
        betas (tuple, optional): only betas[0] is used, as the first-moment coefficient;
            None disables the first moment entirely (default: None)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True)
        warmup_init (bool): use warm-up style relative step sizes; requires `lr` to be None (default: False)
    """

    def __init__(self, params, lr=None, eps=1e-30, eps_scale=1e-3, clip_threshold=1.0,
                 decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False):
        # A relative (time-dependent) step size is used whenever no external lr is given.
        relative_step = lr is None
        if warmup_init and not relative_step:
            raise ValueError('warmup_init requires relative_step=True')
        beta1 = None if betas is None else betas[0]  # make it compat with standard betas arg
        defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate,
                        beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter,
                        relative_step=relative_step, warmup_init=warmup_init)
        super(Adafactor, self).__init__(params, defaults)

    @staticmethod
    def _get_lr(param_group, param_state):
        # Recompute the relative step size for this group; with an external lr this
        # simply returns the stored value unchanged.
        if param_group['relative_step']:
            min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2
            lr_t = min(min_step, 1.0 / math.sqrt(param_state['step']))
            param_scale = 1.0
            if param_group['scale_parameter']:
                param_scale = max(param_group['eps_scale'], param_state['RMS'])
            param_group['lr'] = lr_t * param_scale
        return param_group['lr']

    @staticmethod
    def _get_options(param_group, param_shape):
        # Factor the second moment only for >=2D tensors; the first moment is kept
        # only when a beta1 was supplied.
        factored = len(param_shape) >= 2
        use_first_moment = param_group['beta1'] is not None
        return factored, use_first_moment

    @staticmethod
    def _rms(tensor):
        # Root mean square over all elements.
        return tensor.norm(2) / (tensor.numel() ** 0.5)

    def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):
        # Reconstruct a full second-moment estimate from its row/column factors.
        r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
        c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
        return torch.mul(r_factor, c_factor)

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.dtype in {torch.float16, torch.bfloat16}:
                    grad = grad.float()
                if grad.is_sparse:
                    raise RuntimeError('Adafactor does not support sparse gradients.')

                state = self.state[p]
                grad_shape = grad.shape

                factored, use_first_moment = self._get_options(group, grad_shape)
                # State Initialization
                if len(state) == 0:
                    state['step'] = 0
                    if use_first_moment:
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(grad)
                    if factored:
                        # Factored second moment: one running vector per row dim, one per column dim.
                        state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1]).to(grad)
                        state['exp_avg_sq_col'] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)
                    else:
                        state['exp_avg_sq'] = torch.zeros_like(grad)
                    state['RMS'] = 0
                else:
                    # Keep restored state on the same device/dtype as the gradient.
                    if use_first_moment:
                        state['exp_avg'] = state['exp_avg'].to(grad)
                    if factored:
                        state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)
                        state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)
                    else:
                        state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)

                # Work in fp32 for low-precision params, copy back at the end.
                p_data_fp32 = p.data
                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p_data_fp32 = p_data_fp32.float()

                state['step'] += 1
                state['RMS'] = self._rms(p_data_fp32)
                lr_t = self._get_lr(group, state)

                beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])
                update = grad ** 2 + group['eps']
                if factored:
                    exp_avg_sq_row = state['exp_avg_sq_row']
                    exp_avg_sq_col = state['exp_avg_sq_col']
                    # NOTE: modern `alpha=` keyword form replaces the deprecated
                    # scalar-first `add_(scalar, tensor)` overload used previously.
                    exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t)
                    exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t)
                    # Approximation of exponential moving average of square of gradient
                    update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
                    update.mul_(grad)
                else:
                    exp_avg_sq = state['exp_avg_sq']
                    exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t)
                    update = exp_avg_sq.rsqrt().mul_(grad)

                # Clip by the RMS of the proposed update, then apply the step size.
                update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0))
                update.mul_(lr_t)

                if use_first_moment:
                    exp_avg = state['exp_avg']
                    exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1'])
                    update = exp_avg

                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * lr_t)

                p_data_fp32.add_(-update)

                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p.data.copy_(p_data_fp32)

        return loss
DaVinci | DaVinci-main/optim/rmsprop_tf.py | """ RMSProp modified to behave like Tensorflow impl
Originally cut & paste from PyTorch RMSProp
https://github.com/pytorch/pytorch/blob/063946d2b3f3f1e953a2a3b54e0b34f1393de295/torch/optim/rmsprop.py
Licensed under BSD-Clause 3 (ish), https://github.com/pytorch/pytorch/blob/master/LICENSE
Modifications Copyright 2020 Ross Wightman
"""
import torch
from torch.optim import Optimizer
class RMSpropTF(Optimizer):
    """Implements RMSprop algorithm (TensorFlow style epsilon)
    NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt
    and a few other modifications to closer match Tensorflow for matching hyper-params.
    Noteworthy changes include:
    1. Epsilon applied inside square-root
    2. square_avg initialized to ones
    3. LR scaling of update accumulated in momentum buffer
    Proposed by G. Hinton in his
    `course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.
    The centered version first appears in `Generating Sequences
    With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        momentum (float, optional): momentum factor (default: 0)
        alpha (float, optional): smoothing (decay) constant (default: 0.9)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-10)
        centered (bool, optional) : if ``True``, compute the centered RMSProp,
            the gradient is normalized by an estimation of its variance
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101
        lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer
            update as per defaults in Tensorflow
    """

    def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False,
                 decoupled_decay=False, lr_in_momentum=True):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= momentum:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if not 0.0 <= alpha:
            raise ValueError("Invalid alpha value: {}".format(alpha))
        defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay,
                        decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum)
        super(RMSpropTF, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Backfill options missing from older checkpoints.
        super(RMSpropTF, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('momentum', 0)
            group.setdefault('centered', False)

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('RMSprop does not support sparse gradients')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['square_avg'] = torch.ones_like(p.data)  # PyTorch inits to zero
                    if group['momentum'] > 0:
                        state['momentum_buffer'] = torch.zeros_like(p.data)
                    if group['centered']:
                        state['grad_avg'] = torch.zeros_like(p.data)
                square_avg = state['square_avg']
                one_minus_alpha = 1. - group['alpha']
                state['step'] += 1
                # NOTE: deprecated scalar-first `add_/addcmul/addcdiv_` overloads below
                # replaced with the modern `alpha=` / `value=` keyword forms.
                if group['weight_decay'] != 0:
                    if 'decoupled_decay' in group and group['decoupled_decay']:
                        # decoupled: shrink weights directly rather than modifying the gradient
                        p.data.add_(p.data, alpha=-group['weight_decay'])
                    else:
                        grad = grad.add(p.data, alpha=group['weight_decay'])
                # Tensorflow order of ops for updating squared avg
                square_avg.add_(grad.pow(2) - square_avg, alpha=one_minus_alpha)
                if group['centered']:
                    grad_avg = state['grad_avg']
                    grad_avg.add_(grad - grad_avg, alpha=one_minus_alpha)
                    avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).add(group['eps']).sqrt_()  # eps inside sqrt
                else:
                    avg = square_avg.add(group['eps']).sqrt_()  # eps moved in sqrt
                if group['momentum'] > 0:
                    buf = state['momentum_buffer']
                    # Tensorflow accumulates the LR scaling in the momentum buffer
                    if 'lr_in_momentum' in group and group['lr_in_momentum']:
                        buf.mul_(group['momentum']).addcdiv_(grad, avg, value=group['lr'])
                        p.data.add_(-buf)
                    else:
                        # PyTorch scales the param update by LR
                        buf.mul_(group['momentum']).addcdiv_(grad, avg)
                        p.data.add_(buf, alpha=-group['lr'])
                else:
                    p.data.addcdiv_(grad, avg, value=-group['lr'])
        return loss
| 6,127 | 43.729927 | 117 | py |
DaVinci | DaVinci-main/optim/novograd.py | """NovoGrad Optimizer.
Original impl by Masashi Kimura (Convergence Lab): https://github.com/convergence-lab/novograd
Paper: `Stochastic Gradient Methods with Layer-wise Adaptive Moments for Training of Deep Networks`
- https://arxiv.org/abs/1905.11286
"""
import torch
from torch.optim.optimizer import Optimizer
import math
class NovoGrad(Optimizer):
    """Implements the NovoGrad optimizer.
    Paper: `Stochastic Gradient Methods with Layer-wise Adaptive Moments for
    Training of Deep Networks` - https://arxiv.org/abs/1905.11286
    The second moment ``v`` is a per-tensor scalar (squared gradient norm)
    rather than a per-element statistic, which is the distinguishing feature
    of NovoGrad.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups
        grad_averaging (bool): scale the normalized gradient by (1 - beta1) (default: False)
        lr (float): learning rate (default: 0.1)
        betas (Tuple[float, float]): first/second moment coefficients (default: (0.95, 0.98))
        eps (float): term added to denominators for numerical stability (default: 1e-8)
        weight_decay (float): L2 penalty (default: 0)
    """

    def __init__(self, params, grad_averaging=False, lr=0.1, betas=(0.95, 0.98), eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(NovoGrad, self).__init__(params, defaults)
        self._lr = lr
        self._beta1 = betas[0]
        self._beta2 = betas[1]
        self._eps = eps
        self._wd = weight_decay
        self._grad_averaging = grad_averaging
        self._momentum_initialized = False

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        if not self._momentum_initialized:
            # First call: seed the scalar second moment `v` and the momentum `m`
            # from the current gradients.
            for group in self.param_groups:
                for p in group['params']:
                    if p.grad is None:
                        continue
                    state = self.state[p]
                    grad = p.grad.data
                    if grad.is_sparse:
                        raise RuntimeError('NovoGrad does not support sparse gradients')
                    v = torch.norm(grad)**2
                    m = grad/(torch.sqrt(v) + self._eps) + self._wd * p.data
                    state['step'] = 0
                    state['v'] = v
                    state['m'] = m
                    state['grad_ema'] = None
            self._momentum_initialized = True

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                state = self.state[p]
                state['step'] += 1

                step, v, m = state['step'], state['v'], state['m']
                grad_ema = state['grad_ema']

                grad = p.grad.data
                g2 = torch.norm(grad)**2
                grad_ema = g2 if grad_ema is None else grad_ema * \
                    self._beta2 + g2 * (1. - self._beta2)
                # NOTE: this scales the gradient buffer (p.grad) in place.
                grad *= 1.0 / (torch.sqrt(grad_ema) + self._eps)

                if self._grad_averaging:
                    grad *= (1. - self._beta1)

                g2 = torch.norm(grad)**2
                v = self._beta2*v + (1. - self._beta2)*g2
                m = self._beta1*m + (grad / (torch.sqrt(v) + self._eps) + self._wd * p.data)
                bias_correction1 = 1 - self._beta1 ** step
                bias_correction2 = 1 - self._beta2 ** step
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                state['v'], state['m'] = v, m
                state['grad_ema'] = grad_ema
                # Modern keyword form; was the deprecated p.data.add_(-step_size, m).
                p.data.add_(m, alpha=-step_size)

        return loss
| 2,925 | 36.512821 | 107 | py |
DaVinci | DaVinci-main/optim/sgdp.py | """
SGDP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/sgdp.py
Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217
Code: https://github.com/clovaai/AdamP
Copyright (c) 2020-present NAVER Corp.
MIT license
"""
import torch
import torch.nn as nn
from torch.optim.optimizer import Optimizer, required
import math
class SGDP(Optimizer):
    """SGD with projection (SGDP).
    Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers`
    - https://arxiv.org/abs/2006.08217 (https://github.com/clovaai/AdamP)
    When the update is nearly radial (cosine similarity with the weights below a
    threshold), its radial component is projected out and the effective weight
    decay is scaled by ``wd_ratio``.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups
        lr (float): learning rate (required)
        momentum (float): momentum factor (default: 0)
        dampening (float): dampening for momentum (default: 0)
        weight_decay (float): L2 penalty (default: 0)
        nesterov (bool): enables Nesterov momentum (default: False)
        eps (float): term added for numerical stability (default: 1e-8)
        delta (float): cosine-similarity threshold for projection (default: 0.1)
        wd_ratio (float): weight-decay multiplier applied when projecting (default: 0.1)
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1):
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay,
                        nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio)
        super(SGDP, self).__init__(params, defaults)

    def _channel_view(self, x):
        # Flatten to (out_channels, -1).
        return x.view(x.size(0), -1)

    def _layer_view(self, x):
        # Flatten the whole tensor to a single row.
        return x.view(1, -1)

    def _cosine_similarity(self, x, y, eps, view_func):
        # Absolute cosine similarity per row of the chosen view.
        x = view_func(x)
        y = view_func(y)
        x_norm = x.norm(dim=1).add_(eps)
        y_norm = y.norm(dim=1).add_(eps)
        dot = (x * y).sum(dim=1)
        return dot.abs() / x_norm / y_norm

    def _projection(self, p, grad, perturb, delta, wd_ratio, eps):
        # If the gradient is nearly orthogonal to the weights (per channel or per
        # layer), remove the radial component of the update and scale weight decay.
        wd = 1
        expand_size = [-1] + [1] * (len(p.shape) - 1)
        for view_func in [self._channel_view, self._layer_view]:
            cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)
            if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):
                p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)
                perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)
                wd = wd_ratio
                return perturb, wd
        return perturb, wd

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['momentum'] = torch.zeros_like(p.data)
                # SGD momentum update (modern keyword form; was the deprecated
                # add_(1 - dampening, grad) overload).
                buf = state['momentum']
                buf.mul_(momentum).add_(grad, alpha=1 - dampening)
                if nesterov:
                    d_p = grad + momentum * buf
                else:
                    d_p = buf
                # Projection (only meaningful for >=2D params)
                wd_ratio = 1
                if len(p.shape) > 1:
                    d_p, wd_ratio = self._projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps'])
                # Weight decay
                if weight_decay != 0:
                    p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio / (1-momentum))
                # Step
                p.data.add_(d_p, alpha=-group['lr'])
        return loss
| 3,231 | 32.319588 | 115 | py |
DaVinci | DaVinci-main/optim/lars.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# LARS optimizer, implementation from MoCo v3:
# https://github.com/facebookresearch/moco-v3
# --------------------------------------------------------
import torch
class LARS(torch.optim.Optimizer):
    """
    LARS optimizer, no rate scaling or weight decay for parameters <= 1D.
    (Implementation from MoCo v3: https://github.com/facebookresearch/moco-v3)
    """

    def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, trust_coefficient=0.001):
        defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum, trust_coefficient=trust_coefficient)
        super().__init__(params, defaults)

    @torch.no_grad()
    def step(self):
        """Performs a single optimization step (no closure support)."""
        for g in self.param_groups:
            for p in g['params']:
                dp = p.grad
                if dp is None:
                    continue

                if p.ndim > 1:  # if not normalization gamma/beta or bias
                    dp = dp.add(p, alpha=g['weight_decay'])
                    param_norm = torch.norm(p)
                    update_norm = torch.norm(dp)
                    one = torch.ones_like(param_norm)
                    # Layer-wise trust ratio; fall back to 1 when either norm is 0.
                    q = torch.where(param_norm > 0.,
                                    torch.where(update_norm > 0,
                                                (g['trust_coefficient'] * param_norm / update_norm), one),
                                    one)
                    dp = dp.mul(q)

                param_state = self.state[p]
                if 'mu' not in param_state:
                    param_state['mu'] = torch.zeros_like(p)
                mu = param_state['mu']
                mu.mul_(g['momentum']).add_(dp)
                # Restored: this final statement had extraction metadata fused onto
                # it in the corrupted source, breaking the file's syntax.
                p.add_(mu, alpha=-g['lr'])
DaVinci | DaVinci-main/optim/lookahead.py | """ Lookahead Optimizer Wrapper.
Implementation modified from: https://github.com/alphadl/lookahead.pytorch
Paper: `Lookahead Optimizer: k steps forward, 1 step back` - https://arxiv.org/abs/1907.08610
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch.optim.optimizer import Optimizer
from collections import defaultdict
class Lookahead(Optimizer):
    """Lookahead optimizer wrapper (`k steps forward, 1 step back`,
    https://arxiv.org/abs/1907.08610).
    Keeps a slow copy of the weights; every `k` steps of `base_optimizer` the
    fast weights are pulled towards the slow copy by interpolation factor `alpha`
    and then copied back.
    """

    def __init__(self, base_optimizer, alpha=0.5, k=6):
        if not 0.0 <= alpha <= 1.0:
            raise ValueError(f'Invalid slow update rate: {alpha}')
        if not 1 <= k:
            raise ValueError(f'Invalid lookahead steps: {k}')
        defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0)
        self.base_optimizer = base_optimizer
        # Share param_groups with the wrapped optimizer so schedulers etc. work.
        self.param_groups = self.base_optimizer.param_groups
        self.defaults = base_optimizer.defaults
        self.defaults.update(defaults)
        self.state = defaultdict(dict)
        # manually add our defaults to the param groups
        for name, default in defaults.items():
            for group in self.param_groups:
                group.setdefault(name, default)

    def update_slow(self, group):
        # Interpolate the slow weights towards the fast ones and copy them back.
        for fast_p in group["params"]:
            if fast_p.grad is None:
                continue
            param_state = self.state[fast_p]
            if 'slow_buffer' not in param_state:
                param_state['slow_buffer'] = torch.empty_like(fast_p.data)
                param_state['slow_buffer'].copy_(fast_p.data)
            slow = param_state['slow_buffer']
            # Modern keyword form; was the deprecated slow.add_(alpha, tensor).
            slow.add_(fast_p.data - slow, alpha=group['lookahead_alpha'])
            fast_p.data.copy_(slow)

    def sync_lookahead(self):
        """Force a slow-weight update on every group (e.g. before evaluation)."""
        for group in self.param_groups:
            self.update_slow(group)

    def step(self, closure=None):
        """Step the wrapped optimizer; every k-th call also updates the slow weights."""
        #assert id(self.param_groups) == id(self.base_optimizer.param_groups)
        loss = self.base_optimizer.step(closure)
        for group in self.param_groups:
            group['lookahead_step'] += 1
            if group['lookahead_step'] % group['lookahead_k'] == 0:
                self.update_slow(group)
        return loss

    def state_dict(self):
        """Return the base optimizer's state plus the slow-weight buffers."""
        fast_state_dict = self.base_optimizer.state_dict()
        slow_state = {
            (id(k) if isinstance(k, torch.Tensor) else k): v
            for k, v in self.state.items()
        }
        fast_state = fast_state_dict['state']
        param_groups = fast_state_dict['param_groups']
        return {
            'state': fast_state,
            'slow_state': slow_state,
            'param_groups': param_groups,
        }

    def load_state_dict(self, state_dict):
        """Restore base-optimizer state and, when present, the slow-weight state."""
        fast_state_dict = {
            'state': state_dict['state'],
            'param_groups': state_dict['param_groups'],
        }
        self.base_optimizer.load_state_dict(fast_state_dict)
        # We want to restore the slow state, but share param_groups reference
        # with base_optimizer. This is a bit redundant but least code
        slow_state_new = False
        if 'slow_state' not in state_dict:
            print('Loading state_dict from optimizer without Lookahead applied.')
            state_dict['slow_state'] = defaultdict(dict)
            slow_state_new = True
        slow_state_dict = {
            'state': state_dict['slow_state'],
            'param_groups': state_dict['param_groups'],  # this is pointless but saves code
        }
        super(Lookahead, self).load_state_dict(slow_state_dict)
        self.param_groups = self.base_optimizer.param_groups  # make both ref same container
        if slow_state_new:
            # reapply defaults to catch missing lookahead specific ones
            for name, default in self.defaults.items():
                for group in self.param_groups:
                    group.setdefault(name, default)
| 3,815 | 40.032258 | 93 | py |
DaVinci | DaVinci-main/optim/optim_factory.py | """ Optimizer Factory w/ Custom Weight Decay
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import optim as optim
from .adafactor import Adafactor
from .adahessian import Adahessian
from .adamp import AdamP
from .lookahead import Lookahead
from .nadam import Nadam
from .novograd import NovoGrad
from .nvnovograd import NvNovoGrad
from .radam import RAdam
from .rmsprop_tf import RMSpropTF
from .sgdp import SGDP
try:
from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
has_apex = True
except ImportError:
has_apex = False
def add_weight_decay(model, weight_decay=1e-5, skip_list=()):
    """Split trainable parameters into two optimizer param groups.

    1-D tensors (norm scales, biases), names ending in ".bias" and names in
    `skip_list` go into a group with weight_decay=0; everything else gets
    `weight_decay`. Frozen parameters (requires_grad=False) are excluded.
    """
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights
        exempt = len(param.shape) == 1 or name.endswith(".bias") or name in skip_list
        (no_decay if exempt else decay).append(param)
    return [
        {'params': no_decay, 'weight_decay': 0.},
        {'params': decay, 'weight_decay': weight_decay},
    ]
def create_optimizer(args, model, filter_bias_and_bn=True):
    """Create an optimizer for `model` based on `args`.

    Expects `args` to provide: opt (str), lr, weight_decay, momentum, and
    optionally opt_eps, opt_betas, opt_args. An opt name of the form
    'lookahead_<name>' wraps the chosen optimizer in Lookahead; 'fused*'
    names require apex + CUDA.

    Args:
        args: namespace with the hyper-parameters listed above.
        model: model whose parameters are optimized.
        filter_bias_and_bn: when True and weight decay is enabled, 1-D params
            and biases are placed in a no-weight-decay group via add_weight_decay().

    Returns:
        A configured optimizer instance.

    Raises:
        ValueError: if `args.opt` does not name a known optimizer.
    """
    opt_lower = args.opt.lower()
    weight_decay = args.weight_decay
    if weight_decay and filter_bias_and_bn:
        skip = {}
        if hasattr(model, 'no_weight_decay'):
            skip = model.no_weight_decay()
        parameters = add_weight_decay(model, weight_decay, skip)
        weight_decay = 0.
    else:
        parameters = model.parameters()

    if 'fused' in opt_lower:
        assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'

    opt_args = dict(lr=args.lr, weight_decay=weight_decay)
    if hasattr(args, 'opt_eps') and args.opt_eps is not None:
        opt_args['eps'] = args.opt_eps
    if hasattr(args, 'opt_betas') and args.opt_betas is not None:
        opt_args['betas'] = args.opt_betas
    if hasattr(args, 'opt_args') and args.opt_args is not None:
        opt_args.update(args.opt_args)

    # 'lookahead_sgd' -> wrap 'sgd'; the last component selects the optimizer.
    opt_split = opt_lower.split('_')
    opt_lower = opt_split[-1]
    if opt_lower == 'sgd' or opt_lower == 'nesterov':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'momentum':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
    elif opt_lower == 'adam':
        optimizer = optim.Adam(parameters, **opt_args)
    elif opt_lower == 'adamw':
        optimizer = optim.AdamW(parameters, **opt_args)
    elif opt_lower == 'nadam':
        optimizer = Nadam(parameters, **opt_args)
    elif opt_lower == 'radam':
        optimizer = RAdam(parameters, **opt_args)
    elif opt_lower == 'adamp':
        optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
    elif opt_lower == 'sgdp':
        optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(parameters, **opt_args)
    elif opt_lower == 'adafactor':
        if not args.lr:
            opt_args['lr'] = None  # enables Adafactor's relative-step sizing
        optimizer = Adafactor(parameters, **opt_args)
    elif opt_lower == 'adahessian':
        optimizer = Adahessian(parameters, **opt_args)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
    elif opt_lower == 'rmsproptf':
        optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
    elif opt_lower == 'novograd':
        optimizer = NovoGrad(parameters, **opt_args)
    elif opt_lower == 'nvnovograd':
        optimizer = NvNovoGrad(parameters, **opt_args)
    elif opt_lower == 'fusedsgd':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'fusedmomentum':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
    elif opt_lower == 'fusedadam':
        optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
    elif opt_lower == 'fusedadamw':
        optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
    elif opt_lower == 'fusedlamb':
        optimizer = FusedLAMB(parameters, **opt_args)
    elif opt_lower == 'fusednovograd':
        opt_args.setdefault('betas', (0.95, 0.98))
        optimizer = FusedNovoGrad(parameters, **opt_args)
    else:
        # Was `assert False and "Invalid optimizer"` (message unreachable, and
        # the assert vanishes under `python -O`) followed by a bare ValueError.
        raise ValueError('Invalid optimizer: {}'.format(args.opt))

    if len(opt_split) > 1:
        if opt_split[0] == 'lookahead':
            optimizer = Lookahead(optimizer)
    return optimizer
| 4,764 | 37.739837 | 100 | py |
DaVinci | DaVinci-main/accelerators/accelerator.py | # -*- coding: utf-8 -*-
'''
Created on Feb-19-21 16:36
accelerator.py
Description: accelerators的基类,便于后续其他加速方案的接入。
'''
from logging import Logger
import torch
from torch.optim import Optimizer
Net = torch.nn.Module
class Accelerator:
"""
Accelerator是所有accelerators的基类,新添加的accelerator需要继承该类。
"""
def __init__(self, cfg, logger) -> None:
self.cfg = cfg
self.logger = logger
def set_up(self, model: Net):
raise NotImplementedError("Set Up method not implement in Accelerator, please check! ")
def broadcast(self):
raise NotImplementedError("Broadcast method not implement in Accelerator, please check! ")
def backward_step(self, loss: torch.Tensor):
loss.backward()
def optimizer_step(self, optimizer: Optimizer, model: Net, grad_norm: float) -> float:
total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(),
grad_norm)
return float(total_norm)
| 1,012 | 24.974359 | 98 | py |
DaVinci | DaVinci-main/accelerators/apex_ddp_accelerator.py | # -*- coding: utf-8 -*-
'''
Created on Nov-18-20 15:21
ddp_accelerator.py
@author: liuzhen.nlp
Description:
'''
import os
import random
import sys
from typing import Tuple, Union, Optional, Any
import numpy as np
import torch
import torch.distributed as distributed
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
Net = torch.nn.Module
from .accelerator import Accelerator
try:
from apex import amp
from apex.parallel import DistributedDataParallel as Apex_DDP
from apex.parallel import convert_syncbn_model
except ImportError:
print('no apex! Please install from https://www.github.com/nvidia/apex')
class ApexDDPAccelerator(Accelerator):
"""
ApexDDPAccelerator 使用apex DistributedDataParallel进行分布式加速训练
"""
def __init__(self, cfg, logger):
super().__init__(cfg, logger)
self.accelerator_rng_seed = self.cfg.RNG_SEED
self.accelerator_syncbn = self.cfg.SYNCBN
self.accelerator_fp16_opt_level = self.cfg.FP16_OPT_LEVEL
self.accelerator_fp16_loss_scale = self.cfg.FP16_LOSS_SCALE
def set_up(self, model: Net, optimizer: Optimizer, lr_scheduler: LambdaLR,
local_rank: int, world_size: int, rank: int) -> Tuple[Apex_DDP, Optimizer, LambdaLR]:
"""
初始化ApexDDPAccelerator,包括process_group和apex_ddp的初始化
"""
torch.backends.cudnn.benchmark = False
random.seed(self.accelerator_rng_seed)
np.random.seed(self.accelerator_rng_seed)
torch.random.manual_seed(self.accelerator_rng_seed)
torch.cuda.manual_seed_all(self.accelerator_rng_seed)
master_address = os.environ.get('MASTER_ADDR', "127.0.0.1")
master_port = int(os.environ.get('MASTER_PORT', 34171))
torch.cuda.set_device(local_rank)
model = model.cuda()
if not torch.distributed.is_initialized():
distributed.init_process_group(
backend='nccl',
init_method='tcp://{}:{}'.format(master_address, master_port),
world_size=world_size,
rank=rank,
group_name='mtorch')
print(
f'ApexDDPAccelerator distributed, size: {world_size}, rank: {rank}, local rank: {local_rank}')
sys.stdout.flush()
self.broadcast(model)
apex_model, optimizer = self.configure_ddp(model, optimizer)
if self.accelerator_syncbn:
apex_model = self.configure_sync_batchnorm(apex_model)
return apex_model, optimizer, lr_scheduler
def broadcast(self, model: Net, src=0) -> None:
"""
将model的参数做broadcast
"""
for v in model.state_dict().values():
distributed.broadcast(v, src)
def configure_ddp(self, model: Net, optimizer: Optimizer) -> Tuple[Apex_DDP, Optimizer]:
"""
初始化apex_ddp
"""
model, optimizer = amp.initialize(model, optimizer,
opt_level=self.accelerator_fp16_opt_level,
keep_batchnorm_fp32=None, # from True to None
loss_scale=self.accelerator_fp16_loss_scale,
max_loss_scale=1024.0,
min_loss_scale=1.0)
apex_model = Apex_DDP(model, delay_allreduce=True)
self.ddp_model = apex_model
return apex_model, optimizer
def configure_sync_batchnorm(self, model: Net) -> Net:
"""
将model中的``torch.nn.modules.batchnorm._BatchNorm`` 转为 :class:`apex.parallel.SyncBatchNorm`.
"""
model = convert_syncbn_model(model)
return model
def backward_step(self, loss: torch.Tensor, optimizer: Optimizer):
"""
backward step
"""
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
def optimizer_step(self, optimizer: Optimizer, model: Net, grad_norm: float) -> float:
"""
Gradient clipping
"""
total_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer),
grad_norm)
return float(total_norm)
| 4,241 | 34.35 | 110 | py |
DaVinci | DaVinci-main/taming/main.py | import argparse, os, sys, datetime, glob, importlib
from omegaconf import OmegaConf
import numpy as np
from PIL import Image
import torch
import torchvision
from torch.utils.data import random_split, DataLoader, Dataset
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like ``"pkg.mod.Thing"`` to the named attribute.

    When `reload` is true, the containing module is re-imported first so that
    changes on disk are picked up.
    """
    module_path, attr_name = string.rsplit(".", 1)
    if reload:
        importlib.reload(importlib.import_module(module_path))
    module = importlib.import_module(module_path, package=None)
    return getattr(module, attr_name)
def get_parser(**parser_kwargs):
    """Build the base command-line parser for training/evaluation runs."""

    def str2bool(v):
        # argparse `type=` helper accepting common textual booleans.
        if isinstance(v, bool):
            return v
        val = v.lower()
        if val in ("yes", "true", "t", "y", "1"):
            return True
        if val in ("no", "false", "f", "n", "0"):
            return False
        raise argparse.ArgumentTypeError("Boolean value expected.")

    parser = argparse.ArgumentParser(**parser_kwargs)
    parser.add_argument("-n", "--name", type=str, const=True, default="", nargs="?",
                        help="postfix for logdir")
    parser.add_argument("-r", "--resume", type=str, const=True, default="", nargs="?",
                        help="resume from logdir or checkpoint in logdir")
    parser.add_argument("-b", "--base", nargs="*", metavar="base_config.yaml", default=list(),
                        help="paths to base configs. Loaded from left-to-right. "
                             "Parameters can be overwritten or added with command-line options of the form `--key value`.")
    parser.add_argument("-t", "--train", type=str2bool, const=True, default=False, nargs="?",
                        help="train")
    parser.add_argument("--no-test", type=str2bool, const=True, default=False, nargs="?",
                        help="disable test")
    parser.add_argument("-p", "--project",
                        help="name of new or path to existing project")
    parser.add_argument("-d", "--debug", type=str2bool, nargs="?", const=True, default=False,
                        help="enable post-mortem debugging")
    parser.add_argument("-s", "--seed", type=int, default=23,
                        help="seed for seed_everything")
    parser.add_argument("-f", "--postfix", type=str, default="",
                        help="post-postfix for default name")
    return parser
def nondefault_trainer_args(opt):
    """Return the sorted names of Trainer CLI options where `opt` differs from the defaults."""
    default_parser = Trainer.add_argparse_args(argparse.ArgumentParser())
    defaults = default_parser.parse_args([])
    return sorted(key for key in vars(defaults) if getattr(opt, key) != getattr(defaults, key))
def instantiate_from_config(config):
    """Instantiate the object described by a ``{"target": ..., "params": {...}}`` config."""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    cls = get_obj_from_str(config["target"])
    kwargs = config.get("params", dict())
    return cls(**kwargs)
class WrappedDataset(Dataset):
    """Expose any object supporting ``len()`` and indexing as a pytorch Dataset."""

    def __init__(self, dataset):
        # Stored under `.data`; callers may rely on this attribute name.
        self.data = dataset

    def __len__(self):
        """Number of items in the wrapped object."""
        return len(self.data)

    def __getitem__(self, idx):
        """Delegate item access to the wrapped object."""
        return self.data[idx]
class DataModuleFromConfig(pl.LightningDataModule):
    """Lightning data module that builds its datasets from instantiable configs.

    Each of `train` / `validation` / `test` is an `instantiate_from_config`-style
    config; only the splits that are provided get a corresponding dataloader hook.
    """

    def __init__(self, batch_size, train=None, validation=None, test=None,
                 wrap=False, num_workers=None):
        super().__init__()
        self.batch_size = batch_size
        # Default worker count follows the original heuristic of 2x batch size.
        self.num_workers = batch_size * 2 if num_workers is None else num_workers
        self.wrap = wrap
        self.dataset_configs = dict()
        if train is not None:
            self.dataset_configs["train"] = train
            self.train_dataloader = self._train_dataloader
        if validation is not None:
            self.dataset_configs["validation"] = validation
            self.val_dataloader = self._val_dataloader
        if test is not None:
            self.dataset_configs["test"] = test
            self.test_dataloader = self._test_dataloader

    def prepare_data(self):
        # Instantiate once so datasets can run any download/setup side effects.
        for cfg in self.dataset_configs.values():
            instantiate_from_config(cfg)

    def setup(self, stage=None):
        self.datasets = {
            name: instantiate_from_config(cfg)
            for name, cfg in self.dataset_configs.items()
        }
        if self.wrap:
            self.datasets = {
                name: WrappedDataset(ds) for name, ds in self.datasets.items()
            }

    def _train_dataloader(self):
        return DataLoader(self.datasets["train"], batch_size=self.batch_size,
                          num_workers=self.num_workers, shuffle=True)

    def _val_dataloader(self):
        return DataLoader(self.datasets["validation"],
                          batch_size=self.batch_size,
                          num_workers=self.num_workers)

    def _test_dataloader(self):
        return DataLoader(self.datasets["test"], batch_size=self.batch_size,
                          num_workers=self.num_workers)
class SetupCallback(Callback):
    """Callback that, at the start of training, creates the run's
    log/checkpoint/config directories and saves the resolved configs
    (rank 0 only); other ranks tidy up any duplicate log directory."""

    def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config):
        super().__init__()
        self.resume = resume
        self.now = now
        self.logdir = logdir
        self.ckptdir = ckptdir
        self.cfgdir = cfgdir
        self.config = config
        self.lightning_config = lightning_config

    def on_pretrain_routine_start(self, trainer, pl_module):
        if trainer.global_rank != 0:
            # ModelCheckpoint callback created log directory --- move it
            # under child_runs instead of leaving a duplicate run dir behind.
            if not self.resume and os.path.exists(self.logdir):
                dst, name = os.path.split(self.logdir)
                dst = os.path.join(dst, "child_runs", name)
                os.makedirs(os.path.split(dst)[0], exist_ok=True)
                try:
                    os.rename(self.logdir, dst)
                except FileNotFoundError:
                    # Another rank may have moved it already.
                    pass
            return
        # Rank 0: create logdirs and persist both configs next to the run.
        for directory in (self.logdir, self.ckptdir, self.cfgdir):
            os.makedirs(directory, exist_ok=True)
        print("Project config")
        print(self.config.pretty())
        OmegaConf.save(self.config,
                       os.path.join(self.cfgdir, "{}-project.yaml".format(self.now)))
        print("Lightning config")
        print(self.lightning_config.pretty())
        OmegaConf.save(OmegaConf.create({"lightning": self.lightning_config}),
                       os.path.join(self.cfgdir, "{}-lightning.yaml".format(self.now)))
class ImageLogger(Callback):
    """Periodically render images produced by ``pl_module.log_images`` both
    to disk and to the attached experiment logger.

    Args:
        batch_frequency: log every ``batch_frequency`` batches; unless
            ``increase_log_steps`` is False, additional early logging points
            are added at powers of two up to ``batch_frequency``.
        max_images: cap on the number of images logged per key.
        clamp: clamp image tensors to [-1, 1] before logging.
    """

    def __init__(self, batch_frequency, max_images, clamp=True, increase_log_steps=True):
        super().__init__()
        self.batch_freq = batch_frequency
        self.max_images = max_images
        # Dispatch table: logger class -> backend-specific image logger.
        self.logger_log_images = {
            pl.loggers.WandbLogger: self._wandb,
            pl.loggers.TestTubeLogger: self._testtube,
        }
        self.log_steps = [2 ** n for n in range(int(np.log2(self.batch_freq)) + 1)]
        if not increase_log_steps:
            self.log_steps = [self.batch_freq]
        self.clamp = clamp

    @rank_zero_only
    def _wandb(self, pl_module, images, batch_idx, split):
        # Wandb image logging is deliberately disabled in this setup.
        # (Removed: dead code below this raise was unreachable.)
        raise ValueError("No way wandb")

    @rank_zero_only
    def _testtube(self, pl_module, images, batch_idx, split):
        # Push one image grid per key to the testtube/tensorboard experiment.
        for k in images:
            grid = torchvision.utils.make_grid(images[k])
            grid = (grid + 1.0) / 2.0  # -1,1 -> 0,1; c,h,w
            tag = f"{split}/{k}"
            pl_module.logger.experiment.add_image(
                tag, grid,
                global_step=pl_module.global_step)

    @rank_zero_only
    def log_local(self, save_dir, split, images,
                  global_step, current_epoch, batch_idx):
        """Save each image grid as a PNG under ``save_dir/images/<split>``."""
        root = os.path.join(save_dir, "images", split)
        for k in images:
            grid = torchvision.utils.make_grid(images[k], nrow=4)
            grid = (grid + 1.0) / 2.0  # -1,1 -> 0,1; c,h,w
            grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)
            grid = grid.numpy()
            grid = (grid * 255).astype(np.uint8)
            filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(
                k,
                global_step,
                current_epoch,
                batch_idx)
            path = os.path.join(root, filename)
            os.makedirs(os.path.split(path)[0], exist_ok=True)
            Image.fromarray(grid).save(path)

    def log_img(self, pl_module, batch, batch_idx, split="train"):
        """Run ``pl_module.log_images`` (in eval mode, without grad) and
        forward the result to disk and to the experiment logger."""
        if (self.check_frequency(batch_idx) and  # batch_idx % self.batch_freq == 0
                hasattr(pl_module, "log_images") and
                callable(pl_module.log_images) and
                self.max_images > 0):
            logger = type(pl_module.logger)
            is_train = pl_module.training
            if is_train:
                pl_module.eval()
            with torch.no_grad():
                images = pl_module.log_images(batch, split=split)
            for k in images:
                # Truncate to max_images and detach tensors to CPU.
                N = min(images[k].shape[0], self.max_images)
                images[k] = images[k][:N]
                if isinstance(images[k], torch.Tensor):
                    images[k] = images[k].detach().cpu()
                    if self.clamp:
                        images[k] = torch.clamp(images[k], -1., 1.)
            self.log_local(pl_module.logger.save_dir, split, images,
                           pl_module.global_step, pl_module.current_epoch, batch_idx)
            # NOTE(review): global_step is passed where the backend signatures
            # name the argument batch_idx; both backends ignore that argument.
            logger_log_images = self.logger_log_images.get(logger, lambda *args, **kwargs: None)
            logger_log_images(pl_module, images, pl_module.global_step, split)
            if is_train:
                pl_module.train()

    def check_frequency(self, batch_idx):
        """True when batch_idx hits the regular frequency or one of the
        early power-of-two steps (each early step fires at most once)."""
        if (batch_idx % self.batch_freq) == 0 or (batch_idx in self.log_steps):
            try:
                self.log_steps.pop(0)
            except IndexError:
                pass
            return True
        return False

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        self.log_img(pl_module, batch, batch_idx, split="train")

    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        self.log_img(pl_module, batch, batch_idx, split="val")
if __name__ == "__main__":
    # custom parser to specify config files, train, test and debug mode,
    # postfix, resume.
    # `--key value` arguments are interpreted as arguments to the trainer.
    # `nested.key=value` arguments are interpreted as config parameters.
    # configs are merged from left-to-right followed by command line parameters.
    # model:
    # base_learning_rate: float
    # target: path to lightning module
    # params:
    # key: value
    # data:
    # target: main.DataModuleFromConfig
    # params:
    # batch_size: int
    # wrap: bool
    # train:
    # target: path to train dataset
    # params:
    # key: value
    # validation:
    # target: path to validation dataset
    # params:
    # key: value
    # test:
    # target: path to test dataset
    # params:
    # key: value
    # lightning: (optional, has sane defaults and can be specified on cmdline)
    # trainer:
    # additional arguments to trainer
    # logger:
    # logger to instantiate
    # modelcheckpoint:
    # modelcheckpoint to instantiate
    # callbacks:
    # callback1:
    # target: importpath
    # params:
    # key: value
    # timestamp used to name this run's log directory
    now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
    # add cwd for convenience and to make classes in this file available when
    # running as `python main.py`
    # (in particular `main.DataModuleFromConfig`)
    sys.path.append(os.getcwd())
    # our own flags plus every pytorch-lightning Trainer flag; unrecognized
    # `nested.key=value` args are kept in `unknown` and merged into the config
    parser = get_parser()
    parser = Trainer.add_argparse_args(parser)
    opt, unknown = parser.parse_known_args()
    if opt.name and opt.resume:
        raise ValueError(
            "-n/--name and -r/--resume cannot be specified both."
            "If you want to resume training in a new log folder, "
            "use -n/--name in combination with --resume_from_checkpoint"
        )
    if opt.resume:
        if not os.path.exists(opt.resume):
            raise ValueError("Cannot find {}".format(opt.resume))
        if os.path.isfile(opt.resume):
            # resume from an explicit checkpoint file: recover the run's
            # logdir ("logs/<run>") from the checkpoint path
            paths = opt.resume.split("/")
            # keeps one path component after "logs"
            # NOTE(review): assumes the checkpoint lives under
            # logs/<run>/... -- confirm for differently nested paths
            idx = len(paths)-paths[::-1].index("logs")+1
            logdir = "/".join(paths[:idx])
            ckpt = opt.resume
        else:
            # resume from a run directory: use its last checkpoint
            assert os.path.isdir(opt.resume), opt.resume
            logdir = opt.resume.rstrip("/")
            ckpt = os.path.join(logdir, "checkpoints", "last.ckpt")
        opt.resume_from_checkpoint = ckpt
        # prepend the resumed run's saved configs so newly given ones override
        base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*.yaml")))
        opt.base = base_configs+opt.base
        # reuse the resumed run's name
        _tmp = logdir.split("/")
        nowname = _tmp[_tmp.index("logs")+1]
    else:
        # fresh run: derive a name from -n/--name, else the first base
        # config's filename, else just the timestamp
        if opt.name:
            name = "_"+opt.name
        elif opt.base:
            cfg_fname = os.path.split(opt.base[0])[-1]
            cfg_name = os.path.splitext(cfg_fname)[0]
            name = "_"+cfg_name
        else:
            name = ""
        nowname = now+name+opt.postfix
        logdir = os.path.join("logs", nowname)
    ckptdir = os.path.join(logdir, "checkpoints")
    cfgdir = os.path.join(logdir, "configs")
    seed_everything(opt.seed)
    try:
        # init and save configs
        configs = [OmegaConf.load(cfg) for cfg in opt.base]
        # leftover `nested.key=value` CLI args become an OmegaConf overlay
        # that wins over the base configs (rightmost merge wins)
        cli = OmegaConf.from_dotlist(unknown)
        config = OmegaConf.merge(*configs, cli)
        lightning_config = config.pop("lightning", OmegaConf.create())
        # merge trainer cli with config
        trainer_config = lightning_config.get("trainer", OmegaConf.create())
        # default to ddp
        trainer_config["distributed_backend"] = "ddp"
        # trainer flags explicitly set on the command line override the config
        for k in nondefault_trainer_args(opt):
            trainer_config[k] = getattr(opt, k)
        if not "gpus" in trainer_config:
            # CPU run: the ddp default makes no sense, drop it again
            del trainer_config["distributed_backend"]
            cpu = True
        else:
            gpuinfo = trainer_config["gpus"]
            print(f"Running on GPUs {gpuinfo}")
            cpu = False
        trainer_opt = argparse.Namespace(**trainer_config)
        lightning_config.trainer = trainer_config
        # model
        model = instantiate_from_config(config.model)
        # trainer and callbacks
        trainer_kwargs = dict()
        # default logger configs
        # NOTE wandb < 0.10.0 interferes with shutdown
        # wandb >= 0.10.0 seems to fix it but still interferes with pudb
        # debugging (wrongly sized pudb ui)
        # thus prefer testtube for now
        default_logger_cfgs = {
            "wandb": {
                "target": "pytorch_lightning.loggers.WandbLogger",
                "params": {
                    "name": nowname,
                    "save_dir": logdir,
                    "offline": opt.debug,
                    "id": nowname,
                }
            },
            "testtube": {
                "target": "pytorch_lightning.loggers.TestTubeLogger",
                "params": {
                    "name": "testtube",
                    "save_dir": logdir,
                }
            },
        }
        default_logger_cfg = default_logger_cfgs["testtube"]
        # user-supplied logger config (if any) overrides the default fields
        logger_cfg = lightning_config.logger or OmegaConf.create()
        logger_cfg = OmegaConf.merge(default_logger_cfg, logger_cfg)
        trainer_kwargs["logger"] = instantiate_from_config(logger_cfg)
        # modelcheckpoint - use TrainResult/EvalResult(checkpoint_on=metric) to
        # specify which metric is used to determine best models
        default_modelckpt_cfg = {
            "target": "pytorch_lightning.callbacks.ModelCheckpoint",
            "params": {
                "dirpath": ckptdir,
                "filename": "{epoch:06}",
                "verbose": True,
                "save_last": True,
            }
        }
        # models may declare a `monitor` attribute naming their key metric;
        # then keep the top-3 checkpoints by that metric
        if hasattr(model, "monitor"):
            print(f"Monitoring {model.monitor} as checkpoint metric.")
            default_modelckpt_cfg["params"]["monitor"] = model.monitor
            default_modelckpt_cfg["params"]["save_top_k"] = 3
        modelckpt_cfg = lightning_config.modelcheckpoint or OmegaConf.create()
        modelckpt_cfg = OmegaConf.merge(default_modelckpt_cfg, modelckpt_cfg)
        trainer_kwargs["checkpoint_callback"] = instantiate_from_config(modelckpt_cfg)
        # add callback which sets up log directory
        default_callbacks_cfg = {
            "setup_callback": {
                "target": "main.SetupCallback",
                "params": {
                    "resume": opt.resume,
                    "now": now,
                    "logdir": logdir,
                    "ckptdir": ckptdir,
                    "cfgdir": cfgdir,
                    "config": config,
                    "lightning_config": lightning_config,
                }
            },
            "image_logger": {
                "target": "main.ImageLogger",
                "params": {
                    "batch_frequency": 750,
                    "max_images": 4,
                    "clamp": True
                }
            },
            "learning_rate_logger": {
                "target": "main.LearningRateMonitor",
                "params": {
                    "logging_interval": "step",
                    #"log_momentum": True
                }
            },
        }
        # user-specified callbacks are merged over the defaults by key
        callbacks_cfg = lightning_config.callbacks or OmegaConf.create()
        callbacks_cfg = OmegaConf.merge(default_callbacks_cfg, callbacks_cfg)
        trainer_kwargs["callbacks"] = [instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg]
        trainer = Trainer.from_argparse_args(trainer_opt, **trainer_kwargs)
        # data
        data = instantiate_from_config(config.data)
        # NOTE according to https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html
        # calling these ourselves should not be necessary but it is.
        # lightning still takes care of proper multiprocessing though
        data.prepare_data()
        data.setup()
        # configure learning rate
        bs, base_lr = config.data.params.batch_size, config.model.base_learning_rate
        if not cpu:
            # "gpus" is a comma-separated id string, e.g. "0,1,"
            ngpu = len(lightning_config.trainer.gpus.strip(",").split(','))
        else:
            ngpu = 1
        accumulate_grad_batches = lightning_config.trainer.accumulate_grad_batches or 1
        print(f"accumulate_grad_batches = {accumulate_grad_batches}")
        lightning_config.trainer.accumulate_grad_batches = accumulate_grad_batches
        # linear LR scaling with the effective batch size
        model.learning_rate = accumulate_grad_batches * ngpu * bs * base_lr
        print("Setting learning rate to {:.2e} = {} (accumulate_grad_batches) * {} (num_gpus) * {} (batchsize) * {:.2e} (base_lr)".format(
            model.learning_rate, accumulate_grad_batches, ngpu, bs, base_lr))
        # allow checkpointing via USR1
        def melk(*args, **kwargs):
            # run all checkpoint hooks
            if trainer.global_rank == 0:
                print("Summoning checkpoint.")
                ckpt_path = os.path.join(ckptdir, "last.ckpt")
                trainer.save_checkpoint(ckpt_path)
        def divein(*args, **kwargs):
            # drop into the pudb debugger on USR2 (rank 0 only)
            if trainer.global_rank == 0:
                import pudb; pudb.set_trace()
        import signal
        signal.signal(signal.SIGUSR1, melk)
        signal.signal(signal.SIGUSR2, divein)
        # run
        if opt.train:
            try:
                trainer.fit(model, data)
            except Exception:
                # save a last checkpoint before propagating the failure
                melk()
                raise
        if not opt.no_test and not trainer.interrupted:
            trainer.test(model, data)
    except Exception:
        # with --debug, open a post-mortem debugger on rank 0 before re-raising
        if opt.debug and trainer.global_rank==0:
            try:
                import pudb as debugger
            except ImportError:
                import pdb as debugger
            debugger.post_mortem()
        raise
    finally:
        # move newly created debug project to debug_runs
        if opt.debug and not opt.resume and trainer.global_rank==0:
            dst, name = os.path.split(logdir)
            dst = os.path.join(dst, "debug_runs", name)
            os.makedirs(os.path.split(dst)[0], exist_ok=True)
            os.rename(logdir, dst)
| 21,114 | 35.217839 | 138 | py |
DaVinci | DaVinci-main/taming/modules/util.py | import torch
import torch.nn as nn
def count_params(model):
    """Return the total number of parameter elements in ``model``
    (trainable and frozen alike)."""
    return sum(param.numel() for param in model.parameters())
class ActNorm(nn.Module):
    """Activation normalization with data-dependent initialization.

    On the first forward pass in training mode, ``loc`` and ``scale`` are
    initialized from the per-channel mean and std of that batch so the
    output is approximately zero-mean, unit-variance; afterwards they are
    ordinary learned parameters. Supports (N, C) and (N, C, H, W) inputs
    and an inverse transform via ``reverse``.
    """

    def __init__(self, num_features, logdet=False, affine=True,
                 allow_reverse_init=False):
        # only the affine variant is implemented
        assert affine
        super().__init__()
        self.logdet = logdet
        self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1))
        self.allow_reverse_init = allow_reverse_init
        # 0 until the data-dependent initialization has run
        self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))

    def initialize(self, input):
        """Set loc/scale from the per-channel statistics of ``input``."""
        with torch.no_grad():
            # (N, C, H, W) -> (C, N*H*W): all values of one channel per row
            per_channel = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
            mean = per_channel.mean(1).view(1, -1, 1, 1)
            std = per_channel.std(1).view(1, -1, 1, 1)
            self.loc.data.copy_(-mean)
            self.scale.data.copy_(1 / (std + 1e-6))

    def forward(self, input, reverse=False):
        if reverse:
            return self.reverse(input)
        # promote (N, C) inputs to (N, C, 1, 1) for the broadcasted affine op
        squeeze = input.dim() == 2
        if squeeze:
            input = input[:, :, None, None]
        _, _, height, width = input.shape
        if self.training and self.initialized.item() == 0:
            self.initialize(input)
            self.initialized.fill_(1)
        h = self.scale * (input + self.loc)
        if squeeze:
            h = h.squeeze(-1).squeeze(-1)
        if not self.logdet:
            return h
        # log|det| of the affine map, repeated per batch element
        log_abs = torch.log(torch.abs(self.scale))
        logdet = height * width * torch.sum(log_abs)
        logdet = logdet * torch.ones(input.shape[0]).to(input)
        return h, logdet

    def reverse(self, output):
        """Apply the inverse affine transform."""
        if self.training and self.initialized.item() == 0:
            if not self.allow_reverse_init:
                raise RuntimeError(
                    "Initializing ActNorm in reverse direction is "
                    "disabled by default. Use allow_reverse_init=True to enable."
                )
            self.initialize(output)
            self.initialized.fill_(1)
        squeeze = output.dim() == 2
        if squeeze:
            output = output[:, :, None, None]
        h = output / self.scale - self.loc
        if squeeze:
            h = h.squeeze(-1).squeeze(-1)
        return h
class AbstractEncoder(nn.Module):
    """Base class for conditioning encoders; subclasses must implement
    ``encode``."""

    def __init__(self):
        super().__init__()

    def encode(self, *args, **kwargs):
        # Abstract: concrete encoders define how conditioning is produced.
        raise NotImplementedError
class Labelator(AbstractEncoder):
    """Net2Net Interface for Class-Conditional Model"""

    def __init__(self, n_classes, quantize_interface=True):
        super().__init__()
        self.n_classes = n_classes
        self.quantize_interface = quantize_interface

    def encode(self, c):
        # add a singleton token dimension: (B,) -> (B, 1)
        c = c[:, None]
        if not self.quantize_interface:
            return c
        # mimic a VQ encoder's (quantized, diff, info) triple with the class
        # indices in the info slot
        return c, None, [None, None, c.long()]
class SOSProvider(AbstractEncoder):
    """Conditioning stub for unconditional training: always emits the
    start-of-sequence token for every batch element."""

    def __init__(self, sos_token, quantize_interface=True):
        super().__init__()
        self.sos_token = sos_token
        self.quantize_interface = quantize_interface

    def encode(self, x):
        # replicate sos_token once per batch element, on x's device
        batch = x.shape[0]
        c = (torch.ones(batch, 1) * self.sos_token).long().to(x.device)
        if not self.quantize_interface:
            return c
        # mimic a VQ encoder's (quantized, diff, info) triple
        return c, None, [None, None, c]
| 3,847 | 28.374046 | 85 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.