File size: 25,509 Bytes
9627ce0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
from __future__ import annotations

import io
import itertools
import re
import warnings
from dataclasses import asdict, dataclass, replace
from functools import cached_property
from pathlib import Path
from subprocess import check_output
from tempfile import TemporaryDirectory
from typing import Any, Iterable, Sequence

import biotite.structure as bs
import brotli
import msgpack
import msgpack_numpy
import numpy as np
import torch
from biotite.database import rcsb
from biotite.structure.io.pdb import PDBFile

from src.data.esm.utils import residue_constants
from src.data.esm.utils.constants import esm3 as esm3_c
from src.data.esm.utils.misc import slice_python_object_as_numpy
from src.data.esm.utils.structure.affine3d import Affine3D
from src.data.esm.utils.structure.aligner import Aligner
from src.data.esm.utils.structure.metrics import (
    compute_gdt_ts,
    compute_lddt_ca,
)
from src.data.esm.utils.structure.protein_chain import (
    PathOrBuffer,
    ProteinChain,
)
from src.data.esm.utils.structure.protein_structure import (
    index_by_atom_name,
)

# Enable transparent msgpack (de)serialization of numpy arrays
# (used by ProteinComplex.to_blob / from_blob).
msgpack_numpy.patch()

# PDB chain identifiers are a single character, so these 62 characters are the
# only chain IDs we can emit when writing PDB files.
SINGLE_LETTER_CHAIN_IDS = (
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
)


def protein_chain_to_protein_complex(chain: ProteinChain) -> ProteinComplex:
    """Promote a ProteinChain to a ProteinComplex, splitting on "|" tokens.

    A ProteinChain may encode several chains concatenated with "|" chain-break
    characters in its sequence; each segment becomes its own chain in the
    returned complex, with fresh single-letter chain IDs and positional
    entity ids.
    """
    if "|" not in chain.sequence:
        return ProteinComplex.from_chains([chain])
    break_positions = np.where(np.array(list(chain.sequence)) == "|")[0]
    boundaries = np.concatenate([[0], break_positions, [len(chain)]])
    segments = []
    for start, end in zip(boundaries[:-1], boundaries[1:]):
        if start != 0:
            # For all but the first segment, `start` points at the "|" token
            # itself; step past it.
            start += 1
        segments.append(chain[start:end])
    rebuilt = [
        ProteinChain.from_atom37(
            segment.atom37_positions,
            sequence=segment.sequence,
            chain_id=SINGLE_LETTER_CHAIN_IDS[i],
            entity_id=i,
        )
        for i, segment in enumerate(segments)
    ]
    return ProteinComplex.from_chains(rebuilt)


@dataclass
class ProteinComplexMetadata:
    """Maps the dense integer ids stored per-residue back to the original
    chain/entity identifiers, and records where each chain starts and ends."""

    entity_lookup: dict[int, int]  # dense entity number -> original entity id
    chain_lookup: dict[int, str]  # dense chain number -> original chain id string
    chain_boundaries: list[tuple[int, int]]  # (start, end) residue span per chain

@dataclass
class DockQSingleScore:
    """Scores reported by DockQ for a single chain-pair interface."""

    native_chains: tuple[str, str]  # chain ids of the interface (parsed "Native chains")
    DockQ: float  # DockQ score for this interface
    interface_rms: float  # parsed from DockQ's "irms" field
    ligand_rms: float  # parsed from DockQ's "Lrms" field
    fnat: float  # presumably fraction of native contacts — confirm against DockQ docs
    fnonnat: float  # presumably fraction of non-native contacts — confirm against DockQ docs
    clashes: float
    F1: float
    DockQ_F1: float

@dataclass
class DockQResult:
    """Aggregate result of running DockQ against a native complex."""

    total_dockq: float  # "Total DockQ" value reported by the DockQ CLI
    native_interfaces: int  # number of native interfaces DockQ scored
    # Chain id mapping parsed from DockQ's "model:native mapping" output;
    # direction appears to be model -> native — confirm against DockQ docs.
    chain_mapping: dict[str, str]
    interfaces: dict[tuple[str, str], DockQSingleScore]  # keyed by model chain pair
    # zip(aligned.chain_iter(), native.chain_iter()) gives you the pairing
    # aligned.rmsd(native) should give you a low rmsd irrespective of shuffling
    aligned: ProteinComplex
    aligned_rmsd: float

class AtomIndexer:
    """Name-based view over one of a complex's atom-level arrays.

    ``complex.atoms["CA"]`` (or a list of names) selects the corresponding
    atom37 slots via ``index_by_atom_name``.
    """

    def __init__(self, structure: ProteinComplex, property: str, dim: int):
        self.structure = structure
        self.property = property
        self.dim = dim

    def __getitem__(self, atom_names: str | list[str]) -> np.ndarray:
        source_array = getattr(self.structure, self.property)
        return index_by_atom_name(source_array, atom_names, self.dim)


@dataclass
class ProteinComplex:
    """Dataclass with atom37 representation of an entire protein complex.

    The complex is stored as one concatenated residue axis: chains are
    separated by chain-break tokens ("|" in `sequence`, -1 in the integer id
    arrays, NaN/False rows in the atom arrays, 0 confidence).
    """

    id: str
    sequence: str
    entity_id: np.ndarray  # entities map to unique sequences
    chain_id: np.ndarray  # multiple chains might share an entity id
    sym_id: np.ndarray  # complexes might be copies of the same chain
    residue_index: np.ndarray  # author residue numbering; -1 at chain breaks
    insertion_code: np.ndarray  # PDB insertion codes; "" at chain breaks
    atom37_positions: np.ndarray  # (L, 37, 3) coordinates; NaN where absent
    atom37_mask: np.ndarray  # (L, 37) bool; True where a position is present
    confidence: np.ndarray  # per-residue confidence; 0 at chain breaks
    metadata: ProteinComplexMetadata

    def __post_init__(self):
        l = len(self.sequence)
        assert self.atom37_positions.shape[0] == l, (self.atom37_positions.shape, l)
        assert self.atom37_mask.shape[0] == l, (self.atom37_mask.shape, l)
        assert self.residue_index.shape[0] == l, (self.residue_index.shape, l)
        assert self.insertion_code.shape[0] == l, (self.insertion_code.shape, l)
        assert self.confidence.shape[0] == l, (self.confidence.shape, l)
        assert self.entity_id.shape[0] == l, (self.entity_id.shape, l)
        assert self.chain_id.shape[0] == l, (self.chain_id.shape, l)
        assert self.sym_id.shape[0] == l, (self.sym_id.shape, l)

    def __getitem__(self, idx: int | list[int] | slice | np.ndarray):
        """This function slices protein complexes without consideration of chain breaks
        NOTE: When slicing with a boolean mask, it's possible that the output array won't
        be the expected length. This is because we do our best to preserve chainbreak tokens.
        """

        if isinstance(idx, int):
            idx = [idx]
        if isinstance(idx, list):
            # NOTE(review): an int is wrapped into a list just above and then
            # rejected here, so integer indexing always raises — confirm this
            # is the intended behavior.
            raise ValueError(
                "ProteinComplex doesn't supports indexing with lists of indices"
            )

        if isinstance(idx, np.ndarray):
            # Force chain-break positions to survive a boolean-mask selection,
            # so chain structure is preserved in the result.
            is_chainbreak = np.asarray([s == "|" for s in self.sequence])
            idx = idx.astype(bool) | is_chainbreak

        complex = self._unsafe_slice(idx)
        if len(complex) == 0:
            return complex

        # detect runs of chainbreaks by searching for instances of '||' in complex.sequence
        chainbreak_runs = np.asarray(
            [
                complex.sequence[i : i + 2] == "||"
                for i in range(len(complex.sequence) - 1)
            ]
            + [complex.sequence[-1] == "|"]  # a trailing break is redundant too
        )
        # We should remove as many chainbreaks as possible from the start of the sequence
        for i in range(len(chainbreak_runs)):
            if complex.sequence[i] == "|":
                chainbreak_runs[i] = True
            else:
                break
        complex = complex._unsafe_slice(~chainbreak_runs)
        return complex

    def _unsafe_slice(self, idx: int | list[int] | slice | np.ndarray):
        """Slice every per-residue array without fixing up chain-break tokens."""
        kwargs = {
            name: getattr(self, name)[..., idx]
            for name in (
                "entity_id",
                "chain_id",
                "sym_id",
                "residue_index",
                "insertion_code",
                "confidence",
            )
        }
        # The atom arrays carry trailing atom/coordinate axes.
        kwargs["atom37_positions"] = self.atom37_positions[..., idx, :, :]
        kwargs["atom37_mask"] = self.atom37_mask[..., idx, :]
        kwargs["sequence"] = slice_python_object_as_numpy(self.sequence, idx)
        return replace(self, **kwargs)

    def __len__(self):
        return len(self.sequence)

    @cached_property
    def atoms(self) -> AtomIndexer:
        """Atom-name indexer over ``atom37_positions`` (e.g. ``self.atoms["CA"]``)."""
        return AtomIndexer(self, property="atom37_positions", dim=-2)

    def chain_iter(self) -> Iterable[ProteinChain]:
        """Yield each chain of the complex as a ProteinChain.

        Chains are delimited by "|" tokens in ``sequence``; the tokens
        themselves are not part of any yielded chain.
        """
        break_positions = [-1]
        break_positions += [i for i, aa in enumerate(self.sequence) if aa == "|"]
        break_positions.append(len(self))
        for left, right in zip(break_positions[:-1], break_positions[1:]):
            yield self[left + 1 : right].as_chain()

    def as_chain(self, force_conversion: bool = False) -> ProteinChain:
        """Convert the ProteinComplex to a ProteinChain.

        Args:
            force_conversion (bool): Forces the conversion into a protein chain even if the complex has multiple chains.
                The purpose of this is to use ProteinChain specific functions (like cbeta_contacts).
                When forced, the chain id falls back to "A" and the entity id to None.

        Returns:
            ProteinChain: A chain sharing this complex's arrays and sequence.
        """
        if not force_conversion:
            # Without forcing, only a single-chain/single-entity complex can
            # round-trip its original identifiers from the metadata lookups.
            assert len(np.unique(self.chain_id)) == 1, f"{self.id}"
            assert len(np.unique(self.entity_id)) == 1, f"{self.id}"
            if self.chain_id[0] not in self.metadata.chain_lookup:
                warnings.warn("Chain ID not found in metadata, using 'A' as default")
            if self.entity_id[0] not in self.metadata.entity_lookup:
                warnings.warn("Entity ID not found in metadata, using None as default")
            chain_id = self.metadata.chain_lookup.get(self.chain_id[0], "A")
            entity_id = self.metadata.entity_lookup.get(self.entity_id[0], None)
        else:
            chain_id = "A"
            entity_id = None

        return ProteinChain(
            id=self.id,
            sequence=self.sequence,
            chain_id=chain_id,
            entity_id=entity_id,
            atom37_positions=self.atom37_positions,
            atom37_mask=self.atom37_mask,
            residue_index=self.residue_index,
            insertion_code=self.insertion_code,
            confidence=self.confidence,
        )

    @classmethod
    def from_pdb(cls, path: PathOrBuffer, id: str | None = None) -> "ProteinComplex":
        """Parse a PDB file (model 1) into a ProteinComplex, dropping hetero atoms."""
        structure = PDBFile.read(path).get_structure(
            model=1, extra_fields=["b_factor"]
        )

        parsed_chains = []
        for atom_chain in bs.chain_iter(structure):
            atom_chain = atom_chain[~atom_chain.hetero]
            if len(atom_chain) == 0:
                # e.g. a chain consisting solely of HETATM records
                continue
            parsed_chains.append(ProteinChain.from_atomarray(atom_chain, id))
        return ProteinComplex.from_chains(parsed_chains)

    @classmethod
    def from_rcsb(cls, pdb_id: str):
        """Fetch a protein complex from the RCSB PDB database."""
        # rcsb.fetch returns an in-memory text buffer here (per the annotation);
        # it is consumed directly by from_pdb.
        f: io.StringIO = rcsb.fetch(pdb_id, "pdb")  # type: ignore
        return cls.from_pdb(f, id=pdb_id)

    def to_pdb(self, path: PathOrBuffer, include_insertions: bool = True):
        """Write the complex to ``path`` as a PDB file, one chain at a time."""
        chain_arrays = [
            chain.atom_array if include_insertions else chain.atom_array_no_insertions
            for chain in self.chain_iter()
        ]
        combined = None
        for carr in chain_arrays:
            combined = carr if combined is None else combined + carr
        pdb_file = PDBFile()
        pdb_file.set_structure(combined)
        pdb_file.write(path)

    def to_pdb_string(self, include_insertions: bool = True) -> str:
        """Render the complex as PDB-format text."""
        buffer = io.StringIO()
        self.to_pdb(buffer, include_insertions=include_insertions)
        return buffer.getvalue()

    def normalize_chain_ids_for_pdb(self):
        """Relabel chains positionally as "A", "B", ... for PDB output.

        PDB files have 1-letter chain IDs and no notion of a symmetric index,
        so chains are renamed by position. This may be necessary for DockQ and
        ``to_pdb`` when original IDs are duplicated or longer than one letter.

        Returns:
            ProteinComplex: A new complex with renumbered chain IDs.

        Raises:
            RuntimeError: If there are more chains than available 1-letter IDs.
        """
        ids = SINGLE_LETTER_CHAIN_IDS
        chains = []
        for i, chain in enumerate(self.chain_iter()):
            # Bounds check must run *before* indexing `ids` and must use >=:
            # the previous `i > len(ids)` check ran after `ids[i]`, so an
            # IndexError fired first and the RuntimeError was unreachable.
            if i >= len(ids):
                raise RuntimeError("Too many chains to write to PDB file")
            chain.chain_id = ids[i]
            chains.append(chain)

        return ProteinComplex.from_chains(chains)

    def state_dict(self, backbone_only=False):
        """This state dict is optimized for storage, so it turns things to fp16 whenever
        possible. Note that we also only support int32 residue indices, I'm hoping we don't
        need more than 2**32 residues..."""
        dct = {k: v for k, v in vars(self).items()}
        for k, v in dct.items():
            if isinstance(v, np.ndarray):
                match v.dtype:
                    case np.int64:
                        dct[k] = v.astype(np.int32)
                    case np.float64 | np.float32:
                        dct[k] = v.astype(np.float16)
                    case _:
                        pass
            elif isinstance(v, ProteinComplexMetadata):
                dct[k] = asdict(v)
        dct["atom37_positions"] = dct["atom37_positions"][dct["atom37_mask"]]
        return dct

    def to_blob(self, backbone_only=False) -> bytes:
        """Serialize ``state_dict`` via msgpack and brotli-compress the result."""
        return brotli.compress(msgpack.dumps(self.state_dict(backbone_only)), quality=5)

    @classmethod
    def from_state_dict(cls, dct):
        """Inverse of ``state_dict``: densify the sparse atom coordinates,
        restore float32 dtypes, and rebuild the metadata object.

        The input mapping is left unmodified (the previous implementation
        overwrote ``dct["atom37_positions"]`` in the caller's dict).
        """
        dct = dict(dct)  # shallow copy so the caller's dict is not mutated
        mask = dct["atom37_mask"]
        # Scatter sparsely-stored coordinates back into a dense (L, 37, 3)
        # array, with NaN for atoms that don't exist.
        dense = np.full((*mask.shape, 3), np.nan)
        dense[mask] = dct["atom37_positions"]
        dct["atom37_positions"] = dense
        dct = {
            k: (v.astype(np.float32) if k in ["atom37_positions", "confidence"] else v)
            for k, v in dct.items()
        }
        dct["metadata"] = ProteinComplexMetadata(**dct["metadata"])
        return cls(**dct)

    @classmethod
    def from_blob(cls, input: Path | str | io.BytesIO | bytes):
        """NOTE(@zlin): blob + sparse coding + brotli + fp16 reduces memory
        of chains from 52G/1M chains to 20G/1M chains, I think this is a good first
        shot at compressing and dumping chains to disk. I'm sure there's better ways."""
        match input:
            case Path() | str():
                bytes = Path(input).read_bytes()
            case io.BytesIO():
                bytes = input.getvalue()
            case _:
                bytes = input
        return cls.from_state_dict(
            msgpack.loads(brotli.decompress(bytes), strict_map_key=False)
        )

    @classmethod
    def from_chains(cls, chains: Sequence[ProteinChain]):
        """Concatenate ProteinChains into a ProteinComplex.

        Consecutive chains are joined with a chain-break token: "|" in the
        sequence, -1 in the integer id arrays, NaN/False in the atom arrays,
        and 0 confidence. Chain/entity ids are renumbered densely; the
        original identifiers are kept in the returned complex's metadata.
        """
        if not chains:
            raise ValueError(
                "Cannot create a ProteinComplex from an empty list of chains"
            )

        # TODO: Make a proper protein complex class
        def join_arrays(arrays: Sequence[np.ndarray], sep: np.ndarray):
            # Interleave `sep` between the arrays (like str.join).
            full_array = []
            for array in arrays:
                full_array.append(array)
                full_array.append(sep)
            full_array = full_array[:-1]
            return np.concatenate(full_array, 0)

        # Per-field separator inserted at each chain break.
        sep_tokens = {
            "residue_index": np.array([-1]),
            "insertion_code": np.array([""]),
            "atom37_positions": np.full([1, 37, 3], np.nan),
            "atom37_mask": np.zeros([1, 37], dtype=bool),
            "confidence": np.array([0]),
        }

        array_args: dict[str, np.ndarray] = {
            name: join_arrays([getattr(chain, name) for chain in chains], sep)
            for name, sep in sep_tokens.items()
        }

        multimer_arrays = []
        chain2num_max = -1
        chain2num = {}  # original chain id -> dense chain number
        ent2num_max = -1
        ent2num = {}  # original entity id -> dense entity number
        total_index = 0
        chain_boundaries = []
        for i, c in enumerate(chains):
            num_res = c.residue_index.shape[0]
            if c.chain_id not in chain2num:
                chain2num[c.chain_id] = (chain2num_max := chain2num_max + 1)
            chain_id_array = np.full([num_res], chain2num[c.chain_id], dtype=np.int64)

            # Chains without an entity id each get a fresh entity number.
            if c.entity_id is None:
                entity_num = (ent2num_max := ent2num_max + 1)
            else:
                if c.entity_id not in ent2num:
                    ent2num[c.entity_id] = (ent2num_max := ent2num_max + 1)
                entity_num = ent2num[c.entity_id]
            entity_id_array = np.full([num_res], entity_num, dtype=np.int64)

            # sym_id is simply positional, distinguishing copies of a chain.
            sym_id_array = np.full([num_res], i, dtype=np.int64)

            multimer_arrays.append(
                {
                    "chain_id": chain_id_array,
                    "entity_id": entity_id_array,
                    "sym_id": sym_id_array,
                }
            )

            chain_boundaries.append((total_index, total_index + num_res))
            total_index += num_res + 1  # +1 for the chain-break token

        sep = np.array([-1])
        update = {
            name: join_arrays([dct[name] for dct in multimer_arrays], sep=sep)
            for name in ["chain_id", "entity_id", "sym_id"]
        }
        array_args.update(update)

        metadata = ProteinComplexMetadata(
            chain_boundaries=chain_boundaries,
            chain_lookup={v: k for k, v in chain2num.items()},
            entity_lookup={v: k for k, v in ent2num.items()},
        )

        return cls(
            id=chains[0].id,
            sequence=esm3_c.CHAIN_BREAK_STR.join(chain.sequence for chain in chains),
            metadata=metadata,
            **array_args,
        )

    def infer_oxygen(self) -> ProteinComplex:
        """Oxygen position is fixed given N, CA, C atoms. Infer it if not provided.

        Builds a frame per residue from (CA, C, next residue's N) and places O
        at a constant offset in that frame. Residues whose next-N is missing
        end up with NaN O coordinates and a False mask entry.
        """
        # Constant offset of O in the CA/C/next-N frame — presumably from
        # idealized backbone geometry; not re-derived here.
        O_vector = torch.tensor([0.6240, -1.0613, 0.0103], dtype=torch.float32)
        N, CA, C = torch.from_numpy(self.atoms[["N", "CA", "C"]]).float().unbind(dim=1)
        # torch.roll without `dims` flattens first, so -3 shifts an (L, 3)
        # tensor by exactly one row (assumes N is (L, 3) — TODO confirm).
        N = torch.roll(N, -3)
        # The wrapped-around last row is meaningless; invalidate it.
        N[..., -1, :] = torch.nan

        # Get the frame defined by the CA-C-N atom
        frames = Affine3D.from_graham_schmidt(CA, C, N)
        O = frames.apply(O_vector)
        atom37_positions = self.atom37_positions.copy()
        atom37_mask = self.atom37_mask.copy()

        atom37_positions[:, residue_constants.atom_order["O"]] = O.numpy()
        # Mark O present only where the inferred coordinates are finite.
        atom37_mask[:, residue_constants.atom_order["O"]] = ~np.isnan(
            atom37_positions[:, residue_constants.atom_order["O"]]
        ).any(-1)
        new_chain = replace(
            self, atom37_positions=atom37_positions, atom37_mask=atom37_mask
        )
        return new_chain

    @classmethod
    def concat(cls, objs: list[ProteinComplex]) -> ProteinComplex:
        """Merge complexes that share a PDB id into one complex."""
        unique_ids = {obj.id for obj in objs}
        if len(unique_ids) > 1:
            raise RuntimeError(
                "Concatention of protein complexes across different PDB ids is unsupported"
            )
        all_chains = []
        for obj in objs:
            all_chains.extend(obj.chain_iter())
        return ProteinComplex.from_chains(all_chains)

    def _sanity_check_complexes_are_comparable(self, other: ProteinComplex):
        """Assert ``other`` has the same residue count and chain count as self."""
        assert len(self) == len(other), "Protein complexes must have the same length"
        n_self_chains = len(list(self.chain_iter()))
        n_other_chains = len(list(other.chain_iter()))
        assert (
            n_self_chains == n_other_chains
        ), "Protein complexes must have the same number of chains"

    def lddt_ca(
        self,
        target: ProteinComplex,
        mobile_inds: list[int] | np.ndarray | None = None,
        target_inds: list[int] | np.ndarray | None = None,
        compute_chain_assignment: bool = True,
        **kwargs,
    ) -> float | np.ndarray:
        """Compute the LDDT between this protein complex and another.

        Arguments:
            target (ProteinComplex): The other protein complex to compare to.
            mobile_inds (list[int], np.ndarray, optional): The indices of the mobile atoms to align. These are NOT residue indices
            target_inds (list[int], np.ndarray, optional): The indices of the target atoms to align. These are NOT residue indices
            compute_chain_assignment (bool): If True, first run DockQ to find the
                best chain pairing and score the re-ordered complex (slow).
            **kwargs: Forwarded to ``compute_lddt_ca`` (e.g. ``per_residue``).

        Returns:
            float | np.ndarray: The LDDT score between the two protein chains, either
                a single float or per-residue LDDT scores if `per_residue` is True.
        """
        if compute_chain_assignment:
            # Chain order may differ between the complexes; DockQ resolves the
            # assignment and returns self reordered/superimposed onto target.
            aligned = self.dockq(target).aligned
        else:
            aligned = self
        # NOTE(review): when mobile_inds/target_inds is None, `arr[None]` adds a
        # leading axis rather than selecting all residues — verify this is the
        # intended behavior for the default arguments.
        lddt = compute_lddt_ca(
            torch.tensor(aligned.atom37_positions[mobile_inds]).unsqueeze(0),
            torch.tensor(target.atom37_positions[target_inds]).unsqueeze(0),
            torch.tensor(aligned.atom37_mask[mobile_inds]).unsqueeze(0),
            **kwargs,
        )
        # A single-element result is a global score; otherwise per-residue.
        return float(lddt) if lddt.numel() == 1 else lddt.numpy().flatten()

    def gdt_ts(
        self,
        target: ProteinComplex,
        mobile_inds: list[int] | np.ndarray | None = None,
        target_inds: list[int] | np.ndarray | None = None,
        compute_chain_assignment: bool = True,
        **kwargs,
    ) -> float | np.ndarray:
        """Compute the GDT_TS between this protein complex and another.

        Arguments:
            target (ProteinComplex): The other protein complex to compare to.
            mobile_inds (list[int], np.ndarray, optional): The indices of the mobile atoms to align. These are NOT residue indices
            target_inds (list[int], np.ndarray, optional): The indices of the target atoms to align. These are NOT residue indices
            compute_chain_assignment (bool): If True, first run DockQ to find the
                best chain pairing and score the re-ordered complex (slow).
            **kwargs: Forwarded to ``compute_gdt_ts``.

        Returns:
            float: The GDT_TS score between the two protein chains.
        """
        if compute_chain_assignment:
            # Resolve chain-order ambiguity via DockQ before scoring.
            aligned = self.dockq(target).aligned
        else:
            aligned = self
        # NOTE(review): when mobile_inds/target_inds is None, `arr[None]` adds a
        # leading axis rather than selecting all residues — verify intended.
        gdt_ts = compute_gdt_ts(
            mobile=torch.tensor(
                index_by_atom_name(aligned.atom37_positions[mobile_inds], "CA"),
                dtype=torch.float32,
            ).unsqueeze(0),
            target=torch.tensor(
                index_by_atom_name(target.atom37_positions[target_inds], "CA"),
                dtype=torch.float32,
            ).unsqueeze(0),
            # Score only residues whose CA exists in both complexes.
            atom_exists_mask=torch.tensor(
                index_by_atom_name(aligned.atom37_mask[mobile_inds], "CA", dim=-1)
                & index_by_atom_name(target.atom37_mask[target_inds], "CA", dim=-1)
            ).unsqueeze(0),
            **kwargs,
        )
        return float(gdt_ts) if gdt_ts.numel() == 1 else gdt_ts.numpy().flatten()

    def dockq(self, native: ProteinComplex):
        """Score this complex against ``native`` with the DockQ v2 CLI.

        Because DockQ maps over all possible chain pairings this is quite slow
        (DockQ defaults to 8 CPUs) — do not call it in an inference loop or
        anything that requires fast scoring.

        Args:
            native (ProteinComplex): The reference complex; must have the same
                length and number of chains as ``self``.

        Returns:
            DockQResult: Total DockQ, per-interface scores, the chain mapping,
                and ``self`` reordered + superimposed onto ``native``.

        Raises:
            RuntimeError: If the DockQ executable is missing, or its output
                cannot be parsed (wrong DockQ version).
            ValueError: If chain IDs are not unique single letters.
        """
        self._sanity_check_complexes_are_comparable(native)

        def sanity_check_chain_ids(pc: ProteinComplex):
            # DockQ addresses chains by their single-letter PDB chain IDs, so
            # they must be unique, one character, and writable to a PDB file.
            ids = []
            for i, chain in enumerate(pc.chain_iter()):
                # Was `i > len(...)`: off-by-one (ids are 0-indexed).
                if i >= len(SINGLE_LETTER_CHAIN_IDS):
                    raise ValueError("Too many chains to write to PDB file")
                if len(chain.chain_id) > 1:
                    raise ValueError(
                        "We only supports single letter chain IDs for DockQ"
                    )
                ids.append(chain.chain_id)
            if len(set(ids)) != len(ids):
                raise ValueError(f"Duplicate chain IDs in protein complex: {ids}")
            return ids

        sanity_check_chain_ids(self)
        sanity_check_chain_ids(native)

        with TemporaryDirectory() as tdir:
            tmp_path = Path(tdir)
            self.to_pdb(tmp_path / "self.pdb")
            native.to_pdb(tmp_path / "native.pdb")

            try:
                output = check_output(
                    ["DockQ", tmp_path / "self.pdb", tmp_path / "native.pdb"]
                )
            except FileNotFoundError:
                # The previous installation check was a no-op (`try: pass`), so
                # this error could never fire; raise it where the missing
                # executable is actually detected.
                raise RuntimeError(
                    "DockQ is not installed. Please update your environment."
                )
        lines = output.decode().split("\n")

        # Skip DockQ's header comments; the report starts at the "Model" line.
        start_index = next(
            i for i, line in enumerate(lines) if line.startswith("Model")
        )
        lines = lines[start_index:]

        result = {}
        interfaces = []
        current_interface: dict = {}

        for line in lines:
            line = line.strip()
            if not line:
                continue

            if line.startswith("Model  :"):
                pass  # Tmp pdb file location, it's useless...
            elif line.startswith("Native :"):
                pass  # Tmp pdb file location, it's useless...
            elif line.startswith("Total DockQ"):
                # e.g. "Total DockQ over 3 native interfaces: 0.653 with ABC:CBA model:native mapping"
                total_dockq_match = re.search(
                    r"Total DockQ over (\d+) native interfaces: ([\d.]+) with (.*) model:native mapping",
                    line,
                )
                if total_dockq_match:
                    result["value"] = float(total_dockq_match.group(2))
                    result["native interfaces"] = int(total_dockq_match.group(1))
                    # Per DockQ's own "<model>:<native> model:native mapping"
                    # phrasing, the first group is the model chain ids (the
                    # old locals named these the other way around).
                    model_chains, native_chains = total_dockq_match.group(3).split(":")
                    result["mapping"] = dict(zip(model_chains, native_chains))
                else:
                    raise RuntimeError(
                        "Failed to parse DockQ output, maybe your DockQ version is wrong?"
                    )
            elif line.startswith("Native chains:"):
                # A new per-interface section begins; flush the previous one.
                if current_interface:
                    interfaces.append(current_interface)
                current_interface = {
                    "Native chains": line.split(":")[1].strip().split(", ")
                }
            elif line.startswith("Model chains:"):
                current_interface["Model chains"] = (
                    line.split(":")[1].strip().split(", ")
                )
            elif ":" in line:
                # Generic "key: value" metric line within an interface section.
                key, value = line.split(":", 1)
                current_interface[key.strip()] = float(value.strip())

        if current_interface:
            interfaces.append(current_interface)

        def parse_dict(d: dict[str, Any]) -> DockQSingleScore:
            # Convert one parsed interface section into a typed record.
            return DockQSingleScore(
                native_chains=tuple(d["Native chains"]),  # type: ignore
                DockQ=float(d["DockQ"]),
                interface_rms=float(d["irms"]),
                ligand_rms=float(d["Lrms"]),  # Note the capitalization difference
                fnat=float(d["fnat"]),
                fnonnat=float(d["fnonnat"]),
                clashes=float(d["clashes"]),
                F1=float(d["F1"]),
                DockQ_F1=float(d["DockQ_F1"]),
            )

        # Reorder our chains to match the native chain order using the
        # inverted (native -> model) mapping, then superimpose onto native.
        inv_mapping = {v: k for k, v in result["mapping"].items()}

        self_chain_map = {c.chain_id: c for c in self.chain_iter()}
        realigned = []
        for chain in native.chain_iter():
            realigned.append(self_chain_map[inv_mapping[chain.chain_id]])

        realigned = ProteinComplex.from_chains(realigned)
        aligner = Aligner(realigned, native)
        realigned = aligner.apply(realigned)

        return DockQResult(
            total_dockq=result["value"],
            native_interfaces=result["native interfaces"],
            chain_mapping=result["mapping"],
            interfaces={
                (i["Model chains"][0], i["Model chains"][1]): parse_dict(i)
                for i in interfaces
            },
            aligned=realigned,
            aligned_rmsd=aligner.rmsd,
        )