Formats: text
Libraries: Datasets
Yimingbear committed on commit 2872543 (verified) · 1 Parent(s): e2559a8

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .flake8 +59 -0
  2. .gitattributes +2 -0
  3. .gitignore +146 -0
  4. .pre-commit-config.yaml +51 -0
  5. CODE_OF_CONDUCT.md +127 -0
  6. CONTRIBUTING.md +61 -0
  7. Dockerfile +56 -0
  8. LICENSE +208 -0
  9. Protenix_Technical_Report.pdf +3 -0
  10. README.md +309 -3
  11. af3-dev/release/clusters-by-entity-40.txt +3 -0
  12. assets/constraint_metrics.png +3 -0
  13. assets/license_header.txt +13 -0
  14. assets/mini_performance.png +3 -0
  15. assets/protenix_predictions.gif +3 -0
  16. assets/v0.5.0_metrics.png +3 -0
  17. configs/__init__.py +0 -0
  18. configs/configs_base.py +395 -0
  19. configs/configs_data.py +279 -0
  20. configs/configs_inference.py +35 -0
  21. configs/configs_model_type.py +236 -0
  22. debug.py +4 -0
  23. docs/colabfold_compatible_msa.md +33 -0
  24. docs/docker_installation.md +30 -0
  25. docs/infer_json_format.md +329 -0
  26. docs/kernels.md +24 -0
  27. docs/model_train_inference_cost.md +51 -0
  28. docs/msa_pipeline.md +101 -0
  29. docs/prepare_training_data.md +119 -0
  30. docs/training.md +88 -0
  31. extract_tianrui.py +84 -0
  32. finetune_demo.sh +42 -0
  33. inference_demo.sh +50 -0
  34. protenix/__init__.py +0 -0
  35. protenix/config/__init__.py +14 -0
  36. protenix/config/config.py +288 -0
  37. protenix/config/extend_types.py +55 -0
  38. protenix/data/__init__.py +0 -0
  39. protenix/data/ccd.py +450 -0
  40. protenix/data/compute_esm.py +230 -0
  41. protenix/data/constants.py +977 -0
  42. protenix/data/constraint_featurizer.py +2414 -0
  43. protenix/data/data_pipeline.py +373 -0
  44. protenix/data/dataloader.py +373 -0
  45. protenix/data/dataset.py +1182 -0
  46. protenix/data/esm_featurizer.py +184 -0
  47. protenix/data/featurizer.py +828 -0
  48. protenix/data/filter.py +637 -0
  49. protenix/data/infer_data_pipeline.py +284 -0
  50. protenix/data/json_maker.py +317 -0
.flake8 ADDED
@@ -0,0 +1,59 @@
1
+ # originally from: https://github.com/pytorch/pytorch/blob/main/.flake8
2
+ [flake8]
3
+ # NOTE: **Mirror any changes** to this file the [tool.ruff] config in pyproject.toml
4
+ # before we can fully move to use ruff
5
+ enable-extensions = G
6
+ select = B,C,E,F,G,P,SIM1,SIM911,T4,W,B9,TOR0,TOR1,TOR2,TOR9
7
+ max-line-length = 120
8
+ # C408 ignored because we like the dict keyword argument syntax
9
+ # E501 is not flexible enough, we're using B950 instead
10
+ ignore =
11
+ E203,E305,E402,E501,E704,E721,E741,F405,F824,F841,F999,W503,W504,C408,E302,W291,E303,
12
+ # shebang has extra meaning in fbcode lints, so I think it's not worth trying
13
+ # to line this up with executable bit
14
+ EXE001,
15
+ # these ignores are from flake8-bugbear; please fix!
16
+ B007,B008,B017,B019,B023,B028,B903,B904,B905,B906,B907
17
+ # these ignores are from flake8-comprehensions; please fix!
18
+ C407,
19
+ # these ignores are from flake8-logging-format; please fix!
20
+ G100,G101,G200
21
+ # these ignores are from flake8-simplify. please fix or ignore with commented reason
22
+ SIM105,SIM108,SIM110,SIM111,SIM113,SIM114,SIM115,SIM116,SIM117,SIM118,SIM119,SIM12,
23
+ # flake8-simplify code styles
24
+ SIM102,SIM103,SIM106,SIM112,
25
+
26
+ per-file-ignores =
27
+ __init__.py: F401
28
+ test/**: F821
29
+ test/**/__init__.py: F401,F821
30
+ torch/utils/cpp_extension.py: B950
31
+ torchgen/api/types/__init__.py: F401,F403
32
+ torchgen/executorch/api/types/__init__.py: F401,F403
33
+ test/dynamo/test_higher_order_ops.py: B950
34
+ test/dynamo/test_graph_break_messages.py: B950
35
+ torch/testing/_internal/dynamo_test_failures.py: B950
36
+ # TOR901 is only for test, we want to ignore it for everything else.
37
+ # It's not easy to configure this without affecting other per-file-ignores,
38
+ # so we explicitly list every file where it's violated outside of test.
39
+ torch/__init__.py: F401,TOR901
40
+ torch/_custom_op/impl.py: TOR901
41
+ torch/_export/serde/upgrade.py: TOR901
42
+ torch/_functorch/vmap.py: TOR901
43
+ torch/_inductor/test_operators.py: TOR901
44
+ torch/_library/abstract_impl.py: TOR901
45
+ torch/_meta_registrations.py: TOR901
46
+ torch/_prims/__init__.py: F401,TOR901
47
+ torch/_prims/rng_prims.py: TOR901
48
+ torch/ao/quantization/fx/_decomposed.py: TOR901
49
+ torch/distributed/_functional_collectives.py: TOR901
50
+ torch/distributed/_spmd/data_parallel.py: TOR901
51
+ torch/distributed/_tensor/_collective_utils.py: TOR901
52
+ # This is a full package that happen to live within the test
53
+ # folder, so ok to skip
54
+ test/cpp_extensions/open_registration_extension/pytorch_openreg/_aten_impl.py: TOR901
55
+ optional-ascii-coding = True
56
+ exclude =
57
+ ./.git,
58
+ ./venv,
59
+ *.pyi
.gitattributes CHANGED
@@ -57,3 +57,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ Protenix_Technical_Report.pdf filter=lfs diff=lfs merge=lfs -text
+ af3-dev/release/clusters-by-entity-40.txt filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,146 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ *__pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+ *.o
9
+ *.obj
10
+ *.d
11
+ # ninja related
12
+ *ninja*
13
+ lock
14
+ # Distribution / packaging
15
+ .Python
16
+ .vscode
17
+ build/
18
+ develop-eggs/
19
+ dist/
20
+ downloads/
21
+ eggs/
22
+ .eggs/
23
+ lib/
24
+ lib64/
25
+ parts/
26
+ sdist/
27
+ var/
28
+ wheels/
29
+ share/python-wheels/
30
+ *.egg-info/
31
+ .installed.cfg
32
+ *.egg
33
+ MANIFEST
34
+
35
+ # PyInstaller
36
+ # Usually these files are written by a python script from a template
37
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
38
+ *.manifest
39
+ *.spec
40
+
41
+ # Installer logs
42
+ pip-log.txt
43
+ pip-delete-this-directory.txt
44
+
45
+ # Unit test / coverage reports
46
+ .tox/
47
+ .nox/
48
+ .coverage
49
+ .coverage.*
50
+ .cache
51
+ *.cover
52
+ *.py,cover
53
+ .hypothesis/
54
+ .pytest_cache/
55
+
56
+ # Translations
57
+ *.mo
58
+ *.pot
59
+
60
+ # Django stuff:
61
+ *.log
62
+ local_settings.py
63
+ db.sqlite3
64
+ db.sqlite3-journal
65
+
66
+ # Flask stuff:
67
+ instance/
68
+ .webassets-cache
69
+
70
+ # Scrapy stuff:
71
+ .scrapy
72
+
73
+ # Sphinx documentation
74
+ docs/_build/
75
+
76
+ # PyBuilder
77
+ .pybuilder/
78
+ target/
79
+
80
+ # Jupyter Notebook
81
+ .ipynb_checkpoints
82
+
83
+ # IPython
84
+ profile_default/
85
+ ipython_config.py
86
+
87
+ # data cache and checkpoints
88
+ data_cache/
89
+ checkpoints/
90
+
91
+ .pdm.toml
92
+
93
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
94
+ __pypackages__/
95
+
96
+ # Celery stuff
97
+ celerybeat-schedule
98
+ celerybeat.pid
99
+
100
+ # SageMath parsed files
101
+ *.sage.py
102
+
103
+ # Environments
104
+ .env
105
+ .venv
106
+ env/
107
+ venv/
108
+ ENV/
109
+ env.bak/
110
+ venv.bak/
111
+
112
+ # Spyder project settings
113
+ .spyderproject
114
+ .spyproject
115
+
116
+ # Rope project settings
117
+ .ropeproject
118
+
119
+ # mkdocs documentation
120
+ /site
121
+
122
+ # mypy
123
+ .mypy_cache/
124
+ .dmypy.json
125
+ dmypy.json
126
+
127
+ # Pyre type checker
128
+ .pyre/
129
+
130
+ # pytype static type analyzer
131
+ .pytype/
132
+
133
+ # Cython debug symbols
134
+ cython_debug/
135
+
136
+ wandb/
137
+ output/
138
+ release_data/
139
+
140
+ coord/
141
+ ModelGenerator/
142
+ protenix_1d_embeddings/
143
+ protenix_3d_embeddings/
144
+ second_stage/
145
+ training_json/
146
+ examples/
.pre-commit-config.yaml ADDED
@@ -0,0 +1,51 @@
1
+ # originally from: https://github.com/pytorch/torchtune/blob/main/.pre-commit-config.yaml
2
+ exclude: 'build'
3
+
4
+ default_language_version:
5
+ python: python3
6
+
7
+ repos:
8
+ - repo: https://github.com/pre-commit/pre-commit-hooks
9
+ rev: v5.0.0
10
+ hooks:
11
+ - id: trailing-whitespace
12
+ - id: check-ast
13
+ - id: check-merge-conflict
14
+ - id: no-commit-to-branch
15
+ args: ['--branch=main']
16
+ - id: check-added-large-files
17
+ args: ['--maxkb=1000']
18
+ - id: end-of-file-fixer
19
+ exclude: '^(.*\.svg)$'
20
+
21
+ - repo: https://github.com/Lucas-C/pre-commit-hooks
22
+ rev: v1.5.5
23
+ hooks:
24
+ - id: insert-license
25
+ files: \.py$|\.sh$
26
+ args:
27
+ - --license-filepath
28
+ - assets/license_header.txt
29
+
30
+ - repo: https://github.com/pycqa/flake8
31
+ rev: 7.1.1
32
+ hooks:
33
+ - id: flake8
34
+ additional_dependencies:
35
+ - flake8-bugbear == 22.4.25
36
+ - pep8-naming == 0.12.1
37
+ - torchfix
38
+ args: ['--config=.flake8']
39
+
40
+ - repo: https://github.com/omnilib/ufmt
41
+ rev: v2.3.0
42
+ hooks:
43
+ - id: ufmt
44
+ additional_dependencies:
45
+ - black == 22.12.0
46
+ - usort == 1.0.5
47
+
48
+ - repo: https://github.com/jsh9/pydoclint
49
+ rev: d88180a8632bb1602a4d81344085cf320f288c5a
50
+ hooks:
51
+ - id: pydoclint
CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,127 @@
1
+ # Contributor Covenant Code of Conduct
2
+
3
+ ## Our Pledge
4
+
5
+ We as members, contributors, and leaders pledge to make participation in our
6
+ community a harassment-free experience for everyone, regardless of age, body
7
+ size, visible or invisible disability, ethnicity, sex characteristics, gender
8
+ identity and expression, level of experience, education, socio-economic status,
9
+ nationality, personal appearance, race, religion, or sexual identity
10
+ and orientation.
11
+
12
+ We pledge to act and interact in ways that contribute to an open, welcoming,
13
+ diverse, inclusive, and healthy community.
14
+
15
+ ## Our Standards
16
+
17
+ Examples of behavior that contributes to a positive environment for our
18
+ community include:
19
+
20
+ * Demonstrating empathy and kindness toward other people
21
+ * Being respectful of differing opinions, viewpoints, and experiences
22
+ * Giving and gracefully accepting constructive feedback
23
+ * Accepting responsibility and apologizing to those affected by our mistakes,
24
+ and learning from the experience
25
+ * Focusing on what is best not just for us as individuals, but for the
26
+ overall community
27
+
28
+ Examples of unacceptable behavior include:
29
+
30
+ * The use of sexualized language or imagery, and sexual attention or
31
+ advances of any kind
32
+ * Trolling, insulting or derogatory comments, and personal or political attacks
33
+ * Public or private harassment
34
+ * Publishing others' private information, such as a physical or email
35
+ address, without their explicit permission
36
+ * Other conduct which could reasonably be considered inappropriate in a
37
+ professional setting
38
+
39
+ ## Enforcement Responsibilities
40
+
41
+ Community leaders are responsible for clarifying and enforcing our standards of
42
+ acceptable behavior and will take appropriate and fair corrective action in
43
+ response to any behavior that they deem inappropriate, threatening, offensive,
44
+ or harmful.
45
+
46
+ Community leaders have the right and responsibility to remove, edit, or reject
47
+ comments, commits, code, wiki edits, issues, and other contributions that are
48
+ not aligned to this Code of Conduct, and will communicate reasons for moderation
49
+ decisions when appropriate.
50
+
51
+ ## Scope
52
+
53
+ This Code of Conduct applies within all community spaces, and also applies when
54
+ an individual is officially representing the community in public spaces.
55
+ Examples of representing our community include using an official e-mail address,
56
+ posting via an official social media account, or acting as an appointed
57
+ representative at an online or offline event.
58
+
59
+ ## Enforcement
60
+
61
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
62
+ reported to the community leaders responsible for enforcement.
63
+ All complaints will be reviewed and investigated promptly and fairly.
64
+
65
+ All community leaders are obligated to respect the privacy and security of the
66
+ reporter of any incident.
67
+
68
+ ## Enforcement Guidelines
69
+
70
+ Community leaders will follow these Community Impact Guidelines in determining
71
+ the consequences for any action they deem in violation of this Code of Conduct:
72
+
73
+ ### 1. Correction
74
+
75
+ **Community Impact**: Use of inappropriate language or other behavior deemed
76
+ unprofessional or unwelcome in the community.
77
+
78
+ **Consequence**: A private, written warning from community leaders, providing
79
+ clarity around the nature of the violation and an explanation of why the
80
+ behavior was inappropriate. A public apology may be requested.
81
+
82
+ ### 2. Warning
83
+
84
+ **Community Impact**: A violation through a single incident or series
85
+ of actions.
86
+
87
+ **Consequence**: A warning with consequences for continued behavior. No
88
+ interaction with the people involved, including unsolicited interaction with
89
+ those enforcing the Code of Conduct, for a specified period of time. This
90
+ includes avoiding interactions in community spaces as well as external channels
91
+ like social media. Violating these terms may lead to a temporary or
92
+ permanent ban.
93
+
94
+ ### 3. Temporary Ban
95
+
96
+ **Community Impact**: A serious violation of community standards, including
97
+ sustained inappropriate behavior.
98
+
99
+ **Consequence**: A temporary ban from any sort of interaction or public
100
+ communication with the community for a specified period of time. No public or
101
+ private interaction with the people involved, including unsolicited interaction
102
+ with those enforcing the Code of Conduct, is allowed during this period.
103
+ Violating these terms may lead to a permanent ban.
104
+
105
+ ### 4. Permanent Ban
106
+
107
+ **Community Impact**: Demonstrating a pattern of violation of community
108
+ standards, including sustained inappropriate behavior, harassment of an
109
+ individual, or aggression toward or disparagement of classes of individuals.
110
+
111
+ **Consequence**: A permanent ban from any sort of public interaction within
112
+ the community.
113
+
114
+ ## Attribution
115
+
116
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage],
117
+ version 2.0, available at
118
+ https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
119
+
120
+ Community Impact Guidelines were inspired by [Mozilla's code of conduct
121
+ enforcement ladder](https://github.com/mozilla/diversity).
122
+
123
+ [homepage]: https://www.contributor-covenant.org
124
+
125
+ For answers to common questions about this code of conduct, see the FAQ at
126
+ https://www.contributor-covenant.org/faq. Translations are available at
127
+ https://www.contributor-covenant.org/translations.
CONTRIBUTING.md ADDED
@@ -0,0 +1,61 @@
+ # Contributing
+
+ Thank you for investing your time in contributing to the Protenix project!
+
+ Read our [Code of Conduct](./CODE_OF_CONDUCT.md) to keep our community approachable and respectable.
+
+ This guide details how to use issues and pull requests to improve the Protenix project.
+
+ ## General Guidelines
+
+ ### Pull Requests
+
+ Make sure to keep Pull Requests small and functional to make them easier to review, understand, and look up in commit history. This repository uses "Squash and Commit" to keep our history clean and make it easier to revert changes on a per-PR basis.
+
+ Adding the appropriate documentation, unit tests and e2e tests as part of a feature is the responsibility of the feature owner, whether it is done in the same Pull Request or not.
+
+ Pull Requests should follow the "subject: message" format, where the subject describes what part of the code is being modified.
+
+ Refer to the template for more information on what goes into a PR description.
+
+ ### Design Docs
+
+ A contributor proposes a design with a PR on the repository to allow for revisions and discussions. If a design needs to be discussed before formulating a document for it, make use of a Google Doc and a GitHub issue to involve the community in the discussion.
+
+ ### GitHub Issues
+
+ GitHub Issues are used to file bugs, work items, and feature requests with actionable items/issues (please refer to the "Reporting Bugs/Feature Requests" section below for more information).
+
+ ### Reporting Bugs/Feature Requests
+
+ We welcome you to use the GitHub issue tracker to report bugs or suggest features that have actionable items/issues (as opposed to introducing a feature request on GitHub Discussions).
+
+ When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
+
+ - A reproducible test case or series of steps
+ - The version of the code being used
+ - Any modifications you've made relevant to the bug
+ - Anything unusual about your environment or deployment
+
+ ## Contributing via Pull Requests
+
+ ### Find an interesting issue
+
+ If you spot a problem with the project, [search if an issue already exists](https://github.com/bytedance/protenix/issues). If a related issue doesn't exist, you can open a new issue using the [issue template](https://github.com/bytedance/protenix/issues/new/choose).
+
+ ### Solve an issue
+
+ Please check `DEVELOPMENT.md` in the relevant subfolder to get familiar with running and testing the code.
+
+ ### Open a pull request
+
+ When you're done making the changes, open a pull request and fill in the PR template so we can better review your PR. The template helps reviewers understand your changes and the purpose of your pull request.
+
+ Don't forget to link the PR to the issue if you are solving one.
+
+ If you run into any merge issues, check out this [git tutorial](https://lab.github.com/githubtraining/managing-merge-conflicts) to help you resolve merge conflicts and other issues.
+
+
+ ## Finding contributions to work on
+
+ Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at 'help wanted' and 'good first issue' issues is a great place to start.
Dockerfile ADDED
@@ -0,0 +1,56 @@
1
+ FROM ai4s-cn-beijing.cr.volces.com/pytorch-mirror/pytorch:2.3.1-cuda12.1-cudnn8-devel
2
+
3
+ ENV DEBIAN_FRONTEND=noninteractive
4
+ ENV TZ=Asia/Shanghai
5
+ RUN apt-get update && \
6
+ apt-get install -y --no-install-recommends \
7
+ wget \
8
+ g++ \
9
+ gcc \
10
+ libc6-dev \
11
+ make zlib1g zlib1g-dev \
12
+ git git-lfs expect zsh vim wget curl unzip zip cmake cmake-curses-gui libgdbm-dev libnss3-dev libssl-dev libreadline-dev libffi-dev \
13
+ && apt-get clean \
14
+ && rm -rf /var/lib/apt/lists/*
15
+
16
+ RUN apt update && apt -y install postgresql
17
+
18
+ RUN DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \
19
+ hmmer cmake cmake-curses-gui \
20
+ && git clone --branch v3.3.0 https://github.com/soedinglab/hh-suite.git /tmp/hh-suite \
21
+ && mkdir /tmp/hh-suite/build \
22
+ && cd /tmp/hh-suite/build \
23
+ && cmake -DCMAKE_INSTALL_PREFIX=/opt/hhsuite .. \
24
+ && make -j 32 && make install \
25
+ && ln -s /opt/hhsuite/bin/* /usr/bin \
26
+ && cd - \
27
+ && rm -rf /tmp/hh-suite
28
+
29
+ RUN apt-get install -yq --no-install-recommends iproute2 curl
30
+ # Add PIP Package
31
+ RUN pip3 --no-cache-dir install \
32
+ scipy \
33
+ ml_collections \
34
+ tqdm \
35
+ pandas \
36
+ optree \
37
+ rdkit
38
+
39
+ # Add openfold dependency
40
+ RUN pip3 --no-cache-dir install \
41
+ biopython==1.83 \
42
+ modelcif==0.7
43
+
44
+ # Add datapipeline dependency
45
+ RUN pip3 --no-cache-dir install \
46
+ biotite==1.0.1 \
47
+ scikit-learn \
48
+ scikit-learn-extra \
49
+ fair-esm \
50
+ deepspeed \
51
+ protobuf==3.20.2 tos icecream ipdb wandb numpy==1.26.3 matplotlib==3.9.2 ipywidgets py3Dmol
52
+
53
+ # For H20 compatibility
54
+ RUN pip3 install --no-cache-dir nvidia-cublas-cu12==12.4.5.8 --no-deps
55
+ RUN git clone -b v3.5.1 https://github.com/NVIDIA/cutlass.git /opt/cutlass
56
+ ENV CUTLASS_PATH=/opt/cutlass
LICENSE ADDED
@@ -0,0 +1,208 @@
1
+
2
+ Apache License
3
+ Version 2.0, January 2004
4
+ http://www.apache.org/licenses/
5
+
6
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
+
8
+ 1. Definitions.
9
+
10
+ "License" shall mean the terms and conditions for use, reproduction,
11
+ and distribution as defined by Sections 1 through 9 of this document.
12
+
13
+ "Licensor" shall mean the copyright owner or entity authorized by
14
+ the copyright owner that is granting the License.
15
+
16
+ "Legal Entity" shall mean the union of the acting entity and all
17
+ other entities that control, are controlled by, or are under common
18
+ control with that entity. For the purposes of this definition,
19
+ "control" means (i) the power, direct or indirect, to cause the
20
+ direction or management of such entity, whether by contract or
21
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
+ outstanding shares, or (iii) beneficial ownership of such entity.
23
+
24
+ "You" (or "Your") shall mean an individual or Legal Entity
25
+ exercising permissions granted by this License.
26
+
27
+ "Source" form shall mean the preferred form for making modifications,
28
+ including but not limited to software source code, documentation
29
+ source, and configuration files.
30
+
31
+ "Object" form shall mean any form resulting from mechanical
32
+ transformation or translation of a Source form, including but
33
+ not limited to compiled object code, generated documentation,
34
+ and conversions to other media types.
35
+
36
+ "Work" shall mean the work of authorship, whether in Source or
37
+ Object form, made available under the License, as indicated by a
38
+ copyright notice that is included in or attached to the work
39
+ (an example is provided in the Appendix below).
40
+
41
+ "Derivative Works" shall mean any work, whether in Source or Object
42
+ form, that is based on (or derived from) the Work and for which the
43
+ editorial revisions, annotations, elaborations, or other modifications
44
+ represent, as a whole, an original work of authorship. For the purposes
45
+ of this License, Derivative Works shall not include works that remain
46
+ separable from, or merely link (or bind by name) to the interfaces of,
47
+ the Work and Derivative Works thereof.
48
+
49
+ "Contribution" shall mean any work of authorship, including
50
+ the original version of the Work and any modifications or additions
51
+ to that Work or Derivative Works thereof, that is intentionally
52
+ submitted to Licensor for inclusion in the Work by the copyright owner
53
+ or by an individual or Legal Entity authorized to submit on behalf of
54
+ the copyright owner. For the purposes of this definition, "submitted"
55
+ means any form of electronic, verbal, or written communication sent
56
+ to the Licensor or its representatives, including but not limited to
57
+ communication on electronic mailing lists, source code control systems,
58
+ and issue tracking systems that are managed by, or on behalf of, the
59
+ Licensor for the purpose of discussing and improving the Work, but
60
+ excluding communication that is conspicuously marked or otherwise
61
+ designated in writing by the copyright owner as "Not a Contribution."
62
+
63
+ "Contributor" shall mean Licensor and any individual or Legal Entity
64
+ on behalf of whom a Contribution has been received by Licensor and
65
+ subsequently incorporated within the Work.
66
+
67
+ 2. Grant of Copyright License. Subject to the terms and conditions of
68
+ this License, each Contributor hereby grants to You a perpetual,
69
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
+ copyright license to reproduce, prepare Derivative Works of,
71
+ publicly display, publicly perform, sublicense, and distribute the
72
+ Work and such Derivative Works in Source or Object form.
73
+
74
+ 3. Grant of Patent License. Subject to the terms and conditions of
75
+ this License, each Contributor hereby grants to You a perpetual,
76
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
+ (except as stated in this section) patent license to make, have made,
78
+ use, offer to sell, sell, import, and otherwise transfer the Work,
79
+ where such license applies only to those patent claims licensable
80
+ by such Contributor that are necessarily infringed by their
81
+ Contribution(s) alone or by combination of their Contribution(s)
82
+ with the Work to which such Contribution(s) was submitted. If You
83
+ institute patent litigation against any entity (including a
84
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
85
+ or a Contribution incorporated within the Work constitutes direct
86
+ or contributory patent infringement, then any patent licenses
87
+ granted to You under this License for that Work shall terminate
88
+ as of the date such litigation is filed.
89
+
90
+ 4. Redistribution. You may reproduce and distribute copies of the
91
+ Work or Derivative Works thereof in any medium, with or without
92
+ modifications, and in Source or Object form, provided that You
93
+ meet the following conditions:
94
+
95
+ (a) You must give any other recipients of the Work or
96
+ Derivative Works a copy of this License; and
97
+
98
+ (b) You must cause any modified files to carry prominent notices
99
+ stating that You changed the files; and
100
+
101
+ (c) You must retain, in the Source form of any Derivative Works
102
+ that You distribute, all copyright, patent, trademark, and
103
+ attribution notices from the Source form of the Work,
104
+ excluding those notices that do not pertain to any part of
105
+ the Derivative Works; and
106
+
107
+ (d) If the Work includes a "NOTICE" text file as part of its
108
+ distribution, then any Derivative Works that You distribute must
109
+ include a readable copy of the attribution notices contained
110
+ within such NOTICE file, excluding those notices that do not
111
+ pertain to any part of the Derivative Works, in at least one
112
+ of the following places: within a NOTICE text file distributed
113
+ as part of the Derivative Works; within the Source form or
114
+ documentation, if provided along with the Derivative Works; or,
115
+ within a display generated by the Derivative Works, if and
116
+ wherever such third-party notices normally appear. The contents
117
+ of the NOTICE file are for informational purposes only and
118
+ do not modify the License. You may add Your own attribution
119
+ notices within Derivative Works that You distribute, alongside
120
+ or as an addendum to the NOTICE text from the Work, provided
121
+ that such additional attribution notices cannot be construed
122
+ as modifying the License.
123
+
124
+ You may add Your own copyright statement to Your modifications and
125
+ may provide additional or different license terms and conditions
126
+ for use, reproduction, or distribution of Your modifications, or
127
+ for any such Derivative Works as a whole, provided Your use,
128
+ reproduction, and distribution of the Work otherwise complies with
129
+ the conditions stated in this License.
130
+
131
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
132
+ any Contribution intentionally submitted for inclusion in the Work
133
+ by You to the Licensor shall be under the terms and conditions of
134
+ this License, without any additional terms or conditions.
135
+ Notwithstanding the above, nothing herein shall supersede or modify
136
+ the terms of any separate license agreement you may have executed
137
+ with Licensor regarding such Contributions.
138
+
139
+ 6. Trademarks. This License does not grant permission to use the trade
140
+ names, trademarks, service marks, or product names of the Licensor,
141
+ except as required for reasonable and customary use in describing the
142
+ origin of the Work and reproducing the content of the NOTICE file.
143
+
144
+ 7. Disclaimer of Warranty. Unless required by applicable law or
145
+ agreed to in writing, Licensor provides the Work (and each
146
+ Contributor provides its Contributions) on an "AS IS" BASIS,
147
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
+ implied, including, without limitation, any warranties or conditions
149
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
+ PARTICULAR PURPOSE. You are solely responsible for determining the
151
+ appropriateness of using or redistributing the Work and assume any
152
+ risks associated with Your exercise of permissions under this License.
153
+
154
+ 8. Limitation of Liability. In no event and under no legal theory,
155
+ whether in tort (including negligence), contract, or otherwise,
156
+ unless required by applicable law (such as deliberate and grossly
157
+ negligent acts) or agreed to in writing, shall any Contributor be
158
+ liable to You for damages, including any direct, indirect, special,
159
+ incidental, or consequential damages of any character arising as a
160
+ result of this License or out of the use or inability to use the
161
+ Work (including but not limited to damages for loss of goodwill,
162
+ work stoppage, computer failure or malfunction, or any and all
163
+ other commercial damages or losses), even if such Contributor
164
+ has been advised of the possibility of such damages.
165
+
166
+ 9. Accepting Warranty or Additional Liability. While redistributing
167
+ the Work or Derivative Works thereof, You may choose to offer,
168
+ and charge a fee for, acceptance of support, warranty, indemnity,
169
+ or other liability obligations and/or rights consistent with this
170
+ License. However, in accepting such obligations, You may act only
171
+ on Your own behalf and on Your sole responsibility, not on behalf
172
+ of any other Contributor, and only if You agree to indemnify,
173
+ defend, and hold each Contributor harmless for any liability
174
+ incurred by, or claims asserted against, such Contributor by reason
175
+ of your accepting any such warranty or additional liability.
176
+
177
+ END OF TERMS AND CONDITIONS
178
+
179
+ APPENDIX: How to apply the Apache License to your work.
180
+
181
+ To apply the Apache License to your work, attach the following
182
+ boilerplate notice, with the fields enclosed by brackets "[]"
183
+ replaced with your own identifying information. (Don't include
184
+ the brackets!) The text should be enclosed in the appropriate
185
+ comment syntax for the file format. We also recommend that a
186
+ file or class name and description of purpose be included on the
187
+ same "printed page" as the copyright notice for easier
188
+ identification within third-party archives.
189
+
190
+ Copyright 2024 ByteDance and/or its affiliates.
191
+
192
+ Licensed under the Apache License, Version 2.0 (the "License");
193
+ you may not use this file except in compliance with the License.
194
+ You may obtain a copy of the License at
195
+
196
+ http://www.apache.org/licenses/LICENSE-2.0
197
+
198
+ Unless required by applicable law or agreed to in writing, software
199
+ distributed under the License is distributed on an "AS IS" BASIS,
200
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201
+ See the License for the specific language governing permissions and
202
+ limitations under the License.
203
+
+ Implementation of the `LayerNorm` operators (in protenix/model/layer_norm/) referred to [OneFlow]
+ (https://github.com/Oneflow-Inc/oneflow) and [FastFold](https://github.com/hpcaitech/FastFold).
+ We used [OpenFold](https://github.com/aqlaboratory/openfold) for some of the
+ implementations (in protenix/openfold_local/), except the `LayerNorm` part. The works OneFlow,
+ FastFold and OpenFold are licensed under Apache License 2.0.
Protenix_Technical_Report.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b4150ea75c2772c27e8fb64153434b568850bd724ac22d47cc73158737e3ee44
3
+ size 3586577
README.md CHANGED
@@ -1,3 +1,309 @@
- ---
- license: apache-2.0
- ---
+ # Protenix: Protein + X
+
+
+ > 📣📣📣 **We're hiring!** \
+ > Positions in **_Beijing, China_** and **_Seattle, US_**! \
+ > Interested in machine learning, computational chemistry/biology, structural biology, or drug discovery? \
+ > 👉 [**Join us »**](#join-us)
+
+
+
+ <div align="center" style="margin: 20px 0;">
+ <span style="margin: 0 10px;">⚡ <a href="https://protenix-server.com">Protenix Web Server</a></span>
+ &bull; <span style="margin: 0 10px;">📄 <a href="https://www.biorxiv.org/content/10.1101/2025.01.08.631967v1">Technical Report</a></span>
+ </div>
+
+ <div align="center">
+
+ [![Twitter](https://img.shields.io/badge/Twitter-Follow-blue?logo=x)](https://x.com/ai4s_protenix)
+ [![Slack](https://img.shields.io/badge/Slack-Join-yellow?logo=slack)](https://join.slack.com/t/protenixworkspace/shared_invite/zt-36j4kx1cy-GyQMWLDrMO4Wd0fjGxtxug)
+ [![Wechat](https://img.shields.io/badge/Wechat-Join-brightgreen?logo=wechat)](https://github.com/bytedance/Protenix/issues/52)
+ [![Email](https://img.shields.io/badge/Email-Contact-lightgrey?logo=gmail)](#contact-us)
+ </div>
+
+ We’re excited to introduce **Protenix** — a trainable, open-source PyTorch reproduction of [AlphaFold 3](https://www.nature.com/articles/s41586-024-07487-w).
+
+ Protenix is built for high-accuracy structure prediction. It serves as an initial step in our journey toward advancing accessible and extensible research tools for the computational biology community.
+
+
+
+ ![Protenix predictions](assets/protenix_predictions.gif)
+
+ ## 🌟 Related Projects
+ - **[PXMeter](https://github.com/bytedance/PXMeter/)** is an open-source toolkit designed for reproducible evaluation of structure prediction models, released with a high-quality benchmark dataset that has been manually reviewed to remove experimental artifacts and non-biological interactions. The associated study presents an in-depth comparative analysis of state-of-the-art models, drawing insights from extensive metric data and detailed case studies. The evaluation of Protenix is based on PXMeter.
+ - **[Protenix-Dock](https://github.com/bytedance/Protenix-Dock)**: Our implementation of a classical protein-ligand docking framework that leverages empirical scoring functions. Without using deep neural networks, Protenix-Dock delivers competitive performance in rigid docking tasks.
+
+ ## 🎉 Updates
+ - 2025-07-17: **Protenix-Mini released!** Lightweight model variants with significantly reduced inference cost are now available. Users can choose from multiple configurations to balance speed and accuracy based on deployment needs. See our [paper](https://arxiv.org/abs/2507.11839) and [model configs](./configs/configs_model_type.py) for more information.
+ - 2025-07-17: The [***New constraint feature***](docs/infer_json_format.md#constraint) is released! It now supports **atom-level contact** and **pocket** constraints, significantly improving performance in our evaluations.
+ - 2025-05-30: **Protenix-v0.5.0** is now available! You may try Protenix-v0.5.0 by accessing the [server](https://protenix-server.com), or upgrade to the latest version using pip.
+ - 2025-01-16: The preview version of the **constraint feature** is released on the [`constraint_esm`](https://github.com/bytedance/Protenix/tree/constraint_esm) branch.
+ - 2025-01-16: The [training data pipeline](./docs/prepare_training_data.md) is released.
+ - 2025-01-16: The [MSA pipeline](./docs/msa_pipeline.md) is released.
+ - 2025-01-16: Use [local colabfold_search](./docs/colabfold_compatible_msa.md) to generate Protenix-compatible MSA.
+
+ ### 📊 Benchmark
+ We benchmarked the performance of Protenix-v0.5.0 against [Boltz-1](https://github.com/jwohlwend/boltz/releases/tag/v0.4.1) and [Chai-1](https://github.com/chaidiscovery/chai-lab/releases/tag/v0.6.1) across multiple datasets, including [PoseBusters v2](https://arxiv.org/abs/2308.05777), [AF3 Nucleic Acid Complexes](https://www.nature.com/articles/s41586-024-07487-w), [AF3 Antibody Set](https://github.com/google-deepmind/alphafold3/blob/20ad0a21eb49febcaad4a6f5d71aa6b701512e5b/docs/metadata_antibody_antigen.csv), and our curated Recent PDB set.
+ <!-- 1️⃣ [PoseBusters v2](https://arxiv.org/abs/2308.05777)\
+ 2️⃣ [AF3 Nucleic Acid Complexes](https://www.nature.com/articles/s41586-024-07487-w)\
+ 3️⃣ [AF3 Antibody Set](https://github.com/google-deepmind/alphafold3/blob/20ad0a21eb49febcaad4a6f5d71aa6b701512e5b/docs/metadata_antibody_antigen.csv)\
+ 4️⃣ Our curated Recent PDB set -->
+
+ Protenix-v0.5.0 was trained using a PDB cut-off date of September 30, 2021. For the comparative analysis, we adhered to AF3’s inference protocol, generating 25 predictions by employing 5 model seeds, with each seed yielding 5 diffusion samples. The predictions were subsequently ranked based on their respective ranking scores.
+
+
+ ![V0.5.0 model Metrics](assets/v0.5.0_metrics.png)
+
+ We will soon release the benchmarking toolkit, including the evaluation datasets, data curation pipeline, and metric calculators, to support transparent and reproducible benchmarking.
+
+
+ ## 🛠 Installation
+
+ ### PyPI
+
+ ```bash
+ pip3 install protenix
+ ```
+
+ For development on a CPU-only machine, it is convenient to install with the `--cpu` flag in editable mode:
+ ```
+ python3 setup.py develop --cpu
+ ```
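After installation, a quick sanity check can confirm that the package is importable and report the installed version. This is a minimal sketch that assumes only that the distribution was installed under the name `protenix` (e.g. via `pip3 install protenix`); it is not part of the official CLI:

```python
# Minimal post-install check: import the package and print the installed
# distribution version via the standard library. Assumes the distribution
# name is "protenix"; adjust it if you installed from source under another name.
from importlib.metadata import PackageNotFoundError, version

try:
    import protenix  # noqa: F401
    print("protenix version:", version("protenix"))
except (ImportError, PackageNotFoundError) as err:
    print("protenix is not installed correctly:", err)
```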
+
+ ### Docker (Recommended for Training)
+
+ Check the detailed guide: [<u>Docker Installation</u>](docs/docker_installation.md).
+
+
+ ## 🚀 Inference
+
+ ### Expected Input & Output Format
+ For details on the input JSON format and expected outputs, please refer to the [Input/Output Documentation](docs/infer_json_format.md).
+
+
+ ### Prepare Inputs
+
+ #### Convert PDB/CIF File to Input JSON
+
+ If your input is a `.pdb` or `.cif` file, you can convert it into a JSON file for inference.
+
+
+ ```bash
+ # ensure `release_data/ccd_cache/components.cif` exists, or run:
+ python scripts/gen_ccd_cache.py -c release_data/ccd_cache/ -n [num_cpu]
+
+ # for PDB
+ # download the pdb file
+ wget https://files.rcsb.org/download/7pzb.pdb
+ # run with the pdb file and convert it to a json file for inference.
+ protenix tojson --input examples/7pzb.pdb --out_dir ./output
+
+ # for CIF (same process)
+ # download the cif file
+ wget https://files.rcsb.org/download/7pzb.cif
+ # run with the cif file and convert it to a json file for inference.
+ protenix tojson --input examples/7pzb.cif --out_dir ./output
+ ```
+
+
+ #### (Optional) Prepare MSA Files
+
+ We provide an independent MSA search utility. You can run it using either a JSON file or a protein FASTA file.
+ ```bash
+ # run msa search with a json file; it will write the precomputed msa dir info to a new json file.
+ protenix msa --input examples/example_without_msa.json --out_dir ./output
+
+ # run msa search with a fasta file that contains only protein sequences.
+ protenix msa --input examples/prot.fasta --out_dir ./output
+ ```
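If you start from a raw amino-acid sequence rather than an existing file, the FASTA input for the MSA utility can be written in a few lines of Python. This is a minimal sketch; the header, sequence, and file name below are illustrative placeholders, not files shipped with the repository:

```python
# Write a single-chain protein sequence to a FASTA file for `protenix msa`.
# The header and sequence are placeholders for illustration only.
from pathlib import Path

header = "my_protein"
sequence = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQAPILSRVGDGTQDNLSGAEKAVQVKVKALPDAQ"

Path("my_protein.fasta").write_text(f">{header}\n{sequence}\n")
# Then run: protenix msa --input my_protein.fasta --out_dir ./output
```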
+
+ ### Inference via Command Line
+
+ If you installed `Protenix` via `pip`, you can run the following commands to perform model inference:
+
+
+ ```bash
+ # 1. The default model_name is protenix_base_default_v0.5.0; you can change it by passing --model_name xxxx
+ # 2. We provide recommended default configuration parameters for each model. To customize cycle/step/use_msa settings, you must set --use_default_params false
+ # 3. You can then modify cycle/step/use_msa by passing --cycle x1 --step x2 --use_msa false
+
+ # run with example.json, which contains a precomputed msa dir.
+ protenix predict --input examples/example.json --out_dir ./output --seeds 101 --model_name "protenix_base_default_v0.5.0"
+
+ # run with example.json, using only the esm feature.
+ protenix predict --input examples/example.json --out_dir ./output --seeds 101 --model_name "protenix_mini_esm_v0.5.0" --use_msa false
+
+ # run with multiple json files; the default seed is 101.
+ protenix predict --input ./jsons_dir/ --out_dir ./output
+
+ # if the json does not contain a precomputed msa dir,
+ # add --use_msa (default: true) to search msa and then predict.
+ # if multiple seeds are provided, separate them with commas.
+ protenix predict --input examples/example_without_msa.json --out_dir ./output --seeds 101,102 --use_msa true
+ ```
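For batch runs, the same CLI can be driven from Python. The sketch below only composes the `protenix predict` flags documented above (`--input`, `--out_dir`, `--seeds`, `--model_name`); the directory names and seeds are placeholders, and the output layout is whatever the CLI itself produces:

```python
# Run `protenix predict` for every prepared input JSON in a directory.
# Only the documented CLI flags are used; paths and seeds are illustrative.
import subprocess
from pathlib import Path

input_dir = Path("./jsons_dir")   # directory containing prepared input JSONs
out_dir = Path("./output")
seeds = [101, 102]                # multiple seeds are passed comma-separated

for json_file in sorted(input_dir.glob("*.json")):
    cmd = [
        "protenix", "predict",
        "--input", str(json_file),
        "--out_dir", str(out_dir),
        "--seeds", ",".join(str(s) for s in seeds),
        "--model_name", "protenix_base_default_v0.5.0",
    ]
    print("Running:", " ".join(cmd))
    subprocess.run(cmd, check=True)
```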
+
+ ### Inference via Bash Script
+ Alternatively, run inference via the demo script:
+
+ ```bash
+ bash inference_demo.sh
+ ```
+
+ The script accepts the following arguments:
+ * `model_name`: Name of the model to use for inference.
+ * `input_json_path`: Path to a JSON file that fully specifies the input structure.
+ * `dump_dir`: Directory where inference results will be saved.
+ * `dtype`: Data type used during inference. Supported options: `bf16` and `fp32`.
+ * `use_msa`: Whether to enable MSA features (default: true).
+
+
+ > **Note**: By default, LayerNorm and EvoformerAttention kernels are disabled for simplicity.
+ > To enable them and speed up inference, see the [**Kernels Setup Guide**](docs/kernels.md).
+
+
+ ## 🧬 Training
+
+ Refer to the [Training Documentation](docs/training.md) for setup and details.
+
+ ## Model Features
+ ### 📌 Constraint
+
+ Protenix supports specifying ***contacts*** (at both residue and atom levels) and ***pocket constraints*** as extra guidance. Our benchmark results demonstrate that constraint-guided predictions are significantly more accurate. See our [doc](docs/infer_json_format.md#constraint) for input format details.
+
+ ![Constraint Metrics](assets/constraint_metrics.png)
+
+ ### 📌 Mini-Models
+ We introduce Protenix-Mini, a lightweight variant of Protenix that uses fewer network blocks and few ODE steps (even as few as one or two steps) to enable efficient prediction of biomolecular complex structures. Experimental results show that Protenix-Mini achieves a favorable balance between efficiency and accuracy, with only a marginal 1–5% drop in evaluation metrics such as interface LDDT, complex LDDT, and ligand RMSD success rate. Protenix-Mini enables accurate structure prediction in high-throughput and resource-limited scenarios, making it well-suited for practical applications at scale. The following comparisons were performed on a subset of the RecentPDB dataset comprising sequences with fewer than 768 tokens.
+
+ ![Mini/Tiny Metrics](assets/mini_performance.png)
+
+
+ ## Training and Inference Cost
+
+ For details on memory usage and runtime during training and inference, refer to the [Training & Inference Cost Documentation](docs/model_train_inference_cost.md).
+
+
+ ## Citing Protenix
+
+ If you use Protenix in your research, please cite the following:
+
+ ```
+ @article{bytedance2025protenix,
+ title={Protenix - Advancing Structure Prediction Through a Comprehensive AlphaFold3 Reproduction},
+ author={ByteDance AML AI4Science Team and Chen, Xinshi and Zhang, Yuxuan and Lu, Chan and Ma, Wenzhi and Guan, Jiaqi and Gong, Chengyue and Yang, Jincai and Zhang, Hanyu and Zhang, Ke and Wu, Shenghao and Zhou, Kuangqi and Yang, Yanping and Liu, Zhenyu and Wang, Lan and Shi, Bo and Shi, Shaochen and Xiao, Wenzhi},
+ year={2025},
+ journal={bioRxiv},
+ publisher={Cold Spring Harbor Laboratory},
+ doi={10.1101/2025.01.08.631967},
+ URL={https://www.biorxiv.org/content/early/2025/01/11/2025.01.08.631967},
+ elocation-id={2025.01.08.631967},
+ eprint={https://www.biorxiv.org/content/early/2025/01/11/2025.01.08.631967.full.pdf},
+ }
+ ```
+
+ ### 📚 Citing Related Work
+ Protenix is built upon and inspired by several influential projects. If you use Protenix in your research, we also encourage citing the following foundational works where appropriate:
+ ```
+ @article{abramson2024accurate,
+ title={Accurate structure prediction of biomolecular interactions with AlphaFold 3},
+ author={Abramson, Josh and Adler, Jonas and Dunger, Jack and Evans, Richard and Green, Tim and Pritzel, Alexander and Ronneberger, Olaf and Willmore, Lindsay and Ballard, Andrew J and Bambrick, Joshua and others},
+ journal={Nature},
+ volume={630},
+ number={8016},
+ pages={493--500},
+ year={2024},
+ publisher={Nature Publishing Group UK London}
+ }
+ @article{ahdritz2024openfold,
+ title={OpenFold: Retraining AlphaFold2 yields new insights into its learning mechanisms and capacity for generalization},
+ author={Ahdritz, Gustaf and Bouatta, Nazim and Floristean, Christina and Kadyan, Sachin and Xia, Qinghui and Gerecke, William and O’Donnell, Timothy J and Berenberg, Daniel and Fisk, Ian and Zanichelli, Niccol{\`o} and others},
+ journal={Nature Methods},
+ volume={21},
+ number={8},
+ pages={1514--1524},
+ year={2024},
+ publisher={Nature Publishing Group US New York}
+ }
+ @article{mirdita2022colabfold,
+ title={ColabFold: making protein folding accessible to all},
+ author={Mirdita, Milot and Sch{\"u}tze, Konstantin and Moriwaki, Yoshitaka and Heo, Lim and Ovchinnikov, Sergey and Steinegger, Martin},
+ journal={Nature methods},
+ volume={19},
+ number={6},
+ pages={679--682},
+ year={2022},
+ publisher={Nature Publishing Group US New York}
+ }
+ ```
+
+ ## Contributing to Protenix
+
+ We welcome contributions from the community to help improve Protenix!
+
+ 📄 Check out the [Contributing Guide](CONTRIBUTING.md) to get started.
+
+ ✅ Code Quality:
+ We use `pre-commit` hooks to ensure consistency and code quality. Please install them before making commits:
+
+ ```bash
+ pip install pre-commit
+ pre-commit install
+ ```
+
+ 🐞 Found a bug or have a feature request? [Open an issue](https://github.com/bytedance/Protenix/issues).
+
+
+
+ ## Acknowledgements
+
+
+ The implementation of LayerNorm operators refers to both [OneFlow](https://github.com/Oneflow-Inc/oneflow) and [FastFold](https://github.com/hpcaitech/FastFold).
+ We also adopted several [module](protenix/openfold_local/) implementations from [OpenFold](https://github.com/aqlaboratory/openfold), except for [`LayerNorm`](protenix/model/layer_norm/), which is implemented independently.
+
+
+ ## Code of Conduct
+
+ We are committed to fostering a welcoming and inclusive environment.
+ Please review our [Code of Conduct](CODE_OF_CONDUCT.md) for guidelines on how to participate respectfully.
+
+
+ ## Security
+
+ If you discover a potential security issue in this project, or think you may
+ have discovered a security issue, we ask that you notify Bytedance Security via our [security center](https://security.bytedance.com/src) or [vulnerability reporting email]([email protected]).
+
+ Please do **not** create a public GitHub issue.
+
+ ## License
+
+ The Protenix project, including both code and model parameters, is released under the [Apache 2.0 License](./LICENSE). It is free for both academic research and commercial use.
+
+ ## Contact Us
+
+ We welcome inquiries and collaboration opportunities for advanced applications of our model, such as developing new features, fine-tuning for specific use cases, and more. Please feel free to contact us at [email protected].
+
+ ## Join Us
+
+ We're expanding the **Protenix team** at ByteDance Seed-AI for Science! \
+ We’re looking for talented individuals in **machine learning** and **computational biology/chemistry**. Opportunities are available in both **Beijing** and **Seattle**, across internships, new grad roles, and experienced full-time positions. \
+ *“Computational Biology/Chemistry” covers structural biology, computational biology, computational chemistry, drug discovery, and more.*
+
+
+ ### 📍 Beijing, China
+ | Type | Expertise | Apply Link |
+ |------------|------------------------------------|------------|
+ | Full-Time | Computational Biology / Chemistry | [Experienced & New Grad](https://jobs.bytedance.com/society/position/detail/7505998274429421842) |
+ | Full-Time | Machine Learning | [Experienced & New Grad](https://jobs.bytedance.com/society/position/detail/7505999453133015314) |
+ | Internship | Computational Biology / Chemistry | [Internship](https://jobs.bytedance.com/campus/position/7509005072577546504/detail) |
+ | Internship | Machine Learning | [Internship](https://jobs.bytedance.com/campus/position/7509005074018961672/detail) |
+
+
+ ### 📍 Seattle, US
+
+ | Type | Expertise | Apply Link |
+ |------------|------------------------------------|------------|
+ | Full-Time | Computational Biology / Chemistry | [Experienced](https://jobs.bytedance.com/en/position/7270666468370614585/detail), [New Grad](https://jobs.bytedance.com/en/position/7515465250054211847/detail) |
+ | Full-Time | Machine Learning | [Experienced](https://jobs.bytedance.com/en/position/7270665658072926521/detail), [New Grad](https://jobs.bytedance.com/en/position/7515908698011601159/detail) |
+ | Internship | Computational Biology / Chemistry | Internship (opening ~August) |
+ | Internship | Machine Learning | Internship (opening ~August) |
af3-dev/release/clusters-by-entity-40.txt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ab4af905e75b382eda8dec59917dc3608bee0729e36b9e71baf860bbe86850c
3
+ size 21699572
assets/constraint_metrics.png ADDED

Git LFS Details

  • SHA256: 7d96e0825b99f159a9644be057fdb50a75f78d94b0040262c67b4a48983ee8ed
  • Pointer size: 131 Bytes
  • Size of remote file: 269 kB
assets/license_header.txt ADDED
@@ -0,0 +1,13 @@
1
+ Copyright 2024 ByteDance and/or its affiliates.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
assets/mini_performance.png ADDED

Git LFS Details

  • SHA256: 383d078099b15bf69a1bd1bf89a25f898863761b7815f0720824bcfa67a20b4d
  • Pointer size: 131 Bytes
  • Size of remote file: 102 kB
assets/protenix_predictions.gif ADDED

Git LFS Details

  • SHA256: 400c4b72786c6ab2816a23cd03276502a32705dbdf663f7292659d892ed78a7b
  • Pointer size: 133 Bytes
  • Size of remote file: 27.6 MB
assets/v0.5.0_metrics.png ADDED

Git LFS Details

  • SHA256: 3ff727dac76ad6686a26535169c9d8196dab9176a13be8da4a3439f30927a8a9
  • Pointer size: 131 Bytes
  • Size of remote file: 226 kB
configs/__init__.py ADDED
File without changes
configs/configs_base.py ADDED
@@ -0,0 +1,395 @@
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # pylint: disable=C0114,C0301
16
+ from protenix.config.extend_types import (
17
+ GlobalConfigValue,
18
+ ListValue,
19
+ RequiredValue,
20
+ ValueMaybeNone,
21
+ )
22
+
23
+ basic_configs = {
24
+ "project": RequiredValue(str),
25
+ "run_name": RequiredValue(str),
26
+ "base_dir": RequiredValue(str),
27
+ # training
28
+ "eval_interval": RequiredValue(int),
29
+ "log_interval": RequiredValue(int),
30
+ "checkpoint_interval": -1,
31
+ "eval_first": False, # run evaluate() before training steps
32
+ "iters_to_accumulate": 1,
33
+ "finetune_params_with_substring": [
34
+ ""
35
+ ], # params with substring will be finetuned with different learning rate: finetune_optim_configs["lr"]
36
+ "eval_only": False,
37
+ "load_checkpoint_path": "",
38
+ "load_ema_checkpoint_path": "",
39
+ "load_strict": True,
40
+ "load_params_only": True,
41
+ "skip_load_step": False,
42
+ "skip_load_optimizer": False,
43
+ "skip_load_scheduler": False,
44
+ "load_step_for_scheduler": False,
45
+ "train_confidence_only": False,
46
+ "use_wandb": True,
47
+ "wandb_id": "",
48
+ "seed": 42,
49
+ "deterministic": False,
50
+ "deterministic_seed": False,
51
+ "ema_decay": -1.0,
52
+ "eval_ema_only": False,  # whether wandb tracks only the EMA checkpoint metrics
53
+ "ema_mutable_param_keywords": [""],
54
+ "model_name": "protenix_base_default_v0.5.0", # train model name
55
+ }
56
+ data_configs = {
57
+ # Data
58
+ "train_crop_size": 256,
59
+ "test_max_n_token": -1,
60
+ "train_lig_atom_rename": False,
61
+ "train_shuffle_mols": False,
62
+ "train_shuffle_sym_ids": False,
63
+ "test_lig_atom_rename": False,
64
+ "test_shuffle_mols": False,
65
+ "test_shuffle_sym_ids": False,
66
+ "esm": {
67
+ "enable": False,
68
+ "model_name": "esm2-3b",
69
+ "embedding_dim": 2560,
70
+ },
71
+ }
72
+ optim_configs = {
73
+ # Optim
74
+ "lr": 0.0018,
75
+ "lr_scheduler": "af3",
76
+ "warmup_steps": 10,
77
+ "max_steps": RequiredValue(int),
78
+ "min_lr_ratio": 0.1,
79
+ "decay_every_n_steps": 50000,
80
+ "grad_clip_norm": 10,
81
+ # Optim - Adam
82
+ "adam": {
83
+ "beta1": 0.9,
84
+ "beta2": 0.95,
85
+ "weight_decay": 1e-8,
86
+ "lr": GlobalConfigValue("lr"),
87
+ "use_adamw": False,
88
+ },
89
+ # Optim - LRScheduler
90
+ "af3_lr_scheduler": {
91
+ "warmup_steps": GlobalConfigValue("warmup_steps"),
92
+ "decay_every_n_steps": GlobalConfigValue("decay_every_n_steps"),
93
+ "decay_factor": 0.95,
94
+ "lr": GlobalConfigValue("lr"),
95
+ },
96
+ }
97
+ # Fine-tuned optimizer settings.
98
+ # For models supporting structural constraints and ESM embeddings.
99
+ finetune_optim_configs = {
100
+ # Optim
101
+ "lr": 0.0018,
102
+ "lr_scheduler": "cosine_annealing",
103
+ "warmup_steps": 1000,
104
+ "max_steps": 20000,
105
+ "min_lr_ratio": 0.1,
106
+ "decay_every_n_steps": 50000,
107
+ }
108
+ model_configs = {
109
+ # Model
110
+ "c_s": 384,
111
+ "c_z": 128,
112
+ "c_s_inputs": 449, # c_s_inputs == c_token + 32 + 32 + 1
113
+ "c_atom": 128,
114
+ "c_atompair": 16,
115
+ "c_token": 384,
116
+ "n_blocks": 48,
117
+ "max_atoms_per_token": 24, # DNA G max_atoms = 23
118
+ "no_bins": 64,
119
+ "sigma_data": 16.0,
120
+ "diffusion_batch_size": 48,
121
+ "diffusion_chunk_size": ValueMaybeNone(4), # chunksize of diffusion_batch_size
122
+ "blocks_per_ckpt": ValueMaybeNone(
123
+ 1
124
+ ), # NOTE: Number of blocks in each activation checkpoint, if None, no checkpointing is performed.
125
+ # switch of kernels
126
+ "use_memory_efficient_kernel": False,
127
+ "use_deepspeed_evo_attention": True,
128
+ "use_flash": False,
129
+ "use_lma": False,
130
+ "use_xformer": False,
131
+ "find_unused_parameters": False,
132
+ "dtype": "bf16", # default training dtype: bf16
133
+ "loss_metrics_sparse_enable": True,  # the switch for both sparse lddt metrics and sparse bond/smooth lddt loss
134
+ "skip_amp": {
135
+ "sample_diffusion": True,
136
+ "confidence_head": True,
137
+ "sample_diffusion_training": True,
138
+ "loss": True,
139
+ },
140
+ "infer_setting": {
141
+ "chunk_size": ValueMaybeNone(
142
+ 64
143
+ ), # should set to null for normal training and small dataset eval [for efficiency]
144
+ "sample_diffusion_chunk_size": ValueMaybeNone(
145
+ 1
146
+ ), # should set to null for normal training and small dataset eval [for efficiency]
147
+ "lddt_metrics_sparse_enable": GlobalConfigValue("loss_metrics_sparse_enable"),
148
+ "lddt_metrics_chunk_size": ValueMaybeNone(
149
+ 1
150
+ ), # only works if loss_metrics_sparse_enable, can set as default 1
151
+ },
152
+ "train_noise_sampler": {
153
+ "p_mean": -1.2,
154
+ "p_std": 1.5,
155
+ "sigma_data": 16.0, # NOTE: in EDM, this is 1.0
156
+ },
157
+ "inference_noise_scheduler": {
158
+ "s_max": 160.0,
159
+ "s_min": 4e-4,
160
+ "rho": 7,
161
+ "sigma_data": 16.0, # NOTE: in EDM, this is 1.0
162
+ },
163
+ "sample_diffusion": {
164
+ "gamma0": 0.8,
165
+ "gamma_min": 1.0,
166
+ "noise_scale_lambda": 1.003,
167
+ "step_scale_eta": 1.5,
168
+ "N_step": 200,
169
+ "N_sample": 5,
170
+ "N_step_mini_rollout": 20,
171
+ "N_sample_mini_rollout": 1,
172
+ },
173
+ "model": {
174
+ "N_model_seed": 1, # for inference
175
+ "N_cycle": 4,
176
+ "condition_embedding_drop_rate": 0.0,
177
+ "confidence_embedding_drop_rate": 0.0,
178
+ "input_embedder": {
179
+ "c_atom": GlobalConfigValue("c_atom"),
180
+ "c_atompair": GlobalConfigValue("c_atompair"),
181
+ "c_token": GlobalConfigValue("c_token"),
182
+ },
183
+ "relative_position_encoding": {
184
+ "r_max": 32,
185
+ "s_max": 2,
186
+ "c_z": GlobalConfigValue("c_z"),
187
+ },
188
+ "template_embedder": {
189
+ "c": 64,
190
+ "c_z": GlobalConfigValue("c_z"),
191
+ "n_blocks": 0,
192
+ "dropout": 0.25,
193
+ "blocks_per_ckpt": GlobalConfigValue("blocks_per_ckpt"),
194
+ },
195
+ "msa_module": {
196
+ "c_m": 64,
197
+ "c_z": GlobalConfigValue("c_z"),
198
+ "c_s_inputs": GlobalConfigValue("c_s_inputs"),
199
+ "n_blocks": 4,
200
+ "msa_dropout": 0.15,
201
+ "pair_dropout": 0.25,
202
+ "blocks_per_ckpt": GlobalConfigValue("blocks_per_ckpt"),
203
+ "msa_chunk_size": ValueMaybeNone(2048),
204
+ },
205
+ # Optional constraint embedder, only used when constraint is enabled.
206
+ "constraint_embedder": {
207
+ "pocket_embedder": {
208
+ "enable": False,
209
+ "c_s_input": 3,
210
+ "c_z_input": 1,
211
+ },
212
+ "contact_embedder": {
213
+ "enable": False,
214
+ "c_z_input": 2,
215
+ },
216
+ "substructure_embedder": {
217
+ "enable": False,
218
+ "n_classes": 4,
219
+ "architecture": "transformer",
220
+ "hidden_dim": 128,
221
+ "n_layers": 1,
222
+ },
223
+ "contact_atom_embedder": {
224
+ "enable": False,
225
+ "c_z_input": 2,
226
+ },
227
+ "c_constraint_z": GlobalConfigValue("c_z"),
228
+ "c_constraint_s": GlobalConfigValue("c_s_inputs"),
229
+ "c_constraint_atom_pair": GlobalConfigValue("c_atompair"),
230
+ "initialize_method": "zero", # zero, kaiming
231
+ },
232
+ "pairformer": {
233
+ "n_blocks": GlobalConfigValue("n_blocks"),
234
+ "c_z": GlobalConfigValue("c_z"),
235
+ "c_s": GlobalConfigValue("c_s"),
236
+ "n_heads": 16,
237
+ "dropout": 0.25,
238
+ "blocks_per_ckpt": GlobalConfigValue("blocks_per_ckpt"),
239
+ },
240
+ "diffusion_module": {
241
+ "use_fine_grained_checkpoint": True,
242
+ "sigma_data": GlobalConfigValue("sigma_data"),
243
+ "c_token": 768,
244
+ "c_atom": GlobalConfigValue("c_atom"),
245
+ "c_atompair": GlobalConfigValue("c_atompair"),
246
+ "c_z": GlobalConfigValue("c_z"),
247
+ "c_s": GlobalConfigValue("c_s"),
248
+ "c_s_inputs": GlobalConfigValue("c_s_inputs"),
249
+ "atom_encoder": {
250
+ "n_blocks": 3,
251
+ "n_heads": 4,
252
+ },
253
+ "transformer": {
254
+ "n_blocks": 24,
255
+ "n_heads": 16,
256
+ },
257
+ "atom_decoder": {
258
+ "n_blocks": 3,
259
+ "n_heads": 4,
260
+ },
261
+ "blocks_per_ckpt": GlobalConfigValue("blocks_per_ckpt"),
262
+ },
263
+ "confidence_head": {
264
+ "c_z": GlobalConfigValue("c_z"),
265
+ "c_s": GlobalConfigValue("c_s"),
266
+ "c_s_inputs": GlobalConfigValue("c_s_inputs"),
267
+ "n_blocks": 4,
268
+ "max_atoms_per_token": GlobalConfigValue("max_atoms_per_token"),
269
+ "pairformer_dropout": 0.0,
270
+ "blocks_per_ckpt": GlobalConfigValue("blocks_per_ckpt"),
271
+ "distance_bin_start": 3.25,
272
+ "distance_bin_end": 52.0,
273
+ "distance_bin_step": 1.25,
274
+ "stop_gradient": True,
275
+ },
276
+ "distogram_head": {
277
+ "c_z": GlobalConfigValue("c_z"),
278
+ "no_bins": GlobalConfigValue("no_bins"),
279
+ },
280
+ },
281
+ }
282
+ perm_configs = {
283
+ # Chain and Atom Permutation
284
+ "chain_permutation": {
285
+ "train": {
286
+ "mini_rollout": True,
287
+ "diffusion_sample": False,
288
+ },
289
+ "test": {
290
+ "diffusion_sample": True,
291
+ },
292
+ "permute_by_pocket": True,
293
+ "configs": {
294
+ "use_center_rmsd": False,
295
+ "find_gt_anchor_first": False,
296
+ "accept_it_as_it_is": False,
297
+ "enumerate_all_anchor_pairs": False,
298
+ "selection_metric": "aligned_rmsd",
299
+ },
300
+ },
301
+ "atom_permutation": {
302
+ "train": {
303
+ "mini_rollout": True,
304
+ "diffusion_sample": False,
305
+ },
306
+ "test": {
307
+ "diffusion_sample": True,
308
+ },
309
+ "permute_by_pocket": True,
310
+ "global_align_wo_symmetric_atom": False,
311
+ },
312
+ }
313
+ loss_configs = {
314
+ "loss": {
315
+ "diffusion_lddt_chunk_size": ValueMaybeNone(1),
316
+ "diffusion_bond_chunk_size": ValueMaybeNone(1),
317
+ "diffusion_chunk_size_outer": ValueMaybeNone(-1),
318
+ "diffusion_sparse_loss_enable": GlobalConfigValue("loss_metrics_sparse_enable"),
319
+ "diffusion_lddt_loss_dense": True, # only set true in initial training for training speed
320
+ "resolution": {"min": 0.1, "max": 4.0},
321
+ "weight": {
322
+ "alpha_confidence": 1e-4,
323
+ "alpha_pae": 0.0, # or 1 in finetuning stage 3
324
+ "alpha_except_pae": 1.0,
325
+ "alpha_diffusion": 4.0,
326
+ "alpha_distogram": 3e-2,
327
+ "alpha_bond": 0.0, # or 1 in finetuning stages
328
+ "smooth_lddt": 1.0, # or 0 in finetuning stages
329
+ },
330
+ "plddt": {
331
+ "min_bin": 0,
332
+ "max_bin": 1.0,
333
+ "no_bins": 50,
334
+ "normalize": True,
335
+ "eps": 1e-6,
336
+ },
337
+ "pde": {
338
+ "min_bin": 0,
339
+ "max_bin": 32,
340
+ "no_bins": 64,
341
+ "eps": 1e-6,
342
+ },
343
+ "resolved": {
344
+ "eps": 1e-6,
345
+ },
346
+ "pae": {
347
+ "min_bin": 0,
348
+ "max_bin": 32,
349
+ "no_bins": 64,
350
+ "eps": 1e-6,
351
+ },
352
+ "diffusion": {
353
+ "mse": {
354
+ "weight_mse": 1 / 3,
355
+ "weight_dna": 5.0,
356
+ "weight_rna": 5.0,
357
+ "weight_ligand": 10.0,
358
+ "eps": 1e-6,
359
+ },
360
+ "bond": {
361
+ "eps": 1e-6,
362
+ },
363
+ "smooth_lddt": {
364
+ "eps": 1e-6,
365
+ },
366
+ },
367
+ "distogram": {
368
+ "min_bin": 2.3125,
369
+ "max_bin": 21.6875,
370
+ "no_bins": 64,
371
+ "eps": 1e-6,
372
+ },
373
+ },
374
+ "metrics": {
375
+ "lddt": {
376
+ "eps": 1e-6,
377
+ },
378
+ "complex_ranker_keys": ListValue(["plddt", "gpde", "ranking_score"]),
379
+ "chain_ranker_keys": ListValue(["chain_ptm", "chain_plddt"]),
380
+ "interface_ranker_keys": ListValue(
381
+ ["chain_pair_iptm", "chain_pair_iptm_global", "chain_pair_plddt"]
382
+ ),
383
+ "clash": {"af3_clash_threshold": 1.1, "vdw_clash_threshold": 0.75},
384
+ },
385
+ }
386
+
387
+ configs = {
388
+ **basic_configs,
389
+ **data_configs,
390
+ **optim_configs,
391
+ **model_configs,
392
+ **perm_configs,
393
+ **loss_configs,
394
+ }
395
+ configs["finetune"] = finetune_optim_configs
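The base configs above wire nested entries to top-level values through `GlobalConfigValue`. As a rough illustration of that idea only — the actual resolution logic lives in `protenix/config` and may differ — a placeholder reference can be resolved recursively against the top-level dict:

```python
# Illustrative sketch: mimics how a GlobalConfigValue("lr")-style reference could be
# resolved against the top-level config dict. Not the real protenix.config implementation.
from typing import Any


class GlobalRef:
    """Stands in for GlobalConfigValue: a pointer to a top-level config key."""

    def __init__(self, key: str) -> None:
        self.key = key


def resolve(node: Any, top_level: dict) -> Any:
    """Recursively replace GlobalRef placeholders with the top-level value."""
    if isinstance(node, GlobalRef):
        return top_level[node.key]
    if isinstance(node, dict):
        return {k: resolve(v, top_level) for k, v in node.items()}
    return node


configs = {"lr": 0.0018, "adam": {"lr": GlobalRef("lr"), "beta1": 0.9}}
print(resolve(configs, configs)["adam"]["lr"])  # -> 0.0018
```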
configs/configs_data.py ADDED
@@ -0,0 +1,279 @@
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # pylint: disable=C0114,C0301
16
+ import os
17
+ from copy import deepcopy
18
+
19
+ from protenix.config.extend_types import GlobalConfigValue, ListValue
20
+
21
+ default_test_configs = {
22
+ "sampler_configs": {
23
+ "sampler_type": "uniform",
24
+ },
25
+ "cropping_configs": {
26
+ "method_weights": [
27
+ 0.0, # ContiguousCropping
28
+ 0.0, # SpatialCropping
29
+ 1.0, # SpatialInterfaceCropping
30
+ ],
31
+ "crop_size": -1,
32
+ },
33
+ "lig_atom_rename": GlobalConfigValue("test_lig_atom_rename"),
34
+ "shuffle_mols": GlobalConfigValue("test_shuffle_mols"),
35
+ "shuffle_sym_ids": GlobalConfigValue("test_shuffle_sym_ids"),
36
+ "constraint": {
37
+ "enable": False,
38
+ "fix_seed": False,  # True means using the same contact in each evaluation.
39
+ },
40
+ }
41
+
42
+ default_weighted_pdb_configs = {
43
+ "sampler_configs": {
44
+ "sampler_type": "weighted",
45
+ "beta_dict": {
46
+ "chain": 0.5,
47
+ "interface": 1,
48
+ },
49
+ "alpha_dict": {
50
+ "prot": 3,
51
+ "nuc": 3,
52
+ "ligand": 1,
53
+ },
54
+ "force_recompute_weight": True,
55
+ },
56
+ "cropping_configs": {
57
+ "method_weights": ListValue([0.2, 0.4, 0.4]),
58
+ "crop_size": GlobalConfigValue("train_crop_size"),
59
+ },
60
+ "sample_weight": 0.5,
61
+ "limits": -1,
62
+ "lig_atom_rename": GlobalConfigValue("train_lig_atom_rename"),
63
+ "shuffle_mols": GlobalConfigValue("train_shuffle_mols"),
64
+ "shuffle_sym_ids": GlobalConfigValue("train_shuffle_sym_ids"),
65
+ # If enabled, the training settings for different constraint types,
66
+ # providing the model a certain proportion of constraints
67
+ # that meet specific conditions.
68
+ "constraint": {
69
+ "enable": False,
70
+ "fix_seed": False,
71
+ "pocket": {
72
+ "prob": 0.0,
73
+ "size": 1 / 3,
74
+ "spec_binder_chain": False,
75
+ "max_distance_range": {"PP": ListValue([6, 20]), "LP": ListValue([6, 20])},
76
+ "group": "complex",
77
+ "distance_type": "center_atom",
78
+ },
79
+ "contact": {
80
+ "prob": 0.0,
81
+ "size": 1 / 3,
82
+ "max_distance_range": {
83
+ "PP": ListValue([6, 30]),
84
+ "PL": ListValue([4, 10]),
85
+ },
86
+ "group": "complex",
87
+ "distance_type": "center_atom",
88
+ },
89
+ "substructure": {
90
+ "prob": 0.0,
91
+ "size": 0.8,
92
+ "mol_type_pairs": {
93
+ "PP": 15,
94
+ "PL": 10,
95
+ "LP": 10,
96
+ },
97
+ "feature_type": "one_hot",
98
+ "ratios": {
99
+ "full": [
100
+ 0.0,
101
+ 0.5,
102
+ 1.0,
103
+ ], # ratio options of full chain substructure constraint
104
+ "partial": 0.3, # ratio of partial chain substructure constraint
105
+ },
106
+ "coord_noise_scale": 0.05,
107
+ "spec_asym_id": False,
108
+ },
109
+ "contact_atom": {
110
+ "prob": 0.0,
111
+ "size": 1 / 3,
112
+ "max_distance_range": {
113
+ "PP": ListValue([2, 12]),
114
+ "PL": ListValue([2, 8]),
115
+ },
116
+ "min_distance": -1,
117
+ "group": "complex",
118
+ "distance_type": "atom",
119
+ "feature_type": "continuous",
120
+ },
121
+ },
122
+ }
123
+
124
+ DATA_ROOT_DIR = os.environ.get("PROTENIX_DATA_ROOT_DIR", "/af3-dev/release_data/")
125
+
126
+ # Prefer the CCD cache created by scripts/gen_ccd_cache.py (without a date in the filename).
127
+ # See: docs/prepare_data.md
128
+ CCD_COMPONENTS_FILE_PATH = os.path.join(DATA_ROOT_DIR, "components.cif")
129
+ CCD_COMPONENTS_RDKIT_MOL_FILE_PATH = os.path.join(
130
+ DATA_ROOT_DIR, "components.cif.rdkit_mol.pkl"
131
+ )
132
+
133
+ if (not os.path.exists(CCD_COMPONENTS_FILE_PATH)) or (
134
+ not os.path.exists(CCD_COMPONENTS_RDKIT_MOL_FILE_PATH)
135
+ ):
136
+ CCD_COMPONENTS_FILE_PATH = os.path.join(DATA_ROOT_DIR, "components.v20240608.cif")
137
+ CCD_COMPONENTS_RDKIT_MOL_FILE_PATH = os.path.join(
138
+ DATA_ROOT_DIR, "components.v20240608.cif.rdkit_mol.pkl"
139
+ )
140
+ PDB_CLUSTER_FILE_PATH = os.path.join(DATA_ROOT_DIR, "clusters-by-entity-40.txt")
141
+
142
+
143
+ # This is a patch for the inference stage, for users who do not have root permission.
144
+ # If you run
145
+ # ```
146
+ # bash inference_demo.sh
147
+ # ```
148
+ # or
149
+ # ```
150
+ # protenix predict --input examples/example.json --out_dir ./output
151
+ # ````
152
+ # The checkpoint and the data cache will be downloaded to the current code directory.
153
+ if (not os.path.exists(CCD_COMPONENTS_FILE_PATH)) or (
154
+ not os.path.exists(CCD_COMPONENTS_RDKIT_MOL_FILE_PATH)
155
+ ):
156
+ print("Try to find the ccd cache data in the code directory for inference.")
157
+ current_file_path = os.path.abspath(__file__)
158
+ current_directory = os.path.dirname(current_file_path)
159
+ code_directory = os.path.dirname(current_directory)
160
+
161
+ data_cache_dir = os.path.join(code_directory, "release_data/ccd_cache")
162
+ CCD_COMPONENTS_FILE_PATH = os.path.join(data_cache_dir, "components.cif")
163
+ CCD_COMPONENTS_RDKIT_MOL_FILE_PATH = os.path.join(
164
+ data_cache_dir, "components.cif.rdkit_mol.pkl"
165
+ )
166
+ if (not os.path.exists(CCD_COMPONENTS_FILE_PATH)) or (
167
+ not os.path.exists(CCD_COMPONENTS_RDKIT_MOL_FILE_PATH)
168
+ ):
169
+
170
+ CCD_COMPONENTS_FILE_PATH = os.path.join(
171
+ data_cache_dir, "components.v20240608.cif"
172
+ )
173
+ CCD_COMPONENTS_RDKIT_MOL_FILE_PATH = os.path.join(
174
+ data_cache_dir, "components.v20240608.cif.rdkit_mol.pkl"
175
+ )
176
+
177
+ data_configs = {
178
+ "num_dl_workers": 16,
179
+ "epoch_size": 10000,
180
+ "train_ref_pos_augment": True,
181
+ "test_ref_pos_augment": True,
182
+ "train_sets": ListValue(["weightedPDB_before2109_wopb_nometalc_0925"]),
183
+ "train_sampler": {
184
+ "train_sample_weights": ListValue([1.0]),
185
+ "sampler_type": "weighted",
186
+ },
187
+ "test_sets": ListValue(["recentPDB_1536_sample384_0925"]),
188
+ "weightedPDB_before2109_wopb_nometalc_0925": {
189
+ "base_info": {
190
+ "mmcif_dir": os.path.join(DATA_ROOT_DIR, "mmcif"),
191
+ "bioassembly_dict_dir": os.path.join(DATA_ROOT_DIR, "mmcif_bioassembly"),
192
+ "indices_fpath": os.path.join(
193
+ DATA_ROOT_DIR,
194
+ "indices/weightedPDB_indices_before_2021-09-30_wo_posebusters_resolution_below_9.csv.gz",
195
+ ),
196
+ "pdb_list": "",
197
+ "random_sample_if_failed": True,
198
+ "max_n_token": -1, # can be used for removing data with too many tokens.
199
+ "use_reference_chains_only": False,
200
+ "exclusion": { # do not sample the data based on ions.
201
+ "mol_1_type": ListValue(["ions"]),
202
+ "mol_2_type": ListValue(["ions"]),
203
+ },
204
+ },
205
+ **deepcopy(default_weighted_pdb_configs),
206
+ },
207
+ "recentPDB_1536_sample384_0925": {
208
+ "base_info": {
209
+ "mmcif_dir": os.path.join(DATA_ROOT_DIR, "mmcif"),
210
+ "bioassembly_dict_dir": os.path.join(
211
+ DATA_ROOT_DIR, "recentPDB_bioassembly"
212
+ ),
213
+ "indices_fpath": os.path.join(
214
+ DATA_ROOT_DIR, "indices/recentPDB_low_homology_maxtoken1536.csv"
215
+ ),
216
+ "pdb_list": os.path.join(
217
+ DATA_ROOT_DIR,
218
+ "indices/recentPDB_low_homology_maxtoken1024_sample384_pdb_id.txt",
219
+ ),
220
+ "max_n_token": GlobalConfigValue("test_max_n_token"), # filter data
221
+ "sort_by_n_token": False,
222
+ "group_by_pdb_id": True,
223
+ "find_eval_chain_interface": True,
224
+ },
225
+ **deepcopy(default_test_configs),
226
+ },
227
+ "posebusters_0925": {
228
+ "base_info": {
229
+ "mmcif_dir": os.path.join(DATA_ROOT_DIR, "posebusters_mmcif"),
230
+ "bioassembly_dict_dir": os.path.join(
231
+ DATA_ROOT_DIR, "posebusters_bioassembly"
232
+ ),
233
+ "indices_fpath": os.path.join(
234
+ DATA_ROOT_DIR, "indices/posebusters_indices_mainchain_interface.csv"
235
+ ),
236
+ "pdb_list": "",
237
+ "find_pocket": True,
238
+ "find_all_pockets": False,
239
+ "max_n_token": GlobalConfigValue("test_max_n_token"), # filter data
240
+ },
241
+ **deepcopy(default_test_configs),
242
+ },
243
+ "msa": {
244
+ "enable": True,
245
+ "enable_rna_msa": False,
246
+ "prot": {
247
+ "pairing_db": "uniref100",
248
+ "non_pairing_db": "mmseqs_other",
249
+ "pdb_mmseqs_dir": os.path.join(DATA_ROOT_DIR, "mmcif_msa"),
250
+ "seq_to_pdb_idx_path": os.path.join(DATA_ROOT_DIR, "seq_to_pdb_index.json"),
251
+ "indexing_method": "sequence",
252
+ },
253
+ "rna": {
254
+ "seq_to_pdb_idx_path": "",
255
+ "rna_msa_dir": "",
256
+ "indexing_method": "sequence",
257
+ },
258
+ "strategy": "random",
259
+ "merge_method": "dense_max",
260
+ "min_size": {
261
+ "train": 1,
262
+ "test": 1,
263
+ },
264
+ "max_size": {
265
+ "train": 16384,
266
+ "test": 16384,
267
+ },
268
+ "sample_cutoff": {
269
+ "train": 16384,
270
+ "test": 16384,
271
+ },
272
+ },
273
+ "template": {
274
+ "enable": False,
275
+ },
276
+ "ccd_components_file": CCD_COMPONENTS_FILE_PATH,
277
+ "ccd_components_rdkit_mol_file": CCD_COMPONENTS_RDKIT_MOL_FILE_PATH,
278
+ "pdb_cluster_file": PDB_CLUSTER_FILE_PATH,
279
+ }
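Note that `DATA_ROOT_DIR` is read from the `PROTENIX_DATA_ROOT_DIR` environment variable at import time, so the variable has to be set before this module is imported. A minimal sketch (the path is a placeholder, and the import assumes the repo root is on `PYTHONPATH`):

```python
# Minimal sketch: PROTENIX_DATA_ROOT_DIR must be set before configs_data is imported,
# because the CCD/cluster paths are computed at import time. The path is a placeholder.
import os

os.environ["PROTENIX_DATA_ROOT_DIR"] = "/data/protenix_release_data/"

from configs import configs_data  # imported after setting the env var on purpose

print(configs_data.data_configs["ccd_components_file"])
```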
configs/configs_inference.py ADDED
@@ -0,0 +1,35 @@
+ # Copyright 2024 ByteDance and/or its affiliates.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # pylint: disable=C0114
+ import os
+
+ from protenix.config.extend_types import ListValue, RequiredValue
+
+ current_file_path = os.path.abspath(__file__)
+ current_directory = os.path.dirname(current_file_path)
+ code_directory = os.path.dirname(current_directory)
+ # The model will be downloaded to the following path if it does not exist:
+ # "./release_data/checkpoint/model_v0.5.0.pt"
+ inference_configs = {
+     "model_name": "protenix_base_default_v0.5.0",  # inference model selection
+     "seeds": ListValue([101]),
+     "dump_dir": "./output",
+     "need_atom_confidence": False,
+     "sorted_by_ranking_score": True,
+     "input_json_path": RequiredValue(str),
+     "load_checkpoint_dir": os.path.join(code_directory, "./release_data/checkpoint/"),
+     "num_workers": 16,
+     "use_msa": True,
+ }
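`input_json_path` is a `RequiredValue`, i.e. it has no default and must be supplied for every run, normally via the command line (e.g. `protenix predict --input ... --out_dir ...`). Purely as an illustration of which fields a run needs — not the official entry point — the defaults can be copied and filled in from Python:

```python
# Hypothetical illustration only: the supported way to set these values is the CLI
# (`protenix predict --input ... --out_dir ...`). This just shows the fields a run needs.
from configs.configs_inference import inference_configs

run_configs = dict(inference_configs)
run_configs["input_json_path"] = "examples/example.json"  # RequiredValue: must be provided
run_configs["dump_dir"] = "./output"
run_configs["seeds"] = [101, 102]

print(run_configs["model_name"], run_configs["seeds"])
```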
configs/configs_model_type.py ADDED
@@ -0,0 +1,236 @@
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # model configs for inference and training,
16
+ # such as: protenix-base, protenix-mini, protenix-tiny, protenix-constraint.
17
+ # protenix_{model_size}_{features}_{version}
18
+ # model_size: base, mini, tiny
19
+ # features: default, constraint, esm, etc, if multiple split by "-"
20
+ # version: v{x}.{y}.{z}
21
+
22
+ """
23
+ Currently, model_name supports the following models.
24
+
25
+ | Model Name | ESM/MSA/Constraint | Model Parameters (M) |
26
+ |-------------------------------------|--------------------------|----------------------|
27
+ | `protenix_base_default_v0.5.0` | ❌ / ✅ / ❌ | 368.09 |
28
+ | `protenix_base_constraint_v0.5.0` | ❌ / ✅ / ✅ | 368.30 |
29
+ | `protenix_mini_esm_v0.5.0` | ✅ / ✅ / ❌ | 135.22 |
30
+ | `protenix_mini_ism_v0.5.0` | ✅ / ✅ / ❌ | 135.22 |
31
+ | `protenix_mini_default_v0.5.0` | ❌ / ✅ / ❌ | 134.06 |
32
+ | `protenix_tiny_default_v0.5.0` | ❌ / ✅ / ❌ | 109.50 |
33
+ """
34
+ model_configs = {
35
+ "protenix_base_default_v0.5.0": {
36
+ "model": {"N_cycle": 10},
37
+ "sample_diffusion": {
38
+ "N_step": 200,
39
+ }, # the default setting for base model
40
+ },
41
+ "protenix_base_constraint_v0.5.0": {
42
+ "model": {
43
+ "sample_diffusion": {
44
+ "N_step": 200,
45
+ }, # the default setting for constraint model
46
+ "N_cycle": 10,
47
+ "constraint_embedder": {
48
+ "pocket_embedder": {
49
+ "enable": True,
50
+ },
51
+ "contact_embedder": {
52
+ "enable": True,
53
+ },
54
+ "substructure_embedder": {
55
+ "enable": True,
56
+ },
57
+ "contact_atom_embedder": {
58
+ "enable": True,
59
+ },
60
+ },
61
+ },
62
+ "data": {
63
+ "weightedPDB_before2109_wopb_nometalc_0925": {
64
+ "constraint": {
65
+ "enable": True,
66
+ "pocket": {
67
+ "prob": 0.2,
68
+ "max_distance_range": {
69
+ "PP": [4, 15],
70
+ "LP": [3, 10],
71
+ },
72
+ },
73
+ "contact": {
74
+ "prob": 0.1,
75
+ },
76
+ "substructure": {
77
+ "prob": 0.5,
78
+ "size": 1,
79
+ "coord_noise_scale": 1,
80
+ },
81
+ "contact_atom": {
82
+ "prob": 0.1,
83
+ "max_distance_range": {
84
+ "PP": [2, 12],
85
+ "PL": [2, 15],
86
+ },
87
+ "min_distance": -1,
88
+ "group": "complex",
89
+ "distance_type": "atom",
90
+ "feature_type": "continuous",
91
+ },
92
+ },
93
+ },
94
+ "recentPDB_1536_sample384_0925": {
95
+ "constraint": {
96
+ "enable": True,
97
+ },
98
+ },
99
+ "posebusters_0925": {
100
+ "constraint": {
101
+ "enable": True,
102
+ },
103
+ },
104
+ },
105
+ "load_strict": False,  # Should be False when fine-tuning from the base model (the model
+ # architecture has changed); for inference, it should be True.
107
+ "finetune_params_with_substring": [
108
+ "constraint_embedder.substructure_z_embedder",
109
+ "constraint_embedder.pocket_z_embedder",
110
+ "constraint_embedder.contact_z_embedder",
111
+ "constraint_embedder.contact_atom_z_embedder",
112
+ ],
113
+ },
114
+ "protenix_mini_default_v0.5.0": {
115
+ "sample_diffusion": {
116
+ "gamma0": 0,
117
+ "step_scale_eta": 1.0,
118
+ "N_step": 5,
119
+ }, # the default setting for mini model
120
+ "model": {
121
+ "N_cycle": 4,
122
+ "msa_module": {
123
+ "n_blocks": 1,
124
+ },
125
+ "pairformer": {
126
+ "n_blocks": 16,
127
+ },
128
+ "diffusion_module": {
129
+ "atom_encoder": {
130
+ "n_blocks": 1,
131
+ },
132
+ "transformer": {
133
+ "n_blocks": 8,
134
+ },
135
+ "atom_decoder": {
136
+ "n_blocks": 1,
137
+ },
138
+ },
139
+ },
140
+ "load_strict": False, # For inference, it should be True.
141
+ },
142
+ "protenix_tiny_default_v0.5.0": {
143
+ "sample_diffusion": {
144
+ "gamma0": 0,
145
+ "step_scale_eta": 1.0,
146
+ "N_step": 5,
147
+ }, # the default setting for tiny model
148
+ "model": {
149
+ "N_cycle": 4,
150
+ "msa_module": {
151
+ "n_blocks": 1,
152
+ },
153
+ "pairformer": {
154
+ "n_blocks": 8,
155
+ },
156
+ "diffusion_module": {
157
+ "atom_encoder": {
158
+ "n_blocks": 1,
159
+ },
160
+ "transformer": {
161
+ "n_blocks": 8,
162
+ },
163
+ "atom_decoder": {
164
+ "n_blocks": 1,
165
+ },
166
+ },
167
+ },
168
+ "load_strict": False, # For inference, it should be True.
169
+ },
170
+ "protenix_mini_esm_v0.5.0": {
171
+ "sample_diffusion": {
172
+ "gamma0": 0,
173
+ "step_scale_eta": 1.0,
174
+ "N_step": 5,
175
+ }, # the default setting for mini model
176
+ "model": {
177
+ "N_cycle": 4,
178
+ "msa_module": {
179
+ "n_blocks": 1,
180
+ },
181
+ "pairformer": {
182
+ "n_blocks": 16,
183
+ },
184
+ "diffusion_module": {
185
+ "atom_encoder": {
186
+ "n_blocks": 1,
187
+ },
188
+ "transformer": {
189
+ "n_blocks": 8,
190
+ },
191
+ "atom_decoder": {
192
+ "n_blocks": 1,
193
+ },
194
+ },
195
+ },
196
+ "esm": {
197
+ "enable": True,
198
+ "model_name": "esm2-3b",
199
+ },
200
+ "load_strict": False, # For inference, it should be True.
201
+ "use_msa": False, # For efficiency, this model does not use MSA by default.
202
+ },
203
+ "protenix_mini_ism_v0.5.0": {
204
+ "sample_diffusion": {
205
+ "gamma0": 0,
206
+ "step_scale_eta": 1.0,
207
+ "N_step": 5,
208
+ }, # the default setting for mini model
209
+ "model": {
210
+ "N_cycle": 4,
211
+ "msa_module": {
212
+ "n_blocks": 1,
213
+ },
214
+ "pairformer": {
215
+ "n_blocks": 16,
216
+ },
217
+ "diffusion_module": {
218
+ "atom_encoder": {
219
+ "n_blocks": 1,
220
+ },
221
+ "transformer": {
222
+ "n_blocks": 8,
223
+ },
224
+ "atom_decoder": {
225
+ "n_blocks": 1,
226
+ },
227
+ },
228
+ },
229
+ "esm": {
230
+ "enable": True,
231
+ "model_name": "esm2-3b-ism",
232
+ },
233
+ "load_strict": False, # For inference, it should be True.
234
+ "use_msa": False, # For efficiency, this model does not use MSA by default.
235
+ },
236
+ }
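Each entry in `model_configs` is a partial override of the base configuration in `configs_base.py`. As an intuition-level sketch — a plain recursive dict merge, not necessarily the exact merge Protenix performs internally — applying a preset looks like this:

```python
# Illustrative sketch: apply a model preset as a recursive override of the base configs.
# This is a plain dict merge for intuition; Protenix's own merge logic may differ.
def deep_update(base: dict, override: dict) -> dict:
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_update(merged[key], value)
        else:
            merged[key] = value
    return merged


base = {"model": {"N_cycle": 4, "pairformer": {"n_blocks": 48}}}
preset = {"model": {"N_cycle": 10}}  # e.g. protenix_base_default_v0.5.0
print(deep_update(base, preset))
# {'model': {'N_cycle': 10, 'pairformer': {'n_blocks': 48}}}
```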
debug.py ADDED
@@ -0,0 +1,4 @@
+ import torch
+
+ data = torch.load('/home/hui007/Protenix/protenix_1d_embeddings/1EG0-0-M.pt')
+ print(data)
docs/colabfold_compatible_msa.md ADDED
@@ -0,0 +1,33 @@
+ ### Using Local Colabfold_search to Generate Protenix-Compatible MSAs
+
+ ColabFold provides an easy-to-use and efficient MSA search pipeline that's ideal for generating MSAs during inference. Unfortunately, this pipeline cannot fully match Protenix's MSA search process designed for training, as the current `colabfold_search` omits species information in the MSA, preventing correct pairing by Protenix's data pipeline. To address this issue, we provide the `scripts/colabfold_msa.py` script, which post-processes `colabfold_search` results by adding pseudo taxonomy IDs to paired MSAs so that they match Protenix's data pipeline.
+
+ Here's an example:
+ ```bash
+ python3 scripts/colabfold_msa.py examples/dimer.fasta <path/to/colabfold_db> dimer_colabfold_msa --db1 uniref30_2103_db --db3 colabfold_envdb_202108_db --mmseqs_path <path/to/mmseqs>
+ ```
+
+ #### Configuring Colabfold_search
+ Installation of ColabFold and MMseqs2 is required.
+
+ ColabFold can be installed with `pip install colabfold[alphafold]`.
+
+ Build MMseqs2 from source:
+
+ ```bash
+ wget https://github.com/soedinglab/MMseqs2/archive/refs/tags/16-747c6.tar.gz
+ tar xzf 16-747c6.tar.gz
+ cd MMseqs2-16-747c6/
+ mkdir build && cd build
+ cmake -DCMAKE_BUILD_TYPE=RELEASE -DCMAKE_INSTALL_PREFIX=. ..
+ make -j8
+ make install
+ ```
+
+ Download the ColabFold database:
+ ```bash
+ git clone https://github.com/sokrypton/ColabFold.git
+ cd ColabFold
+ # Configure database:
+ MMSEQS_NO_INDEX=1 ./setup_databases.sh <path/to/colabfold_db>
+ ```
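For intuition, the post-processing amounts to rewriting paired-MSA headers into the `>UniRef100_{hit}_{taxid}/` form that Protenix's pairing code expects (see msa_pipeline.md). The toy sketch below illustrates that rewrite with simplified, hypothetical header handling; for real data, use `scripts/colabfold_msa.py`.

```python
# Toy sketch: rewrite a3m headers into the ">UniRef100_{hit}_{taxid}/" form used for pairing.
# Header parsing here is simplified and hypothetical; use scripts/colabfold_msa.py for real data.
def rewrite_headers(a3m_text: str, pseudo_taxid: int = 0) -> str:
    out_lines = []
    for line in a3m_text.splitlines():
        if line.startswith(">") and not line.startswith(">query"):
            hit_name = line[1:].split()[0]
            out_lines.append(f">UniRef100_{hit_name}_{pseudo_taxid}/")
        else:
            out_lines.append(line)
    return "\n".join(out_lines)


example = ">query\nMKT\n>A0A160T8V6 some description\nMKA"
print(rewrite_headers(example, pseudo_taxid=9606))
```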
docs/docker_installation.md ADDED
@@ -0,0 +1,30 @@
+ ### Run with Docker
+
+ 1. Install Docker (with GPU Support)
+
+ Ensure that Docker is installed and configured with GPU support. Follow these steps:
+ * Install [Docker](https://www.docker.com/) if not already installed.
+ * Install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) to enable GPU support.
+ * Verify the setup with:
+ ```bash
+ docker run --rm --gpus all nvidia/cuda:12.1.0-base-ubuntu22.04 nvidia-smi
+ ```
+
+ 2. Pull the Docker image, which was built from this [Dockerfile](../Dockerfile):
+ ```bash
+ docker pull ai4s-cn-beijing.cr.volces.com/infra/protenix:v0.0.3
+ ```
+
+ 3. Clone this repository, `cd` into it, and install the package:
+ ```bash
+ git clone https://github.com/bytedance/protenix.git
+ cd ./protenix
+ pip install -e .
+ ```
+
+ 4. Run Docker with an interactive shell:
+ ```bash
+ docker run --gpus all -it -v $(pwd):/workspace -v /dev/shm:/dev/shm ai4s-cn-beijing.cr.volces.com/infra/protenix:v0.0.3 /bin/bash
+ ```
+
+ After running the above commands, you'll be inside the container's environment and can execute commands as you would in a normal Linux terminal.
docs/infer_json_format.md ADDED
@@ -0,0 +1,329 @@
1
+ ### Format of the input JSON file
2
+ The JSON file format closely resembles that used by the AlphaFold Server, with a few key differences:
3
+
4
+ 1. There are no restrictions on the types of ligands, ions, and modifications, whereas the AlphaFold Server currently supports only a limited set of specific CCD codes.
5
+ 2. Users can specify bonds between entities, such as covalent bonds between ligands and polymers.
6
+ 3. It supports inputting ligands in the form of SMILES strings or molecular structure files.
7
+ 4. Ligands composed of multiple CCD codes can be treated as a single entity. This feature is useful for representing glycans, for example, "NAG-NAG".
8
+ 5. The "glycans" field is no longer supported. Glycans can be fully represented by inputting multiple ligands with defined bonding or by providing their SMILES strings.
9
+
10
+ Here is an overview of the JSON file format:
11
+ ```json
12
+ [
13
+ {
14
+ "name": "Test Fold Job Number One",
15
+ "sequences": [...],
16
+ "covalent_bonds": [...]
17
+ }
18
+ ]
19
+ ```
20
+ The JSON file consists of a list of dictionaries, where each dictionary represents a set of sequences you want to model.
21
+ Even if you are modeling only one set of sequences, the top-level structure should still be a list.
22
+
23
+ Each dictionary contains the following three keys:
24
+ * `name`: A string representing the name of the inference job.
25
+ * `sequences`: A list of dictionaries that describe the entities (e.g., proteins, DNA, RNA, small molecules, and ions) involved in the inference.
26
+ * `covalent_bonds`: An optional list of dictionaries that define the covalent bonds between atoms from different entities.
27
+
28
+ Details of `sequences` and `covalent_bonds` are provided below.
29
+
30
+ #### sequences
31
+ There are 5 kinds of supported sequences:
32
+ * `proteinChain` – used for proteins
33
+ * `dnaSequence` – used for DNA (single strand)
34
+ * `rnaSequence` – used for RNA (single strand)
35
+ * `ligand` – used for ligands
36
+ * `ion` – used for ions
37
+
38
+ ##### proteinChain
39
+ ```json
40
+ {
41
+ "proteinChain": {
42
+ "sequence": "PREACHINGS",
43
+ "count": 1,
44
+ "modifications": [
45
+ {
46
+ "ptmType": "CCD_HY3",
47
+ "ptmPosition": 1,
48
+ },
49
+ {
50
+ "ptmType": "CCD_P1L",
51
+ "ptmPosition": 5
52
+ }
53
+ ],
54
+ "msa":{
55
+ "precomputed_msa_dir": "./precomputed_msa",
56
+ "pairing_db": "uniref100",
57
+ },
58
+ },
59
+ }
60
+ ```
61
+ * `sequence`: A string representing a protein sequence, which may contain only the 20 standard amino acid types and X (UNK) for unknown residues.
62
+ * `count`: The number of copies of this protein chain (integer).
63
+ * `modifications`: An optional list of dictionaries that describe post-translational modifications.
64
+
65
+ * `ptmType`: A string containing CCD code of the modification.
66
+ * `ptmPosition`: The position of the modified amino acid (integer).
67
+ * `msa`: A dictionary containing options for Multiple Sequence Alignment (MSA). **If you want to search MSAs using our inference pipeline, you should not set this field or set it to an empty dictionary**:
68
+ * `precomputed_msa_dir`: The path to a directory containing precomputed MSAs. This directory should contain two specific files: "pairing.a3m" for MSAs used for pairing, and "non_pairing.a3m" for non-pairing MSAs.
69
+ * `pairing_db`: The name of the genomic database used for pairing MSAs. The default is "uniref100" and should not be changed. (The MSA search actually runs against UniRef30, a clustered version of UniRef100.)
70
+
71
+ ##### dnaSequence
72
+ ```json
73
+ {
74
+ "dnaSequence": {
75
+ "sequence": "GATTACA",
76
+ "modifications": [
77
+ {
78
+ "modificationType": "CCD_6OG",
79
+ "basePosition": 1
80
+ },
81
+ {
82
+ "modificationType": "CCD_6MA",
83
+ "basePosition": 2
84
+ }
85
+ ],
86
+ "count": 1
87
+ }
88
+ },
89
+ {
90
+ "dnaSequence": {
91
+ "sequence": "TGTAATC",
92
+ "count": 1
93
+ }
94
+ }
95
+ ```
96
+ Please note that the `dnaSequence` type refers to a single-stranded DNA sequence. If you
+ wish to model double-stranded DNA, please add a second `dnaSequence` entry representing
+ the sequence of the reverse complement strand.
99
+
100
+ * `sequence`: A string containing a DNA sequence; only the letters A, T, G, C and N (unknown nucleotide) are allowed.
101
+ * `count`: The number of copies of this DNA chain (integer).
102
+ * `modifications`: An optional list of dictionaries describing the DNA chemical modifications:
104
+ * `modificationType`: A string containing CCD code of modification.
105
+ * `basePosition`: A position of the modified nucleotide (integer).
106
+
107
+ ##### rnaSequence
108
+ ```json
109
+ {
110
+ "rnaSequence": {
111
+ "sequence": "GUAC",
112
+ "modifications": [
113
+ {
114
+ "modificationType": "CCD_2MG",
115
+ "basePosition": 1
116
+ },
117
+ {
118
+ "modificationType": "CCD_5MC",
119
+ "basePosition": 4
120
+ }
121
+ ],
122
+ "count": 1
123
+ }
124
+ }
125
+ ```
126
+ * `sequence`: A string representing the RNA sequence (single-stranded); only letters A, U, G, C and N (unknown nucleotides) are allowed.
127
+ * `count`: The number of copies of this RNA chain (integer).
128
+ * `modifications`: An optional list of dictionaries describing RNA chemical modifications:
129
+ * `modificationType`: A string containing the CCD code of the modification.
131
+ * `basePosition`: The position of the modified nucleotide (integer).
132
+
133
+ ##### ligand
134
+ ```json
135
+ {
136
+ "ligand": {
137
+ "ligand": "CCD_ATP",
138
+ "count": 1
139
+ }
140
+ },
141
+ {
142
+ "ligand": {
143
+ "ligand": "FILE_your_file_path/atp.sdf",
144
+ "count": 1
145
+ }
146
+ },
147
+ {
148
+ "ligand": {
149
+ "ligand": "Nc1ncnc2c1ncn2[C@@H]1O[C@H](CO[P@@](=O)(O)O[P@](=O)(O)OP(=O)(O)O)[C@@H](O)[C@H]1O",
150
+ "count": 1
151
+ }
152
+ }
153
+ ```
154
+ * `ligand`: A string representing the ligand. `ligand` can be one of the following three:
155
+ * A string containing the CCD code of the ligand, prefixed with "CCD_". For glycans or similar structures, this can be a concatenation of multiple CCD codes, for example, "CCD_NAG_BMA_BGC".
156
+ * A molecular SMILES string representing the ligand.
157
+ * A path to a molecular structure file, prefixed with "FILE_", where the supported file formats are PDB, SDF, MOL, and MOL2. The file must include the 3D conformation of the molecule.
158
+
159
+ * `count` is the number of copies of this ligand (integer).
160
+
161
+ ##### ion
162
+ ```json
163
+ {
164
+ "ion": {
165
+ "ion": "MG",
166
+ "count": 2
167
+ }
168
+ },
169
+ {
170
+ "ion": {
171
+ "ion": "NA",
172
+ "count": 3
173
+ }
174
+ }
175
+ ```
176
+ * `ion`: A string containing the CCD code for the ion. Note that, unlike ligands, the ion code **does not** start with "CCD_".
177
+ * `count`: The number of copies of this ion (integer).
178
+
179
+ #### covalent_bonds
180
+ ```json
181
+ "covalent_bonds": [
182
+ {
183
+ "entity1": "2",
184
+ "copy1": 1,
185
+ "position1": "2",
186
+ "atom1": "N6",
187
+ "entity2": "3",
188
+ "copy2": 1,
189
+ "position2": "1",
190
+ "atom2": "C1"
191
+ }
192
+ ]
193
+ ```
194
+
195
+ The `covalent_bonds` section specifies covalent bonds between a polymer and a ligand, or between two ligands.
196
+ To define a covalent bond, two atoms involved in the bond must be identified. The following fields are used:
197
+
198
+ * `entity1`, `entity2`: The entity numbers for the two atoms involved in the bond.
199
+ The entity number corresponds to the order in which the entity appears in the `sequences` list, starting from 1.
200
+ * `copy1`, `copy2`: The copy index (starting from 1) of `entity1` and `entity2`, respectively. These fields are optional, but either both must be specified or both must be left empty. If neither is provided, a bond is created between corresponding pairs of copies of the two entities. For example, if both entity1 and entity2 have two copies, a bond is formed between entity1.copy1 and entity2.copy1, and between entity1.copy2 and entity2.copy2. In this case, both entities must have the same number of copies.
201
+ * `position1`, `position2` - The position of the residue (or ligand part) within the entity.
202
+ The position value starts at 1 and can vary based on the type of entity:
203
+ * For **polymers** (e.g., proteins, DNA, RNA), the position corresponds to the location of the residue in the sequence.
204
+ * For **ligands** composed of multiple CCD codes, the position refers to the serial number of the CCD code.
205
+ * For **single CCD code ligands**, or ligands defined by **SMILES** or **FILE**, the position is always set to 1.
206
+
207
+ * `atom1`, `atom2` - The atom names (or atom indices) of the atoms to be bonded.
208
+ * If the entity is a polymer or described by a CCD code, the atom names are consistent with those defined in the CCD.
209
+ * If the entity is a ligand defined by SMILES or a FILE, atoms can be specified by their atom index. The atom index corresponds to the position of the atom in the file or in the SMILES string, starting from 0.
210
+
211
+ Deprecation Notice: The previous field names such as `left_entity` and `right_entity`, and the other fields starting with `left`/`right`, have been replaced by names ending in `1` and `2` to denote the two atoms forming a bond. The current code still accepts the old field names, but they may be removed in the future, leaving only the new names. For atoms in a SMILES- or FILE-defined ligand, an alternative to the atom index is to write the element symbol of the specified atom followed by its sequential number for that element, e.g., "C2" denotes the second carbon.
212
+
213
+ The section below describes **Version 2** of the `constraint` format.
+
217
+ ### constraint
218
+ The `constraint` section specifies additional structural information that provides inter-chain guidance to Protenix. Currently, Protenix supports two kinds of constraints: `contact` and `pocket`.
+ The `contact` constraint lets you specify residue- or atom-level contact priors. The `pocket` constraint is used to guide the binding interface between a chain of interest (e.g. a ligand or an antibody) and specific residues in another chain (e.g. epitopes).
220
+
221
+ > 💡 *This is a **soft constraint**: the model is encouraged, but not strictly required, to satisfy it.*
222
+
223
+ #### contact constraint
224
+
225
+ The `contact` field consists of a list of dictionaries, each describing one contact. The residues and atoms involved in the contact are now represented as compact lists, making the format more concise and flexible.
226
+
227
+ ##### Example:
228
+
229
+ ```json
230
+ "contact": [
231
+ {
232
+ "residue1": ["1", 1, 169],
233
+ "atom2": ["2", 1, 1, "C5"],
234
+ "max_distance": 6,
235
+ "min_distance": 0
236
+ }, // token-contact
237
+ {
238
+ "atom1": ["1", 1, 169, "CA"],
239
+ "residue2": ["2", 1, 1],
240
+ "max_distance": 6,
241
+ "min_distance": 0
242
+ }, // token-contact
243
+ {
244
+ "residue1": ["1", 1, 169],
245
+ "residue2": ["2", 1, 1 ],
246
+ "max_distance": 6,
247
+ "min_distance": 0
248
+ }, // token-contact
249
+ {
250
+ "atom1": ["1", 1, 169, "CA"],
251
+ "atom2": ["2", 1, 1, "C5"],
252
+ "max_distance": 6,
253
+ "min_distance": 3
254
+ }, // atom-contact
255
+ ...
256
+ ]
257
+ ```
258
+
259
+ Each contact dictionary includes the following keys:
260
+ * `residue1` or `residue2` (list):
261
+ Specifies a **residue** in the format:`[entity_number, copy_index, position]`
262
+
263
+ * `atom1` or `atom2` (list):
264
+ Specifies an **atom** (commonly from a ligand or another residue) in the format:`[entity_number, copy_index, position, atom_name]`
265
+
266
+ * `max_distance` (float):
267
+ The **expected maximum distance** (in Ångströms) between the specified residues or atoms.
268
+ * `min_distance` (float):
269
+ The **expected minimum distance** (in Ångströms) between the specified residues or atoms. For token-contact, you do not need to specify this field. It is 0 by default.
270
+
271
+ #### pocket constraint
272
+
273
+ The `pocket` constraint is defined as a dictionary with three keys: `"binder_chain"`, `"contact_residues"`, and `"max_distance"` to allow chain-residue binding specification.
274
+
275
+ ##### Example
276
+
277
+ ```json
278
+ "pocket": {
279
+ "binder_chain": ["2", 1],
280
+ "contact_residues": [
281
+ ["1", 1, 126],
282
+ ...
283
+ ],
284
+ "max_distance": 6
285
+ }
286
+ ```
287
+
288
+ * `binder_chain` (list):
289
+ Specifies the **binder chain** in the format: `[entity_number, copy_index]`
290
+
291
+ * `contact_residues` (list of lists):
292
+ A list of residue that are expected to be in spatial proximity (i.e., in or near the binding pocket). Each residue is specified as:
293
+ `[entity_number, copy_index, position]`
294
+
295
+ * `max_distance` (float):
296
+ The **maximum allowed distance** (in Ångströms) between the binder and the specified contact residues.
297
+
298
+
299
+ ### Format of the model output
300
+ The outputs will be saved in the directory provided via the `--dump_dir` flag in the inference script. The outputs include the predicted structures in CIF format and the confidence in JSON files. The `--dump_dir` will have the following structure:
301
+
302
+ ```bash
303
+ ├── <name>/ # specified in the input JSON file
304
+ │ ├── <seed>/ # specified via the `--seeds` flag in the inference script
305
+ │ │ ├── <name>_<seed>_sample_0.cif
306
+ │ │ ├── <name>_<seed>_summary_confidence_sample_0.json
307
+ │ │ └──... # the number of samples in each seed is specified via `--sample_diffusion.N_sample ` flag in the inference script
308
+ │ └──...
309
+ └── ...
310
+ ```
311
+
312
+ The contents of each output file are as follows:
313
+ - `<name>_<seed>_sample_*.cif` - A CIF format text file containing the predicted structure
314
+ - `<name>_<seed>_summary_confidence_sample_*.json` - A JSON format text file containing various confidence scores for assessing the reliability of predictions. Here’s a description of each score:
315
+
316
+ - `plddt` - Predicted Local Distance Difference Test (pLDDT) score. Higher values indicate greater confidence.
317
+ - `gpde` - Global Predicted Distance Error (gPDE) score. Lower values indicate greater confidence.
318
+ - `ptm` - Predicted TM-score (pTM). Values closer to 1 indicate greater confidence.
319
+ - `iptm` - Interface Predicted TM-score, used to estimate the accuracy of interfaces between chains. Values closer to 1 indicate greater confidence.
320
+ - `chain_ptm` - pTM score calculated for individual chains with the shape of [N_chains], indicating the reliability of specific chain structure.
321
+ - `chain_pair_iptm`: Pairwise interface pTM scores between chain pairs with the shape of [N_chains, N_chains], indicating the reliability of specific chain-chain interactions.
322
+ - `chain_iptm` - Average ipTM scores for each chain with the shape of [N_chains].
323
+ - `chain_pair_iptm_global` - Average `chain_iptm` between chain pairs with the shape of [N_chains, N_chains]. For an interface containing a small molecule, ion, or bonded ligand chain (named `C*`), this value equals the `chain_iptm` value of `C*`.
324
+ - `chain_plddt` - pLDDT scores calculated for individual chains with the shape of [N_chains].
325
+ - `chain_pair_plddt` - Pairwise pLDDT scores for chain pairs with the shape of [N_chains, N_chains].
326
+ - `has_clash` - Boolean flag indicating if there are steric clashes in the predicted structure.
327
+ - `disorder` - Predicted regions of intrinsic disorder within the protein, highlighting residues that may be flexible or unstructured.
328
+ - `ranking_score` - Predicted confidence score for ranking complexes. Higher values indicate greater confidence.
329
+ - `num_recycles`: Number of recycling steps used during inference.
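A small sketch of consuming these outputs, e.g. collecting every `summary_confidence` JSON under `--dump_dir` and ranking samples by `ranking_score` (the directory layout follows the tree above; the exact glob pattern is an assumption):

```python
# Sketch: gather summary-confidence JSONs under dump_dir and rank samples by ranking_score.
# Layout follows the tree above (<dump_dir>/<name>/<seed>/...); the glob pattern is an assumption.
import glob
import json
import os

dump_dir = "./output"
records = []
for path in glob.glob(os.path.join(dump_dir, "*", "*", "*_summary_confidence_sample_*.json")):
    with open(path) as fh:
        summary = json.load(fh)
    score = summary.get("ranking_score")
    if score is not None:
        records.append((score, path))

for score, path in sorted(records, reverse=True):
    print(f"{score:.4f}\t{path}")
```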
docs/kernels.md ADDED
@@ -0,0 +1,24 @@
+ ### Setting up kernels
+
+ - **Custom CUDA layernorm kernels**, modified from [FastFold](https://github.com/hpcaitech/FastFold) and [Oneflow](https://github.com/Oneflow-Inc/oneflow), speed up training by about 30%-50% across the different training stages. To use this feature, run the following command:
+ ```bash
+ export LAYERNORM_TYPE=fast_layernorm
+ ```
+ If the environment variable `LAYERNORM_TYPE` is set to `fast_layernorm`, the model will employ the layernorm we have developed; otherwise, the naive PyTorch layernorm is used. The kernels are compiled the first time `fast_layernorm` is called.
+ - **[DeepSpeed DS4Sci_EvoformerAttention kernel](https://www.deepspeed.ai/tutorials/ds4sci_evoformerattention/)** is a memory-efficient attention kernel developed as part of a collaboration between OpenFold and the DeepSpeed4Science initiative. To use this feature, run the following command:
+ ```bash
+ export USE_DEEPSPEED_EVO_ATTENTION=true
+ ```
+ DS4Sci_EvoformerAttention is implemented based on [CUTLASS](https://github.com/NVIDIA/cutlass). If you use this feature, you need to clone the CUTLASS repository and specify its path in the environment variable `CUTLASS_PATH`. The [Dockerfile](Dockerfile) already includes this setting:
+ ```bash
+ RUN git clone -b v3.5.1 https://github.com/NVIDIA/cutlass.git /opt/cutlass
+ ENV CUTLASS_PATH=/opt/cutlass
+ ```
+ If you set up `Protenix` via `pip`, you can set the environment variable `CUTLASS_PATH` as follows:
+
+ ```bash
+ git clone -b v3.5.1 https://github.com/NVIDIA/cutlass.git /path/to/cutlass
+ export CUTLASS_PATH=/path/to/cutlass
+ ```
+
+ The kernels will be compiled when DS4Sci_EvoformerAttention is called for the first time.
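The same switches can also be set programmatically, assuming they are read when the kernels are first used; a small sketch equivalent to the shell exports above (the CUTLASS path is a placeholder):

```python
# Sketch: set the kernel switches from Python before the Protenix kernels are first used.
# Equivalent to the shell exports above; /path/to/cutlass is a placeholder.
import os

os.environ["LAYERNORM_TYPE"] = "fast_layernorm"      # custom CUDA layernorm
os.environ["USE_DEEPSPEED_EVO_ATTENTION"] = "true"   # DS4Sci_EvoformerAttention
os.environ["CUTLASS_PATH"] = "/path/to/cutlass"      # needed by the DeepSpeed kernel
```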
docs/model_train_inference_cost.md ADDED
@@ -0,0 +1,51 @@
+ ### Training
+ Some settings follow those in the [AlphaFold 3](https://www.nature.com/articles/s41586-024-07487-w) paper. The table below shows the training settings for the different fine-tuning stages:
+
+ | Arguments | Initial training | Fine tuning 1 | Fine tuning 2 | Fine tuning 3 |
+ |-----------------------------------------|--------|---------|-------|-----|
+ | `train_crop_size` | 384 | 640 | 768 | 768 |
+ | `diffusion_batch_size` | 48 | 32 | 32 | 32 |
+ | `loss.weight.alpha_pae` | 0 | 0 | 0 | 1.0 |
+ | `loss.weight.alpha_bond` | 0 | 1.0 | 1.0 | 0 |
+ | `loss.weight.smooth_lddt` | 1.0 | 0 | 0 | 0 |
+ | `loss.weight.alpha_confidence` | 1e-4 | 1e-4 | 1e-4 | 1e-4 |
+ | `loss.weight.alpha_diffusion` | 4.0 | 4.0 | 4.0 | 0 |
+ | `loss.weight.alpha_distogram` | 0.03 | 0.03 | 0.03 | 0 |
+ | `train_confidence_only` | False | False | False | True |
+ | full BF16-mixed speed (A100, s/step) | ~12 | ~30 | ~44 | ~13 |
+ | full BF16-mixed peak memory (GB) | ~34 | ~35 | ~48 | ~24 |
+
+ We recommend carrying out the training on A100-80G or H20/H100 GPUs. With full BF16-mixed precision training, the initial training stage can also be performed on A800-40G GPUs. On GPUs with smaller memory, such as the A30, you will need to reduce the model size, for example by decreasing `model.pairformer.nblocks` and `diffusion_batch_size`.
+
+ ### Inference
+
+ The model runs inference in BF16 mixed precision; by **default**, the `SampleDiffusion` and `ConfidenceHead` parts are still run in FP32 precision.
+
+ Below are reference examples of CUDA memory usage (GB).
+
+ | Ntoken | Natom | Memory (GB) | Inference time (s) |
+ |--------|-------|-------|------------------|
+ | 500 | 5000 | 5.2 | 72 |
+ | 1000 | 10000 | 11.5 | 229 |
+ | 2000 | 20000 | 42.8 | 933 |
+ | 3000 | 30000 | 73.9 | 2295 |
+ | 3500 | 35000 | 69.5 | 3329 |
+ | 4000 | 40000 | 67.5 | 4483 |
+
+ The script in [runner/inference.py](../runner/inference.py) automatically changes the precision used for `SampleDiffusion` and `ConfidenceHead` to avoid OOM, as follows:
+ ```python
+ def update_inference_configs(configs: Any, N_token: int):
+     # Setting the default inference configs for different N_token and N_atom
+     # when N_token is larger than 3000, the default config might OOM even on an
+     # A100 80G GPU,
+     if N_token > 3840:
+         configs.skip_amp.confidence_head = False
+         configs.skip_amp.sample_diffusion = False
+     elif N_token > 2560:
+         configs.skip_amp.confidence_head = False
+         configs.skip_amp.sample_diffusion = True
+     else:
+         configs.skip_amp.confidence_head = True
+         configs.skip_amp.sample_diffusion = True
+     return configs
+ ```
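A usage sketch for the function above; the stand-in namespace only exists to make the snippet self-contained, whereas in practice `configs` is the merged run configuration used by `runner/inference.py`:

```python
# Usage sketch for update_inference_configs (defined above). The SimpleNamespace stand-in
# mimics configs.skip_amp so the snippet runs on its own.
from types import SimpleNamespace

configs = SimpleNamespace(skip_amp=SimpleNamespace(confidence_head=True, sample_diffusion=True))

N_token = 3000  # e.g. the token count of the input complex
configs = update_inference_configs(configs, N_token)
print(configs.skip_amp.confidence_head, configs.skip_amp.sample_diffusion)  # False True
```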
docs/msa_pipeline.md ADDED
@@ -0,0 +1,101 @@
1
+ ## MSA data pipeline
2
+ If you download our released wwPDB dataset as in [training.md](./training.md), the mmcif_msa [450G] dir has the following directory structure.
3
+ ```bash
4
+ ├── seq_to_pdb_index.json [45M] # sequence to integers mapping file
5
+ ├── mmcif_msa [450G] # msa files
6
+ ├── 0
7
+ ├── uniref100_hits.a3m
8
+ ├── mmseqs_other_hits.a3m
9
+ ├── 1
10
+ ├── uniref100_hits.a3m
11
+ ├── mmseqs_other_hits.a3m
12
+ ├── 2
13
+ ├── uniref100_hits.a3m
14
+ ├── mmseqs_other_hits.a3m
15
+ ...
16
+ ├── 157201
17
+ ├── uniref100_hits.a3m
18
+ ├── mmseqs_other_hits.a3m
19
+
20
+ ```
21
+
22
+ Each integer in the first-level directory under mmcif_msa (for example, 0, 1, 2, and 157201) represents a unique protein sequence. The key of `seq_to_pdb_index.json` is the unique protein sequence, and the value is the integer corresponding to the first-level subdirectory of mmcif_msa mentioned above.
23
+
24
+ This document provides the steps to convert MSAs obtained from ColabFold into the Protenix training format.
25
+
26
+ ### Steps to get your own MSA data for training
27
+
28
+ #### Step1: get input protein sequence
29
+ Run the following command:
30
+
31
+ ```python
32
+ python3 scripts/msa/step1-get_prot_seq.py
33
+ ```
34
+ You will get outputs in the `scripts/msa/data/pdb_seqs` dir. The resulting directory is as follows:
35
+
36
+ ```bash
37
+ ├── pdb_index_to_seq.json # mapping integers to sequences
38
+ ├── seq_to_pdb_index.json # maps sequences to the integer identifiers used when saving MSAs; required during training to find the local MSA path for a sequence
39
+ ├── pdb_seq.fasta # Input of MSA
40
+ ├── pdb_seq.csv # Intermediate Files
41
+ ├── seq_to_pdb_id_entity_id.json # Intermediate Files
42
+ ```
43
+
44
+ #### Step2: run msa search
45
+ We give detailed environment configuration and search commands in
46
+
47
+ ```python
48
+ scripts/msa/step2-get_msa.ipynb
49
+ ```
50
+
51
+ The searched MSAs are in `scripts/msa/data/mmcif_msa_initial`. The resulting directory is as follows:
52
+ ```bash
53
+ ├── 0.a3m
54
+ ├── 1.a3m
55
+ ├── 2.a3m
56
+ ├── 3.a3m
57
+ ├── pdb70_220313_db.m8
58
+ ├── uniref_tax.m8 # records the Taxonomy ID used for MSA pairing
59
+ ```
60
+ #### Step3: MSA Post-Processing
61
+
62
+ The overall approach is to search the MSA containing taxonomy information only once per unique sequence, and to pair MSAs according to the species information of each hit.
63
+
64
+ For MSA post-processing, the Taxonomy ID from the UniRef30 DB is added to the MSAs, and the MSAs are split into `uniref100_hits.a3m` and `mmseqs_other_hits.a3m`, which correspond to `pairing.a3m` and `non_pairing.a3m` in the inference stage, respectively.
65
+
66
+ You can run:
67
+ ```python
68
+ python3 scripts/msa/step3-uniref_add_taxid.py
69
+
70
+ python3 scripts/msa/step4-split_msa_to_uniref_and_others.py
71
+ ```
72
+
73
+ The final pairing and non_pairing MSAs in `scripts/msa/data/mmcif_msa` are as follows:
74
+
75
+
76
+ ```
77
+ >query
78
+ GPTHRFVQKVEEMVQNHMTYSLQDVGGDANWQLVVEEGEMKVYRREVEENGIVLDPLKATHAVKGVTGHEVCNYFWNVDVRNDWETTIENFHVVETLADNAIIIYQTHKRVWPASQRDVLYLSVIRKIPALTENDPETWIVCNFSVDHDSAPLNNRCVRAKINVAMICQTLVSPPEGNQEISRDNILCKITYVANVNPGGWAPASVLRAVAKREYPKFLKRFTSYVQEKTAGKPILF
79
+ >UniRef100_A0A0S7JZT1_188132/ 246 0.897 6.614E-70 2 236 237 97 331 332
80
+ --THRFADKVEEMVQNHMTYSLQDVGGDANWQLVIEEGEMKVYRREVEENGIVLDPLKATHAVKGVTGHEVCHYFWDTDVRNDWETTIDNFNVVETLSDNAIIVYQTHKRVWPASQRDILFLSAIRKILAKNENDPDTWLVCNFSVDHDKAPPTNRCVRAKINVAMICQTLVSPPEGDKEISRDNILCKITYVANVNPGGWAPASVLRAVAKREYPKFLKRFTSYVQEKTAGNPILF
81
+ >UniRef100_A0A4W6GBN4_8187/ 246 0.893 9.059E-70 2 236 237 373 607 608
82
+ --THRFANKVEEMVQNHMTYSLQDVGGDANWQLVIEEGEMKVYRREVEENGIVLDPLKATHSVKGVTGHEVCHYFWDTDVRMDWETTIENFNVVEKLSENAIIVYQTHKRVWPASQRDVLYLSAIRKIMATNENDPDTWLVCNFSVDHNNAPPTNRCVRAKINVAMICQTLVSPPEGDKEISRDNILCKITYVANVNPGGWAPASVLRAVAKREYPKFLKRFTSYVQEKTAGKPILF
83
+ ```
84
+
85
+ ```
86
+ >query
87
+ MAEVIRSSAFWRSFPIFEEFDSETLCELSGIASYRKWSAGTVIFQRGDQGDYMIVVVSGRIKLSLFTPQGRELMLRQHEAGALFGEMALLDGQPRSADATAVTAAEGYVIGKKDFLALITQRPKTAEAVIRFLCAQLRDTTDRLETIALYDLNARVARFFLATLRQIHGSEMPQSANLRLTLSQTDIASILGASRPKVNRAILSLEESGAIKRADGIICCNVGRLLSIADPEEDLEHHHHHHHH
88
+ >MGYP001165762451 218 0.325 1.019E-59 5 230 244 3 228 230
89
+ -----DKVEFLKGVPLFSELPEAHLQSLGELLIERSYRRGATIFFEGDPGDALYIVRSGIVKISRVAEDGREKTLAFLGKGEPFGEMALIDGGPRSAIAQALEATSLYALHRADFLAALTENPALSLGVIKVLSARLQQANAQLMDLVFRDVRGRVAQALLDLARR-HGVPLTNGRMISVKLTHQEIANLVGTARETVSRTFAELQDSGIIRIeGRNIVLLDAAQLEGYAAG-------------
90
+ >A0A160T8V6 218 0.285 1.019E-59 0 227 244 0 229 237
91
+ MPTTRDsnAVQALQVVPFFANLPEDHVAALAKALVPRRFSPGQVIFHLGDPGGLLYLISRGKIKISHTTSDGQEVVLAILGPGDFFGEMALIDDAPRSATAITLEPSETWTLHREEFIQYLTDNPEFALHVLKTLARHIRRLNTQLADIFFLDLPGRLARTLLNLADQ-YGRRAADGTIIDLSLTQTDLAEMTGATRVSINKALGRFRRAGWIQvTGRQVTVLDRAALEAL----------------
92
+ >AP58_3_1055460.scaffolds.fasta_scaffold1119545_2 216 0.304 3.581E-59 10 225 244 5 221 226
93
+ ----------LSRVPLFAELPPERIHELAQSVRRRTYHRGETIFHKGDPGNGLYIIAAGQVKIVLPSEMGEEAMLAVLEGGEFFGELALFDGLPRSATVVAVQNAEVLVLHRDDFMSFVGRNPEVVSALFAALSRRLRDADEMIEDAIFLDVPGRLAKRLLDLAEKHGRAEEKGGVAIDLKLTQQDLAAMVGATRESVNKHLGWMRDHGLIQLDRqRIVILKPDDLR------------------
94
+ ```
95
+ ### Format of MSA
96
+ In `uniref100_hits.a3m` (training stage) or `pairing.a3m` (inference stage), each header must start with the following format, which we use for pairing:
97
+ ```
98
+ >UniRef100_{hitname}_{taxonomyid}/
99
+ ```
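+
+ As an illustration, here is a minimal, non-authoritative sketch of how such headers can be parsed to recover the taxonomy IDs used for pairing; the input file name is a placeholder:
+
+ ```python
+ import re
+
+ # Minimal sketch: collect (hit name, taxonomy ID) pairs from a pairing a3m.
+ # Headers are expected to start with ">UniRef100_{hitname}_{taxonomyid}/".
+ pattern = re.compile(r"^>UniRef100_(\S+)_(\d+)/")
+ tax_ids = {}
+ with open("uniref100_hits.a3m") as f:  # placeholder path
+     for line in f:
+         match = pattern.match(line)
+         if match:
+             hit_name, tax_id = match.groups()
+             tax_ids[hit_name] = tax_id
+ print(f"{len(tax_ids)} hits with taxonomy IDs")
+ ```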
100
+
101
+ We also provide a pipeline that uses a local colabfold_search to generate Protenix-compatible MSAs in [colabfold_compatible_msa.md](./colabfold_compatible_msa.md).
docs/prepare_training_data.md ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Start with CIF files and prepare your own training data.
2
+
3
+ ## Data Preparation
4
+
5
+ 1. **Prepare CIF Files**: Place the CIF files you want to convert into training data in a folder. Alternatively, you can use a `txt` file to record the paths to these CIF files, with each line corresponding to the path of a specific CIF file.
6
+
7
+ 2. **Prepare Protein Clustering File (Optional)**: The protein clustering file contains category information for each `[PDB ID]_[Entity ID]`. In the Protenix training data, we cluster protein sequences using a 40% sequence identity threshold.
8
+
9
+ You can download the official clustering results file provided by RCSB PDB using the following command, and use it directly:
10
+ ```bash
11
+ wget https://cdn.rcsb.org/resources/sequence/clusters/clusters-by-entity-40.txt
12
+ ```
13
+
14
+ If you prefer to perform your own clustering of protein sequences, ensure the final results are formatted as a text file like this:
15
+ Each line represents a cluster and contains `[PDB ID]_[Entity ID]` entries separated by spaces (a minimal parsing sketch is shown after this list).
16
+
17
+ 3. **Update the CCD (Chemical Component Dictionary) Cache File (If needed)**: We provide a pre-processed file, with a cutoff date of 2024-06-08, that records the reference conformer for each CCD code. If the training data you're preparing is more recent than this date, some CCD codes may be missing. For example, the CCD code "WTB", which appears in PDB entry 8P3K (released on 2024-11-20), is not defined in the previously provided CCD file. In such cases, you need to run the following script to download and update the CCD CIF files:
18
+
19
+ ```bash
20
+ python3 scripts/gen_ccd_cache.py -c [ccd_cache_dir] -n [num_cpu]
21
+ ```
22
+
23
+ After running the script, three files will be generated in the specified "ccd_cache_dir":
24
+
25
+ - `components.cif` (CCD CIF file downloaded from RCSB)
26
+ - `components.cif.rdkit_mol.pkl` (pre-processed dictionary, where the key is the CCD Code and the value is an RDKit Mol object with 3D structure)
27
+ - `components.txt` (a list containing all the CCD Codes)
28
+
29
+ When running Protenix, the following files are used first:
30
+ ```bash
31
+ release_data/ccd_cache/components.cif
32
+ release_data/ccd_cache/components.cif.rdkit_mol.pkl
33
+ ```
34
+ If they are unavailable, it falls back to:
35
+ ```bash
36
+ release_data/ccd_cache/components.v20240608.cif
37
+ release_data/ccd_cache/components.v20240608.cif.rdkit_mol.pkl
38
+ ```
39
+ Notes:
40
+ - The `-c` parameter is optional. If not specified, files will be saved in the "release_data/ccd_cache" folder within the Protenix code directory by default.
41
+ - You can add the `-d` parameter when running the script to skip the CIF file download step, in which case the script will directly process the "components.cif" file located in the "ccd_cache_dir".
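+
+ Returning to the protein clustering file from item 2 above, the following is a minimal parsing sketch; the file name and the use of the line index as the cluster ID are illustrative assumptions rather than the exact convention used by the training code:
+
+ ```python
+ # Minimal sketch: read a clustering file (e.g. clusters-by-entity-40.txt) into a
+ # {"[PDB ID]_[Entity ID]": cluster_id} mapping.
+ entity_to_cluster = {}
+ with open("clusters-by-entity-40.txt") as f:  # placeholder path
+     for cluster_id, line in enumerate(f):
+         for entry in line.split():  # entries look like "[PDB ID]_[Entity ID]"
+             entity_to_cluster[entry] = cluster_id
+ print(f"{len(entity_to_cluster)} entities in {cluster_id + 1} clusters")
+ ```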
42
+
43
+ ## Data Preprocessing
44
+ Execute the script to preprocess the data:
45
+ ```bash
46
+ python3 scripts/prepare_training_data.py -i [input_path] -o [output_csv] -b [output_dir] -c [cluster_txt] -n [num_cpu]
47
+ ```
48
+
49
+ The preprocessed structures will be saved as `.pkl.gz` files. Additionally, a `CSV` file will be generated to catalog the chains and interfaces within these structures, which will facilitate sampling during the training process.
50
+
51
+ You can view the explanation of the parameters by using the `--help` command.
52
+ ```
53
+ python3 scripts/prepare_training_data.py --help
54
+ ```
55
+
56
+ Note that there is an optional parameter `-d` in the script. When this parameter is not used, the script processes CIF files downloaded from RCSB PDB by applying the full set of WeightedPDB training data filters. These filters include:
57
+
58
+ - Removing water molecules
59
+ - Removing hydrogen atoms
60
+ - Deleting polymer chains composed entirely of unknown residues
61
+ - Eliminating chains where the Cα distance between adjacent numbered residues exceeds 10 angstroms
62
+ - Removing elements labeled as "X"
63
+ - Deleting chains where no residues have been resolved
64
+ - When the number of chains exceeds 20, selecting one central atom from those capable of forming interfaces and retaining the 20 chains nearest to it. If a ligand is covalently bonded to a polymer, the ligand and the polymer are counted as a single chain. Additionally, if the number of chains is greater than 20 but the total number of tokens in these chains is less than 5120, more chains will be retained until the 5120-token limit is reached.
65
+ - Removing chains with one-third of their heavy atoms colliding
66
+
67
+ For CIF files generated through model inference where these filtering steps aren't desired, you can run the script with the `-d` parameter, which disables all these filters. The CIF structure will not be expanded to Assembly 1 in this case.
68
+
69
+
70
+ ## Output Format
71
+ ### Bioassembly Dict
72
+ In the folder specified by the `-b` parameter of the data preprocessing script, a corresponding `[pdb_id].pkl.gz` file is generated for each successfully processed CIF file. This file contains a dictionary saved with `pickle.dump`, with the following contents:
73
+ ```
74
+ | Key | Value Type | Description |
75
+ |----------------------------|---------------|-------------------------------------------------------------------------------|
76
+ | pdb_id | str | PDB Code |
77
+ | assembly_id | str | Assembly ID |
78
+ | sequences | dict[str, str]| Key is polymer's label_entity_id, value is canonical_sequence |
79
+ | release_date | str | PDB's Release Date |
80
+ | num_assembly_polymer_chains| int | Number of assembly polymer chains (pdbx_struct_assembly.oligomeric_count) |
81
+ | num_prot_chains | int | Number of protein chains in AtomArray |
82
+ | entity_poly_type | dict[str, str]| Key is polymer's label_entity_id, value corresponds to entity_poly.type |
83
+ | resolution | float | Resolution; if no resolution, value is -1 |
84
+ | num_tokens | int | Number of tokens |
85
+ | atom_array | AtomArray | AtomArray from structure processing |
86
+ | token_array | TokenArray | TokenArray generated based on AtomArray |
87
+ | msa_features | None | (Placeholder) |
88
+ | template_features | None | (Placeholder) |
89
+ ```
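+
+ As a quick check, a minimal sketch for inspecting one of these files is shown below; it assumes the `.pkl.gz` is a gzip-compressed pickle, and the file path is a placeholder for a file produced in the directory passed via `-b`:
+
+ ```python
+ import gzip
+ import pickle
+
+ # Minimal sketch: load one preprocessed structure and print a few fields.
+ with gzip.open("output_dir/1abc.pkl.gz", "rb") as f:  # placeholder path
+     bioassembly = pickle.load(f)
+
+ print(bioassembly["pdb_id"], bioassembly["num_tokens"], bioassembly["resolution"])
+ print(type(bioassembly["atom_array"]))  # biotite AtomArray
+ ```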
90
+
91
+ ### Indices CSV
92
+ After the script completes successfully, a CSV file will be generated at the location specified by `-o`.
93
+ Each row contains information about a pre-processed chain or interface, and the content of each column is described as follows:
94
+ ```
95
+ | Column Name | Value Type | Meaning | Required |
96
+ |----------------|------------|------------------------------------------------------------------------|----------|
97
+ | type | str | "chain" or "interface" | Y |
98
+ | pdb_id | str | PDB Code (entry.id) | Y |
99
+ | cluster_id | str | Cluster_id of the chain/interface | Y |
100
+ | assembly_id | str | Assembly id | N |
101
+ | release_date | str | Release date | N |
102
+ | resolution | float | Resolution; if no resolution, value is -1 | N |
103
+ | num_tokens | int | Number of tokens in AtomArray of Bioassembly Dict | N |
104
+ | num_prot_chains| int | Number of protein chains in AtomArray of Bioassembly Dict | N |
105
+ | eval_type | str | Classification used for evaluation | N |
106
+ | entity_1_id | str | Chain 1's label_entity_id | Y |
107
+ | chain_1_id | str | Chain 1's chain ID | Y |
108
+ | mol_1_type | str | Chain 1's corresponding mol_type ("protein", "nuc", "ligand", "ions") | Y |
109
+ | sub_mol_1_type | str | Sub-classification of Chain 1's entity corresponding to mol_type | N |
110
+ | cluster_1_id | str | Chain 1's cluster ID | Y |
111
+ | entity_2_id | str | Chain 2's label_entity_id | Y |
112
+ | chain_2_id | str | Chain 2's chain ID | Y |
113
+ | mol_2_type | str | Chain 2's corresponding mol_type ("protein", "nuc", "ligand", "ions") | Y |
114
+ | sub_mol_2_type | str | Sub-classification of Chain 2's entity corresponding to mol_type | N |
115
+ | cluster_2_id | str | Chain 2's cluster_id | Y |
116
+ ```
117
+ Notes:
118
+ - In the table, columns marked with 'Y' under 'Required' indicate that these columns are essential for training. If you are creating your own CSV for training purposes, these columns must be included. Columns marked with 'N' are optional and can be excluded.
119
+ - For rows where the "type" is "chain", the values in columns related to Chain 2 should all be filled with empty strings.
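+
+ If you build your own indices CSV, a minimal sketch with only the required columns is shown below; all values are illustrative placeholders, not real PDB entries:
+
+ ```python
+ import csv
+
+ # Minimal sketch: write a custom indices CSV containing only the columns marked
+ # 'Y' above. For a "chain" row, all Chain 2 columns are left as empty strings.
+ required = ["type", "pdb_id", "cluster_id", "entity_1_id", "chain_1_id",
+             "mol_1_type", "cluster_1_id", "entity_2_id", "chain_2_id",
+             "mol_2_type", "cluster_2_id"]
+ rows = [
+     {"type": "chain", "pdb_id": "1abc", "cluster_id": "c1", "entity_1_id": "1",
+      "chain_1_id": "A", "mol_1_type": "protein", "cluster_1_id": "c1",
+      "entity_2_id": "", "chain_2_id": "", "mol_2_type": "", "cluster_2_id": ""},
+ ]
+ with open("my_indices.csv", "w", newline="") as f:
+     writer = csv.DictWriter(f, fieldnames=required)
+     writer.writeheader()
+     writer.writerows(rows)
+ ```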
docs/training.md ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### Preparing the datasets
2
+ To download the [wwPDB dataset](https://www.wwpdb.org/) and the preprocessed training data, you need at least 1 TB of disk space.
3
+
4
+ Use the following command to download the preprocessed wwPDB training databases:
5
+
6
+ ```bash
7
+ wget -P /af3-dev/release_data/ https://af3-dev.tos-cn-beijing.volces.com/release_data.tar.gz
8
+ tar -xzvf /af3-dev/release_data/release_data.tar.gz -C /af3-dev/release_data/
9
+ rm /af3-dev/release_data/release_data.tar.gz
10
+ ```
11
+
12
+
13
+ The data should be placed in the `/af3-dev/release_data/` directory. You can also download it to a different directory, but remember to modify `DATA_ROOT_DIR` in [configs/configs_data.py](../configs/configs_data.py) accordingly. Alternatively, you can set the environment variable "PROTENIX_DATA_ROOT_DIR" to specify the path. The data hierarchy after extraction is as follows:
14
+
15
+ ```bash
16
+ ├── components.v20240608.cif [408M] # ccd source file
17
+ ├── components.v20240608.cif.rdkit_mol.pkl [121M] # rdkit Mol object generated by ccd source file
18
+ ├── indices [33M] # chain or interface entries
19
+ ├── mmcif [283G] # raw mmcif data
20
+ ├── mmcif_bioassembly [36G] # preprocessed wwPDB structural data
21
+ ├── mmcif_msa [450G] # msa files
22
+ ├── posebusters_bioassembly [42M] # preprocessed posebusters structural data
23
+ ├── posebusters_mmcif [361M] # raw mmcif data
24
+ ├── recentPDB_bioassembly [1.5G] # preprocessed recentPDB structural data
25
+ └── seq_to_pdb_index.json [45M] # sequence to pdb id mapping file
26
+ ```
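+
+ A minimal sanity-check sketch (not part of the training code) that verifies the expected top-level entries exist under your data root; the default path and the "PROTENIX_DATA_ROOT_DIR" override mirror the description above:
+
+ ```python
+ import os
+ from pathlib import Path
+
+ # Minimal sketch: check that the extracted release data looks complete.
+ data_root = Path(os.environ.get("PROTENIX_DATA_ROOT_DIR", "/af3-dev/release_data"))
+ expected = [
+     "components.v20240608.cif",
+     "components.v20240608.cif.rdkit_mol.pkl",
+     "indices",
+     "mmcif",
+     "mmcif_bioassembly",
+     "mmcif_msa",
+     "seq_to_pdb_index.json",
+ ]
+ missing = [name for name in expected if not (data_root / name).exists()]
+ print("missing entries:", missing or "none")
+ ```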
27
+
28
+ Data processing scripts have also been released. You can refer to [prepare_training_data.md](./prepare_training_data.md) for generating `{dataset}_bioassembly` and `indices`, and to [msa_pipeline.md](./msa_pipeline.md) for the pipelines that produce `mmcif_msa` and `seq_to_pdb_index.json`.
29
+
30
+ ### Training demo
31
+ After the installation and data preparations, you can run the following command to train the model from scratch:
32
+
33
+ ```bash
34
+ bash train_demo.sh
35
+ ```
36
+ Key arguments in this script are explained as follows:
37
+ * `dtype`: data type used in training. Valid options include `"bf16"` and `"fp32"`.
38
+ * `--dtype fp32`: the model will be trained in full FP32 precision.
39
+ * `--dtype bf16`: the model will be trained in BF16 mixed precision. By default, the `SampleDiffusion`, `ConfidenceHead`, `Mini-rollout` and `Loss` parts will still be trained in FP32 precision. If you want to train and run inference in full BF16 mixed precision, pass the following arguments to [train_demo.sh](../train_demo.sh):
40
+ ```bash
41
+ --skip_amp.sample_diffusion_training false \
42
+ --skip_amp.confidence_head false \
43
+ --skip_amp.sample_diffusion false \
44
+ --skip_amp.loss false \
45
+ ```
46
+ * `ema_decay`: the decay rate of the EMA, default is 0.999.
47
+ * `sample_diffusion.N_step`: during evaluation, the number of steps for the diffusion process is reduced to 20 to improve efficiency.
48
+
49
+ * `data.train_sets/data.test_sets`: the datasets used for training and evaluation. If there are multiple datasets, separate them with commas.
50
+ * Some settings follow those in the [AlphaFold 3](https://www.nature.com/articles/s41586-024-07487-w) paper. The table in [model_performance.md](../docs/model_performance.md) shows the training settings and memory usage for different training stages.
51
+ * In this version, we do not use the template and RNA MSA features for training, as reflected in the default settings in [configs/configs_base.py](../configs/configs_base.py) and [configs/configs_data.py](../configs/configs_data.py):
52
+ ```bash
53
+ --model.template_embedder.n_blocks 0 \
54
+ --data.msa.enable_rna_msa false \
55
+ ```
56
+ This will be considered in our future work.
57
+
58
+ * The model also supports distributed training with PyTorch’s [`torchrun`](https://pytorch.org/docs/stable/elastic/run.html). For example, if you’re running distributed training on a single node with 4 GPUs, you can use:
59
+ ```bash
60
+ torchrun --nproc_per_node=4 runner/train.py
61
+ ```
62
+ You can also pass other arguments with `--<ARGS_KEY> <ARGS_VALUE>` as you want.
63
+
64
+
65
+ If you want to speed up training, see [<u> setting up kernels documentation </u>](./kernels.md).
66
+
67
+ ### Finetune demo
68
+
69
+ If you want to fine-tune the model on a specific subset, such as an antibody dataset, you only need to provide a PDB list file and load the pretrained weights as [finetune_demo.sh](../finetune_demo.sh) shows:
70
+
71
+ ```bash
72
+ # wget -P /af3-dev/release_model/ https://af3-dev.tos-cn-beijing.volces.com/release_model/model_v0.5.0.pt
73
+ checkpoint_path="/af3-dev/release_model/model_v0.5.0.pt"
74
+ ...
75
+
76
+ --load_checkpoint_path ${checkpoint_path} \
77
+ --load_checkpoint_ema_path ${checkpoint_path} \
78
+ --data.weightedPDB_before2109_wopb_nometalc_0925.base_info.pdb_list examples/subset.txt \
79
+ ```
80
+
81
+ Here, `subset.txt` is a file containing PDB IDs, one per line:
82
+ ```bash
83
+ 6hvq
84
+ 5mqc
85
+ 5zin
86
+ 3ew0
87
+ 5akv
88
+ ```
extract_tianrui.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pickle
3
+ import torch
4
+ import json
5
+
6
+ # np_data = np.load('/home/hui007/rna/rna_repr/zhiyuan/train_data_final.npz')
7
+ # data_list = pickle.loads(np_data['data_list'])
8
+
9
+ # training_json = []
10
+
11
+ # for item in data_list:
12
+ # full_id = item['full_id']
13
+ # sequence = ''.join([i[1] for i in item['data']])
14
+ # # entry = {
15
+ # # "sequences": [
16
+ # # {
17
+ # # "rnaSequence": {
18
+ # # "sequence": sequence,
19
+ # # "count": 1
20
+ # # }
21
+ # # }
22
+ # # ],
23
+ # # "name": full_id
24
+ # # }
25
+ # # training_json.append(entry)
26
+ # data = item['data']
27
+
28
+ # extracted = [[j[1], j[2]] for j in data]
29
+
30
+ # torch.save(extracted, f"/home/hui007/Protenix/coord/{full_id}.pt")
31
+
32
+
33
+ # # coords = [coord for i in item['data'] for coord in i[2]['coord_list']]
34
+ # # tensor = torch.tensor(coords, dtype=torch.float32)
35
+ # # centroid = tensor.mean(dim=0, keepdim=True)
36
+ # # normalized_pos = (tensor - centroid) / 20.3689
37
+
38
+ # # torch.save(normalized_pos, f"/home/hui007/Protenix/coord/{full_id}.pt")
39
+
40
+ # # with open("/home/hui007/Protenix/training.json", "w") as f:
41
+ # # json.dump(training_json, f, indent=2)
42
+
43
+ # import os
44
+ # import json
45
+ # from pathlib import Path
46
+
47
+ # # === Path settings ===
48
+ # embedding_dir = Path("/home/hui007/Protenix/protenix_1d_embeddings")
49
+ # input_json_path = "/home/hui007/Protenix/training_json/training.json"
50
+ # output_prefix = "training"
51
+
52
+ # # === Load the JSON data ===
53
+ # with open(input_json_path, "r") as f:
54
+ # data = json.load(f)
55
+
56
+ # # === Keep only entries whose name.pt does not exist ===
57
+ # filtered_data = []
58
+ # for item in data:
59
+ # name = item["name"]
60
+ # pt_path = embedding_dir / f"{name}.pt"
61
+ # if not pt_path.exists():
62
+ # filtered_data.append(item)
63
+
64
+ # print(f"共有 {len(filtered_data)} 条数据将被保留并拆分")
65
+
66
+ # # === Split the data evenly into 6 chunks ===
67
+ # chunk_size = (len(filtered_data) + 5) // 6 # round up
68
+ # chunks = [filtered_data[i:i+chunk_size] for i in range(0, len(filtered_data), chunk_size)]
69
+
70
+ # # === Save the chunks as training1.json, training2.json, ... ===
71
+ # for i, chunk in enumerate(chunks):
72
+ # out_path = f"/home/hui007/Protenix/training_json/{output_prefix}{i+1}.json"
73
+ # with open(out_path, "w") as f:
74
+ # json.dump(chunk, f, indent=2)
75
+ # print(f"保存 {out_path},包含 {len(chunk)} 条")
76
+
77
+ from huggingface_hub import upload_folder
78
+
79
+ upload_folder(
80
+ repo_id="Yimingbear/protenix",
81
+ repo_type="dataset",
82
+ folder_path=".",
83
+ ignore_patterns=["coord/*", "ModelGenerator/*", "protenix_1d_embeddings/*", "protenix_3d_embeddings/*", "second_stage/*", "training_json/*", "examples/*"] # 忽略 scale 文件夹
84
+ )
finetune_demo.sh ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ export LAYERNORM_TYPE=fast_layernorm
16
+ export USE_DEEPSPEED_EVO_ATTENTION=true
17
+ # wget -P /af3-dev/release_model/ https://af3-dev.tos-cn-beijing.volces.com/release_model/protenix_base_default_v0.5.0.pt
18
+ checkpoint_path="/af3-dev/release_model/protenix_base_default_v0.5.0.pt"
19
+
20
+ python3 ./runner/train.py \
21
+ --model_name "protenix_base_constraint_v0.5.0" \
22
+ --run_name protenix_finetune \
23
+ --seed 42 \
24
+ --base_dir ./output \
25
+ --dtype bf16 \
26
+ --project protenix \
27
+ --use_wandb false \
28
+ --diffusion_batch_size 48 \
29
+ --eval_interval 400 \
30
+ --log_interval 50 \
31
+ --checkpoint_interval 400 \
32
+ --ema_decay 0.999 \
33
+ --train_crop_size 384 \
34
+ --max_steps 100000 \
35
+ --warmup_steps 2000 \
36
+ --lr 0.001 \
37
+ --sample_diffusion.N_step 20 \
38
+ --load_checkpoint_path ${checkpoint_path} \
39
+ --load_ema_checkpoint_path ${checkpoint_path} \
40
+ --data.train_sets weightedPDB_before2109_wopb_nometalc_0925 \
41
+ --data.weightedPDB_before2109_wopb_nometalc_0925.base_info.pdb_list examples/finetune_subset.txt \
42
+ --data.test_sets recentPDB_1536_sample384_0925,posebusters_0925
inference_demo.sh ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ export LAYERNORM_TYPE=fast_layernorm
16
+ export USE_DEEPSPEED_EVO_ATTENTION=true
17
+
18
+ N_sample=5
19
+ N_step=200
20
+ N_cycle=10
21
+ seed=101
22
+
23
+ input_json_path="./examples/example.json"
24
+ dump_dir="./output"
25
+ # model_name="protenix_base_default_v0.5.0"
26
+ model_name="protenix_tiny_default_v0.5.0"
27
+
28
+ python3 runner/inference.py \
29
+ --model_name ${model_name} \
30
+ --seeds ${seed} \
31
+ --dump_dir ${dump_dir} \
32
+ --input_json_path ${input_json_path} \
33
+ --model.N_cycle ${N_cycle} \
34
+ --sample_diffusion.N_sample ${N_sample} \
35
+ --sample_diffusion.N_step ${N_step}
36
+
37
+ # The following is a demo to use DDP for inference
38
+ # torchrun \
39
+ # --nproc_per_node $NPROC \
40
+ # --master_addr $WORKER_0_HOST \
41
+ # --master_port $WORKER_0_PORT \
42
+ # --node_rank=$ID \
43
+ # --nnodes=$WORKER_NUM \
44
+ # runner/inference.py \
45
+ # --seeds ${seed} \
46
+ # --dump_dir ${dump_dir} \
47
+ # --input_json_path ${input_json_path} \
48
+ # --model.N_cycle ${N_cycle} \
49
+ # --sample_diffusion.N_sample ${N_sample} \
50
+ # --sample_diffusion.N_step ${N_step}
protenix/__init__.py ADDED
File without changes
protenix/config/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from .config import load_config, parse_configs, parse_sys_args, save_config
protenix/config/config.py ADDED
@@ -0,0 +1,288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+ import copy
17
+ import sys
18
+ from typing import Any, Optional, Union
19
+
20
+ import yaml
21
+ from ml_collections.config_dict import ConfigDict
22
+
23
+ from protenix.config.extend_types import (
24
+ DefaultNoneWithType,
25
+ GlobalConfigValue,
26
+ ListValue,
27
+ RequiredValue,
28
+ ValueMaybeNone,
29
+ get_bool_value,
30
+ )
31
+
32
+
33
+ class ArgumentNotSet(object):
34
+ pass
35
+
36
+
37
+ class ConfigManager(object):
38
+ def __init__(self, global_configs: dict, fill_required_with_null: bool = False):
39
+ """
40
+ Initialize the ConfigManager instance.
41
+
42
+ Args:
43
+ global_configs (dict): A dictionary containing global configuration settings.
44
+ fill_required_with_null (bool, optional):
45
+ A boolean flag indicating whether required values should be filled with `None` if not provided. Defaults to False.
46
+ """
47
+ self.global_configs = global_configs
48
+ self.fill_required_with_null = fill_required_with_null
49
+ self.config_infos, self.default_configs = self.get_config_infos()
50
+
51
+ def get_value_info(
52
+ self, value
53
+ ) -> tuple[Any, Optional[Any], Optional[bool], Optional[bool]]:
54
+ """
55
+ Return the type, default value, whether it allows None, and whether it is required for a given value.
56
+
57
+ Args:
58
+ value: The value to determine the information for.
59
+
60
+ Returns:
61
+ tuple: A tuple containing the following elements:
62
+ - dtype: The type of the value.
63
+ - default_value: The default value for the value.
64
+ - allow_none: A boolean indicating whether the value can be None.
65
+ - required: A boolean indicating whether the value is required.
66
+ """
67
+ if isinstance(value, DefaultNoneWithType):
68
+ return value.dtype, None, True, False
69
+ elif isinstance(value, ValueMaybeNone):
70
+ return value.dtype, value.value, True, False
71
+ elif isinstance(value, RequiredValue):
72
+ if self.fill_required_with_null:
73
+ return value.dtype, None, True, False
74
+ else:
75
+ return value.dtype, None, False, True
76
+ elif isinstance(value, GlobalConfigValue):
77
+ return self.get_value_info(self.global_configs[value.global_key])
78
+ elif isinstance(value, ListValue):
79
+ return (value.dtype, value.value, False, False)
80
+ elif isinstance(value, list):
81
+ return (type(value[0]), value, False, False)
82
+ else:
83
+ return type(value), value, False, False
84
+
85
+ def _get_config_infos(self, config_dict: dict) -> dict:
86
+ """
87
+ Recursively extracts configuration information from a given dictionary.
88
+
89
+ Args:
90
+ config_dict (dict): The dictionary containing configuration settings.
91
+
92
+ Returns:
93
+ tuple: A tuple containing two dictionaries:
94
+ - all_keys: A dictionary mapping keys to their corresponding configuration information.
95
+ - default_configs: A dictionary mapping keys to their default configuration values.
96
+
97
+ Raises:
98
+ AssertionError: If a key contains a period (.), which is not allowed.
99
+ """
100
+ all_keys = {}
101
+ default_configs = {}
102
+ for key, value in config_dict.items():
103
+ assert "." not in key
104
+ if isinstance(value, (dict)):
105
+ children_keys, children_configs = self._get_config_infos(value)
106
+ all_keys.update(
107
+ {
108
+ f"{key}.{child_key}": child_value_type
109
+ for child_key, child_value_type in children_keys.items()
110
+ }
111
+ )
112
+ default_configs[key] = children_configs
113
+ else:
114
+ value_info = self.get_value_info(value)
115
+ all_keys[key] = value_info
116
+ default_configs[key] = value_info[1]
117
+ return all_keys, default_configs
118
+
119
+ def get_config_infos(self):
120
+ return self._get_config_infos(self.global_configs)
121
+
122
+ def _merge_configs(
123
+ self,
124
+ new_configs: dict,
125
+ global_configs: dict,
126
+ local_configs: dict,
127
+ prefix="",
128
+ ) -> ConfigDict:
129
+ """Overwrite default configs with new configs recursively.
130
+ Args:
131
+ new_configs: global flattened config dict with all hierarchical config keys joined by '.', i.e.
132
+ {
133
+ 'c_z': 32,
134
+ 'model.evoformer.c_z': 16,
135
+ ...
136
+ }
137
+ global_configs: global hierarchical merging configs, i.e.
138
+ {
139
+ 'c_z': 32,
140
+ 'c_m': 128,
141
+ 'model': {
142
+ 'evoformer': {
143
+ ...
144
+ }
145
+ }
146
+ }
147
+ local_configs: hierarchical merging config dict in current level, i.e. for 'model' level, this maybe
148
+ {
149
+ 'evoformer': {
150
+ 'c_z': GlobalConfigValue("c_z"),
151
+ },
152
+ 'embedder': {
153
+ ...
154
+ }
155
+ }
156
+ prefix (str, optional): A prefix string to prepend to keys during recursion. Defaults to an empty string.
157
+
158
+ Returns:
159
+ ConfigDict: The merged configuration dictionary.
160
+
161
+ Raises:
162
+ Exception: If a required config value is not allowed to be None.
163
+ """
164
+ # Merge configs in current level first, since these configs maybe referenced by lower level
165
+ for key, value in local_configs.items():
166
+ if isinstance(value, dict):
167
+ continue
168
+ full_key = f"{prefix}.{key}" if prefix else key
169
+ dtype, default_value, allow_none, required = self.config_infos[full_key]
170
+ if full_key in new_configs and not isinstance(
171
+ new_configs[full_key], ArgumentNotSet
172
+ ):
173
+ if allow_none and new_configs[full_key] in [
174
+ "None",
175
+ "none",
176
+ "null",
177
+ ]:
178
+ local_configs[key] = None
179
+ elif dtype == bool:
180
+ local_configs[key] = get_bool_value(new_configs[full_key])
181
+ elif isinstance(value, (ListValue, list)):
182
+ local_configs[key] = (
183
+ [dtype(s) for s in new_configs[full_key].strip().split(",")]
184
+ if new_configs[full_key].strip()
185
+ else []
186
+ )
187
+ else:
188
+ local_configs[key] = dtype(new_configs[full_key])
189
+ elif isinstance(value, GlobalConfigValue):
190
+ local_configs[key] = global_configs[value.global_key]
191
+ else:
192
+ if not allow_none and default_value is None:
193
+ raise Exception(f"config {full_key} not allowed to be none")
194
+ local_configs[key] = default_value
195
+ for key, value in local_configs.items():
196
+ if not isinstance(value, dict):
197
+ continue
198
+ self._merge_configs(
199
+ new_configs, global_configs, value, f"{prefix}.{key}" if prefix else key
200
+ )
201
+
202
+ def merge_configs(self, new_configs: dict) -> ConfigDict:
203
+ configs = copy.deepcopy(self.global_configs)
204
+ self._merge_configs(new_configs, configs, configs)
205
+ return ConfigDict(configs)
206
+
207
+
208
+ def parse_configs(
209
+ configs: dict, arg_str: str = None, fill_required_with_null: bool = False
210
+ ) -> ConfigDict:
211
+ """
212
+ Parses and merges configuration settings from a dictionary and command-line arguments.
213
+
214
+ Args:
215
+ configs (dict): A dictionary containing initial configuration settings.
216
+ arg_str (str, optional): A string representing command-line arguments. Defaults to None.
217
+ fill_required_with_null (bool, optional):
218
+ A boolean flag indicating whether required values should be filled with `None` if not provided. Defaults to False.
219
+
220
+ Returns:
221
+ ConfigDict: The merged configuration dictionary.
222
+ """
223
+ manager = ConfigManager(configs, fill_required_with_null=fill_required_with_null)
224
+ parser = argparse.ArgumentParser()
225
+ # Register arguments
226
+ for key, (
227
+ dtype,
228
+ default_value,
229
+ allow_none,
230
+ required,
231
+ ) in manager.config_infos.items():
232
+ # All configs are registered as str; strings are converted to the real dtype later
233
+ parser.add_argument(
234
+ "--" + key, type=str, default=ArgumentNotSet(), required=required
235
+ )
236
+ # Merge user command-line args with default ones
237
+ merged_configs = manager.merge_configs(
238
+ vars(parser.parse_args(arg_str.split())) if arg_str else {}
239
+ )
240
+ return merged_configs
241
+
242
+
243
+ def parse_sys_args() -> str:
244
+ """
245
+ Check whether command-line arguments are valid.
246
+ Each argument is expected to be in the format `--key value`.
247
+
248
+ Returns:
249
+ str: A string formatted as command-line arguments.
250
+
251
+ Raises:
252
+ AssertionError: If any key does not start with `--`.
253
+ """
254
+ args = sys.argv[1:]
255
+ arg_str = ""
256
+ for k, v in zip(args[::2], args[1::2]):
257
+ assert k.startswith("--")
258
+ arg_str += f"{k} {v} "
259
+ return arg_str
260
+
261
+
262
+ def load_config(path: str) -> dict:
263
+ """
264
+ Loads a configuration from a YAML file.
265
+
266
+ Args:
267
+ path (str): The path to the YAML file containing the configuration.
268
+
269
+ Returns:
270
+ dict: A dictionary containing the configuration loaded from the YAML file.
271
+ """
272
+ with open(path, "r") as f:
273
+ return yaml.safe_load(f)
274
+
275
+
276
+ def save_config(config: Union[ConfigDict, dict], path: str) -> None:
277
+ """
278
+ Saves a configuration to a YAML file.
279
+
280
+ Args:
281
+ config (ConfigDict or dict): The configuration to be saved.
282
+ If it is a ConfigDict, it will be converted to a dictionary.
283
+ path (str): The path to the YAML file where the configuration will be saved.
284
+ """
285
+ with open(path, "w") as f:
286
+ if isinstance(config, ConfigDict):
287
+ config = config.to_dict()
288
+ yaml.safe_dump(config, f)
protenix/config/extend_types.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ class DefaultNoneWithType(object):
17
+ def __init__(self, dtype):
18
+ self.dtype = dtype
19
+
20
+
21
+ class ValueMaybeNone(object):
22
+ def __init__(self, value):
23
+ assert value is not None
24
+ self.dtype = type(value)
25
+ self.value = value
26
+
27
+
28
+ class GlobalConfigValue(object):
29
+ def __init__(self, global_key):
30
+ self.global_key = global_key
31
+
32
+
33
+ class RequiredValue(object):
34
+ def __init__(self, dtype):
35
+ self.dtype = dtype
36
+
37
+
38
+ class ListValue(object):
39
+ def __init__(self, value, dtype=None):
40
+ if value is not None:
41
+ self.value = value
42
+ self.dtype = type(value[0])
43
+ else:
44
+ self.value = None
45
+ self.dtype = dtype
46
+
47
+
48
+ def get_bool_value(bool_str: str):
49
+ bool_str_lower = bool_str.lower()
50
+ if bool_str_lower in ("false", "f", "no", "n", "0"):
51
+ return False
52
+ elif bool_str_lower in ("true", "t", "yes", "y", "1"):
53
+ return True
54
+ else:
55
+ raise ValueError(f"Cannot interpret {bool_str} as bool")
protenix/data/__init__.py ADDED
File without changes
protenix/data/ccd.py ADDED
@@ -0,0 +1,450 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import functools
16
+ import logging
17
+ import pickle
18
+ from collections import defaultdict
19
+ from pathlib import Path
20
+ from typing import Any, Optional, Union
21
+
22
+ import biotite
23
+ import biotite.structure as struc
24
+ import biotite.structure.io.pdbx as pdbx
25
+ import numpy as np
26
+ from biotite.structure import AtomArray
27
+ from rdkit import Chem
28
+
29
+ from configs.configs_data import data_configs
30
+ from protenix.data.substructure_perms import get_substructure_perms
31
+
32
+ logger = logging.getLogger(__name__)
33
+
34
+ COMPONENTS_FILE = data_configs["ccd_components_file"]
35
+ RKDIT_MOL_PKL = Path(data_configs["ccd_components_rdkit_mol_file"])
36
+
37
+
38
+ @functools.lru_cache
39
+ def biotite_load_ccd_cif() -> pdbx.CIFFile:
40
+ """biotite load CCD components file
41
+
42
+ Returns:
43
+ pdbx.CIFFile: ccd components file
44
+ """
45
+ return pdbx.CIFFile.read(COMPONENTS_FILE)
46
+
47
+
48
+ def _map_central_to_leaving_groups(component) -> Optional[dict[str, list[list[str]]]]:
49
+ """map each central atom (bonded atom) index to leaving atom groups in component (atom_array).
50
+
51
+ Returns:
52
+ dict[str, list[list[str]]]: central atom name to leaving atom groups (atom names).
53
+ """
54
+ comp = component.copy()
55
+ # Eg: ions
56
+ if comp.bonds is None:
57
+ return {}
58
+ central_to_leaving_groups = defaultdict(list)
59
+ for c_idx in np.flatnonzero(~comp.leaving_atom_flag):
60
+ bonds, _ = comp.bonds.get_bonds(c_idx)
61
+ for l_idx in bonds:
62
+ if comp.leaving_atom_flag[l_idx]:
63
+ comp.bonds.remove_bond(c_idx, l_idx)
64
+ group_idx = struc.find_connected(comp.bonds, l_idx)
65
+ if not np.all(comp.leaving_atom_flag[group_idx]):
66
+ return None
67
+ central_to_leaving_groups[comp.atom_name[c_idx]].append(
68
+ comp.atom_name[group_idx].tolist()
69
+ )
70
+ return central_to_leaving_groups
71
+
72
+
73
+ @functools.lru_cache
74
+ def get_component_atom_array(
75
+ ccd_code: str, keep_leaving_atoms: bool = False, keep_hydrogens=False
76
+ ) -> AtomArray:
77
+ """get component atom array
78
+
79
+ Args:
80
+ ccd_code (str): ccd code
81
+ keep_leaving_atoms (bool, optional): keep leaving atoms. Defaults to False.
82
+ keep_hydrogens (bool, optional): keep hydrogens. Defaults to False.
83
+
84
+ Returns:
85
+ AtomArray: Biotite AtomArray of CCD component
86
+ with additional attribute: leaving_atom_flag (bool)
87
+ """
88
+ ccd_cif = biotite_load_ccd_cif()
89
+ if ccd_code not in ccd_cif:
90
+ logger.warning(f"Warning: get_component_atom_array() can not parse {ccd_code}")
91
+ return None
92
+ try:
93
+ comp = pdbx.get_component(ccd_cif, data_block=ccd_code, use_ideal_coord=True)
94
+ except biotite.InvalidFileError as e:
95
+ # Eg: UNL without atom.
96
+ logger.warning(
97
+ f"Warning: get_component_atom_array() can not parse {ccd_code} for {e}"
98
+ )
99
+ return None
100
+ atom_category = ccd_cif[ccd_code]["chem_comp_atom"]
101
+ leaving_atom_flag = atom_category["pdbx_leaving_atom_flag"].as_array()
102
+ comp.set_annotation("leaving_atom_flag", leaving_atom_flag == "Y")
103
+
104
+ for atom_id in ["alt_atom_id", "pdbx_component_atom_id"]:
105
+ comp.set_annotation(atom_id, atom_category[atom_id].as_array())
106
+ if not keep_leaving_atoms:
107
+ comp = comp[~comp.leaving_atom_flag]
108
+ if not keep_hydrogens:
109
+ # EG: ND4
110
+ comp = comp[~np.isin(comp.element, ["H", "D"])]
111
+
112
+ # Map central atom index to leaving group (atom_indices) in component (atom_array).
113
+ comp.central_to_leaving_groups = _map_central_to_leaving_groups(comp)
114
+ if comp.central_to_leaving_groups is None:
115
+ logger.warning(
116
+ f"Warning: ccd {ccd_code} has leaving atom group bond to more than one central atom, central_to_leaving_groups is None."
117
+ )
118
+ return comp
119
+
120
+
121
+ @functools.lru_cache(maxsize=None)
122
+ def get_one_letter_code(ccd_code: str) -> Union[str, None]:
123
+ """get one_letter_code from CCD components file.
124
+
125
+ normal return is one letter: ALA --> A, DT --> T
126
+ unknown protein: X
127
+ unknown DNA or RNA: N
128
+ other unknown: None
129
+ some ccd_code will return more than one letter:
130
+ eg: XXY --> THG
131
+
132
+ Args:
133
+ ccd_code (str): _description_
134
+
135
+ Returns:
136
+ str: one letter code
137
+ """
138
+ ccd_cif = biotite_load_ccd_cif()
139
+ if ccd_code not in ccd_cif:
140
+ return None
141
+ one = ccd_cif[ccd_code]["chem_comp"]["one_letter_code"].as_item()
142
+ if one == "?":
143
+ return None
144
+ else:
145
+ return one
146
+
147
+
148
+ @functools.lru_cache(maxsize=None)
149
+ def get_mol_type(ccd_code: str) -> str:
150
+ """get mol_type from CCD components file.
151
+
152
+ based on _chem_comp.type
153
+ http://mmcif.rcsb.org/dictionaries/mmcif_pdbx_v50.dic/Items/_chem_comp.type.html
154
+
155
+ not use _chem_comp.pdbx_type, because it is not consistent with _chem_comp.type
156
+ e.g. ccd 000 --> _chem_comp.type="NON-POLYMER" _chem_comp.pdbx_type="ATOMP"
157
+ https://mmcif.wwpdb.org/dictionaries/mmcif_pdbx_v5_next.dic/Items/_struct_asym.pdbx_type.html
158
+
159
+ Args:
160
+ ccd_code (str): ccd code
161
+
162
+ Returns:
163
+ str: mol_type, one of {"protein", "rna", "dna", "ligand"}
164
+ """
165
+ ccd_cif = biotite_load_ccd_cif()
166
+ if ccd_code not in ccd_cif:
167
+ return "ligand"
168
+
169
+ link_type = ccd_cif[ccd_code]["chem_comp"]["type"].as_item().upper()
170
+
171
+ if "PEPTIDE" in link_type and link_type != "PEPTIDE-LIKE":
172
+ return "protein"
173
+ if "DNA" in link_type:
174
+ return "dna"
175
+ if "RNA" in link_type:
176
+ return "rna"
177
+ return "ligand"
178
+
179
+
180
+ def get_all_ccd_code() -> list:
181
+ """get all ccd code from components file"""
182
+ ccd_cif = biotite_load_ccd_cif()
183
+ return list(ccd_cif.keys())
184
+
185
+
186
+ _ccd_rdkit_mols: dict[str, Chem.Mol] = {}
187
+
188
+
189
+ def get_component_rdkit_mol(ccd_code: str) -> Union[Chem.Mol, None]:
190
+ """get rdkit mol by PDBeCCDUtils
191
+ https://github.com/PDBeurope/ccdutils
192
+
193
+ preprocessing all ccd components in _components_file at first time run.
194
+
195
+ Args:
196
+ ccd_code (str): ccd code
197
+
198
+ Returns
199
+ rdkit.Chem.Mol: rdkit mol with ref coord
200
+ """
201
+ global _ccd_rdkit_mols
202
+ # _ccd_rdkit_mols is not empty
203
+ if _ccd_rdkit_mols:
204
+ return _ccd_rdkit_mols.get(ccd_code, None)
205
+
206
+ rdkit_mol_pkl = RKDIT_MOL_PKL
207
+ if rdkit_mol_pkl.exists():
208
+ with open(rdkit_mol_pkl, "rb") as f:
209
+ _ccd_rdkit_mols = pickle.load(f)
210
+ return _ccd_rdkit_mols.get(ccd_code, None)
211
+ else:
212
+ raise FileNotFoundError(
213
+ f"CCD components file {rdkit_mol_pkl} not found, please download it to your DATA_ROOT_DIR before running."
214
+ "See https://github.com/bytedance/Protenix"
215
+ )
216
+
217
+
218
+ @functools.lru_cache
219
+ def get_ccd_ref_info(ccd_code: str, return_perm: bool = True) -> dict[str, Any]:
220
+ """
221
+ Ref: AlphaFold3 SI Chapter 2.8
222
+ Reference features. Features derived from a residue, nucleotide or ligand’s reference conformer.
223
+ Given an input CCD code or SMILES string, the conformer is typically generated
224
+ with RDKit v.2023_03_3 [25] using ETKDGv3 [26]. On error, we fall back to using the CCD ideal coordinates,
225
+ or finally the representative coordinates
226
+ if they are from before our training date cut-off (2021-09-30 unless otherwise stated).
227
+ At the end, any atom coordinates still missing are set to zeros.
228
+
229
+ Get reference atom mapping and coordinates.
230
+
231
+ Args:
232
+ name (str): CCD name
233
+ return_perm (bool): return atom permutations.
234
+
235
+ Returns:
236
+ Dict:
237
+ ccd: ccd code
238
+ atom_map: atom name to atom index
239
+ coord: atom coordinates
240
+ charge: atom formal charge
241
+ perm: atom permutation
242
+ """
243
+ mol = get_component_rdkit_mol(ccd_code)
244
+ if mol is None:
245
+ return {}
246
+ if mol.GetNumAtoms() == 0: # eg: "UNL"
247
+ logger.warning(
248
+ f"Warning: mol {ccd_code} from get_component_rdkit_mol() has no atoms,"
249
+ "get_ccd_ref_info() return empty dict"
250
+ )
251
+ return {}
252
+ conf = mol.GetConformer(mol.ref_conf_id)
253
+ coord = conf.GetPositions()
254
+ charge = np.array([atom.GetFormalCharge() for atom in mol.GetAtoms()])
255
+
256
+ results = {
257
+ "ccd": ccd_code, # str
258
+ "atom_map": mol.atom_map, # dict[str,int]: atom name to atom index
259
+ "coord": coord, # np.ndarray[float]: atom coordinates, shape:(n_atom,3)
260
+ "mask": mol.ref_mask, # np.ndarray[bool]: atom mask, shape:(n_atom,)
261
+ "charge": charge, # np.ndarray[int]: atom formal charge, shape:(n_atom,)
262
+ }
263
+
264
+ if return_perm:
265
+ try:
266
+ Chem.SanitizeMol(mol)
267
+ perm = get_substructure_perms(mol, MaxMatches=1000)
268
+
269
+ except:
270
+ # Sanitize failed, permutation is unavailable
271
+ perm = np.array(
272
+ [
273
+ [
274
+ i
275
+ for i, atom in enumerate(mol.GetAtoms())
276
+ if atom.GetAtomicNum() != 1
277
+ ]
278
+ ]
279
+ )
280
+ # np.ndarray[int]: atom permutation, shape:(n_atom_wo_h, n_perm)
281
+ results["perm"] = perm.T
282
+
283
+ return results
284
+
285
+
286
+ # Modified from biotite to use consistent ccd components file
287
+ def _connect_inter_residue(
288
+ atoms: AtomArray, residue_starts: np.ndarray
289
+ ) -> struc.BondList:
290
+ """
291
+ Create a :class:`BondList` containing the bonds between adjacent
292
+ amino acid or nucleotide residues.
293
+
294
+ Parameters
295
+ ----------
296
+ atoms : AtomArray or AtomArrayStack
297
+ The structure to create the :class:`BondList` for.
298
+ residue_starts : ndarray, dtype=int
299
+ Return value of
300
+ ``get_residue_starts(atoms, add_exclusive_stop=True)``.
301
+
302
+ Returns
303
+ -------
304
+ BondList
305
+ A bond list containing all inter residue bonds.
306
+ """
307
+
308
+ bonds = []
309
+
310
+ atom_names = atoms.atom_name
311
+ res_names = atoms.res_name
312
+ res_ids = atoms.res_id
313
+ chain_ids = atoms.chain_id
314
+
315
+ # Iterate over all starts excluding:
316
+ # - the last residue and
317
+ # - exclusive end index of 'atoms'
318
+ for i in range(len(residue_starts) - 2):
319
+ curr_start_i = residue_starts[i]
320
+ next_start_i = residue_starts[i + 1]
321
+ after_next_start_i = residue_starts[i + 2]
322
+
323
+ # Check if the current and next residue is in the same chain
324
+ if chain_ids[next_start_i] != chain_ids[curr_start_i]:
325
+ continue
326
+ # Check if the current and next residue
327
+ # have consecutive residue IDs
328
+ # (Same residue ID is also possible if insertion code is used)
329
+ if res_ids[next_start_i] - res_ids[curr_start_i] > 1:
330
+ continue
331
+
332
+ # Get link type for this residue from RCSB components.cif
333
+ curr_link = get_mol_type(res_names[curr_start_i])
334
+ next_link = get_mol_type(res_names[next_start_i])
335
+
336
+ if curr_link == "protein" and next_link in "protein":
337
+ curr_connect_atom_name = "C"
338
+ next_connect_atom_name = "N"
339
+ elif curr_link in ["dna", "rna"] and next_link in ["dna", "rna"]:
340
+ curr_connect_atom_name = "O3'"
341
+ next_connect_atom_name = "P"
342
+ else:
343
+ # Create no bond if the connection types of consecutive
344
+ # residues are not compatible
345
+ continue
346
+
347
+ # Index in atom array for atom name in current residue
348
+ # Addition of 'curr_start_i' is necessary, as only a slice of
349
+ # 'atom_names' is taken, beginning at 'curr_start_i'
350
+ curr_connect_indices = np.where(
351
+ atom_names[curr_start_i:next_start_i] == curr_connect_atom_name
352
+ )[0]
353
+ curr_connect_indices += curr_start_i
354
+
355
+ # Index in atom array for atom name in next residue
356
+ next_connect_indices = np.where(
357
+ atom_names[next_start_i:after_next_start_i] == next_connect_atom_name
358
+ )[0]
359
+ next_connect_indices += next_start_i
360
+
361
+ if len(curr_connect_indices) == 0 or len(next_connect_indices) == 0:
362
+ # The connector atoms are not found in the adjacent residues
363
+ # -> skip this bond
364
+ continue
365
+
366
+ bonds.append(
367
+ (curr_connect_indices[0], next_connect_indices[0], struc.BondType.SINGLE)
368
+ )
369
+
370
+ return struc.BondList(atoms.array_length(), np.array(bonds, dtype=np.uint32))
371
+
372
+
373
+ def add_inter_residue_bonds(
374
+ atom_array: AtomArray,
375
+ exclude_struct_conn_pairs: bool = False,
376
+ remove_far_inter_chain_pairs: bool = False,
377
+ ) -> AtomArray:
378
+ """
379
+ add polymer bonds (C-N or O3'-P) between adjacent residues based on auth_seq_id.
380
+
381
+ exclude_struct_conn_pairs: if True, do not add bond between adjacent residues already has non-standard polymer bonds
382
+ on atom C or N or O3' or P.
383
+
384
+ remove_far_inter_chain_pairs: if True, remove inter chain (based on label_asym_id) bonds that are far away from each other.
385
+
386
+ returns:
387
+ AtomArray: Biotite AtomArray merged inter residue bonds into atom_array.bonds
388
+ """
389
+ res_starts = struc.get_residue_starts(atom_array, add_exclusive_stop=True)
390
+ inter_bonds = _connect_inter_residue(atom_array, res_starts)
391
+
392
+ if atom_array.bonds is None:
393
+ atom_array.bonds = inter_bonds
394
+ return atom_array
395
+
396
+ select_mask = np.ones(len(inter_bonds._bonds), dtype=bool)
397
+ if exclude_struct_conn_pairs:
398
+ for b_idx, (atom_i, atom_j, b_type) in enumerate(inter_bonds._bonds):
399
+ atom_k = atom_i if atom_array.atom_name[atom_i] in ("N", "O3'") else atom_j
400
+ bonds, types = atom_array.bonds.get_bonds(atom_k)
401
+ if len(bonds) == 0:
402
+ continue
403
+ for b in bonds:
404
+ if (
405
+ # adjacent residues
406
+ abs((res_starts <= b).sum() - (res_starts <= atom_k).sum()) == 1
407
+ and atom_array.chain_id[b] == atom_array.chain_id[atom_k]
408
+ and atom_array.atom_name[b] not in ("C", "P")
409
+ ):
410
+ select_mask[b_idx] = False
411
+ break
412
+
413
+ if remove_far_inter_chain_pairs:
414
+ if not hasattr(atom_array, "label_asym_id"):
415
+ logging.warning(
416
+ "label_asym_id not found, far inter chain bonds will not be removed"
417
+ )
418
+ for b_idx, (atom_i, atom_j, b_type) in enumerate(inter_bonds._bonds):
419
+ if atom_array.label_asym_id[atom_i] != atom_array.label_asym_id[atom_j]:
420
+ coord_i = atom_array.coord[atom_i]
421
+ coord_j = atom_array.coord[atom_j]
422
+ if np.linalg.norm(coord_i - coord_j) > 2.5:
423
+ select_mask[b_idx] = False
424
+
425
+ # filter out removed_inter_bonds from atom_array.bonds
426
+ remove_bonds = inter_bonds._bonds[~select_mask]
427
+ remove_mask = np.isin(atom_array.bonds._bonds[:, 0], remove_bonds[:, 0]) & np.isin(
428
+ atom_array.bonds._bonds[:, 1], remove_bonds[:, 1]
429
+ )
430
+ atom_array.bonds._bonds = atom_array.bonds._bonds[~remove_mask]
431
+
432
+ # merged normal inter_bonds into atom_array.bonds
433
+ inter_bonds._bonds = inter_bonds._bonds[select_mask]
434
+ atom_array.bonds = atom_array.bonds.merge(inter_bonds)
435
+ return atom_array
436
+
437
+
438
+ def res_names_to_sequence(res_names: list[str]) -> str:
439
+ """convert res_names to sequences {chain_id: canonical_sequence} based on CCD
440
+
441
+ Return
442
+ str: canonical_sequence
443
+ """
444
+ seq = ""
445
+ for res_name in res_names:
446
+ one = get_one_letter_code(res_name)
447
+ one = "X" if one is None else one
448
+ one = "X" if len(one) > 1 else one
449
+ seq += one
450
+ return seq
protenix/data/compute_esm.py ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+ import os
17
+
18
+ import pandas as pd
19
+ import torch
20
+ from esm import FastaBatchedDataset, pretrained
21
+ from tqdm.auto import tqdm
22
+
23
+ ESM_CONFIG = {
24
+ "esm2-3b": {
25
+ "type": "esm2",
26
+ "model_path": "esm2_t36_3B_UR50D.pt",
27
+ "emb_dim": 2560,
28
+ "n_layers": 36,
29
+ },
30
+ "esm2-3b-ism": {
31
+ "type": "esm2",
32
+ "model_path": "esm2_t36_3B_UR50D_ism.pt",
33
+ "emb_dim": 2560,
34
+ "n_layers": 36,
35
+ }, # https://www.biorxiv.org/content/10.1101/2024.11.08.622579v2
36
+ }
37
+
38
+
39
+ def _load_esm2_model(model_path):
40
+ if os.path.exists(model_path):
41
+ model, alphabet = pretrained.load_model_and_alphabet_local(model_path)
42
+ else:
43
+ model, alphabet = pretrained.load_model_and_alphabet(
44
+ os.path.splitext(os.path.basename(model_path))[0]
45
+ )
46
+ return model, alphabet
47
+
48
+
49
+ def load_esm_model(model_name, local_esm_dir="release_data/checkpoint"):
50
+ local_model_path = os.path.join(local_esm_dir, ESM_CONFIG[model_name]["model_path"])
51
+ if os.path.exists(local_model_path):
52
+ print("Try to load ESM language model from ", local_model_path)
53
+
54
+ if "ism" in model_name and not os.path.exists(local_model_path):
55
+ raise RuntimeError(
56
+ f"esm2-3b-ism model: {local_model_path} does not exist \n"
57
+ + "this model can not be download from fair-esm, \n"
58
+ + "download it from https://af3-dev.tos-cn-beijing.volces.com/release_model/esm2_t36_3B_UR50D_ism.pt"
59
+ )
60
+ if model_name.startswith("esm2"):
61
+ model, alphabet = _load_esm2_model(local_model_path)
62
+ model.eval()
63
+ if torch.cuda.is_available():
64
+ model = model.cuda()
65
+
66
+ return model, alphabet
67
+
68
+
69
+ def _check_files_exist(save_dir, labels):
70
+ return all(
71
+ [os.path.exists(os.path.join(save_dir, label + ".pt")) for label in labels]
72
+ )
73
+
74
+
75
+ def compute_ESM_embeddings(
76
+ model_name,
77
+ model,
78
+ alphabet,
79
+ labels,
80
+ sequences,
81
+ save_dir,
82
+ toks_per_batch=4096,
83
+ truncation_seq_length=1022,
84
+ ):
85
+ if model_name.startswith("esm2"):
86
+ embeddings = compute_esm2_embeddings(
87
+ model,
88
+ alphabet,
89
+ labels,
90
+ sequences,
91
+ save_dir,
92
+ toks_per_batch,
93
+ truncation_seq_length,
94
+ )
95
+ return embeddings
96
+
97
+
98
+ # Adapt from Corso, Gabriele, et al. "Diffdock: Diffusion steps, twists, and turns for molecular docking."
99
+ # URL: https://github.com/gcorso/DiffDock/blob/main/utils/inference_utils.py
100
+ def compute_esm2_embeddings(
101
+ model,
102
+ alphabet,
103
+ labels,
104
+ sequences,
105
+ save_dir,
106
+ toks_per_batch=4096,
107
+ truncation_seq_length=1022,
108
+ ):
109
+ dataset = FastaBatchedDataset(labels, sequences)
110
+ batches = dataset.get_batch_indices(toks_per_batch, extra_toks_per_seq=1)
111
+ data_loader = torch.utils.data.DataLoader(
112
+ dataset,
113
+ collate_fn=alphabet.get_batch_converter(truncation_seq_length),
114
+ batch_sampler=batches,
115
+ )
116
+ repr_layer = model.num_layers
117
+ embeddings = {}
118
+ with torch.no_grad():
119
+ for batch_idx, (labels, strs, toks) in enumerate(tqdm(data_loader)):
120
+ print(
121
+ f"Processing {batch_idx + 1} of {len(batches)} batches ({toks.size(0)} sequences)"
122
+ )
123
+ if _check_files_exist(save_dir, labels):
124
+ continue
125
+ if torch.cuda.is_available():
126
+ toks = toks.to(device="cuda", non_blocking=True)
127
+ out = model(toks, repr_layers=[repr_layer], return_contacts=False)
128
+ representation = out["representations"][repr_layer].to(device="cpu")
129
+ for i, label in enumerate(labels):
130
+ truncate_len = min(truncation_seq_length, len(strs[i]))
131
+ embeddings[label] = representation[i, 1 : truncate_len + 1].clone()
132
+ save_path = os.path.join(save_dir, label + ".pt")
133
+ torch.save(embeddings[label], save_path)
134
+ return embeddings
135
+
136
+
137
+ def pdb_sequences_iterator(
138
+ input_path="./scripts/msa/data/pdb_seqs/pdb_seq.csv",
139
+ save_path="./scripts/msa/data/pdb_seqs/pdb_labels_seqs.csv",
140
+ start_id=0,
141
+ end_id=-1,
142
+ ):
143
+ if os.path.exists(save_path):
144
+ df_seq = pd.read_csv(save_path)
145
+ else:
146
+ df = pd.read_csv(input_path)
147
+ # Protein only
148
+ df = df[df["mol_type"] == "protein"]
149
+ # Sequence name
150
+ df["pdb_entity_id"] = df["pdb_id"] + "_" + df["entity_id"].astype(str)
151
+ # Group by 'seq'
152
+ df_seq = (
153
+ df.groupby("seq")["pdb_entity_id"]
154
+ .apply(lambda x: ",".join(x))
155
+ .reset_index()
156
+ )
157
+ # Use the first pdb_entity_id as the label
158
+ df_seq["seq_label"] = df_seq["pdb_entity_id"].apply(lambda x: x.split(",")[0])
159
+ assert df_seq["seq_label"].nunique() == len(df_seq)
160
+ # Get a part id
161
+ df_seq["part_id"] = df_seq["pdb_entity_id"].apply(lambda x: x[1:3])
162
+ df_seq.to_csv(save_path)
163
+
164
+ if end_id == -1:
165
+ end_id = len(df_seq)
166
+ df_seq = df_seq[start_id:end_id]
167
+
168
+ part_counts = dict(df_seq["part_id"].value_counts())
169
+ for part_id, count in part_counts.items():
170
+ df_part = df_seq[df_seq["part_id"] == part_id]
171
+ print(f"Part {part_id}: {len(df_part)} sequences.")
172
+ yield part_id, df_part["seq_label"].tolist(), df_part["seq"].tolist()
173
+
174
+
175
+ def process_pdb_dataset(
176
+ model_name, root_save_dir, pdb_seq_path, pdb_seq_label_path, start_id=0, end_id=-1
177
+ ):
178
+
179
+ model, alphabet = load_esm_model(model_name)
180
+ seq_iterator = pdb_sequences_iterator(
181
+ pdb_seq_path, pdb_seq_label_path, start_id, end_id
182
+ )
183
+ error_parts = []
184
+ for part_id, labels, sequences in seq_iterator:
185
+ save_dir = os.path.join(root_save_dir, f"{part_id}")
186
+
187
+ if not os.path.exists(save_dir):
188
+ os.makedirs(save_dir)
189
+ print(f"[{part_id}] Generating ESM language model embeddings")
190
+ lm_embeddings = compute_ESM_embeddings(
191
+ model_name,
192
+ model,
193
+ alphabet,
194
+ labels,
195
+ sequences,
196
+ save_dir,
197
+ truncation_seq_length=4094,
198
+ toks_per_batch=16384,
199
+ )
200
+ print(f"[{part_id}] Processed {len(lm_embeddings)} sequences in total. Done!")
201
+
202
+ print("Error parts: ", error_parts)
203
+
204
+
205
+ def main():
206
+ parser = argparse.ArgumentParser()
207
+ parser.add_argument("--model_name", type=str, choices=list(ESM_CONFIG.keys()))
208
+ parser.add_argument("--start_id", type=int, default=0)
209
+ parser.add_argument("--end_id", type=int, default=-1)
210
+ args = parser.parse_args()
211
+
212
+ save_dir = f"./esm_embeddings/{args.model_name}"
213
+ pdb_seq_path = "./scripts/msa/data/pdb_seqs/pdb_seq.csv"
214
+ pdb_seq_label_path = "./scripts/msa/data/pdb_seqs/pdb_labels_seqs.csv"
215
+
216
+ if not os.path.exists(save_dir):
217
+ print("Make dir: ", save_dir)
218
+ os.makedirs(save_dir)
219
+ process_pdb_dataset(
220
+ args.model_name,
221
+ save_dir,
222
+ pdb_seq_path,
223
+ pdb_seq_label_path,
224
+ args.start_id,
225
+ args.end_id,
226
+ )
227
+
228
+
229
+ if __name__ == "__main__":
230
+ main()
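
A minimal usage sketch for the script above (not part of the commit): it drives the helpers defined in protenix/data/compute_esm.py directly for a couple of toy sequences. The import path assumes the protenix package is installed; the labels and sequences are hypothetical placeholders, and the 3B-parameter ESM checkpoint is either found under release_data/checkpoint or downloaded by fair-esm (a GPU is strongly recommended).

import os
import torch
from protenix.data.compute_esm import compute_ESM_embeddings, load_esm_model

model_name = "esm2-3b"
save_dir = "./esm_embeddings_demo"
os.makedirs(save_dir, exist_ok=True)

# Hypothetical labels/sequences; the PDB pipeline reads them from pdb_labels_seqs.csv instead.
labels = ["demo_1", "demo_2"]
sequences = ["MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ", "GSHMSLYKKAGS"]

model, alphabet = load_esm_model(model_name)
embeddings = compute_ESM_embeddings(model_name, model, alphabet, labels, sequences, save_dir)

# One (sequence_length, 2560) tensor per label, also written to <save_dir>/<label>.pt
for label in labels:
    emb = torch.load(os.path.join(save_dir, label + ".pt"))
    print(label, tuple(emb.shape))
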
protenix/data/constants.py ADDED
@@ -0,0 +1,977 @@
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from rdkit.Chem import GetPeriodicTable
16
+
17
+ EvaluationChainInterface = [
18
+ "intra_ligand",
19
+ "intra_dna",
20
+ "intra_rna",
21
+ "intra_prot",
22
+ "ligand_prot",
23
+ "rna_prot",
24
+ "dna_prot",
25
+ "prot_prot",
26
+ "antibody_antigen",
27
+ "antibody",
28
+ ]
29
+
30
+ EntityPolyTypeDict = {
31
+ "nuc": [
32
+ "peptide nucleic acid",
33
+ "polydeoxyribonucleotide",
34
+ "polydeoxyribonucleotide/polyribonucleotide hybrid",
35
+ "polyribonucleotide",
36
+ ],
37
+ "protein": ["polypeptide(D)", "polypeptide(L)"],
38
+ "ligand": ["cyclic-pseudo-peptide", "other"],
39
+ }
40
+
41
+ CRYSTALLIZATION_METHODS = {
42
+ "X-RAY DIFFRACTION",
43
+ "NEUTRON DIFFRACTION",
44
+ "ELECTRON CRYSTALLOGRAPHY",
45
+ "POWDER CRYSTALLOGRAPHY",
46
+ "FIBER DIFFRACTION",
47
+ }
48
+
49
+ ### Protein Constants ###
50
+ # https://mmcif.wwpdb.org/dictionaries/mmcif_pdbx_v40.dic/Items/_entity_poly.pdbx_seq_one_letter_code_can.html
51
+
52
+ mmcif_restype_1to3 = {
53
+ "A": "ALA",
54
+ "R": "ARG",
55
+ "N": "ASN",
56
+ "D": "ASP",
57
+ "C": "CYS",
58
+ "Q": "GLN",
59
+ "E": "GLU",
60
+ "G": "GLY",
61
+ "H": "HIS",
62
+ "I": "ILE",
63
+ "L": "LEU",
64
+ "K": "LYS",
65
+ "M": "MET",
66
+ "F": "PHE",
67
+ "P": "PRO",
68
+ "S": "SER",
69
+ "T": "THR",
70
+ "W": "TRP",
71
+ "Y": "TYR",
72
+ "V": "VAL",
73
+ "B": "ASX", # additional
74
+ "Z": "GLX", # additional
75
+ # "X": "UNK",
76
+ }
77
+
78
+ mmcif_restype_3to1 = {v: k for k, v in mmcif_restype_1to3.items()}
79
+
80
+ """
81
+ vdw table from rdkit; indices match those of the ligand atom_types.
82
+ https://github.com/rdkit/rdkit/blob/master/Code/GraphMol/atomic_data.cpp#L46
83
+ """
84
+
85
+ rdkit_vdws = [
86
+ 1.2,
87
+ 1.4,
88
+ 2.2,
89
+ 1.9,
90
+ 1.8,
91
+ 1.7,
92
+ 1.6,
93
+ 1.55,
94
+ 1.5,
95
+ 1.54,
96
+ 2.4,
97
+ 2.2,
98
+ 2.1,
99
+ 2.1,
100
+ 1.95,
101
+ 1.8,
102
+ 1.8,
103
+ 1.88,
104
+ 2.8,
105
+ 2.4,
106
+ 2.3,
107
+ 2.15,
108
+ 2.05,
109
+ 2.05,
110
+ 2.05,
111
+ 2.05,
112
+ 2.0,
113
+ 2.0,
114
+ 2.0,
115
+ 2.1,
116
+ 2.1,
117
+ 2.1,
118
+ 2.05,
119
+ 1.9,
120
+ 1.9,
121
+ 2.02,
122
+ 2.9,
123
+ 2.55,
124
+ 2.4,
125
+ 2.3,
126
+ 2.15,
127
+ 2.1,
128
+ 2.05,
129
+ 2.05,
130
+ 2.0,
131
+ 2.05,
132
+ 2.1,
133
+ 2.2,
134
+ 2.2,
135
+ 2.25,
136
+ 2.2,
137
+ 2.1,
138
+ 2.1,
139
+ 2.16,
140
+ 3.0,
141
+ 2.7,
142
+ 2.5,
143
+ 2.48,
144
+ 2.47,
145
+ 2.45,
146
+ 2.43,
147
+ 2.42,
148
+ 2.4,
149
+ 2.38,
150
+ 2.37,
151
+ 2.35,
152
+ 2.33,
153
+ 2.32,
154
+ 2.3,
155
+ 2.28,
156
+ 2.27,
157
+ 2.25,
158
+ 2.2,
159
+ 2.1,
160
+ 2.05,
161
+ 2.0,
162
+ 2.0,
163
+ 2.05,
164
+ 2.1,
165
+ 2.05,
166
+ 2.2,
167
+ 2.3,
168
+ 2.3,
169
+ 2.0,
170
+ 2.0,
171
+ 2.0,
172
+ 2.0,
173
+ 2.0,
174
+ 2.0,
175
+ 2.4,
176
+ 2.0,
177
+ 2.3,
178
+ 2.0,
179
+ 2.0,
180
+ 2.0,
181
+ 2.0,
182
+ 2.0,
183
+ 2.0,
184
+ 2.0,
185
+ 2.0,
186
+ 2.0,
187
+ 2.0,
188
+ 2.0,
189
+ 2.0,
190
+ 2.0,
191
+ 2.0,
192
+ 2.0,
193
+ 2.0,
194
+ 2.0,
195
+ 2.0,
196
+ 2.0,
197
+ 2.0,
198
+ 2.0,
199
+ 2.0,
200
+ 2.0,
201
+ 2.0,
202
+ 2.0,
203
+ 2.0,
204
+ ]
205
+
206
+
207
+ """
208
+ atom37 vdw table. Orders match atom37 indices. Note: the vdw's for N and O are different from rdkit_van_der_waals in this file.
209
+ We used the rdkit values for consistency.
210
+ Ref to https://github.com/aqlaboratory/openfold/blob/80c85b54e1a81d9a66df3f1b6c257ff97f10acd3/openfold/utils/loss.py#L1208C5-L1211C6
211
+ rdkit_van_der_waals_radius = {
212
+ "C": 1.7,
213
+ "N": 1.6,
214
+ "O": 1.55,
215
+ "S": 1.8,
216
+ }
217
+
218
+ atom37_vdw = [
219
+ rdkit_van_der_waals_radius[name[0]]
220
+ for name in residue_constants.atom_types
221
+ ]
222
+
223
+ """
224
+ atom37_vdw = [
225
+ 1.55,
226
+ 1.7,
227
+ 1.7,
228
+ 1.7,
229
+ 1.52,
230
+ 1.7,
231
+ 1.7,
232
+ 1.7,
233
+ 1.52,
234
+ 1.52,
235
+ 1.8,
236
+ 1.7,
237
+ 1.7,
238
+ 1.7,
239
+ 1.55,
240
+ 1.55,
241
+ 1.52,
242
+ 1.52,
243
+ 1.8,
244
+ 1.7,
245
+ 1.7,
246
+ 1.7,
247
+ 1.7,
248
+ 1.55,
249
+ 1.55,
250
+ 1.55,
251
+ 1.52,
252
+ 1.52,
253
+ 1.7,
254
+ 1.55,
255
+ 1.55,
256
+ 1.52,
257
+ 1.7,
258
+ 1.7,
259
+ 1.7,
260
+ 1.55,
261
+ 1.52,
262
+ ]
263
+
264
+
265
+ # Standard residues (AlphaFold3 SI Table 13)
266
+ PRO_STD_RESIDUES = {
267
+ "ALA": 0,
268
+ "ARG": 1,
269
+ "ASN": 2,
270
+ "ASP": 3,
271
+ "CYS": 4,
272
+ "GLN": 5,
273
+ "GLU": 6,
274
+ "GLY": 7,
275
+ "HIS": 8,
276
+ "ILE": 9,
277
+ "LEU": 10,
278
+ "LYS": 11,
279
+ "MET": 12,
280
+ "PHE": 13,
281
+ "PRO": 14,
282
+ "SER": 15,
283
+ "THR": 16,
284
+ "TRP": 17,
285
+ "TYR": 18,
286
+ "VAL": 19,
287
+ "UNK": 20,
288
+ }
289
+
290
+ RNA_STD_RESIDUES = {
291
+ "A": 21,
292
+ "G": 22,
293
+ "C": 23,
294
+ "U": 24,
295
+ "N": 25,
296
+ }
297
+
298
+ DNA_STD_RESIDUES = {
299
+ "DA": 26,
300
+ "DG": 27,
301
+ "DC": 28,
302
+ "DT": 29,
303
+ "DN": 30,
304
+ }
305
+
306
+ GAP = {"-": 31}
307
+ STD_RESIDUES = PRO_STD_RESIDUES | RNA_STD_RESIDUES | DNA_STD_RESIDUES
308
+ STD_RESIDUES_WITH_GAP = STD_RESIDUES | GAP
309
+ STD_RESIDUES_WITH_GAP_ID_TO_NAME = {
310
+ idx: res_type for res_type, idx in STD_RESIDUES_WITH_GAP.items()
311
+ }
312
+
313
+ rna_order_with_x = {
314
+ "A": 0,
315
+ "G": 1,
316
+ "C": 2,
317
+ "U": 3,
318
+ "N": 4,
319
+ }
320
+
321
+ RNA_NT_TO_ID = {
322
+ "A": 0,
323
+ "G": 1,
324
+ "C": 2,
325
+ "U": 3,
326
+ "N": 4,
327
+ "R": 4, # A or G
328
+ "Y": 4, # C or U
329
+ "S": 4, # G or C
330
+ "W": 4, # A or U
331
+ "K": 4, # G or U
332
+ "M": 4, # A or C
333
+ "B": 4, # C, G, U
334
+ "D": 4, # A, G, U
335
+ "H": 4, # A, C, U
336
+ "V": 4, # A, C, G
337
+ "X": 4,
338
+ "I": 4,
339
+ "T": 4,
340
+ "-": 5,
341
+ }
342
+
343
+ # Partial inversion of RNA_NT_TO_ID
344
+ RNA_ID_TO_NT = {
345
+ 0: "A",
346
+ 1: "G",
347
+ 2: "C",
348
+ 3: "U",
349
+ 4: "N", # Also R, Y, S, W, K, M, B, D, H
350
+ 5: "-",
351
+ }
352
+
353
+
354
+ def get_all_elems():
355
+ """
356
+ Retrieve a list of all element symbols from the RDKit periodic table (atomic numbers 1-118), plus placeholder symbols up to 128 entries.
357
+
358
+ Returns:
359
+ list: A list of element symbol strings.
360
+ """
361
+ elem_list = []
362
+ pt = GetPeriodicTable()
363
+ for i in range(1, 119):
364
+ elem_list.append(pt.GetElementSymbol(i).upper())
365
+
366
+ # 128 elements in the AlphaFold3 SI Table 5 ref_element
367
+ elem_list += [f"UNK_ELEM_{i}" for i in range(119, 129)]
368
+ return elem_list
369
+
370
+
371
+ # len(STD_RESIDUES) + Atomic number up to 118 + 10 UNK_ELEM
372
+ ELEMS = dict([(i, len(STD_RESIDUES) + idx) for idx, i in enumerate(get_all_elems())])
373
+
374
+ RES_ATOMS_DICT = {
375
+ "ALA": {"N": 0, "CA": 1, "C": 2, "O": 3, "CB": 4, "OXT": 5},
376
+ "ARG": {
377
+ "N": 0,
378
+ "CA": 1,
379
+ "C": 2,
380
+ "O": 3,
381
+ "CB": 4,
382
+ "CG": 5,
383
+ "CD": 6,
384
+ "NE": 7,
385
+ "CZ": 8,
386
+ "NH1": 9,
387
+ "NH2": 10,
388
+ "OXT": 11,
389
+ },
390
+ "ASN": {
391
+ "N": 0,
392
+ "CA": 1,
393
+ "C": 2,
394
+ "O": 3,
395
+ "CB": 4,
396
+ "CG": 5,
397
+ "OD1": 6,
398
+ "ND2": 7,
399
+ "OXT": 8,
400
+ },
401
+ "ASP": {
402
+ "N": 0,
403
+ "CA": 1,
404
+ "C": 2,
405
+ "O": 3,
406
+ "CB": 4,
407
+ "CG": 5,
408
+ "OD1": 6,
409
+ "OD2": 7,
410
+ "OXT": 8,
411
+ },
412
+ "CYS": {"N": 0, "CA": 1, "C": 2, "O": 3, "CB": 4, "SG": 5, "OXT": 6},
413
+ "GLN": {
414
+ "N": 0,
415
+ "CA": 1,
416
+ "C": 2,
417
+ "O": 3,
418
+ "CB": 4,
419
+ "CG": 5,
420
+ "CD": 6,
421
+ "OE1": 7,
422
+ "NE2": 8,
423
+ "OXT": 9,
424
+ },
425
+ "GLU": {
426
+ "N": 0,
427
+ "CA": 1,
428
+ "C": 2,
429
+ "O": 3,
430
+ "CB": 4,
431
+ "CG": 5,
432
+ "CD": 6,
433
+ "OE1": 7,
434
+ "OE2": 8,
435
+ "OXT": 9,
436
+ },
437
+ "GLY": {"N": 0, "CA": 1, "C": 2, "O": 3, "OXT": 4},
438
+ "HIS": {
439
+ "N": 0,
440
+ "CA": 1,
441
+ "C": 2,
442
+ "O": 3,
443
+ "CB": 4,
444
+ "CG": 5,
445
+ "ND1": 6,
446
+ "CD2": 7,
447
+ "CE1": 8,
448
+ "NE2": 9,
449
+ "OXT": 10,
450
+ },
451
+ "ILE": {
452
+ "N": 0,
453
+ "CA": 1,
454
+ "C": 2,
455
+ "O": 3,
456
+ "CB": 4,
457
+ "CG1": 5,
458
+ "CG2": 6,
459
+ "CD1": 7,
460
+ "OXT": 8,
461
+ },
462
+ "LEU": {
463
+ "N": 0,
464
+ "CA": 1,
465
+ "C": 2,
466
+ "O": 3,
467
+ "CB": 4,
468
+ "CG": 5,
469
+ "CD1": 6,
470
+ "CD2": 7,
471
+ "OXT": 8,
472
+ },
473
+ "LYS": {
474
+ "N": 0,
475
+ "CA": 1,
476
+ "C": 2,
477
+ "O": 3,
478
+ "CB": 4,
479
+ "CG": 5,
480
+ "CD": 6,
481
+ "CE": 7,
482
+ "NZ": 8,
483
+ "OXT": 9,
484
+ },
485
+ "MET": {
486
+ "N": 0,
487
+ "CA": 1,
488
+ "C": 2,
489
+ "O": 3,
490
+ "CB": 4,
491
+ "CG": 5,
492
+ "SD": 6,
493
+ "CE": 7,
494
+ "OXT": 8,
495
+ },
496
+ "PHE": {
497
+ "N": 0,
498
+ "CA": 1,
499
+ "C": 2,
500
+ "O": 3,
501
+ "CB": 4,
502
+ "CG": 5,
503
+ "CD1": 6,
504
+ "CD2": 7,
505
+ "CE1": 8,
506
+ "CE2": 9,
507
+ "CZ": 10,
508
+ "OXT": 11,
509
+ },
510
+ "PRO": {"N": 0, "CA": 1, "C": 2, "O": 3, "CB": 4, "CG": 5, "CD": 6, "OXT": 7},
511
+ "SER": {"N": 0, "CA": 1, "C": 2, "O": 3, "CB": 4, "OG": 5, "OXT": 6},
512
+ "THR": {"N": 0, "CA": 1, "C": 2, "O": 3, "CB": 4, "OG1": 5, "CG2": 6, "OXT": 7},
513
+ "TRP": {
514
+ "N": 0,
515
+ "CA": 1,
516
+ "C": 2,
517
+ "O": 3,
518
+ "CB": 4,
519
+ "CG": 5,
520
+ "CD1": 6,
521
+ "CD2": 7,
522
+ "NE1": 8,
523
+ "CE2": 9,
524
+ "CE3": 10,
525
+ "CZ2": 11,
526
+ "CZ3": 12,
527
+ "CH2": 13,
528
+ "OXT": 14,
529
+ },
530
+ "TYR": {
531
+ "N": 0,
532
+ "CA": 1,
533
+ "C": 2,
534
+ "O": 3,
535
+ "CB": 4,
536
+ "CG": 5,
537
+ "CD1": 6,
538
+ "CD2": 7,
539
+ "CE1": 8,
540
+ "CE2": 9,
541
+ "CZ": 10,
542
+ "OH": 11,
543
+ "OXT": 12,
544
+ },
545
+ "VAL": {"N": 0, "CA": 1, "C": 2, "O": 3, "CB": 4, "CG1": 5, "CG2": 6, "OXT": 7},
546
+ "UNK": {"N": 0, "CA": 1, "C": 2, "O": 3, "CB": 4, "CG": 5, "OXT": 6},
547
+ "DA": {
548
+ "OP3": 0,
549
+ "P": 1,
550
+ "OP1": 2,
551
+ "OP2": 3,
552
+ "O5'": 4,
553
+ "C5'": 5,
554
+ "C4'": 6,
555
+ "O4'": 7,
556
+ "C3'": 8,
557
+ "O3'": 9,
558
+ "C2'": 10,
559
+ "C1'": 11,
560
+ "N9": 12,
561
+ "C8": 13,
562
+ "N7": 14,
563
+ "C5": 15,
564
+ "C6": 16,
565
+ "N6": 17,
566
+ "N1": 18,
567
+ "C2": 19,
568
+ "N3": 20,
569
+ "C4": 21,
570
+ },
571
+ "DC": {
572
+ "OP3": 0,
573
+ "P": 1,
574
+ "OP1": 2,
575
+ "OP2": 3,
576
+ "O5'": 4,
577
+ "C5'": 5,
578
+ "C4'": 6,
579
+ "O4'": 7,
580
+ "C3'": 8,
581
+ "O3'": 9,
582
+ "C2'": 10,
583
+ "C1'": 11,
584
+ "N1": 12,
585
+ "C2": 13,
586
+ "O2": 14,
587
+ "N3": 15,
588
+ "C4": 16,
589
+ "N4": 17,
590
+ "C5": 18,
591
+ "C6": 19,
592
+ },
593
+ "DG": {
594
+ "OP3": 0,
595
+ "P": 1,
596
+ "OP1": 2,
597
+ "OP2": 3,
598
+ "O5'": 4,
599
+ "C5'": 5,
600
+ "C4'": 6,
601
+ "O4'": 7,
602
+ "C3'": 8,
603
+ "O3'": 9,
604
+ "C2'": 10,
605
+ "C1'": 11,
606
+ "N9": 12,
607
+ "C8": 13,
608
+ "N7": 14,
609
+ "C5": 15,
610
+ "C6": 16,
611
+ "O6": 17,
612
+ "N1": 18,
613
+ "C2": 19,
614
+ "N2": 20,
615
+ "N3": 21,
616
+ "C4": 22,
617
+ },
618
+ "DT": {
619
+ "OP3": 0,
620
+ "P": 1,
621
+ "OP1": 2,
622
+ "OP2": 3,
623
+ "O5'": 4,
624
+ "C5'": 5,
625
+ "C4'": 6,
626
+ "O4'": 7,
627
+ "C3'": 8,
628
+ "O3'": 9,
629
+ "C2'": 10,
630
+ "C1'": 11,
631
+ "N1": 12,
632
+ "C2": 13,
633
+ "O2": 14,
634
+ "N3": 15,
635
+ "C4": 16,
636
+ "O4": 17,
637
+ "C5": 18,
638
+ "C7": 19,
639
+ "C6": 20,
640
+ },
641
+ "DN": {
642
+ "OP3": 0,
643
+ "P": 1,
644
+ "OP1": 2,
645
+ "OP2": 3,
646
+ "O5'": 4,
647
+ "C5'": 5,
648
+ "C4'": 6,
649
+ "O4'": 7,
650
+ "C3'": 8,
651
+ "O3'": 9,
652
+ "C2'": 10,
653
+ "C1'": 11,
654
+ },
655
+ "A": {
656
+ "OP3": 0,
657
+ "P": 1,
658
+ "OP1": 2,
659
+ "OP2": 3,
660
+ "O5'": 4,
661
+ "C5'": 5,
662
+ "C4'": 6,
663
+ "O4'": 7,
664
+ "C3'": 8,
665
+ "O3'": 9,
666
+ "C2'": 10,
667
+ "O2'": 11,
668
+ "C1'": 12,
669
+ "N9": 13,
670
+ "C8": 14,
671
+ "N7": 15,
672
+ "C5": 16,
673
+ "C6": 17,
674
+ "N6": 18,
675
+ "N1": 19,
676
+ "C2": 20,
677
+ "N3": 21,
678
+ "C4": 22,
679
+ },
680
+ "C": {
681
+ "OP3": 0,
682
+ "P": 1,
683
+ "OP1": 2,
684
+ "OP2": 3,
685
+ "O5'": 4,
686
+ "C5'": 5,
687
+ "C4'": 6,
688
+ "O4'": 7,
689
+ "C3'": 8,
690
+ "O3'": 9,
691
+ "C2'": 10,
692
+ "O2'": 11,
693
+ "C1'": 12,
694
+ "N1": 13,
695
+ "C2": 14,
696
+ "O2": 15,
697
+ "N3": 16,
698
+ "C4": 17,
699
+ "N4": 18,
700
+ "C5": 19,
701
+ "C6": 20,
702
+ },
703
+ "G": {
704
+ "OP3": 0,
705
+ "P": 1,
706
+ "OP1": 2,
707
+ "OP2": 3,
708
+ "O5'": 4,
709
+ "C5'": 5,
710
+ "C4'": 6,
711
+ "O4'": 7,
712
+ "C3'": 8,
713
+ "O3'": 9,
714
+ "C2'": 10,
715
+ "O2'": 11,
716
+ "C1'": 12,
717
+ "N9": 13,
718
+ "C8": 14,
719
+ "N7": 15,
720
+ "C5": 16,
721
+ "C6": 17,
722
+ "O6": 18,
723
+ "N1": 19,
724
+ "C2": 20,
725
+ "N2": 21,
726
+ "N3": 22,
727
+ "C4": 23,
728
+ },
729
+ "U": {
730
+ "OP3": 0,
731
+ "P": 1,
732
+ "OP1": 2,
733
+ "OP2": 3,
734
+ "O5'": 4,
735
+ "C5'": 5,
736
+ "C4'": 6,
737
+ "O4'": 7,
738
+ "C3'": 8,
739
+ "O3'": 9,
740
+ "C2'": 10,
741
+ "O2'": 11,
742
+ "C1'": 12,
743
+ "N1": 13,
744
+ "C2": 14,
745
+ "O2": 15,
746
+ "N3": 16,
747
+ "C4": 17,
748
+ "O4": 18,
749
+ "C5": 19,
750
+ "C6": 20,
751
+ },
752
+ "N": {
753
+ "OP3": 0,
754
+ "P": 1,
755
+ "OP1": 2,
756
+ "OP2": 3,
757
+ "O5'": 4,
758
+ "C5'": 5,
759
+ "C4'": 6,
760
+ "O4'": 7,
761
+ "C3'": 8,
762
+ "O3'": 9,
763
+ "C2'": 10,
764
+ "O2'": 11,
765
+ "C1'": 12,
766
+ },
767
+ }
768
+
769
+ CRYSTALLIZATION_AIDS = (
770
+ "SO4",
771
+ "GOL",
772
+ "EDO",
773
+ "PO4",
774
+ "ACT",
775
+ "PEG",
776
+ "DMS",
777
+ "TRS",
778
+ "PGE",
779
+ "PG4",
780
+ "FMT",
781
+ "EPE",
782
+ "MPD",
783
+ "MES",
784
+ "CD",
785
+ "IOD",
786
+ )
787
+
788
+ PROT_STD_RESIDUES_ONE_TO_THREE = {
789
+ "A": "ALA",
790
+ "R": "ARG",
791
+ "N": "ASN",
792
+ "D": "ASP",
793
+ "C": "CYS",
794
+ "Q": "GLN",
795
+ "E": "GLU",
796
+ "G": "GLY",
797
+ "H": "HIS",
798
+ "I": "ILE",
799
+ "L": "LEU",
800
+ "K": "LYS",
801
+ "M": "MET",
802
+ "F": "PHE",
803
+ "P": "PRO",
804
+ "S": "SER",
805
+ "T": "THR",
806
+ "W": "TRP",
807
+ "Y": "TYR",
808
+ "V": "VAL",
809
+ "X": "UNK",
810
+ }
811
+
812
+ CRYSTALLIZATION_AIDS = (
813
+ "SO4",
814
+ "GOL",
815
+ "EDO",
816
+ "PO4",
817
+ "ACT",
818
+ "PEG",
819
+ "DMS",
820
+ "TRS",
821
+ "PGE",
822
+ "PG4",
823
+ "FMT",
824
+ "EPE",
825
+ "MPD",
826
+ "MES",
827
+ "CD",
828
+ "IOD",
829
+ )
830
+
831
+
832
+ ### Molecule Constants ###
833
+ # AlphaFold3 SI Table 9
834
+ LIGAND_EXCLUSION = {'144', 'SEP', 'PG0', 'BEN', 'NH4', 'PO4', '3SY', 'BO3', 'UNL', 'MES', 'FLC', 'PGR', '15P', 'MYR',
835
+ 'POL', 'CIT', 'N', 'SPD', 'CAQ', 'IPA', 'EGL', 'SAR', 'NO3', 'STU', 'NHE', 'BU1', 'OXA', 'TPO',
836
+ 'EEE', 'CAD', 'CBM', 'SPM', 'BCN', 'FMT', 'PEP', 'CM', 'BAM', 'ETF', 'IOD', 'MLI', 'MRD', 'SCN',
837
+ 'GSH', 'CCN', 'SR', '1PE', 'ACY', 'STE', '9JE', 'SEO', 'IHS', 'MLA', 'TBU', 'DEP', 'STO', 'ACE',
838
+ 'NA', 'TRS', 'CPT', 'OHE', 'TME', 'CL', 'BME', 'DN', '3HR', 'LDA', 'SO4', 'MPD', 'OLC', 'DOD',
839
+ 'PE4', 'DOX', 'CMO', 'POP', 'PG4', '2F2', 'DMS', 'IMD', 'NH2', 'EOX', 'IPH', 'ACT', '7PE', 'UNX',
840
+ 'GTT', '7N5', 'AZI', 'FCY', 'SIN', 'AAE', 'BTB', 'BTC', 'PGE', 'PE3', 'MB3', 'EDO', 'PLM', 'BCT',
841
+ 'EOH', 'P6G', 'ACN', 'D10', 'EPE', 'DIO', 'CO3', 'PVO', 'TAR', 'URE', 'BDN', 'GOL', 'MSE', 'HED',
842
+ 'CLR', 'MEG', 'IHP', 'PEO', 'CXS', 'MOH', 'GYF', 'PEG', 'FJO', 'FW5', 'OLA', '2JC', 'ABA', 'O4B',
843
+ 'UPL', 'OME', 'C8E', 'OMB', 'UNK'} # fmt: skip
844
+
845
+
846
+ # AlphaFold3 SI Table 11
847
+ GLYCANS = {'79J', 'LXZ', 'KO1', 'Z57', 'XDX', '8OQ', 'G0S', '14T', 'ZB3', '9PG', 'BGL', 'GYU', 'AHG', 'SUC', 'ADA', 'NGR',
848
+ '4R1', 'EBQ', 'GAF', 'NAA', 'GYP', 'NDG', 'U2D', 'ISL', '9GP', 'KDM', 'HSX', 'NYT', 'V3P', '4NN', 'Z3L', 'ZCZ',
849
+ 'D5E', 'RIP', '3LR', 'GL1', 'K99', 'MQG', 'RAM', 'TUP', 'KDB', 'SIO', 'Z5L', 'GUL', 'GU2', 'EQV', '0V4', 'ABD',
850
+ 'RY7', '5II', 'GAL', '2GL', 'DR5', '4RS', 'MNA', 'DFX', '0WK', 'HTG', 'RP5', 'A1Q', 'B1N', 'GUF', 'NGA', 'TMR',
851
+ 'C3X', '9S7', 'XLS', 'MAG', 'RST', 'SDY', 'HSH', 'GN4', 'GTR', 'KBA', '6YR', 'CKB', 'DDA', 'RHC', 'OPM', 'SIZ',
852
+ 'GE3', 'TS8', 'Z6W', 'BZD', '56N', 'RIB', 'GL6', '8GA', 'GLC', 'TAG', 'QIF', 'TA6', 'UAP', 'TVY', 'GC1', 'ARW',
853
+ 'GU3', 'LBS', 'KDD', 'NPF', '49V', 'CDR', '12E', '6LA', '2M4', 'SA0', 'HNW', 'AOG', 'G8Z', '8LR', 'GPH', 'XXX',
854
+ 'GPM', 'MTT', 'JFZ', 'LOG', 'LMO', '5TH', '8I4', 'GUP', '5KQ', 'R2G', 'SSG', 'P8E', 'RF5', 'TOC', 'CT3', '2FL',
855
+ '73E', 'VJ4', '0H0', 'ERI', 'AMG', '3GR', 'BO1', 'AFD', 'FYJ', 'IDF', 'NBY', 'DOM', 'MBF', 'QDK', 'TDG', '6GR',
856
+ 'MAV', '1X4', 'AF1', 'EEN', 'ZB1', 'Z2D', '445', 'KHP', 'LKS', '10M', '491', 'OTU', 'BNG', 'AY9', 'KDR', 'LEC',
857
+ 'FFX', 'AFO', 'SGA', '16F', 'X34', 'SEJ', 'LAG', 'DNO', '6PZ', 'LBT', 'OSU', '3BU', '6K3', 'SFU', 'YDR', 'SIA',
858
+ '2WP', '25E', 'SMD', 'NBG', 'DO8', 'LGU', 'S81', 'Z3Q', 'TWA', 'G6S', '2WS', 'G6D', '18D', 'IN1', '64K', 'QPS',
859
+ 'PTQ', 'FX1', 'RVM', '8GP', 'NLC', 'FCA', 'JLT', 'AH8', 'MFB', 'RRJ', 'SOL', 'TM5', 'TCB', 'GU5', 'TWY', 'ETT',
860
+ '8YV', 'SG6', 'XMM', '17T', 'BGC', 'MLR', 'Z6J', '9SJ', 'R2B', 'BBK', 'BEM', 'LTG', '0NZ', 'DKZ', '3YW', 'ASO',
861
+ 'FUB', '4GL', 'GLT', 'KTU', 'CBF', 'ARI', 'FIF', 'LCN', 'SG5', 'AC1', 'SUP', 'ZMR', 'GU8', 'YYH', 'XKJ', 'JSV',
862
+ 'DQR', 'M6D', 'FBP', 'AFP', 'F6P', 'GLG', 'JZR', 'DLG', '9C1', 'AAL', 'RRY', 'ZDC', 'TVS', 'B1H', 'XXM', '8B7',
863
+ 'RCD', 'UBO', '7D1', 'XYT', 'WZ2', 'X1X', 'LRH', 'GDA', 'GLS', 'G6P', '49A', 'NM9', 'DVC', 'MG5', 'SCR', 'MAF',
864
+ '149', 'LFC', 'FMF', 'FRU', 'BG8', 'GP4', 'GU1', 'XXR', '4V5', 'MA2', '293', '6KH', 'GAA', 'MXY', 'QV4', 'MSX',
865
+ 'GU6', '95Z', 'Z9M', 'ARB', 'FNY', 'H1S', 'VG1', 'VTB', 'Z61', 'H6Z', '7K3', 'XGP', 'SOE', 'Z6H', 'GYV', 'MLB',
866
+ 'DR3', 'ISD', 'BGN', 'AXR', 'SCG', 'Z8T', '6UD', 'KDF', 'GLA', 'BNX', '3MG', 'BDP', 'KFN', 'Z9N', '2FG', 'PNA',
867
+ 'MUB', 'ZDO', '9WJ', 'GMB', 'LER', 'TVM', '89Y', 'Z4Y', '9SM', 'NGS', 'LAO', 'KGM', 'FKD', 'M1F', 'BG6', 'LAK',
868
+ '8GG', '6LS', 'GBH', 'CEG', 'BDR', 'RR7', 'SOG', 'AZC', 'AMU', 'BS7', '3S6', 'MXZ', 'Z3U', 'MDP', '6MJ', 'M3M',
869
+ 'DT6', 'PRP', 'TUG', 'Z16', 'IDG', 'TUR', 'Z4S', 'GM0', 'A0K', 'GCN', 'ZEE', 'UEA', 'HVC', 'CE5', 'FUD', 'NAG',
870
+ 'GPO', '22S', '3J4', 'DKX', 'FMO', 'BXP', 'NSQ', '50A', 'MAT', '5TM', '0MK', '9OK', 'RI2', 'SZZ', 'IDS', 'JRV',
871
+ '18O', '1CF', 'RAO', 'P53', '27C', 'Z3K', 'Z4U', 'Z4R', 'B4G', '6KU', 'HBZ', '07E', 'KBG', '98U', 'GFP', 'LFR',
872
+ 'G2F', '51N', 'FUF', 'LGC', '6S2', 'E3M', 'G7P', 'OTN', 'MVP', 'TVD', 'BBV', 'E5G', 'MJJ', 'IEM', 'FSA', 'CE8',
873
+ 'U1Y', '1FT', 'HTM', 'DLD', 'YO5', 'W9T', '5N6', 'PNG', 'NGY', 'DSR', 'M3N', 'GP0', '3MK', 'RBL', 'GTM', 'FSW',
874
+ '4JA', 'YYM', 'Z4V', '3HD', '2DR', 'AIG', 'GL0', 'BND', 'TM6', 'TUJ', 'DAN', '5GF', '4QY', '3FM', '6KW', 'LNV',
875
+ '289', 'BFN', 'PSG', 'U9J', 'YX0', 'EQP', 'YZ0', '0BD', 'GAT', 'LVZ', 'FUL', '22O', 'DLF', 'MA1', 'BXY', 'C3G',
876
+ 'CR6', 'GNS', 'EEQ', 'IDY', 'FFC', 'NBX', 'SID', '9KJ', '9WZ', 'M2F', 'FK9', 'SSH', 'TWG', 'RVG', 'BXX', '24S',
877
+ 'FSM', 'GDL', 'F1X', '3R3', 'ALX', '4GC', 'GL2', 'DL6', 'GS1', 'AMV', 'TVV', '2DG', 'RGG', 'TFU', '1GN', 'N3U',
878
+ 'SOR', 'MA3', 'GCT', 'H1M', '16G', '49T', 'BCD', 'GPW', 'DAG', 'GN1', 'IAB', 'EBG', 'GPU', '38J', '1LL', 'DR2',
879
+ 'YIO', 'YKR', '15L', 'WZ1', 'BTG', 'GPK', '5MM', '26O', 'AMN', 'DEL', 'CTT', '83Y', 'GMT', 'CTO', 'MBE', '1SD',
880
+ '6ZC', 'AXP', 'OX2', '5LT', 'MRH', '6BG', 'MDA', 'SG7', '045', 'GC4', 'LDY', 'YYJ', '07Y', 'KDO', 'GP1', 'BHG',
881
+ 'DPC', 'BM3', 'GU4', 'ISX', 'P6P', 'GPQ', '1S4', '475', 'GYE', 'CBK', 'CEZ', 'SGD', 'TH1', 'V3M', 'RWI', 'RM4',
882
+ 'U9M', 'U2A', '7GP', '05L', 'Z0F', 'GLO', 'LXB', 'TGA', '61J', 'GYG', 'GCU', 'GE1', 'F1P', 'GLP', 'CTR', 'AHR',
883
+ '3LJ', 'FUY', 'JVA', 'LAT', 'NHF', 'RB5', 'XYS', 'LXC', 'SLT', 'U8V', 'GMH', 'EAG', 'GCV', 'B6D', 'IDU', 'KG1',
884
+ 'BDF', 'NTP', 'IXD', 'RZM', 'PH5', 'SHB', 'X6Y', 'B16', 'Z9E', '9VP', 'LAH', 'H2P', 'TNX', '5GO', 'TGY', '5SP',
885
+ 'RHA', '5KV', 'GTK', 'SUS', 'DAF', '6DM', '8S0', '6MN', 'G4D', 'NT1', 'XYF', '5TJ', '46Z', '9AM', '7K2', '6C2',
886
+ 'WIA', '9YW', 'G4S', '46D', 'Z9W', 'ABL', 'XYZ', 'G3I', 'S7P', 'GC9', 'GQ1', 'GCO', 'M6P', 'WUN', 'U63', 'ZB2',
887
+ 'GLD', 'T6P', 'ZEL', '145', '2OS', 'BGP', 'C4W', 'IDX', 'MUR', '3SA', 'CR1', '34V', 'DEG', 'F55', 'L0W', 'TYV',
888
+ 'CJB', 'TW7', 'DDL', '5L3', 'NGC', 'ACX', 'JVS', 'NA1', 'GAD', '7JZ', 'BOG', 'GCW', 'BDG', 'Z15', '0LP', 'ABE',
889
+ 'RG1', 'DGU', 'N1L', 'NGE', 'PUF', 'B9D', '49S', '5LS', '4N2', '23V', 'RUU', 'B0D', 'RTV', '42D', 'M1P', 'MAB',
890
+ '2F8', 'TQY', 'L6S', 'V71', '2H5', 'M8C', 'NTF', 'H3S', 'LM2', 'MN0', 'JV4', '9WN', 'U9G', 'LZ0', 'X0X', 'TXB',
891
+ '3DO', 'SG4', 'IDR', '8B9', 'TOA', 'CRA', 'HSJ', '0HX', 'FDQ', 'FUC', 'ABF', 'ALL', 'G20', 'GL9', 'IDC', 'LOX',
892
+ 'Z2T', 'RP6', '2HA', 'AHM', 'DRI', 'EMZ', 'GMZ', 'HD4', 'GU9', 'L1L', 'PNW', 'PPC', 'MMA', 'CE6', '5KS', 'MGC',
893
+ 'XLF', 'KO2', 'RUG', 'HSG', 'SF6', 'IPT', 'TF0', 'GCD', 'B8D', '0YT', 'GRX', 'HNV', 'FVQ', 'RV7', 'J5B', 'ERE',
894
+ 'DFR', 'LVO', '4GP', 'BQY', 'BMA', 'KDA', 'ARA', 'KDN', 'ZCD', 'A5C', 'T68', 'XYL', 'YJM', 'NM6', '9CD', 'CNP',
895
+ 'U97', '9T1', 'C5X', 'R1X', 'BW3', '09X', 'GNX', 'PDX', 'Z9D', 'DGO', 'SLM', '66O', '4CQ', 'X6X', 'RTG', 'HSY',
896
+ '20X', 'GCB', 'EUS', 'FNG', '1S3', 'EGA', 'MQT', 'NXD', '5TK', 'Z9K', 'TGR', '9MR', 'M7P', 'PA1', 'MFU', 'UBH',
897
+ 'CBI', 'TMX', 'T6D', '32O', 'JHM', 'X2F', '4SG', '3DY', 'SGC', 'PAV', 'A2G', 'LAI', '0UB', 'BXF', '3J3', '9T7',
898
+ 'T6T', 'OI7', 'ANA', '9QG', 'K5B', 'KOT', 'GIV', 'MGL', 'GL4', '9SP', 'FDP', 'GPV', '6KS', 'GXV', 'NFG', 'M7B',
899
+ 'DG0', '57S', 'GUZ', '96O', 'GCS', 'MAN', 'YYB', 'TWD', 'MGS', 'TT7', 'PNJ', 'GXL', 'TRE', 'G28', '7NU', '8PK',
900
+ 'LKA', 'ASG', 'SF9', '2M8', '1GL', '5KT', 'BWG', 'OTG', 'VJ1', 'ZGE', '40J', 'Z4K', 'F58', 'KME', 'SR1', 'ZB0',
901
+ 'UDC', '6KL', '6LW', '8EX', 'D1M', '62I', 'H6Q', 'RAE', 'SHD', 'AGL', 'DGS', 'VKN', 'TWJ', 'MRP', 'TGK', 'HSQ',
902
+ 'ASC', 'F8X', '6GB', '0XY', 'BMX', 'SN5', 'Z5J', 'ZD0', 'DJB', 'KDE', 'TEU', 'M55', 'YYQ', 'DK4', 'D6G', 'KD5',
903
+ 'AH2', '4AM', 'RER', '16O', 'C3B', 'G1P', 'NG6', 'MBG', 'Z4W', 'MAW', '147', 'NGK', 'CKP', 'DJE', 'GL5', 'TVG',
904
+ 'PKM', 'L6T', 'XS2', '2GS', 'BTU', 'G16', 'PSV', 'AQA', 'MCU', 'SNG', '2M5', 'SLB', 'BM7', 'H53', 'MA8', 'OAK',
905
+ 'GRF', 'BGS', 'NTO', 'YYK', 'EPG', '6GP', 'MYG', 'FCT', 'Z9H', 'GL7', '48Z', '4UZ', '7CV', 'DYM', 'GLF', 'GU0',
906
+ 'CGF', 'STZ', '44S', 'LB2', 'TU4', 'Z8H', '5QP', 'A6P', 'XYP', 'B2G', 'U9A', 'SWE', 'NGZ', 'SGN', 'B7G', 'MAL',
907
+ '291', 'FSI', 'R1P', 'ACR', 'PZU', 'X2Y', 'Z9L', 'STW', 'U9D', 'X1P', 'TTV', 'GS9', 'QKH', 'SHG', 'N9S', 'NNG',
908
+ 'RP3', 'G3F', 'YX1', 'EMP', 'XIL', '08U', 'WOO', 'FCB', 'NG1', 'TRV', '20S', 'RAF', 'GZL', 'C4B', '9SG', 'GAC'} # fmt: skip
909
+
910
+
911
+ # AlphaFold3 SI Table 12
912
+ IONS = {'XGP', 'Z4K', '147', 'B0D', 'G6D', 'RIB', 'AXR', 'SOG', 'NTF', 'SHB', 'RG1', 'G6S', 'GPO', 'BTG', '5LT', 'CEG', 'KG1',
913
+ 'TDG', 'TRV', 'WZ1', 'ARI', 'HVC', 'TM6', '2DG', '6K3', 'ARA', 'ASO', '6GB', 'NBX', 'OTG', 'ASG', 'YO5', 'MRH', 'GYP',
914
+ 'C4B', 'GDA', 'MUB', 'XXM', 'M6D', 'OPM', 'GYV', 'DKX', '9SG', 'LOG', 'TRE', 'DLG', 'FNG', 'BBK', 'ABF', 'AQA', '3BU',
915
+ 'SIA', 'CGF', 'LBS', 'QV4', 'NAA', 'GLC', 'BHG', 'MSX', 'ZB1', 'YYJ', 'TUP', '6ZC', '0WK', 'RY7', 'L1L', 'RRY', 'M55',
916
+ '9PG', '5GF', '4V5', 'FMO', 'SWE', 'KDA', 'P8E', '14T', 'DL6', 'CKB', '2M8', 'AHR', 'NGY', '8GP', 'YYQ', 'LVO', 'CRA',
917
+ 'GU9', 'PPC', '6GP', 'CR1', 'G20', 'T6P', 'EMZ', 'RHA', 'GC4', 'AH2', 'FCT', 'QDK', 'DDA', 'RTV', '8S0', 'TVG', 'HNV',
918
+ 'FYJ', 'BDP', 'GYE', 'TS8', 'CEZ', '42D', 'NHF', 'NT1', 'WOO', '0LP', 'HBZ', 'SG5', 'NM9', 'CJB', 'DLF', 'EUS', 'IDY',
919
+ '2GL', 'NTO', 'PNG', 'B2G', '7NU', '4UZ', '5LS', '475', 'DJE', 'Z9E', 'GC9', 'QPS', '0NZ', 'F1X', 'G8Z', '2F8', '3SA',
920
+ '46D', '3DO', '6PZ', 'OI7', 'SLM', 'A0K', '9SJ', 'TWD', 'AOG', 'TW7', '2WS', 'GU5', 'NSQ', 'FUD', 'GLO', 'TNX', 'XYP',
921
+ 'JFZ', '2HA', 'G16', 'V3M', 'RTG', 'C4W', 'R2G', 'HD4', '66O', 'MFB', 'GXL', '8YV', 'NFG', 'FFC', '3YW', 'XYZ', '445',
922
+ 'IXD', 'GUL', 'CTO', '05L', 'Z3L', 'RBL', 'DR5', 'S81', 'CTR', '15L', 'GLP', '7K3', 'LDY', 'Z4S', 'H2P', '4GP', '5SP',
923
+ '18O', 'DGS', 'OX2', 'DFR', 'GN1', 'BGL', 'Z9K', 'GU4', '0V4', 'MA2', 'U2A', 'MXZ', 'PA1', '9YW', 'GS9', '3MK', 'AAL',
924
+ 'NBY', 'XXX', 'ISD', 'SEJ', 'DKZ', 'GL9', '23V', 'AMN', 'AHG', '25E', 'DJB', '7K2', 'GDL', '08U', 'TT7', 'DRI', 'HSY',
925
+ 'LB2', 'GCV', 'X1P', 'MN0', 'BW3', 'U9J', 'FFX', 'Z3U', 'LOX', 'MQG', 'HSG', 'GCO', 'GPQ', 'IDR', '2GS', 'AGL', 'RUU',
926
+ '5KV', 'R1X', 'LZ0', 'P6P', '0H0', '32O', 'LAG', 'YYK', '07E', '6KS', 'KOT', '17T', 'TQY', 'RM4', 'LNV', 'BGN', 'STW',
927
+ 'NGC', 'GLF', '2WP', 'GL5', 'KHP', '9SP', 'LAI', 'KDB', 'JVA', 'OTN', 'NA1', 'RR7', 'B16', 'PSV', 'NXD', 'C5X', 'G1P',
928
+ 'RRJ', 'DAF', '5N6', 'SG4', 'KDN', '95Z', 'FDQ', 'K5B', 'MDP', 'GTK', '4SG', 'ALL', 'LXC', 'TM5', 'NGA', '98U', '7JZ',
929
+ 'A6P', 'UBH', '293', '9T7', 'PUF', '5TM', 'VTB', 'BGP', 'JV4', 'SN5', 'FSA', 'LAK', 'G7P', 'BGC', 'ZCD', '7GP', '79J',
930
+ 'FKD', 'TWY', 'ZGE', 'OAK', 'FMF', 'ZCZ', 'GL2', 'MAV', 'ZB3', 'SA0', '3LR', 'SHD', 'XLS', 'DOM', 'Z4R', 'GP0', '5KS',
931
+ 'KO1', 'FCB', 'LFC', 'AC1', 'NPF', 'X6Y', 'IDF', '20X', '6KL', '6LW', '49S', '0YT', 'BDR', 'GBH', 'LAH', 'KO2', '40J',
932
+ '4CQ', 'D5E', 'T6D', 'SUP', 'TGR', 'Z57', 'SDY', '4NN', 'MNA', 'Z5J', '20S', 'CT3', 'DQR', '5MM', '83Y', '49T', 'BDG',
933
+ 'GL1', 'TOC', '6UD', 'GM0', 'GU3', '18D', 'ADA', '4AM', '9WZ', 'HSX', 'QIF', '6DM', '4RS', 'KDF', 'GAL', 'ISL', 'Z9H',
934
+ 'GC1', 'Z9W', 'NBG', 'MAL', 'BGS', 'W9T', 'U9A', '62I', 'M6P', 'AFO', 'C3G', 'M2F', 'RUG', 'ARW', 'LEC', 'B8D', '61J',
935
+ 'GL7', 'F58', 'GP4', 'GFP', 'TVY', 'ZB0', 'FSM', 'BDF', 'TCB', 'ZEL', 'IDG', '9CD', 'PNA', 'SF9', 'DSR', 'MG5', 'E5G',
936
+ 'PNW', 'TH1', '1S4', 'PTQ', 'KDD', 'SSH', 'F55', 'V71', 'VG1', '9T1', '145', 'GU2', '2M5', '8I4', 'H1S', 'YYB', '1LL',
937
+ '4N2', 'BG6', 'R2B', 'MAT', 'LMO', 'OSU', 'PSG', 'RCD', '26O', 'DGO', 'SID', 'FUB', '2FL', '3HD', '34V', 'FK9', 'AMG',
938
+ 'G4D', 'EPG', 'BWG', 'KTU', '491', 'JHM', 'NG1', 'DLD', 'MCU', 'MQT', 'EQV', 'CBF', '4GL', 'GS1', 'DEG', 'DDL', 'SGA',
939
+ '16O', 'X6X', 'H53', 'FUC', 'IDS', 'LTG', 'TMX', '9SM', '045', 'DAN', 'FRU', 'Z5L', 'AHM', 'BNG', 'AFP', 'MAF', 'UBO',
940
+ 'BOG', '2H5', 'NG6', '10M', 'NM6', 'RST', 'C3X', '9S7', '49A', 'AXP', 'PH5', 'ISX', 'B6D', 'GU6', 'TWG', '6GR', 'H3S',
941
+ 'Z61', '9WJ', 'BMA', 'U63', 'LKA', 'GRF', 'VJ1', 'RZM', 'MA3', '0XY', 'GAF', 'GAD', '1FT', '149', 'DPC', 'LFR', 'B9D',
942
+ 'CE5', 'SOR', '6KU', 'SFU', 'BEM', 'YKR', '38J', 'N3U', 'ARB', 'CBK', 'SGD', '8EX', 'WZ2', '8B9', 'TF0', 'X2Y', 'PKM',
943
+ 'RF5', 'D1M', 'AF1', 'DR2', 'EQP', 'AMV', 'PRP', 'VJ4', 'BCD', '1GN', 'SMD', '9QG', 'GCW', 'A5C', 'M3N', 'SZZ', 'B1H',
944
+ 'GPH', 'NDG', '5KT', 'TYV', 'KDM', 'A2G', 'CE6', 'H1M', 'JVS', 'ABL', 'LAO', 'P53', 'GCN', 'QKH', 'U2D', 'YYH', '6S2',
945
+ 'L0W', 'DEL', 'G2F', 'LER', 'MGC', 'RI2', '5KQ', 'DT6', 'U97', 'BG8', '1X4', 'GYG', 'U9D', 'SG7', '8B7', 'FCA', 'RWI',
946
+ '8GG', 'TAG', 'ERE', '46Z', '5QP', 'UDC', '51N', 'SGN', 'NLC', '8LR', 'L6T', 'WIA', 'TMR', 'IDC', 'GLT', 'FDP', 'GCT',
947
+ 'FSW', 'XYS', 'GAA', 'N9S', 'DO8', 'UAP', 'TUG', 'F1P', '2FG', '12E', '56N', 'IAB', 'LAT', 'X1X', 'MBE', 'GP1', 'X34',
948
+ '6MJ', '6KH', 'G3F', '3DY', 'XYF', 'GE1', 'MAB', 'Z9L', '289', 'GIV', 'F8X', '9WN', 'KDO', 'GLA', 'SIZ', 'G0S', 'EGA',
949
+ 'MJJ', 'B7G', 'BND', 'JRV', '1S3', 'DAG', 'GL0', 'GPV', 'HTM', '3R3', 'SHG', 'DR3', 'TTV', 'DK4', '22S', 'IDU', 'XIL',
950
+ 'RER', '6BG', 'GXV', 'BTU', 'GE3', 'H6Z', 'ZD0', 'SF6', 'VKN', 'GYU', '16F', 'K99', 'KGM', 'FX1', 'NGS', 'RVG', 'YX1',
951
+ '4GC', 'EEQ', 'XDX', 'MVP', 'PNJ', 'BS7', 'M7B', '0BD', 'AIG', 'TVV', 'BXY', 'T68', 'SIO', '8OQ', '2OS', 'S7P', 'GNX',
952
+ 'TUR', 'YX0', 'DVC', 'NGK', 'M8C', 'RHC', 'GPM', 'LKS', '64K', 'GMT', 'JLT', 'XS2', 'LBT', 'TVM', '6MN', 'DYM', 'E3M',
953
+ 'NGR', 'G6P', 'RAO', 'SCR', 'YJM', 'MRP', 'YIO', 'ACR', '291', '3GR', 'M1F', 'L6S', 'XLF', 'GU1', 'LVZ', 'DNO', '22O',
954
+ 'SOL', 'GPW', 'KD5', 'GCU', 'ERI', 'YZ0', 'TXB', 'ABD', 'YYM', 'BFN', 'G4S', 'GAC', 'PAV', 'MMA', 'RV7', 'MBG', '16G',
955
+ 'MA8', 'GU8', '4JA', 'NTP', 'FNY', '07Y', '1CF', 'KDE', 'Z16', 'CBI', '50A', 'Z4W', 'U9G', 'D6G', 'JSV', 'YDR', 'DGU',
956
+ 'Z15', 'G3I', 'XKJ', 'IEM', 'CDR', 'GLG', '0HX', 'TA6', '57S', 'LGU', '27C', 'BO1', 'EEN', 'HSJ', 'GLD', 'RP3', 'FSI',
957
+ 'LRH', '8PK', 'GTR', 'B1N', 'XXR', 'TFU', 'RAF', 'ETT', 'AY9', '3FM', 'G28', '2DR', 'FUL', 'CE8', 'GQ1', 'TGA', '6C2',
958
+ 'NGZ', '6LS', 'SOE', 'BQY', 'HSH', 'XYL', '5TH', 'A1Q', 'HTG', 'Z3K', '3MG', 'GMH', 'M1P', 'ASC', '73E', 'Z8T', 'STZ',
959
+ 'RAE', 'GL6', '7CV', 'GPU', '5L3', '7D1', 'CKP', 'BXP', 'M7P', 'RVM', 'TWA', '4R1', 'N1L', 'X2F', 'TVD', '3J3', 'TOA',
960
+ 'B4G', 'WUN', '0MK', '6YR', 'H6Q', 'CNP', 'TEU', 'MBF', '44S', 'Z9N', 'BM7', 'NGE', 'U9M', 'GMB', 'MTT', '9GP', 'DG0',
961
+ 'RP5', 'KBA', 'ALX', 'FVQ', 'TGY', 'EBG', 'BXF', '9C1', 'BBV', 'AFD', '4QY', 'GCD', 'FBP', '96O', 'GNS', 'OTU', 'ACX',
962
+ 'RP6', 'UEA', 'SGC', 'Z4V', 'RAM', 'AZC', 'J5B', '1GL', 'TGK', 'HSQ', 'LM2', 'MYG', 'PDX', 'Z6W', 'ZDC', '09X', 'IDX',
963
+ '9MR', 'MFU', 'CR6', 'Z8H', 'SUS', 'PZU', '89Y', '5TK', 'KME', 'U1Y', 'Z4U', 'LCN', 'GPK', 'MUR', '5TJ', 'NYT', '24S',
964
+ 'SR1', '0UB', '48Z', 'MGL', 'Z6J', 'BMX', 'C3B', 'TVS', 'SLB', 'IPT', 'MLB', 'SLT', 'Z9D', 'GRX', 'AH8', 'F6P', 'BNX',
965
+ 'JZR', 'LXB', 'M3M', 'XYT', 'MA1', 'GTM', 'SCG', 'Z3Q', 'KFN', 'LGC', 'ZB2', 'FIF', 'GLS', 'SSG', 'Z4Y', 'T6T', 'GCS',
966
+ 'GZL', 'U8V', 'V3P', 'ABE', 'MGS', '6KW', '8GA', 'BZD', 'FUF', 'GMZ', 'FUY', 'HNW', 'LXZ', 'IN1', 'SNG', 'GAT', 'Z9M',
967
+ 'BM3', 'ZDO', '9AM', '3LJ', 'X0X', 'MAN', '5GO', 'AMU', 'GUF', 'XMM', 'EAG', 'SUC', 'BXX', 'Z0F', '9OK', 'CTT', 'MLR',
968
+ '49V', 'ZMR', 'TWJ', 'MAW', '5II', 'ZEE', 'KBG', 'EMP', 'GUZ', 'TUJ', 'RB5', 'GCB', '9KJ', 'MAG', 'Z2D', '6LA', '2M4',
969
+ 'GN4', 'MDA', 'TU4', 'Z2T', 'GL4', 'EBQ', 'NNG', '1SD', 'ANA', 'MXY', 'Z6H', 'GU0', 'GUP', 'SG6', 'NAG', '9VP', 'RIP',
970
+ '3S6', 'KDR', 'R1P', '3J4', 'DFX', 'RGG'} # fmt: skip
971
+
972
+
973
+ # AlphaFold3 SI Table 15
974
+ PBV2_COMMON_NATURAL_LIGANDS = {'UPG', 'CDP', 'DSG', 'APC', 'GSP', 'FAD', 'IPE', 'NAI', '2BA', 'PGA', 'A3P', 'PRP', 'NAD', 'PLG',
975
+ 'SFG', 'MFU', 'APR', 'GTP', 'PLP', 'UDP', 'SAH', 'ACP', 'GSH', 'CTP', 'AKG', 'F15', '5AD', 'BCN',
976
+ 'BDP', 'H4B', 'PHO', 'FMN', 'MTA', 'NGA', 'OGA', 'SLB', 'SIN', 'C5P', 'TPP', 'BGC', 'NCA', 'UD1',
977
+ 'ANP', 'DGL', 'FDA', 'URI', 'ADP', 'MTE', 'PJ8', 'ATP'} # fmt: skip
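
The constants above give downstream featurizers a single index space: the 21 protein, 5 RNA and 5 DNA residue types occupy ids 0-30, the gap token is 31, and element symbols are offset by len(STD_RESIDUES) so residues and elements share one vocabulary (the comments reference the corresponding AlphaFold3 SI tables). A small sketch of how these lookups behave, assuming protenix.data.constants is importable and rdkit is installed:

from protenix.data.constants import (
    ELEMS,
    RES_ATOMS_DICT,
    STD_RESIDUES,
    STD_RESIDUES_WITH_GAP,
)

print(len(STD_RESIDUES))           # 31 standard residue types (protein + RNA + DNA)
print(STD_RESIDUES_WITH_GAP["-"])  # gap token -> 31
print(ELEMS["C"])                  # carbon -> 36 (offset 31 + zero-based atomic-number index 5)
print(len(ELEMS))                  # 128 element slots (atomic numbers 1-118 plus UNK_ELEM placeholders)
print(RES_ATOMS_DICT["GLY"])       # {'N': 0, 'CA': 1, 'C': 2, 'O': 3, 'OXT': 4}
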
protenix/data/constraint_featurizer.py ADDED
@@ -0,0 +1,2414 @@
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import copy
16
+ import hashlib
17
+ from typing import Any
18
+
19
+ import numpy as np
20
+ import pandas as pd
21
+ import torch
22
+ import torch.nn.functional as F
23
+ from biotite.structure import AtomArray
24
+ from scipy.spatial.distance import cdist
25
+
26
+ from protenix.data.constants import ELEMS, STD_RESIDUES
27
+ from protenix.data.tokenizer import Token, TokenArray
28
+ from protenix.data.utils import get_atom_mask_by_name
29
+ from protenix.utils.logger import get_logger
30
+
31
+ logger = get_logger(__name__)
32
+
33
+
34
+ class ConstraintFeatureGenerator:
35
+ """Generates constraint features by coordinating different featurizers"""
36
+
37
+ def __init__(
38
+ self, constraint_config: dict[str, Any], ab_top2_clusters: set[str] = None
39
+ ):
40
+ """
41
+ Args:
42
+ constraint_config: Configuration dict for constraints
43
+ ab_top2_clusters: Optional antibody cluster info
44
+ """
45
+ self.constraint = constraint_config
46
+ self.ab_top2_clusters = ab_top2_clusters
47
+
48
+ # function set for inference
49
+ @staticmethod
50
+ def _canonicalize_contact_format(
51
+ sequences: list[dict[str, Any]], pair: dict[str, Any]
52
+ ) -> dict[str, Any]:
53
+ pair = copy.deepcopy(pair)
54
+ _pair = {}
55
+
56
+ for id_num in ["1", "2"]:
57
+ res_id = f"residue{id_num}"
58
+ if (res_info := pair.get(res_id, None)) is not None:
59
+ assert len(res_info) == 3, "residue contact should have 3 identifiers"
60
+ res_info.append(None) # Add None for atom_name
61
+ _pair[f"id{id_num}"] = res_info
62
+
63
+ atom_id = f"atom{id_num}"
64
+ if (atom_info := pair.get(atom_id, None)) is not None:
65
+ assert len(atom_info) == 4, "atom contact must have 4 identifiers"
66
+
67
+ entity_id, atom_name = atom_info[0], atom_info[3]
68
+ if isinstance(atom_name, int):
69
+ entity_dict = list(sequences[int(entity_id - 1)].values())[0]
70
+ assert "atom_map_to_atom_name" in entity_dict
71
+ atom_info[3] = entity_dict["atom_map_to_atom_name"][atom_name]
72
+
73
+ _pair[f"id{id_num}"] = atom_info
74
+
75
+ if hash(tuple(_pair["id1"][:2])) == hash(tuple(_pair["id2"][:2])):
76
+ raise ValueError("A contact pair can not be specified on the same chain")
77
+
78
+ _pair["min_distance"] = float(pair.get("min_distance", 0))
79
+ _pair["max_distance"] = float(pair["max_distance"])
80
+ if _pair["max_distance"] < _pair["min_distance"]:
81
+ raise ValueError("max_distance must be greater than min_distance")
82
+ if "atom1" in pair and "atom2" in pair:
83
+ _pair["contact_type"] = "atom_contact"
84
+ else:
85
+ _pair["contact_type"] = "token_contact"
86
+ return _pair
87
+
88
+ @staticmethod
89
+ def _canonicalize_pocket_res_format(binder: list, pocket_pos: list) -> list:
90
+ assert len(pocket_pos) == 3
91
+ if hash(tuple(binder[:2])) == hash(tuple(pocket_pos[:2])):
92
+ raise ValueError("Pockets can not be the same chain with the binder")
93
+ return pocket_pos
94
+
95
+ @staticmethod
96
+ def _log_constraint_feature(
97
+ atom_array: AtomArray, token_array: TokenArray, constraint_feature: dict
98
+ ):
99
+
100
+ atom_feature = constraint_feature["contact_atom"]
101
+ if atom_feature.sum() > 0:
102
+ # logging contact feature
103
+ token_idx_1, token_idx_2 = torch.nonzero(
104
+ torch.triu(atom_feature[..., 1]), as_tuple=True
105
+ )
106
+
107
+ atom1_index = token_array[token_idx_1].get_annotation("centre_atom_index")
108
+ atom2_index = token_array[token_idx_2].get_annotation("centre_atom_index")
109
+
110
+ res_name_1 = atom_array.res_name[atom1_index]
111
+ res_name_2 = atom_array.res_name[atom2_index]
112
+
113
+ atom_name_1 = atom_array.atom_name[atom1_index]
114
+ atom_name_2 = atom_array.atom_name[atom2_index]
115
+
116
+ chain_id_1 = atom_array.chain_id[atom1_index]
117
+ chain_id_2 = atom_array.chain_id[atom2_index]
118
+
119
+ max_distance = atom_feature[token_idx_1, token_idx_2, 1]
120
+ min_distance = atom_feature[token_idx_1, token_idx_2, 0]
121
+
122
+ contact_info = {
123
+ "chain_id": np.stack([chain_id_1, chain_id_2]).T.tolist(),
124
+ "res_name": np.stack([res_name_1, res_name_2]).T.tolist(),
125
+ "atom_name": np.stack([atom_name_1, atom_name_2]).T.tolist(),
126
+ "max_distance": max_distance.tolist(),
127
+ "min_distance": min_distance.tolist(),
128
+ }
129
+ logger.info(f"loaded atom contact info:{contact_info}")
130
+
131
+ token_feature = constraint_feature["contact"]
132
+ if token_feature.sum() > 0:
133
+ # logging contact feature
134
+ token_idx_1, token_idx_2 = torch.nonzero(
135
+ torch.triu(token_feature[..., 1]), as_tuple=True
136
+ )
137
+
138
+ atom1_index = token_array[token_idx_1].get_annotation("centre_atom_index")
139
+ atom2_index = token_array[token_idx_2].get_annotation("centre_atom_index")
140
+
141
+ res_name_1 = atom_array.res_name[atom1_index]
142
+ res_name_2 = atom_array.res_name[atom2_index]
143
+
144
+ atom_name_1 = atom_array.atom_name[atom1_index]
145
+ atom_name_2 = atom_array.atom_name[atom2_index]
146
+
147
+ chain_id_1 = atom_array.chain_id[atom1_index]
148
+ chain_id_2 = atom_array.chain_id[atom2_index]
149
+
150
+ max_distance = token_feature[token_idx_1, token_idx_2, 1]
151
+ min_distance = token_feature[token_idx_1, token_idx_2, 0]
152
+
153
+ contact_info = {
154
+ "chain_id": np.stack([chain_id_1, chain_id_2]).T.tolist(),
155
+ "res_name": np.stack([res_name_1, res_name_2]).T.tolist(),
156
+ "atom_name": np.stack([atom_name_1, atom_name_2]).T.tolist(),
157
+ "max_distance": max_distance.tolist(),
158
+ "min_distance": min_distance.tolist(),
159
+ }
160
+ logger.info(f"loaded contact info:{contact_info}")
161
+
162
+ pocket_feature = constraint_feature["pocket"]
163
+ if pocket_feature.sum() > 0:
164
+ # logging contact feature
165
+ binder_idx, pocket_idx = torch.nonzero(
166
+ pocket_feature[..., 0], as_tuple=True
167
+ )
168
+ atom1_index = token_array[binder_idx].get_annotation("centre_atom_index")
169
+ atom2_index = token_array[pocket_idx].get_annotation("centre_atom_index")
170
+
171
+ res_name_1 = atom_array.res_name[atom1_index]
172
+ res_name_2 = atom_array.res_name[atom2_index]
173
+
174
+ atom_name_1 = atom_array.atom_name[atom1_index]
175
+ atom_name_2 = atom_array.atom_name[atom2_index]
176
+
177
+ chain_id_1 = atom_array.chain_id[atom1_index]
178
+ chain_id_2 = atom_array.chain_id[atom2_index]
179
+
180
+ distance = pocket_feature[binder_idx, pocket_idx, 0]
181
+
182
+ pocket_info = {
183
+ "binder_chain_id": np.unique(chain_id_1).tolist(),
184
+ "binder_res_name": np.unique(res_name_1).tolist(),
185
+ "binder_atom_name": np.unique(atom_name_1).tolist(),
186
+ "pocket_chain_id": np.unique(chain_id_2).tolist(),
187
+ "pocket_res_name": np.unique(res_name_2).tolist(),
188
+ "pocket_atom_name": np.unique(atom_name_2).tolist(),
189
+ "distance": distance.unique().item(),
190
+ }
191
+ logger.info(f"loaded pocket info:{pocket_info}")
192
+
193
+ @staticmethod
194
+ def generate_from_json(
195
+ token_array: TokenArray,
196
+ atom_array: AtomArray,
197
+ sequences: list[dict[str, Any]],
198
+ constraint_param: dict,
199
+ ) -> tuple[dict[str, Any], TokenArray, AtomArray]:
200
+ feature_dict = {}
201
+
202
+ # build atom-level contact features
203
+ contact_inputs = [
204
+ ConstraintFeatureGenerator._canonicalize_contact_format(sequences, pair)
205
+ for pair in constraint_param.get("contact", [])
206
+ ]
207
+ atom_contact_inputs = list(
208
+ filter(lambda d: d["contact_type"] == "atom_contact", contact_inputs)
209
+ )
210
+ token_contact_inputs = list(
211
+ filter(lambda d: d["contact_type"] == "token_contact", contact_inputs)
212
+ )
213
+
214
+ atom_to_token_idx_dict = {}
215
+ for idx, token in enumerate(token_array.tokens):
216
+ for atom_idx in token.atom_indices:
217
+ atom_to_token_idx_dict[atom_idx] = idx
218
+
219
+ atom_contact_specifics = []
220
+ for i, pair in enumerate(atom_contact_inputs):
221
+ atom_mask1 = get_atom_mask_by_name(
222
+ atom_array=atom_array,
223
+ entity_id=pair["id1"][0],
224
+ copy_id=pair["id1"][1],
225
+ position=pair["id1"][2],
226
+ atom_name=pair["id1"][3],
227
+ )
228
+ atom_mask2 = get_atom_mask_by_name(
229
+ atom_array=atom_array,
230
+ entity_id=pair["id2"][0],
231
+ copy_id=pair["id2"][1],
232
+ position=pair["id2"][2],
233
+ atom_name=pair["id2"][3],
234
+ )
235
+ atom_list_1 = np.nonzero(atom_mask1)[0]
236
+ atom_list_2 = np.nonzero(atom_mask2)[0]
237
+ if np.size(atom_list_1) != 1 or np.size(atom_list_2) != 1:
238
+ logger.info(f"Atom contact {i} not found for the input")
239
+ continue
240
+ atom_contact_specifics.append(
241
+ (
242
+ atom_list_1.item(),
243
+ atom_list_2.item(),
244
+ pair["max_distance"],
245
+ pair["min_distance"],
246
+ )
247
+ )
248
+
249
+ token_array, atom_array, atom_contact_specifics, _, _ = (
250
+ ConstraintFeatureGenerator.expand_tokens(
251
+ token_array,
252
+ atom_array,
253
+ atom_contact_specifics,
254
+ atom_to_token_idx_dict,
255
+ None,
256
+ )
257
+ )
258
+ atom_contact_featurizer = ContactAtomFeaturizer(
259
+ token_array=token_array, atom_array=atom_array
260
+ )
261
+ feature_dict["contact_atom"], _ = (
262
+ atom_contact_featurizer.generate_spec_constraint(
263
+ atom_contact_specifics,
264
+ feature_type="continuous",
265
+ shape=(len(token_array), len(token_array), 2),
266
+ )
267
+ )
268
+
269
+ # build token-level contact
270
+ atom_to_token_idx_dict = {}
271
+ for idx, token in enumerate(token_array.tokens):
272
+ for atom_idx in token.atom_indices:
273
+ atom_to_token_idx_dict[atom_idx] = idx
274
+ atom_to_token_idx = np.array(
275
+ [atom_to_token_idx_dict[atom_idx] for atom_idx in range(len(atom_array))]
276
+ )
277
+ token_contact_specifics = []
278
+ for i, pair in enumerate(token_contact_inputs):
279
+ atom_mask1 = get_atom_mask_by_name(
280
+ atom_array=atom_array,
281
+ entity_id=pair["id1"][0],
282
+ copy_id=pair["id1"][1],
283
+ position=pair["id1"][2],
284
+ atom_name=pair["id1"][3],
285
+ )
286
+ atom_mask2 = get_atom_mask_by_name(
287
+ atom_array=atom_array,
288
+ entity_id=pair["id2"][0],
289
+ copy_id=pair["id2"][1],
290
+ position=pair["id2"][2],
291
+ atom_name=pair["id2"][3],
292
+ )
293
+ token_list_1 = atom_to_token_idx[atom_mask1]
294
+ token_list_2 = atom_to_token_idx[atom_mask2]
295
+ if np.size(token_list_1) == 0 or np.size(token_list_2) == 0:
296
+ logger.info(f"Contact {i} not found for the input")
297
+ continue
298
+ token_contact_specifics.append(
299
+ (token_list_1, token_list_2, pair["max_distance"], 0)
300
+ ) # default min_distance=0
301
+
302
+ contact_featurizer = ContactFeaturizer(
303
+ token_array=token_array, atom_array=atom_array
304
+ )
305
+ feature_dict["contact"], _ = contact_featurizer.generate_spec_constraint(
306
+ token_contact_specifics, feature_type="continuous"
307
+ )
308
+
309
+ # build pocket features
310
+ pocket_specifics = []
311
+ if pocket := constraint_param.get("pocket", {}):
312
+ distance = pocket["max_distance"]
313
+ binder = pocket["binder_chain"]
314
+
315
+ assert len(binder) == 2
316
+
317
+ atom_mask_binder = get_atom_mask_by_name(
318
+ atom_array=atom_array,
319
+ entity_id=binder[0],
320
+ copy_id=binder[1],
321
+ )
322
+
323
+ binder_asym_id = torch.tensor(
324
+ atom_array.asym_id_int[atom_mask_binder], dtype=torch.long
325
+ )
326
+ num_binder = binder_asym_id.unique().numel()
327
+ if num_binder == 0:
328
+ logger.info(f"Binder does not exist. {i},{num_binder}")
329
+ elif num_binder > 1:
330
+ logger.info(f"#Binders is more than 1. {i},{num_binder}")
331
+ else:
332
+ binder_token_list = atom_to_token_idx[atom_mask_binder]
333
+
334
+ for j, pocket_res in enumerate(pocket["contact_residues"]):
335
+ pocket_res = (
336
+ ConstraintFeatureGenerator._canonicalize_pocket_res_format(
337
+ binder, pocket_res
338
+ )
339
+ )
340
+
341
+ atom_mask_pocket = get_atom_mask_by_name(
342
+ atom_array=atom_array,
343
+ entity_id=pocket_res[0],
344
+ copy_id=pocket_res[1],
345
+ position=pocket_res[2],
346
+ )
347
+ pocket_token_list = atom_to_token_idx[atom_mask_pocket]
348
+
349
+ if np.size(pocket_token_list) == 0:
350
+ logger.info(f"Pocket not found: {i}:{j}")
351
+ continue
352
+
353
+ pocket_specifics.append(
354
+ (binder_token_list, pocket_token_list, distance)
355
+ )
356
+ logger.info(f"#pocket:{len(pocket_specifics)}")
357
+
358
+ pocket_featurizer = PocketFeaturizer(
359
+ token_array=token_array, atom_array=atom_array
360
+ )
361
+ feature_dict["pocket"], _ = pocket_featurizer.generate_spec_constraint(
362
+ pocket_specifics,
363
+ feature_type="continuous",
364
+ )
365
+
366
+ # build substructure features
367
+ substructure_specifics = {
368
+ "token_indices": [],
369
+ "token_coords": [],
370
+ }
371
+ if substructure := constraint_param.get("structure", {}):
372
+ # TODO parse substructure specifics
373
+ pass
374
+ substructure_featurizer = SubStructureFeaturizer(
375
+ token_array=token_array, atom_array=atom_array
376
+ )
377
+ feature_dict["substructure"] = substructure_featurizer.generate_spec_constraint(
378
+ substructure_specifics, feature_type="one_hot"
379
+ )
380
+
381
+ logger.info(
382
+ f"Loaded constraint feature: #atom contact:{len(atom_contact_specifics)} #contact:{len(token_contact_specifics)} #pocket:{len(pocket_specifics)}"
383
+ )
384
+ ConstraintFeatureGenerator._log_constraint_feature(
385
+ atom_array, token_array, feature_dict
386
+ )
387
+
388
+ return feature_dict, token_array, atom_array
389
+
390
+ # function set for training
391
+ def generate(
392
+ self,
393
+ atom_array: AtomArray,
394
+ token_array: TokenArray,
395
+ sample_indice: pd.core.series.Series,
396
+ pdb_indice: pd.core.series.Series,
397
+ msa_features: dict[str, np.ndarray],
398
+ max_entity_mol_id: int,
399
+ full_atom_array: AtomArray,
400
+ ) -> tuple[
401
+ TokenArray,
402
+ AtomArray,
403
+ dict[str, np.ndarray],
404
+ dict[str, torch.Tensor],
405
+ dict[str, Any],
406
+ torch.Tensor,
407
+ AtomArray,
408
+ ]:
409
+ """Generate all constraint features
410
+
411
+ Args:
412
+ idx: Data index
413
+ atom_array: Atom array data
414
+ token_array: Token array data
415
+ sample_indice: Sample index information
416
+ pdb_indice: PDB index information
417
+ msa_features: MSA features
418
+ max_entity_mol_id: Maximum entity mol id
419
+ full_atom_array: Full atom array data
420
+ Returns:
421
+ Dictionary of constraint features
422
+ """
423
+ # Setup constraint generator
424
+ constraint_generator = self._setup_constraint_generator(sample_indice)
425
+
426
+ # Get base features for constraint featurizer
427
+ features_dict = self._get_base_features(token_array, atom_array)
428
+
429
+ # Generate contact atom features
430
+ (_, contact_atom_featurizer, contact_pairs, tokens_w_atom_contact) = (
431
+ self._generate_contact_atom_features(
432
+ atom_array,
433
+ token_array,
434
+ constraint_generator,
435
+ features_dict,
436
+ pdb_indice,
437
+ )
438
+ )
439
+
440
+ # Expand token according to atom-contact pairs
441
+ if len(contact_pairs) > 0:
442
+ logger.info("Expanding tokens for contact atom constraint feature")
443
+ token_array, atom_array, contact_pairs, token_map, full_atom_array = (
444
+ ConstraintFeatureGenerator.expand_tokens(
445
+ token_array,
446
+ atom_array,
447
+ contact_pairs,
448
+ features_dict["atom_to_token_dict"],
449
+ full_atom_array,
450
+ )
451
+ )
452
+ features_dict = self._get_base_features(token_array, atom_array)
453
+ else:
454
+ token_map = {}
455
+ # make atom contact feature
456
+ contact_atom_constraint_feature, tokens_w_atom_contact = (
457
+ contact_atom_featurizer.generate_spec_constraint(
458
+ contact_pairs,
459
+ feature_type=self.constraint.get("contact_atom", {}).get(
460
+ "feature_type", "continuous"
461
+ ),
462
+ shape=(len(token_array), len(token_array), 2),
463
+ )
464
+ )
465
+
466
+ # Expand MSA features
467
+ if len(msa_features) > 0 and len(contact_pairs) > 0:
468
+ msa_features = self.expand_msa_features(msa_features, token_map)
469
+
470
+ # Generate pocket features
471
+ pocket_constraint_feature, _, tokens_w_pocket = self._generate_pocket_features(
472
+ atom_array,
473
+ token_array,
474
+ constraint_generator,
475
+ sample_indice,
476
+ )
477
+
478
+ # Generate contact features
479
+ contact_constraint_feature, _, _, tokens_w_contact = (
480
+ self._generate_contact_features(
481
+ atom_array,
482
+ token_array,
483
+ constraint_generator,
484
+ features_dict,
485
+ pdb_indice,
486
+ )
487
+ )
488
+
489
+ # Generate substructure features
490
+ (
491
+ substructure_constraint_feature,
492
+ substructure_featurizer,
493
+ tokens_w_substructure,
494
+ ) = self._generate_substructure_features(
495
+ atom_array,
496
+ token_array,
497
+ constraint_generator,
498
+ sample_indice,
499
+ )
500
+
501
+ # Combine features
502
+ constraint_feature = {
503
+ "contact": contact_constraint_feature,
504
+ "pocket": pocket_constraint_feature,
505
+ "contact_atom": contact_atom_constraint_feature,
506
+ "substructure": substructure_constraint_feature,
507
+ }
508
+
509
+ # change entity_mol_id in case of permutation of constraint pairs
510
+ featured_tokens = (
511
+ tokens_w_contact
512
+ | tokens_w_atom_contact
513
+ | tokens_w_pocket
514
+ | tokens_w_substructure
515
+ )
516
+
517
+ if max_entity_mol_id is not None:
518
+ atom_array, full_atom_array = self.change_entity_mol_id(
519
+ token_array,
520
+ atom_array,
521
+ max_entity_mol_id,
522
+ full_atom_array,
523
+ featured_tokens,
524
+ )
525
+
526
+ # Log feature statistics
527
+ feature_info = self._get_feature_statistics(
528
+ constraint_feature,
529
+ atom_array,
530
+ token_array,
531
+ features_dict,
532
+ contact_atom_featurizer,
533
+ substructure_featurizer,
534
+ )
535
+ log_constraint = self._log_feature_statistics(feature_info)
536
+ return (
537
+ token_array,
538
+ atom_array,
539
+ msa_features,
540
+ constraint_feature,
541
+ feature_info,
542
+ log_constraint,
543
+ full_atom_array,
544
+ )
545
+
546
+ @staticmethod
547
+ def expand_tokens(
548
+ token_array: TokenArray,
549
+ atom_array: AtomArray,
550
+ contact_pairs: list[tuple[int, int, float, float]],
551
+ atom_to_token: dict[int, int],
552
+ full_atom_array: AtomArray,
553
+ ) -> tuple[TokenArray, AtomArray, list[tuple[int, int, float, float]], dict[int, list[int]], AtomArray]:
554
+ """
555
+ Expand selected tokens into atom-level tokens and update related arrays.
556
+
557
+ Args:
558
+ token_array: Original token array
559
+ atom_array: Original atom array
560
+ contact_pairs: Original contact pairs
561
+ atom_to_token: Atom to token mapping
562
+ full_atom_array: Full atom array
563
+ Returns:
564
+ Updated token array, atom array, transformed constraint pairs, the old-to-new token map and the full atom array
565
+ """
566
+ # Update token array
567
+ tokens_to_expand = set()
568
+ for atom_i, atom_j, _, _ in contact_pairs:
569
+ tokens_to_expand.add(atom_to_token[atom_i])
570
+ tokens_to_expand.add(atom_to_token[atom_j])
571
+
572
+ if len(tokens_to_expand) == 0:
573
+ return token_array, atom_array, contact_pairs, {}, full_atom_array
574
+
575
+ new_tokens = []
576
+ # Maps old token idx to list of new token indices
577
+ token_map = {}
578
+ curr_token_idx = 0
579
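+ # For illustration (hypothetical indices): a standard residue token at old index 5 that is
+ # split into 8 atom-level tokens yields token_map[5] = [12, 13, ..., 19]; untouched tokens
+ # map to a single new index.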
+
580
+ for old_token_idx, token in enumerate(token_array):
581
+ if old_token_idx in tokens_to_expand:
582
+ # Check if token represents standard residue
583
+ centre_atom = atom_array[token.centre_atom_index]
584
+ if (
585
+ centre_atom.res_name in STD_RESIDUES
586
+ and centre_atom.mol_type != "ligand"
587
+ ):
588
+ # Expand token into atom-level tokens
589
+ atom_tokens = []
590
+ for atom_idx, atom_name in zip(
591
+ token.atom_indices, token.atom_names
592
+ ):
593
+ atom = atom_array[atom_idx]
594
+ atom_token = Token(ELEMS[atom.element])
595
+ atom_token.atom_indices = [atom_idx]
596
+ atom_token.atom_names = [atom_name]
597
+ atom_token.centre_atom_index = atom_idx
598
+ atom_tokens.append(atom_token)
599
+ new_tokens.extend(atom_tokens)
600
+ token_map[old_token_idx] = list(
601
+ range(curr_token_idx, curr_token_idx + len(atom_tokens))
602
+ )
603
+ curr_token_idx += len(atom_tokens)
604
+ else:
605
+ new_tokens.append(token)
606
+ token_map[old_token_idx] = [curr_token_idx]
607
+ curr_token_idx += 1
608
+ else:
609
+ new_tokens.append(token)
610
+ token_map[old_token_idx] = [curr_token_idx]
611
+ curr_token_idx += 1
612
+
613
+ updated_token_array = TokenArray(new_tokens)
614
+
615
+ # Create atom_idx to new_token_idx mapping for expanded tokens
616
+ atom_to_new_token = {}
617
+ for new_token_idx, token in enumerate(updated_token_array):
618
+ for atom_idx in token.atom_indices:
619
+ atom_to_new_token[atom_idx] = new_token_idx
620
+
621
+ # Update atom array annotations
622
+ atom_array.centre_atom_mask = np.zeros(len(atom_array), dtype=bool)
623
+ for token in updated_token_array:
624
+ atom_array.centre_atom_mask[token.centre_atom_index] = True
625
+
626
+ # Update tokatom_idx and distogram_rep_atom_mask
627
+ expanded_atoms = set()
628
+
629
+ for tokens in token_map.values():
630
+ if len(tokens) > 1: # Expanded tokens
631
+ for token_idx in tokens:
632
+ token = updated_token_array[token_idx]
633
+ expanded_atoms.update(token.atom_indices)
634
+
635
+ atom_array.tokatom_idx = np.array(
636
+ [
637
+ 0 if i in expanded_atoms else idx
638
+ for i, idx in enumerate(atom_array.tokatom_idx)
639
+ ]
640
+ )
641
+
642
+ atom_array.distogram_rep_atom_mask = np.array(
643
+ [
644
+ 1 if i in expanded_atoms else mask
645
+ for i, mask in enumerate(atom_array.distogram_rep_atom_mask)
646
+ ]
647
+ )
648
+ if len(expanded_atoms) > 0:
649
+ logger.info(f"Expanded atoms: {expanded_atoms}")
650
+
651
+ # Create mapping between atom_array and full_atom_array atoms
652
+ if full_atom_array is not None:
653
+ expanded_atom_keys = set()
654
+ for atom_idx in expanded_atoms:
655
+ atom = atom_array[atom_idx]
656
+ # Create unique key using chain_id, res_id and atom_name
657
+ atom_key = (atom.chain_id, atom.res_id, atom.atom_name)
658
+ expanded_atom_keys.add(atom_key)
659
+
660
+ # Update full_atom_array centre_atom_mask using the mapping
661
+ full_atom_array.centre_atom_mask = np.array(
662
+ [
663
+ (
664
+ 1
665
+ if (atom.chain_id, atom.res_id, atom.atom_name)
666
+ in expanded_atom_keys
667
+ else mask
668
+ )
669
+ for atom, mask in zip(
670
+ full_atom_array, full_atom_array.centre_atom_mask
671
+ )
672
+ ]
673
+ )
674
+ # Transform constraint pairs using atom_to_new_token mapping
675
+ transformed_pairs = []
676
+ for atom_i, atom_j, min_dist, max_dist in contact_pairs:
677
+ new_token_i = atom_to_new_token[atom_i]
678
+ new_token_j = atom_to_new_token[atom_j]
679
+ transformed_pairs.append((new_token_i, new_token_j, min_dist, max_dist))
680
+
681
+ return (
682
+ updated_token_array,
683
+ atom_array,
684
+ transformed_pairs,
685
+ token_map,
686
+ full_atom_array,
687
+ )
688
+
689
+ def expand_msa_features(
690
+ self,
691
+ msa_features: dict[str, np.ndarray],
692
+ token_map: dict[
693
+ int, list[int]
694
+ ], # Maps old token idx to list of new token indices
695
+ ) -> dict[str, np.ndarray]:
696
+ """
697
+ Expand MSA features for expanded tokens.
698
+
699
+ Args:
700
+ msa_features: Original MSA features
701
+ token_map: Mapping from old token indices to new token indices
702
+
703
+ Returns:
704
+ Updated MSA features with expanded tokens
705
+ """
706
+ # Calculate new number of tokens
707
+ num_new_tokens = max(max(new_idxs) for new_idxs in token_map.values()) + 1
708
+
709
+ old_indices = []
710
+ new_indices = []
711
+ for old_idx, new_idxs in token_map.items():
712
+ old_indices.extend([old_idx] * len(new_idxs))
713
+ new_indices.extend(new_idxs)
714
+ old_indices = np.array(old_indices, dtype=int)
715
+ new_indices = np.array(new_indices, dtype=int)
716
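+ # old_indices repeats an old column once per derived new token, so every atom-level token of
+ # an expanded residue receives an identical copy of that residue's MSA column.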
+
717
+ # For sequence-based features (msa, has_deletion, deletion_value)
718
+ for feat_name in ["msa", "has_deletion", "deletion_value"]:
719
+ if feat_name not in msa_features:
720
+ continue
721
+
722
+ feat = msa_features[feat_name]
723
+ num_seqs = feat.shape[0] # Number of sequences in MSA
724
+
725
+ # Create new feature array
726
+ new_feat = np.zeros((num_seqs, num_new_tokens), dtype=feat.dtype)
727
+
728
+ # Copy features according to token mapping
729
730
+ new_feat[:, new_indices] = feat[:, old_indices]
731
+ msa_features[feat_name] = new_feat
732
+
733
+ # Handle deletion_mean (1D array)
734
+ if "deletion_mean" in msa_features:
735
+ feat = msa_features["deletion_mean"]
736
+ new_feat = np.zeros(num_new_tokens, dtype=feat.dtype)
737
+ new_feat[new_indices] = feat[old_indices]
738
+ msa_features["deletion_mean"] = new_feat
739
+
740
+ # Handle profile (2D array: tokens x channels)
741
+ if "profile" in msa_features:
742
+ feat = msa_features["profile"]
743
+ num_channels = feat.shape[1]
744
+ new_feat = np.zeros((num_new_tokens, num_channels), dtype=feat.dtype)
745
+ new_feat[new_indices, :] = feat[old_indices, :]
746
+ msa_features["profile"] = new_feat
747
+
748
+ return msa_features
749
+
750
+ def change_entity_mol_id(
751
+ self,
752
+ token_array: TokenArray,
753
+ atom_array: AtomArray,
754
+ max_entity_mol_id: int,
755
+ full_atom_array: AtomArray,
756
+ featured_tokens: set[int],
757
+ ) -> tuple[AtomArray, AtomArray]:
758
+ """Update entity_mol_id for atoms involved in constraints"""
759
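+ # Presumed intent (based on the caller's comment about permutation of constraint pairs):
+ # give each constrained molecule a fresh entity_mol_id so that chain-permutation alignment
+ # cannot swap a constrained copy with an unconstrained symmetric copy.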
+ if max_entity_mol_id is None or len(featured_tokens) == 0:
760
+ return atom_array, full_atom_array
761
+
762
+ # Get atom indices for all constrained tokens
763
+ constrained_atom_indices = set()
764
+ centre_atom_indices = token_array.get_annotation("centre_atom_index")
765
+
766
+ for token_idx in featured_tokens:
767
+ constrained_atom_indices.add(centre_atom_indices[token_idx])
768
+
769
+ # Get mol_ids for constrained atoms
770
+ constrained_mol_ids = set(atom_array.mol_id[list(constrained_atom_indices)])
771
+
772
+ # Create mapping for new entity_mol_ids
773
+ new_id = max_entity_mol_id + 1
774
+ id_mapping = {}
775
+ for old_id in constrained_mol_ids:
776
+ id_mapping[old_id] = new_id
777
+ new_id += 1
778
+
779
+ # Update entity_mol_ids in atom_array
780
+ for old_id, new_id in id_mapping.items():
781
+ mask = atom_array.mol_id == old_id
782
+ atom_array.entity_mol_id[mask] = new_id
783
+ mask = full_atom_array.mol_id == old_id
784
+ full_atom_array.entity_mol_id[mask] = new_id
785
+
786
+ return atom_array, full_atom_array
787
+
788
+ def _get_chain_interface_mask(
789
+ self,
790
+ pdb_indice: pd.core.series.Series,
791
+ atom_array_chain_id: np.array,
792
+ ) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
793
+ df = pdb_indice.copy()
794
+
795
+ def get_atom_mask(row):
796
+ chain_1_mask = atom_array_chain_id == row["chain_1_id"]
797
+ if row["type"] == "chain":
798
+ chain_2_mask = chain_1_mask
799
+ else:
800
+ chain_2_mask = atom_array_chain_id == row["chain_2_id"]
801
+ chain_1_mask = torch.tensor(chain_1_mask).bool()
802
+ chain_2_mask = torch.tensor(chain_2_mask).bool()
803
+ if chain_1_mask.sum() == 0 or chain_2_mask.sum() == 0:
804
+ return None, None
805
+ return chain_1_mask, chain_2_mask
806
+
807
+ df["chain_1_mask"], df["chain_2_mask"] = zip(*df.apply(get_atom_mask, axis=1))
808
+ df = df[df["chain_1_mask"].notna()] # drop NaN
809
+
810
+ chain_1_mask = df["chain_1_mask"].tolist() # [N_eval, N_atom]
811
+ chain_2_mask = df["chain_2_mask"].tolist() # [N_eval, N_atom]
812
+
813
+ return chain_1_mask, chain_2_mask
814
+
815
+ def _setup_constraint_generator(self, sample_indice: pd.core.series.Series) -> Any:
816
+ """Setup random generator with optional fixed seed"""
817
+ if not self.constraint.get("fix_seed", False):
818
+ return None
819
+
820
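+ # Derive a reproducible 32-bit seed from the pdb_id so constraint sampling is deterministic
+ # for a given structure when fix_seed is enabled.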
+ constraint_seed = int(
821
+ hashlib.sha256(sample_indice.pdb_id.encode("utf-8")).hexdigest(), 16
822
+ ) % (2**32)
823
+ logger.info(
824
+ f"[constraint_seed seed]: {constraint_seed} for pdb {sample_indice.pdb_id}"
825
+ )
826
+ return torch.Generator().manual_seed(constraint_seed)
827
+
828
+ def _get_base_features(
829
+ self, token_array: TokenArray, atom_array: AtomArray
830
+ ) -> dict[str, Any]:
831
+ """Get base features for constraint featurizer"""
832
+ features_dict = {}
833
+ centre_atoms_indices = token_array.get_annotation("centre_atom_index")
834
+ centre_atoms = atom_array[centre_atoms_indices]
835
+ features_dict["asym_id"] = torch.Tensor(centre_atoms.asym_id_int).long()
836
+
837
+ atom_to_token_idx_dict = {}
838
+ for idx, token in enumerate(token_array.tokens):
839
+ for atom_idx in token.atom_indices:
840
+ atom_to_token_idx_dict[atom_idx] = idx
841
+
842
+ # ensure the order of the atom_to_token_idx is the same as the atom_array
843
+ atom_to_token_idx = [
844
+ atom_to_token_idx_dict[atom_idx] for atom_idx in range(len(atom_array))
845
+ ]
846
+ features_dict["atom_to_token_idx"] = torch.Tensor(atom_to_token_idx).long()
847
+
848
+ features_dict["is_dna"] = torch.Tensor(atom_array.is_dna).bool()
849
+ features_dict["is_rna"] = torch.Tensor(atom_array.is_rna).bool()
850
+ features_dict["is_ligand"] = torch.Tensor(atom_array.is_ligand).bool()
851
+ features_dict["atom_to_token_dict"] = atom_to_token_idx_dict
852
+ return features_dict
853
+
854
+ def _generate_pocket_features(
855
+ self,
856
+ atom_array: AtomArray,
857
+ token_array: TokenArray,
858
+ generator: torch.Generator,
859
+ sample_indice: pd.core.series.Series,
860
+ ) -> tuple[torch.Tensor, Any, set[int]]:
861
+ """Generate pocket constraint features"""
862
+ # pocket feature
863
+ pocket_featurizer = PocketFeaturizer(
864
+ token_array=token_array,
865
+ atom_array=atom_array,
866
+ generator=generator,
867
+ )
868
+ use_random_pocket = np.random.rand() < self.constraint.get("pocket", {}).get(
869
+ "prob", 0
870
+ )
871
+ if use_random_pocket:
872
+ binder_asym_id = None
873
+ if self.constraint["pocket"].get("spec_binder_chain", False):
874
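+ # Binder preference, as implemented below: a ligand chain if either interface chain is a
+ # ligand, otherwise a chain whose entity is in the antibody top-2 clusters; -1 if none match.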
+ # find ligand binder
875
+ if sample_indice.mol_1_type == "ligand":
876
+ binder_chain_id = str(sample_indice.chain_1_id)
877
+ elif sample_indice.mol_2_type == "ligand":
878
+ binder_chain_id = str(sample_indice.chain_2_id)
879
+ # find antibody binder
880
+ elif (
881
+ f"{sample_indice.pdb_id.lower()}_{sample_indice.entity_1_id}"
882
+ in self.ab_top2_clusters
883
+ ):
884
+ binder_chain_id = str(sample_indice.chain_1_id)
885
+ elif (
886
+ f"{sample_indice.pdb_id.lower()}_{sample_indice.entity_2_id}"
887
+ in self.ab_top2_clusters
888
+ ):
889
+ binder_chain_id = str(sample_indice.chain_2_id)
890
+ else:
891
+ binder_chain_id = -1
892
+ logger.info(f"No binder found!")
893
+ binder_asym_id = atom_array.asym_id_int[
894
+ atom_array.chain_id == binder_chain_id
895
+ ]
896
+ num_unique = len(np.unique(binder_asym_id))
897
+ assert num_unique <= 1
898
+ logger.info(f"found binder chains: {num_unique}")
899
+ binder_asym_id = binder_asym_id[0] if num_unique == 1 else -1
900
+ pocket_constraint_feature, constrained_tokens = pocket_featurizer.generate(
901
+ size=self.constraint["pocket"].get("size", 0.0),
902
+ feature_type=self.constraint["pocket"].get(
903
+ "feature_type", "continuous"
904
+ ),
905
+ max_distance_range=self.constraint["pocket"].get(
906
+ "max_distance_range", {}
907
+ ),
908
+ sample_group=self.constraint["pocket"].get("group", "complex"),
909
+ distance_type=self.constraint["pocket"].get(
910
+ "distance_type", "center_atom"
911
+ ),
912
+ spec_binder_asym_id=binder_asym_id,
913
+ )
914
+ else:
915
+ pocket_constraint_feature, constrained_tokens = pocket_featurizer.generate(
916
+ {}, "continuous", size=0, distance_type=None
917
+ )
918
+ return pocket_constraint_feature, pocket_featurizer, constrained_tokens
919
+
920
+ def _generate_contact_features(
921
+ self,
922
+ atom_array: AtomArray,
923
+ token_array: TokenArray,
924
+ generator: torch.Generator,
925
+ features_dict: dict[str, Any],
926
+ pdb_indice: pd.core.series.Series,
927
+ ) -> tuple[torch.Tensor, Any, list[tuple[int, int]], set[int]]:
928
+ """Generate contact constraint features"""
929
+ contact_featurizer = ContactFeaturizer(
930
+ token_array=token_array,
931
+ atom_array=atom_array,
932
+ generator=generator,
933
+ )
934
+ # contact_feature_prob =
935
+ use_random_contact = np.random.rand() < self.constraint.get("contact", {}).get(
936
+ "prob", 0
937
+ )
938
+ selected_pairs = []
939
+ if use_random_contact:
940
+ # collect all interfaces among cropped tokens
941
+ interface_asym_pairs = []
942
+ if self.constraint["contact"].get("group", "complex") == "interface":
943
+
944
+ def _get_asym_id(atom_mask):
945
+ token_idx = features_dict["atom_to_token_idx"][
946
+ atom_mask.bool()
947
+ ].long()
948
+ asym_ids = features_dict["asym_id"][token_idx]
949
+ assert len(torch.unique(asym_ids)) == 1
950
+ return asym_ids[0].item()
951
+
952
+ chain_1_mask, chain_2_mask = self._get_chain_interface_mask(
953
+ pdb_indice=pdb_indice, atom_array_chain_id=atom_array.chain_id
954
+ )
955
+
956
+ interface_asym_pairs = [
957
+ (_get_asym_id(c1), _get_asym_id(c2))
958
+ for c1, c2 in zip(chain_1_mask, chain_2_mask)
959
+ if not torch.equal(c1, c2)
960
+ ]
961
+
962
+ if len(interface_asym_pairs) == 0:
963
+ logger.info("Interface for constraint feature is not found")
964
+
965
+ contact_constraint_feature, selected_pairs, constrained_tokens = (
966
+ contact_featurizer.generate(
967
+ size=self.constraint["contact"].get("size", 0.0),
968
+ chain_pairs=interface_asym_pairs,
969
+ feature_type=self.constraint["contact"].get(
970
+ "feature_type", "continuous"
971
+ ),
972
+ max_distance_range=self.constraint["contact"].get(
973
+ "max_distance_range", {}
974
+ ),
975
+ min_distance_threshold=self.constraint["contact"].get(
976
+ "min_distance", 0.0
977
+ ),
978
+ sample_group=self.constraint["contact"].get("group", "complex"),
979
+ distance_type=self.constraint["contact"].get(
980
+ "distance_type", "center_atom"
981
+ ),
982
+ )
983
+ )
984
+ else:
985
+ contact_constraint_feature, selected_pairs, constrained_tokens = (
986
+ contact_featurizer.generate(
987
+ {}, None, "continuous", size=0, distance_type=None
988
+ )
989
+ )
990
+ return (
991
+ contact_constraint_feature,
992
+ contact_featurizer,
993
+ selected_pairs,
994
+ constrained_tokens,
995
+ )
996
+
997
+ def _generate_contact_atom_features(
998
+ self,
999
+ atom_array: AtomArray,
1000
+ token_array: TokenArray,
1001
+ generator: torch.Generator,
1002
+ features_dict: dict[str, Any],
1003
+ pdb_indice: pd.core.series.Series,
1004
+ ) -> tuple[torch.Tensor, Any, list[tuple[int, int]], set[int]]:
1005
+ """Generate contact atom constraint features"""
1006
+ contact_atom_featurizer = ContactAtomFeaturizer(
1007
+ token_array=token_array, atom_array=atom_array, generator=generator
1008
+ )
1009
+ use_random_contact_atom = np.random.rand() < self.constraint.get(
1010
+ "contact_atom", {}
1011
+ ).get("prob", 0)
1012
+ selected_pairs = []
1013
+ if use_random_contact_atom:
1014
+ # collect all interfaces among cropped tokens
1015
+ interface_asym_pairs = []
1016
+ if self.constraint["contact_atom"].get("group", "complex") == "interface":
1017
+
1018
+ def _get_asym_id(atom_mask):
1019
+ token_idx = features_dict["atom_to_token_idx"][
1020
+ atom_mask.bool()
1021
+ ].long()
1022
+ asym_ids = features_dict["asym_id"][token_idx]
1023
+ assert len(torch.unique(asym_ids)) == 1
1024
+ return asym_ids[0].item()
1025
+
1026
+ chain_1_mask, chain_2_mask = self._get_chain_interface_mask(
1027
+ pdb_indice=pdb_indice, atom_array_chain_id=atom_array.chain_id
1028
+ )
1029
+
1030
+ interface_asym_pairs = [
1031
+ (_get_asym_id(c1), _get_asym_id(c2))
1032
+ for c1, c2 in zip(chain_1_mask, chain_2_mask)
1033
+ if not torch.equal(c1, c2)
1034
+ ]
1035
+
1036
+ if len(interface_asym_pairs) == 0:
1037
+ logger.info("Interface for constraint feature is not found")
1038
+
1039
+ contact_atom_constraint_feature, selected_pairs, constrained_tokens = (
1040
+ contact_atom_featurizer.generate(
1041
+ size=self.constraint["contact_atom"].get("size", 0.0),
1042
+ chain_pairs=interface_asym_pairs,
1043
+ feature_type=self.constraint["contact_atom"].get(
1044
+ "feature_type", "continuous"
1045
+ ),
1046
+ max_distance_range=self.constraint["contact_atom"].get(
1047
+ "max_distance_range", {}
1048
+ ),
1049
+ min_distance_threshold=self.constraint["contact_atom"].get(
1050
+ "min_distance", 0.0
1051
+ ),
1052
+ sample_group=self.constraint["contact_atom"].get(
1053
+ "group", "complex"
1054
+ ),
1055
+ distance_type=self.constraint["contact_atom"].get(
1056
+ "distance_type", "atom"
1057
+ ),
1058
+ )
1059
+ )
1060
+ else:
1061
+ contact_atom_constraint_feature, selected_pairs, constrained_tokens = (
1062
+ contact_atom_featurizer.generate(
1063
+ {}, None, "continuous", size=0, distance_type=None
1064
+ )
1065
+ )
1066
+ return (
1067
+ contact_atom_constraint_feature,
1068
+ contact_atom_featurizer,
1069
+ selected_pairs,
1070
+ constrained_tokens,
1071
+ )
1072
+
1073
+ def _generate_substructure_features(
1074
+ self,
1075
+ atom_array: AtomArray,
1076
+ token_array: TokenArray,
1077
+ generator: torch.Generator,
1078
+ sample_indice: pd.core.series.Series,
1079
+ ) -> tuple[torch.Tensor, Any, set[int]]:
1080
+ """Generate substructure constraint features"""
1081
+ substructure_featurizer = SubStructureFeaturizer(
1082
+ token_array=token_array,
1083
+ atom_array=atom_array,
1084
+ generator=generator,
1085
+ )
1086
+ use_random_substructure = np.random.rand() < self.constraint.get(
1087
+ "substructure", {}
1088
+ ).get("prob", 0)
1089
+ if use_random_substructure:
1090
+ spec_asym_id = None
1091
+ if self.constraint["substructure"].get("spec_asym_id", False):
1092
+ # find ligand chain
1093
+ if sample_indice.mol_1_type == "ligand":
1094
+ binder_chain_id = str(sample_indice.chain_1_id)
1095
+ target_chain_id = str(sample_indice.chain_2_id)
1096
+ elif sample_indice.mol_2_type == "ligand":
1097
+ binder_chain_id = str(sample_indice.chain_2_id)
1098
+ target_chain_id = str(sample_indice.chain_1_id)
1099
+ # find antibody chain
1100
+ elif (
1101
+ f"{sample_indice.pdb_id.lower()}_{sample_indice.entity_1_id}"
1102
+ in self.ab_top2_clusters
1103
+ ):
1104
+ binder_chain_id = str(sample_indice.chain_1_id)
1105
+ target_chain_id = str(sample_indice.chain_2_id)
1106
+ elif (
1107
+ f"{sample_indice.pdb_id.lower()}_{sample_indice.entity_2_id}"
1108
+ in self.ab_top2_clusters
1109
+ ):
1110
+ binder_chain_id = str(sample_indice.chain_2_id)
1111
+ target_chain_id = str(sample_indice.chain_1_id)
1112
+ else:
1113
+ target_chain_id = binder_chain_id = -1
1114
+ logger.info(f"No specific chain found!")
1115
+
1116
+ chain_choice = self.constraint["substructure"].get(
1117
+ "spec_chain_type", "binder"
1118
+ )
1119
+ if chain_choice == "binder":
1120
+ spec_chain_id = binder_chain_id
1121
+ elif chain_choice == "target":
1122
+ spec_chain_id = target_chain_id
1123
+ else:
1124
+ raise ValueError(
1125
+ f"Invalid spec_chain_type: {self.constraint['substructure'].get('spec_chain_type', 'binder')}"
1126
+ )
1127
+
1128
+ if spec_chain_id != -1:
1129
+ spec_asym_id = atom_array.asym_id_int[
1130
+ atom_array.chain_id == spec_chain_id
1131
+ ]
1132
+ num_unique = len(np.unique(spec_asym_id))
1133
+ assert num_unique <= 1
1134
+ logger.info(f"found {chain_choice} chain: {num_unique}")
1135
+ spec_asym_id = spec_asym_id[0] if num_unique == 1 else -1
1136
+
1137
+ substructure_constraint_feature, constrained_tokens = (
1138
+ substructure_featurizer.generate(
1139
+ mol_type_pairs=self.constraint["substructure"].get(
1140
+ "mol_type_pairs", {}
1141
+ ),
1142
+ feature_type=self.constraint["substructure"].get(
1143
+ "feature_type", "one_hot"
1144
+ ),
1145
+ size=self.constraint["substructure"].get("size", 0),
1146
+ ratios=self.constraint["substructure"].get(
1147
+ "ratios", {"full": [0.0, 0.5, 1.0], "partial": 0.3}
1148
+ ),
1149
+ coord_noise_scale=self.constraint["substructure"].get(
1150
+ "coord_noise_scale", 0.05
1151
+ ),
1152
+ spec_asym_id=spec_asym_id,
1153
+ )
1154
+ )
1155
+ else:
1156
+ substructure_constraint_feature, constrained_tokens = (
1157
+ substructure_featurizer.generate(
1158
+ mol_type_pairs={},
1159
+ feature_type="one_hot",
1160
+ size=0,
1161
+ ratios={"full": [0.0, 0.5, 1.0], "partial": 0.3},
1162
+ coord_noise_scale=0.05,
1163
+ spec_asym_id=None,
1164
+ )
1165
+ )
1166
+ return (
1167
+ substructure_constraint_feature,
1168
+ substructure_featurizer,
1169
+ constrained_tokens,
1170
+ )
1171
+
1172
+ def _get_feature_statistics(
1173
+ self,
1174
+ constraint_feature: dict[str, torch.Tensor],
1175
+ atom_array: AtomArray,
1176
+ token_array: TokenArray,
1177
+ features_dict: dict[str, Any],
1178
+ contact_atom_featurizer: Any,
1179
+ substructure_featurizer: Any,
1180
+ ) -> dict[str, Any]:
1181
+ """Log statistics about generated features"""
1182
+ token_idx_1, token_idx_2 = torch.nonzero(
1183
+ torch.triu(constraint_feature["contact"][..., 1]), as_tuple=True
1184
+ )
1185
+ asym_id_1 = features_dict["asym_id"][token_idx_1]
1186
+ asym_id_2 = features_dict["asym_id"][token_idx_2]
1187
+
1188
+ res_id_1 = atom_array.res_id[
1189
+ token_array[token_idx_1].get_annotation("centre_atom_index")
1190
+ ]
1191
+ res_id_2 = atom_array.res_id[
1192
+ token_array[token_idx_2].get_annotation("centre_atom_index")
1193
+ ]
1194
+ contact_distance = constraint_feature["contact"][token_idx_1, token_idx_2, 1]
1195
+
1196
+ # logging contact atom feature
1197
+ atom_idx_1, atom_idx_2 = torch.nonzero(
1198
+ torch.triu(constraint_feature["contact_atom"][..., 1]), as_tuple=True
1199
+ )
1200
+ contact_atom_real_distance = (
1201
+ contact_atom_featurizer.get_real_distance(atom_idx_1, atom_idx_2)
1202
+ if len(atom_idx_1) > 0
1203
+ else None
1204
+ )
1205
+ contact_atom_max_distance = constraint_feature["contact_atom"][
1206
+ atom_idx_1, atom_idx_2, 1
1207
+ ]
1208
+ contact_atom_min_distance = constraint_feature["contact_atom"][
1209
+ atom_idx_1, atom_idx_2, 0
1210
+ ]
1211
+ num_contact_atom = contact_atom_max_distance.shape[0]
1212
+
1213
+ # logging pocket feature
1214
+ binder_idx, pocket_idx = torch.nonzero(
1215
+ constraint_feature["pocket"].squeeze(-1), as_tuple=True
1216
+ )
1217
+ binder_idx = binder_idx.unique()
1218
+ pocket_idx = pocket_idx.unique()
1219
+ if binder_idx.numel() > 1:
1220
+ pocket_distance = constraint_feature["pocket"][binder_idx[0], pocket_idx, 0]
1221
+ else:
1222
+ pocket_distance = -1
1223
+
1224
+ # logging substructure feature
1225
+ sub_structure_info = substructure_featurizer.analyze_features(
1226
+ constraint_feature["substructure"]
1227
+ )
1228
+
1229
+ feature_info = {
1230
+ "contact_info": {
1231
+ "asym_id": torch.stack([asym_id_1, asym_id_2]),
1232
+ "token_id": torch.stack([token_idx_1, token_idx_2]),
1233
+ "res_id": torch.tensor(np.stack([res_id_1, res_id_2])),
1234
+ "distance": contact_distance,
1235
+ },
1236
+ "contact_atom_info": {
1237
+ "distance": contact_atom_real_distance,
1238
+ "max_distance": contact_atom_max_distance,
1239
+ "min_distance": contact_atom_min_distance,
1240
+ "num_contact_atom": num_contact_atom,
1241
+ },
1242
+ "pocket_info": {
1243
+ "binder_tokenid": binder_idx,
1244
+ "pocket_tokenid": pocket_idx,
1245
+ "distance": pocket_distance,
1246
+ },
1247
+ "substructure_info": sub_structure_info,
1248
+ }
1249
+
1250
+ return feature_info
1251
+
1252
+ def _log_feature_statistics(self, feature_info: dict[str, Any]) -> dict[str, Any]:
1253
+ log_constraint = {}
1254
+ if feature_info.get("pocket_info", None) is not None:
1255
+ binder_tokens, pocket_tokens, distance = feature_info[
1256
+ "pocket_info"
1257
+ ].values()
1258
+ pocket_msg = ";".join(
1259
+ [
1260
+ ",".join(map(str, binder_tokens.flatten().tolist())),
1261
+ ",".join(map(str, pocket_tokens.flatten().tolist())),
1262
+ ]
1263
+ )
1264
+ log_constraint["pocket_msg"] = pocket_msg
1265
+ log_constraint["pocket_N_binder"] = torch.tensor(
1266
+ feature_info["pocket_info"]["binder_tokenid"].shape[0]
1267
+ )
1268
+ log_constraint["pocket_N_pocket"] = torch.tensor(
1269
+ feature_info["pocket_info"]["pocket_tokenid"].shape[0]
1270
+ )
1271
+ if feature_info.get("contact_info", None) is not None:
1272
+ asym_id, token_id, res_id, distance = feature_info["contact_info"].values()
1273
+ N_contact = asym_id.shape[-1]
1274
+ contact_msg = ";".join(
1275
+ [
1276
+ ",".join(map(str, asym_id.flatten().tolist())),
1277
+ ",".join(map(str, token_id.flatten().tolist())),
1278
+ ",".join(map(str, distance.flatten().tolist())),
1279
+ ]
1280
+ )
1281
+ log_constraint["contact_N_pair"] = torch.tensor(N_contact)
1282
+ log_constraint["contact_msg"] = contact_msg
1283
+ if feature_info.get("contact_atom_info", None) is not None:
1284
+ distance, max_distance, min_distance, num_contact_atom = feature_info[
1285
+ "contact_atom_info"
1286
+ ].values()
1287
+ N_contact = num_contact_atom
1288
+ log_constraint["contact_atom_N_pair"] = torch.tensor(N_contact)
1289
+ log_constraint["contact_atom_distance"] = distance
1290
+ log_constraint["contact_atom_max_distance"] = max_distance
1291
+ log_constraint["contact_atom_min_distance"] = min_distance
1292
+
1293
+ if feature_info.get("substructure_info", None) is not None:
1294
+ log_constraint["substructure_active_tokens"] = torch.tensor(
1295
+ feature_info["substructure_info"]["num_active_tokens"]
1296
+ )
1297
+ log_constraint["substructure_active_token_ratio"] = torch.tensor(
1298
+ feature_info["substructure_info"]["active_token_ratio"]
1299
+ )
1300
+ log_constraint["substructure_bin0_cnt"] = torch.tensor(
1301
+ feature_info["substructure_info"]["distance_distribution"][
1302
+ "bin_0_count"
1303
+ ]
1304
+ )
1305
+ log_constraint["substructure_bin1_cnt"] = torch.tensor(
1306
+ feature_info["substructure_info"]["distance_distribution"][
1307
+ "bin_1_count"
1308
+ ]
1309
+ )
1310
+ log_constraint["substructure_bin2_cnt"] = torch.tensor(
1311
+ feature_info["substructure_info"]["distance_distribution"][
1312
+ "bin_2_count"
1313
+ ]
1314
+ )
1315
+ log_constraint["substructure_bin3_cnt"] = torch.tensor(
1316
+ feature_info["substructure_info"]["distance_distribution"][
1317
+ "bin_3_count"
1318
+ ]
1319
+ )
1320
+
1321
+ return log_constraint
1322
+
1323
+
1324
+ class ConstraintFeaturizer(object):
1325
+ def __init__(
1326
+ self,
1327
+ token_array: TokenArray,
1328
+ atom_array: AtomArray,
1329
+ pad_value: float = 0,
1330
+ generator=None,
1331
+ ):
1332
+ self.token_array = token_array
1333
+ self.atom_array = atom_array
1334
+ self.pad_value = pad_value
1335
+ self.generator = generator
1336
+ self._get_base_info()
1337
+
1338
+ @staticmethod
1339
+ def one_hot_encoder(feature: torch.Tensor, num_classes: int):
1340
+ # Create mask for padding values (-1)
1341
+ pad_mask = feature == -1
1342
+
1343
+ # Replace -1 with 0 temporarily for F.one_hot
1344
+ feature = torch.where(pad_mask, torch.zeros_like(feature), feature)
1345
+
1346
+ # Convert to one-hot
1347
+ one_hot = F.one_hot(feature, num_classes=num_classes).float()
1348
+
1349
+ # Zero out the one-hot vectors for padding positions
1350
+ one_hot[pad_mask] = 0.0
1351
+
1352
+ return one_hot
1353
+
1354
+ def encode(self, feature: torch.Tensor, feature_type: str, **kwargs):
1355
+ if feature_type == "one_hot":
1356
+ return ConstraintFeaturizer.one_hot_encoder(
1357
+ feature, num_classes=kwargs.get("num_classes", -1)
1358
+ )
1359
+ elif feature_type == "continuous":
1360
+ return feature
1361
+ else:
1362
+ raise RuntimeError(f"Invalid feature_type: {feature_type}")
1363
+
1364
+ def _get_base_info(self):
1365
+ token_centre_atom_indices = self.token_array.get_annotation("centre_atom_index")
1366
+ centre_atoms = self.atom_array[token_centre_atom_indices]
1367
+ self.asymid = torch.tensor(centre_atoms.asym_id_int, dtype=torch.long)
1368
+ self.is_ligand = torch.tensor(centre_atoms.is_ligand, dtype=torch.bool)
1369
+ self.is_protein = torch.tensor(centre_atoms.is_protein, dtype=torch.bool)
1370
+ self.entity_type_dict = {"P": self.is_protein, "L": self.is_ligand}
1371
+
1372
+ def _get_generation_basics(self, distance_type: str = "center_atom"):
1373
+ token_centre_atom_indices = self.token_array.get_annotation("centre_atom_index")
1374
+ centre_atoms = self.atom_array[token_centre_atom_indices]
1375
+
1376
+ # is_resolved mask
1377
+ self.token_resolved_mask = torch.tensor(
1378
+ centre_atoms.is_resolved, dtype=torch.bool
1379
+ )
1380
+ self.token_resolved_maskmat = (
1381
+ self.token_resolved_mask[:, None] * self.token_resolved_mask[None, :]
1382
+ )
1383
+
1384
+ # distance matrix
1385
+ if distance_type == "center_atom":
1386
+ # center atom distance
1387
+ self.token_distance = torch.tensor(
1388
+ cdist(centre_atoms.coord, centre_atoms.coord), dtype=torch.float64
1389
+ )
1390
+ elif distance_type == "any_atom":
1391
+ # any atom distance
1392
+ all_atom_resolved_mask = (
1393
+ self.atom_array.is_resolved[:, None]
1394
+ * self.atom_array.is_resolved[None, :]
1395
+ )
1396
+ all_atom_distance = cdist(self.atom_array.coord, self.atom_array.coord)
1397
+ all_atom_distance[~all_atom_resolved_mask] = np.inf
1398
+
1399
+ token_atoms_num = [
1400
+ len(_atoms)
1401
+ for _atoms in self.token_array.get_annotation("atom_indices")
1402
+ ]
1403
+ atom_token_num = np.repeat(
1404
+ np.arange(len(self.token_array)), token_atoms_num
1405
+ )
1406
+
1407
+ self.token_distance = torch.zeros(
1408
+ (len(centre_atoms), len(centre_atoms)), dtype=torch.float64
1409
+ )
1410
+ for i, j in np.ndindex(self.token_distance.shape):
1411
+ atom_pairs_mask = np.ix_(atom_token_num == i, atom_token_num == j)
1412
+ self.token_distance[i, j] = np.min(all_atom_distance[atom_pairs_mask])
1413
+ elif distance_type == "atom":
1414
+ raise ValueError(
1415
+ "Not implement in this class, please use ContactAtomFeaturizer"
1416
+ )
1417
+ else:
1418
+ raise ValueError(f"Not recognized distance_type: {distance_type}")
1419
+
1420
+ def generate(self):
1421
+ pass
1422
+
1423
+ def generate_spec_constraint(self):
1424
+ pass
1425
+
1426
+
1427
+ class ContactFeaturizer(ConstraintFeaturizer):
1428
+ def __init__(self, **kargs):
1429
+ super().__init__(**kargs)
1430
+
1431
+ def get_valid_contact_feature(
1432
+ self,
1433
+ valid_contact_type: str,
1434
+ max_distance_threshold: float,
1435
+ min_distance_threshold: float,
1436
+ ) -> torch.Tensor:
1437
+ """
1438
+ Find all valid pairs of entities that satisfy the given contact distance requirements.
1439
+
1440
+ Parameters:
1441
+ - valid_contact_type: A two-character code for the entity pair under consideration,
1442
+ e.g. PP, PL, P_
1443
+ - max_distance_threshold: The maximum allowable distance for a contact to be considered valid.
1444
+ - min_distance_threshold: The minimum allowable distance for a contact to be considered valid.
1445
+ Returns:
1446
+ - shape=(N_token, N_token), a boolean mask of valid contact pairs within the specified distance range.
1447
+ """
1448
+ # get valid contact type
1449
+ query_type, key_type = valid_contact_type
1450
+
1451
+ if key_type == "_":
1452
+ # intra chain contact
1453
+ assert query_type == "P", "only support intra-protein contact for now"
1454
+
1455
+ valid_chain_mask = self.asymid[:, None] == self.asymid[None, :]
1456
+
1457
+ # skip sequence-adjacent pairs within the same chain
1458
+ n_neighbor = 20 # TODO: tune this parameter
1459
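+ # With diagonal offsets of +/- n_neighbor, intra-chain pairs (i, j) with |i - j| < 20 are
+ # excluded, so trivially close sequence neighbours are never proposed as contacts.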
+ valid_chain_mask_right = torch.triu(valid_chain_mask, diagonal=n_neighbor)
1460
+ valid_chain_mask_left = torch.tril(valid_chain_mask, diagonal=-n_neighbor)
1461
+ valid_chain_mask = valid_chain_mask_right | valid_chain_mask_left
1462
+
1463
+ else:
1464
+ # inter chain contact
1465
+ assert (
1466
+ query_type in "PL" and key_type in "PL"
1467
+ ), f"[Error]contact type not support: {valid_contact_type}"
1468
+
1469
+ valid_type_mask = (
1470
+ self.entity_type_dict[query_type][None, :]
1471
+ & self.entity_type_dict[key_type][:, None]
1472
+ )
1473
+ valid_type_mask |= (
1474
+ self.entity_type_dict[key_type][None, :]
1475
+ & self.entity_type_dict[query_type][:, None]
1476
+ )
1477
+ # get different chain mask
1478
+ valid_chain_mask = self.asymid[:, None] != self.asymid[None, :]
1479
+ valid_chain_mask &= valid_type_mask
1480
+
1481
+ # get min & max distance threshold
1482
+ if min_distance_threshold == -1: # not used by default; a fixed threshold is hard to choose
1483
+ # random select a min threshold in [0, max_distance_threshold), but may lead to zero contact pairs
1484
+ min_distance_threshold = (
1485
+ torch.zeros(1).uniform_(to=max_distance_threshold).item()
1486
+ )
1487
+ valid_dist_mask = (self.token_distance <= max_distance_threshold) & (
1488
+ self.token_distance >= min_distance_threshold
1489
+ )
1490
+
1491
+ # make feature
1492
+ contact_valid_mask = (
1493
+ valid_chain_mask & self.token_resolved_maskmat & valid_dist_mask
1494
+ )
1495
+ return contact_valid_mask
1496
+
1497
+ def _get_constraint_size(self, group: str, size: Any) -> list[int]:
1498
+ """
1499
+ If size is not fixed, then we generate it randomly for each group
1500
+
1501
+ Args:
1502
+ - group: groups types
1503
+ """
1504
+ if group == "complex":
1505
+ k = 1
1506
+ elif group == "interface":
1507
+ N_asym = torch.unique(self.asymid).shape[0]
1508
+ k = (N_asym * N_asym - N_asym) // 2
1509
+ if size < 1 and size > 0:
1510
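+ # A fractional size is interpreted as the success probability of a geometric draw, e.g.
+ # size=0.5 samples on average 1/0.5 = 2 contacts per group.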
+ samples = torch.zeros(k).geometric_(size).int().tolist()
1511
+ return samples
1512
+ elif size >= 1 and isinstance(size, int):
1513
+ return [size] * k
1514
+ else:
1515
+ raise NotImplementedError
1516
+
1517
+ def _sample_contacts(
1518
+ self,
1519
+ contact_valid_mask: torch.Tensor,
1520
+ contact_distance_mat: torch.Tensor,
1521
+ size: list[int],
1522
+ sample_group: str,
1523
+ chain_pairs: list[tuple[int, int]],
1524
+ ) -> tuple[torch.Tensor, list[tuple[int, int, float, float]]]:
1525
+ """
1526
+ Randomly select contact from all valid contact pairs
1527
+
1528
+ Args:
1529
+ - contact_valid_mask: shape=(N_token, N_token), all valid contact pairs, bool
1530
+ - size: how many contacts to sample
1531
+ - sample_group: sampling granularity, either over the whole complex or per interface
1532
+ """
1533
+ result_mat = torch.full(
1534
+ (contact_valid_mask.shape[0], contact_valid_mask.shape[1], 2),
1535
+ fill_value=self.pad_value,
1536
+ dtype=torch.float32,
1537
+ )
1538
+ selected_pairs = []
1539
+
1540
+ if not contact_valid_mask.any().item():
1541
+ return result_mat, selected_pairs
1542
+
1543
+ def _sample(valid_mask, cur_size):
1544
+ nonlocal selected_pairs
1545
+
1546
+ valid_indices = torch.nonzero(torch.triu(valid_mask))
1547
+ selected_indices = valid_indices[
1548
+ torch.randperm(valid_indices.shape[0], generator=self.generator)[
1549
+ :cur_size
1550
+ ]
1551
+ ]
1552
+
1553
+ selected_indices = tuple(zip(*selected_indices))
1554
+ if len(selected_indices) == 0:
1555
+ return
1556
+
1557
+ # Convert to pairs format
1558
+ for i, j in zip(*selected_indices):
1559
+ min_dist, max_dist = contact_distance_mat[i, j]
1560
+ selected_pairs.append(
1561
+ (i.item(), j.item(), min_dist.item(), max_dist.item())
1562
+ )
1563
+ selected_pairs.append(
1564
+ (j.item(), i.item(), min_dist.item(), max_dist.item())
1565
+ ) # Add symmetric pair
1566
+
1567
+ # add symmetry indices
1568
+ selected_indices = (
1569
+ selected_indices[0] + selected_indices[1],
1570
+ selected_indices[1] + selected_indices[0],
1571
+ )
1572
+ result_mat[selected_indices] = contact_distance_mat[selected_indices]
1573
+ return
1574
+
1575
+ # sample contacts by complex
1576
+ if sample_group == "complex":
1577
+ _sample(contact_valid_mask, size[0])
1578
+ # if group by interface, get all unique interfaces, and sample contact from each interface
1579
+ elif sample_group == "interface":
1580
+ # sample contacts from each interface iteratively
1581
+
1582
+ idx = 0
1583
+ for asym1, asym2 in chain_pairs:
1584
+ asym1_mask, asym2_mask = self.asymid == asym1, self.asymid == asym2
1585
+
1586
+ cur_interface_mask = asym1_mask[..., None, :] & asym2_mask[..., :, None]
1587
+ cur_interface_mask |= (
1588
+ asym1_mask[..., :, None] & asym2_mask[..., None, :]
1589
+ )
1590
+ valid_contacts_interface = contact_valid_mask & cur_interface_mask
1591
+ _sample(valid_contacts_interface, size[idx])
1592
+ idx += 1
1593
+ else:
1594
+ raise NotImplementedError
1595
+ return result_mat, selected_pairs
1596
+
1597
+ def generate(
1598
+ self,
1599
+ max_distance_range: dict[str, tuple[float, float]],
1600
+ sample_group: str,
1601
+ feature_type: str,
1602
+ size: Any,
1603
+ distance_type: str,
1604
+ min_distance_threshold: float = 0.0,
1605
+ chain_pairs: list[tuple[int, int]] = [],
1606
+ **kwargs,
1607
+ ) -> tuple[torch.Tensor, list[tuple[int, int, float, float]], set[int]]:
1608
+ """
1609
+ training & evaluation
1610
+ """
1611
+ constrained_tokens = set()
1612
+ if size == 0:
1613
+ return (
1614
+ self.encode(
1615
+ torch.full(
1616
+ (self.asymid.shape[0], self.asymid.shape[0], 2),
1617
+ fill_value=self.pad_value,
1618
+ dtype=torch.float32,
1619
+ ),
1620
+ feature_type=feature_type,
1621
+ ),
1622
+ [],
1623
+ constrained_tokens,
1624
+ )
1625
+ self._get_generation_basics(distance_type=distance_type)
1626
+
1627
+ # contact mask
1628
+ n_token = len(self.asymid)
1629
+ contact_valid_mask = torch.zeros((n_token, n_token), dtype=torch.bool)
1630
+ contact_distance_mat = torch.zeros((n_token, n_token, 2), dtype=torch.float32)
1631
+
1632
+ for contact_type, max_d_range in max_distance_range.items():
1633
+ # generate max_distance_mask for different contact type
1634
+ max_distance_threshold = torch.zeros(1).uniform_(*max_d_range).item()
1635
+
1636
+ # get all valid contact pairs
1637
+ contact_mask = self.get_valid_contact_feature(
1638
+ contact_type,
1639
+ max_distance_threshold=max_distance_threshold,
1640
+ min_distance_threshold=min_distance_threshold,
1641
+ )
1642
+ contact_valid_mask |= contact_mask
1643
+ contact_distance_mat[contact_mask] = torch.tensor(
1644
+ [0, max_distance_threshold], dtype=torch.float32
1645
+ )
1646
+
1647
+ # random select contact
1648
+ size = self._get_constraint_size(sample_group, size)
1649
+ sampled_contact_feature, selected_pairs = self._sample_contacts(
1650
+ contact_valid_mask,
1651
+ contact_distance_mat,
1652
+ size,
1653
+ sample_group,
1654
+ chain_pairs,
1655
+ )
1656
+
1657
+ # encode the feature
1658
+ contact_feature = self.encode(
1659
+ feature=sampled_contact_feature, feature_type=feature_type
1660
+ )
1661
+ # Track constrained tokens
1662
+ constrained_tokens = set()
1663
+ for token_i, token_j, _, _ in selected_pairs:
1664
+ constrained_tokens.add(token_i)
1665
+ constrained_tokens.add(token_j)
1666
+
1667
+ return contact_feature, selected_pairs, constrained_tokens
1668
+
1669
+ def generate_spec_constraint(
1670
+ self,
1671
+ contact_specifics: list[tuple[int, int, float, float]],
1672
+ feature_type: str,
1673
+ ) -> tuple[torch.Tensor, set[int]]:
1674
+ """
1675
+ parse constraint from user specification
1676
+ """
1677
+
1678
+ contact_feature = torch.full(
1679
+ (self.asymid.shape[0], self.asymid.shape[0], 2),
1680
+ fill_value=self.pad_value,
1681
+ dtype=torch.float32,
1682
+ )
1683
+ constrained_tokens = set()
1684
+
1685
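+ # For each user-specified contact, one token is drawn at random from each side's candidate
+ # token list and the (min, max) distance pair is written symmetrically into the feature.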
+ for token_list_1, token_list_2, max_distance, min_distance in contact_specifics:
1686
+ token_id_1 = token_list_1[
1687
+ torch.randint(
1688
+ high=token_list_1.shape[0], size=(1,), generator=self.generator
1689
+ ).item()
1690
+ ]
1691
+ token_id_2 = token_list_2[
1692
+ torch.randint(
1693
+ high=token_list_2.shape[0], size=(1,), generator=self.generator
1694
+ ).item()
1695
+ ]
1696
+
1697
+ contact_feature[token_id_1, token_id_2, 1] = max_distance
1698
+ contact_feature[token_id_2, token_id_1, 1] = max_distance
1699
+ contact_feature[token_id_1, token_id_2, 0] = min_distance
1700
+ contact_feature[token_id_2, token_id_1, 0] = min_distance
1701
+
1702
+ constrained_tokens.add(token_id_1)
1703
+ constrained_tokens.add(token_id_2)
1704
+
1705
+ contact_feature = self.encode(
1706
+ feature=contact_feature, feature_type=feature_type
1707
+ )
1708
+ return contact_feature, constrained_tokens
1709
+
1710
+
1711
+ class PocketFeaturizer(ConstraintFeaturizer):
1712
+ def __init__(self, **kargs):
1713
+ super().__init__(**kargs)
1714
+
1715
+ def get_valid_pocket_feature(
1716
+ self,
1717
+ binder_pocket_type: str,
1718
+ max_distance_threshold: float,
1719
+ asym_list: list[int],
1720
+ ) -> torch.Tensor:
1721
+ """
1722
+ Parameters:
1723
+ - binder_pocket_type: PP, LP
1724
+ - max_distance_threshold
1725
+ - asym list
1726
+ Returns:
1727
+ - binder_pocket_valid_masks, shape=(N_asym, N_token): boolean mask of pocket tokens that fall within max_distance_threshold of each candidate binder chain.
1729
+ """
1730
+ # get valid binder-pocket type, not symmetry
1731
+ query_type, key_type = binder_pocket_type
1732
+ valid_type_mask = (
1733
+ self.entity_type_dict[query_type][:, None]
1734
+ & self.entity_type_dict[key_type][None, :]
1735
+ )
1736
+
1737
+ # get different chain mask
1738
+ diff_chain_mask = self.asymid[:, None] != self.asymid[None, :]
1739
+
1740
+ # get distance mask
1741
+ # Note: to simplify the implementation, we only consider token-token distances for now, rather than all heavy-atom distances per residue.
1742
+ dist_mask = (
1743
+ (self.token_distance <= max_distance_threshold)
1744
+ & self.token_resolved_maskmat
1745
+ & diff_chain_mask
1746
+ & valid_type_mask
1747
+ )
1748
+
1749
+ # pocket mask
1750
+ n_token = len(self.asymid)
1751
+ n_asym = len(asym_list)
1752
+ binder_pocket_valid_masks = torch.zeros((n_asym, n_token), dtype=torch.bool)
1753
+
1754
+ for idx, asym_id in enumerate(asym_list):
1755
+ cur_chain_dist_mask = (
1756
+ self.asymid[:, None].expand(-1, self.asymid.shape[0]) == asym_id
1757
+ )
1758
+ if cur_chain_dist_mask[:, 0].sum() > 1: # ensure num of binder tokens > 1
1759
+ cur_chain_dist_mask = cur_chain_dist_mask & dist_mask
1760
+ cur_valid_mask = cur_chain_dist_mask.any(dim=0)
1761
+ binder_pocket_valid_masks[idx] = cur_valid_mask
1762
+ return binder_pocket_valid_masks
1763
+
1764
+ def _sample_pocket(
1765
+ self,
1766
+ binder_pocket_valid_masks: torch.Tensor,
1767
+ size: Any,
1768
+ asym_list: list[int],
1769
+ max_distance_value: torch.Tensor,
1770
+ spec_binder_asym_id: Any = None,
1771
+ **_, # do not use
1772
+ ) -> torch.Tensor:
1773
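+ # One binder chain is used (the specified asym_id, or a random chain that has at least one
+ # valid pocket token); up to `size` pocket residues are then sampled and assigned their
+ # max-distance threshold against every binder token.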
+ pocket_dist_feature = torch.full(
1774
+ (self.asymid.shape[0], self.asymid.shape[0]),
1775
+ fill_value=self.pad_value,
1776
+ dtype=torch.float32,
1777
+ )
1778
+ if spec_binder_asym_id is None:
1779
+ # random select 1 binder with valid pocket
1780
+ binders_with_valid_pocket = torch.nonzero(
1781
+ binder_pocket_valid_masks.any(-1), as_tuple=True
1782
+ )[0]
1783
+ if len(binders_with_valid_pocket) == 0:
1784
+ return pocket_dist_feature
1785
+ selected_binder = binders_with_valid_pocket[
1786
+ torch.randperm(
1787
+ binders_with_valid_pocket.shape[0], generator=self.generator
1788
+ )[0]
1789
+ ]
1790
+ else:
1791
+ selected_binder = torch.nonzero(
1792
+ asym_list == spec_binder_asym_id, as_tuple=True
1793
+ )[0].squeeze()
1794
+ selected_all_pocket_res = binder_pocket_valid_masks[selected_binder]
1795
+ binder_asym_id = (
1796
+ asym_list[selected_binder]
1797
+ if spec_binder_asym_id is None
1798
+ else spec_binder_asym_id
1799
+ )
1800
+
1801
+ # random select k residues within the selected pocket
1802
+ selected_pocket_res_mask = torch.zeros(self.asymid.shape[0], dtype=torch.bool)
1803
+ valid_pockets = torch.nonzero(selected_all_pocket_res, as_tuple=True)[0]
1804
+ selected_pocket_res_indices = torch.randperm(
1805
+ valid_pockets.shape[0], generator=self.generator
1806
+ )[:size]
1807
+ selected_pocket_res_mask[valid_pockets[selected_pocket_res_indices]] = True
1808
+
1809
+ binder_pocket_mask_mat = (self.asymid == binder_asym_id)[
1810
+ :, None
1811
+ ] * selected_pocket_res_mask[None, :]
1812
+
1813
+ # set distance threshold to selected pocket
1814
+ # [binder_mask, pocket_mask]
1815
+ binder_dist_mat = max_distance_value[selected_binder][selected_pocket_res_mask][
1816
+ None, :
1817
+ ].expand((self.asymid == binder_asym_id).sum(), -1)
1818
+
1819
+ pocket_dist_feature[binder_pocket_mask_mat] = binder_dist_mat.reshape(-1)
1820
+
1821
+ return pocket_dist_feature
1822
+
1823
+ def generate(
1824
+ self,
1825
+ max_distance_range: dict[str, tuple[float, float]],
1826
+ feature_type: str,
1827
+ size: Any,
1828
+ distance_type: Any,
1829
+ spec_binder_asym_id: int = None,
1830
+ **_, # do not use
1831
+ ) -> tuple[torch.Tensor, set[int]]:
1832
+ """
1833
+ training & evaluation
1834
+ """
1835
+ constrained_tokens = set()
1836
+ if size == 0 or spec_binder_asym_id == -1:
1837
+ pocket_dist_feature = self.encode(
1838
+ torch.full(
1839
+ (self.asymid.shape[0], self.asymid.shape[0], 1),
1840
+ fill_value=self.pad_value,
1841
+ dtype=torch.float32,
1842
+ ),
1843
+ feature_type=feature_type,
1844
+ ) # [..., N_token, 1]
1845
+
1846
+ return pocket_dist_feature, constrained_tokens
1847
+ self._get_generation_basics(distance_type=distance_type)
1848
+
1849
+ # get all binder-pocket pairs & masks
1850
+ n_token = len(self.asymid)
1851
+ asym_list = torch.unique(self.asymid, sorted=True)
1852
+ n_asym = len(asym_list)
1853
+ binder_pocket_valid_masks = torch.zeros((n_asym, n_token), dtype=torch.bool)
1854
+ max_distance_value = torch.zeros((n_asym, n_token), dtype=torch.float32)
1855
+ for binder_pocket_type, max_d_range in max_distance_range.items():
1856
+ # generate max_distance_mask for different binder_pocket type
1857
+ max_distance_threshold = torch.zeros(1).uniform_(*max_d_range).item()
1858
+
1859
+ # get all valid binder-pocket pairs
1860
+ valid_pocket_token_mask = self.get_valid_pocket_feature(
1861
+ binder_pocket_type,
1862
+ max_distance_threshold=max_distance_threshold,
1863
+ asym_list=asym_list,
1864
+ )
1865
+ binder_pocket_valid_masks |= valid_pocket_token_mask
1866
+ max_distance_value[valid_pocket_token_mask] = max_distance_threshold
1867
+
1868
+ # random select k residues from pocket, only consider one binder
1869
+ size = self._get_constraint_size(size)
1870
+ sampled_pocket_feature = self._sample_pocket(
1871
+ binder_pocket_valid_masks,
1872
+ size,
1873
+ asym_list,
1874
+ max_distance_value,
1875
+ spec_binder_asym_id,
1876
+ ).unsqueeze(-1)
1877
+
1878
+ # encode the feature
1879
+ pocket_dist_feature = self.encode(
1880
+ feature=sampled_pocket_feature, feature_type=feature_type
1881
+ )
1882
+ # Track constrained tokens
1883
+ nonzero_indices = torch.nonzero(pocket_dist_feature)
1884
+ for i, j, _ in nonzero_indices:
1885
+ constrained_tokens.add(i.item())
1886
+ constrained_tokens.add(j.item())
1887
+
1888
+ return pocket_dist_feature, constrained_tokens
1889
+
1890
+ def generate_spec_constraint(
1891
+ self, pocket_specifics, feature_type: str
1892
+ ) -> tuple[torch.Tensor, set[int]]:
1893
+ """
1894
+ parse constraint from user specification
1895
+ """
1896
+ pocket_dist_mat = torch.full(
1897
+ (self.asymid.shape[0], self.asymid.shape[0], 1),
1898
+ fill_value=self.pad_value,
1899
+ dtype=torch.float32,
1900
+ )
1901
+
1902
+ for binder_token_list, pocket_token_list, max_distance in pocket_specifics:
1903
+ pocket_token_id = pocket_token_list[
1904
+ torch.randint(
1905
+ high=pocket_token_list.shape[0], size=(1,), generator=self.generator
1906
+ ).item()
1907
+ ]
1908
+
1909
+ binder_token_idx = torch.tensor(binder_token_list)[:, None]
1910
+ pocket_dist_mat[binder_token_idx, pocket_token_id, 0] = max_distance
1911
+
1912
+ pocket_dist_feature = self.encode(
1913
+ feature=pocket_dist_mat, feature_type=feature_type
1914
+ )
1915
+
1916
+ constrained_tokens = set()
1917
+ nonzero_indices = torch.nonzero(pocket_dist_feature)
1918
+ for binder_token_id, pocket_token_id, _ in nonzero_indices:
1919
+ constrained_tokens.add(binder_token_id.item())
1920
+ constrained_tokens.add(pocket_token_id.item())
1921
+ return pocket_dist_feature, constrained_tokens
1922
+
1923
+ def _get_constraint_size(self, size) -> list[int]:
1924
+ """
1925
+ If size is not fixed, then we generate it randomly for each group
1926
+ """
1927
+ if size < 1 and size > 0:
1928
+ # TODO: to be determined!
1929
+ samples = torch.zeros(1).geometric_(size).int().tolist()[0]
1930
+ return samples
1931
+ elif size >= 1 and isinstance(size, int):
1932
+ return size
1933
+ else:
1934
+ raise NotImplementedError
1935
+
1936
+
1937
+ class ContactAtomFeaturizer(ContactFeaturizer):
1938
+ def __init__(
1939
+ self,
1940
+ token_array: TokenArray,
1941
+ atom_array: AtomArray,
1942
+ pad_value: float = 0,
1943
+ generator=None,
1944
+ ):
1945
+ self.token_array = token_array
1946
+ self.atom_array = atom_array
1947
+ self.pad_value = pad_value
1948
+ self.generator = generator
1949
+ self._get_base_info()
1950
+
1951
+ def _get_base_info(self):
1952
+ self.asymid = torch.tensor(self.atom_array.asym_id_int, dtype=torch.long)
1953
+ self.is_ligand = torch.tensor(self.atom_array.is_ligand, dtype=torch.bool)
1954
+ self.is_protein = torch.tensor(self.atom_array.is_protein, dtype=torch.bool)
1955
+ self.entity_type_dict = {"P": self.is_protein, "L": self.is_ligand}
1956
+
1957
+ def _get_generation_basics(self, distance_type="atom"):
1958
+ # is_resolved mask
1959
+ self.token_resolved_mask = torch.tensor(
1960
+ self.atom_array.is_resolved, dtype=torch.bool
1961
+ )
1962
+ self.token_resolved_maskmat = (
1963
+ self.token_resolved_mask[:, None] * self.token_resolved_mask[None, :]
1964
+ )
1965
+
1966
+ # distance matrix
1967
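+ # Despite the attribute name, token_distance here holds atom-atom distances, since this
+ # featurizer treats individual atoms as tokens for contact purposes.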
+ self.token_distance = torch.tensor(
1968
+ cdist(self.atom_array.coord, self.atom_array.coord), dtype=torch.float64
1969
+ )
1970
+
1971
+ def generate_spec_constraint(
1972
+ self,
1973
+ contact_specifics: list[tuple[int, int, float, float]],
1974
+ feature_type: str,
1975
+ shape: tuple[int, int, int],
1976
+ ) -> tuple[torch.Tensor, set[int]]:
1977
+ """
1978
+ parse constraint from user specification
1979
+ """
1980
+
1981
+ contact_feature = torch.full(
1982
+ shape, fill_value=self.pad_value, dtype=torch.float32
1983
+ )
1984
+ for token_id_1, token_id_2, max_distance, min_distance in contact_specifics:
1985
+ contact_feature[token_id_1, token_id_2, 1] = max_distance
1986
+ contact_feature[token_id_2, token_id_1, 1] = max_distance
1987
+ contact_feature[token_id_1, token_id_2, 0] = min_distance
1988
+ contact_feature[token_id_2, token_id_1, 0] = min_distance
1989
+
1990
+ contact_feature = self.encode(
1991
+ feature=contact_feature, feature_type=feature_type
1992
+ )
1993
+ constrained_tokens = set()
1994
+ for token_id_1, token_id_2, _, _ in contact_specifics:
1995
+ constrained_tokens.add(token_id_1)
1996
+ constrained_tokens.add(token_id_2)
1997
+ return contact_feature, constrained_tokens
1998
+
1999
+ def get_real_distance(self, atom_idx_1: int, atom_idx_2: int) -> float:
2000
+ return self.token_distance[atom_idx_1, atom_idx_2]
2001
+
2002
+
2003
+ class SubStructureFeaturizer(ConstraintFeaturizer):
2004
+ def __init__(self, **kargs):
2005
+ super().__init__(**kargs)
2006
+ # Default distance bins
2007
+ self.distance_bins = torch.tensor([0, 4, 8, 16, torch.inf])
2008
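+ # With bins [0, 4, 8, 16, inf) and bucketize(..., right=True) - 1, a pairwise distance d maps
+ # to bin 0 for d < 4, 1 for 4 <= d < 8, 2 for 8 <= d < 16, and 3 for d >= 16 (Angstrom assumed).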
+
2009
+ def _add_coordinate_noise(self, coord_noise_scale: float = 0.05) -> torch.Tensor:
2010
+ """Add Gaussian noise to coordinates"""
2011
+ # Convert coordinates to tensor first
2012
+ coords = torch.tensor(self.atom_array.coord)
2013
+ if coord_noise_scale <= 0:
2014
+ return coords
2015
+ noisy_coords = (
2016
+ coords
2017
+ + torch.randn(
2018
+ coords.shape,
2019
+ generator=self.generator,
2020
+ )
2021
+ * coord_noise_scale
2022
+ )
2023
+
2024
+ return noisy_coords
2025
+
2026
+ def _get_distance_feature(
2027
+ self,
2028
+ selected_token_mask: torch.Tensor,
2029
+ coord_noise_scale: float = 0.05,
2030
+ feature_type: str = "one_hot",
2031
+ ) -> torch.Tensor:
2032
+ """
2033
+ Encode pairwise distances between selected tokens into binned one-hot features
2034
+
2035
+ Parameters:
2036
+ - selected_token_mask: Boolean mask of selected tokens
2037
+ - coord_noise_scale: Scale of Gaussian noise to add to coordinates
2038
+ - feature_type: "one_hot" or "continuous"
2039
+
2040
+ Returns:
2041
+ - distance_feature: For one_hot: [..., N_token, N_token] tensor with bin indices
2042
+ For continuous: [..., N_token, N_token] tensor with distance values
2043
+ """
2044
+ n_tokens = len(self.asymid)
2045
+
2046
+ # Initialize output tensor
2047
+ distance_feature = torch.full(
2048
+ (n_tokens, n_tokens),
2049
+ fill_value=-1 if feature_type == "one_hot" else self.pad_value,
2050
+ dtype=torch.long if feature_type == "one_hot" else torch.float32,
2051
+ )
2052
+
2053
+ # Get selected token indices
2054
+ selected_tokens = torch.nonzero(selected_token_mask).squeeze(-1)
2055
+ if len(selected_tokens) <= 1:
2056
+ return distance_feature
2057
+
2058
+ # Add noise to coordinates and calculate distances
2059
+ noisy_coords = self._add_coordinate_noise(coord_noise_scale)
2060
+
2061
+ # Get token center atom indices
2062
+ token_centre_atom_indices = torch.tensor(
2063
+ self.token_array.get_annotation("centre_atom_index"), dtype=torch.long
2064
+ )
2065
+ selected_indices = token_centre_atom_indices[selected_tokens.long()]
2066
+ selected_coords = noisy_coords[selected_indices]
2067
+
2068
+ # Calculate pairwise distances between selected tokens
2069
+ pairwise_distances = torch.cdist(selected_coords, selected_coords)
2070
+
2071
+ if feature_type == "one_hot":
2072
+ # Digitize distances into bins
2073
+ binned_distances = (
2074
+ torch.bucketize(pairwise_distances, self.distance_bins, right=True) - 1
2075
+ )
2076
+
2077
+ # Create mask for valid bins
2078
+ valid_bins = binned_distances > 0
2079
+
2080
+ # Create indices for the full matrix
2081
+ rows = selected_tokens.repeat_interleave(len(selected_tokens))
2082
+ cols = selected_tokens.repeat(len(selected_tokens))
2083
+
2084
+ # Get valid indices and their corresponding bin values
2085
+ valid_mask = valid_bins.flatten()
2086
+ valid_rows = rows[valid_mask]
2087
+ valid_cols = cols[valid_mask]
2088
+ valid_bins = binned_distances.flatten()[valid_mask]
2089
+
2090
+ # Fill the distance feature matrix
2091
+ distance_feature[valid_rows, valid_cols] = valid_bins
2092
+ distance_feature[valid_cols, valid_rows] = valid_bins # symmetric
2093
+
2094
+ else: # continuous
2095
+ # Create indices for the full matrix
2096
+ rows = selected_tokens.repeat_interleave(len(selected_tokens))
2097
+ cols = selected_tokens.repeat(len(selected_tokens))
2098
+
2099
+ # Fill the distance feature matrix
2100
+ distance_feature[rows, cols] = pairwise_distances.flatten()
2101
+ distance_feature[cols, rows] = pairwise_distances.flatten() # symmetric
2102
+
2103
+ return distance_feature
2104
+
2105
+ def get_valid_substructure_feature(
2106
+ self, mol_type_pairs: dict[str, float]
2107
+ ) -> torch.Tensor:
2108
+ """
2109
+ Find valid chains that form interfaces based on mol_type_pairs
2110
+
2111
+ Parameters:
2112
+ - mol_type_pairs: Dict of type pairs (e.g. {'PP': threshold, 'LP': threshold})
2113
+ First type in pair is the one to be selected as substructure, threshold is the distance threshold to determine the interface
2114
+ Returns:
2115
+ - valid_asym_ids: List of valid asym_ids that can form interfaces
2116
+ """
2117
+ valid_asym_ids = []
2118
+
2119
+ for type_pair, threshold in mol_type_pairs.items():
2120
+ # Parse type pair (e.g. 'PP' -> ('P','P'))
2121
+ query_type, key_type = type_pair
2122
+
2123
+ # Get type masks
2124
+ valid_type_mask = (
2125
+ self.entity_type_dict[query_type][:, None]
2126
+ & self.entity_type_dict[key_type][None, :]
2127
+ )
2128
+
2129
+ # Get different chain mask
2130
+ diff_chain_mask = self.asymid[:, None] != self.asymid[None, :]
2131
+
2132
+ # Get distance mask
2133
+ dist_mask = (
2134
+ (self.token_distance <= threshold)
2135
+ & self.token_resolved_maskmat
2136
+ & diff_chain_mask
2137
+ & valid_type_mask
2138
+ )
2139
+
2140
+ # Find chains that form interfaces
2141
+ for asym_id in torch.unique(self.asymid):
2142
+ # Only consider chains of query_type
2143
+ if not self.entity_type_dict[query_type][self.asymid == asym_id].any():
2144
+ continue
2145
+
2146
+ # Check if this chain forms interface with any chain of key_type
2147
+ cur_chain_mask = self.asymid == asym_id
2148
+ other_chains_mask = ~cur_chain_mask
2149
+
2150
+ has_interface = (dist_mask[cur_chain_mask][:, other_chains_mask]).any()
2151
+
2152
+ if has_interface:
2153
+ valid_asym_ids.append(asym_id)
2154
+
2155
+ return torch.tensor(valid_asym_ids)
2156
+
2157
+ def _get_constraint_size(self, size: Any) -> int:
2158
+ """
2159
+ If size is not fixed, then we generate it randomly
2160
+
2161
+ Args:
2162
+ - size: If >= 1, used directly as the number of chains to select; if 0 < size < 1, used as the success probability of a geometric distribution from which the chain count is sampled
2163
+ Returns:
2164
+ - size: Number of chains to select
2165
+ """
2166
+ if size < 1 and size > 0:
2167
+ # For 0 < size < 1, sample the chain count from a geometric distribution with parameter size
2168
+ return torch.rand(1).geometric_(size, generator=self.generator).int().item()
2169
+ elif size >= 1 and isinstance(size, (int, float)):
2170
+ # For size >= 1, return the size directly
2171
+ return int(size)
2172
+ else:
2173
+ raise NotImplementedError(f"Invalid size: {size}")
2174
+
2175
+ def _sample_substructure(
2176
+ self,
2177
+ valid_asym_ids: torch.Tensor,
2178
+ size: Any,
2179
+ ratios: dict[str, list[float]],
2180
+ spec_asym_id: Any = None,
2181
+ ) -> torch.Tensor:
2182
+ """
2183
+ Sample substructure based on size and ratios
2184
+
2185
+ Parameters:
2186
+ - valid_asym_ids: List of valid asym_ids
2187
+ - size: Total number of chains to select
2188
+ - ratios: Dict containing:
2189
+ - full: List of possible proportions for full chain selection
2190
+ - partial: Proportion of tokens to select for partial chains [0,1]
2191
+ - spec_asym_id: If provided, select from this specific chain
2192
+ """
2193
+ selected_token_mask = torch.zeros(len(self.asymid), dtype=torch.bool)
2194
+
2195
+ if len(valid_asym_ids) == 0:
2196
+ return selected_token_mask
2197
+
2198
+ # Handle spec_asym_id case
2199
+ if spec_asym_id is not None:
2200
+ if spec_asym_id not in valid_asym_ids:
2201
+ return selected_token_mask
2202
+ # Use partial_ratio for spec_asym_id
2203
+ chain_mask = (self.asymid == spec_asym_id) & self.token_resolved_mask
2204
+ chain_tokens = torch.nonzero(chain_mask).squeeze(-1)  # squeeze(-1) keeps a 1-D tensor even when a single token matches
2205
+ if len(chain_tokens) == 0: # Skip if no resolved tokens
2206
+ return selected_token_mask
2207
+ num_tokens = max(1, int(len(chain_tokens) * ratios["partial"]))
2208
+ selected_tokens = chain_tokens[
2209
+ torch.randperm(len(chain_tokens), generator=self.generator)[:num_tokens]
2210
+ ]
2211
+ selected_token_mask[selected_tokens] = True
2212
+ return selected_token_mask
2213
+
2214
+ # Regular case: sample based on size and ratios
2215
+ if size == 0:
2216
+ return selected_token_mask
2217
+
2218
+ # Randomly select full chain ratio from the list
2219
+ full_ratio_idx = torch.randint(
2220
+ len(ratios["full"]), (1,), generator=self.generator
2221
+ ).item()
2222
+ full_ratio = ratios["full"][full_ratio_idx]
2223
+
2224
+ # Calculate number of chains for full and partial selection
2225
+ num_full_chains = min(int(size * full_ratio), len(valid_asym_ids))
2226
+ num_partial_chains = min(
2227
+ size - num_full_chains, len(valid_asym_ids) - num_full_chains
2228
+ )
2229
+
2230
+ # Randomly shuffle and split valid_asym_ids
2231
+ shuffled_indices = torch.randperm(len(valid_asym_ids), generator=self.generator)
2232
+ full_chain_ids = valid_asym_ids[shuffled_indices[:num_full_chains]]
2233
+ partial_chain_ids = valid_asym_ids[
2234
+ shuffled_indices[num_full_chains : num_full_chains + num_partial_chains]
2235
+ ]
2236
+
2237
+ # Select full chains
2238
+ for asym_id in full_chain_ids:
2239
+ chain_mask = (self.asymid == asym_id) & self.token_resolved_mask
2240
+ selected_token_mask |= chain_mask
2241
+ # Select partial chains
2242
+ for asym_id in partial_chain_ids:
2243
+ chain_mask = (self.asymid == asym_id) & self.token_resolved_mask
2244
+ chain_tokens = torch.nonzero(chain_mask).squeeze(-1)
2245
+ if len(chain_tokens) == 0: # Skip if no resolved tokens
2246
+ continue
2247
+ num_tokens = max(
2248
+ 1,
2249
+ int(len(chain_tokens) * torch.rand(1, generator=self.generator).item()),
2250
+ )
2251
+ selected_tokens = chain_tokens[
2252
+ torch.randperm(len(chain_tokens), generator=self.generator)[:num_tokens]
2253
+ ]
2254
+ selected_token_mask[selected_tokens] = True
2255
+
2256
+ return selected_token_mask
2257
+
2258
+ def generate(
2259
+ self,
2260
+ mol_type_pairs: dict[str, float],
2261
+ feature_type: str,
2262
+ size: Any,
2263
+ ratios: dict[str, list[float]],
2264
+ coord_noise_scale: float,
2265
+ spec_asym_id: int = None,
2266
+ ) -> tuple[torch.Tensor, set[int]]:
2267
+ """
2268
+ Generate substructure features
2269
+
2270
+ Parameters:
2271
+ - mol_type_pairs: Dict of type pairs and their distance thresholds
2272
+ - feature_type: Type of feature encoding
2273
+ - size: Number of chains to select
2274
+ - ratios: Dict containing:
2275
+ - full: List of possible proportions for full chain selection
2276
+ - partial: Proportion of tokens to select for partial chains [0,1]
2277
+ - coord_noise_scale: Scale of Gaussian noise to add to coordinates
2278
+ - spec_asym_id: Specific chain to select from
2279
+ """
2280
+ constrained_tokens = set()
2281
+ if size == 0 or spec_asym_id == -1:
2282
+ distance_feature = torch.full(
2283
+ (self.asymid.shape[0], self.asymid.shape[0]),
2284
+ fill_value=-1 if feature_type == "one_hot" else self.pad_value,
2285
+ dtype=torch.long if feature_type == "one_hot" else torch.float32,
2286
+ )
2287
+ return (
2288
+ self.encode(
2289
+ feature=distance_feature,
2290
+ feature_type=feature_type,
2291
+ num_classes=len(self.distance_bins) - 1,
2292
+ ),
2293
+ constrained_tokens,
2294
+ )
2295
+
2296
+ self._get_generation_basics()
2297
+
2298
+ # Get valid asym_ids that form interfaces
2299
+ valid_asym_ids = self.get_valid_substructure_feature(mol_type_pairs)
2300
+
2301
+ if len(valid_asym_ids) == 0:
2302
+ distance_feature = torch.full(
2303
+ (self.asymid.shape[0], self.asymid.shape[0]),
2304
+ fill_value=-1 if feature_type == "one_hot" else self.pad_value,
2305
+ dtype=torch.long if feature_type == "one_hot" else torch.float32,
2306
+ )
2307
+ return (
2308
+ self.encode(
2309
+ feature=distance_feature,
2310
+ feature_type=feature_type,
2311
+ num_classes=len(self.distance_bins) - 1,
2312
+ ),
2313
+ constrained_tokens,
2314
+ )
2315
+
2316
+ size = self._get_constraint_size(size)
2317
+
2318
+ # Sample tokens based on ratio and spec_asym_id
2319
+ selected_token_mask = self._sample_substructure(
2320
+ valid_asym_ids, size, ratios, spec_asym_id
2321
+ )
2322
+
2323
+ # Get distance features (bin indices)
2324
+ distance_feature = self._get_distance_feature(
2325
+ selected_token_mask, coord_noise_scale, feature_type
2326
+ )
2327
+
2328
+ # Track constrained tokens
2329
+ constrained_tokens = set(torch.nonzero(selected_token_mask).flatten().tolist())
2330
+
2331
+ # Encode using base class method
2332
+ return (
2333
+ self.encode(
2334
+ feature=distance_feature,
2335
+ feature_type=feature_type,
2336
+ num_classes=len(self.distance_bins) - 1,
2337
+ ),
2338
+ constrained_tokens,
2339
+ )
2340
+
2341
+ def analyze_features(self, feature_tensor: torch.Tensor) -> dict[str, Any]:
2342
+ """
2343
+ Analyze the features generated by the generate method
2344
+ """
2345
+ is_one_hot = len(feature_tensor.shape) == 3
2346
+ n_tokens = feature_tensor.shape[0]
2347
+ if is_one_hot:
2348
+ # For one-hot features
2349
+ # A token is active if it has any non-zero one-hot vector
2350
+ has_valid_distance = torch.any(feature_tensor, dim=-1)
2351
+ active_tokens = torch.any(has_valid_distance, dim=1)
2352
+
2353
+ # Get distribution of distance bins (excluding zero vectors)
2354
+ valid_distances = feature_tensor[has_valid_distance]
2355
+ bin_counts = torch.sum(valid_distances, dim=0) # Sum over all valid pairs
2356
+ distance_stats = {
2357
+ f"bin_{i}_count": count.item() for i, count in enumerate(bin_counts)
2358
+ }
2359
+
2360
+ else:
2361
+ # For continuous features
2362
+ # A token is active if it has any non-zero distance to other tokens
2363
+ has_valid_distance = feature_tensor != 0
2364
+ active_tokens = torch.any(has_valid_distance, dim=1)
2365
+
2366
+ # Get distribution of actual distances (excluding zeros)
2367
+ valid_distances = feature_tensor[has_valid_distance]
2368
+ if len(valid_distances) > 0:
2369
+ distance_stats = {
2370
+ "mean": valid_distances.mean().item(),
2371
+ "std": valid_distances.std().item(),
2372
+ "min": valid_distances.min().item(),
2373
+ "max": valid_distances.max().item(),
2374
+ }
2375
+ else:
2376
+ distance_stats = {"mean": 0.0, "std": 0.0, "min": 0.0, "max": 0.0}
2377
+
2378
+ stats = {
2379
+ "num_active_tokens": active_tokens.sum().item(),
2380
+ "active_token_ratio": (active_tokens.sum().item() / n_tokens),
2381
+ "distance_distribution": distance_stats,
2382
+ }
2383
+
2384
+ return stats
2385
+
2386
+ def generate_spec_constraint(self, substructure_specifics, feature_type):
2387
+ """Parse the substructure constraint from a user specification.
2388
+
2389
+ Args:
2390
+ substructure_specifics (Dict): dictionary specifying fixed tokens
2391
+ token_indices: List[int]
2392
+ token_coords: List[List[float]]
2393
+ feature_type: "one_hot" by default
2394
+ """
2395
+ distance_feature_mat = torch.full(
2396
+ (self.asymid.shape[0], self.asymid.shape[0]),
2397
+ fill_value=-1 if feature_type == "one_hot" else self.pad_value,
2398
+ dtype=torch.long if feature_type == "one_hot" else torch.float32,
2399
+ )
2400
+
2401
+ if len(substructure_specifics["token_indices"]) > 0:
2402
+ token_indices = torch.tensor(substructure_specifics["token_indices"])
2403
+ coords = torch.tensor(substructure_specifics["token_coords"])
2404
+
2405
+ distance_mat = torch.cdist(coords, coords)
2406
+ distance_feature_mat[token_indices[:, None], token_indices[None, :]] = (
2407
+ distance_mat
2408
+ )
2409
+
2410
+ distance_feature_mat = self.encode(
2411
+ distance_feature_mat, feature_type, num_classes=len(self.distance_bins) - 1
2412
+ )
2413
+
2414
+ return distance_feature_mat
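A minimal standalone sketch of how SubStructureFeaturizer bins noisy pairwise distances into one-hot features (illustrative only; the coordinates are random toy data and the clamp is an extra safeguard not present in the class above):

import torch
import torch.nn.functional as F

distance_bins = torch.tensor([0.0, 4.0, 8.0, 16.0, float("inf")])
coords = torch.randn(5, 3) * 10.0                      # toy token-centre coordinates
noisy_coords = coords + torch.randn(5, 3) * 0.05       # Gaussian coordinate noise
dists = torch.cdist(noisy_coords, noisy_coords)        # [5, 5] pairwise distances

bin_idx = torch.bucketize(dists, distance_bins, right=True) - 1
bin_idx = bin_idx.clamp(0, len(distance_bins) - 2)     # keep bin indices in range
one_hot = F.one_hot(bin_idx, num_classes=len(distance_bins) - 1)
print(one_hot.shape)                                   # torch.Size([5, 5, 4])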
protenix/data/data_pipeline.py ADDED
@@ -0,0 +1,373 @@
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import logging
16
+ import os
17
+ from collections import defaultdict
18
+ from pathlib import Path
19
+ from typing import Any, Optional, Union
20
+
21
+ import biotite.structure.io as strucio
22
+ import numpy as np
23
+ import pandas as pd
24
+ import torch
25
+ from biotite.structure import AtomArray
26
+
27
+ from protenix.data.msa_featurizer import MSAFeaturizer
28
+ from protenix.data.parser import DistillationMMCIFParser, MMCIFParser
29
+ from protenix.data.tokenizer import AtomArrayTokenizer, TokenArray
30
+ from protenix.utils.cropping import CropData
31
+ from protenix.utils.file_io import load_gzip_pickle
32
+
33
+ torch.multiprocessing.set_sharing_strategy("file_system")
34
+
35
+
36
+ class DataPipeline(object):
37
+ """
38
+ DataPipeline class provides static methods to handle various data processing tasks related to bioassembly structures.
39
+ """
40
+
41
+ @staticmethod
42
+ def get_data_from_mmcif(
43
+ mmcif: Union[str, Path],
44
+ pdb_cluster_file: Union[str, Path, None] = None,
45
+ dataset: str = "WeightedPDB",
46
+ interface_radius: float = 5,
47
+ ) -> tuple[list[dict[str, Any]], dict[str, Any]]:
48
+ """
49
+ Get raw data from mmcif with tokenizer and a list of chains and interfaces for sampling.
50
+
51
+ Args:
52
+ mmcif (Union[str, Path]): The raw mmcif file.
53
+ pdb_cluster_file (Union[str, Path, None], optional): Cluster info txt file. Defaults to None.
54
+ dataset (str, optional): The dataset type, either "WeightedPDB" or "Distillation". Defaults to "WeightedPDB".
55
+ interface_radius (float, optional): The radius of the interface. Defaults to 5.
56
+ Returns:
57
+ tuple[list[dict[str, Any]], dict[str, Any]]:
58
+ sample_indices_list (list[dict[str, Any]]): The sample indices list (each one is a chain or an interface).
59
+ bioassembly_dict (dict[str, Any]): The bioassembly dict with sequence, atom_array, and token_array.
60
+ """
61
+ try:
62
+ if dataset == "WeightedPDB":
63
+ parser = MMCIFParser(mmcif_file=mmcif)
64
+ bioassembly_dict = parser.get_bioassembly()
65
+ elif dataset == "Distillation":
66
+ parser = DistillationMMCIFParser(mmcif_file=mmcif)
67
+ bioassembly_dict = parser.get_structure_dict()
68
+ else:
69
+ raise NotImplementedError(
70
+ 'Unsupported "dataset", please input either "WeightedPDB" or "Distillation".'
71
+ )
72
+
73
+ sample_indices_list = parser.make_indices(
74
+ bioassembly_dict=bioassembly_dict,
75
+ pdb_cluster_file=pdb_cluster_file,
76
+ interface_radius=interface_radius,
77
+ )
78
+ if len(sample_indices_list) == 0:
79
+ # empty indices and AtomArray
80
+ return [], bioassembly_dict
81
+
82
+ atom_array = bioassembly_dict["atom_array"]
83
+ atom_array.set_annotation(
84
+ "resolution", [parser.resolution] * len(atom_array)
85
+ )
86
+
87
+ tokenizer = AtomArrayTokenizer(atom_array)
88
+ token_array = tokenizer.get_token_array()
89
+ bioassembly_dict["msa_features"] = None
90
+ bioassembly_dict["template_features"] = None
91
+
92
+ bioassembly_dict["token_array"] = token_array
93
+ return sample_indices_list, bioassembly_dict
94
+
95
+ except Exception as e:
96
+ logging.warning("Gen data failed for %s due to %s", mmcif, e)
97
+ return [], {}
98
+
99
+ @staticmethod
100
+ def get_label_entity_id_to_asym_id_int(atom_array: AtomArray) -> dict[str, set[int]]:
101
+ """
102
+ Get a dictionary that associates each label_entity_id with its corresponding asym_id_int.
103
+
104
+ Args:
105
+ atom_array (AtomArray): AtomArray object
106
+
107
+ Returns:
108
+ dict[str, set[int]]: label_entity_id mapped to the set of its asym_id_int values
109
+ """
110
+ entity_to_asym_id = defaultdict(set)
111
+ for atom in atom_array:
112
+ entity_id = atom.label_entity_id
113
+ entity_to_asym_id[entity_id].add(atom.asym_id_int)
114
+ return entity_to_asym_id
115
+
116
+ @staticmethod
117
+ def get_data_bioassembly(
118
+ bioassembly_dict_fpath: Union[str, Path],
119
+ ) -> dict[str, Any]:
120
+ """
121
+ Get the bioassembly dict.
122
+
123
+ Args:
124
+ bioassembly_dict_fpath (Union[str, Path]): The path to the bioassembly dictionary file.
125
+
126
+ Returns:
127
+ dict[str, Any]: The bioassembly dict with sequence, atom_array and token_array.
128
+
129
+ Raises:
130
+ AssertionError: If the bioassembly dictionary file does not exist.
131
+ """
132
+ assert os.path.exists(
133
+ bioassembly_dict_fpath
134
+ ), f"File does not exist: {bioassembly_dict_fpath}"
135
+ bioassembly_dict = load_gzip_pickle(bioassembly_dict_fpath)
136
+
137
+ return bioassembly_dict
138
+
139
+ @staticmethod
140
+ def _map_ref_chain(
141
+ one_sample: pd.Series, bioassembly_dict: dict[str, Any]
142
+ ) -> list[int]:
143
+ """
144
+ Map the chain or interface chain_x_id to the reference chain asym_id.
145
+
146
+ Args:
147
+ one_sample (pd.Series): A dict of one chain or interface from indices list.
148
+ bioassembly_dict (dict[str, Any]): The bioassembly dict with sequence, atom_array and token_array.
149
+
150
+ Returns:
151
+ list[int]: A list of asym_id_int of the chosen chain or interface, length 1 or 2.
152
+ """
153
+ atom_array = bioassembly_dict["atom_array"]
154
+ ref_chain_indices = []
155
+ for chain_id_field in ["chain_1_id", "chain_2_id"]:
156
+ chain_id = one_sample[chain_id_field]
157
+ assert np.isin(
158
+ chain_id, np.unique(atom_array.chain_id)
159
+ ), f"PDB {bioassembly_dict['pdb_id']} {chain_id_field}:{chain_id} not in atom_array"
160
+ chain_asym_id = atom_array[atom_array.chain_id == chain_id].asym_id_int[0]
161
+ ref_chain_indices.append(chain_asym_id)
162
+ if one_sample["type"] == "chain":
163
+ break
164
+ return ref_chain_indices
165
+
166
+ @staticmethod
167
+ def get_msa_raw_features(
168
+ bioassembly_dict: dict[str, Any],
169
+ selected_indices: np.ndarray,
170
+ msa_featurizer: Optional[MSAFeaturizer],
171
+ ) -> Optional[dict[str, np.ndarray]]:
172
+ """
173
+ Get tokenized MSA features of the bioassembly
174
+
175
+ Args:
176
+ bioassembly_dict (Mapping[str, Any]): The bioassembly dict with sequence, atom_array and token_array.
177
+ selected_indices (torch.Tensor): Cropped token indices.
178
+ msa_featurizer (MSAFeaturizer): MSAFeaturizer instance.
179
+
180
+ Returns:
181
+ Optional[dict[str, np.ndarray]]: The tokenized MSA features of the bioassembly.
182
+ """
183
+ if msa_featurizer is None:
184
+ return None
185
+
186
+ entity_to_asym_id_int = dict(
187
+ DataPipeline.get_label_entity_id_to_asym_id_int(
188
+ bioassembly_dict["atom_array"]
189
+ )
190
+ )
191
+
192
+ msa_feats = msa_featurizer(
193
+ bioassembly_dict=bioassembly_dict,
194
+ selected_indices=selected_indices,
195
+ entity_to_asym_id_int=entity_to_asym_id_int,
196
+ )
197
+
198
+ return msa_feats
199
+
200
+ @staticmethod
201
+ def get_template_raw_features(
202
+ bioassembly_dict: dict[str, Any],
203
+ selected_indices: np.ndarray,
204
+ template_featurizer: None,
205
+ ) -> Optional[dict[str, np.ndarray]]:
206
+ """
207
+ Get tokenized template features of the bioassembly.
208
+
209
+ Args:
210
+ bioassembly_dict (dict[str, Any]): The bioassembly dict with sequence, atom_array and token_array.
211
+ selected_indices (np.ndarray): Cropped token indices.
212
+ template_featurizer (None): Placeholder for the template featurizer.
213
+
214
+ Returns:
215
+ Optional[dict[str, np.ndarray]]: The tokenized template features of the bioassembly,
216
+ or None if the template featurizer is not provided.
217
+ """
218
+ if template_featurizer is None:
219
+ return None
220
+
221
+ entity_to_asym_id_int = dict(
222
+ DataPipeline.get_label_entity_id_to_asym_id_int(
223
+ bioassembly_dict["atom_array"]
224
+ )
225
+ )
226
+
227
+ template_feats = template_featurizer(
228
+ bioassembly_dict=bioassembly_dict,
229
+ selected_indices=selected_indices,
230
+ entity_to_asym_id_int=entity_to_asym_id_int,
231
+ )
232
+ return template_feats
233
+
234
+ @staticmethod
235
+ def crop(
236
+ one_sample: pd.Series,
237
+ bioassembly_dict: dict[str, Any],
238
+ crop_size: int,
239
+ msa_featurizer: Optional[MSAFeaturizer],
240
+ template_featurizer: None,
241
+ method_weights: list[float] = [0.2, 0.4, 0.4],
242
+ contiguous_crop_complete_lig: bool = False,
243
+ spatial_crop_complete_lig: bool = False,
244
+ drop_last: bool = False,
245
+ remove_metal: bool = False,
246
+ ) -> tuple[str, TokenArray, AtomArray, dict[str, Any], dict[str, Any], int]:
247
+ """
248
+ Crop data based on the crop size and reference chain indices.
249
+
250
+ Args:
251
+ one_sample (pd.Series): A dict of one chain or interface from indices list.
252
+ bioassembly_dict (dict[str, Any]): A dict of bioassembly dict with sequence, atom_array and token_array.
253
+ crop_size (int): the crop size.
254
+ msa_featurizer (Optional[MSAFeaturizer]): The MSA featurizer; if None, MSA features are skipped.
255
+ template_featurizer (None): Placeholder for the template featurizer.
256
+ method_weights (list[float]): The weights corresponding to these three cropping methods:
257
+ ["ContiguousCropping", "SpatialCropping", "SpatialInterfaceCropping"].
258
+ contiguous_crop_complete_lig (bool): Whether to crop the complete ligand in ContiguousCropping method.
259
+ spatial_crop_complete_lig (bool): Whether to crop the complete ligand in SpatialCropping method.
260
+ drop_last (bool): Whether to drop the last fragment in ContiguousCropping.
261
+ remove_metal (bool): Whether to remove metal atoms from the crop.
262
+
263
+ Returns:
264
+ tuple[str, TokenArray, AtomArray, dict[str, Any], dict[str, Any], int]:
265
+ crop_method (str): The crop method.
266
+ cropped_token_array (TokenArray): TokenArray after cropping.
267
+ cropped_atom_array (AtomArray): AtomArray after cropping.
268
+ cropped_msa_features (dict[str, Any]): The cropped msa features.
269
+ cropped_template_features (dict[str, Any]): The cropped template features.
+ reference_token_index (int): Index of the reference token used for cropping (-1 when no cropping is applied).
270
+ """
271
+ if crop_size <= 0:
272
+ selected_indices = None
273
+ # Prepare msa
274
+ msa_features = DataPipeline.get_msa_raw_features(
275
+ bioassembly_dict=bioassembly_dict,
276
+ selected_indices=selected_indices,
277
+ msa_featurizer=msa_featurizer,
278
+ )
279
+ # Prepare template
280
+ template_features = DataPipeline.get_template_raw_features(
281
+ bioassembly_dict=bioassembly_dict,
282
+ selected_indices=selected_indices,
283
+ template_featurizer=template_featurizer,
284
+ )
285
+ return (
286
+ "no_crop",
287
+ bioassembly_dict["token_array"],
288
+ bioassembly_dict["atom_array"],
289
+ msa_features or {},
290
+ template_features or {},
291
+ -1,
292
+ )
293
+
294
+ ref_chain_indices = DataPipeline._map_ref_chain(
295
+ one_sample=one_sample, bioassembly_dict=bioassembly_dict
296
+ )
297
+
298
+ crop = CropData(
299
+ crop_size=crop_size,
300
+ ref_chain_indices=ref_chain_indices,
301
+ token_array=bioassembly_dict["token_array"],
302
+ atom_array=bioassembly_dict["atom_array"],
303
+ method_weights=method_weights,
304
+ contiguous_crop_complete_lig=contiguous_crop_complete_lig,
305
+ spatial_crop_complete_lig=spatial_crop_complete_lig,
306
+ drop_last=drop_last,
307
+ remove_metal=remove_metal,
308
+ )
309
+ # Get crop method
310
+ crop_method = crop.random_crop_method()
311
+ # Get crop indices based crop method
312
+ selected_indices, reference_token_index = crop.get_crop_indices(
313
+ crop_method=crop_method
314
+ )
315
+ # Prepare msa
316
+ msa_features = DataPipeline.get_msa_raw_features(
317
+ bioassembly_dict=bioassembly_dict,
318
+ selected_indices=selected_indices,
319
+ msa_featurizer=msa_featurizer,
320
+ )
321
+ # Prepare template
322
+ template_features = DataPipeline.get_template_raw_features(
323
+ bioassembly_dict=bioassembly_dict,
324
+ selected_indices=selected_indices,
325
+ template_featurizer=template_featurizer,
326
+ )
327
+
328
+ (
329
+ cropped_token_array,
330
+ cropped_atom_array,
331
+ cropped_msa_features,
332
+ cropped_template_features,
333
+ ) = crop.crop_by_indices(
334
+ selected_token_indices=selected_indices,
335
+ msa_features=msa_features,
336
+ template_features=template_features,
337
+ )
338
+
339
+ if crop_method == "ContiguousCropping":
340
+ resolved_atom_num = cropped_atom_array.is_resolved.sum()
341
+ # The criterion of “more than 4 atoms” is chosen arbitrarily.
342
+ assert (
343
+ resolved_atom_num > 4
344
+ ), f"{resolved_atom_num=} <= 4 after ContiguousCropping"
345
+
346
+ return (
347
+ crop_method,
348
+ cropped_token_array,
349
+ cropped_atom_array,
350
+ cropped_msa_features,
351
+ cropped_template_features,
352
+ reference_token_index,
353
+ )
354
+
355
+ @staticmethod
356
+ def save_atoms_to_cif(
357
+ output_cif_file: str, atom_array: AtomArray, include_bonds: bool = False
358
+ ) -> None:
359
+ """
360
+ Save atom array data to a CIF file.
361
+
362
+ Args:
363
+ output_cif_file (str): The output path for saving atom array in cif
364
+ atom_array (AtomArray): The atom array to be saved
365
+ include_bonds (bool): Whether to include bond information in the CIF file. Default is False.
366
+
367
+ """
368
+ strucio.save_structure(
369
+ file_path=output_cif_file,
370
+ array=atom_array,
371
+ data_block=os.path.basename(output_cif_file).replace(".cif", ""),
372
+ include_bonds=include_bonds,
373
+ )
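A small self-contained sketch of the grouping pattern behind DataPipeline.get_label_entity_id_to_asym_id_int (illustrative only; ToyAtom is a stand-in for the biotite AtomArray annotations, not part of the commit):

from collections import defaultdict
from dataclasses import dataclass

@dataclass
class ToyAtom:
    label_entity_id: str
    asym_id_int: int

atoms = [ToyAtom("1", 0), ToyAtom("1", 1), ToyAtom("2", 2), ToyAtom("1", 0)]

# Each entity id collects the set of asym_id_int values of its chains.
entity_to_asym_id = defaultdict(set)
for atom in atoms:
    entity_to_asym_id[atom.label_entity_id].add(atom.asym_id_int)

print(dict(entity_to_asym_id))  # {'1': {0, 1}, '2': {2}}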
protenix/data/dataloader.py ADDED
@@ -0,0 +1,373 @@
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from typing import Iterator, Optional, Sequence
17
+
18
+ import torch
19
+ import torch.distributed as dist
20
+ from ml_collections.config_dict import ConfigDict
21
+ from torch.utils.data import DataLoader, DistributedSampler, Sampler
22
+
23
+ from protenix.data.dataset import Dataset, get_datasets
24
+ from protenix.utils.logger import get_logger
25
+ from protenix.utils.torch_utils import collate_fn_first
26
+
27
+ logger = get_logger(__name__)
28
+
29
+
30
+ class WeightedSampler(Sampler):
31
+ """
32
+ A weighted sampler for a single node.
33
+ """
34
+
35
+ def __init__(
36
+ self,
37
+ weights: Sequence[float],
38
+ num_samples: int,
39
+ replacement: bool,
40
+ seed: int = 0,
41
+ ):
42
+ """
43
+ Args:
44
+ weights (list or numpy array): A list or numpy array of weights.
45
+ num_samples (int): The number of samples to be drawn.
46
+ replacement (bool): Whether sampling is done with replacement.
47
+ seed (int): The seed for the random number generator.
48
+ """
49
+ self.weights = torch.as_tensor(weights, dtype=torch.double)
50
+ self.replacement = replacement
51
+ self.seed = seed
52
+ self.epoch = 0
53
+ self.num_samples = num_samples
54
+
55
+ def __iter__(self) -> Iterator[int]:
56
+ """
57
+ Generates an iterator over the sampled indices.
58
+
59
+ This method uses a random number generator to sample indices based on the provided weights.
60
+ The generator is seeded with the current seed and epoch to ensure reproducibility.
61
+
62
+ Returns:
63
+ iter: An iterator over the sampled indices.
64
+ """
65
+ g = torch.Generator()
66
+ g.manual_seed(self.seed + self.epoch)
67
+ indices = torch.multinomial(
68
+ self.weights, self.num_samples, self.replacement, generator=g
69
+ ).tolist()
70
+ return iter(indices)
71
+
72
+ def __len__(self) -> int:
73
+ return self.num_samples
74
+
75
+ def set_epoch(self, epoch: int) -> None:
76
+ self.epoch = epoch
77
+
78
+
79
+ class DistributedWeightedSampler(DistributedSampler):
80
+ """
81
+ A distributed weighted sampler for multiple nodes.
82
+ """
83
+
84
+ def __init__(
85
+ self,
86
+ dataset: Dataset,
87
+ weights: Sequence[float],
88
+ num_samples: int,
89
+ num_replicas: Optional[int] = None,
90
+ rank: Optional[int] = None,
91
+ replacement: bool = True,
92
+ seed: int = 0,
93
+ ):
94
+ """
95
+ Args:
96
+ dataset (Dataset): The dataset to be loaded.
97
+ weights (list): The weights associated with the dataset.
98
+ num_samples (int): The total number of samples to be drawn.
99
+ num_replicas (int, optional): The number of replicas to use for distributed sampling. Defaults to None.
100
+ rank (int, optional): The rank of the current process in a distributed environment. Defaults to None.
101
+ replacement (bool, optional): Whether to sample with replacement. Defaults to True.
102
+ seed (int, optional): The random seed for reproducibility. Defaults to 0.
103
+ """
104
+ super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=False)
105
+ self.weights = torch.as_tensor(weights, dtype=torch.double)
106
+ self.replacement = replacement
107
+ self.seed = seed
108
+ self.epoch = 0
109
+ self.num_samples = num_samples
110
+
111
+ self.num_samples_per_replica = int(
112
+ math.ceil(self.num_samples / self.num_replicas)
113
+ )
114
+ self.total_size = self.num_samples_per_replica * self.num_replicas
115
+
116
+ def __iter__(self) -> Iterator[int]:
117
+ """
118
+ Generates an iterator over the sampled indices for the current process in a distributed environment.
119
+
120
+ This method uses a random number generator to sample indices based on the provided weights.
121
+ The generator is seeded with the current seed and epoch to ensure reproducibility.
122
+ The sampled indices are then distributed across the replicas according to the rank of the current process.
123
+
124
+ Returns:
125
+ iter: An iterator over the sampled indices for the current process.
126
+ """
127
+ g = torch.Generator()
128
+ g.manual_seed(self.seed + self.epoch)
129
+ indices = torch.multinomial(
130
+ self.weights, self.num_samples, self.replacement, generator=g
131
+ ).tolist()
132
+ indices = indices[self.rank : self.total_size : self.num_replicas]
133
+ return iter(indices)
134
+
135
+ def __len__(self) -> int:
136
+ return self.num_samples // self.num_replicas
137
+
138
+ def set_epoch(self, epoch: int) -> None:
139
+ self.epoch = epoch
140
+
141
+
142
+ class KeySumBalancedSampler(Sampler):
143
+ def __init__(
144
+ self,
145
+ dataset: Dataset,
146
+ key: str,
147
+ value_scale: float = 1.0,
148
+ seed: Optional[int] = None,
149
+ num_replicas: Optional[int] = None,
150
+ rank: Optional[int] = None,
151
+ ):
152
+ """
153
+ This method initializes the KeySumBalancedSampler.
154
+ It calls the `get_balanced_assignments` method to distribute the dataset indices across workers based on the key sum.
155
+
156
+ Args:
157
+ dataset (Dataset): The dataset to sample from.
158
+ key (str): The key by which data will be balanced (integer value).
159
+ value_scale (float): The multiplier of key value when computing the worker assignment weight
160
+ num_replicas (int, optional): Number of processes participating in distributed training.
161
+ rank (int, optional): Rank of the current process within num_replicas.
162
+ """
163
+ self.dataset = dataset
164
+ self.key = key
165
+ self.value_scale = value_scale
166
+ self.seed = seed
167
+ self.num_replicas = num_replicas or dist.get_world_size()
168
+ self.rank = rank or dist.get_rank()
169
+
170
+ # Get indices for this process after balancing by key sum
171
+ worker_assignments = self.get_balanced_assignments()
172
+ self.indices = worker_assignments[self.rank]
173
+
174
+ def get_balanced_assignments(self):
175
+ """
176
+ Distribute dataset indices across workers such that the sum of key values
177
+ assigned to each worker is as balanced as possible.
178
+ """
179
+ if self.seed is not None:
180
+ # deterministically shuffle based on seed
181
+ g = torch.Generator()
182
+ g.manual_seed(self.seed)
183
+ indices = torch.randperm(len(self.dataset), generator=g).tolist()
184
+ else:
185
+ indices = list(range(len(self.dataset)))
186
+
187
+ # pad for len(dataset) to self.num_replicas if len(dataset) < self.num_replicas
188
+ while len(indices) < self.num_replicas:
189
+ indices += indices[: (self.num_replicas - len(indices))]
190
+
191
+ if isinstance(self.dataset.indices_list, list):
192
+ # e.g. recentPDB test set
193
+ dataset_values = [
194
+ x[self.key].astype(int)[0] for x in self.dataset.indices_list
195
+ ]
196
+ else:
197
+ # e.g. posebuster test set
198
+ dataset_values = self.dataset.indices_list[self.key].astype(int).to_numpy()
199
+
200
+ # Sort indices by key value
201
+ key_value_pairs = [(idx, dataset_values[idx]) for idx in indices]
202
+ key_value_pairs.sort(key=lambda x: x[1], reverse=True)
203
+
204
+ # Calculate the target number of samples per worker
205
+ num_samples_per_worker = len(self.dataset) // self.num_replicas
206
+
207
+ # Initialize containers for worker assignments and their current key sum
208
+ worker_assignments = [[] for _ in range(self.num_replicas)]
209
+ worker_sums = [0] * self.num_replicas
210
+ total_samples = num_samples_per_worker * self.num_replicas
211
+
212
+ # Distribute samples using a greedy strategy to balance the key sum
213
+ for idx, key_value in key_value_pairs[:total_samples]:
214
+ # Find the worker with the smallest sum that hasn't exceeded its target sample count
215
+ min_worker = min(
216
+ range(self.num_replicas),
217
+ key=lambda i: (
218
+ worker_sums[i]
219
+ if len(worker_assignments[i]) < num_samples_per_worker
220
+ else float("inf")
221
+ ),
222
+ )
223
+ worker_assignments[min_worker].append(idx)
224
+ worker_sums[min_worker] += key_value**2
225
+
226
+ # Fix any discrepancies in the number of samples
227
+ all_indices = [idx for idx, _ in key_value_pairs]
228
+
229
+ # Assign remaining samples if the dataset isn't divisible perfectly
230
+ if len(all_indices) > total_samples:
231
+ for i in range(len(all_indices) - total_samples):
232
+ worker_assignments[i % self.num_replicas].append(
233
+ all_indices[total_samples + i]
234
+ )
235
+
236
+ # Return the indices assigned to the current worker
237
+ return worker_assignments
238
+
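A standalone sketch of the greedy least-loaded assignment used by get_balanced_assignments above (illustrative only; the token counts are invented and the squared accumulation of the real sampler is simplified to plain sums):

num_tokens = [900, 700, 650, 400, 300, 100]   # toy per-item sizes
num_workers = 3
per_worker = len(num_tokens) // num_workers

assignments = [[] for _ in range(num_workers)]
sums = [0] * num_workers
# Largest items first; each goes to the worker with the smallest running sum
# that still has free slots.
for idx, value in sorted(enumerate(num_tokens), key=lambda x: x[1], reverse=True):
    w = min(
        range(num_workers),
        key=lambda i: sums[i] if len(assignments[i]) < per_worker else float("inf"),
    )
    assignments[w].append(idx)
    sums[w] += value
print(assignments, sums)   # [[0, 5], [1, 4], [2, 3]] [1000, 1000, 1050]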
239
+ def __iter__(self):
240
+ return iter(self.indices)
241
+
242
+ def __len__(self):
243
+ return len(self.indices)
244
+
245
+
246
+ class IterDataLoader(DataLoader):
247
+ """
248
+ Iterative dataloader for a single node.
249
+ """
250
+
251
+ def __init__(self, *args, **kwargs):
252
+ super(IterDataLoader, self).__init__(*args, **kwargs)
253
+ assert self.sampler is not None
254
+ self.counter = 0
255
+
256
+ def __iter__(self):
257
+ self.sampler.set_epoch(self.counter)
258
+ self.counter += 1
259
+ _iterator = super(IterDataLoader, self).__iter__()
260
+ return _iterator
261
+
262
+
263
+ class DistributedDataLoader(DataLoader):
264
+ """
265
+ Distributed dataloader for multiple nodes.
266
+ """
267
+
268
+ def __init__(
269
+ self,
270
+ dataset: Dataset,
271
+ batch_size: int,
272
+ num_workers: int = 0,
273
+ collate_fn=None,
274
+ seed: int = 42,
275
+ drop_last: bool = True,
276
+ shuffle: bool = True,
277
+ sampler: Sampler = None,
278
+ ):
279
+ if sampler is not None:
280
+ self.sampler = sampler
281
+ else:
282
+ self.sampler = DistributedSampler(
283
+ dataset, shuffle=shuffle, seed=seed, drop_last=drop_last
284
+ )
285
+
286
+ super(DistributedDataLoader, self).__init__(
287
+ dataset=dataset,
288
+ batch_size=batch_size,
289
+ num_workers=num_workers,
290
+ sampler=self.sampler,
291
+ shuffle=False,
292
+ collate_fn=collate_fn,
293
+ )
294
+ self.counter = 0
295
+
296
+ def __iter__(self):
297
+ self.sampler.set_epoch(self.counter)
298
+ self.counter += 1
299
+ _iterator = super(DistributedDataLoader, self).__iter__()
300
+ return _iterator
301
+
302
+
303
+ def get_dataloaders(
304
+ configs: ConfigDict, world_size: int, seed: int, error_dir: Optional[str] = None
305
+ ):
306
+ """
307
+ Generate data loaders for training and testing based on the given configurations and seed.
308
+
309
+ Args:
310
+ configs (ConfigDict): An object containing the data configuration information.
311
+ world_size (int): The number of processes in the distributed environment.
312
+ seed (int): The random seed used for data sampling.
313
+ error_dir (str, optional): The directory to store error information. Defaults to None.
314
+
315
+ Returns:
316
+ tuple: A tuple containing the training data loader and a dictionary of testing data loaders.
317
+
318
+ """
319
+ train_dataset, test_datasets = get_datasets(configs, error_dir)
320
+ if world_size > 1:
321
+ train_sampler = DistributedWeightedSampler(
322
+ train_dataset,
323
+ train_dataset.merged_datapoint_weights,
324
+ num_samples=configs.data.epoch_size,
325
+ replacement=True,
326
+ seed=seed,
327
+ )
328
+ train_dl = DistributedDataLoader(
329
+ dataset=train_dataset,
330
+ batch_size=1,
331
+ shuffle=False,
332
+ num_workers=configs.data.num_dl_workers,
333
+ collate_fn=collate_fn_first,
334
+ sampler=train_sampler,
335
+ )
336
+ else:
337
+
338
+ train_sampler = WeightedSampler(
339
+ weights=train_dataset.merged_datapoint_weights,
340
+ num_samples=configs.data.epoch_size,
341
+ replacement=True,
342
+ seed=seed,
343
+ )
344
+ train_dl = IterDataLoader(
345
+ dataset=train_dataset,
346
+ batch_size=1,
347
+ shuffle=False,
348
+ num_workers=configs.data.num_dl_workers,
349
+ collate_fn=collate_fn_first,
350
+ sampler=train_sampler,
351
+ )
352
+
353
+ test_dls = {}
354
+ test_dataset_sizes = {}
355
+ for test_name, test_dataset in test_datasets.items():
356
+ test_dataset_sizes[test_name] = len(test_dataset)
357
+ test_sampler = (
358
+ KeySumBalancedSampler(test_dataset, key="num_tokens", seed=configs.seed)
359
+ if world_size > 1
360
+ else None
361
+ )
362
+ test_dls[test_name] = DataLoader(
363
+ test_dataset,
364
+ batch_size=1,
365
+ shuffle=False,
366
+ num_workers=configs.data.num_dl_workers,
367
+ sampler=test_sampler,
368
+ collate_fn=collate_fn_first,
369
+ )
370
+ logger.info(
371
+ f"train data size: {len(train_dataset)}, test size: {test_dataset_sizes}"
372
+ )
373
+ return train_dl, test_dls
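A minimal sketch of the epoch-seeded weighted sampling that WeightedSampler and DistributedWeightedSampler perform (illustrative only; the weights, sample count, and seed are toy values):

import torch

weights = torch.tensor([1.0, 1.0, 5.0, 0.5], dtype=torch.double)
num_samples, seed = 8, 0

def sample_epoch(epoch: int) -> list[int]:
    # Re-seeding with seed + epoch keeps each epoch reproducible yet distinct.
    g = torch.Generator()
    g.manual_seed(seed + epoch)
    return torch.multinomial(weights, num_samples, replacement=True, generator=g).tolist()

print(sample_epoch(0))
print(sample_epoch(0))  # identical draw
print(sample_epoch(1))  # a different, but reproducible, draw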
protenix/data/dataset.py ADDED
@@ -0,0 +1,1182 @@
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import json
16
+ import os
17
+ import random
18
+ import traceback
19
+ from copy import deepcopy
20
+ from pathlib import Path
21
+ from typing import Any, Callable, Optional, Union
22
+
23
+ import numpy as np
24
+ import pandas as pd
25
+ import torch
26
+ from biotite.structure.atoms import AtomArray
27
+ from ml_collections.config_dict import ConfigDict
28
+ from torch.utils.data import Dataset
29
+
30
+ from protenix.data.constants import EvaluationChainInterface
31
+ from protenix.data.constraint_featurizer import ConstraintFeatureGenerator
32
+ from protenix.data.data_pipeline import DataPipeline
33
+ from protenix.data.featurizer import Featurizer
34
+ from protenix.data.msa_featurizer import MSAFeaturizer
35
+ from protenix.data.tokenizer import TokenArray
36
+ from protenix.data.utils import (
37
+ data_type_transform,
38
+ get_antibody_clusters,
39
+ make_dummy_feature,
40
+ )
41
+ from protenix.utils.cropping import CropData
42
+ from protenix.utils.file_io import read_indices_csv
43
+ from protenix.utils.logger import get_logger
44
+ from protenix.utils.torch_utils import dict_to_tensor
45
+
46
+ logger = get_logger(__name__)
47
+
48
+
49
+ class BaseSingleDataset(Dataset):
50
+ """
51
+ Dataset for a single data source.
52
+ data = self.__getitem__(idx)
53
+ returns a dict of features and labels; the keys and shapes are defined in protenix.data.utils
54
+ """
55
+
56
+ def __init__(
57
+ self,
58
+ mmcif_dir: Union[str, Path],
59
+ bioassembly_dict_dir: Optional[Union[str, Path]],
60
+ indices_fpath: Union[str, Path],
61
+ cropping_configs: dict[str, Any],
62
+ msa_featurizer: Optional[MSAFeaturizer] = None,
63
+ template_featurizer: Optional[Any] = None,
64
+ name: str = None,
65
+ **kwargs,
66
+ ) -> None:
67
+ super(BaseSingleDataset, self).__init__()
68
+
69
+ # Configs
70
+ self.mmcif_dir = mmcif_dir
71
+ self.bioassembly_dict_dir = bioassembly_dict_dir
72
+ self.indices_fpath = indices_fpath
73
+ self.cropping_configs = cropping_configs
74
+ self.name = name
75
+ # General dataset configs
76
+ self.ref_pos_augment = kwargs.get("ref_pos_augment", True)
77
+ self.lig_atom_rename = kwargs.get("lig_atom_rename", False)
78
+ self.reassign_continuous_chain_ids = kwargs.get(
79
+ "reassign_continuous_chain_ids", False
80
+ )
81
+ self.shuffle_mols = kwargs.get("shuffle_mols", False)
82
+ self.shuffle_sym_ids = kwargs.get("shuffle_sym_ids", False)
83
+
84
+ # Typically used for test sets
85
+ self.find_pocket = kwargs.get("find_pocket", False)
86
+ self.find_all_pockets = kwargs.get("find_all_pockets", False) # for dev
87
+ self.find_eval_chain_interface = kwargs.get("find_eval_chain_interface", False)
88
+ self.group_by_pdb_id = kwargs.get("group_by_pdb_id", False) # for test set
89
+ self.sort_by_n_token = kwargs.get("sort_by_n_token", False)
90
+
91
+ # Typically used for training set
92
+ self.random_sample_if_failed = kwargs.get("random_sample_if_failed", False)
93
+ self.use_reference_chains_only = kwargs.get("use_reference_chains_only", False)
94
+ self.is_distillation = kwargs.get("is_distillation", False)
95
+
96
+ # Configs for data filters
97
+ self.max_n_token = kwargs.get("max_n_token", -1)
98
+ self.pdb_list = kwargs.get("pdb_list", None)
99
+ if not self.pdb_list:  # covers both None (the default) and an empty list
100
+ self.pdb_list = None
101
+ # Used for removing rows in the indices list. Column names and excluded values are specified in this dict.
102
+ self.exclusion_dict = kwargs.get("exclusion", {})
103
+ self.limits = kwargs.get(
104
+ "limits", -1
105
+ ) # Limit number of indices rows, mainly for test
106
+ # Configs for constraint
107
+ self.constraint = kwargs.get("constraint", {})
108
+ if self.constraint.get("enable", False):
109
+ logger.info(f"[{self.name}] constraint config: {self.constraint}")
110
+ # Do not rely on new files for users who do not use constraint feature
111
+ self.ab_top2_clusters = get_antibody_clusters()
112
+ self.constraint_generator = ConstraintFeatureGenerator(
113
+ self.constraint, self.ab_top2_clusters
114
+ )
115
+
116
+ self.error_dir = kwargs.get("error_dir", None)
117
+ if self.error_dir is not None:
118
+ os.makedirs(self.error_dir, exist_ok=True)
119
+
120
+ self.msa_featurizer = msa_featurizer
121
+ self.template_featurizer = template_featurizer
122
+
123
+ # Read data
124
+ self.indices_list = self.read_indices_list(indices_fpath)
125
+
126
+ @staticmethod
127
+ def read_pdb_list(pdb_list: Union[list, str]) -> Optional[list]:
128
+ """
129
+ Reads a list of PDB IDs from a file or directly from a list.
130
+
131
+ Args:
132
+ pdb_list: A list of PDB IDs or a file path containing PDB IDs.
133
+
134
+ Returns:
135
+ A list of PDB IDs if the input is valid, otherwise None.
136
+ """
137
+ if pdb_list is None:
138
+ return None
139
+
140
+ if isinstance(pdb_list, list):
141
+ return pdb_list
142
+
143
+ with open(pdb_list, "r") as f:
144
+ pdb_filter_list = []
145
+ for l in f.readlines():
146
+ l = l.strip()
147
+ if l:
148
+ pdb_filter_list.append(l)
149
+ return pdb_filter_list
150
+
151
+ def read_indices_list(self, indices_fpath: Union[str, Path]) -> pd.DataFrame:
152
+ """
153
+ Reads and processes a list of indices from a CSV file.
154
+
155
+ Args:
156
+ indices_fpath: Path to the CSV file containing the indices.
157
+
158
+ Returns:
159
+ A DataFrame containing the processed indices.
160
+ """
161
+ indices_list = read_indices_csv(indices_fpath)
162
+ num_data = len(indices_list)
163
+ logger.info(f"#Rows in indices list: {num_data}")
164
+ # Filter by pdb_list
165
+ if self.pdb_list is not None:
166
+ pdb_filter_list = set(self.read_pdb_list(pdb_list=self.pdb_list))
167
+ indices_list = indices_list[indices_list["pdb_id"].isin(pdb_filter_list)]
168
+ logger.info(f"[filtered by pdb_list] #Rows: {len(indices_list)}")
169
+
170
+ # Filter by max_n_token
171
+ if self.max_n_token > 0:
172
+ valid_mask = indices_list["num_tokens"].astype(int) <= self.max_n_token
173
+ removed_list = indices_list[~valid_mask]
174
+ indices_list = indices_list[valid_mask]
175
+ logger.info(f"[removed] #Rows: {len(removed_list)}")
176
+ logger.info(f"[removed] #PDB: {removed_list['pdb_id'].nunique()}")
177
+ logger.info(
178
+ f"[filtered by n_token ({self.max_n_token})] #Rows: {len(indices_list)}"
179
+ )
180
+
181
+ # Filter by exclusion_dict
182
+ for col_name, exclusion_list in self.exclusion_dict.items():
183
+ cols = col_name.split("|")
184
+ exclusion_set = {tuple(excl.split("|")) for excl in exclusion_list}
185
+
186
+ def is_valid(row):
187
+ return tuple(row[col] for col in cols) not in exclusion_set
188
+
189
+ valid_mask = indices_list.apply(is_valid, axis=1)
190
+ indices_list = indices_list[valid_mask].reset_index(drop=True)
191
+ logger.info(
192
+ f"[Excluded by {col_name} -- {exclusion_list}] #Rows: {len(indices_list)}"
193
+ )
194
+ self.print_data_stats(indices_list)
195
+
196
+ # Group by pdb_id
197
+ # A list of dataframe. Each contains one pdb with multiple rows.
198
+ if self.group_by_pdb_id:
199
+ indices_list = [
200
+ df.reset_index() for _, df in indices_list.groupby("pdb_id", sort=True)
201
+ ]
202
+
203
+ if self.sort_by_n_token:
204
+ # Sort the dataset in a descending order, so that if OOM it will raise Error at an early stage.
205
+ if self.group_by_pdb_id:
206
+ indices_list = sorted(
207
+ indices_list,
208
+ key=lambda df: int(df["num_tokens"].iloc[0]),
209
+ reverse=True,
210
+ )
211
+ else:
212
+ indices_list = indices_list.sort_values(
213
+ by="num_tokens", key=lambda x: x.astype(int), ascending=False
214
+ ).reset_index(drop=True)
215
+
216
+ if self.find_eval_chain_interface:
217
+ # Remove data that does not contain eval_type in the EvaluationChainInterface list
218
+ if self.group_by_pdb_id:
219
+ indices_list = [
220
+ df
221
+ for df in indices_list
222
+ if len(
223
+ set(df["eval_type"].to_list()).intersection(
224
+ set(EvaluationChainInterface)
225
+ )
226
+ )
227
+ > 0
228
+ ]
229
+ else:
230
+ indices_list = indices_list[
231
+ indices_list["eval_type"].apply(
232
+ lambda x: x in EvaluationChainInterface
233
+ )
234
+ ]
235
+ if self.limits > 0 and len(indices_list) > self.limits:
236
+ logger.info(
237
+ f"Limit indices list size from {len(indices_list)} to {self.limits}"
238
+ )
239
+ indices_list = indices_list[: self.limits]
240
+ return indices_list
241
+
242
+ def print_data_stats(self, df: pd.DataFrame) -> None:
243
+ """
244
+ Prints statistics about the dataset, including the distribution of molecular group types.
245
+
246
+ Args:
247
+ df: A DataFrame containing the indices list.
248
+ """
249
+ if self.name:
250
+ logger.info("-" * 10 + f" Dataset {self.name}" + "-" * 10)
251
+ df["mol_group_type"] = df.apply(
252
+ lambda row: "_".join(
253
+ sorted(
254
+ [
255
+ str(row["mol_1_type"]),
256
+ str(row["mol_2_type"]).replace("nan", "intra"),
257
+ ]
258
+ )
259
+ ),
260
+ axis=1,
261
+ )
262
+
263
+ group_size_dict = dict(df["mol_group_type"].value_counts())
264
+ for i, n_i in group_size_dict.items():
265
+ logger.info(f"{i}: {n_i}/{len(df)}({round(n_i*100/len(df), 2)}%)")
266
+
267
+ logger.info("-" * 30)
268
+ if "cluster_id" in df.columns:
269
+ n_cluster = df["cluster_id"].nunique()
270
+ for i in group_size_dict:
271
+ n_i = df[df["mol_group_type"] == i]["cluster_id"].nunique()
272
+ logger.info(f"{i}: {n_i}/{n_cluster}({round(n_i*100/n_cluster, 2)}%)")
273
+ logger.info("-" * 30)
274
+
275
+ logger.info(f"Final pdb ids: {len(set(df.pdb_id.tolist()))}")
276
+ logger.info("-" * 30)
277
+
278
+ def __len__(self) -> int:
279
+ return len(self.indices_list)
280
+
281
+ def save_error_data(self, idx: int, error_message: str) -> None:
282
+ """
283
+ Saves the error data for a specific index to a JSON file in the error directory.
284
+
285
+ Args:
286
+ idx: The index of the data sample that caused the error.
287
+ error_message: The error message to be saved.
288
+ """
289
+ if self.error_dir is not None:
290
+ sample_indice = self._get_sample_indice(idx=idx)
291
+ data = sample_indice.to_dict()
292
+ data["error"] = error_message
293
+
294
+ filename = f"{sample_indice.pdb_id}-{sample_indice.chain_1_id}-{sample_indice.chain_2_id}.json"
295
+ fpath = os.path.join(self.error_dir, filename)
296
+ if not os.path.exists(fpath):
297
+ with open(fpath, "w") as f:
298
+ json.dump(data, f)
299
+
300
+ def __getitem__(self, idx: int):
301
+ """
302
+ Retrieves a data sample by processing the given index.
303
+ If an error occurs, it attempts to handle it by either saving the error data or randomly sampling another index.
304
+
305
+ Args:
306
+ idx: The index of the data sample to retrieve.
307
+
308
+ Returns:
309
+ A dictionary containing the processed data sample.
310
+ """
311
+ # Try at most 10 times
312
+ for _ in range(10):
313
+ try:
314
+ data = self.process_one(idx)
315
+ return data
316
+ except Exception as e:
317
+ error_message = f"{e} at idx {idx}:\n{traceback.format_exc()}"
318
+ self.save_error_data(idx, error_message)
319
+
320
+ if self.random_sample_if_failed:
321
+ logger.exception(f"[skip data {idx}] {error_message}")
322
+ # Random sample an index
323
+ idx = random.choice(range(len(self.indices_list)))
324
+ continue
325
+ else:
326
+ raise Exception(e)
327
+ return data
328
+
329
+ def _get_bioassembly_data(
330
+ self, idx: int
331
+ ) -> tuple[pd.Series, dict[str, Any], Optional[str]]:
332
+ sample_indice = self._get_sample_indice(idx=idx)
333
+ if self.bioassembly_dict_dir is not None:
334
+ bioassembly_dict_fpath = os.path.join(
335
+ self.bioassembly_dict_dir, sample_indice.pdb_id + ".pkl.gz"
336
+ )
337
+ else:
338
+ bioassembly_dict_fpath = None
339
+
340
+ bioassembly_dict = DataPipeline.get_data_bioassembly(
341
+ bioassembly_dict_fpath=bioassembly_dict_fpath
342
+ )
343
+ bioassembly_dict["pdb_id"] = sample_indice.pdb_id
344
+ return sample_indice, bioassembly_dict, bioassembly_dict_fpath
345
+
346
+ @staticmethod
347
+ def _reassign_atom_array_chain_id(atom_array: AtomArray):
348
+ """
349
+ In experiments conducted to observe overfitting effects using training sets,
350
+ the pre-stored AtomArray in the training set may experience issues with discontinuous chain IDs due to filtering.
351
+ Consequently, a temporary patch has been implemented to resolve this issue.
352
+
353
+ e.g. 3x6u asym_id_int = [0, 1, 2, ... 18, 20] -> reassigned_asym_id_int [0, 1, 2, ..., 18, 19]
354
+ """
355
+
356
+ def _get_contiguous_array(array):
357
+ array_uniq = np.sort(np.unique(array))
358
+ map_dict = {i: idx for idx, i in enumerate(array_uniq)}
359
+ new_array = np.vectorize(map_dict.get)(array)
360
+ return new_array
361
+
362
+ atom_array.asym_id_int = _get_contiguous_array(atom_array.asym_id_int)
363
+ atom_array.entity_id_int = _get_contiguous_array(atom_array.entity_id_int)
364
+ atom_array.sym_id_int = _get_contiguous_array(atom_array.sym_id_int)
365
+ return atom_array
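A minimal standalone sketch of the remapping helper above, using the 3x6u example from the docstring (values are illustrative only):

import numpy as np

asym_id_int = np.array([0, 1, 2, 18, 20])  # discontinuous ids left over after filtering
uniq = np.sort(np.unique(asym_id_int))
map_dict = {i: idx for idx, i in enumerate(uniq)}  # {0: 0, 1: 1, 2: 2, 18: 3, 20: 4}
print(np.vectorize(map_dict.get)(asym_id_int))  # [0 1 2 3 4]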
366
+
367
+ @staticmethod
368
+ def _shuffle_array_based_on_mol_id(token_array: TokenArray, atom_array: AtomArray):
369
+ """
370
+ Shuffle both token_array and atom_array.
371
+ Atoms/tokens with the same mol_id will be shuffled as an integrated component.
372
+ """
373
+
374
+ # Get token mol_id
375
+ centre_atom_indices = token_array.get_annotation("centre_atom_index")
376
+ token_mol_id = atom_array[centre_atom_indices].mol_id
377
+
378
+ # Get unique molecule IDs and shuffle them in place
379
+ shuffled_mol_ids = np.unique(token_mol_id).copy()
380
+ np.random.shuffle(shuffled_mol_ids)
381
+
382
+ # Get shuffled token indices
383
+ original_token_indices = np.arange(len(token_mol_id))
384
+ shuffled_token_indices = []
385
+ for mol_id in shuffled_mol_ids:
386
+ mol_token_indices = original_token_indices[token_mol_id == mol_id]
387
+ shuffled_token_indices.append(mol_token_indices)
388
+ shuffled_token_indices = np.concatenate(shuffled_token_indices)
389
+
390
+ # Get shuffled token and atom array
391
+ # Use `CropData.select_by_token_indices` to shuffle safely
392
+ token_array, atom_array, _, _ = CropData.select_by_token_indices(
393
+ token_array=token_array,
394
+ atom_array=atom_array,
395
+ selected_token_indices=shuffled_token_indices,
396
+ )
397
+
398
+ return token_array, atom_array
399
+
400
+ @staticmethod
401
+ def _assign_random_sym_id(atom_array: AtomArray):
402
+ """
403
+ Assign random sym_id for chains of the same entity_id
404
+ e.g.
405
+ when entity_id = 0
406
+ sym_id_int = [0, 1, 2] -> random_sym_id_int = [2, 0, 1]
407
+ when entity_id = 1
408
+ sym_id_int = [0, 1, 2, 3] -> random_sym_id_int = [3, 0, 1, 2]
409
+ """
410
+
411
+ def _shuffle(x):
412
+ x_unique = np.sort(np.unique(x))
413
+ x_shuffled = x_unique.copy()
414
+ np.random.shuffle(x_shuffled) # shuffle in-place
415
+ map_dict = dict(zip(x_unique, x_shuffled))
416
+ new_x = np.vectorize(map_dict.get)(x)
417
+ return new_x.copy()
418
+
419
+ for entity_id in np.unique(atom_array.label_entity_id):
420
+ mask = atom_array.label_entity_id == entity_id
421
+ atom_array.sym_id_int[mask] = _shuffle(atom_array.sym_id_int[mask])
422
+ return atom_array
423
+
424
+ def process_one(
425
+ self, idx: int, return_atom_token_array: bool = False
426
+ ) -> dict[str, dict]:
427
+ """
428
+ Processes a single data sample by retrieving bioassembly data, applying various transformations, and cropping the data.
429
+ It then extracts features and labels, and optionally returns the processed atom and token arrays.
430
+
431
+ Args:
432
+ idx: The index of the data sample to process.
433
+ return_atom_token_array: Whether to return the processed atom and token arrays.
434
+
435
+ Returns:
436
+ A dict containing the input features, labels, basic_info and optionally the processed atom and token arrays.
437
+ """
438
+
439
+ sample_indice, bioassembly_dict, bioassembly_dict_fpath = (
440
+ self._get_bioassembly_data(idx=idx)
441
+ )
442
+
443
+ if self.use_reference_chains_only:
444
+ # Get the reference chains
445
+ ref_chain_ids = [sample_indice.chain_1_id, sample_indice.chain_2_id]
446
+ if sample_indice.type == "chain":
447
+ ref_chain_ids.pop(-1)
448
+ # Remove other chains from the bioassembly_dict
449
+ # Remove them safely using the crop method
450
+ token_centre_atom_indices = bioassembly_dict["token_array"].get_annotation(
451
+ "centre_atom_index"
452
+ )
453
+ token_chain_id = bioassembly_dict["atom_array"][
454
+ token_centre_atom_indices
455
+ ].chain_id
456
+ is_ref_chain = np.isin(token_chain_id, ref_chain_ids)
457
+ bioassembly_dict["token_array"], bioassembly_dict["atom_array"], _, _ = (
458
+ CropData.select_by_token_indices(
459
+ token_array=bioassembly_dict["token_array"],
460
+ atom_array=bioassembly_dict["atom_array"],
461
+ selected_token_indices=np.arange(len(is_ref_chain))[is_ref_chain],
462
+ )
463
+ )
464
+
465
+ if self.shuffle_mols:
466
+ bioassembly_dict["token_array"], bioassembly_dict["atom_array"] = (
467
+ self._shuffle_array_based_on_mol_id(
468
+ token_array=bioassembly_dict["token_array"],
469
+ atom_array=bioassembly_dict["atom_array"],
470
+ )
471
+ )
472
+
473
+ if self.shuffle_sym_ids:
474
+ bioassembly_dict["atom_array"] = self._assign_random_sym_id(
475
+ bioassembly_dict["atom_array"]
476
+ )
477
+
478
+ if self.reassign_continuous_chain_ids:
479
+ bioassembly_dict["atom_array"] = self._reassign_atom_array_chain_id(
480
+ bioassembly_dict["atom_array"]
481
+ )
482
+
483
+ max_entity_mol_id = bioassembly_dict["atom_array"].entity_mol_id.max()
484
+
485
+ # Crop
486
+ (
487
+ crop_method,
488
+ cropped_token_array,
489
+ cropped_atom_array,
490
+ cropped_msa_features,
491
+ cropped_template_features,
492
+ reference_token_index,
493
+ ) = self.crop(
494
+ sample_indice=sample_indice,
495
+ bioassembly_dict=bioassembly_dict,
496
+ **self.cropping_configs,
497
+ )
498
+
499
+ feat, label, label_full = self.get_feature_and_label(
500
+ idx=idx,
501
+ token_array=cropped_token_array,
502
+ atom_array=cropped_atom_array,
503
+ msa_features=cropped_msa_features,
504
+ template_features=cropped_template_features,
505
+ full_atom_array=bioassembly_dict["atom_array"],
506
+ is_spatial_crop="spatial" in crop_method.lower(),
507
+ max_entity_mol_id=max_entity_mol_id,
508
+ )
509
+
510
+ # Basic info, e.g. dimension related items
511
+ basic_info = {
512
+ "pdb_id": (
513
+ bioassembly_dict["pdb_id"]
514
+ if self.is_distillation is False
515
+ else sample_indice["pdb_id"]
516
+ ),
517
+ "N_asym": torch.tensor([len(torch.unique(feat["asym_id"]))]),
518
+ "N_token": torch.tensor([feat["token_index"].shape[0]]),
519
+ "N_atom": torch.tensor([feat["atom_to_token_idx"].shape[0]]),
520
+ "N_msa": torch.tensor([feat["msa"].shape[0]]),
521
+ "bioassembly_dict_fpath": bioassembly_dict_fpath,
522
+ "N_msa_prot_pair": torch.tensor([feat["prot_pair_num_alignments"]]),
523
+ "N_msa_prot_unpair": torch.tensor([feat["prot_unpair_num_alignments"]]),
524
+ "N_msa_rna_pair": torch.tensor([feat["rna_pair_num_alignments"]]),
525
+ "N_msa_rna_unpair": torch.tensor([feat["rna_unpair_num_alignments"]]),
526
+ }
527
+
528
+ for mol_type in ("protein", "ligand", "rna", "dna"):
529
+ abbr = {"protein": "prot", "ligand": "lig"}
530
+ abbr_type = abbr.get(mol_type, mol_type)
531
+ mol_type_mask = feat[f"is_{mol_type}"].bool()
532
+ n_atom = int(mol_type_mask.sum(dim=-1).item())
533
+ n_token = len(torch.unique(feat["atom_to_token_idx"][mol_type_mask]))
534
+ basic_info[f"N_{abbr_type}_atom"] = torch.tensor([n_atom])
535
+ basic_info[f"N_{abbr_type}_token"] = torch.tensor([n_token])
536
+
537
+ # Add chain level chain_id
538
+ asymn_id_to_chain_id = {
539
+ atom.asym_id_int: atom.chain_id for atom in cropped_atom_array
540
+ }
541
+ chain_id_list = [
542
+ asymn_id_to_chain_id[asymn_id_int]
543
+ for asymn_id_int in sorted(asymn_id_to_chain_id.keys())
544
+ ]
545
+ basic_info["chain_id"] = chain_id_list
546
+
547
+ data = {
548
+ "input_feature_dict": feat,
549
+ "label_dict": label,
550
+ "label_full_dict": label_full,
551
+ "basic": basic_info,
552
+ }
553
+
554
+ if return_atom_token_array:
555
+ data["cropped_atom_array"] = cropped_atom_array
556
+ data["cropped_token_array"] = cropped_token_array
557
+ return data
558
+
559
+ def crop(
560
+ self,
561
+ sample_indice: pd.Series,
562
+ bioassembly_dict: dict[str, Any],
563
+ crop_size: int,
564
+ method_weights: list[float],
565
+ contiguous_crop_complete_lig: bool = True,
566
+ spatial_crop_complete_lig: bool = True,
567
+ drop_last: bool = True,
568
+ remove_metal: bool = True,
569
+ ) -> tuple[str, TokenArray, AtomArray, dict[str, Any], dict[str, Any], Any]:
570
+ """
571
+ Crops the bioassembly data based on the specified configurations.
572
+
573
+ Returns:
574
+ A tuple containing the cropping method, cropped token array, cropped atom array,
575
+ cropped MSA features, cropped template features, and the reference token index.
576
+ """
577
+ return DataPipeline.crop(
578
+ one_sample=sample_indice,
579
+ bioassembly_dict=bioassembly_dict,
580
+ crop_size=crop_size,
581
+ msa_featurizer=self.msa_featurizer,
582
+ template_featurizer=self.template_featurizer,
583
+ method_weights=method_weights,
584
+ contiguous_crop_complete_lig=contiguous_crop_complete_lig,
585
+ spatial_crop_complete_lig=spatial_crop_complete_lig,
586
+ drop_last=drop_last,
587
+ remove_metal=remove_metal,
588
+ )
589
+
590
+ def _get_sample_indice(self, idx: int) -> pd.Series:
591
+ """
592
+ Retrieves the sample indice for a given index. If the dataset is grouped by PDB ID, it returns the first row of the PDB-idx.
593
+ Otherwise, it returns the row at the specified index.
594
+
595
+ Args:
596
+ idx: The index of the data sample to retrieve.
597
+
598
+ Returns:
599
+ A pandas Series containing the sample indice.
600
+ """
601
+ if self.group_by_pdb_id:
602
+ # Row-0 of PDB-idx
603
+ sample_indice = self.indices_list[idx].iloc[0]
604
+ else:
605
+ sample_indice = self.indices_list.iloc[idx]
606
+ return sample_indice
607
+
608
+ def _get_pdb_indice(self, idx: int) -> pd.core.series.Series:
609
+ if self.group_by_pdb_id:
610
+ pdb_indice = self.indices_list[idx].copy()
611
+ else:
612
+ pdb_indice = self.indices_list.iloc[idx : idx + 1].copy()
613
+ return pdb_indice
614
+
615
+ def _get_eval_chain_interface_mask(
616
+ self, idx: int, atom_array_chain_id: np.ndarray
617
+ ) -> tuple[np.ndarray, np.ndarray, torch.Tensor, torch.Tensor]:
618
+ """
619
+ Retrieves the evaluation chain/interface mask for a given index.
620
+
621
+ Args:
622
+ idx: The index of the data sample.
623
+ atom_array_chain_id: An array containing the chain IDs of the atom array.
624
+
625
+ Returns:
626
+ A tuple containing the evaluation type, cluster ID, chain 1 mask, and chain 2 mask.
627
+ """
628
+ if self.group_by_pdb_id:
629
+ df = self.indices_list[idx]
630
+ else:
631
+ df = self.indices_list.iloc[idx : idx + 1]
632
+
633
+ # Only consider chain/interfaces defined in EvaluationChainInterface
634
+ df = df[df["eval_type"].apply(lambda x: x in EvaluationChainInterface)].copy()
635
+ if len(df) < 1:
636
+ raise ValueError(
637
+ f"Cannot find a chain/interface for evaluation in the PDB."
638
+ )
639
+
640
+ def get_atom_mask(row):
641
+ chain_1_mask = atom_array_chain_id == row["chain_1_id"]
642
+ if row["type"] == "chain":
643
+ chain_2_mask = chain_1_mask
644
+ else:
645
+ chain_2_mask = atom_array_chain_id == row["chain_2_id"]
646
+ chain_1_mask = torch.tensor(chain_1_mask).bool()
647
+ chain_2_mask = torch.tensor(chain_2_mask).bool()
648
+ if chain_1_mask.sum() == 0 or chain_2_mask.sum() == 0:
649
+ return None, None
650
+ return chain_1_mask, chain_2_mask
651
+
652
+ df["chain_1_mask"], df["chain_2_mask"] = zip(*df.apply(get_atom_mask, axis=1))
653
+ df = df[df["chain_1_mask"].notna()] # drop NaN
654
+
655
+ if len(df) < 1:
656
+ raise ValueError(
657
+ f"Cannot find a chain/interface for evaluation in the atom_array."
658
+ )
659
+
660
+ eval_type = np.array(df["eval_type"].tolist())
661
+ cluster_id = np.array(df["cluster_id"].tolist())
662
+ # [N_eval, N_atom]
663
+ chain_1_mask = torch.stack(df["chain_1_mask"].tolist())
664
+ # [N_eval, N_atom]
665
+ chain_2_mask = torch.stack(df["chain_2_mask"].tolist())
666
+
667
+ return eval_type, cluster_id, chain_1_mask, chain_2_mask
668
+
669
+ def get_constraint_feature(
670
+ self,
671
+ idx,
672
+ atom_array,
673
+ token_array,
674
+ msa_features,
675
+ max_entity_mol_id,
676
+ full_atom_array,
677
+ ):
678
+ sample_indice = self._get_sample_indice(idx=idx)
679
+ pdb_indice = self._get_pdb_indice(idx=idx)
680
+ features_dict = {}
681
+ (
682
+ token_array,
683
+ atom_array,
684
+ msa_features,
685
+ constraint_feature_dict,
686
+ feature_info,
687
+ log_dict,
688
+ full_atom_array,
689
+ ) = self.constraint_generator.generate(
690
+ atom_array,
691
+ token_array,
692
+ sample_indice,
693
+ pdb_indice,
694
+ msa_features,
695
+ max_entity_mol_id,
696
+ full_atom_array,
697
+ )
698
+ features_dict["constraint_feature"] = constraint_feature_dict
699
+ features_dict.update(feature_info)
700
+ features_dict["constraint_log_info"] = log_dict
701
+ return token_array, atom_array, features_dict, msa_features, full_atom_array
702
+
703
+ def get_feature_and_label(
704
+ self,
705
+ idx: int,
706
+ token_array: TokenArray,
707
+ atom_array: AtomArray,
708
+ msa_features: dict[str, Any],
709
+ template_features: dict[str, Any],
710
+ full_atom_array: AtomArray,
711
+ is_spatial_crop: bool = True,
712
+ max_entity_mol_id: int = None,
713
+ ) -> tuple[dict[str, torch.Tensor], dict[str, torch.Tensor], dict[str, torch.Tensor]]:
714
+ """
715
+ Get feature and label information for a given data point.
716
+ It uses a Featurizer object to obtain input features and labels, and applies several
717
+ steps to add other features and labels. Finally, it returns the feature dictionary, label
718
+ dictionary, and a full label dictionary.
719
+
720
+ Args:
721
+ idx: Index of the data point.
722
+ token_array: Token array representing the amino acid sequence.
723
+ atom_array: Atom array containing atomic information.
724
+ msa_features: Dictionary of MSA features.
725
+ template_features: Dictionary of template features.
726
+ full_atom_array: Full atom array containing all atoms.
727
+ is_spatial_crop: Flag indicating whether spatial cropping is applied, by default True.
728
+ max_entity_mol_id: Maximum entity mol ID in the full atom array.
729
+ Returns:
730
+ A tuple containing the feature dictionary, the label dictionary, and the full label dictionary.
731
+
732
+ Raises:
733
+ ValueError: If the ligand cannot be found in the data point.
734
+ """
735
+ features_dict = {}
736
+ if self.constraint.get("enable", False):
737
+ token_array, atom_array, features_dict, msa_features, full_atom_array = (
738
+ self.get_constraint_feature(
739
+ idx,
740
+ atom_array,
741
+ token_array,
742
+ msa_features,
743
+ max_entity_mol_id,
744
+ full_atom_array,
745
+ )
746
+ )
747
+
748
+ # Get feature and labels from Featurizer
749
+ feat = Featurizer(
750
+ cropped_token_array=token_array,
751
+ cropped_atom_array=atom_array,
752
+ ref_pos_augment=self.ref_pos_augment,
753
+ lig_atom_rename=self.lig_atom_rename,
754
+ )
755
+ features_dict.update(feat.get_all_input_features())
756
+ labels_dict = feat.get_labels()
757
+
758
+ # Permutation list for atom permutation
759
+ features_dict["atom_perm_list"] = feat.get_atom_permutation_list()
760
+
761
+ # Labels for multi-chain permutation
762
+ # Note: the returned full_atom_array may contain fewer atoms than the input
763
+ label_full_dict, full_atom_array = Featurizer.get_gt_full_complex_features(
764
+ atom_array=full_atom_array,
765
+ cropped_atom_array=atom_array,
766
+ get_cropped_asym_only=is_spatial_crop,
767
+ )
768
+
769
+ # Masks for Pocket Metrics
770
+ if self.find_pocket:
771
+ # Get entity_id of the interested ligand
772
+ sample_indice = self._get_sample_indice(idx=idx)
773
+ if sample_indice.mol_1_type == "ligand":
774
+ lig_entity_id = str(sample_indice.entity_1_id)
775
+ lig_chain_id = str(sample_indice.chain_1_id)
776
+ elif sample_indice.mol_2_type == "ligand":
777
+ lig_entity_id = str(sample_indice.entity_2_id)
778
+ lig_chain_id = str(sample_indice.chain_2_id)
779
+ else:
780
+ raise ValueError(f"Cannot find ligand from this data point.")
781
+ # Make sure the cropped array contains interested ligand
782
+ assert lig_entity_id in set(atom_array.label_entity_id)
783
+ assert lig_chain_id in set(atom_array.chain_id)
784
+
785
+ # Get asym ID of the specific ligand in the `main` pocket
786
+ lig_asym_id = atom_array.label_asym_id[atom_array.chain_id == lig_chain_id]
787
+ assert len(np.unique(lig_asym_id)) == 1
788
+ lig_asym_id = lig_asym_id[0]
789
+ ligands = [lig_asym_id]
790
+
791
+ if self.find_all_pockets:
792
+ # Get asym ID of other ligands with the same entity_id
793
+ all_lig_asym_ids = set(
794
+ full_atom_array[
795
+ full_atom_array.label_entity_id == lig_entity_id
796
+ ].label_asym_id
797
+ )
798
+ ligands.extend(list(all_lig_asym_ids - set([lig_asym_id])))
799
+
800
+ # Note: the `main` pocket is the 0-indexed one.
801
+ # [N_pocket, N_atom], [N_pocket, N_atom].
802
+ # If not find_all_pockets, then N_pocket = 1.
803
+ interested_ligand_mask, pocket_mask = feat.get_lig_pocket_mask(
804
+ atom_array=full_atom_array, lig_label_asym_id=ligands
805
+ )
806
+
807
+ label_full_dict["pocket_mask"] = pocket_mask
808
+ label_full_dict["interested_ligand_mask"] = interested_ligand_mask
809
+
810
+ # Masks for Chain/Interface Metrics
811
+ if self.find_eval_chain_interface:
812
+ eval_type, cluster_id, chain_1_mask, chain_2_mask = (
813
+ self._get_eval_chain_interface_mask(
814
+ idx=idx, atom_array_chain_id=full_atom_array.chain_id
815
+ )
816
+ )
817
+ labels_dict["eval_type"] = eval_type # [N_eval]
818
+ labels_dict["cluster_id"] = cluster_id # [N_eval]
819
+ labels_dict["chain_1_mask"] = chain_1_mask # [N_eval, N_atom]
820
+ labels_dict["chain_2_mask"] = chain_2_mask # [N_eval, N_atom]
821
+
822
+ # Make dummy features for not implemented features
823
+ dummy_feats = []
824
+ if len(msa_features) == 0:
825
+ dummy_feats.append("msa")
826
+ else:
827
+ msa_features = dict_to_tensor(msa_features)
828
+ features_dict.update(msa_features)
829
+ if len(template_features) == 0:
830
+ dummy_feats.append("template")
831
+ else:
832
+ template_features = dict_to_tensor(template_features)
833
+ features_dict.update(template_features)
834
+
835
+ features_dict = make_dummy_feature(
836
+ features_dict=features_dict, dummy_feats=dummy_feats
837
+ )
838
+ # Transform to right data type
839
+ features_dict = data_type_transform(feat_or_label_dict=features_dict)
840
+ labels_dict = data_type_transform(feat_or_label_dict=labels_dict)
841
+
842
+ # Is_distillation
843
+ features_dict["is_distillation"] = torch.tensor([self.is_distillation])
844
+ if self.is_distillation is True:
845
+ features_dict["resolution"] = torch.tensor([-1.0])
846
+ return features_dict, labels_dict, label_full_dict
847
+
848
+
849
+ def get_msa_featurizer(configs, dataset_name: str, stage: str) -> Optional[Callable]:
850
+ """
851
+ Creates and returns an MSAFeaturizer object based on the provided configurations.
852
+
853
+ Args:
854
+ configs: A dictionary containing the configurations for the MSAFeaturizer.
855
+ dataset_name: The name of the dataset.
856
+ stage: The stage of the dataset (e.g., 'train', 'test').
857
+
858
+ Returns:
859
+ An MSAFeaturizer object if MSA is enabled in the configurations, otherwise None.
860
+ """
861
+ if "msa" in configs["data"] and configs["data"]["msa"]["enable"]:
862
+ msa_info = configs["data"]["msa"]
863
+ msa_args = deepcopy(msa_info)
864
+
865
+ if "msa" in (dataset_config := configs["data"][dataset_name]):
866
+ for k, v in dataset_config["msa"].items():
867
+ if k not in ["prot", "rna"]:
868
+ msa_args[k] = v
869
+ else:
870
+ for kk, vv in dataset_config["msa"][k].items():
871
+ msa_args[k][kk] = vv
872
+
873
+ prot_msa_args = msa_args["prot"]
874
+ prot_msa_args.update(
875
+ {
876
+ "dataset_name": dataset_name,
877
+ "merge_method": msa_args["merge_method"],
878
+ "max_size": msa_args["max_size"][stage],
879
+ }
880
+ )
881
+
882
+ rna_msa_args = msa_args["rna"]
883
+ rna_msa_args.update(
884
+ {
885
+ "dataset_name": dataset_name,
886
+ "merge_method": msa_args["merge_method"],
887
+ "max_size": msa_args["max_size"][stage],
888
+ }
889
+ )
890
+
891
+ return MSAFeaturizer(
892
+ prot_msa_args=prot_msa_args,
893
+ rna_msa_args=rna_msa_args,
894
+ enable_rna_msa=configs.data.msa.enable_rna_msa,
895
+ )
896
+
897
+ else:
898
+ return None
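The override logic above lets a dataset-level `msa` block refine the global one; a minimal sketch of that merge using hypothetical keys (the real config defines its own key names and values):

from copy import deepcopy

global_msa = {"merge_method": "dense_max", "prot": {"pairing_db": "uniref100"}}  # hypothetical values
dataset_msa = {"prot": {"pairing_db": "uniprot"}}  # hypothetical dataset-level override

msa_args = deepcopy(global_msa)
for k, v in dataset_msa.items():
    if k not in ["prot", "rna"]:
        msa_args[k] = v  # top-level keys replace the global value
    else:
        for kk, vv in v.items():
            msa_args[k][kk] = vv  # nested prot/rna keys are merged per field
print(msa_args)  # {'merge_method': 'dense_max', 'prot': {'pairing_db': 'uniprot'}}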
899
+
900
+
901
+ class WeightedMultiDataset(Dataset):
902
+ """
903
+ A weighted dataset composed of multiple datasets with weights.
904
+ """
905
+
906
+ def __init__(
907
+ self,
908
+ datasets: list[Dataset],
909
+ dataset_names: list[str],
910
+ datapoint_weights: list[list[float]],
911
+ dataset_sample_weights: list[torch.tensor],
912
+ ):
913
+ """
914
+ Initializes the WeightedMultiDataset.
915
+ Args:
916
+ datasets: A list of Dataset objects.
917
+ dataset_names: A list of dataset names corresponding to the datasets.
918
+ datapoint_weights: A list of lists containing sampling weights for each datapoint in the datasets.
919
+ dataset_sample_weights: A list of torch tensors containing sampling weights for each dataset.
920
+ """
921
+ self.datasets = datasets
922
+ self.dataset_names = dataset_names
923
+ self.datapoint_weights = datapoint_weights
924
+ self.dataset_sample_weights = torch.Tensor(dataset_sample_weights)
925
+ self.iteration = 0
926
+ self.offset = 0
927
+ self.init_datasets()
928
+
929
+ def init_datasets(self):
930
+ """Calculate global weights of each datapoint in datasets for future sampling."""
931
+ self.merged_datapoint_weights = []
932
+ self.weight = 0.0
933
+ self.dataset_indices = []
934
+ self.within_dataset_indices = []
935
+ for dataset_index, (
936
+ dataset,
937
+ datapoint_weight_list,
938
+ dataset_weight,
939
+ ) in enumerate(
940
+ zip(self.datasets, self.datapoint_weights, self.dataset_sample_weights)
941
+ ):
942
+ # normalize each dataset weights
943
+ weight_sum = sum(datapoint_weight_list)
944
+ datapoint_weight_list = [
945
+ dataset_weight * w / weight_sum for w in datapoint_weight_list
946
+ ]
947
+ self.merged_datapoint_weights.extend(datapoint_weight_list)
948
+ self.weight += dataset_weight
949
+ self.dataset_indices.extend([dataset_index] * len(datapoint_weight_list))
950
+ self.within_dataset_indices.extend(list(range(len(datapoint_weight_list))))
951
+ self.merged_datapoint_weights = torch.tensor(
952
+ self.merged_datapoint_weights, dtype=torch.float64
953
+ )
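`init_datasets` normalizes each dataset's datapoint weights so that they sum to that dataset's sampling weight before concatenation; a minimal numeric sketch with toy values:

import torch

datapoint_weights = [[1.0, 1.0], [2.0, 2.0, 4.0]]  # two toy datasets
dataset_sample_weights = [0.5, 0.5]

merged = []
for weights, dataset_weight in zip(datapoint_weights, dataset_sample_weights):
    total = sum(weights)
    merged.extend(dataset_weight * w / total for w in weights)
print(torch.tensor(merged, dtype=torch.float64))
# tensor([0.2500, 0.2500, 0.1250, 0.1250, 0.2500], dtype=torch.float64)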
954
+
955
+ def __len__(self) -> int:
956
+ return len(self.merged_datapoint_weights)
957
+
958
+ def __getitem__(self, index: int) -> dict[str, dict]:
959
+ return self.datasets[self.dataset_indices[index]][
960
+ self.within_dataset_indices[index]
961
+ ]
962
+
963
+
964
+ def get_weighted_pdb_weight(
965
+ data_type: str,
966
+ cluster_size: int,
967
+ chain_count: dict,
968
+ eps: float = 1e-9,
969
+ beta_dict: Optional[dict] = None,
970
+ alpha_dict: Optional[dict] = None,
971
+ ) -> float:
972
+ """
973
+ Get sample weight for each example in a weighted PDB dataset.
974
+
975
+ data_type (str): Type of data, either 'chain' or 'interface'.
976
+ cluster_size (int): Cluster size of this chain/interface.
977
+ chain_count (dict): Count of each kind of chains, e.g., {"prot": int, "nuc": int, "ligand": int}.
978
+ eps (float, optional): A small epsilon value to avoid division by zero. Default is 1e-9.
979
+ beta_dict (Optional[dict], optional): Dictionary containing beta values for 'chain' and 'interface'.
980
+ alpha_dict (Optional[dict], optional): Dictionary containing alpha values for different chain types.
981
+
982
+ Returns:
983
+ float: Calculated weight for the given chain/interface.
984
+ """
985
+ if not beta_dict:
986
+ beta_dict = {
987
+ "chain": 0.5,
988
+ "interface": 1,
989
+ }
990
+ if not alpha_dict:
991
+ alpha_dict = {
992
+ "prot": 3,
993
+ "nuc": 3,
994
+ "ligand": 1,
995
+ }
996
+
997
+ assert cluster_size > 0
998
+ assert data_type in ["chain", "interface"]
999
+ beta = beta_dict[data_type]
1000
+ assert set(chain_count.keys()).issubset(set(alpha_dict.keys()))
1001
+ weight = (
1002
+ beta
1003
+ * sum(
1004
+ [alpha * chain_count[data_mode] for data_mode, alpha in alpha_dict.items()]
1005
+ )
1006
+ / (cluster_size + eps)
1007
+ )
1008
+ return weight
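As a quick sanity check on the formula, a minimal sketch evaluating the default weighting for a protein-ligand interface in a cluster of size 4 (numbers are illustrative):

beta = 1  # "interface" in the default beta_dict
alpha = {"prot": 3, "nuc": 3, "ligand": 1}
chain_count = {"prot": 1, "nuc": 0, "ligand": 1}
cluster_size, eps = 4, 1e-9

weight = beta * sum(alpha[k] * chain_count[k] for k in alpha) / (cluster_size + eps)
print(round(weight, 6))  # 1.0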
1009
+
1010
+
1011
+ def calc_weights_for_df(
1012
+ indices_df: pd.DataFrame, beta_dict: dict[str, Any], alpha_dict: dict[str, Any]
1013
+ ) -> pd.DataFrame:
1014
+ """
1015
+ Calculate weights for each example in the dataframe.
1016
+
1017
+ Args:
1018
+ indices_df: A pandas DataFrame containing the indices.
1019
+ beta_dict: A dictionary containing beta values for different data types.
1020
+ alpha_dict: A dictionary containing alpha values for different data types.
1021
+
1022
+ Returns:
1023
+ A pandas DataFrame with a column 'weights' containing the calculated weights.
1024
+ """
1025
+ # Specific to assembly, and entities (chain or interface)
1026
+ indices_df["pdb_sorted_entity_id"] = indices_df.apply(
1027
+ lambda x: f"{x['pdb_id']}_{x['assembly_id']}_{'_'.join(sorted([str(x['entity_1_id']), str(x['entity_2_id'])]))}",
1028
+ axis=1,
1029
+ )
1030
+
1031
+ entity_member_num_dict = {}
1032
+ for pdb_sorted_entity_id, sub_df in indices_df.groupby("pdb_sorted_entity_id"):
1033
+ # Number of repeated entities in the same assembly
1034
+ entity_member_num_dict[pdb_sorted_entity_id] = len(sub_df)
1035
+ indices_df["pdb_sorted_entity_id_member_num"] = indices_df.apply(
1036
+ lambda x: entity_member_num_dict[x["pdb_sorted_entity_id"]], axis=1
1037
+ )
1038
+
1039
+ cluster_size_record = {}
1040
+ for cluster_id, sub_df in indices_df.groupby("cluster_id"):
1041
+ cluster_size_record[cluster_id] = len(set(sub_df["pdb_sorted_entity_id"]))
1042
+
1043
+ weights = []
1044
+ for _, row in indices_df.iterrows():
1045
+ data_type = row["type"]
1046
+ cluster_size = cluster_size_record[row["cluster_id"]]
1047
+ chain_count = {"prot": 0, "nuc": 0, "ligand": 0}
1048
+ for mol_type in [row["mol_1_type"], row["mol_2_type"]]:
1049
+ if chain_count.get(mol_type) is None:
1050
+ continue
1051
+ chain_count[mol_type] += 1
1052
+ # Weight specific to (assembly, entity(chain/interface))
1053
+ weight = get_weighted_pdb_weight(
1054
+ data_type=data_type,
1055
+ cluster_size=cluster_size,
1056
+ chain_count=chain_count,
1057
+ beta_dict=beta_dict,
1058
+ alpha_dict=alpha_dict,
1059
+ )
1060
+ weights.append(weight)
1061
+ indices_df["weights"] = weights / indices_df["pdb_sorted_entity_id_member_num"]
1062
+ return indices_df
1063
+
1064
+
1065
+ def get_sample_weights(
1066
+ sampler_type: str,
1067
+ indices_df: pd.DataFrame = None,
1068
+ beta_dict: dict = {
1069
+ "chain": 0.5,
1070
+ "interface": 1,
1071
+ },
1072
+ alpha_dict: dict = {
1073
+ "prot": 3,
1074
+ "nuc": 3,
1075
+ "ligand": 1,
1076
+ },
1077
+ force_recompute_weight: bool = False,
1078
+ ) -> Union[pd.Series, list[float]]:
1079
+ """
1080
+ Computes sample weights based on the specified sampler type.
1081
+
1082
+ Args:
1083
+ sampler_type: The type of sampler to use ('weighted' or 'uniform').
1084
+ indices_df: A pandas DataFrame containing the indices.
1085
+ beta_dict: A dictionary containing beta values for different data types.
1086
+ alpha_dict: A dictionary containing alpha values for different data types.
1087
+ force_recompute_weight: Whether to force recomputation of weights even if they already exist.
1088
+
1089
+ Returns:
1090
+ A list of sample weights.
1091
+
1092
+ Raises:
1093
+ ValueError: If an unknown sampler type is provided.
1094
+ """
1095
+ if sampler_type == "weighted":
1096
+ assert indices_df is not None
1097
+ if "weights" not in indices_df.columns or force_recompute_weight:
1098
+ indices_df = calc_weights_for_df(
1099
+ indices_df=indices_df,
1100
+ beta_dict=beta_dict,
1101
+ alpha_dict=alpha_dict,
1102
+ )
1103
+ return indices_df["weights"].astype("float32")
1104
+ elif sampler_type == "uniform":
1105
+ assert indices_df is not None
1106
+ return [1 / len(indices_df) for _ in range(len(indices_df))]
1107
+ else:
1108
+ raise ValueError(f"Unknown sampler type: {sampler_type}")
1109
+
1110
+
1111
+ def get_datasets(
1112
+ configs: ConfigDict, error_dir: Optional[str]
1113
+ ) -> tuple[WeightedMultiDataset, dict[str, BaseSingleDataset]]:
1114
+ """
1115
+ Get training and testing datasets given configs
1116
+
1117
+ Args:
1118
+ configs: A ConfigDict containing the dataset configurations.
1119
+ error_dir: The directory where error logs will be saved.
1120
+
1121
+ Returns:
1122
+ A tuple containing the training dataset and a dictionary of testing datasets.
1123
+ """
1124
+
1125
+ def _get_dataset_param(config_dict, dataset_name: str, stage: str):
1126
+ # Template_featurizer is under development
1127
+ # Lig_atom_rename/shuffle_mols/shuffle_sym_ids do not affect the performance very much
1128
+ return {
1129
+ "name": dataset_name,
1130
+ **config_dict["base_info"],
1131
+ "cropping_configs": config_dict["cropping_configs"],
1132
+ "error_dir": error_dir,
1133
+ "msa_featurizer": get_msa_featurizer(configs, dataset_name, stage),
1134
+ "template_featurizer": None,
1135
+ "lig_atom_rename": config_dict.get("lig_atom_rename", False),
1136
+ "shuffle_mols": config_dict.get("shuffle_mols", False),
1137
+ "shuffle_sym_ids": config_dict.get("shuffle_sym_ids", False),
1138
+ "constraint": config_dict.get("constraint", {}),
1139
+ }
1140
+
1141
+ data_config = configs.data
1142
+ logger.info(f"Using train sets {data_config.train_sets}")
1143
+ assert len(data_config.train_sets) == len(
1144
+ data_config.train_sampler.train_sample_weights
1145
+ )
1146
+ train_datasets = []
1147
+ datapoint_weights = []
1148
+ for train_name in data_config.train_sets:
1149
+ config_dict = data_config[train_name].to_dict()
1150
+ dataset_param = _get_dataset_param(
1151
+ config_dict, dataset_name=train_name, stage="train"
1152
+ )
1153
+ dataset_param["ref_pos_augment"] = data_config.get(
1154
+ "train_ref_pos_augment", True
1155
+ )
1156
+ dataset_param["limits"] = data_config.get("limits", -1)
1157
+ train_dataset = BaseSingleDataset(**dataset_param)
1158
+ train_datasets.append(train_dataset)
1159
+ datapoint_weights.append(
1160
+ get_sample_weights(
1161
+ **data_config[train_name]["sampler_configs"],
1162
+ indices_df=train_dataset.indices_list,
1163
+ )
1164
+ )
1165
+ train_dataset = WeightedMultiDataset(
1166
+ datasets=train_datasets,
1167
+ dataset_names=data_config.train_sets,
1168
+ datapoint_weights=datapoint_weights,
1169
+ dataset_sample_weights=data_config.train_sampler.train_sample_weights,
1170
+ )
1171
+
1172
+ test_datasets = {}
1173
+ test_sets = data_config.test_sets
1174
+ for test_name in test_sets:
1175
+ config_dict = data_config[test_name].to_dict()
1176
+ dataset_param = _get_dataset_param(
1177
+ config_dict, dataset_name=test_name, stage="test"
1178
+ )
1179
+ dataset_param["ref_pos_augment"] = data_config.get("test_ref_pos_augment", True)
1180
+ test_dataset = BaseSingleDataset(**dataset_param)
1181
+ test_datasets[test_name] = test_dataset
1182
+ return train_dataset, test_datasets
protenix/data/esm_featurizer.py ADDED
@@ -0,0 +1,184 @@
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import traceback
17
+
18
+ import pandas as pd
19
+ import torch
20
+
21
+ from protenix.data.compute_esm import compute_ESM_embeddings, load_esm_model
22
+ from protenix.utils.logger import get_logger
23
+
24
+ logger = get_logger(__name__)
25
+
26
+
27
+ class ESMFeaturizer:
28
+
29
+ def __init__(
30
+ self,
31
+ embedding_dir: str,
32
+ sequence_fpath: str,
33
+ embedding_dim: int = 1028,
34
+ error_dir: str = None,
35
+ ):
36
+ self.embedding_dir = embedding_dir
37
+ self.sequence_fpath = sequence_fpath
38
+ self.seq_to_filename = self.get_seq_to_filename(sequence_fpath)
39
+ self.embedding_dim = embedding_dim
40
+ self.error_dir = error_dir
41
+ if self.error_dir is not None:
42
+ self.error_dir = os.path.join(self.error_dir, "esm_error")
43
+ os.makedirs(self.error_dir, exist_ok=True)
44
+
45
+ def get_seq_to_filename(self, sequence_fpath: str) -> dict[str, str]:
46
+ df = pd.read_csv(sequence_fpath)
47
+ df["filename"] = (
48
+ df["part_id"].astype(str) + "/" + df["seq_label"].astype(str) + ".pt"
49
+ )
50
+ return df.set_index("seq")["filename"].to_dict()
51
+
52
+ def load_esm_embedding(self, sequence: str):
53
+ x = torch.load(os.path.join(self.embedding_dir, self.seq_to_filename[sequence]))
54
+ assert x.size(0) == len(sequence)
55
+ return x
56
+
57
+ def save_error(self, error_sequences, pdb_id):
58
+ if (self.error_dir is None) or (len(error_sequences) == 0):
59
+ return
60
+ for error_data in error_sequences:
61
+ fpath = os.path.join(
62
+ self.error_dir, f"{pdb_id}_{error_data['entity_id']}.txt"
63
+ )
64
+ if os.path.exists(fpath):
65
+ continue
66
+ with open(fpath, "w") as f:
67
+ f.write(error_data["error"])
68
+
69
+ def __call__(self, token_array, atom_array, bioassembly_dict, inference_mode=False):
70
+
71
+ # init as zeros
72
+ N_token = len(token_array)
73
+ x = torch.zeros([N_token, self.embedding_dim])
74
+
75
+ # get one atom per token
76
+ centre_atoms_indices = token_array.get_annotation("centre_atom_index")
77
+ centre_atom_array = atom_array[centre_atoms_indices]
78
+
79
+ # protein entities
80
+ is_protein = centre_atom_array.chain_mol_type == "protein"
81
+ protein_entity_ids = set(centre_atom_array.label_entity_id[is_protein])
82
+
83
+ if inference_mode:
84
+ entity_id_to_sequence = (
85
+ {}
86
+ ) # Only contains protein entity, many-to-one mapping
87
+ for i, entity_info_wrapper in enumerate(bioassembly_dict["sequences"]):
88
+ entity_id = str(i + 1)
89
+ entity_type = list(entity_info_wrapper.keys())[0]
90
+ entity_info = entity_info_wrapper[entity_type]
91
+ if entity_type == "proteinChain":
92
+ entity_id_to_sequence[entity_id] = entity_info["sequence"]
93
+
94
+ # enumerate over the entities
95
+ error_sequences = []
96
+ for entity_id in protein_entity_ids:
97
+ try:
98
+ # Get sequence
99
+ if inference_mode:
100
+ sequence = entity_id_to_sequence[entity_id]
101
+ else:
102
+ sequence = bioassembly_dict["sequences"][str(entity_id)]
103
+ x_esm = self.load_esm_embedding(sequence)
104
+ # Get residue indices of the cropped tokens
105
+ entity_mask = centre_atom_array.label_entity_id == entity_id
106
+ res_index = (
107
+ centre_atom_array.res_id[entity_mask] - 1
108
+ ) # res_id starts with 1
109
+ # Get esm embedding according to residue indices
110
+ x[entity_mask] = x_esm[res_index]
111
+ except Exception as e:
112
+ error_message = f"{e}:\n{traceback.format_exc()}"
113
+ error_sequences.append(
114
+ {
115
+ "entity_id": entity_id,
116
+ "error": error_message,
117
+ }
118
+ )
119
+ logger.warning(
120
+ f"[{bioassembly_dict['pdb_id']}] ESM error: {error_message}"
121
+ )
122
+
123
+ id_key = "name" if inference_mode else "pdb_id"
124
+ self.save_error(error_sequences, pdb_id=bioassembly_dict[id_key])
125
+
126
+ return x
127
+
128
+ @staticmethod
129
+ def precompute_esm_embedding(
130
+ inputs: list, model_name, embedding_dir, sequence_fpath, checkpoint_dir
131
+ ):
132
+ print("Precompute ESM embeddings")
133
+ # prepare seq_label
134
+ all_seq_dict = []
135
+ for sample_dict in inputs:
136
+ sample_name = sample_dict["name"]
137
+ for i, entity_info_wrapper in enumerate(sample_dict["sequences"]):
138
+ pdb_entity_id = sample_name + "_" + str(i + 1)
139
+ entity_type = list(entity_info_wrapper.keys())[0]
140
+ entity_info = entity_info_wrapper[entity_type]
141
+ if entity_type == "proteinChain":
142
+ all_seq_dict.append(
143
+ {
144
+ "seq": entity_info["sequence"],
145
+ "pdb_entity_id": pdb_entity_id,
146
+ "seq_label": pdb_entity_id,
147
+ "part_id": pdb_entity_id,
148
+ }
149
+ )
150
+ df_seq = pd.DataFrame(
151
+ all_seq_dict, columns=["seq", "pdb_entity_id", "seq_label", "part_id"]
152
+ )
153
+ df_seq.to_csv(sequence_fpath)
154
+ print(f"Save sequence file to {sequence_fpath}")
155
+
156
+ model, alphabet = load_esm_model(model_name, local_esm_dir=checkpoint_dir)
157
+ error_parts = []
158
+ part_counts = dict(df_seq["part_id"].value_counts())
159
+ for part_id, count in part_counts.items():
160
+ df_part = df_seq[df_seq["part_id"] == part_id]
161
+ print(f"Part {part_id}: {len(df_part)} sequences.")
162
+ labels = df_part["seq_label"].tolist()
163
+ sequences = df_part["seq"].tolist()
164
+ try:
165
+ save_dir = os.path.join(embedding_dir, part_id)
166
+ if not os.path.exists(save_dir):
167
+ os.makedirs(save_dir)
168
+ lm_embeddings = compute_ESM_embeddings(
169
+ model_name,
170
+ model,
171
+ alphabet,
172
+ labels,
173
+ sequences,
174
+ save_dir,
175
+ truncation_seq_length=4094,
176
+ toks_per_batch=16384,
177
+ )
178
+ print(
179
+ f"[{part_id}] Processed {len(lm_embeddings)} sequences in total. Done!"
180
+ )
181
+ except Exception as e:
182
+ print(f"[{part_id}] {e}")
183
+ error_parts.append(part_id)
184
+ print("Error parts: ", error_parts)
protenix/data/featurizer.py ADDED
@@ -0,0 +1,828 @@
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import copy
16
+ from collections import defaultdict
17
+ from typing import Optional, Union
18
+
19
+ import numpy as np
20
+ import torch
21
+ from biotite.structure import Atom, AtomArray, get_residue_starts
22
+ from sklearn.neighbors import KDTree
23
+
24
+ from protenix.data.constants import STD_RESIDUES, STD_RESIDUES_WITH_GAP, get_all_elems
25
+ from protenix.data.tokenizer import Token, TokenArray
26
+ from protenix.data.utils import get_atom_level_token_mask, get_ligand_polymer_bond_mask
27
+ from protenix.utils.geometry import angle_3p, random_transform
28
+
29
+
30
+ class Featurizer(object):
31
+ def __init__(
32
+ self,
33
+ cropped_token_array: TokenArray,
34
+ cropped_atom_array: AtomArray,
35
+ ref_pos_augment: bool = True,
36
+ lig_atom_rename: bool = False,
37
+ ) -> None:
38
+ """
39
+ Args:
40
+ cropped_token_array (TokenArray): TokenArray object after cropping
41
+ cropped_atom_array (AtomArray): AtomArray object after cropping
42
+ ref_pos_augment (bool): Boolean indicating whether apply random rotation and translation on ref_pos
43
+ lig_atom_rename (bool): Boolean indicating whether rename atom name for ligand atoms
44
+ """
45
+ self.cropped_token_array = cropped_token_array
46
+
47
+ self.cropped_atom_array = cropped_atom_array
48
+ self.ref_pos_augment = ref_pos_augment
49
+ self.lig_atom_rename = lig_atom_rename
50
+
51
+ @staticmethod
52
+ def encoder(
53
+ encode_def_dict_or_list: Optional[Union[dict, list[str]]], input_list: list[str]
54
+ ) -> torch.Tensor:
55
+ """
56
+ Encode a list of input values into a binary format using a specified encoding definition list.
57
+
58
+ Args:
59
+ encode_def_dict_or_list (list or dict): A list or dict of encoding definitions.
60
+ input_list (list): A list of input values to be encoded.
61
+
62
+ Returns:
63
+ torch.Tensor: A tensor representing the binary encoding of the input values.
64
+ """
65
+ num_keys = len(encode_def_dict_or_list)
66
+ if isinstance(encode_def_dict_or_list, dict):
67
+ items = encode_def_dict_or_list.items()
68
+ assert (
69
+ num_keys == max(encode_def_dict_or_list.values()) + 1
70
+ ), "Do not use discontinuous number, which might causing potential bugs in the code"
71
+ elif isinstance(encode_def_dict_or_list, list):
72
+ items = ((key, idx) for idx, key in enumerate(encode_def_dict_or_list))
73
+ else:
74
+ raise TypeError(
75
+ "encode_def_dict_or_list must be a list or dict, "
76
+ f"but got {type(encode_def_dict_or_list)}"
77
+ )
78
+ onehot_dict = {
79
+ key: [int(i == idx) for i in range(num_keys)] for key, idx in items
80
+ }
81
+ onehot_encoded_data = [onehot_dict[item] for item in input_list]
82
+ onehot_tensor = torch.Tensor(onehot_encoded_data)
83
+ return onehot_tensor
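A minimal sketch of the encoder behaviour with a toy list-style definition (not the real residue vocabulary):

import torch

vocab = ["A", "C", "G", "U"]  # toy encoding definition list
onehot = {key: [int(i == idx) for i in range(len(vocab))] for idx, key in enumerate(vocab)}
print(torch.Tensor([onehot[x] for x in ["C", "A"]]))
# tensor([[0., 1., 0., 0.],
#         [1., 0., 0., 0.]])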
84
+
85
+ @staticmethod
86
+ def restype_onehot_encoded(restype_list: list[str]) -> torch.Tensor:
87
+ """
88
+ Ref: AlphaFold3 SI Table 5 "restype"
89
+ One-hot encoding of the sequence. 32 possible values: 20 amino acids + unknown,
90
+ 4 RNA nucleotides + unknown, 4 DNA nucleotides + unknown, and gap.
91
+ Ligands represented as “unknown amino acid”.
92
+
93
+ Args:
94
+ restype_list (List[str]): A list of residue types.
95
+ The residue type of ligand should be "UNK" in the input list.
96
+
97
+ Returns:
98
+ torch.Tensor: A Tensor of one-hot encoded residue types
99
+ """
100
+
101
+ return Featurizer.encoder(STD_RESIDUES_WITH_GAP, restype_list)
102
+
103
+ @staticmethod
104
+ def elem_onehot_encoded(elem_list: list[str]) -> torch.Tensor:
105
+ """
106
+ Ref: AlphaFold3 SI Table 5 "ref_element"
107
+ One-hot encoding of the element atomic number for each atom
108
+ in the reference conformer, up to atomic number 128.
109
+
110
+ Args:
111
+ elem_list (List[str]): A list of element symbols.
112
+
113
+ Returns:
114
+ torch.Tensor: A Tensor of one-hot encoded elements
115
+ """
116
+ return Featurizer.encoder(get_all_elems(), elem_list)
117
+
118
+ @staticmethod
119
+ def ref_atom_name_chars_encoded(atom_names: list[str]) -> torch.Tensor:
120
+ """
121
+ Ref: AlphaFold3 SI Table 5 "ref_atom_name_chars"
122
+ One-hot encoding of the unique atom names in the reference conformer.
123
+ Each character is encoded as ord(c) − 32, and names are padded to length 4.
124
+
125
+ Args:
126
+ atom_names (List[str]): A list of atom names.
127
+
128
+ Returns:
129
+ torch.Tensor: A Tensor of character encoded atom names
130
+ """
131
+ onehot_dict = {}
132
+ for index, key in enumerate(range(64)):
133
+ onehot = [0] * 64
134
+ onehot[index] = 1
135
+ onehot_dict[key] = onehot
136
+ # [N_atom, 4, 64]
137
+ mol_encode = []
138
+ for atom_name in atom_names:
139
+ # [4, 64]
140
+ atom_encode = []
141
+ for name_str in atom_name.ljust(4):
142
+ atom_encode.append(onehot_dict[ord(name_str) - 32])
143
+ mol_encode.append(atom_encode)
144
+ onehot_tensor = torch.Tensor(mol_encode)
145
+ return onehot_tensor
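For intuition on the character encoding, a quick sketch of how a single padded atom name maps to indices before the one-hot step:

name = "CA".ljust(4)  # names are padded to length 4
print([ord(c) - 32 for c in name])  # [35, 33, 0, 0] -> four one-hot rows of length 64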
146
+
147
+ @staticmethod
148
+ def get_prot_nuc_frame(token: Token, centre_atom: Atom) -> tuple[int, list[int]]:
149
+ """
150
+ Ref: AlphaFold3 SI Chapter 4.3.2
151
+ For proteins/DNA/RNA, we use the three atoms [N, CA, C] / [C1', C3', C4']
152
+
153
+ Args:
154
+ token (Token): Token object.
155
+ centre_atom (Atom): Biotite Atom object of Token centre atom.
156
+
157
+ Returns:
158
+ has_frame (int): 1 if the token has frame, 0 otherwise.
159
+ frame_atom_index (List[int]): The index of the atoms used to construct the frame.
160
+ """
161
+ if centre_atom.mol_type == "protein":
162
+ # For protein
163
+ abc_atom_name = ["N", "CA", "C"]
164
+ else:
165
+ # For DNA and RNA
166
+ abc_atom_name = [r"C1'", r"C3'", r"C4'"]
167
+
168
+ idx_in_atom_indices = []
169
+ for i in abc_atom_name:
170
+ if centre_atom.mol_type == "protein" and "N" not in token.atom_names:
171
+ return 0, [-1, -1, -1]
172
+ elif centre_atom.mol_type != "protein" and "C1'" not in token.atom_names:
173
+ return 0, [-1, -1, -1]
174
+ idx_in_atom_indices.append(token.atom_names.index(i))
175
+ # Protein/DNA/RNA always has frame
176
+ has_frame = 1
177
+ frame_atom_index = [token.atom_indices[i] for i in idx_in_atom_indices]
178
+ return has_frame, frame_atom_index
179
+
180
+ @staticmethod
181
+ def get_lig_frame(
182
+ token: Token,
183
+ centre_atom: Atom,
184
+ lig_res_ref_conf_kdtree: dict[str, tuple[KDTree, list[int]]],
185
+ ref_pos: torch.Tensor,
186
+ ref_mask: torch.Tensor,
187
+ ) -> tuple[int, list[int]]:
188
+ """
189
+ Ref: AlphaFold3 SI Chapter 4.3.2
190
+ For ligands, we use the reference conformer of the ligand to construct the frame.
191
+
192
+ Args:
193
+ token (Token): Token object.
194
+ centre_atom (Atom): Biotite Atom object of Token centre atom.
195
+ lig_res_ref_conf_kdtree (Dict[str, Tuple[KDTree, List[int]]]): A dictionary of KDTree objects and atom indices.
196
+ ref_pos (torch.Tensor): Atom positions in the reference conformer. Size=[N_atom, 3]
197
+ ref_mask (torch.Tensor): Mask indicating which atom slots are used in the reference conformer. Size=[N_atom]
198
+
199
+ Returns:
200
+ tuple[int, List[int]]:
201
+ has_frame (int): 1 if the token has frame, 0 otherwise.
202
+ frame_atom_index (List[int]): The index of the atoms used to construct the frame.
203
+ """
204
+ kdtree, atom_ids = lig_res_ref_conf_kdtree[centre_atom.ref_space_uid]
205
+ b_ref_pos = ref_pos[token.centre_atom_index]
206
+ b_idx = token.centre_atom_index
207
+ if kdtree is None:
208
+ # Atom num < 3
209
+ frame_atom_index = [-1, b_idx, -1]
210
+ has_frame = 0
211
+ else:
212
+ _dist, ind = kdtree.query([b_ref_pos], k=3)
213
+ a_idx, c_idx = atom_ids[ind[0][1]], atom_ids[ind[0][2]]
214
+ frame_atom_index = [a_idx, b_idx, c_idx]
215
+
216
+ # Check if the reference conformer is valid
217
+ has_frame = all([ref_mask[idx] for idx in frame_atom_index])
218
+
219
+ # Colinear check
220
+ if has_frame:
221
+ vec1 = ref_pos[frame_atom_index[1]] - ref_pos[frame_atom_index[0]]
222
+ vec2 = ref_pos[frame_atom_index[2]] - ref_pos[frame_atom_index[1]]
223
+ # ref_pos can be all zeros, in which case has_frame=0
224
+ is_zero_norm = np.isclose(
225
+ np.linalg.norm(vec1, axis=-1), 0
226
+ ) or np.isclose(np.linalg.norm(vec2, axis=-1), 0)
227
+ if is_zero_norm:
228
+ has_frame = 0
229
+ else:
230
+ theta_degrees = angle_3p(
231
+ *[ref_pos[idx] for idx in frame_atom_index]
232
+ )
233
+ is_colinear = theta_degrees <= 25 or theta_degrees >= 155
234
+ if is_colinear:
235
+ has_frame = 0
236
+ return has_frame, frame_atom_index
237
+
238
+ @staticmethod
239
+ def get_token_frame(
240
+ token_array: TokenArray,
241
+ atom_array: AtomArray,
242
+ ref_pos: torch.Tensor,
243
+ ref_mask: torch.Tensor,
244
+ ) -> TokenArray:
245
+ """
246
+ Ref: AlphaFold3 SI Chapter 4.3.2
247
+ The atoms (a_i, b_i, c_i) used to construct token i’s frame depend on the chain type of i:
248
+ Protein tokens use their residue’s backbone (N, Cα, C),
249
+ while DNA and RNA tokens use (C1′, C3′, C4′) atoms of their residue.
250
+ All other tokens (small molecules, glycans, ions) contain only one atom per token.
251
+ The token atom is assigned to b_i, the closest atom to the token atom is a_i,
252
+ and the second closest atom to the token atom is c_i.
253
+ If this set of three atoms is close to colinear (less than 25 degree deviation),
254
+ or if three atoms do not exist in the chain (e.g. a sodium ion),
255
+ then the frame is marked as invalid.
256
+
257
+ Note: frames constucted from reference conformer
258
+
259
+ Args:
260
+ token_array (TokenArray): A list of tokens.
261
+ atom_array (AtomArray): An atom array.
262
+ ref_pos (torch.Tensor): Atom positions in the reference conformer. Size=[N_atom, 3]
263
+ ref_mask (torch.Tensor): Mask indicating which atom slots are used in the reference conformer. Size=[N_atom]
264
+
265
+ Returns:
266
+ TokenArray: A TokenArray with updated frame annotations.
267
+ - has_frame: 1 if the token has frame, 0 otherwise.
268
+ - frame_atom_index: The index of the atoms used to construct the frame.
269
+ """
270
+ token_array_w_frame = token_array
271
+ atom_level_token_mask = get_atom_level_token_mask(token_array, atom_array)
272
+
273
+ # Construct a KDTree for queries to avoid redundant distance calculations
274
+ lig_res_ref_conf_kdtree = {}
275
+ # Ligand and non-standard residues need to use ref to identify frames
276
+ lig_atom_array = atom_array[
277
+ (atom_array.mol_type == "ligand")
278
+ | (~np.isin(atom_array.res_name, list(STD_RESIDUES.keys())))
279
+ | atom_level_token_mask
280
+ ]
281
+ for ref_space_uid in np.unique(lig_atom_array.ref_space_uid):
282
+ # The ref_space_uid is the unique identifier ID for each residue.
283
+ atom_ids = np.where(atom_array.ref_space_uid == ref_space_uid)[0]
284
+ if len(atom_ids) >= 3:
285
+ kdtree = KDTree(ref_pos[atom_ids], metric="euclidean")
286
+ else:
287
+ # Invalid frame
288
+ kdtree = None
289
+ lig_res_ref_conf_kdtree[ref_space_uid] = (kdtree, atom_ids)
290
+
291
+ has_frame = []
292
+ for token in token_array_w_frame:
293
+ centre_atom = atom_array[token.centre_atom_index]
294
+ if (
295
+ centre_atom.mol_type != "ligand"
296
+ and centre_atom.res_name in STD_RESIDUES
297
+ and len(token.atom_indices) > 1
298
+ ):
299
+ has_frame, frame_atom_index = Featurizer.get_prot_nuc_frame(
300
+ token, centre_atom
301
+ )
302
+
303
+ else:
304
+ has_frame, frame_atom_index = Featurizer.get_lig_frame(
305
+ token, centre_atom, lig_res_ref_conf_kdtree, ref_pos, ref_mask
306
+ )
307
+
308
+ token.has_frame = has_frame
309
+ token.frame_atom_index = frame_atom_index
310
+ return token_array_w_frame
311
+
312
+ def get_token_features(self) -> dict[str, torch.Tensor]:
313
+ """
314
+ Ref: AlphaFold3 SI Chapter 2.8
315
+
316
+ Get token features.
317
+ The size of these features is [N_token].
318
+
319
+ Returns:
320
+ Dict[str, torch.Tensor]: A dict of token features.
321
+ """
322
+ token_features = {}
323
+
324
+ centre_atoms_indices = self.cropped_token_array.get_annotation(
325
+ "centre_atom_index"
326
+ )
327
+ centre_atoms = self.cropped_atom_array[centre_atoms_indices]
328
+
329
+ restype = centre_atoms.cano_seq_resname
330
+ restype_onehot = self.restype_onehot_encoded(restype)
331
+
332
+ token_features["token_index"] = torch.arange(0, len(self.cropped_token_array))
333
+ token_features["residue_index"] = torch.Tensor(
334
+ centre_atoms.res_id.astype(int)
335
+ ).long()
336
+ token_features["asym_id"] = torch.Tensor(centre_atoms.asym_id_int).long()
337
+ token_features["entity_id"] = torch.Tensor(centre_atoms.entity_id_int).long()
338
+ token_features["sym_id"] = torch.Tensor(centre_atoms.sym_id_int).long()
339
+ token_features["restype"] = restype_onehot
340
+
341
+ return token_features
342
+
343
+ def get_chain_perm_features(self) -> dict[str, torch.Tensor]:
344
+ """
345
+ The chain permutation use "entity_mol_id", "mol_id" and "mol_atom_index"
346
+ instead of the "entity_id", "asym_id" and "residue_index".
347
+
348
+ The shape of these features is [N_atom].
349
+
350
+ Returns:
351
+ Dict[str, torch.Tensor]: A dict of chain permutation features.
352
+ """
353
+
354
+ chain_perm_features = {}
355
+ chain_perm_features["mol_id"] = torch.Tensor(
356
+ self.cropped_atom_array.mol_id
357
+ ).long()
358
+ chain_perm_features["mol_atom_index"] = torch.Tensor(
359
+ self.cropped_atom_array.mol_atom_index
360
+ ).long()
361
+ chain_perm_features["entity_mol_id"] = torch.Tensor(
362
+ self.cropped_atom_array.entity_mol_id
363
+ ).long()
364
+ return chain_perm_features
365
+
366
+ def get_renamed_atom_names(self) -> np.ndarray:
367
+ """
368
+ Rename the atom names of ligands to avoid information leakage.
369
+
370
+ Returns:
371
+ np.ndarray: A numpy array of renamed atom names.
372
+ """
373
+ res_starts = get_residue_starts(
374
+ self.cropped_atom_array, add_exclusive_stop=True
375
+ )
376
+ new_atom_names = copy.deepcopy(self.cropped_atom_array.atom_name)
377
+ for start, stop in zip(res_starts[:-1], res_starts[1:]):
378
+ res_mol_type = self.cropped_atom_array.mol_type[start]
379
+ if res_mol_type != "ligand":
380
+ continue
381
+
382
+ elem_count = defaultdict(int)
383
+ new_res_atom_names = []
384
+ for elem in self.cropped_atom_array.element[start:stop]:
385
+ elem_count[elem] += 1
386
+ new_res_atom_names.append(f"{elem.upper()}{elem_count[elem]}")
387
+ new_atom_names[start:stop] = new_res_atom_names
388
+ return new_atom_names
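The renaming scheme replaces each ligand atom name with its element symbol plus a running count; a minimal sketch for one hypothetical ligand residue:

from collections import defaultdict

elements = ["C", "C", "N", "O", "C"]  # elements of a toy ligand residue
elem_count = defaultdict(int)
renamed = []
for elem in elements:
    elem_count[elem] += 1
    renamed.append(f"{elem.upper()}{elem_count[elem]}")
print(renamed)  # ['C1', 'C2', 'N1', 'O1', 'C3']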
389
+
390
+ def get_reference_features(self) -> dict[str, torch.Tensor]:
391
+ """
392
+ Ref: AlphaFold3 SI Chapter 2.8
393
+
394
+ Get reference features.
395
+ The size of these features is [N_atom].
396
+
397
+ Returns:
398
+ Dict[str, torch.Tensor]: a dict of reference features.
399
+ """
400
+ ref_pos = []
401
+ for ref_space_uid in np.unique(self.cropped_atom_array.ref_space_uid):
402
+ res_ref_pos = random_transform(
403
+ self.cropped_atom_array.ref_pos[
404
+ self.cropped_atom_array.ref_space_uid == ref_space_uid,
405
+ ],
406
+ apply_augmentation=self.ref_pos_augment,
407
+ centralize=True,
408
+ )
409
+ ref_pos.append(res_ref_pos)
410
+ ref_pos = np.concatenate(ref_pos)
411
+
412
+ ref_features = {}
413
+ ref_features["ref_pos"] = torch.Tensor(ref_pos)
414
+ ref_features["ref_mask"] = torch.Tensor(self.cropped_atom_array.ref_mask).long()
415
+ ref_features["ref_element"] = Featurizer.elem_onehot_encoded(
416
+ self.cropped_atom_array.element
417
+ ).long()
418
+ ref_features["ref_charge"] = torch.Tensor(
419
+ self.cropped_atom_array.ref_charge
420
+ ).long()
421
+
422
+ if self.lig_atom_rename:
423
+ atom_names = self.get_renamed_atom_names()
424
+ else:
425
+ atom_names = self.cropped_atom_array.atom_name
426
+
427
+ ref_features["ref_atom_name_chars"] = Featurizer.ref_atom_name_chars_encoded(
428
+ atom_names
429
+ ).long()
430
+ ref_features["ref_space_uid"] = torch.Tensor(
431
+ self.cropped_atom_array.ref_space_uid
432
+ ).long()
433
+
434
+ token_array_with_frame = self.get_token_frame(
435
+ token_array=self.cropped_token_array,
436
+ atom_array=self.cropped_atom_array,
437
+ ref_pos=ref_features["ref_pos"],
438
+ ref_mask=ref_features["ref_mask"],
439
+ )
440
+ ref_features["has_frame"] = torch.Tensor(
441
+ token_array_with_frame.get_annotation("has_frame")
442
+ ).long() # [N_token]
443
+ ref_features["frame_atom_index"] = torch.Tensor(
444
+ token_array_with_frame.get_annotation("frame_atom_index")
445
+ ).long() # [N_token, 3]
446
+ return ref_features
447
+
448
+ def get_bond_features(self) -> dict[str, torch.Tensor]:
449
+ """
450
+ Ref: AlphaFold3 SI Chapter 2.8
451
+ A 2D matrix indicating if there is a bond between any atom in token i and token j,
452
+ restricted to just polymer-ligand and ligand-ligand bonds and bonds less than 2.4 Å during training.
453
+ The size of bond feature is [N_token, N_token].
454
+ Returns:
455
+ Dict[str, torch.Tensor]: A dict of bond features.
456
+ """
457
+ bond_array = self.cropped_atom_array.bonds.as_array()
458
+ bond_atom_i = bond_array[:, 0]
459
+ bond_atom_j = bond_array[:, 1]
460
+ ref_space_uid = self.cropped_atom_array.ref_space_uid
461
+ polymer_mask = np.isin(
462
+ self.cropped_atom_array.mol_type, ["protein", "dna", "rna"]
463
+ )
464
+ std_res_mask = (
465
+ np.isin(self.cropped_atom_array.res_name, list(STD_RESIDUES.keys()))
466
+ & polymer_mask
467
+ )
468
+ unstd_res_mask = ~std_res_mask & polymer_mask
469
+ # polymer-polymer bonds (std-std, std-unstd, and inter-unstd) are not included in token_bonds.
470
+ std_std_bond_mask = std_res_mask[bond_atom_i] & std_res_mask[bond_atom_j]
471
+ std_unstd_bond_mask = (
472
+ std_res_mask[bond_atom_i] & unstd_res_mask[bond_atom_j]
473
+ ) | (std_res_mask[bond_atom_j] & unstd_res_mask[bond_atom_i])
474
+ inter_unstd_bond_mask = (
475
+ unstd_res_mask[bond_atom_i] & unstd_res_mask[bond_atom_j]
476
+ ) & (ref_space_uid[bond_atom_i] != ref_space_uid[bond_atom_j])
477
+ kept_bonds = bond_array[
478
+ ~(std_std_bond_mask | std_unstd_bond_mask | inter_unstd_bond_mask)
479
+ ]
480
+ # -1 means the atom is not in any token
481
+ atom_idx_to_token_idx = np.zeros(len(self.cropped_atom_array), dtype=int) - 1
482
+ for idx, token in enumerate(self.cropped_token_array.tokens):
483
+ for atom_idx in token.atom_indices:
484
+ atom_idx_to_token_idx[atom_idx] = idx
485
+ assert np.all(atom_idx_to_token_idx >= 0), "Some atoms are not in any token"
486
+ num_tokens = len(self.cropped_token_array)
487
+ token_adj_matrix = np.zeros((num_tokens, num_tokens), dtype=int)
488
+ bond_token_i, bond_token_j = (
489
+ atom_idx_to_token_idx[kept_bonds[:, 0]],
490
+ atom_idx_to_token_idx[kept_bonds[:, 1]],
491
+ )
492
+ for i, j in zip(bond_token_i, bond_token_j):
493
+ token_adj_matrix[i, j] = 1
494
+ token_adj_matrix[j, i] = 1
495
+ bond_features = {"token_bonds": torch.Tensor(token_adj_matrix)}
496
+ return bond_features
497
+
498
+ def get_extra_features(self) -> dict[str, torch.Tensor]:
499
+ """
500
+ Get other features not listed in AlphaFold3 SI Chapter 2.8 Table 5.
501
+ The size of these features is [N_atom].
502
+
503
+ Returns:
504
+ Dict[str, torch.Tensor]: a dict of extra features.
505
+ """
506
+ atom_to_token_idx_dict = {}
507
+ for idx, token in enumerate(self.cropped_token_array.tokens):
508
+ for atom_idx in token.atom_indices:
509
+ atom_to_token_idx_dict[atom_idx] = idx
510
+
511
+ # Ensure the order of the atom_to_token_idx is the same as the atom_array
512
+ atom_to_token_idx = [
513
+ atom_to_token_idx_dict[atom_idx]
514
+ for atom_idx in range(len(self.cropped_atom_array))
515
+ ]
516
+
517
+ extra_features = {}
518
+ extra_features["atom_to_token_idx"] = torch.Tensor(atom_to_token_idx).long()
519
+ extra_features["atom_to_tokatom_idx"] = torch.Tensor(
520
+ self.cropped_atom_array.tokatom_idx
521
+ ).long()
522
+
523
+ extra_features["is_protein"] = torch.Tensor(
524
+ self.cropped_atom_array.is_protein
525
+ ).long()
526
+ extra_features["is_ligand"] = torch.Tensor(
527
+ self.cropped_atom_array.is_ligand
528
+ ).long()
529
+ extra_features["is_dna"] = torch.Tensor(self.cropped_atom_array.is_dna).long()
530
+ extra_features["is_rna"] = torch.Tensor(self.cropped_atom_array.is_rna).long()
531
+ if "resolution" in self.cropped_atom_array._annot:
532
+ extra_features["resolution"] = torch.Tensor(
533
+ [self.cropped_atom_array.resolution[0]]
534
+ )
535
+ else:
536
+ extra_features["resolution"] = torch.Tensor([-1])
537
+ return extra_features
538
+
539
+ @staticmethod
540
+ def get_lig_pocket_mask(
541
+ atom_array: AtomArray, lig_label_asym_id: Union[str, list]
542
+ ) -> tuple[torch.Tensor, torch.Tensor]:
543
+ """
544
+ Ref: AlphaFold3 Chapter Methods.Metrics
545
+
546
+ the pocket is defined as all heavy atoms within 10 Å of any heavy atom of the ligand,
547
+ restricted to the primary polymer chain for the ligand or modified residue being scored,
548
+ and further restricted to only backbone atoms for proteins. The primary polymer chain is defined variously:
549
+ for PoseBusters it is the protein chain with the most atoms within 10 Å of the ligand,
550
+ for bonded ligand scores it is the bonded polymer chain and for modified residues it
551
+ is the chain that the residue is contained in (minus that residue).
552
+
553
+ Args:
554
+ atom_array (AtomArray): atoms in the complex.
555
+ lig_label_asym_id (Union[str, List]): The label_asym_id of the ligand of interest.
556
+
557
+ Returns:
558
+ tuple[torch.Tensor, torch.Tensor]: A tuple of ligand masks and pocket masks, one row per ligand.
559
+ """
560
+
561
+ if isinstance(lig_label_asym_id, str):
562
+ lig_label_asym_ids = [lig_label_asym_id]
563
+ else:
564
+ lig_label_asym_ids = list(lig_label_asym_id)
565
+
566
+ # Get backbone mask
567
+ prot_backbone = (
568
+ atom_array.is_protein & np.isin(atom_array.atom_name, ["C", "N", "CA"])
569
+ ).astype(bool)
570
+
571
+ kdtree = KDTree(atom_array.coord)
572
+
573
+ ligand_mask_list = []
574
+ pocket_mask_list = []
575
+ for lig_label_asym_id in lig_label_asym_ids:
576
+ assert np.isin(
577
+ lig_label_asym_id, atom_array.label_asym_id
578
+ ), f"{lig_label_asym_id} is not in the label_asym_id of the cropped atom array."
579
+
580
+ ligand_mask = atom_array.label_asym_id == lig_label_asym_id
581
+ lig_pos = atom_array.coord[ligand_mask & atom_array.is_resolved]
582
+
583
+ # Get atoms in 10 Angstrom radius
584
+ near_atom_indices = np.unique(
585
+ np.concatenate(kdtree.query_radius(lig_pos, 10.0))
586
+ )
587
+ near_atoms = [
588
+ (
589
+ True
590
+ if ((i in near_atom_indices) and atom_array.is_resolved[i])
591
+ else False
592
+ )
593
+ for i in range(len(atom_array))
594
+ ]
595
+
596
+ # Get primary chain (protein backbone in 10 Angstrom radius)
597
+ primary_chain_candidates = near_atoms & prot_backbone
598
+ primary_chain_candidates_atoms = atom_array[primary_chain_candidates]
599
+
600
+ max_atom = 0
601
+ primary_chain_asym_id_int = None
602
+ for asym_id_int in np.unique(primary_chain_candidates_atoms.asym_id_int):
603
+ n_atoms = np.sum(
604
+ primary_chain_candidates_atoms.asym_id_int == asym_id_int
605
+ )
606
+ if n_atoms > max_atom:
607
+ max_atom = n_atoms
608
+ primary_chain_asym_id_int = asym_id_int
609
+ assert (
610
+ primary_chain_asym_id_int is not None
611
+ ), f"No primary chain found for ligand ({lig_label_asym_id=})."
612
+
613
+ pocket_mask = primary_chain_candidates & (
614
+ atom_array.asym_id_int == primary_chain_asym_id_int
615
+ )
616
+ ligand_mask_list.append(ligand_mask)
617
+ pocket_mask_list.append(pocket_mask)
618
+
619
+ ligand_mask_by_pockets = torch.Tensor(
620
+ np.array(ligand_mask_list).astype(int)
621
+ ).long()
622
+ pocket_mask_by_pockets = torch.Tensor(
623
+ np.array(pocket_mask_list).astype(int)
624
+ ).long()
625
+ return ligand_mask_by_pockets, pocket_mask_by_pockets
626
+
627
+ def get_mask_features(self) -> dict[str, torch.Tensor]:
628
+ """
629
+ Generate mask features for the cropped atom array.
630
+
631
+ Returns:
632
+ Dict[str, torch.Tensor]: A dictionary containing various mask features.
633
+ """
634
+ mask_features = {}
635
+
636
+ mask_features["pae_rep_atom_mask"] = torch.Tensor(
637
+ self.cropped_atom_array.centre_atom_mask
638
+ ).long()
639
+
640
+ mask_features["plddt_m_rep_atom_mask"] = torch.Tensor(
641
+ self.cropped_atom_array.plddt_m_rep_atom_mask
642
+ ).long() # [N_atom]
643
+
644
+ mask_features["distogram_rep_atom_mask"] = torch.Tensor(
645
+ self.cropped_atom_array.distogram_rep_atom_mask
646
+ ).long() # [N_atom]
647
+
648
+ mask_features["modified_res_mask"] = torch.Tensor(
649
+ self.cropped_atom_array.modified_res_mask
650
+ ).long()
651
+
652
+ lig_polymer_bonds = get_ligand_polymer_bond_mask(self.cropped_atom_array)
653
+ num_atoms = len(self.cropped_atom_array)
654
+ bond_mask_mat = np.zeros((num_atoms, num_atoms))
655
+ for i, j, _ in lig_polymer_bonds:
656
+ bond_mask_mat[i, j] = 1
657
+ bond_mask_mat[j, i] = 1
658
+ mask_features["bond_mask"] = torch.Tensor(
659
+ bond_mask_mat
660
+ ).long() # [N_atom, N_atom]
661
+ return mask_features
662
+
663
+ def get_all_input_features(self):
664
+ """
665
+ Get input features from cropped data.
666
+
667
+ Returns:
668
+ Dict[str, torch.Tensor]: a dict of features.
669
+ """
670
+ features = {}
671
+ token_features = self.get_token_features()
672
+ features.update(token_features)
673
+
674
+ bond_features = self.get_bond_features()
675
+ features.update(bond_features)
676
+
677
+ reference_features = self.get_reference_features()
678
+ features.update(reference_features)
679
+
680
+ extra_features = self.get_extra_features()
681
+ features.update(extra_features)
682
+
683
+ chain_perm_features = self.get_chain_perm_features()
684
+ features.update(chain_perm_features)
685
+
686
+ mask_features = self.get_mask_features()
687
+ features.update(mask_features)
688
+ return features
689
+
690
+ def get_labels(self) -> dict[str, torch.Tensor]:
691
+ """
692
+ Get the input labels required for the training phase.
693
+
694
+ Returns:
695
+ Dict[str, torch.Tensor]: a dict of labels.
696
+ """
697
+
698
+ labels = {}
699
+
700
+ labels["coordinate"] = torch.Tensor(
701
+ self.cropped_atom_array.coord
702
+ ) # [N_atom, 3]
703
+
704
+ labels["coordinate_mask"] = torch.Tensor(
705
+ self.cropped_atom_array.is_resolved.astype(int)
706
+ ).long() # [N_atom]
707
+ return labels
708
+
709
+ def get_atom_permutation_list(
710
+ self,
711
+ ) -> list[list[int]]:
712
+ """
713
+ Generate info of permutations.
714
+
715
+ Returns:
716
+ List[List[int]]: a list of atom permutations.
717
+ """
718
+ atom_perm_list = []
719
+ for i in self.cropped_atom_array.res_perm:
720
+ # Decode list[str] -> list[list[int]]
721
+ atom_perm_list.append([int(j) for j in i.split("_")])
722
+
723
+ # Atoms connected to a different residue are fixed.
724
+ # Bonds array: [[atom_idx_i, atom_idx_j, bond_type]]
725
+ idx_i = self.cropped_atom_array.bonds._bonds[:, 0]
726
+ idx_j = self.cropped_atom_array.bonds._bonds[:, 1]
727
+ diff_mask = (
728
+ self.cropped_atom_array.ref_space_uid[idx_i]
729
+ != self.cropped_atom_array.ref_space_uid[idx_j]
730
+ )
731
+ inter_residue_bonds = self.cropped_atom_array.bonds._bonds[diff_mask]
732
+ fixed_atom_mask = np.isin(
733
+ np.arange(len(self.cropped_atom_array)),
734
+ np.unique(inter_residue_bonds[:, :2]),
735
+ )
736
+
737
+ # Get fixed atom permutation for each residue.
738
+ fixed_atom_perm_list = []
739
+ res_starts = get_residue_starts(
740
+ self.cropped_atom_array, add_exclusive_stop=True
741
+ )
742
+ for r_start, r_stop in zip(res_starts[:-1], res_starts[1:]):
743
+ atom_res_perm = np.array(
744
+ atom_perm_list[r_start:r_stop]
745
+ ) # [N_res_atoms, N_res_perm]
746
+ res_fixed_atom_mask = fixed_atom_mask[r_start:r_stop]
747
+
748
+ if np.sum(res_fixed_atom_mask) == 0:
749
+ # If none of the atoms in the residue are fixed, e.g. ions
750
+ fixed_atom_perm_list.extend(atom_res_perm.tolist())
751
+ continue
752
+
753
+ # Create a [N_res_atoms, N_res_perm] template of indices
754
+ n_res_atoms, n_perm = atom_res_perm.shape
755
+ indices_template = (
756
+ atom_res_perm[:, 0].reshape(n_res_atoms, 1).repeat(n_perm, axis=1)
757
+ )
758
+
759
+ # Identify the columns where the positions of the fixed atoms remain unchanged
760
+ fixed_atom_perm = atom_res_perm[
761
+ res_fixed_atom_mask
762
+ ] # [N_fixed_res_atoms, N_res_perm]
763
+ fixed_indices_template = indices_template[
764
+ res_fixed_atom_mask
765
+ ] # [N_fixed_res_atoms, N_res_perm]
766
+ unchanged_columns_mask = np.all(
767
+ fixed_atom_perm == fixed_indices_template, axis=0
768
+ )
769
+
770
+ # Remove the columns related to the position changes of fixed atoms.
771
+ fixed_atom_res_perm = atom_res_perm[:, unchanged_columns_mask]
772
+ fixed_atom_perm_list.extend(fixed_atom_res_perm.tolist())
773
+ return fixed_atom_perm_list
774
+
775
+ @staticmethod
776
+ def get_gt_full_complex_features(
777
+ atom_array: AtomArray,
778
+ cropped_atom_array: AtomArray = None,
779
+ get_cropped_asym_only: bool = True,
780
+ ) -> dict[str, torch.Tensor]:
781
+ """Get full ground truth complex features.
782
+ It is used for multi-chain permutation alignment.
783
+
784
+ Args:
785
+ atom_array (AtomArray): all atoms in the complex.
786
+ cropped_atom_array (AtomArray, optional): cropped atoms. Defaults to None.
787
+ get_cropped_asym_only (bool, optional): Defaults to True.
788
+ - If true, a chain is returned only if its asym_id (mol_id) appears in the
789
+ cropped_atom_array. This is the preferred setting for spatial cropping.
790
+ - If false, a chain is returned if its entity_id (entity_mol_id) appears in
791
+ the cropped_atom_array.
792
+
793
+ Returns:
794
+ Dict[str, torch.Tensor]: a dictionary containing
795
+ coordinate, coordinate_mask, etc.
796
+ """
797
+ gt_features = {}
798
+
799
+ if cropped_atom_array is not None:
800
+ # Get the cropped part of gt entities
801
+ entity_atom_set = set(
802
+ zip(
803
+ cropped_atom_array.entity_mol_id,
804
+ cropped_atom_array.mol_atom_index,
805
+ )
806
+ )
807
+ mask = [
808
+ (entity, atom) in entity_atom_set
809
+ for (entity, atom) in zip(
810
+ atom_array.entity_mol_id, atom_array.mol_atom_index
811
+ )
812
+ ]
813
+
814
+ if get_cropped_asym_only:
815
+ # Restrict to asym chains that appear in cropped_atom_array
816
+ asyms = np.unique(cropped_atom_array.mol_id)
817
+ mask = mask * np.isin(atom_array.mol_id, asyms)
818
+ atom_array = atom_array[mask]
819
+
820
+ gt_features["coordinate"] = torch.Tensor(atom_array.coord)
821
+ gt_features["coordinate_mask"] = torch.Tensor(atom_array.is_resolved).long()
822
+ gt_features["entity_mol_id"] = torch.Tensor(atom_array.entity_mol_id).long()
823
+ gt_features["mol_id"] = torch.Tensor(atom_array.mol_id).long()
824
+ gt_features["mol_atom_index"] = torch.Tensor(atom_array.mol_atom_index).long()
825
+ gt_features["pae_rep_atom_mask"] = torch.Tensor(
826
+ atom_array.centre_atom_mask
827
+ ).long()
828
+ return gt_features, atom_array
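The (entity_mol_id, mol_atom_index) pair matching used in `get_gt_full_complex_features` above can be illustrated with plain NumPy. The arrays below are invented for the example; only the annotation names mirror the diff above, and this sketch is not part of the committed file.

```python
import numpy as np

# Hypothetical ground-truth annotations: 5 atoms across 2 entities.
gt_entity_mol_id = np.array([1, 1, 1, 2, 2])
gt_mol_atom_index = np.array([0, 1, 2, 0, 1])

# Hypothetical cropped annotations: only atoms 0 and 2 of entity 1 survived the crop.
crop_entity_mol_id = np.array([1, 1])
crop_mol_atom_index = np.array([0, 2])

# Keep a ground-truth atom only if its (entity, atom-index) pair also occurs
# in the cropped array, mirroring the mask built in get_gt_full_complex_features.
pair_set = set(zip(crop_entity_mol_id.tolist(), crop_mol_atom_index.tolist()))
mask = np.array([(e, a) in pair_set
                 for e, a in zip(gt_entity_mol_id, gt_mol_atom_index)])
print(mask)  # [ True False  True False False]
```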
protenix/data/filter.py ADDED
@@ -0,0 +1,637 @@
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import biotite.structure as struc
16
+ import numpy as np
17
+ from biotite.structure import AtomArray, get_molecule_indices
18
+ from scipy.spatial.distance import cdist
19
+
20
+ from protenix.data.constants import CRYSTALLIZATION_AIDS
21
+
22
+
23
+ class Filter(object):
24
+ """
25
+ Ref: AlphaFold3 SI Chapter 2.5.4
26
+ """
27
+
28
+ @staticmethod
29
+ def remove_hydrogens(atom_array: AtomArray) -> AtomArray:
30
+ """remove hydrogens and deuteriums"""
31
+ return atom_array[~np.isin(atom_array.element, ["H", "D"])]
32
+
33
+ @staticmethod
34
+ def remove_water(atom_array: AtomArray) -> AtomArray:
35
+ """remove water (HOH) and deuterated water (DOD)"""
36
+ return atom_array[~np.isin(atom_array.res_name, ["HOH", "DOD"])]
37
+
38
+ @staticmethod
39
+ def remove_element_X(atom_array: AtomArray) -> AtomArray:
40
+ """
41
+ remove element X
42
+ following residues have element X:
43
+ - UNX: unknown single atom or ion
44
+ - UNL: unknown ligand, some atoms are marked as X
45
+ - ASX: ASP/ASN ambiguous, two ambiguous atoms are marked as X, 6 entries in the PDB
46
+ - GLX: GLU/GLN ambiguous, two ambiguous atoms are marked as X, 5 entries in the PDB
47
+ """
48
+ X_mask = np.zeros(len(atom_array), dtype=bool)
49
+ starts = struc.get_residue_starts(atom_array, add_exclusive_stop=True)
50
+ for start, stop in zip(starts[:-1], starts[1:]):
51
+ res_name = atom_array.res_name[start]
52
+ if res_name in ["UNX", "UNL"]:
53
+ X_mask[start:stop] = True
54
+ atom_array = atom_array[~X_mask]
55
+
56
+ # map ASX to ASP, as ASP is more symmetric than ASN
57
+ mask = atom_array.res_name == "ASX"
58
+ atom_array.res_name[mask] = "ASP"
59
+ atom_array.atom_name[mask & (atom_array.atom_name == "XD1")] = "OD1"
60
+ atom_array.atom_name[mask & (atom_array.atom_name == "XD2")] = "OD2"
61
+ atom_array.element[mask & (atom_array.element == "X")] = "O"
62
+
63
+ # map GLX to GLU, as GLU is more symmetric than GLN
64
+ mask = atom_array.res_name == "GLX"
65
+ atom_array.res_name[mask] = "GLU"
66
+ atom_array.atom_name[mask & (atom_array.atom_name == "XE1")] = "OE1"
67
+ atom_array.atom_name[mask & (atom_array.atom_name == "XE2")] = "OE2"
68
+ atom_array.element[mask & (atom_array.element == "X")] = "O"
69
+ return atom_array
70
+
71
+ @staticmethod
72
+ def remove_crystallization_aids(
73
+ atom_array: AtomArray, entity_poly_type: dict
74
+ ) -> AtomArray:
75
+ """remove crystallization aids, eg: SO4, GOL, etc.
76
+
77
+ Only remove crystallization aids if the chain is not polymer.
78
+
79
+ Ref: AlphaFold3 SI Chapter 2.5.4
80
+ """
81
+ non_aids_mask = ~np.isin(atom_array.res_name, CRYSTALLIZATION_AIDS)
82
+ poly_mask = np.isin(atom_array.label_entity_id, list(entity_poly_type.keys()))
83
+ return atom_array[poly_mask | non_aids_mask]
84
+
85
+ @staticmethod
86
+ def _get_clashing_chains(
87
+ atom_array: AtomArray, chain_ids: list[str]
88
+ ) -> tuple[np.ndarray, list[int]]:
89
+ """
90
+ Calculate the number of atoms clashing with other chains for each chain
91
+ and return a matrix that records the count of clashing atoms.
92
+
93
+ Note: if two chains are covalent, they are not considered as clashing.
94
+
95
+ Args:
96
+ atom_array (AtomArray): All atoms, including those not resolved.
97
+ chain_ids (list[str]): Unique chain indices of resolved atoms.
98
+
99
+ Returns:
100
+ tuple:
101
+ clash_records (numpy.ndarray): Matrix of clashing atom num.
102
+ (i, j) is the number of i's atoms that clash with j's atoms.
103
+ Note: (i, j) != (j, i).
104
+ chain_resolved_atom_nums (list[int]): The number of resolved atoms corresponding to each chain ID.
105
+ """
106
+ is_resolved_centre_atom = (
107
+ atom_array.centre_atom_mask == 1
108
+ ) & atom_array.is_resolved
109
+ cell_list = struc.CellList(
110
+ atom_array, cell_size=1.7, selection=is_resolved_centre_atom
111
+ )
112
+
113
+ # (i, j) is the number of i's atoms that clash with j's atoms
114
+ clash_records = np.zeros((len(chain_ids), len(chain_ids)))
115
+
116
+ # record the number of resolved atoms for each chain
117
+ chain_resolved_atom_nums = []
118
+
119
+ # record covalent relationship between chains
120
+ chains_covalent_dict = {}
121
+ for idx, chain_id_i in enumerate(chain_ids):
122
+ for chain_id_j in chain_ids[idx + 1 :]:
123
+ mol_indices = get_molecule_indices(
124
+ atom_array[np.isin(atom_array.chain_id, [chain_id_i, chain_id_j])]
125
+ )
126
+ if len(mol_indices) == 1:
127
+ covalent = 1
128
+ else:
129
+ covalent = 0
130
+ chains_covalent_dict[(chain_id_i, chain_id_j)] = covalent
131
+ chains_covalent_dict[(chain_id_j, chain_id_i)] = covalent
132
+
133
+ for i, chain_id in enumerate(chain_ids):
134
+ coords = atom_array.coord[
135
+ (atom_array.chain_id == chain_id) & is_resolved_centre_atom
136
+ ]
137
+ chain_resolved_atom_nums.append(len(coords))
138
+ chain_atom_ids = np.where(atom_array.chain_id == chain_id)[0]
139
+ chain_atom_ids_set = set(chain_atom_ids) | {-1}
140
+
141
+ # Get atom indices from the current cell and its neighboring cells.
142
+ neighbors_ids_2d = cell_list.get_atoms_in_cells(coords, cell_radius=1)
143
+ neighbors_ids = np.unique(neighbors_ids_2d)
144
+
145
+ # Remove the atom indices of the current chain.
146
+ other_chain_atom_ids = list(set(neighbors_ids) - chain_atom_ids_set)
147
+
148
+ if not other_chain_atom_ids:
149
+ continue
150
+ else:
151
+ # Calculate the distance matrix with neighboring atoms.
152
+ other_chain_atom_coords = atom_array.coord[other_chain_atom_ids]
153
+ dist_mat = cdist(coords, other_chain_atom_coords, metric="euclidean")
154
+ clash_mat = dist_mat < 1.6 # change 1.7 to 1.6 for more compatibility
155
+ if np.any(clash_mat):
156
+ clashed_other_chain_ids = atom_array.chain_id[other_chain_atom_ids]
157
+
158
+ for other_chain_id in set(clashed_other_chain_ids):
159
+
160
+ # two chains covalent with each other
161
+ if chains_covalent_dict[(chain_id, other_chain_id)]:
162
+ continue
163
+
164
+ cols = np.where(clashed_other_chain_ids == other_chain_id)[0]
165
+
166
+ # how many i's atoms clashed with j
167
+ any_atom_clashed = np.any(
168
+ clash_mat[:, cols].astype(int), axis=1
169
+ )
170
+ clashed_atom_num = np.sum(any_atom_clashed.astype(int))
171
+
172
+ if clashed_atom_num > 0:
173
+ j = chain_ids.index(other_chain_id)
174
+ clash_records[i][j] += clashed_atom_num
175
+ return clash_records, chain_resolved_atom_nums
176
+
177
+ @staticmethod
178
+ def _get_removed_clash_chain_ids(
179
+ clash_records: np.ndarray,
180
+ chain_ids: list[str],
181
+ chain_resolved_atom_nums: list[int],
182
+ core_chain_id: np.ndarray = [],
183
+ ) -> list[str]:
184
+ """
185
+ Perform pairwise comparisons on the chains, and select the chain IDs
186
+ to be deleted according to the clashing chain rules.
187
+
188
+ Args:
189
+ clash_records (numpy.ndarray): Matrix of clashing atom num.
190
+ (i, j) is the number of i's atoms that clash with j's atoms.
191
+ Note: (i, j) != (j, i).
192
+ chain_ids (list[str]): Unique chain indices of resolved atoms.
193
+ chain_resolved_atom_nums (list[int]): The number of resolved atoms corresponding to each chain ID.
194
+ core_chain_id (np.ndarray): The chain ID of the core chain.
195
+
196
+ Returns:
197
+ list[str]: A list of chain IDs that have been determined for deletion.
198
+ """
199
+ removed_chain_ids = []
200
+ for i in range(len(chain_ids)):
201
+ atom_num_i = chain_resolved_atom_nums[i]
202
+ chain_idx_i = chain_ids[i]
203
+
204
+ if chain_idx_i in removed_chain_ids:
205
+ continue
206
+
207
+ for j in range(i + 1, len(chain_ids)):
208
+ atom_num_j = chain_resolved_atom_nums[j]
209
+ chain_idx_j = chain_ids[j]
210
+
211
+ if chain_idx_j in removed_chain_ids:
212
+ continue
213
+
214
+ clash_num_ij, clash_num_ji = (
215
+ clash_records[i][j],
216
+ clash_records[j][i],
217
+ )
218
+
219
+ clash_ratio_ij = clash_num_ij / atom_num_i
220
+ clash_ratio_ji = clash_num_ji / atom_num_j
221
+
222
+ if clash_ratio_ij <= 0.3 and clash_ratio_ji <= 0.3:
223
+ # does not reach the threshold
224
+ continue
225
+ else:
226
+ # clashing chains
227
+ if (
228
+ chain_idx_i in core_chain_id
229
+ and chain_idx_j not in core_chain_id
230
+ ):
231
+ removed_chain_idx = chain_idx_j
232
+ elif (
233
+ chain_idx_i not in core_chain_id
234
+ and chain_idx_j in core_chain_id
235
+ ):
236
+ removed_chain_idx = chain_idx_i
237
+
238
+ elif clash_ratio_ij > clash_ratio_ji:
239
+ removed_chain_idx = chain_idx_i
240
+ elif clash_ratio_ij < clash_ratio_ji:
241
+ removed_chain_idx = chain_idx_j
242
+ else:
243
+ if atom_num_i < atom_num_j:
244
+ removed_chain_idx = chain_idx_i
245
+ elif atom_num_i > atom_num_j:
246
+ removed_chain_idx = chain_idx_j
247
+ else:
248
+ removed_chain_idx = sorted([chain_idx_i, chain_idx_j])[1]
249
+
250
+ removed_chain_ids.append(removed_chain_idx)
251
+
252
+ if removed_chain_idx == chain_idx_i:
253
+ # chain i already removed
254
+ break
255
+ return removed_chain_ids
256
+
257
+ @staticmethod
258
+ def remove_polymer_chains_all_residues_unknown(
259
+ atom_array: AtomArray,
260
+ entity_poly_type: dict,
261
+ ) -> AtomArray:
262
+ """remove chains with all residues unknown"""
263
+ chain_starts = struc.get_chain_starts(atom_array, add_exclusive_stop=True)
264
+ invalid_chains = [] # list of [start, end)
265
+ for index in range(len(chain_starts) - 1):
266
+ start, end = chain_starts[index], chain_starts[index + 1]
267
+ entity_id = atom_array[start].label_entity_id
268
+ if (
269
+ entity_poly_type.get(entity_id, "non-poly") == "polypeptide(L)"
270
+ and np.all(atom_array.res_name[start:end] == "UNK")
271
+ ) or (
272
+ entity_poly_type.get(entity_id, "non-poly")
273
+ in (
274
+ "polyribonucleotide",
275
+ "polydeoxyribonucleotide",
276
+ )
277
+ and np.all(atom_array.res_name[start:end] == "N")
278
+ ):
279
+ invalid_chains.append((start, end))
280
+ mask = np.ones(len(atom_array), dtype=bool)
281
+ for start, end in invalid_chains:
282
+ mask[start:end] = False
283
+ atom_array = atom_array[mask]
284
+ return atom_array
285
+
286
+ @staticmethod
287
+ def remove_polymer_chains_too_short(
288
+ atom_array: AtomArray, entity_poly_type: dict
289
+ ) -> AtomArray:
290
+ chain_starts = struc.get_chain_starts(atom_array, add_exclusive_stop=True)
291
+ invalid_chains = [] # list of [start, end)
292
+ for index in range(len(chain_starts) - 1):
293
+ start, end = chain_starts[index], chain_starts[index + 1]
294
+ entity_id = atom_array[start].label_entity_id
295
+ num_residue_ids = len(set(atom_array.label_seq_id[start:end]))
296
+ if (
297
+ entity_poly_type.get(entity_id, "non-poly")
298
+ in (
299
+ "polypeptide(L)", # TODO: how to handle polypeptide(D)?
300
+ "polyribonucleotide",
301
+ "polydeoxyribonucleotide",
302
+ )
303
+ and num_residue_ids < 4
304
+ ):
305
+ invalid_chains.append((start, end))
306
+ mask = np.ones(len(atom_array), dtype=bool)
307
+ for start, end in invalid_chains:
308
+ mask[start:end] = False
309
+ atom_array = atom_array[mask]
310
+ return atom_array
311
+
312
+ @staticmethod
313
+ def remove_polymer_chains_with_consecutive_c_alpha_too_far_away(
314
+ atom_array: AtomArray, entity_poly_type: dict, max_distance: float = 10.0
315
+ ) -> AtomArray:
316
+ chain_starts = struc.get_chain_starts(atom_array, add_exclusive_stop=True)
317
+ invalid_chains = [] # list of [start, end)
318
+ for index in range(len(chain_starts) - 1):
319
+ start, end = chain_starts[index], chain_starts[index + 1]
320
+ entity_id = atom_array.label_entity_id[start]
321
+ if entity_poly_type.get(entity_id, "non-poly") == "polypeptide(L)":
322
+ peptide_atoms = atom_array[start:end]
323
+ ca_atoms = peptide_atoms[peptide_atoms.atom_name == "CA"]
324
+ seq_ids = ca_atoms.label_seq_id
325
+ seq_ids[seq_ids == "."] = "-100"
326
+ seq_ids = seq_ids.astype(np.int64)
327
+ dist_square = np.sum(
328
+ (ca_atoms[:-1].coord - ca_atoms[1:].coord) ** 2, axis=-1
329
+ )
330
+ invalid_neighbor_mask = (dist_square > max_distance**2) & (
331
+ seq_ids[:-1] + 1 == seq_ids[1:]
332
+ )
333
+ if np.any(invalid_neighbor_mask):
334
+ invalid_chains.append((start, end))
335
+ mask = np.ones(len(atom_array), dtype=bool)
336
+ for start, end in invalid_chains:
337
+ mask[start:end] = False
338
+ atom_array = atom_array[mask]
339
+ return atom_array
340
+
341
+ @staticmethod
342
+ def too_many_chains_filter(
343
+ atom_array: AtomArray,
344
+ interface_radius: int = 15,
345
+ max_chains_num: int = 20,
346
+ core_indices: list[int] = None,
347
+ max_tokens_num: int = None,
348
+ ) -> tuple[AtomArray, int]:
349
+ """
350
+ Ref: AlphaFold3 SI Chapter 2.5.4
351
+
352
+ For bioassemblies with greater than 20 chains, we select a random interface token
353
+ (with a centre atom <15 Å to the centre atom of a token in another chain)
354
+ and select the closest 20 chains to this token based on
355
+ minimum distance between any tokens centre atom.
356
+
357
+ Note: due to the presence of covalent small molecules,
358
+ treat the covalent small molecule and the polymer it is attached to
359
+ as a single chain to avoid inadvertently removing the covalent small molecules.
360
+ Use the mol_id added to the AtomArray to differentiate between the various
361
+ parts of the structure composed of covalent bonds.
362
+
363
+ Args:
364
+ atom_array (AtomArray): Biotite AtomArray Object of a Bioassembly.
365
+ interface_radius (int, optional): Atoms within this distance of the central atom are considered interface atoms.
366
+ Defaults to 15.
367
+ max_chains_num (int, optional): The maximum number of chains permitted in a bioassembly.
368
+ Filtration will be applied if the chain count exceeds this value. Defaults to 20.
369
+ core_indices (list[int], optional): A list of indices used to choose the central atom.
370
+ Chains containing these indices are selected with priority.
371
+ If None, a random index from whole AtomArray will be selected. Defaults to None.
372
+ max_tokens_num (int, optional): The maximum number of tokens permitted in a bioassembly.
373
+ If not None, chains beyond max_chains_num continue to be appended until
374
+ max_tokens_num is reached.
375
+
376
+ Returns:
377
+ tuple:
378
+ - atom_array (AtomArray): An AtomArray that has been processed through this filter.
379
+ - input_chains_num (int): The number of chains in the input AtomArray.
380
+ This is to log whether the filter has been utilized.
381
+ """
382
+ # each mol is a so called "chain" in the context of this filter.
383
+ input_chains_num = len(np.unique(atom_array.mol_id))
384
+ if input_chains_num <= max_chains_num:
385
+ # no change
386
+ return atom_array, input_chains_num
387
+
388
+ is_resolved_centre_atom = (
389
+ atom_array.centre_atom_mask == 1
390
+ ) & atom_array.is_resolved
391
+
392
+ cell_list = struc.CellList(
393
+ atom_array, cell_size=interface_radius, selection=is_resolved_centre_atom
394
+ )
395
+ resolved_centre_atom = atom_array[is_resolved_centre_atom]
396
+
397
+ assert resolved_centre_atom, "There is no resolved central atom."
398
+
399
+ # random pick centre atom
400
+ if core_indices is None:
401
+ index_shuf = np.random.default_rng(seed=42).permutation(
402
+ len(resolved_centre_atom)
403
+ )
404
+ else:
405
+ index_shuf = np.array(core_indices)
406
+ resolved_centre_atom_indices = np.nonzero(is_resolved_centre_atom)[0]
407
+
408
+ # get indices of resolved_centre_atom
409
+ index_shuf = np.array(
410
+ [
411
+ np.where(resolved_centre_atom_indices == idx)[0][0]
412
+ for idx in index_shuf
413
+ if idx in resolved_centre_atom_indices
414
+ ]
415
+ )
416
+ np.random.default_rng(seed=42).shuffle(index_shuf)
417
+
418
+ chosen_centre_atom = None
419
+ for idx in index_shuf:
420
+ centre_atom = resolved_centre_atom[idx]
421
+ neighbors_indices = cell_list.get_atoms(
422
+ centre_atom.coord, radius=interface_radius
423
+ )
424
+ neighbors_indices = neighbors_indices[neighbors_indices != -1]
425
+
426
+ neighbors_chain_ids = np.unique(atom_array.mol_id[neighbors_indices])
427
+ # neighbors include centre atom itself
428
+ if len(neighbors_chain_ids) > 1:
429
+ chosen_centre_atom = centre_atom
430
+ break
431
+
432
+ # The distance between the central atoms in any two chains is greater than 15 angstroms.
433
+ if chosen_centre_atom is None:
434
+ return None, input_chains_num
435
+
436
+ dist_mat = cdist(centre_atom.coord.reshape((1, -1)), resolved_centre_atom.coord)
437
+ sorted_chain_id = np.array(
438
+ [
439
+ chain_id
440
+ for chain_id, _dist in sorted(
441
+ zip(resolved_centre_atom.mol_id, dist_mat[0]),
442
+ key=lambda pair: pair[1],
443
+ )
444
+ ]
445
+ )
446
+
447
+ if core_indices is not None:
448
+ # select core chains with priority
449
+ core_mol_id = np.unique(atom_array.mol_id[core_indices])
450
+ in_core_mask = np.isin(sorted_chain_id, core_mol_id)
451
+ sorted_chain_id = np.concatenate(
452
+ (sorted_chain_id[in_core_mask], sorted_chain_id[~in_core_mask])
453
+ )
454
+
455
+ closest_chain_id = set()
456
+ chain_ids_to_token_num = {}
457
+ if max_tokens_num is None:
458
+ max_tokens_num = 0
459
+
460
+ tokens = 0
461
+ for chain_id in sorted_chain_id:
462
+ # get token num
463
+ if chain_id not in chain_ids_to_token_num:
464
+ chain_ids_to_token_num[chain_id] = atom_array.centre_atom_mask[
465
+ atom_array.mol_id == chain_id
466
+ ].sum()
467
+ chain_token_num = chain_ids_to_token_num[chain_id]
468
+
469
+ if len(closest_chain_id) >= max_chains_num:
470
+ if tokens + chain_token_num > max_tokens_num:
471
+ break
472
+
473
+ closest_chain_id.add(chain_id)
474
+ tokens += chain_token_num
475
+
476
+ atom_array = atom_array[np.isin(atom_array.mol_id, list(closest_chain_id))]
477
+ output_chains_num = len(np.unique(atom_array.mol_id))
478
+ assert (
479
+ output_chains_num == max_chains_num
480
+ or atom_array.centre_atom_mask.sum() <= max_tokens_num
481
+ )
482
+ return atom_array, input_chains_num
483
+
484
+ @staticmethod
485
+ def remove_clashing_chains(
486
+ atom_array: AtomArray,
487
+ core_indices: list[int] = None,
488
+ ) -> AtomArray:
489
+ """
490
+ Ref: AlphaFold3 SI Chapter 2.5.4
491
+
492
+ Clashing chains are removed.
493
+ Clashing chains are defined as those with >30% of atoms within 1.7 Å of an atom in another chain.
494
+ If two chains are clashing with each other, the chain with the greater percentage of clashing atoms will be removed.
495
+ If the same fraction of atoms are clashing, the chain with fewer total atoms is removed.
496
+ If the chains have the same number of atoms, then the chain with the larger chain id is removed.
497
+
498
+ Note: if two chains are covalent, they are not considered as clashing.
499
+
500
+ Args:
501
+ atom_array (AtomArray): Biotite AtomArray Object of a Bioassembly.
502
+ core_indices (list[int]): A list of indices for core structures,
503
+ where these indices correspond to structures that will be preferentially
504
+ retained when pairwise clash chain assessments are performed.
505
+
506
+ Returns:
507
+ atom_array (AtomArray): An AtomArray that has been processed through this filter.
508
+ removed_chain_ids (list[str]): A list of chain IDs that have been determined for deletion.
509
+ This is to log whether the filter has been utilized.
510
+ """
511
+ chain_ids = np.unique(atom_array.chain_id[atom_array.is_resolved]).tolist()
512
+
513
+ if core_indices is not None:
514
+ core_chain_id = np.unique(atom_array.chain_id[core_indices])
515
+ else:
516
+ core_chain_id = np.array([])
517
+
518
+ clash_records, chain_resolved_atom_nums = Filter._get_clashing_chains(
519
+ atom_array, chain_ids
520
+ )
521
+ removed_chain_ids = Filter._get_removed_clash_chain_ids(
522
+ clash_records,
523
+ chain_ids,
524
+ chain_resolved_atom_nums,
525
+ core_chain_id=core_chain_id,
526
+ )
527
+
528
+ atom_array = atom_array[~np.isin(atom_array.chain_id, removed_chain_ids)]
529
+ return atom_array, removed_chain_ids
530
+
531
+ @staticmethod
532
+ def remove_unresolved_mols(atom_array: AtomArray) -> AtomArray:
533
+ """
534
+ Remove molecules from a bioassembly object which all atoms are not resolved.
535
+
536
+ Args:
537
+ atom_array (AtomArray): Biotite AtomArray Object of a bioassembly.
538
+
539
+ Returns:
540
+ AtomArray: An AtomArray object with unresolved molecules removed.
541
+ """
542
+ valid_mol_id = []
543
+ for mol_id in np.unique(atom_array.mol_id):
544
+ resolved = atom_array.is_resolved[atom_array.mol_id == mol_id]
545
+ if np.any(resolved):
546
+ valid_mol_id.append(mol_id)
547
+
548
+ atom_array = atom_array[np.isin(atom_array.mol_id, valid_mol_id)]
549
+ return atom_array
550
+
551
+ @staticmethod
552
+ def remove_asymmetric_polymer_ligand_bonds(
553
+ atom_array: AtomArray, entity_poly_type: dict
554
+ ) -> AtomArray:
555
+ """remove asymmetric polymer ligand bonds (including protein-protein bond, like disulfide bond).
556
+
557
+ AF3 SI 5.1 Structure filters
558
+ Bonds for structures with homomeric subcomplexes lacking the corresponding homomeric symmetry are also removed
559
+ - e.g. if a certain bonded ligand only exists for some of the symmetric copies, but not for all,
560
+ we remove the corresponding bond information from the input.
561
+ In consequence the model has to learn to infer these bonds by itself.
562
+
563
+ Args:
564
+ atom_array (AtomArray): input atom array
565
+
566
+ Returns:
567
+ AtomArray: output atom array with asymmetric polymer ligand bonds removed.
568
+ """
569
+ # get inter chain bonds
570
+ inter_chain_bonds = set()
571
+ for i, j, b in atom_array.bonds.as_array():
572
+ if atom_array.chain_id[i] != atom_array.chain_id[j]:
573
+ inter_chain_bonds.add((i, j))
574
+
575
+ # get asymmetric polymer ligand bonds
576
+ asymmetric_bonds = set()
577
+ chain_starts = struc.get_chain_starts(atom_array, add_exclusive_stop=False)
578
+ for bond in inter_chain_bonds:
579
+
580
+ if bond in asymmetric_bonds:
581
+ continue
582
+
583
+ i, j = bond
584
+ atom_i = atom_array[i]
585
+ atom_j = atom_array[j]
586
+ i_is_polymer = atom_i.label_entity_id in entity_poly_type
587
+ j_is_polymer = atom_j.label_entity_id in entity_poly_type
588
+ if i_is_polymer:
589
+ pass
590
+ elif j_is_polymer:
591
+ i, j = j, i
592
+ atom_i, atom_j = atom_j, atom_i
593
+ i_is_polymer, j_is_polymer = j_is_polymer, i_is_polymer
594
+ else:
595
+ # both entity is not polymer
596
+ continue
597
+
598
+ # get atom i mask from all entity i copies
599
+ entity_mask_i = atom_array.label_entity_id == atom_i.label_entity_id
600
+ num_copies = np.isin(chain_starts, np.flatnonzero(entity_mask_i)).sum()
601
+ mask_i = (
602
+ entity_mask_i
603
+ & (atom_array.res_id == atom_i.res_id)
604
+ & (atom_array.atom_name == atom_i.atom_name)
605
+ )
606
+ indices_i = np.flatnonzero(mask_i)
607
+
608
+ if len(indices_i) != num_copies:
609
+ # not every copy of entity i has atom i.
610
+ asymmetric_bonds.add(bond)
611
+ continue
612
+
613
+ # check all atom i in entity i bond to an atom j in entity j.
614
+ target_bonds = []
615
+ for ii in indices_i:
616
+ ii_bonds = [b for b in inter_chain_bonds if ii in b]
617
+ for bond in ii_bonds:
618
+ jj = bond[1] if ii == bond[0] else bond[0]
619
+ atom_jj = atom_array[jj]
620
+ if atom_jj.label_entity_id != atom_j.label_entity_id:
621
+ continue
622
+ if atom_jj.res_name != atom_j.res_name:
623
+ continue
624
+ if atom_jj.atom_name != atom_j.atom_name:
625
+ continue
626
+ if j_is_polymer and atom_jj.res_id != atom_j.res_id:
627
+ # only for polymer, check res_id
628
+ continue
629
+ # found bond (ii, jj) with same entity_id, res_name, atom_name to bond (i,j)
630
+ target_bonds.append((min(ii, jj), max(ii, jj)))
631
+ break
632
+ if len(target_bonds) != num_copies:
633
+ asymmetric_bonds |= set(target_bonds)
634
+
635
+ for bond in asymmetric_bonds:
636
+ atom_array.bonds.remove_bond(bond[0], bond[1])
637
+ return atom_array
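As a reading aid for the clash rules encoded in `_get_removed_clash_chain_ids` above, here is a minimal standalone sketch of the per-pair decision (threshold 30%, ties broken by clash ratio, then atom count, then chain id). It ignores the core-chain priority and is illustrative only, not part of the committed module.

```python
from typing import Optional

def pick_chain_to_remove(
    clash_ij: int, clash_ji: int,
    n_atoms_i: int, n_atoms_j: int,
    chain_i: str, chain_j: str,
) -> Optional[str]:
    """Return the chain id to drop for one clashing pair, or None if below threshold."""
    ratio_ij = clash_ij / n_atoms_i  # fraction of chain i's atoms clashing with chain j
    ratio_ji = clash_ji / n_atoms_j  # fraction of chain j's atoms clashing with chain i
    if ratio_ij <= 0.3 and ratio_ji <= 0.3:
        return None                                            # not a clashing pair
    if ratio_ij != ratio_ji:
        return chain_i if ratio_ij > ratio_ji else chain_j     # higher clash ratio loses
    if n_atoms_i != n_atoms_j:
        return chain_i if n_atoms_i < n_atoms_j else chain_j   # fewer atoms loses
    return sorted([chain_i, chain_j])[1]                       # larger chain id loses

print(pick_chain_to_remove(40, 10, 100, 100, "A", "B"))  # -> "A"
```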
protenix/data/infer_data_pipeline.py ADDED
@@ -0,0 +1,284 @@
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import json
16
+ import logging
17
+ import os
18
+ import time
19
+ import traceback
20
+ import warnings
21
+ from typing import Any, Mapping
22
+
23
+ import torch
24
+ from biotite.structure import AtomArray
25
+ from torch.utils.data import DataLoader, Dataset, DistributedSampler
26
+
27
+ from protenix.data.data_pipeline import DataPipeline
28
+ from protenix.data.esm_featurizer import ESMFeaturizer
29
+ from protenix.data.json_to_feature import SampleDictToFeatures
30
+ from protenix.data.msa_featurizer import InferenceMSAFeaturizer
31
+ from protenix.data.utils import data_type_transform, make_dummy_feature
32
+ from protenix.utils.distributed import DIST_WRAPPER
33
+ from protenix.utils.torch_utils import collate_fn_identity, dict_to_tensor
34
+
35
+ logger = logging.getLogger(__name__)
36
+
37
+ warnings.filterwarnings("ignore", module="biotite")
38
+
39
+
40
+ def get_inference_dataloader(configs: Any) -> DataLoader:
41
+ """
42
+ Creates and returns a DataLoader for inference using the InferenceDataset.
43
+
44
+ Args:
45
+ configs: A configuration object containing the necessary parameters for the DataLoader.
46
+
47
+ Returns:
48
+ A DataLoader object configured for inference.
49
+ """
50
+ inference_dataset = InferenceDataset(
51
+ input_json_path=configs.input_json_path,
52
+ configs=configs,
53
+ dump_dir=configs.dump_dir,
54
+ use_msa=configs.use_msa,
55
+ )
56
+ sampler = DistributedSampler(
57
+ dataset=inference_dataset,
58
+ num_replicas=DIST_WRAPPER.world_size,
59
+ rank=DIST_WRAPPER.rank,
60
+ shuffle=False,
61
+ )
62
+ dataloader = DataLoader(
63
+ dataset=inference_dataset,
64
+ batch_size=1,
65
+ sampler=sampler,
66
+ collate_fn=collate_fn_identity,
67
+ # num_workers=configs.num_workers,
68
+ num_workers=0
69
+ )
70
+ return dataloader
71
+
72
+
73
+ class InferenceDataset(Dataset):
74
+ def __init__(
75
+ self,
76
+ input_json_path: str,
77
+ dump_dir: str,
78
+ use_msa: bool = True,
79
+ configs=None,
80
+ ) -> None:
81
+
82
+ self.input_json_path = input_json_path
83
+ self.dump_dir = dump_dir
84
+ self.use_msa = use_msa
85
+ with open(self.input_json_path, "r") as f:
86
+ self.inputs = json.load(f)
87
+ json_task_name = os.path.basename(self.input_json_path).split(".")[0]
88
+ esm_info = configs.get("esm", {})
89
+ configs.esm.embedding_dir = f"./esm_embeddings/{configs.esm.model_name}"
90
+ configs.esm.sequence_fpath = (
91
+ f"./esm_embeddings/{json_task_name}_prot_sequences.csv"
92
+ )
93
+ self.esm_enable = esm_info.get("enable", False)
94
+ if self.esm_enable:
95
+ os.makedirs(configs.esm.embedding_dir, exist_ok=True)
96
+ os.makedirs(os.path.dirname(configs.esm.sequence_fpath), exist_ok=True)
97
+ ESMFeaturizer.precompute_esm_embedding(
98
+ self.inputs,
99
+ configs.esm.model_name,
100
+ configs.esm.embedding_dir,
101
+ configs.esm.sequence_fpath,
102
+ configs.load_checkpoint_dir,
103
+ )
104
+ self.esm_featurizer = ESMFeaturizer(
105
+ embedding_dir=esm_info.embedding_dir,
106
+ sequence_fpath=esm_info.sequence_fpath,
107
+ embedding_dim=esm_info.embedding_dim,
108
+ error_dir="./esm_embeddings/",
109
+ )
110
+
111
+ def process_one(
112
+ self,
113
+ single_sample_dict: Mapping[str, Any],
114
+ ) -> tuple[dict[str, torch.Tensor], AtomArray, dict[str, float]]:
115
+ """
116
+ Processes a single sample from the input JSON to generate features and statistics.
117
+
118
+ Args:
119
+ single_sample_dict: A dictionary containing the sample data.
120
+
121
+ Returns:
122
+ A tuple containing:
123
+ - A dictionary of features.
124
+ - An AtomArray object.
125
+ - A TokenArray object.
126
+ """
127
+ # general features
128
+ t0 = time.time()
129
+ sample2feat = SampleDictToFeatures(
130
+ single_sample_dict,
131
+ )
132
+ features_dict, atom_array, token_array = sample2feat.get_feature_dict()
133
+ features_dict["distogram_rep_atom_mask"] = torch.Tensor(
134
+ atom_array.distogram_rep_atom_mask
135
+ ).long()
136
+ entity_poly_type = sample2feat.entity_poly_type
137
+ t1 = time.time()
138
+
139
+ # Msa features
140
+ entity_to_asym_id = DataPipeline.get_label_entity_id_to_asym_id_int(atom_array)
141
+ msa_features = (
142
+ InferenceMSAFeaturizer.make_msa_feature(
143
+ bioassembly=single_sample_dict["sequences"],
144
+ entity_to_asym_id=entity_to_asym_id,
145
+ token_array=token_array,
146
+ atom_array=atom_array,
147
+ )
148
+ if self.use_msa
149
+ else {}
150
+ )
151
+
152
+ # Esm features
153
+ if self.esm_enable:
154
+ x_esm = self.esm_featurizer(
155
+ token_array=token_array,
156
+ atom_array=atom_array,
157
+ bioassembly_dict=single_sample_dict,
158
+ inference_mode=True,
159
+ )
160
+ features_dict["esm_token_embedding"] = x_esm
161
+
162
+ # Make dummy features for not implemented features
163
+ dummy_feats = ["template"]
164
+ if len(msa_features) == 0:
165
+ dummy_feats.append("msa")
166
+ else:
167
+ msa_features = dict_to_tensor(msa_features)
168
+ features_dict.update(msa_features)
169
+ features_dict = make_dummy_feature(
170
+ features_dict=features_dict,
171
+ dummy_feats=dummy_feats,
172
+ )
173
+
174
+ # Transform to right data type
175
+ feat = data_type_transform(feat_or_label_dict=features_dict)
176
+
177
+ t2 = time.time()
178
+
179
+ data = {}
180
+ data["input_feature_dict"] = feat
181
+
182
+ # Add dimension related items
183
+ N_token = feat["token_index"].shape[0]
184
+ N_atom = feat["atom_to_token_idx"].shape[0]
185
+ N_msa = feat["msa"].shape[0]
186
+
187
+ stats = {}
188
+ for mol_type in ["ligand", "protein", "dna", "rna"]:
189
+ mol_type_mask = feat[f"is_{mol_type}"].bool()
190
+ stats[f"{mol_type}/atom"] = int(mol_type_mask.sum(dim=-1).item())
191
+ stats[f"{mol_type}/token"] = len(
192
+ torch.unique(feat["atom_to_token_idx"][mol_type_mask])
193
+ )
194
+
195
+ N_asym = len(torch.unique(data["input_feature_dict"]["asym_id"]))
196
+ data.update(
197
+ {
198
+ "N_asym": torch.tensor([N_asym]),
199
+ "N_token": torch.tensor([N_token]),
200
+ "N_atom": torch.tensor([N_atom]),
201
+ "N_msa": torch.tensor([N_msa]),
202
+ }
203
+ )
204
+
205
+ def formatted_key(key):
206
+ type_, unit = key.split("/")
207
+ if type_ == "protein":
208
+ type_ = "prot"
209
+ elif type_ == "ligand":
210
+ type_ = "lig"
211
+ else:
212
+ pass
213
+ return f"N_{type_}_{unit}"
214
+
215
+ data.update(
216
+ {
217
+ formatted_key(k): torch.tensor([stats[k]])
218
+ for k in [
219
+ "protein/atom",
220
+ "ligand/atom",
221
+ "dna/atom",
222
+ "rna/atom",
223
+ "protein/token",
224
+ "ligand/token",
225
+ "dna/token",
226
+ "rna/token",
227
+ ]
228
+ }
229
+ )
230
+ data.update({"entity_poly_type": entity_poly_type})
231
+ t3 = time.time()
232
+ time_tracker = {
233
+ "crop": t1 - t0,
234
+ "featurizer": t2 - t1,
235
+ "added_feature": t3 - t2,
236
+ }
237
+
238
+ return data, atom_array, token_array
239
+
240
+ def __len__(self) -> int:
241
+ return len(self.inputs)
242
+
243
+ def __getitem__(self, index: int) -> tuple[dict[str, torch.Tensor], AtomArray, str]:
244
+ try:
245
+ single_sample_dict = self.inputs[index]
246
+ sample_name = single_sample_dict["name"]
247
+ logger.info(f"Featurizing {sample_name}...")
248
+
249
+ data, atom_array, token_array = self.process_one(
250
+ single_sample_dict=single_sample_dict
251
+ )
252
+ error_message = ""
253
+
254
+ coords = torch.load(f"/home/hui007/Protenix/coord/{sample_name}.pt")
255
+ assert len(coords) == len(token_array)
256
+
257
+ coord_label = {'coordinate': [], 'coordinate_mask': []}
258
+ tmp_coord = []
259
+ tmp_mask = []
260
+ for coord_entry, token in zip(coords, token_array):
261
+ atom_list = coord_entry[1]['atom_list']
262
+ coord_list = coord_entry[1]['coord_list']
263
+ token_atom_names = token.atom_names
264
+
265
+ atom_to_coord = {atom: coord_list[i] for i, atom in enumerate(atom_list)}
266
+
267
+ for atom_name in token_atom_names:
268
+ if atom_name in atom_to_coord:
269
+ tmp_coord.append(atom_to_coord[atom_name].tolist())
270
+ tmp_mask.append(1)
271
+ else:
272
+ tmp_coord.append([0.0, 0.0, 0.0])
273
+ tmp_mask.append(0)
274
+
275
+ coord_label['coordinate'] = torch.tensor(tmp_coord, dtype=torch.float32)
276
+ coord_label['coordinate_mask'] = torch.tensor(tmp_mask)
277
+ coord_label['sample_name'] = sample_name
278
+
279
+ except Exception as e:
280
+ data, atom_array, coord_label = {}, None, None
281
+ error_message = f"{e}:\n{traceback.format_exc()}"
282
+ data["sample_name"] = single_sample_dict["name"]
283
+ data["sample_index"] = index
284
+ return data, atom_array, coord_label, error_message
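The per-token coordinate matching in `__getitem__` above pairs each token atom name with an externally loaded coordinate and records a mask for missing atoms. Below is a self-contained sketch of that matching with made-up data; the names and values are illustrative only.

```python
import torch

# One hypothetical coord entry (as loaded per token in __getitem__) and the
# atom names the token expects; "O" has no coordinate in this record.
atom_list = ["N", "CA", "C"]
coord_list = torch.tensor([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0], [2.4, 1.0, 0.0]])
token_atom_names = ["N", "CA", "C", "O"]

atom_to_coord = {name: coord_list[i] for i, name in enumerate(atom_list)}
coords, mask = [], []
for name in token_atom_names:
    if name in atom_to_coord:
        coords.append(atom_to_coord[name].tolist())
        mask.append(1)
    else:
        coords.append([0.0, 0.0, 0.0])  # placeholder for an unresolved atom
        mask.append(0)

coordinate = torch.tensor(coords, dtype=torch.float32)  # [N_atom, 3]
coordinate_mask = torch.tensor(mask)                    # [N_atom]
print(coordinate.shape, coordinate_mask.tolist())       # torch.Size([4, 3]) [1, 1, 1, 0]
```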
protenix/data/json_maker.py ADDED
@@ -0,0 +1,317 @@
1
+ # Copyright 2024 ByteDance and/or its affiliates.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import argparse
17
+ import copy
18
+ import json
19
+ import os
20
+ from collections import defaultdict
21
+
22
+ import numpy as np
23
+ from biotite.structure import AtomArray, get_chain_starts, get_residue_starts
24
+
25
+ from protenix.data.constants import STD_RESIDUES
26
+ from protenix.data.filter import Filter
27
+ from protenix.data.parser import AddAtomArrayAnnot, MMCIFParser
28
+ from protenix.data.utils import get_lig_lig_bonds, get_ligand_polymer_bond_mask
29
+
30
+
31
+ def merge_covalent_bonds(
32
+ covalent_bonds: list[dict], all_entity_counts: dict[str, int]
33
+ ) -> list[dict]:
34
+ """
35
+ Merge covalent bonds with same entity and position.
36
+
37
+ Args:
38
+ covalent_bonds (list[dict]): A list of covalent bond dicts.
39
+ all_entity_counts (dict[str, int]): A dict of entity id to chain count.
40
+
41
+ Returns:
42
+ list[dict]: A list of merged covalent bond dicts.
43
+ """
44
+ bonds_recorder = defaultdict(list)
45
+ bonds_entity_counts = {}
46
+ for bond_dict in covalent_bonds:
47
+ bond_unique_string = []
48
+ entity_counts = (
49
+ all_entity_counts[str(bond_dict["entity1"])],
50
+ all_entity_counts[str(bond_dict["entity2"])],
51
+ )
52
+ for i in range(2):
53
+ for j in ["entity", "position", "atom"]:
54
+ k = f"{j}{i+1}"
55
+ bond_unique_string.append(str(bond_dict[k]))
56
+ bond_unique_string = "_".join(bond_unique_string)
57
+ bonds_recorder[bond_unique_string].append(bond_dict)
58
+ bonds_entity_counts[bond_unique_string] = entity_counts
59
+
60
+ merged_covalent_bonds = []
61
+ for k, v in bonds_recorder.items():
62
+ counts1 = bonds_entity_counts[k][0]
63
+ counts2 = bonds_entity_counts[k][1]
64
+ if counts1 == counts2 == len(v):
65
+ bond_dict_copy = copy.deepcopy(v[0])
66
+ del bond_dict_copy["copy1"]
67
+ del bond_dict_copy["copy2"]
68
+ merged_covalent_bonds.append(bond_dict_copy)
69
+ else:
70
+ merged_covalent_bonds.extend(v)
71
+ return merged_covalent_bonds
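A hypothetical usage sketch for `merge_covalent_bonds`: two copies of entity 1 each bond to one of the two copies of entity 2, so the per-copy records collapse into a single copy-free bond. Values are invented, and the import path assumes the module layout introduced by this commit; the sketch itself is not part of the committed file.

```python
from protenix.data.json_maker import merge_covalent_bonds

covalent_bonds = [
    {"entity1": 1, "position1": 5, "atom1": "SG", "copy1": 1,
     "entity2": 2, "position2": 1, "atom2": "C1", "copy2": 1},
    {"entity1": 1, "position1": 5, "atom1": "SG", "copy1": 2,
     "entity2": 2, "position2": 1, "atom2": "C1", "copy2": 2},
]
# Both entities have exactly 2 chains and the bond occurs in both copies,
# so the two records are merged into one without "copy1"/"copy2".
print(merge_covalent_bonds(covalent_bonds, {"1": 2, "2": 2}))
# [{'entity1': 1, 'position1': 5, 'atom1': 'SG',
#   'entity2': 2, 'position2': 1, 'atom2': 'C1'}]
```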
72
+
73
+
74
+ def atom_array_to_input_json(
75
+ atom_array: AtomArray,
76
+ parser: MMCIFParser,
77
+ assembly_id: str = None,
78
+ output_json: str = None,
79
+ sample_name: str = None,
80
+ save_entity_and_asym_id: bool = False,
81
+ ) -> dict:
82
+ """
83
+ Convert a Biotite AtomArray to a dict that can be used as input to the model.
84
+
85
+ Args:
86
+ atom_array (AtomArray): Biotite Atom array.
87
+ parser (MMCIFParser): Instantiated Protenix MMCIFParser.
88
+ assembly_id (str, optional): Assembly ID. Defaults to None.
89
+ output_json (str, optional): Output json file path. Defaults to None.
90
+ sample_name (str, optional): The "name" field in the json file. Defaults to None.
91
+ save_entity_and_asym_id (bool, optional): Whether to save entity and asym ids to json.
92
+ Defaults to False.
93
+
94
+ Returns:
95
+ dict: Protenix input json dict.
96
+ """
97
+ # get sequences after modified AtomArray
98
+ entity_seq = parser.get_sequences(atom_array)
99
+
100
+ # add unique chain id
101
+ atom_array = AddAtomArrayAnnot.unique_chain_and_add_ids(atom_array)
102
+
103
+ # get lig entity sequences and position
104
+ label_entity_id_to_sequences = {}
105
+ lig_chain_ids = [] # record chain ids of ligand chains
106
+ for label_entity_id in np.unique(atom_array.label_entity_id):
107
+ if label_entity_id not in parser.entity_poly_type:
108
+ current_lig_chain_ids = np.unique(
109
+ atom_array.chain_id[atom_array.label_entity_id == label_entity_id]
110
+ ).tolist()
111
+ lig_chain_ids += current_lig_chain_ids
112
+ for chain_id in current_lig_chain_ids:
113
+ lig_atom_array = atom_array[atom_array.chain_id == chain_id]
114
+ starts = get_residue_starts(lig_atom_array, add_exclusive_stop=True)
115
+ seq = lig_atom_array.res_name[starts[:-1]].tolist()
116
+ label_entity_id_to_sequences[label_entity_id] = seq
117
+
118
+ # find polymer modifications
119
+ entity_id_to_mod_list = {}
120
+ for entity_id, res_names in parser.get_poly_res_names(atom_array).items():
121
+ modifications_list = []
122
+ for idx, res_name in enumerate(res_names):
123
+ if res_name not in STD_RESIDUES:
124
+ position = idx + 1
125
+ modifications_list.append([position, f"CCD_{res_name}"])
126
+ if modifications_list:
127
+ entity_id_to_mod_list[entity_id] = modifications_list
128
+
129
    chain_starts = get_chain_starts(atom_array, add_exclusive_stop=False)
    chain_starts_atom_array = atom_array[chain_starts]

    json_dict = {
        "sequences": [],
    }
    if assembly_id is not None:
        json_dict["assembly_id"] = assembly_id

    unique_label_entity_id = np.unique(atom_array.label_entity_id)
    chain_id_to_copy_id_dict = {}
    for label_entity_id in unique_label_entity_id:
        chain_ids_in_entity = chain_starts_atom_array.chain_id[
            chain_starts_atom_array.label_entity_id == label_entity_id
        ]
        for chain_count, chain_id in enumerate(chain_ids_in_entity):
            chain_id_to_copy_id_dict[chain_id] = chain_count + 1
    copy_id = np.vectorize(chain_id_to_copy_id_dict.get)(atom_array.chain_id)
    atom_array.set_annotation("copy_id", copy_id)

    all_entity_counts = {}
    label_entity_id_to_entity_id_in_json = {}
    entity_idx = 0
    for label_entity_id in unique_label_entity_id:
        entity_dict = {}
        asym_chains = chain_starts_atom_array[
            chain_starts_atom_array.label_entity_id == label_entity_id
        ]
        entity_type = parser.entity_poly_type.get(label_entity_id, "ligand")
        if entity_type != "ligand":
            if entity_type == "polypeptide(L)":
                entity_type = "proteinChain"
            elif entity_type == "polydeoxyribonucleotide":
                entity_type = "dnaSequence"
            elif entity_type == "polyribonucleotide":
                entity_type = "rnaSequence"
            else:
                # DNA/RNA hybrid, polypeptide(D), etc.
                continue

            sequence = entity_seq.get(label_entity_id)
            entity_dict["sequence"] = sequence
        else:
            # ligand
            lig_ccd = "_".join(label_entity_id_to_sequences[label_entity_id])
            entity_dict["ligand"] = f"CCD_{lig_ccd}"
        entity_dict["count"] = len(asym_chains)
        entity_idx += 1
        entity_id_in_json = str(entity_idx)
        label_entity_id_to_entity_id_in_json[label_entity_id] = entity_id_in_json
        all_entity_counts[entity_id_in_json] = len(asym_chains)
        if save_entity_and_asym_id:
            entity_dict["label_entity_id"] = str(label_entity_id)
            entity_dict["label_asym_id"] = asym_chains.label_asym_id.tolist()

        # add PTM info
        if label_entity_id in entity_id_to_mod_list:
            modifications = entity_id_to_mod_list[label_entity_id]
            if entity_type == "proteinChain":
                entity_dict["modifications"] = [
                    {"ptmPosition": position, "ptmType": mod_ccd_code}
                    for position, mod_ccd_code in modifications
                ]
            else:
                entity_dict["modifications"] = [
                    {"basePosition": position, "modificationType": mod_ccd_code}
                    for position, mod_ccd_code in modifications
                ]

        json_dict["sequences"].append({entity_type: entity_dict})

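    # At this point each item in json_dict["sequences"] looks like (illustrative values):
    #   {"proteinChain": {"sequence": "MVLS...", "count": 2}}
    #   {"ligand": {"ligand": "CCD_HEM", "count": 1}}
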
    # skip some uncommon entities
    atom_array = atom_array[
        np.isin(
            atom_array.label_entity_id,
            list(label_entity_id_to_entity_id_in_json.keys()),
        )
    ]

    # add covalent bonds
    atom_array = AddAtomArrayAnnot.add_token_mol_type(
        atom_array, parser.entity_poly_type
    )
    lig_polymer_bonds = get_ligand_polymer_bond_mask(atom_array, lig_include_ions=False)
    lig_lig_bonds = get_lig_lig_bonds(atom_array, lig_include_ions=False)
    inter_entity_bonds = np.vstack((lig_polymer_bonds, lig_lig_bonds))

    lig_indices = np.where(np.isin(atom_array.chain_id, lig_chain_ids))[0]
    lig_bond_mask = np.any(np.isin(inter_entity_bonds[:, :2], lig_indices), axis=1)
    inter_entity_bonds = inter_entity_bonds[lig_bond_mask]  # select bonds of ligands
    if inter_entity_bonds.size != 0:
        covalent_bonds = []
        for atoms in inter_entity_bonds[:, :2]:
            bond_dict = {}
            for i in range(2):
                position = atom_array.res_id[atoms[i]]
                bond_dict[f"entity{i+1}"] = int(
                    label_entity_id_to_entity_id_in_json[
                        atom_array.label_entity_id[atoms[i]]
                    ]
                )
                bond_dict[f"position{i+1}"] = int(position)
                bond_dict[f"atom{i+1}"] = atom_array.atom_name[atoms[i]]
                bond_dict[f"copy{i+1}"] = int(atom_array.copy_id[atoms[i]])

            covalent_bonds.append(bond_dict)

        # merge covalent_bonds for the same entity
        merged_covalent_bonds = merge_covalent_bonds(covalent_bonds, all_entity_counts)
        json_dict["covalent_bonds"] = merged_covalent_bonds

    json_dict["name"] = sample_name

    if output_json is not None:
        with open(output_json, "w") as f:
            json.dump([json_dict], f, indent=4)
    return json_dict

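# Minimal usage sketch for atom_array_to_input_json (mirrors cif_to_input_json below;
# "example.cif" is a hypothetical path):
#   parser = MMCIFParser("example.cif")
#   atom_array = parser.get_structure("first", model=1, bond_lenth_threshold=None)
#   input_dict = atom_array_to_input_json(atom_array, parser, sample_name="example")
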

def cif_to_input_json(
    mmcif_file: str,
    assembly_id: str = None,
    altloc="first",
    output_json: str = None,
    sample_name: str = None,
    save_entity_and_asym_id: bool = False,
) -> dict:
    """
    Convert an mmCIF file to a Protenix input json file.

    Args:
        mmcif_file (str): mmCIF file path.
        assembly_id (str, optional): Assembly ID. Defaults to None.
        altloc (str, optional): Altloc selection. Defaults to "first".
        output_json (str, optional): Output json file path. Defaults to None.
        sample_name (str, optional): The "name" field in the json file. Defaults to None.
        save_entity_and_asym_id (bool, optional): Whether to save entity and asym ids to the json.
            Defaults to False.

    Returns:
        dict: Protenix input json dict.
    """
    parser = MMCIFParser(mmcif_file)
    atom_array = parser.get_structure(altloc, model=1, bond_lenth_threshold=None)

    # remove waters (HOH), hydrogens, and unknown elements; convert MSE to MET
    atom_array = Filter.remove_water(atom_array)
    atom_array = Filter.remove_hydrogens(atom_array)
    atom_array = parser.mse_to_met(atom_array)
    atom_array = Filter.remove_element_X(atom_array)

    # remove crystallization aids
    if any(["DIFFRACTION" in m for m in parser.methods]):
        atom_array = Filter.remove_crystallization_aids(
            atom_array, parser.entity_poly_type
        )

    if assembly_id is not None:
        # expand the AtomArray to the specified bioassembly
        atom_array = parser.expand_assembly(atom_array, assembly_id)

    if sample_name is None:
        sample_name = os.path.basename(mmcif_file).split(".")[0]

    json_dict = atom_array_to_input_json(
        atom_array,
        parser,
        assembly_id,
        output_json,
        sample_name,
        save_entity_and_asym_id=save_entity_and_asym_id,
    )
    return json_dict

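# Minimal usage sketch ("7abc.cif" and the output path are hypothetical):
#   input_dict = cif_to_input_json("7abc.cif", assembly_id="1", output_json="7abc_input.json")
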

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cif_file", type=str, required=True, help="The cif file to parse"
    )
    parser.add_argument(
        "--json_file",
        type=str,
        required=False,
        default=None,
        help="The json file path to generate",
    )
    args = parser.parse_args()
    print(cif_to_input_json(args.cif_file, output_json=args.json_file))
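
# Example invocation, assuming this file is run as a script (paths are hypothetical):
#   python path/to/this_script.py --cif_file example.cif --json_file example_input.json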