# Copyright 2024 ByteDance and/or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import time
import traceback
import urllib.request
from contextlib import nullcontext
from os.path import exists as opexists
from os.path import join as opjoin
from typing import Any, Mapping

import sys

sys.path.insert(0, '/home/hui007/Protenix')

import torch
import torch.distributed as dist
from ml_collections.config_dict import ConfigDict

from configs.configs_base import configs as configs_base
from configs.configs_data import data_configs
from configs.configs_inference import inference_configs
from configs.configs_model_type import model_configs
from protenix.config import parse_configs, parse_sys_args
from protenix.data.infer_data_pipeline import get_inference_dataloader
from protenix.model.protenix import Protenix
from protenix.model.utils import Jump
from protenix.utils.distributed import DIST_WRAPPER
from protenix.utils.seed import seed_everything
from protenix.utils.torch_utils import to_device
from protenix.web_service.dependency_url import URL
from runner.dumper import DataDumper

logger = logging.getLogger(__name__)


class InferenceRunner(object):
    def __init__(self, configs: Any) -> None:
        self.configs = configs
        self.init_env()
        self.init_basics()
        self.init_model()
        self.load_checkpoint()
        self.init_dumper(
            need_atom_confidence=configs.need_atom_confidence,
            sorted_by_ranking_score=configs.sorted_by_ranking_score,
        )

    def init_env(self) -> None:
        self.print(
            f"Distributed environment: world size: {DIST_WRAPPER.world_size}, "
            + f"global rank: {DIST_WRAPPER.rank}, local rank: {DIST_WRAPPER.local_rank}"
        )
        self.use_cuda = torch.cuda.device_count() > 0
        if self.use_cuda:
            self.device = torch.device("cuda:{}".format(DIST_WRAPPER.local_rank))
            os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
            all_gpu_ids = ",".join(str(x) for x in range(torch.cuda.device_count()))
            devices = os.getenv("CUDA_VISIBLE_DEVICES", all_gpu_ids)
            logging.info(
                f"LOCAL_RANK: {DIST_WRAPPER.local_rank} - CUDA_VISIBLE_DEVICES: [{devices}]"
            )
            torch.cuda.set_device(self.device)
        else:
            self.device = torch.device("cpu")
        if DIST_WRAPPER.world_size > 1:
            dist.init_process_group(backend="nccl")
        if self.configs.use_deepspeed_evo_attention:
            env = os.getenv("CUTLASS_PATH", None)
            self.print(f"env: {env}")
            assert env is not None, (
                "To use DS4Sci, set the `CUTLASS_PATH` environment variable according to "
                "the instructions at https://www.deepspeed.ai/tutorials/ds4sci_evoformerattention/"
            )
            logging.info(
                "The kernels will be compiled when DS4Sci_EvoformerAttention is called for the first time."
            )
        use_fastlayernorm = os.getenv("LAYERNORM_TYPE", None)
        if use_fastlayernorm == "fast_layernorm":
            logging.info(
                "The kernels will be compiled when fast_layernorm is called for the first time."
            )
        logging.info("Finished init ENV.")
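
    # Note: with world_size > 1, init_process_group(backend="nccl") above uses the
    # default env:// rendezvous, so the usual torchrun variables (RANK, WORLD_SIZE,
    # MASTER_ADDR, MASTER_PORT) must be set. An illustrative multi-GPU launch
    # (hypothetical paths/flags, adjust to your setup):
    #
    #   torchrun --nproc_per_node=2 runner/inference.py --input_json_path example.json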

    def init_basics(self) -> None:
        self.dump_dir = self.configs.dump_dir
        self.error_dir = opjoin(self.dump_dir, "ERR")
        os.makedirs(self.dump_dir, exist_ok=True)
        os.makedirs(self.error_dir, exist_ok=True)

    def init_model(self) -> None:
        self.model = Protenix(self.configs).to(self.device)

    def load_checkpoint(self) -> None:
        checkpoint_path = (
            f"{self.configs.load_checkpoint_dir}/{self.configs.model_name}.pt"
        )
        if not os.path.exists(checkpoint_path):
            raise FileNotFoundError(
                f"Given checkpoint path does not exist [{checkpoint_path}]"
            )
        self.print(
            f"Loading from {checkpoint_path}, strict: {self.configs.load_strict}"
        )
        checkpoint = torch.load(checkpoint_path, map_location=self.device)
        sample_key = next(iter(checkpoint["model"]))
        self.print(f"Sampled key: {sample_key}")
        if sample_key.startswith("module."):
            # DDP checkpoints prefix parameter names with "module."; strip it.
            checkpoint["model"] = {
                k[len("module.") :]: v for k, v in checkpoint["model"].items()
            }
        self.model.load_state_dict(
            state_dict=checkpoint["model"],
            strict=self.configs.load_strict,
        )
        self.model.eval()
        self.print("Finished loading checkpoint.")

    def init_dumper(
        self, need_atom_confidence: bool = False, sorted_by_ranking_score: bool = True
    ):
        self.dumper = DataDumper(
            base_dir=self.dump_dir,
            need_atom_confidence=need_atom_confidence,
            sorted_by_ranking_score=sorted_by_ranking_score,
        )

    # Adapted from runner.train.Trainer.evaluate
    @torch.no_grad()
    def predict(
        self, data: Mapping[str, Mapping[str, Any]], coord_label
    ) -> dict[str, torch.Tensor]:
        eval_precision = {
            "fp32": torch.float32,
            "bf16": torch.bfloat16,
            "fp16": torch.float16,
        }[self.configs.dtype]

        enable_amp = (
            torch.autocast(device_type="cuda", dtype=eval_precision)
            if torch.cuda.is_available()
            else nullcontext()
        )

        data = to_device(data, self.device)
        with enable_amp:
            prediction, _, _ = self.model(
                input_feature_dict=data["input_feature_dict"],
                label_full_dict=None,
                label_dict=None,
                mode="inference",
                coord_label=coord_label,
            )
        return prediction

    def print(self, msg: str):
        if DIST_WRAPPER.rank == 0:
            logger.info(msg)

    def update_model_configs(self, new_configs: Any) -> None:
        self.model.configs = new_configs
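
# A minimal programmatic usage sketch of the runner above (illustrative only;
# `my_configs` stands for a fully parsed ConfigDict such as the one assembled
# in run() at the bottom of this file):
#
#   runner = InferenceRunner(my_configs)
#   for batch in get_inference_dataloader(configs=my_configs):
#       data, atom_array, coord_label, err_msg = batch[0]
#       prediction = runner.predict(data, coord_label)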

def download_inference_cache(configs: Any) -> None:
    def progress_callback(block_num, block_size, total_size):
        if total_size <= 0:
            # Total size unknown (server did not report it); skip drawing the bar.
            return
        downloaded = block_num * block_size
        percent = min(100, downloaded * 100 / total_size)
        bar_length = 30
        filled_length = int(bar_length * percent // 100)
        bar = "=" * filled_length + "-" * (bar_length - filled_length)
        status = f"\r[{bar}] {percent:.1f}%"
        print(status, end="", flush=True)
        if downloaded >= total_size:
            print()

    def download_from_url(tos_url, checkpoint_path, check_weight=True):
        urllib.request.urlretrieve(
            tos_url, checkpoint_path, reporthook=progress_callback
        )
        if check_weight:
            try:
                ckpt = torch.load(checkpoint_path)
                del ckpt
            except Exception:
                os.remove(checkpoint_path)
                raise RuntimeError(
                    "Downloading the model checkpoint failed; please download it yourself with "
                    f"wget {tos_url} -O {checkpoint_path}"
                )

    for cache_name in (
        "ccd_components_file",
        "ccd_components_rdkit_mol_file",
        "pdb_cluster_file",
    ):
        cur_cache_fpath = configs["data"][cache_name]
        if not opexists(cur_cache_fpath):
            os.makedirs(os.path.dirname(cur_cache_fpath), exist_ok=True)
            tos_url = URL[cache_name]
            assert os.path.basename(tos_url) == os.path.basename(cur_cache_fpath), (
                f"{cache_name} file name is incorrect: `{tos_url}` and "
                f"`{cur_cache_fpath}`. Please check and try again."
            )
            logger.info(
                f"Downloading data cache from\n {tos_url}... to {cur_cache_fpath}"
            )
            download_from_url(tos_url, cur_cache_fpath, check_weight=False)

    checkpoint_path = f"{configs.load_checkpoint_dir}/{configs.model_name}.pt"
    checkpoint_dir = configs.load_checkpoint_dir
    if not opexists(checkpoint_path):
        os.makedirs(checkpoint_dir, exist_ok=True)
        tos_url = URL[configs.model_name]
        logger.info(
            f"Downloading model checkpoint from\n {tos_url}... to {checkpoint_path}"
        )
        download_from_url(tos_url, checkpoint_path)

    if "esm" in configs.model_name:
        # Currently ESM only supports the 3B model.
        esm_3b_ckpt_path = f"{checkpoint_dir}/esm2_t36_3B_UR50D.pt"
        if not opexists(esm_3b_ckpt_path):
            tos_url = URL["esm2_t36_3B_UR50D"]
            logger.info(
                f"Downloading model checkpoint from\n {tos_url}... to {esm_3b_ckpt_path}"
            )
            download_from_url(tos_url, esm_3b_ckpt_path)
        esm_3b_ckpt_path2 = f"{checkpoint_dir}/esm2_t36_3B_UR50D-contact-regression.pt"
        if not opexists(esm_3b_ckpt_path2):
            tos_url = URL["esm2_t36_3B_UR50D-contact-regression"]
            logger.info(
                f"Downloading model checkpoint from\n {tos_url}... to {esm_3b_ckpt_path2}"
            )
            download_from_url(tos_url, esm_3b_ckpt_path2)

    if "ism" in configs.model_name:
        esm_3b_ism_ckpt_path = f"{checkpoint_dir}/esm2_t36_3B_UR50D_ism.pt"
        if not opexists(esm_3b_ism_ckpt_path):
            tos_url = URL["esm2_t36_3B_UR50D_ism"]
            logger.info(
                f"Downloading model checkpoint from\n {tos_url}... to {esm_3b_ism_ckpt_path}"
            )
            download_from_url(tos_url, esm_3b_ism_ckpt_path)
        # The same as esm_3b_ckpt_path2.
        esm_3b_ism_ckpt_path2 = (
            f"{checkpoint_dir}/esm2_t36_3B_UR50D_ism-contact-regression.pt"
        )
        if not opexists(esm_3b_ism_ckpt_path2):
            tos_url = URL["esm2_t36_3B_UR50D_ism-contact-regression"]
            logger.info(
                f"Downloading model checkpoint from\n {tos_url}... to {esm_3b_ism_ckpt_path2}"
            )
            download_from_url(tos_url, esm_3b_ism_ckpt_path2)


def update_inference_configs(configs: Any, N_token: int):
    # Set the default inference configs for different N_token and N_atom.
    # When N_token exceeds roughly 3000, the default config may OOM even on an
    # A100 80G GPU, so AMP is kept enabled (not skipped) for the heavy modules.
    if N_token > 3840:
        configs.skip_amp.confidence_head = False
        configs.skip_amp.sample_diffusion = False
    elif N_token > 2560:
        configs.skip_amp.confidence_head = False
        configs.skip_amp.sample_diffusion = True
    else:
        configs.skip_amp.confidence_head = True
        configs.skip_amp.sample_diffusion = True
    return configs
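
# Reading the thresholds above (here skip_amp=True is taken to mean the module
# runs outside autocast, i.e. in full precision; larger inputs trade precision
# for memory):
#
#   N_token <= 2560        -> confidence head and diffusion sampling in fp32
#   2560 < N_token <= 3840 -> confidence head under AMP, diffusion sampling in fp32
#   N_token > 3840         -> both under AMP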

def infer_predict(runner: InferenceRunner, configs: Any) -> None:
    # Data
    logger.info(f"Loading data from\n{configs.input_json_path}")
    try:
        dataloader = get_inference_dataloader(configs=configs)
    except Exception as e:
        error_message = f"{e}:\n{traceback.format_exc()}"
        logger.info(error_message)
        with open(opjoin(runner.error_dir, "error.txt"), "a") as f:
            f.write(error_message)
        return

    num_data = len(dataloader.dataset)
    for seed in configs.seeds:
        seed_everything(seed=seed, deterministic=configs.deterministic)
        for batch in dataloader:
            try:
                data, atom_array, coord_label, data_error_message = batch[0]
                sample_name = data["sample_name"]
                if len(data_error_message) > 0:
                    logger.info(data_error_message)
                    with open(
                        opjoin(runner.error_dir, f"{sample_name}.txt"), "a"
                    ) as f:
                        f.write(data_error_message)
                    continue

                logger.info(
                    f"[Rank {DIST_WRAPPER.rank} ({data['sample_index'] + 1}/{num_data})] {sample_name}: "
                    f"N_asym {data['N_asym'].item()}, N_token {data['N_token'].item()}, "
                    f"N_atom {data['N_atom'].item()}, N_msa {data['N_msa'].item()}"
                )
                new_configs = update_inference_configs(configs, data["N_token"].item())
                runner.update_model_configs(new_configs)
                prediction = runner.predict(data, coord_label)
                runner.dumper.dump(
                    dataset_name="",
                    pdb_id=sample_name,
                    seed=seed,
                    pred_dict=prediction,
                    atom_array=atom_array,
                    entity_poly_type=data["entity_poly_type"],
                )
                logger.info(
                    f"[Rank {DIST_WRAPPER.rank}] {sample_name} succeeded.\n"
                    f"Results saved to {configs.dump_dir}"
                )
                torch.cuda.empty_cache()
            except Jump:
                torch.cuda.empty_cache()
                continue
            except Exception as e:
                error_message = (
                    f"[Rank {DIST_WRAPPER.rank}] {sample_name} {e}:\n"
                    f"{traceback.format_exc()}"
                )
                logger.info(error_message)
                # Save error info
                with open(opjoin(runner.error_dir, f"{sample_name}.txt"), "a") as f:
                    f.write(error_message)
                torch.cuda.empty_cache()


def main(configs: Any) -> None:
    # Runner
    runner = InferenceRunner(configs)
    infer_predict(runner, configs)


def run() -> None:
    LOG_FORMAT = (
        "%(asctime)s,%(msecs)-3d %(levelname)-8s "
        "[%(filename)s:%(lineno)s %(funcName)s] %(message)s"
    )
    logging.basicConfig(
        format=LOG_FORMAT,
        level=logging.INFO,
        datefmt="%Y-%m-%d %H:%M:%S",
        filemode="w",
    )
    configs_base["use_deepspeed_evo_attention"] = (
        os.environ.get("USE_DEEPSPEED_EVO_ATTENTION", "false") == "true"
    )
    configs = {**configs_base, **{"data": data_configs}, **inference_configs}
    configs = parse_configs(
        configs=configs,
        arg_str=parse_sys_args(),
        fill_required_with_null=True,
    )
    model_name = configs.model_name
    _, model_size, model_feature, model_version = model_name.split("_")
    logger.info(
        f"Inference by Protenix: model_size: {model_size}, "
        f"with_feature: {model_feature.replace('-', ', ')}, "
        f"model_version: {model_version}"
    )
    # Update model-specific configs.
    model_specific_configs = ConfigDict(model_configs[model_name])
    configs.update(model_specific_configs)
    download_inference_cache(configs)
    main(configs)


if __name__ == "__main__":
    run()
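
# Example invocation (illustrative; flag names mirror the config keys used
# above, and the model name must split into four "_"-separated parts as run()
# expects -- adjust the paths and model name to your setup):
#
#   python runner/inference.py \
#       --model_name protenix_base_default_v0.5.0 \
#       --input_json_path ./examples/example.json \
#       --dump_dir ./output \
#       --seeds 101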