import numpy as np
import pickle
import torch
import json
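
# One-off data-preparation script for Protenix RNA training. The commented-out
# blocks below record earlier steps (building per-structure coordinate tensors
# and training JSON manifests); only the Hugging Face upload at the bottom is
# currently active.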
# np_data = np.load('/home/hui007/rna/rna_repr/zhiyuan/train_data_final.npz')
# data_list = pickle.loads(np_data['data_list'])
# training_json = []
# for item in data_list:
#     full_id = item['full_id']
#     sequence = ''.join([i[1] for i in item['data']])
#     # entry = {
#     #     "sequences": [
#     #         {
#     #             "rnaSequence": {
#     #                 "sequence": sequence,
#     #                 "count": 1
#     #             }
#     #         }
#     #     ],
#     #     "name": full_id
#     # }
#     # training_json.append(entry)
#     data = item['data']
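#     # Each element of data appears to hold the residue letter at index 1 and
#     # a coordinate record (a dict with 'coord_list') at index 2; keep both.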
#     extracted = [[j[1], j[2]] for j in data]
#     torch.save(extracted, f"/home/hui007/Protenix/coord/{full_id}.pt")
#     # coords = [coord for i in item['data'] for coord in i[2]['coord_list']]
#     # tensor = torch.tensor(coords, dtype=torch.float32)
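#     # Center each structure at its centroid, then divide by a fixed scale;
#     # 20.3689 looks like a dataset-wide constant (its origin isn't recorded here).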
#     # centroid = tensor.mean(dim=0, keepdim=True)
#     # normalized_pos = (tensor - centroid) / 20.3689
#     # torch.save(normalized_pos, f"/home/hui007/Protenix/coord/{full_id}.pt")
# # with open("/home/hui007/Protenix/training.json", "w") as f:
# #     json.dump(training_json, f, indent=2)
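
# Second one-off step: drop manifest entries that already have a saved
# embedding and split the remainder into chunks, presumably so embedding
# jobs could run in parallel.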
# import os
# import json
# from pathlib import Path
# # === Path setup ===
# embedding_dir = Path("/home/hui007/Protenix/protenix_1d_embeddings")
# input_json_path = "/home/hui007/Protenix/training_json/training.json"
# output_prefix = "training"
# # === Load the JSON manifest ===
# with open(input_json_path, "r") as f:
#     data = json.load(f)
# # === Keep only entries whose name.pt does not exist yet ===
# filtered_data = []
# for item in data:
#     name = item["name"]
#     pt_path = embedding_dir / f"{name}.pt"
#     if not pt_path.exists():
#         filtered_data.append(item)
# print(f"{len(filtered_data)} entries will be kept and split")
# # === Split evenly into 6 chunks ===
# chunk_size = (len(filtered_data) + 5) // 6  # ceiling division
# chunks = [filtered_data[i:i+chunk_size] for i in range(0, len(filtered_data), chunk_size)]
# # === Save as training1.json ... training6.json ===
# for i, chunk in enumerate(chunks):
#     out_path = f"/home/hui007/Protenix/training_json/{output_prefix}{i+1}.json"
#     with open(out_path, "w") as f:
#         json.dump(chunk, f, indent=2)
#     print(f"Saved {out_path} with {len(chunk)} entries")
from huggingface_hub import upload_folder

upload_folder(
    repo_id="Yimingbear/protenix",
    repo_type="dataset",
    folder_path=".",
    ignore_patterns=[
        "coord/*",
        "ModelGenerator/*",
        "protenix_1d_embeddings/*",
        "protenix_3d_embeddings/*",
        "second_stage/*",
        "training_json/*",
        "examples/*",
    ],  # skip large local folders
)
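
# A minimal sketch of pulling the dataset back down to verify the upload,
# kept commented out like the other one-off steps above. snapshot_download
# is a standard huggingface_hub API; local_dir is an assumed destination,
# not a path used elsewhere in this script.
# from huggingface_hub import snapshot_download
# snapshot_download(
#     repo_id="Yimingbear/protenix",
#     repo_type="dataset",
#     local_dir="./protenix_download",  # hypothetical destination
# )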