import numpy as np
import pickle
import torch
import json
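# NOTE: the preprocessing blocks below are kept commented out for reference;
# only the upload_folder call at the end of this script actually runs.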

# np_data = np.load('/home/hui007/rna/rna_repr/zhiyuan/train_data_final.npz') 
# data_list = pickle.loads(np_data['data_list'])

# training_json = []

# for item in data_list:
#     full_id = item['full_id']
#     sequence = ''.join([i[1] for i in item['data']])
#     # entry = {
#     #     "sequences": [
#     #         {
#     #             "rnaSequence": {
#     #                 "sequence": sequence,
#     #                 "count": 1
#     #             }
#     #         }
#     #     ],
#     #     "name": full_id
#     # }
#     # training_json.append(entry)
#     data = item['data']

#     extracted = [[j[1], j[2]] for j in data]

#     torch.save(extracted, f"/home/hui007/Protenix/coord/{full_id}.pt")
        

#     # coords = [coord for i in item['data'] for coord in i[2]['coord_list']]
#     # tensor = torch.tensor(coords, dtype=torch.float32)
#     # centroid = tensor.mean(dim=0, keepdim=True)
#     # normalized_pos = (tensor - centroid) / 20.3689
    
#     # torch.save(normalized_pos, f"/home/hui007/Protenix/coord/{full_id}.pt")

# # with open("/home/hui007/Protenix/training.json", "w") as f:
# #     json.dump(training_json, f, indent=2)

# import os
# import json
# from pathlib import Path

# # === Path settings ===
# embedding_dir = Path("/home/hui007/Protenix/protenix_1d_embeddings")
# input_json_path = "/home/hui007/Protenix/training_json/training.json"
# output_prefix = "training"

# # === Load the JSON data ===
# with open(input_json_path, "r") as f:
#     data = json.load(f)

# # === Keep only entries whose {name}.pt embedding does not exist yet ===
# filtered_data = []
# for item in data:
#     name = item["name"]
#     pt_path = embedding_dir / f"{name}.pt"
#     if not pt_path.exists():
#         filtered_data.append(item)

# print(f"共有 {len(filtered_data)} 条数据将被保留并拆分")

# # === Split evenly into 6 chunks ===
# chunk_size = (len(filtered_data) + 5) // 6  # ceiling division
# chunks = [filtered_data[i:i+chunk_size] for i in range(0, len(filtered_data), chunk_size)]

# # === Save as training1.json ~ training6.json ===
# for i, chunk in enumerate(chunks):
#     out_path = f"/home/hui007/Protenix/training_json/{output_prefix}{i+1}.json"
#     with open(out_path, "w") as f:
#         json.dump(chunk, f, indent=2)
#     print(f"保存 {out_path},包含 {len(chunk)} 条")

from huggingface_hub import upload_folder

upload_folder(
    repo_id="Yimingbear/protenix",
    repo_type="dataset",
    folder_path=".",
    ignore_patterns=["coord/*", "ModelGenerator/*", "protenix_1d_embeddings/*", "protenix_3d_embeddings/*", "second_stage/*", "training_json/*", "examples/*"]  # local folders excluded from the upload
)
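
# For reference, a minimal sketch (kept commented out) of pulling the uploaded
# files back down with huggingface_hub.snapshot_download:
#
# from huggingface_hub import snapshot_download
#
# local_path = snapshot_download(
#     repo_id="Yimingbear/protenix",
#     repo_type="dataset",
# )
# print(f"Downloaded snapshot to {local_path}")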