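"""Convert DCS gold pickles and candidate graphml graphs into a JSONL dataset.

For every sentence id listed in `graphFiles`, load the gold analysis from
`DCS_pick/<id>.p` and the candidate segmentation graph from
`After_graphml/<id>.graphml`, merge them into a single record, and write it
as one JSON line to `dataset.jsonl`. Files that fail to load or parse are
skipped and logged in `conversion_log.txt`.
"""
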
import pickle
import networkx as nx
import json
import os
from tqdm import tqdm

# Container for DCS gold analyses. pickle needs this class defined under the
# same name to rebuild the stored objects; __init__ is never called during
# unpickling and only documents the expected attributes.
class DCS:
    def __init__(self, sent_id, sentence):
        self.sent_id = sent_id
        self.sentence = sentence
        self.dcs_chunks = []
        self.lemmas = []
        self.cng = []

# === Config Paths ===
graph_file_list = "graphFiles"
graph_folder = "After_graphml"
pkl_folder = "DCS_pick"
output_jsonl = "dataset.jsonl"
log_file = "conversion_log.txt"

# === Read Sentence IDs ===
with open(graph_file_list, "r") as id_file:
    # Drop blank lines so a trailing newline does not yield an empty id
    ids = [line.strip().replace(".graphml", "") for line in id_file if line.strip()]

# === Prepare Output Files ===
with open(output_jsonl, "w", encoding="utf-8") as out_fp, open(log_file, "w", encoding="utf-8") as log_fp:
    success_count = 0
    error_count = 0

    for sid in tqdm(ids, desc="Converting files"):
        try:
            # === Load .p (pickle) file ===
            pkl_path = os.path.join(pkl_folder, f"{sid}.p")
            if not os.path.exists(pkl_path):
                raise FileNotFoundError(f"Missing pickle: {pkl_path}")

            # encoding="utf-8" decodes Python 2 byte strings while unpickling
            # (assumed here: the DCS dumps are legacy Python 2 pickles)
            with open(pkl_path, "rb") as f:
                gold = pickle.load(f, encoding="utf-8")

            # === Load .graphml file ===
            graph_path = os.path.join(graph_folder, f"{sid}.graphml")
            if not os.path.exists(graph_path):
                raise FileNotFoundError(f"Missing graphml: {graph_path}")

            G = nx.read_graphml(graph_path)

            # === Extract candidates from graph ===
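            # Each graphml node is one candidate word analysis; .get() defaults
            # keep the records uniform when a node attribute is missing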
            candidates = []
            for node_id, node_data in G.nodes(data=True):
                candidates.append({
                    "id": node_id,
                    "word": node_data.get("word", ""),
                    "lemma": node_data.get("lemma", ""),
                    "morph": node_data.get("morph", ""),
                    "cng": str(node_data.get("cng", "")),
                    "chunk_no": str(node_data.get("chunk_no", "")),
                    "position": int(node_data.get("position", -1)),
                    "length": int(node_data.get("length_word", -1)),
                    "pre_verb": node_data.get("pre_verb", "")
                })

            # === Final dictionary ===
            sample = {
                "id": sid,
                "sentence": gold.sentence.strip(),
                "gold_segments": gold.dcs_chunks,
                "lemmas": gold.lemmas,
                "morph_tags": gold.cng,
                "candidates": candidates
            }

            # One JSON object per line (JSONL); ensure_ascii=False keeps
            # non-ASCII characters readable instead of \uXXXX-escaped
            json.dump(sample, out_fp, ensure_ascii=False)
            out_fp.write("\n")
            success_count += 1

        except Exception as e:
            log_fp.write(f"[ERROR] {sid}: {str(e)}\n")
            error_count += 1

# === Done ===
print(f"\n✅ Conversion complete: {success_count} samples written")
print(f"⚠️  {error_count} samples skipped — see {log_file} for details")
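
# Reading the dataset back (a sketch; keys mirror the `sample` dict above):
#   import json
#   with open(output_jsonl, encoding="utf-8") as fp:
#       records = [json.loads(line) for line in fp]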