#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
日本語文字画像 + 英語説明文(CC0)
- 単語は外部ファイルから読み込み(UTF-8, 1行1語)
- 白背景・黒文字
- 説明文に色指定
- PNG+TXTペアを train/ に出力
- 最後に train.tar.gz にまとめる
"""

import csv, random, argparse, hashlib, datetime, tarfile, shutil
from pathlib import Path
from dataclasses import dataclass
from PIL import Image, ImageDraw, ImageFont
import numpy as np

# --------------------
# Constants
# --------------------
DEFAULT_FONTS_DIR = Path("./fonts")
TRAIN_DIR         = Path("./train")
LINE_SPACING      = 1.25

@dataclass
class Example:
    jp_text: str
    en_desc: str

# --------------------
# Load words from an external file
# --------------------
def load_words(file_path: Path):
    if not file_path.exists():
        raise FileNotFoundError(f"Word list file not found: {file_path}")
    with open(file_path, "r", encoding="utf-8") as f:
        words = [line.strip() for line in f if line.strip()]
    if not words:
        raise ValueError(f"Word list file is empty: {file_path}")
    return words

# --------------------
# Data generation
# --------------------
def gen_examples(n:int, seed:int, words:list[str]):
    random.seed(seed)
    exs = []
    for _ in range(n):
        jp = random.choice(words)
        desc = f'This image is saying "{jp}". The background is white. The letter is black.'
        exs.append(Example(jp_text=jp, en_desc=desc))
    return exs
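
# For a word such as "こんにちは", the generated description reads (illustrative):
#     This image is saying "こんにちは". The background is white. The letter is black.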

# --------------------
# Fonts
# --------------------
def list_fonts(font_dir:Path):
    fonts = [p for p in font_dir.glob("*") if p.suffix.lower() in (".ttf",".otf",".ttc",".otc")]
    if not fonts:
        raise FileNotFoundError(f"No fonts found: place OFL/public-domain Japanese fonts in {font_dir}")
    return fonts

# --------------------
# Rendering (white background, black text)
# --------------------
def draw_horizontal(text, font_path:Path, size, max_font_size, min_font_size, margin_px):
    W,H = size
    img = Image.new("RGB", (W,H), (255,255,255))  # white background
    draw = ImageDraw.Draw(img)
    # Shrink the font size until the text fits inside the margins
    # (falls back to min_font_size if nothing fits).
    for fs in range(max_font_size, min_font_size-1, -2):
        font = ImageFont.truetype(str(font_path), fs)
        bbox = draw.textbbox((0,0), text, font=font)
        w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]
        if w <= W - 2*margin_px and h <= H - 2*margin_px:
            break
    # Center the ink box, compensating for the bbox offset of the default anchor.
    x = (W - w)//2 - bbox[0]
    y = (H - h)//2 - bbox[1]
    draw.text((x, y), text, font=font, fill=(0,0,0))  # black text
    return img

def draw_vertical(text, font_path:Path, size, max_font_size, min_font_size, margin_px):
    W,H = size
    img = Image.new("RGB", (W,H), (255,255,255))  # white background
    draw = ImageDraw.Draw(img)
    # Shrink the font size until a single vertical column of characters fits.
    for fs in range(max_font_size, min_font_size-1, -2):
        font = ImageFont.truetype(str(font_path), fs)
        line_h = font.getbbox("Hg")[3] - font.getbbox("Hg")[1]
        step = int(line_h * LINE_SPACING)
        total_h = len(text) * step if text else step
        if text:
            widths = []
            for c in text:
                cb = draw.textbbox((0,0), c, font=font)
                widths.append(cb[2] - cb[0])
            col_w = max(widths)  # column width = widest character
        else:
            cb = draw.textbbox((0,0), "あ", font=font)
            col_w = cb[2] - cb[0]
        if col_w <= W - 2*margin_px and total_h <= H - 2*margin_px:
            break
    x = (W - col_w)//2
    y = (H - total_h)//2
    for i, ch in enumerate(text):
        draw.text((x, y + i*step), ch, font=font, fill=(0,0,0))  # black text, one character per row
    return img
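
# A minimal rendering check (a sketch; the font filename is an assumption --
# substitute any .ttf/.otf Japanese font placed under ./fonts):
#
#     img = draw_horizontal("こんにちは", Path("./fonts/NotoSansJP-Regular.otf"),
#                           (640, 640), 54, 28, 28)
#     img.save("preview.png")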

# --------------------
# Misc
# --------------------
def sha1_of_text(t:str)->str:
    return hashlib.sha1(t.encode("utf-8")).hexdigest()[:16]

def write_license_file():
    text = f"""Dataset License (CC0, JP glyphs + EN descriptions)

Copyright (c) {datetime.date.today().year} YOUR_NAME
All images (Japanese text) and English descriptions are synthetic and authored by the dataset creator.
Released under CC0-1.0 (Public Domain Dedication).
"""
    Path("./LICENSE.txt").write_text(text, encoding="utf-8")

def append_assets_registry(font_paths):
    reg_path = Path("./provenance/assets_registry.csv")
    reg_path.parent.mkdir(parents=True, exist_ok=True)
    new = not reg_path.exists()
    with open(reg_path, "a", newline="", encoding="utf-8") as f:
        w = csv.writer(f)
        if new:
            w.writerow(["asset_type","path","license","notes"])
        for p in font_paths:
            w.writerow(["font", str(p), "SIL Open Font License (assumed)", "同梱時は各フォントのLICENSEを添付"])

def make_tarfile(source_dir: Path, output_filename: Path, remove_source=False):
    with tarfile.open(output_filename, "w:gz") as tar:
        tar.add(source_dir, arcname=source_dir.name)
    if remove_source:
        shutil.rmtree(source_dir)
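
# The resulting train.tar.gz can be unpacked with `tar -xzf train.tar.gz`,
# or with tarfile.open("train.tar.gz").extractall() from Python.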

# --------------------
# Main
# --------------------
def main(n_train, seed, mode, img_size, max_font_size, min_font_size, margin_px, words_file, archive, remove_source):
    random.seed(seed); np.random.seed(seed)
    TRAIN_DIR.mkdir(parents=True, exist_ok=True)
    write_license_file()
    fonts = list_fonts(DEFAULT_FONTS_DIR)
    append_assets_registry(fonts)

    words = load_words(words_file)
    exs = gen_examples(n_train, seed, words)

    def render(jp_text, font_path, writing_mode):
        if writing_mode == "horizontal":
            return draw_horizontal(jp_text, font_path, img_size, max_font_size, min_font_size, margin_px)
        elif writing_mode == "vertical":
            return draw_vertical(jp_text, font_path, img_size, max_font_size, min_font_size, margin_px)

    for i, ex in enumerate(exs):
        font = random.choice(fonts)
        writing_mode = random.choice(["horizontal","vertical"]) if mode=="both" else mode
        img = render(ex.jp_text, font, writing_mode)
        uid = sha1_of_text(f"{i}-{ex.jp_text}-{font.name}-{writing_mode}-{img_size}-{max_font_size}-{min_font_size}")
        img_path = TRAIN_DIR/f"{uid}.png"
        txt_path = TRAIN_DIR/f"{uid}.txt"
        img.save(img_path)
        txt_path.write_text(ex.en_desc, encoding="utf-8")

    print(f"Generated {n_train} samples in {TRAIN_DIR}")

    if archive:
        tar_path = Path("./train.tar.gz")
        make_tarfile(TRAIN_DIR, tar_path, remove_source=remove_source)
        print(f"Created archive: {tar_path}")

if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument("--n_train", type=int, default=1000)
    ap.add_argument("--seed", type=int, default=0)
    ap.add_argument("--mode", type=str, default="both", choices=["horizontal","vertical","both"])
    ap.add_argument("--img_size", type=int, nargs=2, metavar=("WIDTH","HEIGHT"), default=(640,640))
    ap.add_argument("--max_font_size", type=int, default=54)
    ap.add_argument("--min_font_size", type=int, default=28)
    ap.add_argument("--margin_px", type=int, default=28)
    ap.add_argument("--words_file", type=Path, required=True, help="日本語単語リストファイル(UTF-8, 1行1語)")
    ap.add_argument("--archive", action="store_true", help="train/ を tar.gz にまとめる")
    ap.add_argument("--remove_source", action="store_true", help="tar作成後に train/ を削除")
    args = ap.parse_args()
    main(args.n_train, args.seed, args.mode, tuple(args.img_size), args.max_font_size, args.min_font_size,
         args.margin_px, args.words_file, args.archive, args.remove_source)