"""
Convert NLS Scottish School Exams dataset to Hugging Face format with proper page numbering.

This script processes directories containing:
- image/ folder with JPG files
- alto/ folder with ALTO XML files
- METS XML files with page ordering information

It creates one row per page with the image, extracted text, raw ALTO XML, and the
correct page number.
"""

import argparse
import csv
import logging
import os
import re
import sys
import xml.etree.ElementTree as ET
from collections import defaultdict
from pathlib import Path
from typing import Optional

from datasets import Dataset, Features, Value
from datasets import Image as HFImage
from tqdm import tqdm

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def extract_base_number(filename: str) -> str:
    """Extract the base number from a filename (before first dot)."""
    return filename.split('.')[0]


def parse_mets_page_order(mets_path: Path) -> dict[str, int]:
    """
    Parse METS XML file to extract page ordering information.

    Returns:
        Dictionary mapping file base numbers to page order numbers
    """
    page_order_map = {}

    try:
        tree = ET.parse(mets_path)
        root = tree.getroot()

        ns = {
            'mets': 'http://www.loc.gov/METS/',
            'xlink': 'http://www.w3.org/1999/xlink'
        }

        for div in root.findall('.//mets:div[@ORDER]', ns):
            order = div.get('ORDER')
            if order:
                for fptr in div.findall('.//mets:fptr', ns):
                    file_id = fptr.get('FILEID')
                    if file_id and '.3' in file_id:
                        base_num = file_id.split('.')[0].replace('file_', '')
                        page_order_map[base_num] = int(order)

        logger.debug(f"Extracted page order for {len(page_order_map)} pages from METS")

    except Exception as e:
        logger.warning(f"Error parsing METS file {mets_path}: {e}")

    return page_order_map


def extract_exam_info_from_metadata(metadata: Optional[str]) -> dict[str, str]:
    """
    Extract exam information from a metadata string.

    Example: "Leaving Certificate - 1888 - P.P.1888 XLI"
    Returns: {"exam_type": "Leaving Certificate", "year": "1888", "reference": "P.P.1888 XLI"}
    """
    info = {
        "exam_type": "",
        "year": "",
        "reference": ""
    }

    if not metadata:
        return info

    year_match = re.search(r'\b(18\d{2}|19\d{2}|20\d{2})\b', metadata)
    if year_match:
        info["year"] = year_match.group(1)

    parts = metadata.split(' - ')
    if parts:
        info["exam_type"] = parts[0].strip()

    if len(parts) >= 3:
        info["reference"] = parts[2].strip()

    return info


def parse_inventory_csv(root_dir: Path) -> dict[str, str]:
    """
    Parse inventory CSV file if it exists in the dataset directory.

    Returns:
        Dictionary mapping document_id to metadata description
    """
    inventory_pattern = "*-inventory.csv"
    inventory_files = list(root_dir.glob(inventory_pattern))

    if not inventory_files:
        logger.info("No inventory CSV file found")
        return {}

    if len(inventory_files) > 1:
        logger.warning(f"Multiple inventory files found: {inventory_files}. Using first one.")

    inventory_file = inventory_files[0]
    logger.info(f"Reading inventory from: {inventory_file}")

    metadata_map = {}

    try:
        with open(inventory_file, encoding='utf-8-sig') as f:
            reader = csv.reader(f)
            for row_num, row in enumerate(reader, 1):
                if len(row) >= 2:
                    doc_id = row[0].strip()
                    description = row[1].strip()
                    metadata_map[doc_id] = description
                else:
                    logger.warning(f"Skipping malformed row {row_num} in {inventory_file}: {row}")

    except Exception as e:
        logger.error(f"Error reading inventory CSV: {e}")
        return {}

    logger.info(f"Loaded metadata for {len(metadata_map)} documents from inventory")
    return metadata_map


def extract_text_from_alto(alto_path: Path) -> tuple[str, str]:
    """
    Extract text content from an ALTO XML file.

    Returns:
        Tuple of (extracted_text, raw_xml)
    """
    try:
        with open(alto_path, encoding='utf-8') as f:
            raw_xml = f.read()

        root = ET.fromstring(raw_xml)

        ns = {'alto': 'http://www.loc.gov/standards/alto/v3/alto.xsd'}

        text_parts = []

        for textline in root.findall('.//alto:TextLine', ns):
            line_parts = []

            for string_elem in textline.findall('./alto:String', ns):
                content = string_elem.get('CONTENT', '')
                if content:
                    line_parts.append(content)

            if line_parts:
                text_parts.append(' '.join(line_parts))

        extracted_text = '\n'.join(text_parts)

        return extracted_text, raw_xml

    except Exception as e:
        logger.warning(f"Error processing ALTO file {alto_path}: {e}")
        return "", ""


def process_document_folder(doc_path: Path, metadata_map: Optional[dict[str, str]] = None) -> list[dict]:
    """
    Process a single document folder and return a list of page records.

    Args:
        doc_path: Path to the document folder
        metadata_map: Optional dictionary mapping document_id to metadata
    """
    records = []
    doc_id = doc_path.name
    doc_metadata = metadata_map.get(doc_id, None) if metadata_map else None

    exam_info = extract_exam_info_from_metadata(doc_metadata)

    image_dir = doc_path / "image"
    alto_dir = doc_path / "alto"
    mets_file = doc_path / f"{doc_id}-mets.xml"

    if not image_dir.exists() or not alto_dir.exists():
        logger.warning(f"Skipping {doc_path}: missing image or alto directory")
        return records

    page_order_map = {}
    if mets_file.exists():
        page_order_map = parse_mets_page_order(mets_file)
    else:
        logger.warning(f"No METS file found for {doc_id}, using filename sorting for page order")

    image_files = {f for f in os.listdir(image_dir)
                   if f.lower().endswith(('.jpg', '.jpeg', '.png', '.tiff', '.tif'))}
    alto_files = {f for f in os.listdir(alto_dir) if f.endswith('.xml')}

    image_map = {extract_base_number(f): f for f in image_files}
    alto_map = {extract_base_number(f): f for f in alto_files}

    all_pages = set(image_map.keys()) | set(alto_map.keys())

    if not page_order_map:
        sorted_pages = sorted(all_pages)
        page_order_map = {page: idx + 1 for idx, page in enumerate(sorted_pages)}

    for page_base in sorted(all_pages, key=lambda x: page_order_map.get(x, 999999)):
        actual_page_number = page_order_map.get(page_base, 0)

        record = {
            'document_id': doc_path.name,
            'page_number': actual_page_number,
            'file_identifier': page_base,
            'image_path': None,
            'alto_xml': None,
            'text': None,
            'has_image': False,
            'has_alto': False,
            'document_metadata': doc_metadata,
            'has_metadata': doc_metadata is not None,
            'exam_type': exam_info['exam_type'],
            'exam_year': exam_info['year'],
            'exam_reference': exam_info['reference']
        }

        if page_base in image_map:
            image_path = image_dir / image_map[page_base]
            if image_path.exists():
                record['image_path'] = str(image_path)
                record['has_image'] = True

        if page_base in alto_map:
            alto_path = alto_dir / alto_map[page_base]
            if alto_path.exists():
                text, xml = extract_text_from_alto(alto_path)
                record['alto_xml'] = xml
                record['text'] = text
                record['has_alto'] = True

        records.append(record)

    return records
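
# Quick way to sanity-check a single document folder (paths and identifiers are
# illustrative, not actual NLS values):
#
#   recs = process_document_folder(Path("/path/to/dataset_root/123456789"))
#   for r in recs[:3]:
#       print(r["page_number"], r["file_identifier"], r["has_image"], r["has_alto"])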


def process_dataset(root_dir: Path, max_docs: Optional[int] = None,
                    include_metadata: bool = True) -> list[dict]:
    """
    Process entire dataset directory.

    Args:
        root_dir: Root directory of dataset
        max_docs: Maximum number of documents to process
        include_metadata: Whether to include metadata from inventory CSV
    """
    all_records = []

    metadata_map = {}
    if include_metadata:
        metadata_map = parse_inventory_csv(root_dir)

    doc_dirs = [d for d in root_dir.iterdir()
                if d.is_dir() and not d.name.startswith('.')
                and d.name not in ['__pycache__']]

    if max_docs:
        doc_dirs = doc_dirs[:max_docs]

    logger.info(f"Processing {len(doc_dirs)} document directories...")

    for doc_dir in tqdm(doc_dirs, desc="Processing documents"):
        records = process_document_folder(doc_dir, metadata_map)
        all_records.extend(records)

    return all_records


def create_huggingface_dataset(records: list[dict], include_missing: bool = True) -> Dataset:
    """
    Create a Hugging Face dataset from records.

    Args:
        records: List of page records
        include_missing: If False, only include pages with both image and ALTO
    """
    if not include_missing:
        records = [r for r in records if r['has_image'] and r['has_alto']]
        logger.info(f"Filtered to {len(records)} records with both image and ALTO")

    dataset_dict = defaultdict(list)

    for record in records:
        dataset_dict['document_id'].append(record['document_id'])
        dataset_dict['page_number'].append(record['page_number'])
        dataset_dict['file_identifier'].append(record['file_identifier'])

        # Store the image as a file path (or None when the page image is missing).
        if record['has_image'] and record['image_path']:
            dataset_dict['image'].append(record['image_path'])
        else:
            dataset_dict['image'].append(None)

        dataset_dict['text'].append(record['text'] or "")
        dataset_dict['alto_xml'].append(record['alto_xml'] or "")
        dataset_dict['has_image'].append(record['has_image'])
        dataset_dict['has_alto'].append(record['has_alto'])
        dataset_dict['document_metadata'].append(record.get('document_metadata') or "")
        dataset_dict['has_metadata'].append(record.get('has_metadata', False))
        dataset_dict['exam_type'].append(record.get('exam_type', ''))
        dataset_dict['exam_year'].append(record.get('exam_year', ''))
        dataset_dict['exam_reference'].append(record.get('exam_reference', ''))

    features = Features({
        'document_id': Value('string'),
        'page_number': Value('int32'),
        'file_identifier': Value('string'),
        'image': HFImage(),
        'text': Value('string'),
        'alto_xml': Value('string'),
        'has_image': Value('bool'),
        'has_alto': Value('bool'),
        'document_metadata': Value('string'),
        'has_metadata': Value('bool'),
        'exam_type': Value('string'),
        'exam_year': Value('string'),
        'exam_reference': Value('string')
    })

    dataset = Dataset.from_dict(dict(dataset_dict), features=features)

    return dataset
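
# Reading a saved parquet file back for inspection (file name is illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files="exams.parquet", split="train")
#   print(ds.features)                     # should mirror the Features defined above
#   print(ds[0]["page_number"], ds[0]["text"][:80])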


def print_statistics(records: list[dict]):
    """Print statistics about the processed dataset."""
    total = len(records)
    with_both = sum(1 for r in records if r['has_image'] and r['has_alto'])
    image_only = sum(1 for r in records if r['has_image'] and not r['has_alto'])
    alto_only = sum(1 for r in records if not r['has_image'] and r['has_alto'])
    with_metadata = sum(1 for r in records if r.get('has_metadata', False))

    print("\n=== Dataset Statistics ===")
    print(f"Total pages: {total:,}")
    print(f"Pages with both image and ALTO: {with_both:,} ({with_both/total*100:.1f}%)")
    print(f"Pages with image only: {image_only:,} ({image_only/total*100:.1f}%)")
    print(f"Pages with ALTO only: {alto_only:,} ({alto_only/total*100:.1f}%)")
    if with_metadata > 0:
        print(f"Pages with metadata: {with_metadata:,} ({with_metadata/total*100:.1f}%)")

    docs = defaultdict(lambda: {'pages': 0, 'complete': 0, 'has_metadata': False})
    for r in records:
        docs[r['document_id']]['pages'] += 1
        if r['has_image'] and r['has_alto']:
            docs[r['document_id']]['complete'] += 1
        if r.get('has_metadata', False):
            docs[r['document_id']]['has_metadata'] = True

    print(f"\nTotal documents: {len(docs)}")
    complete_docs = sum(1 for d in docs.values() if d['pages'] == d['complete'])
    print(f"Documents with all pages complete: {complete_docs} "
          f"({complete_docs/len(docs)*100:.1f}%)")

    docs_with_metadata = sum(1 for d in docs.values() if d['has_metadata'])
    if docs_with_metadata > 0:
        print(f"Documents with metadata: {docs_with_metadata} "
              f"({docs_with_metadata/len(docs)*100:.1f}%)")

    years = defaultdict(int)
    for r in records:
        year = r.get('exam_year', '')
        if year:
            years[year] += 1

    if years:
        print("\n=== Exam Years Distribution ===")
        for year in sorted(years.keys()):
            print(f"{year}: {years[year]} pages")


def main():
    parser = argparse.ArgumentParser(description='Convert NLS Scottish Exams dataset to Hugging Face format')
    parser.add_argument('input_dir', type=str, help='Path to dataset directory')
    parser.add_argument('output_path', type=str, help='Output path for HF dataset')
    parser.add_argument('--max-docs', type=int, help='Maximum number of documents to process')
    parser.add_argument('--include-missing', action='store_true',
                        help='Include pages with missing image or ALTO')
    parser.add_argument('--format', choices=['parquet', 'json', 'csv'],
                        default='parquet', help='Output format')
    parser.add_argument('--push-to-hub', action='store_true',
                        help='Push dataset to Hugging Face Hub')
    parser.add_argument('--repo-id', type=str,
                        help='Repository ID on Hugging Face Hub (e.g., username/dataset-name)')
    parser.add_argument('--private', action='store_true',
                        help='Make the dataset private on Hugging Face Hub')
    parser.add_argument('--include-metadata', type=str, default='true',
                        choices=['true', 'false'],
                        help='Include metadata from inventory CSV if available (default: true)')

    args = parser.parse_args()

    if args.push_to_hub and not args.repo_id:
        logger.error("--repo-id is required when using --push-to-hub")
        sys.exit(1)

    input_path = Path(args.input_dir)
    if not input_path.exists():
        logger.error(f"Input directory does not exist: {input_path}")
        sys.exit(1)

    include_metadata = args.include_metadata.lower() == 'true'

    logger.info(f"Processing dataset from {input_path}")
    records = process_dataset(input_path, args.max_docs, include_metadata)

    if not records:
        logger.error("No records found!")
        sys.exit(1)

    print_statistics(records)

    logger.info("Creating Hugging Face dataset...")
    dataset = create_huggingface_dataset(records, include_missing=args.include_missing)

    logger.info(f"Saving dataset to {args.output_path}")
    if args.format == 'parquet':
        dataset.to_parquet(args.output_path)
    elif args.format == 'json':
        dataset.to_json(args.output_path)
    elif args.format == 'csv':
        dataset.to_csv(args.output_path)

    logger.info(f"Dataset saved successfully! Total rows: {len(dataset)}")

    if args.push_to_hub:
        logger.info(f"Pushing dataset to Hugging Face Hub: {args.repo_id}")
        try:
            dataset.push_to_hub(
                repo_id=args.repo_id,
                private=args.private,
                commit_message=f"Add NLS Scottish Exams dataset with {len(dataset)} pages"
            )
            logger.info(f"Dataset successfully pushed to https://huggingface.co/datasets/{args.repo_id}")
        except Exception as e:
            logger.error(f"Failed to push to Hub: {e}")
            logger.info("Make sure you're logged in with 'huggingface-cli login'")
            sys.exit(1)


if __name__ == "__main__":
    main()