from PIL import Image, ImageDraw, ImageFont
import torch
import os
import random
import json
import csv
import shutil
# Optional dependencies used only by the commented-out experiments below:
# requests, io.BytesIO, transformers, datasets, pandas, numpy, peft, trl.
class MSEDataset(torch.utils.data.Dataset):
    """Math Stack Exchange QA dataset read from a JSONL dump.

    Each record carries the keys "id", "body", and "answers"; each answer
    carries "id", "body", "score", and "accepted".
    """

    def __init__(self, data_path, images_path, split="train", shuffle=False):
        with open(data_path, 'r') as json_file:
            self.json_list = [json.loads(jline) for jline in json_file.read().splitlines()]
        # Keep the first 64860 records: 90% train (58374), 5% test (3243),
        # 5% eval (3243).
        self.json_list = self.json_list[:64860]
        self.max_size = len(self.json_list)
        first_split_index = int(self.max_size * 0.9)
        second_split_index = first_split_index + int(self.max_size * 0.05)
        if split == "train":
            self.json_list = self.json_list[:first_split_index]
        elif split == "test":
            self.json_list = self.json_list[first_split_index:second_split_index]
        elif split == "eval":
            self.json_list = self.json_list[second_split_index:]
        else:
            raise ValueError(f"Invalid split: {split!r} (expected 'train', 'test', or 'eval')")
        self.max_size = len(self.json_list)
        # Shuffle within the split so the train/test/eval partitions stay disjoint.
        if shuffle:
            random.shuffle(self.json_list)
        self.images_path = images_path
        self.default_prompt = "<grounding> An image of a question which says "
    def __getitem__(self, idx):
        # Returns the raw JSON record. Downstream code is expected to load
        # the question image from <images_path>/<id>/question_0.jpg and to
        # build the grounding prompt as self.default_prompt + record["body"].
        return self.json_list[idx]

    def __len__(self):
        return self.max_size
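
# Usage sketch (untested; paths are illustrative). Records are plain dicts,
# so a DataLoader needs an identity collate_fn to batch them as lists:
# train_ds = MSEDataset(data_path="dataset.jsonl", images_path="mse_images", split="train")
# loader = torch.utils.data.DataLoader(train_ds, batch_size=4, collate_fn=lambda batch: batch)
# print(len(train_ds), train_ds[0]["id"])
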
def convert_jsonl_to_json(input_jsonl_file, output_json_folder):
    # Ensure the output folder exists
    os.makedirs(output_json_folder, exist_ok=True)
    # Determine the output JSON filename
    base_name = os.path.splitext(os.path.basename(input_jsonl_file))[0]
    output_json_file = os.path.join(output_json_folder, base_name + '.json')
    # Read the JSONL file and aggregate the data
    data = []
    with open(input_jsonl_file, 'r') as jsonl_file:
        for line_number, line in enumerate(jsonl_file, start=1):
            line = line.strip()
            if not line:  # Skip empty lines
                continue
            try:
                data.append(json.loads(line))
            except json.JSONDecodeError as e:
                print(f"Error decoding JSON on line {line_number}: {e}")
                continue
    # Write to the JSON file
    with open(output_json_file, 'w') as json_file:
        json.dump(data, json_file, indent=4)
    print(f"Converted {input_jsonl_file} to {output_json_file}")
def export_split(split, csv_path, images_dir):
    """Write one split to CSV and move its question image folders.

    For each question, the accepted answer and any answer tied for the
    highest score are kept. █ (U+2588) is used as the field delimiter and
    U+2063 as the record terminator so that commas and newlines inside
    question/answer bodies do not collide with the CSV structure.
    """
    print(f'Started {split} split')
    mse_dataset = MSEDataset(data_path="dataset.jsonl", images_path="mse_images", split=split)
    os.makedirs(images_dir, exist_ok=True)
    with open(csv_path, 'w', newline='') as file:
        writer = csv.writer(file, delimiter="█", lineterminator="\u2063")
        writer.writerow(["question_id", "question_text", "question_image",
                         "answer_id", "answer_text", "answer_image"])
        for qas in mse_dataset:
            question_id = qas['id']
            question_text = qas['body']
            question_image = images_dir + question_id + '/question_0.jpg'
            answers = qas['answers']
            # Move the question's image folder into this split's directory
            # unless it has already been moved.
            source = 'mse_images/' + question_id
            if not os.path.exists(images_dir + question_id):
                shutil.move(source, images_dir)
            # Find the highest answer score for this question.
            max_score = None
            for answer in answers:
                if max_score is None or answer['score'] > max_score:
                    max_score = answer['score']
            # Keep the accepted answer plus any answer tied for the top score.
            for answer in answers:
                if answer['accepted'] or answer['score'] == max_score:
                    writer.writerow([question_id, question_text, question_image,
                                     answer['id'], answer['body'],
                                     images_dir + question_id + '/' + answer['id'] + '_0.jpg'])


if __name__ == "__main__":
    # Export the three splits (the "eval" split is written to val.csv).
    export_split("train", "train.csv", "train_images/")
    export_split("test", "test.csv", "test_images/")
    export_split("eval", "val.csv", "val_images/")
    print('Finished generating dataset')
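    # Reading a split back (untested sketch): csv.reader ignores its
    # `lineterminator` argument when reading, so recover records by
    # splitting on U+2063, then fields by splitting on █. This naive
    # split assumes neither character occurs inside bodies and does not
    # undo the writer's minimal quoting of fields that contain quotes.
    # def read_split(csv_path):
    #     with open(csv_path, newline='') as f:
    #         raw = f.read()
    #     return [record.split('█') for record in raw.split('\u2063') if record]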
# Reference for the collator below: commented-out experiments loaded the
# TheFusion21/PokemonCards dataset (features: id, image_url, caption, name,
# hp, set_name; 13139 rows) and fetched each image with requests, opening
# it from a BytesIO buffer.
# Collator that pairs each caption with its downloaded image and masks
# padding tokens out of the loss:
# class Kosmos2DataCollator:
#     def __init__(self, processor):
#         self.processor = processor
#
#     def __call__(self, examples):
#         texts = []
#         images = []
#         for example in examples:
#             texts.append(example['caption'])
#             images.append(Image.open(BytesIO(requests.get(example["image_url"]).content)))
#         batch = self.processor(images=images, text=texts, return_tensors="pt", truncation=True, padding=True)
#         labels = batch["input_ids"].clone()
#         # Ignore padding positions when computing the loss.
#         if self.processor.tokenizer.pad_token_id is not None:
#             labels[labels == self.processor.tokenizer.pad_token_id] = -100
#         batch["labels"] = labels
#         return batch
#
# data_collator = Kosmos2DataCollator(processor)
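
# Usage sketch (untested; assumes the microsoft/kosmos-2-patch14-224
# checkpoint and the commented-out transformers imports above):
# from transformers import AutoProcessor, AutoModelForVision2Seq, Trainer, TrainingArguments
# processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")
# model = AutoModelForVision2Seq.from_pretrained("microsoft/kosmos-2-patch14-224")
# trainer = Trainer(
#     model=model,
#     args=TrainingArguments(output_dir="kosmos2-mse", remove_unused_columns=False),
#     train_dataset=dataset,  # any dataset yielding {"caption", "image_url"} records
#     data_collator=Kosmos2DataCollator(processor),
# )
# trainer.train()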