# KIE-HVQA / eval.py
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
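# Evaluation script for KIE-HVQA OCR outputs: it parses ground-truth and
# predicted JSON answers from a JSONL file and reports character-level
# accuracy and normalized edit-distance similarity for the "clear",
# "not clear enough", and "Final" OCR fields.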
import argparse
import json

from Levenshtein import distance as levenshtein_distance


def calculate_metrics(pred, truth):
    # Compare prediction and ground truth after stripping spaces.
    pred_clean = pred.replace(" ", "")
    truth_clean = truth.replace(" ", "")
    # Position-wise character matches, normalized by the ground-truth length.
    correct_chars = sum(c1 == c2 for c1, c2 in zip(pred_clean, truth_clean))
    char_acc = correct_chars / len(truth_clean) if len(truth_clean) > 0 else 0
    # Normalized Levenshtein similarity: 1 means the strings are identical.
    edit_dist = levenshtein_distance(pred_clean, truth_clean)
    max_len = max(len(pred_clean), len(truth_clean))
    similarity = 1 - (edit_dist / max_len) if max_len > 0 else 1
    return {
        "char_accuracy": char_acc,
        "edit_distance": edit_dist,
        "similarity": similarity
    }
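# Illustrative example (assumed inputs, not from the dataset):
# calculate_metrics("helo world", "hello world") compares "heloworld" with
# "helloworld": positional matches = 3, so char_accuracy = 3 / 10 = 0.3;
# Levenshtein distance = 1 over max length 10, so similarity = 0.9. Positional
# matching is strict under insertions/deletions, while similarity degrades
# more gracefully.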


def compare_three_values(all_samples):
    # Accumulate per-field metrics; each sample is [ground_truth_dict, prediction_dict].
    clear_stats = {"total_acc": 0, "total_sim": 0, "count": 0}
    notclear_stats = {"total_acc": 0, "total_sim": 0, "count": 0}
    final_stats = {"total_acc": 0, "total_sim": 0, "count": 0}
    for sample in all_samples:
        # Pass the prediction first and the ground truth second, matching
        # the calculate_metrics(pred, truth) signature.
        clear_metrics = calculate_metrics(sample[1]["clear Char-level OCR"], sample[0]["clear Char-level OCR"])
        clear_stats["total_acc"] += clear_metrics["char_accuracy"]
        clear_stats["total_sim"] += clear_metrics["similarity"]
        clear_stats["count"] += 1
        notclear_metrics = calculate_metrics(sample[1]["not clear enough Char-level OCR"], sample[0]["not clear enough Char-level OCR"])
        notclear_stats["total_acc"] += notclear_metrics["char_accuracy"]
        notclear_stats["total_sim"] += notclear_metrics["similarity"]
        notclear_stats["count"] += 1
        final_metrics = calculate_metrics(sample[1]["Final OCR"], sample[0]["Final OCR"])
        final_stats["total_acc"] += final_metrics["char_accuracy"]
        final_stats["total_sim"] += final_metrics["similarity"]
        final_stats["count"] += 1
    # Report the average accuracy and similarity for each field.
    print({
        "Clear": {
            "avg_accuracy": clear_stats["total_acc"] / clear_stats["count"],
            "avg_similarity": clear_stats["total_sim"] / clear_stats["count"]
        },
        "NotClear": {
            "avg_accuracy": notclear_stats["total_acc"] / notclear_stats["count"],
            "avg_similarity": notclear_stats["total_sim"] / notclear_stats["count"]
        },
        "Final": {
            "avg_accuracy": final_stats["total_acc"] / final_stats["count"],
            "avg_similarity": final_stats["total_sim"] / final_stats["count"]
        }
    })


def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_file", type=str, required=True,
                        help="Path to a JSONL file with 'answer' and 'response' fields.")
    args = parser.parse_args()
    return args


def main():
    args = parse_arguments()
    now = []
    with open(args.input_file, 'r') as f_in:
        for line in f_in:
            data = json.loads(line.strip())
            # Ground truth: the JSON payload between <answer> ... </answer> tags.
            text = data['answer']
            start_tag = "<answer>"
            end_tag = "</answer>"
            start_index = text.find(start_tag) + len(start_tag)
            end_index = text.find(end_tag)
            answer_content = text[start_index:end_index].strip()
            parsed_json_gt = json.loads(answer_content)
            # Prediction: use the <answer> span if the model emitted the tags,
            # otherwise fall back to the full response.
            q = data['response']
            start_index = q.find(start_tag)
            end_index = q.find(end_tag)
            if start_index != -1 and end_index != -1:
                answer_content = q[start_index + len(start_tag):end_index].strip()
            else:
                answer_content = q
            # Normalize the prediction into parseable JSON: single -> double
            # quotes, and drop Markdown code fences and newlines.
            answer_content_json = answer_content.replace("'", '"').replace("```json", "").replace("```", "").replace("\n", "")
            parsed_json_pred = json.loads(answer_content_json)
            now.append([parsed_json_gt, parsed_json_pred])
    compare_three_values(now)


if __name__ == "__main__":
    main()
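# Usage sketch (the JSONL schema is an assumption inferred from the field
# accesses above; the file name is illustrative):
#   python eval.py --input_file results.jsonl
# where each line is a JSON object whose "answer" field wraps the ground-truth
# JSON in <answer>...</answer> tags and whose "response" field holds the model
# output, e.g. a ```json ... ``` block with the keys "clear Char-level OCR",
# "not clear enough Char-level OCR", and "Final OCR".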