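"""Evaluate char-level OCR predictions against ground truth.

Reads a JSONL file (one JSON object per line) in which each record has an
'answer' field (ground truth, wrapped in <answer>...</answer> tags) and a
'response' field (model prediction). Both payloads are JSON objects with the
keys "clear Char-level OCR", "not clear enough Char-level OCR" and
"Final OCR". For each field the script reports average character accuracy
and average edit-distance similarity.

Usage (script and file names are illustrative):
    python evaluate_ocr.py --input_file predictions.jsonl
"""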
import argparse
import json

from Levenshtein import distance as levenshtein_distance

def calculate_metrics(pred, truth):
    """Character-level accuracy and edit-distance similarity between two strings."""
    # Ignore whitespace so that spacing differences do not count as errors.
    pred_clean = pred.replace(" ", "")
    truth_clean = truth.replace(" ", "")

    # Position-wise match rate over the ground truth: zip() stops at the
    # shorter string, and the denominator is the ground-truth length.
    correct_chars = sum(c1 == c2 for c1, c2 in zip(pred_clean, truth_clean))
    char_acc = correct_chars / len(truth_clean) if len(truth_clean) > 0 else 0

    # Normalized Levenshtein similarity in [0, 1].
    edit_dist = levenshtein_distance(pred_clean, truth_clean)
    max_len = max(len(pred_clean), len(truth_clean))
    similarity = 1 - (edit_dist / max_len) if max_len > 0 else 1

    return {
        "char_accuracy": char_acc,
        "edit_distance": edit_dist,
        "similarity": similarity,
    }
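# Illustrative example (values follow from the definitions above):
#   calculate_metrics("hello", "hallo")
#   -> {"char_accuracy": 0.8, "edit_distance": 1, "similarity": 0.8}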

def compare_three_values(all_samples):
    """Average the metrics over all samples for each of the three OCR fields."""
    clear_stats = {"total_acc": 0, "total_sim": 0, "count": 0}
    notclear_stats = {"total_acc": 0, "total_sim": 0, "count": 0}
    final_stats = {"total_acc": 0, "total_sim": 0, "count": 0}

    for sample in all_samples:
        # Each sample is stored as [ground_truth, prediction] in main().
        gt, pred = sample

        clear_metrics = calculate_metrics(pred["clear Char-level OCR"], gt["clear Char-level OCR"])
        clear_stats["total_acc"] += clear_metrics["char_accuracy"]
        clear_stats["total_sim"] += clear_metrics["similarity"]
        clear_stats["count"] += 1

        notclear_metrics = calculate_metrics(pred["not clear enough Char-level OCR"], gt["not clear enough Char-level OCR"])
        notclear_stats["total_acc"] += notclear_metrics["char_accuracy"]
        notclear_stats["total_sim"] += notclear_metrics["similarity"]
        notclear_stats["count"] += 1

        final_metrics = calculate_metrics(pred["Final OCR"], gt["Final OCR"])
        final_stats["total_acc"] += final_metrics["char_accuracy"]
        final_stats["total_sim"] += final_metrics["similarity"]
        final_stats["count"] += 1

    # Report the averages once all samples have been accumulated.
    print({
        "Clear": {
            "avg_accuracy": clear_stats["total_acc"] / clear_stats["count"],
            "avg_similarity": clear_stats["total_sim"] / clear_stats["count"],
        },
        "NotClear": {
            "avg_accuracy": notclear_stats["total_acc"] / notclear_stats["count"],
            "avg_similarity": notclear_stats["total_sim"] / notclear_stats["count"],
        },
        "Final": {
            "avg_accuracy": final_stats["total_acc"] / final_stats["count"],
            "avg_similarity": final_stats["total_sim"] / final_stats["count"],
        },
    })
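# The printed summary has the shape
# {"Clear": {"avg_accuracy": ..., "avg_similarity": ...},
#  "NotClear": {...}, "Final": {...}}.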

def parse_arguments():
    parser = argparse.ArgumentParser(description="Evaluate char-level OCR predictions.")
    parser.add_argument("--input_file", type=str, required=True,
                        help="Path to the JSONL file with ground truth and predictions.")

    args = parser.parse_args()
    return args

def main():
    args = parse_arguments()

    samples = []
    with open(args.input_file, 'r') as f_in:
        for line in f_in:
            data = json.loads(line.strip())

            # Ground truth: JSON payload wrapped in <answer>...</answer> tags.
            text = data['answer']
            start_tag = "<answer>"
            end_tag = "</answer>"
            start_index = text.find(start_tag) + len(start_tag)
            end_index = text.find(end_tag)
            answer_content = text[start_index:end_index].strip()
            parsed_json_gt = json.loads(answer_content)

            # Prediction: use the content between <answer> tags when present,
            # otherwise fall back to the whole response.
            resp = data['response']
            start_index = resp.find(start_tag)
            end_index = resp.find(end_tag)
            if start_index != -1 and end_index != -1:
                answer_content = resp[start_index + len(start_tag):end_index].strip()
            else:
                answer_content = resp

            # Normalize pseudo-JSON output: single quotes, ```json fences and
            # raw newlines would otherwise break json.loads().
            answer_content_json = (answer_content.replace("'", '"')
                                   .replace("```json", "")
                                   .replace("```", "")
                                   .replace("\n", ""))
            parsed_json_pred = json.loads(answer_content_json)

            samples.append([parsed_json_gt, parsed_json_pred])

    compare_three_values(samples)


if __name__ == "__main__":
    main()