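"""Evaluate model-predicted impact labels against gold annotations.

Predictions and gold rows are grouped per event (Date, Time_period) and
reduced with max() before computing row-wise accuracy and per-column
precision/recall/F1/accuracy.
"""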
import pandas as pd
import os

# Binary impact labels evaluated for each event.
impact_columns = [
    "Infrastructural impact", 
    "Political impact", 
    "Economic impact", 
    "Ecological impact", 
    "Agricultural impact", 
    "Human health impact"
]
# Key columns identifying one event. Note that str.capitalize() lowercases
# everything after the first letter, so the normalized name is "Time_period",
# not "Time_Period".
group_keys = ["Date", "Time_period"]
# Placeholder path; point this at the gold-standard annotations CSV.
gold_data = pd.read_csv("the_path_to_gold_data.csv")
# Normalize header casing so predictions and gold data line up.
gold_data.columns = [x.capitalize() for x in gold_data.columns]

def eval_row_wise_acc(data, output_file):
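    """Score each model with row-wise accuracy: a grouped row counts as
    correct only if every impact column matches the gold label. Results
    are appended to output_file."""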
    data.columns = [x.capitalize() for x in data.columns]
    models = data['Model_type'].unique()

    # Collapse duplicate gold rows per event, keeping the strongest label.
    gold_grouped = gold_data.groupby(group_keys)[impact_columns].max()
    results = []

    for model in models:
        model_data = data[data['Model_type'] == model]
        grouped = model_data.groupby(group_keys)[impact_columns].max()
        merged = grouped.join(gold_grouped, how='inner', lsuffix='_model', rsuffix='_gold')

        all_correct = (merged[[f"{col}_model" for col in impact_columns]].values ==
                       merged[[f"{col}_gold" for col in impact_columns]].values).all(axis=1)

        accuracy = all_correct.mean() if len(all_correct) > 0 else 0
        results.append({
            "Model_Type": model,
            "Row-Wise-Accuracy": round(accuracy, 4)
        })

    df_result = pd.DataFrame(results)
    # Write the header only on the first run; later runs append rows.
    df_result.to_csv(output_file, mode="a", header=not os.path.isfile(output_file), index=False)
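
# Hypothetical usage (both paths below are placeholders):
#   eval_row_wise_acc(pd.read_csv("/content/output_gpt.csv"), "rowwise_results.csv")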

def eval_metrics(data, output_file):
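    """Compute per-column precision, recall, F1, and accuracy for each
    model (one output row per metric) and append the table to output_file."""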
    data.columns = [x.capitalize() for x in data.columns]
    models = data["Model_type"].unique()
    gold_grouped = gold_data.groupby(group_keys)[impact_columns].max()
    results = []

    for model in models:
        model_data = data[data["Model_type"] == model]
        grouped = model_data.groupby(group_keys)[impact_columns].max()
        merged = grouped.join(gold_grouped, how="inner", lsuffix="_model", rsuffix="_gold")

        # Count the binary confusion matrix once per column, then derive all
        # four metrics from it instead of recounting them per metric row.
        metric_rows = {name: {"Model_Type": model, "Metric": name}
                       for name in ["Precision", "Recall", "F1", "Accuracy"]}
        for col in impact_columns:
            tp = ((merged[f"{col}_model"] == 1) & (merged[f"{col}_gold"] == 1)).sum()
            tn = ((merged[f"{col}_model"] == 0) & (merged[f"{col}_gold"] == 0)).sum()
            fp = ((merged[f"{col}_model"] == 1) & (merged[f"{col}_gold"] == 0)).sum()
            fn = ((merged[f"{col}_model"] == 0) & (merged[f"{col}_gold"] == 1)).sum()

            precision = tp / (tp + fp) if (tp + fp) > 0 else 0
            recall = tp / (tp + fn) if (tp + fn) > 0 else 0
            f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
            total = tp + tn + fp + fn
            accuracy = (tp + tn) / total if total > 0 else 0

            metric_rows["Precision"][col] = round(precision, 4)
            metric_rows["Recall"][col] = round(recall, 4)
            metric_rows["F1"][col] = round(f1, 4)
            metric_rows["Accuracy"][col] = round(accuracy, 4)
        results.extend(metric_rows.values())

    df_result = pd.DataFrame(results)
    print(df_result)

    # Write the header only on the first run; later runs append rows.
    df_result.to_csv(output_file, mode="a", header=not os.path.isfile(output_file), index=False)

if __name__ == "__main__":
    # Placeholder Colab-style path; point this at the model-predictions CSV.
    data = pd.read_csv("/content/output_gpt.csv")
    eval_metrics(data, "accuracy_results.csv")