import jsonlines

# import ollama
# ollama run Hudson/llemma:7b
# deepeval set-ollama Hudson/llemma:7b
| if __name__=="__main__": | |
| question_count = 0 | |
| answer_count = 0 | |
| avg_a_per_q = 0.0 | |
| with jsonlines.open("mse_text_img_QA_dataset.jsonl", mode='r') as reader: | |
| count = 0 | |
| for row in reader: | |
| question_count += 1 | |
| for i in range(len(row["answers"])): | |
| answer_count += 1 | |
| avg_a_per_q = (answer_count * 1.0) / question_count | |
| print("MSE Dataset:") | |
| print("Number of Questions = ", question_count) | |
| print("Number of Answers = ", answer_count) | |
| print("Average number of Answers per Question = ", avg_a_per_q) | |