Upload using_dataset.py
medlineplus_spanish/using_dataset.py
ADDED
@@ -0,0 +1,175 @@
# -*- coding: utf-8 -*-
"""using_dataset_hugginface.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1soGxkZu4antYbYG23GioJ6zoSt_GhSNT
"""

"""**Hugging Face login to push to the Hub**"""
###
#
# References used:
#   https://huggingface.co/learn/nlp-course/chapter5/5
#
###

import os
import time
import math
from huggingface_hub import login
from datasets import load_dataset, concatenate_datasets
from functools import reduce
from pathlib import Path
import pandas as pd
import numpy as np


# Load the tokenizer directly
from transformers import AutoTokenizer

HF_TOKEN = ''
DATASET_TO_LOAD = 'spanish_health_output.json'
DATASET_TO_UPDATE = 'somosnlp/spanish_medica_llm'
# Boilerplate fragments from the MedlinePlus page chrome; any paragraph
# containing one of them is dropped by verifyRepetelyChain below.
BAD_CHAIN = [
    'es como usted puede verificarlo',
    'Un sitio oficial del Gobierno de Estados Unidos',
    'lo en sitios web oficiales y seguros.',
    'forma segura a un sitio web .gov. Comparta informaci',
    'Gobierno de Estados Unidos.',
    'pertenece a una organizaci',
    '(\r\n \n ) o ',
    'Un sitio\r\n'
]
# Log in to Hugging Face
login(token=HF_TOKEN)
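# Note: hard-coding the token is unsafe; a safer sketch (assuming an HF_TOKEN
# environment variable has been set) would be:
#   HF_TOKEN = os.environ.get('HF_TOKEN', '')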



royalListOfCode = {}
issues_path = 'dataset'
tokenizer = AutoTokenizer.from_pretrained("DeepESP/gpt2-spanish-medium")
DATASET_SOURCE_ID = '2'
# Resolve the directory that contains this script
path = Path(__file__).parent.absolute()

dataset_CODING = pd.read_json(str(path) + os.sep + DATASET_TO_LOAD, encoding="utf8")
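# The JSON is expected to yield one row per MedlinePlus health topic, with at
# least the columns referenced below: "Healthtopics Name", "titles",
# "subtitles" and "paragraphs" (a list of strings).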

# raw_text: text associated with the document, question, clinical case or other kind of information.
# topic: (can be healthcare_treatment, healthcare_diagnosis, a topic, an answer to a question, or empty, e.g. for open text)
# speciality: (medical speciality the raw_text relates to, e.g. cardiology, surgery, others)
# raw_text_type: (can be clinical case, open_text, question)
# topic_type: (can be medical_topic, medical_diagnostic, answer, natural_medicine_topic, other, or empty)
# source: identifier of the source associated with the document, as listed in the README and dataset description.
# country: identifier of the country of origin of the source (e.g. ch, es) using the ISO 3166-1 alpha-2 standard (two-letter country codes).
cantemistDstDict = {
    'raw_text': '',
    'topic': '',
    'speciallity': '',  # key spelling kept as-is to match the existing dataset schema
    'raw_text_type': 'open_text',
    'topic_type': 'other',
    'source': DATASET_SOURCE_ID,
    'country': 'es',
    'document_id': ''
}
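# For illustration (hypothetical values, not from the source), a filled row
# might look like:
#   {'raw_text': 'La diabetes tipo 2 es ...', 'topic': 'Diabetes',
#    'speciallity': '', 'raw_text_type': 'open_text', 'topic_type': 'other',
#    'source': '2', 'country': 'es', 'document_id': '1'}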

def getExtraTexInformation(item, data_top_columname):
    # Concatenate the values of every non-optional column into one string.
    optionalTag = ["Healthtopics Name", "titles", "subtitles", "paragraphs"]
    text = ""

    for key in data_top_columname:
        if key not in optionalTag:
            # pd.isna also handles None and strings, where np.isnan would raise
            if not pd.isna(item[key]) and len(str(item[key])) > 1:
                text += str(item[key]) + '\n'

    return text

totalOfTokens = 0
corpusToLoad = []
countCopySeveralDocument = 0
counteOriginalDocument = 0
# Column names of the source frame (iterating the DataFrame itself would also
# yield them, but .columns states the intent explicitly)
data_top_columname = dataset_CODING.columns

def verifyRepetelyChain(paragraph):
    # Drop any paragraph that contains a known boilerplate fragment.
    return '' if any(x in paragraph for x in BAD_CHAIN) else paragraph
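# For example, verifyRepetelyChain('Un sitio oficial del Gobierno de Estados Unidos')
# returns '', while an ordinary medical paragraph passes through unchanged.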


for index, item in dataset_CODING.iterrows():

    if len(item['paragraphs']) > 1:
        # Join the paragraphs while filtering boilerplate; the filter is applied
        # to each new paragraph only, not to the growing accumulator.
        text = reduce(lambda a, b: a + "\n " + verifyRepetelyChain(b), item['paragraphs'], "")
    else:
        text = getExtraTexInformation(item, data_top_columname)
    # Find the topic or diagnostic classification of the text

    counteOriginalDocument += 1
    newCorpusRow = cantemistDstDict.copy()

    #print('Current text has ', currentSizeOfTokens)
    #print('Total of tokens is ', totalOfTokens)

    listOfTokens = []
    try:
        listOfTokens = tokenizer.tokenize(text)
    except Exception:
        raise Exception(f'Error tokenizing document {index}')

    currentSizeOfTokens = len(listOfTokens)
    totalOfTokens += currentSizeOfTokens

    # Guard against NaN, which is truthy in Python, before using the topic name
    topic_name = item['Healthtopics Name']
    newCorpusRow['topic'] = topic_name if (not pd.isna(topic_name) and topic_name) else reduce(lambda a, b: a + "\n " + b, item['titles'], "")
    newCorpusRow['raw_text'] = text
    idFile = counteOriginalDocument
    newCorpusRow['document_id'] = str(idFile)
    corpusToLoad.append(newCorpusRow)
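# Note (an illustration, not from the source): with "DeepESP/gpt2-spanish-medium"
# the tokenizer splits text into GPT-2-style subwords, so totalOfTokens measures
# corpus size in subword tokens rather than in words.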


df = pd.DataFrame.from_records(corpusToLoad)

if os.path.exists(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl"):
    os.remove(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")

df.to_json(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", orient="records", lines=True)
print(
    f"Processed all the documents from {DATASET_TO_LOAD}! Dataset stored at {issues_path}/spanish_medical_llms.jsonl"
)

print('Documents in the dataset: ', counteOriginalDocument)
print('Duplicated documents in the dataset: ', countCopySeveralDocument)
print('Total number of tokens: ', totalOfTokens)
file = Path(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")
size = file.stat().st_size
print('File size in kilobytes (KB)', size >> 10)
print('File size in megabytes (MB)', size >> 20)
print('File size in gigabytes (GB)', size >> 30)
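# A quick sanity check (a sketch): read the first record back and list its keys.
#   check = pd.read_json(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl",
#                        orient="records", lines=True, nrows=1)
#   print(check.columns.tolist())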

# Once the JSONL file is written we can load it locally as a Hugging Face dataset
local_spanish_dataset = load_dataset("json", data_files=f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", split="train")

## Merge the local dataset with the dataset already on the Hub
try:
    spanish_dataset = load_dataset(DATASET_TO_UPDATE, split="train")
    new_spanish_dataset = concatenate_datasets([spanish_dataset, local_spanish_dataset])
except Exception:
    print('<== Exception ==> ')
    raise  # re-raise with the original traceback
#new_spanish_dataset = local_spanish_dataset

new_spanish_dataset.push_to_hub(DATASET_TO_UPDATE)

print(new_spanish_dataset)

# Augmenting the dataset

# Important: if elements already exist on DATASET_TO_UPDATE we must update them
# in the list and check for repeated elements (see the sketch below).

#spanish_dataset.push_to_hub(DATASET_TO_UPDATE)
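# A minimal deduplication sketch (an assumption: 'raw_text' uniquely identifies
# a document; Dataset would need to be imported from datasets):
#   df_all = new_spanish_dataset.to_pandas().drop_duplicates(subset=['raw_text'])
#   new_spanish_dataset = Dataset.from_pandas(df_all, preserve_index=False)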