"""using_dataset_hugginface.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1soGxkZu4antYbYG23GioJ6zoSt_GhSNT
"""

"""**Hugging Face login for pushing to the Hub**"""
|
|
import os
from pathlib import Path

import pandas as pd
import mysql.connector
from huggingface_hub import login
from datasets import load_dataset, concatenate_datasets
from transformers import AutoTokenizer

HF_TOKEN = ''
DATASET_TO_LOAD = 'chizhikchi/CARES'
DATASET_TO_UPDATE = 'somosnlp/spanish_medica_llm'

login(token=HF_TOKEN)
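
# A safer alternative to hard-coding the token (a sketch; it assumes the token
# was exported as the HF_TOKEN environment variable before running):
# login(token=os.environ["HF_TOKEN"])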
|
|
dataset_CODING = load_dataset(DATASET_TO_LOAD)
print(dataset_CODING)

issues_path = 'dataset'
DATASET_SOURCE_ID = '5'
|
|
tokenizer = AutoTokenizer.from_pretrained("DeepESP/gpt2-spanish-medium")
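
# Quick sanity check (illustrative only; the sample sentence is made up): the
# token counts accumulated below come from this same tokenize() call.
sample_tokens = tokenizer.tokenize("Paciente con fiebre y dolor abdominal.")
print('Sample tokens:', sample_tokens[:8], '... total:', len(sample_tokens))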
|
|
path = Path(__file__).parent.absolute()
|
|
'''
Bibliography:
https://www.w3schools.com/python/python_mysql_getstarted.asp
https://www.w3schools.com/python/python_mysql_select.asp
'''
mydb = mysql.connector.connect(
    host="localhost",
    user="root",
    password="",
    database="icd10_dx_hackatonnlp"
)
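
# Optional connectivity check (a sketch; it assumes the `icd10_dx_order_code`
# table queried below has already been loaded into the database):
check_cursor = mydb.cursor()
check_cursor.execute("SELECT COUNT(*) FROM `icd10_dx_order_code`;")
print('ICD-10 rows available:', check_cursor.fetchone()[0])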
|
|
|
|
|
|
def getCodeDescription(labels_of_type):
    """
    Look up the long description associated with each ICD-10 code
    in the local `icd10_dx_order_code` table.
    """
    icd10CodeDict = {}
    mycursor = mydb.cursor()

    for codeIcd10 in labels_of_type:
        # Codes without a decimal part are stored with an explicit '.0' suffix.
        if codeIcd10.find('.') == -1:
            codeIcd10 += '.0'

        # Parameterized query: avoids SQL injection through the code value.
        mycursor.execute(
            "SELECT dx_code, long_desc FROM `icd10_dx_order_code` "
            "WHERE dx_code = %s LIMIT 1;",
            (codeIcd10,)
        )

        for code, description in mycursor.fetchall():
            icd10CodeDict[code] = description

    return icd10CodeDict
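
# Example usage (hypothetical; both the input code and the returned description
# depend entirely on the contents of the local table):
# getCodeDescription(['I10'])  # -> {'I10.0': 'Essential (primary) hypertension'}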
|
|
|
|
cantemistDstDict = {
    'raw_text': '',
    'topic': '',
    'speciallity': '',
    'raw_text_type': 'clinic_case',
    'topic_type': 'medical_diagnostic',
    'source': DATASET_SOURCE_ID,
    'country': 'es',
    'document_id': ''
}
|
|
totalOfTokens = 0
corpusToLoad = []
countCopySeveralDocument = 0
countOriginalDocument = 0
|
|
for iDataset in dataset_CODING:
    if iDataset == 'test':
        for item in dataset_CODING[iDataset]:
            idFile = str(item['iddoc'])
            text = item['full_text']
            labels_of_type = item['icd10']

            diagnostic_types = getCodeDescription(labels_of_type)
            countOriginalDocument += 1
            classFileSize = len(diagnostic_types)

            # A document with several diagnostics is replicated once per extra diagnostic.
            if classFileSize > 1:
                countCopySeveralDocument += classFileSize - 1

            listOfTokens = tokenizer.tokenize(text)
            currentSizeOfTokens = len(listOfTokens)
            totalOfTokens += currentSizeOfTokens

            # One corpus row per diagnostic description found for the document.
            for description in diagnostic_types.values():
                newCorpusRow = cantemistDstDict.copy()
                newCorpusRow['raw_text'] = text
                newCorpusRow['document_id'] = idFile
                newCorpusRow['topic'] = description
                corpusToLoad.append(newCorpusRow)

df = pd.DataFrame.from_records(corpusToLoad)
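
# Quick look at the assembled corpus before serializing it: there should be
# one row per (document, diagnostic) pair.
print(df.head())
print('Rows in corpus:', len(df))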
|
|
# Make sure the output directory exists and start from a clean file.
os.makedirs(f"{str(path)}/{issues_path}", exist_ok=True)
if os.path.exists(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl"):
    os.remove(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")

df.to_json(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", orient="records", lines=True)
print(
    f"Processed {DATASET_TO_LOAD}! Dataset stored at {issues_path}/spanish_medical_llms.jsonl"
)
|
|
print('Original documents in the dataset:', countOriginalDocument)
print('Duplicated documents in the dataset:', countCopySeveralDocument)
print('Total tokens in the dataset:', totalOfTokens)
file = Path(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")
size = file.stat().st_size
print('File size in kilobytes (KB):', size >> 10)
print('File size in megabytes (MB):', size >> 20)
print('File size in gigabytes (GB):', size >> 30)
|
|
|
|
local_spanish_dataset = load_dataset("json", data_files=f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", split="train")
|
|
print(' Local Dataset ==> ')
print(local_spanish_dataset)

# If the Hub dataset already exists, append the new rows to it; otherwise the
# local dataset becomes the initial version.
try:
    spanish_dataset = load_dataset(DATASET_TO_UPDATE, split="train")
    spanish_dataset = concatenate_datasets([spanish_dataset, local_spanish_dataset])
except Exception:
    spanish_dataset = local_spanish_dataset
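
# Note: re-running this script appends the same local rows to the Hub dataset
# again. A minimal deduplication sketch (assumes (document_id, topic) uniquely
# identifies a row), left commented out to keep the original behavior:
# seen = set()
# spanish_dataset = spanish_dataset.filter(
#     lambda row: not ((row['document_id'], row['topic']) in seen
#                      or seen.add((row['document_id'], row['topic'])))
# )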
|
|
spanish_dataset.push_to_hub(DATASET_TO_UPDATE)
|
|
print(spanish_dataset)
|