inoid committed
Commit 5eef998
Parent: 75663d4

Upload 2 files

chileancaselist/process_dataset.py ADDED
@@ -0,0 +1,52 @@
+ from datasets import load_dataset
+ import os
+ import re
+ import shutil
+
+ from pathlib import Path
+ from zipfile import ZipFile
+
+ FILE_PATH = "ChilieanCaseList.zip"
+
+ path = Path(__file__).parent.absolute()
+
+ from urllib import request
+ URL = 'https://zenodo.org/records/7555181/files/cwlc.zip?download=1'
+
+ FILE_ZIP = str(path) + os.sep + "ChilieanCaseList.zip"
+ FILE_ZIP_EXTRAC = str(path) + os.sep + "ChilieanCaseList"
+
+ if not os.path.exists(FILE_ZIP):
+     response = request.urlretrieve(URL, FILE_ZIP)
+
+ # Recreate the extraction directory, then unpack the archive into it
+ if os.path.exists(FILE_ZIP_EXTRAC):
+     shutil.rmtree(FILE_ZIP_EXTRAC)
+ os.makedirs(FILE_ZIP_EXTRAC)
+
+ with ZipFile(FILE_ZIP, 'r') as zObject:
+
+     # Extract the whole archive
+     # into the target directory
+     zObject.extractall(FILE_ZIP_EXTRAC)
+
+ # Exploratory snippet kept for reference: scan a sample extracted file
+
+ # with open( str(path) + os.sep + 'example.txt', encoding='utf8') as file:
+ #     """
+ #     Build a dictionary with ICD-O-3 codes associated with
+ #     healthcare problems
+ #     """
+ #     linesInFile = file.readlines()
+
+ #     for index, iLine in enumerate(linesInFile):
+ #         print([linesInFile[index]]) if len(linesInFile[index]) > 1 else print('**************') if linesInFile[index] == '\n' else print('******* ERROR ********')
+
+
+ #         if re.match('^Las dilataciones bronquiales', iLine):
+ #             break
+
+
+ #         code = listOfData[0]
+ #         description = reduce(lambda a, b: a + " "+ b, listOfData[1:2], "")
+ #         royalListOfCode[code.strip()] = description.strip()
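
A quick way to confirm that the download and extraction step above worked is to count the plain-text case files left on disk. The sketch below is not part of the commit; it assumes the archive unpacks .txt files somewhere under the ChilieanCaseList directory that process_dataset.py creates, which is the layout using_dataset_hugginface.py expects.

# Sanity-check sketch (not part of the commit): count the .txt case files that
# process_dataset.py should leave under ChilieanCaseList. The nested layout of the
# archive is an assumption; rglob covers both flat and nested contents.
from pathlib import Path

extract_dir = Path(__file__).parent.absolute() / "ChilieanCaseList"
txt_files = [p for p in extract_dir.rglob("*.txt") if p.is_file()]
print(f"Found {len(txt_files)} .txt files under {extract_dir}")
if not txt_files:
    print("Warning: no .txt files found; check the archive contents or the download URL.")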
chileancaselist/using_dataset_hugginface.py ADDED
@@ -0,0 +1,179 @@
+ # -*- coding: utf-8 -*-
+ """using_dataset_hugginface.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+     https://colab.research.google.com/drive/1soGxkZu4antYbYG23GioJ6zoSt_GhSNT
+ """
+
+ """**Hugging Face login to push to the Hub**"""
+ ###
+ #
+ # Bibliography used:
+ #   https://huggingface.co/learn/nlp-course/chapter5/5
+ #
+ ###
+
+ import os
+ import time
+ import math
+ from huggingface_hub import login
+ from datasets import load_dataset, concatenate_datasets
+ from functools import reduce
+ from pathlib import Path
+ import pandas as pd
+ import pathlib
+ # Load tokenizer and model classes directly
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ HF_TOKEN = ''
+ DATASET_TO_LOAD = 'bigbio/distemist'
+ DATASET_TO_UPDATE = 'somosnlp/spanish_medica_llm'
+ DATASET_SOURCE_ID = '10'
+ BASE_DIR = "ChilieanCaseList"
+
+ # Log in to Hugging Face
+ login(token = HF_TOKEN)
+
+ dataset_CODING = load_dataset(DATASET_TO_LOAD)
+ royalListOfCode = {}
+ issues_path = 'dataset'
+ tokenizer = AutoTokenizer.from_pretrained("DeepESP/gpt2-spanish-medium")
+
+ # Read the current path
+ path = Path(__file__).parent.absolute()
+ MAIN_FILE_ADRESS = str(path) + os.sep + BASE_DIR
+ #print ( os.listdir(str(path) + os.sep + BASE_DIR))
+
+ files = [ str(path) + os.sep + BASE_DIR + os.sep + f for f in os.listdir(MAIN_FILE_ADRESS) if os.path.isfile(str(path) + os.sep + BASE_DIR + os.sep + f) and pathlib.Path(MAIN_FILE_ADRESS + os.sep + f).suffix == ".txt" ]
+
+ #print (files)
+ for iFile in files:
+     with open( iFile, encoding='utf8') as file:
+         linesInFile = file.readlines()
+         text = reduce(lambda a, b: a + " "+ b, linesInFile, "")
+
+ #print (dataset_CODING)
+
+ # with open( str(path) + os.sep + 'ICD-O-3_valid-codes.txt', encoding='utf8') as file:
+ #     """
+ #     Build a dictionary with ICD-O-3 codes associated with
+ #     healthcare problems
+ #     """
+ #     linesInFile = file.readlines()
+ #     for iLine in linesInFile:
+ #         listOfData = iLine.split('\t')
+
+ #         code = listOfData[0]
+ #         description = reduce(lambda a, b: a + " "+ b, listOfData[1:2], "")
+ #         royalListOfCode[code.strip()] = description.strip()
+
+
+ # def getCodeDescription(labels_of_type, royalListOfCode):
+ #     """
+ #     Search for the description associated with a code
+ #     in royalListOfCode
+
+ #     """
+ #     classification = []
+
+ #     for iValue in labels_of_type:
+ #         if iValue in royalListOfCode.keys():
+ #             classification.append(royalListOfCode[iValue])
+ #     return classification
+
+
+ # # raw_text: Text associated with the document, question, clinical case, or other type of information.
+
+ # # topic: (can be healthcare_treatment, healthcare_diagnosis, a topic, an answer to a question, or empty, e.g. for open text)
+
+ # # speciality: (medical specialty that the raw_text relates to, e.g. cardiology, surgery, others)
+
+ # # raw_text_type: (can be clinic_case, open_text, question)
+
+ # # topic_type: (can be medical_topic, medical_diagnostic, answer, natural_medicine_topic, other, or empty)
+
+ # # source: Identifier of the source associated with the document, as listed in the README and dataset description.
+
+ # # country: Identifier of the source's country of origin (e.g. ch, es) using the ISO 3166-1 alpha-2 standard (two-letter country codes).
+ cantemistDstDict = {
+     'raw_text': '',
+     'topic': '',
+     'speciallity': '',
+     'raw_text_type': 'clinic_case',
+     'topic_type': '',
+     'source': DATASET_SOURCE_ID,
+     'country': 'es',
+     'document_id': ''
+ }
+
+ totalOfTokens = 0
+ corpusToLoad = []
+ countCopySeveralDocument = 0
+ counteOriginalDocument = 0
+
+ #print (dataset_CODING['train'][5]['entities'])
+
+ for iFile in files:
+     with open( iFile, encoding='utf8') as file:
+         linesInFile = file.readlines()
+         text = reduce(lambda a, b: a + " "+ b, linesInFile, "")
+         #print ("Element in dataset")
+
+         # Find the topic or diagnostic classification for the text
+         counteOriginalDocument += 1
+
+         listOfTokens = tokenizer.tokenize(text)
+         currentSizeOfTokens = len(listOfTokens)
+         totalOfTokens += currentSizeOfTokens
+         newCorpusRow = cantemistDstDict.copy()
+
+
+         newCorpusRow['raw_text'] = text
+         corpusToLoad.append(newCorpusRow)
+
+
+ df = pd.DataFrame.from_records(corpusToLoad)
+
+ os.makedirs(f"{str(path)}/{issues_path}", exist_ok=True)  # make sure the output directory exists
+ if os.path.exists(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl"):
+     os.remove(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")
+
+ df.to_json(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", orient="records", lines=True)
+ print(
+     f"Processed all the documents in {BASE_DIR}! Dataset stored at {issues_path}/spanish_medical_llms.jsonl"
+ )
+
+ print('Documents in the dataset: ', counteOriginalDocument)
+ print('Duplicated documents in the dataset: ', countCopySeveralDocument)
+ print('Total number of tokens: ', totalOfTokens)
+ file = Path(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")
+ size = file.stat().st_size
+ print ('File size in kilobytes (kB)', size >> 10)
+ print ('File size in megabytes (MB)', size >> 20)
+ print ('File size in gigabytes (GB)', size >> 30)
+
+ # Once the JSONL file is written we can load it locally as a dataset
+ local_spanish_dataset = load_dataset("json", data_files=f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", split="train")
+
+
+ ## Update the dataset on the Hub with the local dataset
+ try:
+     spanish_dataset = load_dataset(DATASET_TO_UPDATE, split="train")
+     print("=== Before ====")
+     print(spanish_dataset)
+     spanish_dataset = concatenate_datasets([spanish_dataset, local_spanish_dataset])
+ except Exception:
+     spanish_dataset = local_spanish_dataset
+
+ spanish_dataset.push_to_hub(DATASET_TO_UPDATE)
+ print("=== After ====")
+ print(spanish_dataset)
+
+ # Augmenting the dataset
+
+ # Important: if elements already exist on DATASET_TO_UPDATE we must update them
+ # in the list and check for repeated elements
+
+
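
The closing comment above notes that documents already present in DATASET_TO_UPDATE should be updated rather than added again. A minimal deduplication sketch, under the assumption that two rows with identical raw_text count as the same document (the script itself does not define what a duplicate is), could run on the concatenated dataset before push_to_hub:

# Deduplication sketch (assumption: identical 'raw_text' means a repeated document).
# It reuses spanish_dataset and DATASET_TO_UPDATE from the script above and drops
# repeats before pushing the merged dataset back to the Hub.
from datasets import Dataset

merged_df = spanish_dataset.to_pandas()
before = len(merged_df)
merged_df = merged_df.drop_duplicates(subset="raw_text", keep="first")
print(f"Dropped {before - len(merged_df)} repeated documents")

spanish_dataset = Dataset.from_pandas(merged_df, preserve_index=False)
spanish_dataset.push_to_hub(DATASET_TO_UPDATE)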