Datasets · Modalities: Tabular, Text · Formats: parquet · Languages: Portuguese · Tags: legal · Libraries: Datasets, Dask
LordWaif committed · Commit 16b5182 · verified · 1 parent: c3bd000

Update cleaner.py


## Change the cleaner to emit `<data>`-style tokens instead of bare `data` words, for bidDatasets
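For context, a minimal before/after sketch of what this change does to the output. The sample string and address below are made up; the pattern is the `_tokenizaEmail` regex from this diff:

```python
import re

# _tokenizaEmail pattern, copied from cleaner.py
EMAIL = r'(\b([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,})\b)'
texto = "Edital enviado para compras@example.gov.br"  # hypothetical sample

antes = re.sub(EMAIL, ' mail ', texto)    # old: bare word padded with spaces
depois = re.sub(EMAIL, '<EMAIL>', texto)  # new: angle-bracket special token

print(antes)   # 'Edital enviado para  mail '  (stray padding spaces)
print(depois)  # 'Edital enviado para <EMAIL>'
```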

Files changed (1): cleaner.py (+9 -10)
cleaner.py CHANGED
@@ -57,7 +57,7 @@ class Cleaner:
         The space before and after the token is there because of the pattern that strips the spaces before and after the text.
         """
         paragraphs = re.sub(r'((\s)-)+', ' - ', paragraphs)
-        return re.sub(r'[-–]+', '-', paragraphs)
+        return re.sub(r'(([-]){2,})', '-', paragraphs)
 
     @staticmethod
     def _remove_multiples_dots(paragraphs):
@@ -167,7 +167,7 @@ class Corretor:
         texts = self._tokenizaEmail(texts)
         texts = self._tokenizaData(texts)
         texts = self._tokenizaHora(texts)
-        texts = self._tokenizaNumero(texts)
+        # texts = self._tokenizaNumero(texts)
         texts = self._tokenizaNumeroRomano(texts)
         texts = self._reduzNumeros(texts)
         texts = self._removeHifenInicial(texts)
@@ -436,49 +436,49 @@ class Corretor:
         """
         Tokenizes URLs.
         """
-        return re.sub(r'(((https?:\/\/)(www\.))|(www\.)|(https?:\/\/))[-a-zA-Z0-9@:%.\+~#=]{1,256}\.[a-zA-Z@0-9()]{1,6}\b([-a-zA-Z0-9()@:%\+.~#?&\/=]*)', ' url ', paragraphs)
+        return re.sub(r'(((https?:\/\/)(www\.))|(www\.)|(https?:\/\/))[-a-zA-Z0-9@:%.\+~#=]{1,256}\.[a-zA-Z@0-9()]{1,6}\b([-a-zA-Z0-9()@:%\+.~#?&\/=]*)', '<URL>', paragraphs)
 
     @staticmethod
     def _tokenizaEmail(paragraphs):
         """
         Tokenizes emails.
         """
-        return re.sub(r'(\b([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,})\b)', ' mail ', paragraphs)
+        return re.sub(r'(\b([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,})\b)', '<EMAIL>', paragraphs)
 
     @staticmethod
     def _tokenizaData(paragraphs):
         """
         Tokenizes dates.
         """
-        return re.sub(r'(\b([0-3][0-9]\/[0-1][0-9]\/(([0-9]{2})|([0-2][0-9]{3})))\b)', ' date ', paragraphs)
+        return re.sub(r'(\b([0-3][0-9]\/[0-1][0-9]\/(([0-9]{2})|([0-2][0-9]{3})))\b)', '<DATA>', paragraphs)
 
     @staticmethod
     def _tokenizaHora(paragraphs):
         """
         Tokenizes times.
         """
-        return re.sub(r'(\b(([0-1][0-9])|(2[0-3]))(\:|h)([0-5][0-9])?\b)', ' hour ', paragraphs)
+        return re.sub(r'(\b(([0-1][0-9])|(2[0-3]))(\:|h)([0-5][0-9])?\b)', '<HORA>', paragraphs)
 
     @staticmethod
     def _tokenizaNumero(paragraphs):
         """
         Tokenizes numbers.
         """
-        return re.sub(r'([0-9])+', ' number ', paragraphs)
+        return re.sub(r'(\b([0-9]+)\b)', '<NUMERO>', paragraphs)
 
     @staticmethod
     def _tokenizaNumeroRomano(paragraphs):
         """
         Tokenizes Roman numerals.
         """
-        return re.sub(r"(\s|\.|\,|\;|\:|^)(?=[XVIΙ])(XC|XL|L?X{0,3})([IΙ]X|[IΙ]V|V?[IΙ]{0,3})(\s|\.|\,|\;|\:|$)", ' number ', paragraphs, flags=re.IGNORECASE)
+        return re.sub(r"(\s|\.|\,|\;|\:|^)(?=[XVIΙ])(XC|XL|L?X{0,3})([IΙ]X|[IΙ]V|V?[IΙ]{0,3})(\s|\.|\,|\;|\:|$)", '<NUMERO>', paragraphs, flags=re.IGNORECASE)
 
     @staticmethod
     def _reduzNumeros(paragraphs):
         """
         Collapses runs of number tokens.
         """
-        return re.sub(r'(([\.\\\/\;\:\s])*(number([\-–\.\\\/\;\:\,\s])*)+)', ' number ', paragraphs)
+        return re.sub(r'(([\.\\\/\;\:\s])*(<NUMERO>([\-–\.\\\/\;\:\,\s])*)+)', ' <NUMERO> ', paragraphs)
 
     @staticmethod
     def _removeHifenInicial(paragraphs):
@@ -583,7 +583,6 @@ class Remover:
             return ''
         else:
             return paragraphs
-
 
 def executaLimpeza(dataframe: pd.DataFrame, column: str, cased, accents) -> pd.DataFrame:
     """