fercho-05 committed
Commit c652fe9 · 1 Parent(s): 8ba4893
Files changed (4)
  1. orderIA.py +3 -3
  2. special_tokens_map.json +8 -0
  3. tokenizer_config.json +8 -0
  4. vocab.txt +6 -0
orderIA.py CHANGED
@@ -4,7 +4,7 @@ from datasets import Dataset
  from transformers import BertTokenizerFast
  # Load the tokenizer

- tokenizer = BertTokenizerFast.from_pretrained('Sebastian2903/SMARTORDERIA')
+ #tokenizer = BertTokenizerFast.from_pretrained('Sebastian2903/SMARTORDERIA')
  # Load the dataset
  data = [
      {"text": "¿Qué hamburguesas tienen?", "label": 0},
@@ -17,7 +17,7 @@ df = pd.DataFrame(data)
  dataset = Dataset.from_pandas(df)

  # Tokenize the data
- #tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
+ tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")

  def preprocess_function(examples):
      return tokenizer(examples['text'], truncation=True, padding=True)
@@ -30,7 +30,7 @@ train_dataset = train_test_split['train']
  eval_dataset = train_test_split['test']

  # Configure the model
- model = AutoModelForSequenceClassification.from_pretrained('Sebastian2903/SMARTORDERIA', num_labels=4)
+ model = AutoModelForSequenceClassification.from_pretrained("bert-base-multilingual-cased", num_labels=4)

  # Configure the trainer
  training_args = TrainingArguments(
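
Taken together, the three hunks retarget both the tokenizer and the model from the 'Sebastian2903/SMARTORDERIA' checkpoint to the public "bert-base-multilingual-cased" one. Below is a minimal runnable sketch of the flow the file converges on; it assumes the parts of orderIA.py outside these hunks import AutoTokenizer, AutoModelForSequenceClassification, Trainer, and TrainingArguments, and fill in the rest of the data list and training arguments. The extra data rows are hypothetical placeholders so the split has something to divide.

import pandas as pd
from datasets import Dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

data = [
    {"text": "¿Qué hamburguesas tienen?", "label": 0},  # the one row the hunk shows
    # hypothetical placeholder rows so train_test_split has data to divide:
    {"text": "Quiero una hamburguesa doble", "label": 1},
    {"text": "¿Cuánto cuesta el combo?", "label": 2},
    {"text": "Cancela mi pedido", "label": 3},
    {"text": "¿Qué bebidas tienen?", "label": 0},
]
dataset = Dataset.from_pandas(pd.DataFrame(data))

tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")

def preprocess_function(examples):
    return tokenizer(examples["text"], truncation=True, padding=True)

tokenized = dataset.map(preprocess_function, batched=True)
splits = tokenized.train_test_split(test_size=0.2)  # split ratio is an assumption

model = AutoModelForSequenceClassification.from_pretrained(
    "bert-base-multilingual-cased", num_labels=4)

training_args = TrainingArguments(output_dir="smartorderia-out")  # other fields not shown in the diff
trainer = Trainer(model=model, args=training_args,
                  train_dataset=splits["train"], eval_dataset=splits["test"])
trainer.train()  # fine-tunes mBERT on the 4-label intent set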
special_tokens_map.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "unk_token": "[UNK]",
+   "sep_token": "[SEP]",
+   "pad_token": "[PAD]",
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]"
+ }
+
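
For reference, these five entries match what the stock multilingual BERT tokenizer already reports, so the file effectively restates the defaults. A quick check (a sketch, run against the public checkpoint):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
print(tok.special_tokens_map)
# {'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]',
#  'cls_token': '[CLS]', 'mask_token': '[MASK]'}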
tokenizer_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "do_lower_case": true,
+   "unk_token": "[UNK]",
+   "sep_token": "[SEP]",
+   "pad_token": "[PAD]",
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]"
+ }
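
The one setting here that is not a restated default is "do_lower_case": true; note that the bert-base-multilingual-cased checkpoint orderIA.py now targets is a cased model, which ships do_lower_case: false. A small sketch of the flag's effect, using bert-base-uncased (which ships the same true setting) for illustration:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bert-base-uncased")
print(tok.tokenize("Hamburguesa Doble"))  # input is lowercased before WordPiece splitting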
vocab.txt ADDED
@@ -0,0 +1,6 @@
+ [CLS]
+ [SEP]
+ [PAD]
+ [unused1]
+ [unused2]
+ ...
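
These three files are the minimum set BertTokenizerFast needs to load from disk, and in a BERT vocab.txt a token's id is simply its 0-based line index. A sketch, assuming the files sit in ./tokenizer/ and the elided "..." portion of vocab.txt supplies the remaining tokens (including [UNK] and [MASK], which both JSON files reference):

from transformers import BertTokenizerFast

tokenizer = BertTokenizerFast.from_pretrained("./tokenizer")
print(tokenizer.convert_tokens_to_ids("[CLS]"))  # 0: [CLS] is line 1 of vocab.txt
print(tokenizer.convert_tokens_to_ids("[PAD]"))  # 2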