tecuhtli committed on
Commit
4441146
·
1 Parent(s): a4a09ac

Actualizo app.py, agrego nueva arquitectura Mori

Browse files
Files changed (1) hide show
  1. app.py +30 -12
app.py CHANGED
@@ -1,19 +1,37 @@
1
  # -*- coding: utf-8 -*-
2
  """Mori – Inferencia Técnica (estable, UTF-8, con opción RAG ON/OFF)"""
3
- #=====================================================================================
4
- # Importing Libraries ===============================================================
5
- #=====================================================================================
6
- import streamlit as st
7
- import datetime as dt
 
 
 
8
  from pathlib import Path
9
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
10
- from Mori_TechnicalPrompts import answer_with_mori_rag, answer_with_mori_plain, answer_with_qwen_base
 
 
11
  import torch
12
- from huggingface_hub import hf_hub_download, login
13
- #***************************************************************************
14
- #Setting up variables
15
- #***************************************************************************
16
- # Token privado desde variable de entorno
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  HF_TOKEN = os.environ.get("HF_TOKEN")
18
 
19
  #***************************************************************************
 
1
  # -*- coding: utf-8 -*-
2
  """Mori – Inferencia Técnica (estable, UTF-8, con opción RAG ON/OFF)"""
3
+ # =============================================================================
4
+ # Imports
5
+ # =============================================================================
6
+ import os
7
+ import csv
8
+ import json
9
+ import random
10
+ import uuid
11
  from pathlib import Path
12
+ import datetime as dt
13
+
14
+ import numpy as np
15
+ import streamlit as st
16
  import torch
17
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
18
+
19
+ from Mori_TechnicalPrompts import (
20
+ answer_with_mori_rag,
21
+ answer_with_mori_plain,
22
+ answer_with_qwen_base,
23
+ )
24
+
25
+ # Si tus modelos son públicos, no necesitas login ni hf_hub_download.
26
+ # from huggingface_hub import hf_hub_download, login
27
+
28
+ # =============================================================================
29
+ # Configuración general
30
+ # =============================================================================
31
+
32
+ device = "cuda" if torch.cuda.is_available() else "cpu"
33
+
34
+ # Token privado (solo si vas a usar modelos privados)
35
  HF_TOKEN = os.environ.get("HF_TOKEN")
36
 
37
  #***************************************************************************