jolchmo commited on
Commit
981ad89
·
1 Parent(s): b1d5984
Files changed (1) hide show
  1. app.py +12 -38
app.py CHANGED
@@ -1,65 +1,39 @@
1
  import gradio as gr
2
  import spaces
3
  import torch
4
- from transformers import LlamaTokenizerFast, LlamaForCausalLM
5
  from peft import PeftModel
6
  import os
 
 
7
 
8
- # 加载模型和tokenizer
9
- model_name = "meta-llama/Meta-Llama-3-8B"
10
- adapter_name = "FinGPT/fingpt-mt_llama3-8b_lora"
11
 
12
  # 获取HF token(Spaces会自动提供)
13
  hf_token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGING_FACE_HUB_TOKEN")
14
 
15
- print("=" * 50)
16
- print("开始初始化模型...")
17
- print(f"Token存在: {bool(hf_token)}")
18
- print(f"基础模型: {model_name}")
19
- print(f"LoRA适配器: {adapter_name}")
20
- print("=" * 50)
21
 
22
  model = None
23
  tokenizer = None
24
- model_loaded = False
25
 
26
  try:
27
- print("\n[1/3] 加载tokenizer...")
28
- tokenizer = LlamaTokenizerFast.from_pretrained(
29
- model_name,
30
- trust_remote_code=True,
31
- token=hf_token,
32
- )
33
- tokenizer.pad_token = tokenizer.eos_token
34
- print("✓ Tokenizer加载成功")
35
 
36
- print("\n[2/3] 加载基础模型...")
37
- base_model = LlamaForCausalLM.from_pretrained(
38
- model_name,
39
- torch_dtype=torch.float16,
40
- device_map="auto",
41
  trust_remote_code=True,
 
 
42
  token=hf_token,
43
  )
44
- print("\n[3/3] 加载LoRA适配器...")
45
- model = PeftModel.from_pretrained(base_model, adapter_name)
 
 
 
46
  model = model.eval()
47
 
48
- print("✓ LoRA适配器加载成功")
49
- print("\n" + "=" * 50)
50
- print("✅ 所有模型组件加载完成!")
51
- print("=" * 50 + "\n")
52
-
53
  except Exception as e:
54
  print("\n" + "=" * 50)
55
  print("❌ 模型加载失败!")
56
  print(f"错误信息: {e}")
57
- print("\n可能的解决方案:")
58
- print("1. 确保你的HF账号已获得Meta-Llama-3-8B访问权限")
59
- print(" 访问: https://huggingface.co/meta-llama/Meta-Llama-3-8B")
60
- print("2. 在Spaces的Settings → Repository secrets中添加HF_TOKEN")
61
- print(" 或启用 'hf_oauth: true' 以使用OAuth认证")
62
- print("=" * 50 + "\n")
63
  raise
64
 
65
 
@@ -116,7 +90,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
116
  """
117
  # 🤖 FinGPT Chatbot
118
 
119
- 这是一个基于 **FinGPT/fingpt-mt_llama3-8b_lora** 模型的金融对话助手。
120
 
121
  您可以询问关于金融市场、投资、经济分析等问题。
122
  """
 
1
  import gradio as gr
2
  import spaces
3
  import torch
 
4
  from peft import PeftModel
5
  import os
6
+ from datasets import load_dataset
7
+ from transformers import AutoTokenizer, AutoModelForCausalLM
8
 
 
 
 
9
 
10
  # 获取HF token(Spaces会自动提供)
11
  hf_token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGING_FACE_HUB_TOKEN")
12
 
 
 
 
 
 
 
13
 
14
  model = None
15
  tokenizer = None
 
16
 
17
  try:
 
 
 
 
 
 
 
 
18
 
19
+ base_model = AutoModelForCausalLM.from_pretrained(
20
+ 'meta-llama/Llama-2-7b-chat-hf',
 
 
 
21
  trust_remote_code=True,
22
+ device_map="auto",
23
+ torch_dtype=torch.float16, # fp16 halves memory use; drop this only if you have ample VRAM for full precision
24
  token=hf_token,
25
  )
26
+ tokenizer = AutoTokenizer.from_pretrained('meta-llama/Llama-2-7b-chat-hf',
27
+ trust_remote_code=True,
28
+ token=hf_token,
29
+ )
30
+ model = PeftModel.from_pretrained(base_model, 'FinGPT/fingpt-forecaster_dow30_llama2-7b_lora')
31
  model = model.eval()
32
 
 
 
 
 
 
33
  except Exception as e:
34
  print("\n" + "=" * 50)
35
  print("❌ 模型加载失败!")
36
  print(f"错误信息: {e}")
 
 
 
 
 
 
37
  raise
38
 
39
 
 
90
  """
91
  # 🤖 FinGPT Chatbot
92
 
93
+ 这是一个基于 **FinGPT/fingpt-forecaster_dow30_llama2-7b_lora** 模型的金融对话助手。
94
 
95
  您可以询问关于金融市场、投资、经济分析等问题。
96
  """