Chris4K committed on
Commit
3870925
·
verified ·
1 Parent(s): 2e88831

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -6
app.py CHANGED
@@ -54,14 +54,15 @@ class EventScraper:
54
  # Try local model first
55
  if TRANSFORMERS_AVAILABLE:
56
  try:
57
- model_name = "meta-llama/Llama-3.2-3B-Instruct"
58
  self.tokenizer = AutoTokenizer.from_pretrained(model_name)
59
  self.model = AutoModelForCausalLM.from_pretrained(
60
  model_name,
61
  torch_dtype=torch.float16,
62
  return_dict_in_generate=False,
63
  device_map='auto',
64
- max_new_tokens=12000
 
65
  )
66
  return
67
  except Exception as local_err:
@@ -76,14 +77,12 @@ class EventScraper:
76
  if hf_token:
77
  self.client = InferenceClient(
78
  model="meta-llama/Llama-3.2-3B-Instruct",
79
- token=hf_token,
80
- max_new_tokens=12000
81
  )
82
  else:
83
  # Public model access without token
84
  self.client = InferenceClient(
85
- model="meta-llama/Llama-3.2-3B-Instruct",
86
- max_new_tokens=12000
87
  )
88
  except Exception as e:
89
  gr.Warning(f"Inference Client setup error: {str(e)}")
 
54
  # Try local model first
55
  if TRANSFORMERS_AVAILABLE:
56
  try:
57
+ model_name = "meta-llama/Llama-3.2-1B-Instruct"
58
  self.tokenizer = AutoTokenizer.from_pretrained(model_name)
59
  self.model = AutoModelForCausalLM.from_pretrained(
60
  model_name,
61
  torch_dtype=torch.float16,
62
  return_dict_in_generate=False,
63
  device_map='auto',
64
+ max_new_tokens=12000,
65
+ return_full_text=False,
66
  )
67
  return
68
  except Exception as local_err:
 
77
  if hf_token:
78
  self.client = InferenceClient(
79
  model="meta-llama/Llama-3.2-3B-Instruct",
80
+ token=hf_token
 
81
  )
82
  else:
83
  # Public model access without token
84
  self.client = InferenceClient(
85
+ model="meta-llama/Llama-3.2-3B-Instruct"
 
86
  )
87
  except Exception as e:
88
  gr.Warning(f"Inference Client setup error: {str(e)}")