Pisethan committed on
Commit
02ac06a
·
verified ·
1 Parent(s): 5cac4b6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -3,17 +3,17 @@ import re
3
  from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
4
  from datasets import load_dataset
5
 
 
 
6
  # Get the token from the environment variable
7
  TOKEN = os.getenv("HF_API_TOKEN")
8
  if not TOKEN:
9
  raise ValueError("Hugging Face API token not found. Set it as an environment variable.")
10
 
11
- MODEL_NAME = "Pisethan/sangapac-math"
12
-
13
  # Load model and tokenizer
14
  try:
15
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_auth_token=TOKEN)
16
- model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, use_auth_token=TOKEN)
17
  classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)
18
  except Exception as e:
19
  classifier = None
@@ -21,7 +21,7 @@ except Exception as e:
21
 
22
  # Load dataset dynamically from Hugging Face or locally
23
  try:
24
- dataset = load_dataset("Pisethan/sangapac-math-dataset", use_auth_token=TOKEN)["train"]
25
  dataset_dict = {re.sub(r'\s+', ' ', entry["input"].strip()): entry for entry in dataset}
26
  except Exception as e:
27
  dataset_dict = {}
 
3
  from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
4
  from datasets import load_dataset
5
 
6
+ MODEL_NAME = "Pisethan/sangapac-math"
7
+
8
  # Get the token from the environment variable
9
  TOKEN = os.getenv("HF_API_TOKEN")
10
  if not TOKEN:
11
  raise ValueError("Hugging Face API token not found. Set it as an environment variable.")
12
 
 
 
13
  # Load model and tokenizer
14
  try:
15
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=TOKEN)
16
+ model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, token=TOKEN)
17
  classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)
18
  except Exception as e:
19
  classifier = None
 
21
 
22
  # Load dataset dynamically from Hugging Face or locally
23
  try:
24
+ dataset = load_dataset("Pisethan/sangapac-math-dataset", token=TOKEN)["train"]
25
  dataset_dict = {re.sub(r'\s+', ' ', entry["input"].strip()): entry for entry in dataset}
26
  except Exception as e:
27
  dataset_dict = {}