celikn committed on
Commit
dda1c63
·
verified ·
1 Parent(s): 462e09e

Update app.py

Browse files

Fixed the token

Files changed (1) hide show
  1. app.py +9 -1
app.py CHANGED
@@ -17,6 +17,14 @@ import yaml
17
  import gradio as gr
18
  import pandas as pd
19
  from tqdm import tqdm
 
 
 
 
 
 
 
 
20
 
21
  # Optional metrics
22
  try:
@@ -71,7 +79,7 @@ def compute_metrics(task, prediction, reference):
71
  # ---------------- Hugging Face Inference ---------------- #
72
  def hf_generate(model_name, prompt, max_new_tokens=256, temperature=0.2):
73
  from huggingface_hub import InferenceClient
74
- client = InferenceClient(model=model_name, token=os.getenv("HUGGINGFACE_HUB_TOKEN"))
75
  start = time.time()
76
  try:
77
  output = client.text_generation(prompt, max_new_tokens=max_new_tokens, temperature=temperature)
 
17
  import gradio as gr
18
  import pandas as pd
19
  from tqdm import tqdm
20
+ from huggingface_hub import login
21
+
22
+ HF_TOKEN = (os.environ.get("HUGGINGFACE_HUB_TOKEN", "") or "").strip()
23
+
24
+ if HF_TOKEN:
25
+ login(token=HF_TOKEN)
26
+ else:
27
+ print("UYARI: HF_TOKEN / HUGGINGFACE_HUB_TOKEN bulunamadı, gated modellere erişilemeyebilir.")
28
 
29
  # Optional metrics
30
  try:
 
79
  # ---------------- Hugging Face Inference ---------------- #
80
  def hf_generate(model_name, prompt, max_new_tokens=256, temperature=0.2):
81
  from huggingface_hub import InferenceClient
82
+ client = InferenceClient(model=model_name, token=HF_TOKEN)
83
  start = time.time()
84
  try:
85
  output = client.text_generation(prompt, max_new_tokens=max_new_tokens, temperature=temperature)