Claude committed on
Commit
bfdf513
·
unverified ·
1 Parent(s): 3e09a0a

refactor: Remove all OAuth dependencies, use HF_TOKEN only

Browse files
Files changed (1) hide show
  1. app.py +28 -80
app.py CHANGED
@@ -26,20 +26,9 @@ except ImportError:
26
  ZEROGPU_AVAILABLE = False
27
 
28
 
29
- def get_inference_token(oauth_token: gr.OAuthToken | None = None) -> str | None:
30
- """Get token for HuggingFace Inference API.
31
-
32
- Prefers HF_TOKEN environment variable (works with 2FA accounts),
33
- falls back to OAuth token if available.
34
- """
35
- # Prefer HF_TOKEN env var (works even with 2FA)
36
- hf_token = os.environ.get("HF_TOKEN")
37
- if hf_token:
38
- return hf_token
39
- # Fall back to OAuth token
40
- if oauth_token:
41
- return oauth_token.token
42
- return None
43
 
44
  # Global embedding model (shared - stateless)
45
  _embedding_model = None
@@ -80,14 +69,9 @@ class SessionState:
80
  )
81
 
82
 
83
- def create_session_state(profile: gr.OAuthProfile | None) -> SessionState:
84
- """Create a session state, optionally based on user profile."""
85
- if profile:
86
- # Use a hash of the user's name for consistent session across reloads
87
- session_id = uuid.uuid5(uuid.NAMESPACE_DNS, profile.name).hex
88
- else:
89
- session_id = uuid.uuid4().hex
90
- return SessionState(session_id)
91
 
92
 
93
  # Default collection for backward compatibility (used by tests)
@@ -98,10 +82,6 @@ collection = _default_client.get_or_create_collection(
98
  )
99
 
100
 
101
- def hello(profile: gr.OAuthProfile | None) -> str:
102
- if profile is None:
103
- return "Please log in to continue."
104
- return f"Hello {profile.name}!"
105
 
106
 
107
  def get_device():
@@ -461,14 +441,10 @@ def is_valid_youtube_url(url: str) -> tuple[bool, str]:
461
  def _process_youtube_impl(
462
  url: str,
463
  num_frames: int,
464
- profile: gr.OAuthProfile | None,
465
  session_state: SessionState | None = None,
466
  progress: gr.Progress = gr.Progress(),
467
  ) -> str:
468
  """Internal implementation of video processing."""
469
- if profile is None:
470
- return "**Please log in first** using the button at the top right."
471
-
472
  is_valid, result = is_valid_youtube_url(url)
473
  if not is_valid:
474
  return result
@@ -554,36 +530,29 @@ if ZEROGPU_AVAILABLE:
554
  def process_youtube(
555
  url: str,
556
  num_frames: int,
557
- profile: gr.OAuthProfile | None,
558
  session_state: SessionState | None = None,
559
  progress: gr.Progress = gr.Progress(),
560
  ) -> str:
561
- return _process_youtube_impl(url, num_frames, profile, session_state, progress)
562
  else:
563
  def process_youtube(
564
  url: str,
565
  num_frames: int,
566
- profile: gr.OAuthProfile | None,
567
  session_state: SessionState | None = None,
568
  progress: gr.Progress = gr.Progress(),
569
  ) -> str:
570
- return _process_youtube_impl(url, num_frames, profile, session_state, progress)
571
 
572
 
573
  def chat_with_videos(
574
  message: str,
575
  history: list[dict],
576
- profile: gr.OAuthProfile | None,
577
- oauth_token: gr.OAuthToken | None,
578
  session_state: SessionState | None = None,
579
  ) -> str:
580
- if profile is None:
581
- return "Please log in to use the chat feature."
582
-
583
- # Get inference token (prefers HF_TOKEN env var, falls back to OAuth)
584
- token = get_inference_token(oauth_token)
585
  if token is None:
586
- return "Authentication required. Please log in again."
587
 
588
  if not message or not message.strip():
589
  return "Please enter a question."
@@ -648,8 +617,8 @@ Question: {message}"""
648
  else:
649
  # All models failed
650
  error_msg = str(last_error) if last_error else "Unknown error"
651
- if "otp" in error_msg.lower() or "400" in error_msg:
652
- return "Authentication error: Your HuggingFace account has 2FA enabled. Please ask the Space owner to configure an HF_TOKEN secret."
653
  if "401" in error_msg or "unauthorized" in error_msg.lower():
654
  return "Authentication error. Please try logging out and back in."
655
  if "429" in error_msg or "rate" in error_msg.lower():
@@ -732,8 +701,6 @@ def handle_chat(
732
  message: str,
733
  history: list[dict],
734
  session_state: SessionState,
735
- profile: gr.OAuthProfile | None = None,
736
- oauth_token: gr.OAuthToken | None = None,
737
  progress: gr.Progress = gr.Progress(),
738
  ) -> tuple[list[dict], str, SessionState]:
739
  """Unified chat handler that processes URLs or answers questions."""
@@ -741,7 +708,7 @@ def handle_chat(
741
 
742
  # Create session state if needed
743
  if session_state is None:
744
- session_state = create_session_state(profile)
745
 
746
  if not message or not message.strip():
747
  return history, "", session_state
@@ -749,8 +716,8 @@ def handle_chat(
749
  # Add user message to history
750
  history.append({"role": "user", "content": message})
751
 
752
- # Check if we have authentication (either HF_TOKEN or OAuth)
753
- token = get_inference_token(oauth_token)
754
  if token is None:
755
  history.append({
756
  "role": "assistant",
@@ -771,7 +738,7 @@ def handle_chat(
771
  })
772
 
773
  try:
774
- result = _process_youtube_impl(normalized, 5, profile, session_state, progress)
775
 
776
  # Summarize the result for chat
777
  if "Error" in result or "Please" in result:
@@ -812,37 +779,22 @@ def handle_chat(
812
  })
813
  else:
814
  # Answer question about videos
815
- if oauth_token is None:
816
- history.append({
817
- "role": "assistant",
818
- "content": "Authentication error. Please try refreshing the page."
819
- })
820
- else:
821
- response = chat_with_videos(message, history, profile, oauth_token, session_state)
822
- history.append({"role": "assistant", "content": response})
823
 
824
  return history, "", session_state
825
 
826
 
827
- def get_welcome_message(profile: gr.OAuthProfile | None) -> list[dict]:
828
- """Get initial chat message based on login state."""
829
- if profile:
830
- return [{
831
- "role": "assistant",
832
- "content": (
833
- f"Hi **{profile.name}**! I'm your Video Analyzer assistant.\n\n"
834
- f"**Here's how I work:**\n"
835
- f"1. Paste a YouTube URL and I'll analyze it\n"
836
- f"2. Ask me questions about the video content\n\n"
837
- f"Let's get started - paste a YouTube video URL!"
838
- )
839
- }]
840
  return [{
841
  "role": "assistant",
842
  "content": (
843
- "Welcome to Video Analyzer!\n\n"
844
- "I can analyze YouTube videos and answer questions about them.\n\n"
845
- "**Please sign in with HuggingFace** to get started."
 
 
846
  )
847
  }]
848
 
@@ -912,8 +864,8 @@ def create_demo() -> gr.Blocks:
912
  ) -> tuple[list[dict], str, SessionState]:
913
  """Initialize session state and welcome message."""
914
  if current_state is None:
915
- current_state = create_session_state(None)
916
- welcome = get_welcome_message(None)
917
  stats = get_knowledge_stats(current_state)
918
  return welcome, stats, current_state
919
 
@@ -921,8 +873,6 @@ def create_demo() -> gr.Blocks:
921
  audio_path: str,
922
  history: list[dict],
923
  session_state_val: SessionState,
924
- profile: gr.OAuthProfile | None = None,
925
- oauth_token: gr.OAuthToken | None = None,
926
  ) -> tuple[list[dict], str | None, SessionState, None]:
927
  """Handle voice input - transcribe, process, and generate audio response."""
928
  # Transcribe voice input
@@ -936,8 +886,6 @@ def create_demo() -> gr.Blocks:
936
  message=transcribed_text,
937
  history=history,
938
  session_state=session_state_val,
939
- profile=profile,
940
- oauth_token=oauth_token,
941
  )
942
 
943
  # Get the last assistant response for TTS
 
26
  ZEROGPU_AVAILABLE = False
27
 
28
 
29
+ def get_inference_token() -> str | None:
30
+ """Get token for HuggingFace Inference API from environment."""
31
+ return os.environ.get("HF_TOKEN")
 
 
 
 
 
 
 
 
 
 
 
32
 
33
  # Global embedding model (shared - stateless)
34
  _embedding_model = None
 
69
  )
70
 
71
 
72
+ def create_session_state() -> SessionState:
73
+ """Create a new session state with random ID."""
74
+ return SessionState(uuid.uuid4().hex)
 
 
 
 
 
75
 
76
 
77
  # Default collection for backward compatibility (used by tests)
 
82
  )
83
 
84
 
 
 
 
 
85
 
86
 
87
  def get_device():
 
441
  def _process_youtube_impl(
442
  url: str,
443
  num_frames: int,
 
444
  session_state: SessionState | None = None,
445
  progress: gr.Progress = gr.Progress(),
446
  ) -> str:
447
  """Internal implementation of video processing."""
 
 
 
448
  is_valid, result = is_valid_youtube_url(url)
449
  if not is_valid:
450
  return result
 
530
  def process_youtube(
531
  url: str,
532
  num_frames: int,
 
533
  session_state: SessionState | None = None,
534
  progress: gr.Progress = gr.Progress(),
535
  ) -> str:
536
+ return _process_youtube_impl(url, num_frames, session_state, progress)
537
  else:
538
  def process_youtube(
539
  url: str,
540
  num_frames: int,
 
541
  session_state: SessionState | None = None,
542
  progress: gr.Progress = gr.Progress(),
543
  ) -> str:
544
+ return _process_youtube_impl(url, num_frames, session_state, progress)
545
 
546
 
547
  def chat_with_videos(
548
  message: str,
549
  history: list[dict],
 
 
550
  session_state: SessionState | None = None,
551
  ) -> str:
552
+ # Get inference token from environment
553
+ token = get_inference_token()
 
 
 
554
  if token is None:
555
+ return "No API token configured. Please ask the Space owner to set HF_TOKEN."
556
 
557
  if not message or not message.strip():
558
  return "Please enter a question."
 
617
  else:
618
  # All models failed
619
  error_msg = str(last_error) if last_error else "Unknown error"
620
+ if "otp" in error_msg.lower():
621
+ return "Authentication error. Please check HF_TOKEN configuration."
622
  if "401" in error_msg or "unauthorized" in error_msg.lower():
623
  return "Authentication error. Please try logging out and back in."
624
  if "429" in error_msg or "rate" in error_msg.lower():
 
701
  message: str,
702
  history: list[dict],
703
  session_state: SessionState,
 
 
704
  progress: gr.Progress = gr.Progress(),
705
  ) -> tuple[list[dict], str, SessionState]:
706
  """Unified chat handler that processes URLs or answers questions."""
 
708
 
709
  # Create session state if needed
710
  if session_state is None:
711
+ session_state = create_session_state()
712
 
713
  if not message or not message.strip():
714
  return history, "", session_state
 
716
  # Add user message to history
717
  history.append({"role": "user", "content": message})
718
 
719
+ # Check if we have HF_TOKEN configured
720
+ token = get_inference_token()
721
  if token is None:
722
  history.append({
723
  "role": "assistant",
 
738
  })
739
 
740
  try:
741
+ result = _process_youtube_impl(normalized, 5, session_state, progress)
742
 
743
  # Summarize the result for chat
744
  if "Error" in result or "Please" in result:
 
779
  })
780
  else:
781
  # Answer question about videos
782
+ response = chat_with_videos(message, history, session_state)
783
+ history.append({"role": "assistant", "content": response})
 
 
 
 
 
 
784
 
785
  return history, "", session_state
786
 
787
 
788
+ def get_welcome_message() -> list[dict]:
789
+ """Get initial welcome message."""
 
 
 
 
 
 
 
 
 
 
 
790
  return [{
791
  "role": "assistant",
792
  "content": (
793
+ "Welcome to **Video Analyzer**!\n\n"
794
+ "**Here's how I work:**\n"
795
+ "1. Paste a YouTube URL and I'll analyze it\n"
796
+ "2. Ask me questions about the video content\n\n"
797
+ "Let's get started - paste a YouTube video URL!"
798
  )
799
  }]
800
 
 
864
  ) -> tuple[list[dict], str, SessionState]:
865
  """Initialize session state and welcome message."""
866
  if current_state is None:
867
+ current_state = create_session_state()
868
+ welcome = get_welcome_message()
869
  stats = get_knowledge_stats(current_state)
870
  return welcome, stats, current_state
871
 
 
873
  audio_path: str,
874
  history: list[dict],
875
  session_state_val: SessionState,
 
 
876
  ) -> tuple[list[dict], str | None, SessionState, None]:
877
  """Handle voice input - transcribe, process, and generate audio response."""
878
  # Transcribe voice input
 
886
  message=transcribed_text,
887
  history=history,
888
  session_state=session_state_val,
 
 
889
  )
890
 
891
  # Get the last assistant response for TTS