nkasmanoff committed on
Commit
feeacfa
·
verified ·
1 Parent(s): 8a06121

Upload folder using huggingface_hub

Browse files
Files changed (2) hide show
  1. .github/workflows/update_space.yml +18 -19
  2. app.py +5 -3
.github/workflows/update_space.yml CHANGED
@@ -1,28 +1,27 @@
1
  name: Run Python script
2
 
3
  on:
4
- push:
5
- branches:
6
- - main
7
 
8
  jobs:
9
- build:
10
- runs-on: ubuntu-latest
11
 
12
- steps:
13
- - name: Checkout
14
- uses: actions/checkout@v2
15
 
16
- - name: Set up Python
17
- uses: actions/setup-python@v2
18
- with:
19
- python-version: '3.9'
20
 
21
- - name: Install Gradio
22
- run: python -m pip install gradio
23
 
24
- - name: Log in to Hugging Face
25
- run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
26
-
27
- - name: Deploy to Spaces
28
- run: gradio deploy
 
1
  name: Run Python script
2
 
3
  on:
4
+ push:
5
+ branches:
6
+ - main
7
 
8
  jobs:
9
+ build:
10
+ runs-on: ubuntu-latest
11
 
12
+ steps:
13
+ - name: Checkout
14
+ uses: actions/checkout@v2
15
 
16
+ - name: Set up Python
17
+ uses: actions/setup-python@v2
18
+ with:
19
+ python-version: '3.9'
20
 
21
+ - name: Install Gradio
22
+ run: python -m pip install gradio
23
 
24
+ - name: Deploy to Spaces
25
+ env:
26
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
27
+ run: gradio deploy
 
app.py CHANGED
@@ -46,7 +46,6 @@ model = None
46
  tokenizer = None
47
 
48
 
49
- @spaces.GPU
50
  def load_steering_model():
51
  """Load the model and tokenizer for steering."""
52
  global model, tokenizer
@@ -511,6 +510,7 @@ def classify_message(message):
511
  return best_category, confidence, suggested_brand, reasoning
512
 
513
 
 
514
  def generate_steered_response(
515
  message, history, steering_coeff, max_tokens, brand, steering_type
516
  ):
@@ -724,7 +724,7 @@ def chat(
724
 
725
 
726
  # Build the Gradio interface
727
- with gr.Blocks(title="shillLM", theme=gr.themes.Soft()) as demo:
728
  gr.Markdown(
729
  """
730
  # 💸 shiLLM 💸
@@ -1300,4 +1300,6 @@ if __name__ == "__main__":
1300
  print(" Open http://localhost:7860 in your browser to start chatting!")
1301
  print("=" * 50)
1302
 
1303
- demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
 
 
 
46
  tokenizer = None
47
 
48
 
 
49
  def load_steering_model():
50
  """Load the model and tokenizer for steering."""
51
  global model, tokenizer
 
510
  return best_category, confidence, suggested_brand, reasoning
511
 
512
 
513
+ @spaces.GPU
514
  def generate_steered_response(
515
  message, history, steering_coeff, max_tokens, brand, steering_type
516
  ):
 
724
 
725
 
726
  # Build the Gradio interface
727
+ with gr.Blocks(title="shillLM") as demo:
728
  gr.Markdown(
729
  """
730
  # 💸 shiLLM 💸
 
1300
  print(" Open http://localhost:7860 in your browser to start chatting!")
1301
  print("=" * 50)
1302
 
1303
+ demo.launch(
1304
+ server_name="0.0.0.0", server_port=7860, share=True, theme=gr.themes.Soft()
1305
+ )