dodd869 committed on
Commit
b62e110
·
verified ·
1 Parent(s): 58a0d24

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -8
app.py CHANGED
@@ -3,9 +3,20 @@ import gradio as gr
3
  from huggingface_hub import InferenceClient
4
  import requests
5
 
6
- url = "https://huggingface.co/models-json?num_parameters=min%3A76B&inference_provider=novita&sort=trending&withCount=true"
7
- res = requests.get(url).json()
8
- model_ids = [m['id'] for m in res.get('models', [])]
 
 
 
 
 
 
 
 
 
 
 
9
 
10
  client = InferenceClient(provider="novita", api_key=os.environ["HF_TOKEN"])
11
 
@@ -18,11 +29,12 @@ def respond(message, history, model):
18
  messages=[{"role": "user", "content": message}],
19
  stream=True
20
  ):
21
- content = chunk.choices[0].delta.content
22
- if content:
23
- full_reply += content
24
- history[-1] = (message, full_reply)
25
- yield "", history
 
26
 
27
  with gr.Blocks() as demo:
28
  gr.Markdown("## AI")
 
3
  from huggingface_hub import InferenceClient
4
  import requests
5
 
6
+ def fetch_models():
7
+ models = []
8
+ page = 1
9
+ while True:
10
+ url = f"https://huggingface.co/models-json?num_parameters=min%3A0B&inference_provider=novita&p={page}&sort=modified&withCount=true"
11
+ res = requests.get(url).json()
12
+ page_models = [m['id'] for m in res.get('models', [])]
13
+ if not page_models:
14
+ break
15
+ models.extend(page_models)
16
+ page += 1
17
+ return models
18
+
19
+ model_ids = fetch_models()
20
 
21
  client = InferenceClient(provider="novita", api_key=os.environ["HF_TOKEN"])
22
 
 
29
  messages=[{"role": "user", "content": message}],
30
  stream=True
31
  ):
32
+ if chunk.choices and len(chunk.choices) > 0:
33
+ delta = chunk.choices[0].delta
34
+ if hasattr(delta, 'content') and delta.content:
35
+ full_reply += delta.content
36
+ history[-1] = (message, full_reply)
37
+ yield "", history
38
 
39
  with gr.Blocks() as demo:
40
  gr.Markdown("## AI")