leadr64 committed on
Commit
8e7416e
·
1 Parent(s): 795d78e

Ajouter le script Gradio et les dépendances

Browse files
Files changed (1) hide show
  1. app.py +8 -10
app.py CHANGED
@@ -1,27 +1,24 @@
1
  import os
 
2
  import gradio as gr
3
  from qdrant_client import QdrantClient
4
  from transformers import ClapModel, ClapProcessor
5
-
6
- # Charger les variables d'environnement
7
  QDRANT_URL = os.getenv("QDRANT_URL")
8
  QDRANT_API_KEY = os.getenv("QDRANT_API_KEY")
9
-
10
- # Vérifiez si les variables d'environnement sont définies
11
-
12
- # Chargement de la base de données Qdrant en local
13
- client = QdrantClient(QDRANT_URL, api_key=QDRANT_API_KEY)
14
  print("[INFO] Client created...")
15
 
16
- # Chargement du modèle
17
  print("[INFO] Loading the model...")
18
  model_name = "laion/larger_clap_general"
19
  model = ClapModel.from_pretrained(model_name)
20
  processor = ClapProcessor.from_pretrained(model_name)
21
 
22
- # Interface Gradio
23
  max_results = 10
24
 
 
25
  def sound_search(query):
26
  text_inputs = processor(text=query, return_tensors="pt")
27
  text_embed = model.get_text_features(**text_inputs)[0]
@@ -38,12 +35,13 @@ def sound_search(query):
38
  for hit in hits
39
  ]
40
 
 
41
  with gr.Blocks() as demo:
42
  gr.Markdown(
43
  """# Sound search database """
44
  )
45
  inp = gr.Textbox(placeholder="What sound are you looking for ?")
46
- out = [gr.Audio(label=f"{x}") for x in range(max_results)] # Nécessaire d'avoir des objets différents
47
  inp.change(sound_search, inp, out)
48
 
49
  demo.launch()
 
1
import os

import gradio as gr
from qdrant_client import QdrantClient
from transformers import ClapModel, ClapProcessor

# Qdrant connection settings, supplied via environment variables.
QDRANT_URL = os.getenv("QDRANT_URL")
QDRANT_API_KEY = os.getenv("QDRANT_API_KEY")

# Connect to the Qdrant vector database.
# FIX: the API key must be passed via the `api_key` keyword argument.
# Passed positionally (as this commit did) it is bound to a different
# parameter of QdrantClient.__init__, so authentication silently breaks.
client = QdrantClient(QDRANT_URL, api_key=QDRANT_API_KEY)

print("[INFO] Client created...")
11
 
12
# Load the CLAP checkpoint used to embed text queries for the search.
print("[INFO] Loading the model...")
model_name = "laion/larger_clap_general"
model = ClapModel.from_pretrained(model_name)
processor = ClapProcessor.from_pretrained(model_name)

# Gradio interface ---------------------------------------------------------
# Fixed number of audio result slots rendered in the UI.
max_results = 10
20
 
21
+
22
  def sound_search(query):
23
  text_inputs = processor(text=query, return_tensors="pt")
24
  text_embed = model.get_text_features(**text_inputs)[0]
 
35
  for hit in hits
36
  ]
37
 
38
+
39
# Gradio UI: one text box driving a fixed bank of audio players.
with gr.Blocks() as demo:
    gr.Markdown(
        """# Sound search database """
    )
    inp = gr.Textbox(placeholder="What sound are you looking for ?")
    # Each result slot needs its own Audio component, hence the
    # comprehension — a single shared object would not work.
    out = [gr.Audio(label=f"{x}") for x in range(max_results)]
    inp.change(sound_search, inp, out)

demo.launch()