leadr64 committed on
Commit
15729e3
·
1 Parent(s): 0d3e4a3

Ajouter le script Gradio et les dépendances

Browse files
Files changed (1) hide show
  1. app.py +13 -8
app.py CHANGED
@@ -1,23 +1,29 @@
1
  import os
2
-
3
  import gradio as gr
4
  from qdrant_client import QdrantClient
5
  from transformers import ClapModel, ClapProcessor
6
 
7
- # Loading the Qdrant DB in local ###################################################################
8
- client = QdrantClient("https://ebe79742-e3ac-4d09-a2c6-63946024cc7a.us-east4-0.gcp.cloud.qdrant.io", api_key="<REDACTED — leaked API key; revoke this credential>")
 
 
 
 
 
 
 
 
9
  print("[INFO] Client created...")
10
 
11
- # loading the model
12
  print("[INFO] Loading the model...")
13
  model_name = "laion/larger_clap_general"
14
  model = ClapModel.from_pretrained(model_name)
15
  processor = ClapProcessor.from_pretrained(model_name)
16
 
17
- # Gradio Interface #################################################################################
18
  max_results = 10
19
 
20
-
21
  def sound_search(query):
22
  text_inputs = processor(text=query, return_tensors="pt")
23
  text_embed = model.get_text_features(**text_inputs)[0]
@@ -34,13 +40,12 @@ def sound_search(query):
34
  for hit in hits
35
  ]
36
 
37
-
38
  with gr.Blocks() as demo:
39
  gr.Markdown(
40
  """# Sound search database """
41
  )
42
  inp = gr.Textbox(placeholder="What sound are you looking for ?")
43
- out = [gr.Audio(label=f"{x}") for x in range(max_results)] # Necessary to have different objs
44
  inp.change(sound_search, inp, out)
45
 
46
  demo.launch()
 
1
  import os
 
2
  import gradio as gr
3
  from qdrant_client import QdrantClient
4
  from transformers import ClapModel, ClapProcessor
5
 
6
+ # Charger les variables d'environnement
7
+ QDRANT_URL = os.getenv("QDRANT_URL")
8
+ QDRANT_API_KEY = os.getenv("QDRANT_API_KEY")
9
+
10
+ # Vérifiez si les variables d'environnement sont définies
11
+ if not QDRANT_URL or not QDRANT_API_KEY:
12
+ raise ValueError("Veuillez définir les variables d'environnement QDRANT_URL et QDRANT_API_KEY")
13
+
14
+ # Chargement de la base de données Qdrant en local
15
+ client = QdrantClient(QDRANT_URL, api_key=QDRANT_API_KEY)
16
  print("[INFO] Client created...")
17
 
18
+ # Chargement du modèle
19
  print("[INFO] Loading the model...")
20
  model_name = "laion/larger_clap_general"
21
  model = ClapModel.from_pretrained(model_name)
22
  processor = ClapProcessor.from_pretrained(model_name)
23
 
24
+ # Interface Gradio
25
  max_results = 10
26
 
 
27
  def sound_search(query):
28
  text_inputs = processor(text=query, return_tensors="pt")
29
  text_embed = model.get_text_features(**text_inputs)[0]
 
40
  for hit in hits
41
  ]
42
 
 
43
  with gr.Blocks() as demo:
44
  gr.Markdown(
45
  """# Sound search database """
46
  )
47
  inp = gr.Textbox(placeholder="What sound are you looking for ?")
48
+ out = [gr.Audio(label=f"{x}") for x in range(max_results)] # Nécessaire d'avoir des objets différents
49
  inp.change(sound_search, inp, out)
50
 
51
  demo.launch()