PierreHanna committed on
Commit
28b1346
·
1 Parent(s): 3bad066

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -8
app.py CHANGED
@@ -12,6 +12,8 @@ import datetime
12
  import joblib
13
 
14
  from huggingface_hub import hf_hub_download
 
 
15
  encoder_text_path = hf_hub_download(repo_id="PierreHanna/TextRetrieval", repo_type="space", filename=os.environ['ENCODER_TEXT'],
16
  use_auth_token=os.environ['TOKEN'])
17
  print("DEBUG ", encoder_text_path)
@@ -19,7 +21,8 @@ print("DEBUG ", encoder_text_path)
19
  os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
20
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
21
 
22
- python_path = hf_hub_download(repo_id="PierreHanna/TextRetrieval", repo_type="space", filename="models.py",
 
23
  use_auth_token=os.environ['TOKEN'])
24
  print(python_path)
25
  os.system('ls -la')
@@ -29,11 +32,11 @@ preprocess_model, model = get_models()
29
  index_path = hf_hub_download(repo_id="PierreHanna/TextRetrieval", repo_type="space", filename=os.environ['INDEX'],
30
  use_auth_token=os.environ['TOKEN'])
31
  indexnames_path = hf_hub_download(repo_id="PierreHanna/TextRetrieval", repo_type="space", filename=os.environ['INDEX_NAMES'],
32
- use_auth_token=os.environ['TOKEN'])
33
  catalog_path = hf_hub_download(repo_id="PierreHanna/TextRetrieval", repo_type="space", filename=os.environ['CATALOG'],
34
- use_auth_token=os.environ['TOKEN'])
35
- url_dict=get_durl(catalog_path)
36
- audio_names = joblib.load(open(indexnames_path, 'rb'))
37
  index = faiss.read_index(index_path)
38
  encoder_text = tf.keras.models.load_model(encoder_text_path)
39
 
@@ -51,8 +54,8 @@ def process(prompt, lang):
51
  print(" text representation computed.")
52
 
53
  # Embed text
54
- embed_query = encoder_text.predict(embed_prompt["pooled_output"])
55
- faiss.normalize_L2(embed_query)
56
  print(" text embed computed.")
57
 
58
  # distance computing
@@ -66,7 +69,7 @@ def process(prompt, lang):
66
  print(audio_names[I[0][i]], " with distance ", D[0][i])
67
  print(" url : ", url_dict[audio_names[I[0][i]]])
68
 
69
- return [url_dict[audio_names[I[0][0]]], url_dict[audio_names[I[0][1]]], url_dict[audio_names[I[0][2]]], url_dict[audio_names[I[0][3]]], url_dict[audio_names[I[0][4]]]]
70
 
71
  inputs = [gr.Textbox(label="Input", value="type your description", max_lines=2),
72
  gr.Radio(label="Language", choices=["en"], value="en")]
@@ -89,6 +92,8 @@ poc_examples = [#[["I love learning machine learning"],["autre"]]
89
  ["Big Band","en"],
90
  ["90 eurodance beat","en"],
91
  ]
 
 
92
 
93
  outputs = [gr.Audio(label="Track 1"), gr.Audio(label="Track 2"), gr.Audio(label="Track 3"), gr.Audio(label="Track 4"), gr.Audio(label="Track 5")]
94
  demo1 = gr.Interface(fn=process, inputs=inputs, outputs=outputs, examples=poc_examples, cache_examples=False)
 
12
  import joblib
13
 
14
  from huggingface_hub import hf_hub_download
15
+
16
+ # Hide the repo name
17
  encoder_text_path = hf_hub_download(repo_id="PierreHanna/TextRetrieval", repo_type="space", filename=os.environ['ENCODER_TEXT'],
18
  use_auth_token=os.environ['TOKEN'])
19
  print("DEBUG ", encoder_text_path)
 
21
  os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
22
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
23
 
24
+ # Hide the repo name
25
+ python_path = hf_hub_download(repo_id="PierreHanna/TextRetrieval", repo_type="space", filename="models.py", # cacher le nom du fichier !
26
  use_auth_token=os.environ['TOKEN'])
27
  print(python_path)
28
  os.system('ls -la')
 
32
  index_path = hf_hub_download(repo_id="PierreHanna/TextRetrieval", repo_type="space", filename=os.environ['INDEX'],
33
  use_auth_token=os.environ['TOKEN'])
34
  indexnames_path = hf_hub_download(repo_id="PierreHanna/TextRetrieval", repo_type="space", filename=os.environ['INDEX_NAMES'],
35
+ use_auth_token=os.environ['TOKEN']) #########
36
  catalog_path = hf_hub_download(repo_id="PierreHanna/TextRetrieval", repo_type="space", filename=os.environ['CATALOG'],
37
+ use_auth_token=os.environ['TOKEN']) ###############
38
+ url_dict=get_durl(catalog_path) ############
39
+ audio_names = joblib.load(open(indexnames_path, 'rb')) ############
40
  index = faiss.read_index(index_path)
41
  encoder_text = tf.keras.models.load_model(encoder_text_path)
42
 
 
54
  print(" text representation computed.")
55
 
56
  # Embed text
57
+ embed_query = encoder_text.predict(embed_prompt["pooled_output"]) #######
58
+ faiss.normalize_L2(embed_query)
59
  print(" text embed computed.")
60
 
61
  # distance computing
 
69
  print(audio_names[I[0][i]], " with distance ", D[0][i])
70
  print(" url : ", url_dict[audio_names[I[0][i]]])
71
 
72
+ return [url_dict[audio_names[I[0][0]]], url_dict[audio_names[I[0][1]]], url_dict[audio_names[I[0][2]]], url_dict[audio_names[I[0][3]]], url_dict[audio_names[I[0][4]]]] #######
73
 
74
  inputs = [gr.Textbox(label="Input", value="type your description", max_lines=2),
75
  gr.Radio(label="Language", choices=["en"], value="en")]
 
92
  ["Big Band","en"],
93
  ["90 eurodance beat","en"],
94
  ]
95
+ # also hide these texts so the user can't display arbitrary things....
96
+
97
 
98
  outputs = [gr.Audio(label="Track 1"), gr.Audio(label="Track 2"), gr.Audio(label="Track 3"), gr.Audio(label="Track 4"), gr.Audio(label="Track 5")]
99
  demo1 = gr.Interface(fn=process, inputs=inputs, outputs=outputs, examples=poc_examples, cache_examples=False)