BICORP committed on
Commit
5b99dbd
·
verified ·
1 Parent(s): 2de9f3a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -48
app.py CHANGED
@@ -3,7 +3,7 @@ from huggingface_hub import InferenceClient
3
  import time
4
  from collections import defaultdict
5
 
6
- # Model definitions
7
  model_value = {
8
  "Lake 1": "google/mt5-base",
9
  "Lake 1 Flash": "google/gemma-2-2b-it",
@@ -17,11 +17,11 @@ access_codes = {
17
  "8tj82-2UvU-8Lft-Dupb": "plus"
18
  }
19
 
20
- # Model access levels
21
  model_access_levels = {
22
  "google/mt5-base": "everyone",
23
- "google/gemma-2-2b-it": "plus", # Corrected to a string
24
- "google/mt5-large": "plus", # Corrected to a string
25
  "google-bert/bert-base-multilingual-cased": "pro"
26
  }
27
 
@@ -38,69 +38,71 @@ def recommend_model(current_model, access_level):
38
  return "Consider upgrading to Lake 1 Pro for advanced features."
39
  return None
40
 
41
- def can_use_flash_model(user_id):
42
  current_time = time.time()
43
- usage_tracker[user_id] = [t for t in usage_tracker[user_id] if current_time - t < 5 * 3600]
 
44
 
45
- if len(usage_tracker[user_id]) < 20:
46
- usage_tracker[user_id].append(current_time)
47
- return True
48
- else:
49
- return False
 
 
 
50
 
51
- def respond(message, history, model, access_level):
52
- # Build the message history as a single string
53
- full_message = ""
54
  for val in history:
55
  if val[0]:
56
- full_message += f":User {val[0]}\n" # User message
57
  if val[1]:
58
- full_message += f"Assistant: {val[1]}\n" # Assistant response
59
 
60
- # Add the current user message
61
- full_message += f":User {message}\n"
62
 
63
  client = InferenceClient(model)
64
  response = ""
65
- try:
66
- # Call the chat completion method with the correct format
67
- response_message = client.chat_completion(
68
- model=model,
69
- inputs=full_message, # Use the concatenated string
70
- max_tokens=512,
71
- temperature=0.7,
72
- top_p=0.95,
73
- stream=False # Disable streaming
74
- )
75
- response = response_message['choices'][0]['text'] # Access the content directly
76
- except Exception as e:
77
- return f"Error during model inference: {str(e)}", history
78
 
79
  history.append((message, response))
80
  return response, history
81
 
82
- def main(message, history, model_name, access_code, user_id):
83
  model = model_value[model_name]
84
  access_level = check_access_code(access_code)
85
 
 
86
  if model == model_value["Lake 1 Flash"]:
87
- if not can_use_flash_model(user_id):
88
  return "Usage limit reached for Lake 1 Flash. Please try again later.", history
89
 
90
- if model == model_value["Lake 1"]:
91
- return respond(message, history, model, "everyone")
92
- elif access_level:
93
- required_access_level = model_access_levels.get(model, None)
94
- if access_level == required_access_level:
95
- return respond(message, history, model, access_level)
96
- else:
97
- recommendation = recommend_model(model_name, access_level)
98
- if recommendation:
99
- return f"You do not have access to the {model_name}. {recommendation} Please enter a valid access code for this model.", history
100
- else:
101
- return f"You do not have access to the {model_name}. Please enter a valid access code for this model.", history
 
 
 
102
  else:
103
- return "Invalid access code. Please enter a valid code to use this service .", history
 
 
104
 
105
  with gr.Blocks() as demo:
106
  gr.Markdown("## Welcome to the Model Interaction App")
@@ -109,7 +111,6 @@ with gr.Blocks() as demo:
109
  with gr.Row():
110
  with gr.Column():
111
  access_code_input = gr.Textbox(label="Access Code", placeholder="Enter your special access code (if needed)")
112
- user_id_input = gr.Textbox(label="User ID", placeholder="Enter your user ID")
113
 
114
  with gr.Row():
115
  with gr.Column():
@@ -128,7 +129,7 @@ with gr.Blocks() as demo:
128
 
129
  submit_button.click(
130
  fn=main,
131
- inputs=[message_input, history, model_dropdown, access_code_input, user_id_input],
132
  outputs=[response_output, history]
133
  )
134
 
 
3
  import time
4
  from collections import defaultdict
5
 
6
+ # Model definitions with real models
7
  model_value = {
8
  "Lake 1": "google/mt5-base",
9
  "Lake 1 Flash": "google/gemma-2-2b-it",
 
17
  "8tj82-2UvU-8Lft-Dupb": "plus"
18
  }
19
 
20
+ # Model access levels with real models
21
  model_access_levels = {
22
  "google/mt5-base": "everyone",
23
+ "google/gemma-2-2b-it": "plus",
24
+ "google/mt5-large": "plus",
25
  "google-bert/bert-base-multilingual-cased": "pro"
26
  }
27
 
 
38
  return "Consider upgrading to Lake 1 Pro for advanced features."
39
  return None
40
 
41
def can_use_flash_model():
    """Rate-limit access to the Flash model: at most 20 uses per rolling 5-hour window.

    Returns:
        True when the call is allowed (and records the use), False otherwise.

    NOTE(review): the previous revision tracked usage per ``user_id``; this
    revision has no user identity, so usage is tracked under one shared key —
    confirm a global (not per-user) limit is the intended behavior.
    """
    current_time = time.time()
    window = 5 * 3600  # 5-hour rolling window, in seconds

    # BUG FIX: the original iterated usage_tracker.keys(), which is empty on
    # the very first call (usage_tracker is a defaultdict), so every request
    # was rejected before any usage existed. Use a single shared bucket.
    recent = [t for t in usage_tracker["flash"] if current_time - t < window]
    usage_tracker["flash"] = recent

    if len(recent) < 20:
        recent.append(current_time)
        return True
    return False
51
+
52
def respond(message, history, model):
    """Send ``message`` (plus prior ``history``) to ``model`` and stream back the reply.

    Args:
        message: the current user message (str).
        history: list of (user_text, assistant_text) pairs, as used by gr.Chatbot.
        model: Hugging Face model id passed to InferenceClient.

    Returns:
        (response_text, updated_history) — history gains the new (message, response) pair.
    """
    # Rebuild the conversation in the chat-completion message format.
    messages = []
    for user_text, assistant_text in history:
        if user_text:
            messages.append({"role": "user", "content": user_text})
        if assistant_text:
            messages.append({"role": "assistant", "content": assistant_text})
    messages.append({"role": "user", "content": message})

    client = InferenceClient(model)
    response = ""
    # BUG FIX: the streaming loop previously reused the name `message` for each
    # chunk, shadowing the user's message; history.append() then stored the last
    # stream-chunk object instead of the user's text.
    for chunk in client.chat_completion(
        messages,
        max_tokens=512,
        stream=True,
        temperature=0.7,
        top_p=0.95,
    ):
        token = chunk.choices[0].delta.content
        if token:  # BUG FIX: the final streamed chunk may carry None content
            response += token

    history.append((message, response))
    return response, history
77
 
78
def main(message, history, model_name, access_code):
    """Route a chat message to the selected model, enforcing access levels.

    Args:
        message: current user message.
        history: list of (user_text, assistant_text) pairs.
        model_name: display name, a key of ``model_value``.
        access_code: code looked up via ``check_access_code``.

    Returns:
        (response_or_error_text, updated_history).
    """
    model = model_value[model_name]
    access_level = check_access_code(access_code)

    # Lake 1 Flash is rate-limited regardless of access level.
    if model == model_value["Lake 1 Flash"]:
        if not can_use_flash_model():
            return "Usage limit reached for Lake 1 Flash. Please try again later.", history

    required_access_level = model_access_levels.get(model, None)

    # Models open to everyone need no access code.
    if required_access_level == "everyone":
        return respond(message, history, model)

    # BUG FIX: the "Invalid access code" message was unreachable dead code after
    # an if/else in which both arms returned. Reject invalid codes explicitly
    # before the tier checks, matching the pre-refactor behavior.
    if not access_level:
        return "Invalid access code. Please enter a valid code to use this service.", history

    # "pro" unlocks everything; "plus" unlocks plus-level (and open) models.
    if access_level == "pro" or (access_level == "plus" and required_access_level in ["plus", "everyone"]):
        return respond(message, history, model)

    # Insufficient tier: suggest an upgrade where one exists.
    recommendation = recommend_model(model_name, access_level)
    if recommendation:
        return f"You do not have access to the {model_name}. {recommendation} Please enter a valid access code for this model.", history
    return f"You do not have access to the {model_name}. Please enter a valid access code for this model.", history
106
 
107
  with gr.Blocks() as demo:
108
  gr.Markdown("## Welcome to the Model Interaction App")
 
111
  with gr.Row():
112
  with gr.Column():
113
  access_code_input = gr.Textbox(label="Access Code", placeholder="Enter your special access code (if needed)")
 
114
 
115
  with gr.Row():
116
  with gr.Column():
 
129
 
130
  submit_button.click(
131
  fn=main,
132
+ inputs=[message_input, history, model_dropdown, access_code_input],
133
  outputs=[response_output, history]
134
  )
135