BICORP committed on
Commit
57a0729
·
verified ·
1 Parent(s): d4ad2a1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -40
app.py CHANGED
@@ -1,45 +1,19 @@
1
import gradio as gr
from transformers import pipeline
import requests
import os

# mT5 is an encoder-decoder (seq2seq) model, so the correct pipeline task is
# "text2text-generation"; "text-generation" expects a causal LM and fails for
# this checkpoint. Both tiers used the identical checkpoint, so load the
# pipeline once and alias it rather than paying the download/memory cost twice.
standard_model = pipeline("text2text-generation", model="google/mt5-base")
premium_model = standard_model  # same checkpoint; alias avoids a second load

# Boosty credentials: read from the environment so the secret is never
# committed to source control; fall back to the original placeholder so
# existing behavior is unchanged when the variable is unset.
BOOSTY_API_KEY = os.environ.get("BOOSTY_API_KEY", "YOUR_BOOSTY_API_KEY")
BOOSTY_API_URL = "https://boosty.to/api/subscription_check"  # Replace with the actual Boosty API URL
13
# Function to check if a user is a premium member on Boosty
def is_premium_user(boosty_user_id: str) -> bool:
    """Return True if the Boosty user has an active premium subscription.

    Any network failure, non-200 response, or malformed payload is treated
    as "not premium" so the chat keeps working in a degraded mode.
    """
    try:
        response = requests.get(
            f"{BOOSTY_API_URL}?user_id={boosty_user_id}",
            headers={"Authorization": f"Bearer {BOOSTY_API_KEY}"},
            timeout=10,  # the original call had no timeout and could hang forever
        )
    except requests.RequestException:
        return False  # network error: fail closed (treat as non-premium)

    if response.status_code != 200:
        return False  # Assuming non-200 response means not premium

    # Assuming the response contains a field like 'is_premium': True/False
    try:
        return bool(response.json().get("is_premium", False))
    except ValueError:
        return False  # response body was not valid JSON
22
 
23
# Route the request to the tier-appropriate model and return its reply.
def chat_with_ai(user_input, boosty_user_id):
    """Generate a chat reply, using the premium model for premium subscribers."""
    # Premium subscribers get the premium pipeline; everyone else the standard one.
    model_to_use = premium_model if is_premium_user(boosty_user_id) else standard_model

    # Run generation and unwrap the first (and only) candidate.
    conversation = model_to_use(user_input)
    return conversation[0]["generated_text"]
34
-
35
# Build the Gradio front end: a free-text prompt plus the caller's Boosty ID.
iface = gr.Interface(
    fn=chat_with_ai,
    inputs=["text", gr.Textbox(label="Boosty User ID")],
    outputs="text",
    title="AI Chatbot",
    live=True,
)

# Serve the app (and its API) on all interfaces at port 7860, with a public share link.
iface.launch(server_name="0.0.0.0", server_port=7860, share=True)
 
1
import gradio as gr
from transformers import T5Tokenizer, T5ForConditionalGeneration, pipeline

# Checkpoint shared by the seq2seq model and its tokenizer below.
MODEL_NAME = "google/mt5-base"

# Instantiate the generation model and the matching tokenizer once at startup.
model = T5ForConditionalGeneration.from_pretrained(MODEL_NAME)
tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME)
8
# Set up the Gradio interface for text generation
def generate_text(prompt, max_length=50):
    """Generate a continuation for *prompt* with the mT5 model.

    Args:
        prompt: Input text to feed the model.
        max_length: Maximum generated sequence length in tokens; the default
            preserves the original hard-coded limit of 50.

    Returns:
        The decoded model output with special tokens stripped.
    """
    # Tokenize once and keep the attention mask: the original passed only
    # input_ids, which triggers a transformers warning and can degrade
    # generation when padding tokens are present.
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=max_length,
        num_return_sequences=1,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
14
 
15
# Expose generate_text through a minimal text-in / text-out Gradio UI.
interface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
)

# Start the web server with default settings.
interface.launch()