missbaj committed on
Commit
eb889cd
·
verified ·
1 Parent(s): fe98e1a
Files changed (1) hide show
  1. app.py +19 -38
app.py CHANGED
@@ -1,43 +1,24 @@
1
  import gradio as gr
2
- import requests
3
- from transformers import AutoTokenizer, AutoModelForCausalLM
4
- import torch
5
 
6
- # Load the GPT-Neo 1.3B model and tokenizer
7
- tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
8
- model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
 
9
 
10
- # Function to fetch Bitcoin price from CoinGecko API
11
- def fetch_btc_price():
12
- url = "https://api.coingecko.com/api/v3/simple/price"
13
- params = {'ids': 'bitcoin', 'vs_currencies': 'usd'}
14
- response = requests.get(url, params=params)
15
- if response.status_code == 200:
16
- data = response.json()
17
- return data['bitcoin']['usd']
18
- return None
19
 
20
- # Generate crypto insights based on BTC price using GPT-Neo
21
- def generate_crypto_insight():
22
- btc_price = fetch_btc_price()
23
- if btc_price:
24
- prompt = f"Bitcoin's current price is ${btc_price}. What is the future outlook for the cryptocurrency market? Provide advice for long-term investors."
25
- inputs = tokenizer(prompt, return_tensors="pt")
26
- outputs = model.generate(**inputs, max_length=150)
27
- return tokenizer.decode(outputs[0], skip_special_tokens=True)
28
- else:
29
- return "Error fetching Bitcoin price."
30
 
31
- # Define the Gradio interface
32
- def crypto_analysis():
33
- return generate_crypto_insight()
34
-
35
- # Create Gradio interface
36
- gr_interface = gr.Interface(fn=crypto_analysis,
37
- inputs=None,
38
- outputs="text",
39
- title="Real-Time Crypto Analysis",
40
- description="Fetch the current Bitcoin price and get market insights using GPT-Neo.")
41
-
42
- # Launch the app
43
- gr_interface.launch()
 
1
  import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 
3
 
4
# One-time startup cost: pull the compact distilgpt2 checkpoint (or reuse the
# local cache) so every request shares the same tokenizer/model pair.
model_name = "distilgpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
8
 
9
def generate_response(prompt):
    """Generate a text continuation of *prompt* with the module-level model.

    Args:
        prompt: Free-form user text to continue.

    Returns:
        The decoded generation: the prompt itself followed by up to 150
        newly generated tokens.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # max_new_tokens bounds only the continuation; the previous
    # max_length=150 counted the prompt's tokens too, so a long prompt left
    # little or no room to generate (and could trigger a length error).
    outputs = model.generate(
        **inputs,
        max_new_tokens=150,
        num_return_sequences=1,
        # GPT-2-family tokenizers define no pad token; without this,
        # generate() emits a warning and falls back to eos — make it explicit.
        pad_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
 
 
 
 
14
 
15
# Single-textbox web UI: user prompt in, generated continuation out.
ui_config = dict(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="Crypto Analysis Model",
    description="Enter your prompt related to Bitcoin or cryptocurrency.",
)
iface = gr.Interface(**ui_config)

# Start the local Gradio server (blocks until shut down).
iface.launch()