CosmoAI committed on
Commit
dca59c7
·
verified ·
1 Parent(s): e1aba2a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +141 -81
app.py CHANGED
@@ -1,34 +1,15 @@
1
  import os
2
  import gradio as gr
3
- from groq import Groq
4
  from dotenv import load_dotenv
5
  import json
6
  from deep_translator import GoogleTranslator
7
  import google.generativeai as genai
8
- load_dotenv()
9
-
10
- api1 = os.getenv("GEMINI_API_KEY")
11
- # api2 = os.getenv("Groq_key")
12
- # api3 = os.getenv("GRoq_key")
13
- # api2 = os.getenv("Groq_key")
14
- # api2 = os.getenv("Groq_key")
15
- # api2 = os.getenv("Groq_key")
16
- # api2 = os.getenv("Groq_key")
17
-
18
- apis = [
19
- api1
20
- ]
21
- # from google import genai
22
-
23
- # client = genai.Client()
24
 
25
- # response = client.models.generate_content(
26
- # model="gemini-2.5-flash",
27
- # contents="Explain how AI works in a few words",
28
- # )
29
-
30
- # print(response.text)
31
 
 
 
 
32
 
33
  def make_call(data):
34
  print(data)
@@ -38,45 +19,124 @@ def make_call(data):
38
  query = items['text']
39
  query = query.lower()
40
  answer = None
 
 
 
 
41
  while True:
42
- for api in apis:
43
- client = Groq(
44
- api_key=api,
45
- ) # Configure the model with the API key
46
- # query = st.text_input("Enter your query")
47
- prmptquery= f"Answer this query in a short message with wisdom, love and compassion, in context to bhagwat geeta, that feels like chatting to a person and provide references of shloks from chapters of bhagwat geeta which is relevant to the query. keep the answer short, precise and simple. Query= {query}"
48
- try:
49
- response = client.chat.completions.create(
50
- messages=[
51
- {
52
- "role": "user",
53
- "content": prmptquery,
54
- }
55
- ],
56
- model="mixtral-8x7b-32768",
57
- )
58
- answer = response.choices[0].message.content
59
- translated = GoogleTranslator(source='auto', target=language).translate(answer)
60
- except Exception as e:
61
- print(f"API call failed for: {e}")
62
- if answer:
63
- break
64
  if answer:
65
- break
 
66
  respo = {
67
- "message": translated,
68
- "action": "nothing",
69
- "function": "nothing",
70
- }
71
  print(translated)
72
  return json.dumps(respo)
73
 
74
-
75
-
76
  gradio_interface = gr.Interface(fn=make_call, inputs="text", outputs="text")
77
  gradio_interface.launch()
78
 
79
- # print(chat_completion)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
 
81
 
82
 
@@ -102,38 +162,38 @@ gradio_interface.launch()
102
 
103
 
104
 
105
- # # Text to 3D
106
 
107
- # import streamlit as st
108
- # import torch
109
- # from diffusers import ShapEPipeline
110
- # from diffusers.utils import export_to_gif
111
 
112
- # # Model loading (Ideally done once at the start for efficiency)
113
- # ckpt_id = "openai/shap-e"
114
- # @st.cache_resource # Caches the model for faster subsequent runs
115
- # def load_model():
116
- # return ShapEPipeline.from_pretrained(ckpt_id).to("cuda")
117
 
118
- # pipe = load_model()
119
 
120
- # # App Title
121
- # st.title("Shark 3D Image Generator")
122
 
123
- # # User Inputs
124
- # prompt = st.text_input("Enter your prompt:", "a shark")
125
- # guidance_scale = st.slider("Guidance Scale", 0.0, 20.0, 15.0, step=0.5)
126
 
127
- # # Generate and Display Images
128
- # if st.button("Generate"):
129
- # with st.spinner("Generating images..."):
130
- # images = pipe(
131
- # prompt,
132
- # guidance_scale=guidance_scale,
133
- # num_inference_steps=64,
134
- # size=256,
135
- # ).images
136
- # gif_path = export_to_gif(images, "shark_3d.gif")
137
 
138
- # st.image(images[0]) # Display the first image
139
- # st.success("GIF saved as shark_3d.gif")
 
1
  import os
2
  import gradio as gr
 
3
  from dotenv import load_dotenv
4
  import json
5
  from deep_translator import GoogleTranslator
6
  import google.generativeai as genai
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
+ load_dotenv()
 
 
 
 
 
9
 
10
+ # Configure the Gemini API with your API key
11
+ GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
12
+ genai.configure(api_key=GEMINI_API_KEY)
13
 
14
  def make_call(data):
15
  print(data)
 
19
  query = items['text']
20
  query = query.lower()
21
  answer = None
22
+
23
+ # Use the GenerativeModel class to interact with Gemini
24
+ model = genai.GenerativeModel('gemini-pro') # You can choose a different Gemini model
25
+
26
  while True:
27
+ try:
28
+ # Craft your prompt specific for Gemini and desired output format
29
+ prompt_query = f"Answer this query in a short message with wisdom, love, and compassion, in context to Bhagavad Gita, that feels like chatting to a person and provide references of shlokas from chapters of Bhagavad Gita which are relevant to the query. Keep the answer short, precise, and simple. Query: {query}"
30
+
31
+ # Generate content using Gemini
32
+ response = model.generate_content(prompt_query)
33
+
34
+ # Access the generated text from the response
35
+ answer = response.text # The generated content is in the 'text' attribute
36
+
37
+ # Translate the answer
38
+ translated = GoogleTranslator(source='auto', target=language).translate(answer)
39
+ except Exception as e:
40
+ print(f"API call failed for: {e}")
 
 
 
 
 
 
 
 
41
  if answer:
42
+ break
43
+
44
  respo = {
45
+ "message": translated,
46
+ "action": "nothing",
47
+ "function": "nothing",
48
+ }
49
  print(translated)
50
  return json.dumps(respo)
51
 
 
 
52
  gradio_interface = gr.Interface(fn=make_call, inputs="text", outputs="text")
53
  gradio_interface.launch()
54
 
55
+
56
+
57
+ # import os
58
+ # import gradio as gr
59
+ # from groq import Groq
60
+ # from dotenv import load_dotenv
61
+ # import json
62
+ # from deep_translator import GoogleTranslator
63
+ # import google.generativeai as genai
64
+ # load_dotenv()
65
+
66
+
67
+ # api1 = os.getenv("GEMINI_API_KEY")
68
+ # genai.configure(api_key=api1)
69
+
70
+
71
+ # # api2 = os.getenv("Groq_key")
72
+ # # api3 = os.getenv("GRoq_key")
73
+ # # api2 = os.getenv("Groq_key")
74
+ # # api2 = os.getenv("Groq_key")
75
+ # # api2 = os.getenv("Groq_key")
76
+ # # api2 = os.getenv("Groq_key")
77
+
78
+ # # apis = [
79
+ # # api1
80
+ # # ]
81
+ # # from google import genai
82
+
83
+ # # client = genai.Client()
84
+
85
+ # # response = client.models.generate_content(
86
+ # # model="gemini-2.5-flash",
87
+ # # contents="Explain how AI works in a few words",
88
+ # # )
89
+
90
+ # # print(response.text)
91
+
92
+
93
+ # def make_call(data):
94
+ # print(data)
95
+ # newdata = data.replace("'", '"')
96
+ # items = json.loads(newdata)
97
+ # language = items['lang']
98
+ # query = items['text']
99
+ # query = query.lower()
100
+ # answer = None
101
+ # while True:
102
+ # for api in apis:
103
+ # client = genai.Client(
104
+ # api_key=api,
105
+ # ) # Configure the model with the API key
106
+ # # query = st.text_input("Enter your query")
107
+ # prmptquery= f"Answer this query in a short message with wisdom, love and compassion, in context to bhagwat geeta, that feels like chatting to a person and provide references of shloks from chapters of bhagwat geeta which is relevant to the query. keep the answer short, precise and simple. Query= {query}"
108
+ # try:
109
+ # response = client.chat.completions.create(
110
+ # messages=[
111
+ # {
112
+ # "role": "user",
113
+ # "content": prmptquery,
114
+ # }
115
+ # ],
116
+ # model="mixtral-8x7b-32768",
117
+ # )
118
+ # answer = response.choices[0].message.content
119
+ # translated = GoogleTranslator(source='auto', target=language).translate(answer)
120
+ # except Exception as e:
121
+ # print(f"API call failed for: {e}")
122
+ # if answer:
123
+ # break
124
+ # if answer:
125
+ # break
126
+ # respo = {
127
+ # "message": translated,
128
+ # "action": "nothing",
129
+ # "function": "nothing",
130
+ # }
131
+ # print(translated)
132
+ # return json.dumps(respo)
133
+
134
+
135
+
136
+ # gradio_interface = gr.Interface(fn=make_call, inputs="text", outputs="text")
137
+ # gradio_interface.launch()
138
+
139
+ # # print(chat_completion)
140
 
141
 
142
 
 
162
 
163
 
164
 
165
+ # # # Text to 3D
166
 
167
+ # # import streamlit as st
168
+ # # import torch
169
+ # # from diffusers import ShapEPipeline
170
+ # # from diffusers.utils import export_to_gif
171
 
172
+ # # # Model loading (Ideally done once at the start for efficiency)
173
+ # # ckpt_id = "openai/shap-e"
174
+ # # @st.cache_resource # Caches the model for faster subsequent runs
175
+ # # def load_model():
176
+ # # return ShapEPipeline.from_pretrained(ckpt_id).to("cuda")
177
 
178
+ # # pipe = load_model()
179
 
180
+ # # # App Title
181
+ # # st.title("Shark 3D Image Generator")
182
 
183
+ # # # User Inputs
184
+ # # prompt = st.text_input("Enter your prompt:", "a shark")
185
+ # # guidance_scale = st.slider("Guidance Scale", 0.0, 20.0, 15.0, step=0.5)
186
 
187
+ # # # Generate and Display Images
188
+ # # if st.button("Generate"):
189
+ # # with st.spinner("Generating images..."):
190
+ # # images = pipe(
191
+ # # prompt,
192
+ # # guidance_scale=guidance_scale,
193
+ # # num_inference_steps=64,
194
+ # # size=256,
195
+ # # ).images
196
+ # # gif_path = export_to_gif(images, "shark_3d.gif")
197
 
198
+ # # st.image(images[0]) # Display the first image
199
+ # # st.success("GIF saved as shark_3d.gif")