Spaces:
Sleeping
Sleeping
AumCoreAI committed on
Commit ·
b52ad0f
0
Parent(s):
Fixed everything back to original
Browse files- Dockerfile +6 -0
- app.py +24 -0
- main.py +19 -0
- requirements.txt +1 -0
Dockerfile
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.10-slim

WORKDIR /app

# Copy only the dependency manifest first so the (slow) pip-install layer
# stays cached until requirements.txt itself changes — copying the whole
# source tree before installing busts the cache on every code edit.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Now copy the rest of the application source.
COPY . .

# Gradio's default serving port; app.py launches on 7860 explicitly.
EXPOSE 7860
CMD ["python", "app.py"]
|
app.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import gradio as gr
import os

from main import AICore

# Single shared model wrapper; the key comes from the Space's secret store.
api_key = os.environ.get("GROQ_API_KEY")
ai = AICore(api_key=api_key)


def chat_func(text, img, history):
    """Forward the user's text (and optional image) to the model.

    Appends a (user, assistant) pair to *history* and clears the textbox.
    When both inputs are empty, nothing is sent.
    """
    if not text and not img:
        return "", history
    prompt = text if text else "Describe this image"
    reply = ai.get_response(prompt, img)
    label = text if text else "Image Sent"
    history.append((label, reply))
    return "", history


with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 AumCore-M7B Vision AI (Direct Update)")
    chat = gr.Chatbot(height=500)
    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="Sawal puchiye...", scale=7)
        img = gr.Image(type="filepath", label="Upload Photo", scale=3)
    # Submitting the textbox sends text + current image + history to the model.
    txt.submit(chat_func, [txt, img, chat], [txt, chat])

if __name__ == "__main__":
    # 0.0.0.0 so the server is reachable from outside the container.
    demo.launch(server_name="0.0.0.0", server_port=7860)
|
main.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import os
|
| 3 |
+
from groq import Groq
|
| 4 |
+
|
| 5 |
+
class AICore:
    """Thin wrapper around the Groq chat-completions API.

    Holds one `Groq` client; `get_response` is the single entry point used
    by the Gradio UI in app.py.
    """

    def __init__(self, api_key=None):
        # Fall back to the GROQ_API_KEY environment variable when no
        # explicit key is supplied (mirrors app.py's own lookup).
        self.client = Groq(api_key=api_key or os.environ.get("GROQ_API_KEY"))

    def get_response(self, prompt, image_path=None):
        """Return the model's reply for *prompt*, optionally with an image.

        Bug fix: the original accepted ``image_path`` but never attached the
        image to the request, so the vision model only ever saw the text.
        The image is now base64-encoded into an ``image_url`` content part
        per the Groq vision-model message format.
        """
        try:
            if image_path:
                import base64  # stdlib; imported lazily, only on the image path

                with open(image_path, "rb") as fh:
                    b64 = base64.b64encode(fh.read()).decode("utf-8")
                # NOTE(review): Gradio hands us a filepath; assuming JPEG-compatible
                # data in the data URL — confirm against uploaded formats.
                content = [
                    {"type": "text", "text": prompt},
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{b64}"},
                    },
                ]
                messages = [{"role": "user", "content": content}]
            else:
                messages = [{"role": "user", "content": prompt}]
            completion = self.client.chat.completions.create(
                model="llama-3.2-11b-vision-preview",
                messages=messages,
            )
            return completion.choices[0].message.content
        except Exception as e:
            # Best-effort: surface the error as chat text so the UI never crashes.
            return f"Error: {str(e)}"
|
requirements.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
fastapi
uvicorn
groq
python-multipart
gradio
|