Spaces:
Sleeping
Sleeping
Gaurav vashistha
committed on
Commit
·
e0ea972
1
Parent(s):
0a9c963
feat: Production deployment release
Browse files- .dockerignore +9 -0
- .gitignore +6 -0
- Dockerfile +9 -0
- README.md +32 -1
- continuity_agent/agent.py +2 -2
- server.py +10 -25
- stitch_continuity_dashboard/code.html +7 -26
- tests/create_assets.py +23 -0
- tests/test_api.py +34 -0
.dockerignore
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__
|
| 2 |
+
*.pyc
|
| 3 |
+
.git
|
| 4 |
+
.env
|
| 5 |
+
venv
|
| 6 |
+
.venv
|
| 7 |
+
node_modules
|
| 8 |
+
.DS_Store
|
| 9 |
+
outputs/
|
.gitignore
CHANGED
|
@@ -1,3 +1,9 @@
|
|
| 1 |
.env
|
| 2 |
__pycache__
|
| 3 |
venv/
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
.env
|
| 2 |
__pycache__
|
| 3 |
venv/
|
| 4 |
+
.venv/
|
| 5 |
+
outputs/
|
| 6 |
+
*.pyc
|
| 7 |
+
.DS_Store
|
| 8 |
+
*.mp4
|
| 9 |
+
*.png
|
Dockerfile
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
FROM python:3.10-slim
WORKDIR /app
# OpenCV's runtime needs libGL and glib; --no-install-recommends and the
# apt-list cleanup keep the image slim.
RUN apt-get update && apt-get install -y --no-install-recommends libgl1-mesa-glx libglib2.0-0 && rm -rf /var/lib/apt/lists/*
# Copy requirements first so the pip layer is cached across source-only changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
# Hugging Face Spaces runs the container as a non-root user, so the outputs
# directory served by the app must be writable by anyone.
RUN mkdir -p outputs && chmod 777 outputs
EXPOSE 7860
CMD ["uvicorn", "server:app", "--host", "0.0.0.0", "--port", "7860"]
README.md
CHANGED
|
@@ -1 +1,32 @@
|
|
| 1 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Continuity
|
| 3 |
+
emoji: 🎬
|
| 4 |
+
colorFrom: purple
|
| 5 |
+
colorTo: gray
|
| 6 |
+
sdk: docker
|
| 7 |
+
app_port: 7860
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
# Continuity
|
| 11 |
+
|
| 12 |
+
AI-powered video post-production tool that automatically generates seamless bridging transitions between two video clips using Gemini 1.5 Flash (Director) and Wan 2.2 (Cinematographer).
|
| 13 |
+
|
| 14 |
+
## How it Works
|
| 15 |
+
|
| 16 |
+
Continuity behaves like a professional film crew inside your browser, orchestrating a multi-step workflow to bridge scenes:
|
| 17 |
+
|
| 18 |
+
1. **Ingest**: You upload your starting clip (Scene A) and ending clip (Scene C).
|
| 19 |
+
2. **Director Node (Gemini 1.5 Flash)**:
|
| 20 |
+
* Analyzes the visual content, motion, and lighting of both clips.
|
| 21 |
+
* Drafts a creative direction for the transition (Scene B) to ensure narrative consistency.
|
| 22 |
+
3. **Cinematographer Node (Wan 2.2)**:
|
| 23 |
+
* Takes the Director's prompt and generates the bridging video content.
|
| 24 |
+
* Ensures the transition matches the style and tone of the input footage.
|
| 25 |
+
4. **Assembly**: The system stitches the generated bridge with the original clips (logic handled by the agent) and presents the final result.
|
| 26 |
+
|
| 27 |
+
## Usage
|
| 28 |
+
|
| 29 |
+
1. Upload **Scene A** (Start) and **Scene C** (End).
|
| 30 |
+
2. Add optional **Director Notes** (e.g., "Slow dissolve", "Cyber glitch").
|
| 31 |
+
3. Click **Generate Transition**.
|
| 32 |
+
4. Preview the bridge generated by the AI agent.
|
continuity_agent/agent.py
CHANGED
|
@@ -29,7 +29,7 @@ class ContinuityState(TypedDict):
|
|
| 29 |
|
| 30 |
# Node 1: Analyst
|
| 31 |
def analyze_videos(state: ContinuityState) -> dict:
|
| 32 |
-
print("---
|
| 33 |
|
| 34 |
video_a_url = state['video_a_url']
|
| 35 |
video_c_url = state['video_c_url']
|
|
@@ -103,7 +103,7 @@ def analyze_videos(state: ContinuityState) -> dict:
|
|
| 103 |
|
| 104 |
# Node 2: Generator (Wan 2.2 First Last Frame)
|
| 105 |
def generate_video(state: ContinuityState) -> dict:
|
| 106 |
-
print("---
|
| 107 |
|
| 108 |
prompt = state.get('veo_prompt', "")
|
| 109 |
path_a = state.get('video_a_local_path')
|
|
|
|
| 29 |
|
| 30 |
# Node 1: Analyst
|
| 31 |
def analyze_videos(state: ContinuityState) -> dict:
|
| 32 |
+
print("--- Analyst Node (Director) ---")
|
| 33 |
|
| 34 |
video_a_url = state['video_a_url']
|
| 35 |
video_c_url = state['video_c_url']
|
|
|
|
| 103 |
|
| 104 |
# Node 2: Generator (Wan 2.2 First Last Frame)
|
| 105 |
def generate_video(state: ContinuityState) -> dict:
|
| 106 |
+
print("--- Generator Node (Wan 2.2) ---")
|
| 107 |
|
| 108 |
prompt = state.get('veo_prompt', "")
|
| 109 |
path_a = state.get('video_a_local_path')
|
server.py
CHANGED
|
@@ -1,17 +1,15 @@
|
|
| 1 |
from fastapi import FastAPI, HTTPException, UploadFile, Form, File
|
| 2 |
from fastapi.middleware.cors import CORSMiddleware
|
| 3 |
from fastapi.staticfiles import StaticFiles
|
|
|
|
| 4 |
import uvicorn
|
| 5 |
import os
|
| 6 |
import shutil
|
| 7 |
import uuid
|
| 8 |
-
|
| 9 |
-
# Import from the subpackage as before
|
| 10 |
from continuity_agent.agent import app as continuity_graph
|
| 11 |
|
| 12 |
app = FastAPI(title="Continuity", description="AI Video Bridging Service")
|
| 13 |
|
| 14 |
-
# 1. Enable CORS
|
| 15 |
app.add_middleware(
|
| 16 |
CORSMiddleware,
|
| 17 |
allow_origins=["*"],
|
|
@@ -20,11 +18,14 @@ app.add_middleware(
|
|
| 20 |
allow_headers=["*"],
|
| 21 |
)
|
| 22 |
|
| 23 |
-
# 2. Setup Static Files for Outputs
|
| 24 |
OUTPUT_DIR = "outputs"
|
| 25 |
os.makedirs(OUTPUT_DIR, exist_ok=True)
|
| 26 |
app.mount("/outputs", StaticFiles(directory=OUTPUT_DIR), name="outputs")
|
| 27 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
@app.post("/generate-transition")
|
| 29 |
async def generate_transition(
|
| 30 |
video_a: UploadFile = File(...),
|
|
@@ -32,29 +33,21 @@ async def generate_transition(
|
|
| 32 |
prompt: str = Form("Cinematic transition")
|
| 33 |
):
|
| 34 |
try:
|
| 35 |
-
# Generate unique ID for this request
|
| 36 |
request_id = str(uuid.uuid4())
|
| 37 |
-
|
| 38 |
-
# Save inputs
|
| 39 |
-
# Preserve extension
|
| 40 |
ext_a = os.path.splitext(video_a.filename)[1] or ".mp4"
|
| 41 |
ext_c = os.path.splitext(video_c.filename)[1] or ".mp4"
|
| 42 |
-
|
| 43 |
path_a = os.path.join(OUTPUT_DIR, f"{request_id}_a{ext_a}")
|
| 44 |
path_c = os.path.join(OUTPUT_DIR, f"{request_id}_c{ext_c}")
|
| 45 |
|
| 46 |
with open(path_a, "wb") as buffer:
|
| 47 |
shutil.copyfileobj(video_a.file, buffer)
|
| 48 |
-
|
| 49 |
with open(path_c, "wb") as buffer:
|
| 50 |
shutil.copyfileobj(video_c.file, buffer)
|
| 51 |
|
| 52 |
-
# Initialize State with LOCAL PATHS
|
| 53 |
-
# We don't need URLs for the new logic, but state definition might map them.
|
| 54 |
-
# agent.py logic checks video_a_local_path if present.
|
| 55 |
initial_state = {
|
| 56 |
-
"video_a_url": "local_upload",
|
| 57 |
-
"video_c_url": "local_upload",
|
| 58 |
"user_notes": prompt,
|
| 59 |
"veo_prompt": prompt,
|
| 60 |
"video_a_local_path": os.path.abspath(path_a),
|
|
@@ -63,28 +56,20 @@ async def generate_transition(
|
|
| 63 |
"status": "started"
|
| 64 |
}
|
| 65 |
|
| 66 |
-
# Invoke Agent
|
| 67 |
result = continuity_graph.invoke(initial_state)
|
| 68 |
-
|
| 69 |
-
# The agent returns 'generated_video_url' which is a local absolute path (e.g., from tempfile or cache)
|
| 70 |
-
# We need to copy/move this to our STATIC directory to serve it.
|
| 71 |
gen_path = result.get("generated_video_url")
|
| 72 |
|
| 73 |
if not gen_path or "Error" in gen_path:
|
| 74 |
raise HTTPException(status_code=500, detail=f"Generation failed: {gen_path}")
|
| 75 |
|
| 76 |
-
# Copy generated video to outputs
|
| 77 |
final_filename = f"{request_id}_bridge.mp4"
|
| 78 |
final_output_path = os.path.join(OUTPUT_DIR, final_filename)
|
|
|
|
| 79 |
|
| 80 |
-
shutil.copy(gen_path, final_output_path)
|
| 81 |
-
|
| 82 |
-
# Return URL relative to server root
|
| 83 |
return {"video_url": f"/outputs/{final_filename}"}
|
| 84 |
-
|
| 85 |
except Exception as e:
|
| 86 |
print(f"Server Error: {e}")
|
| 87 |
raise HTTPException(status_code=500, detail=str(e))
|
| 88 |
|
| 89 |
if __name__ == "__main__":
|
| 90 |
-
uvicorn.run("server:app", host="0.0.0.0", port=
|
|
|
|
| 1 |
from fastapi import FastAPI, HTTPException, UploadFile, Form, File
|
| 2 |
from fastapi.middleware.cors import CORSMiddleware
|
| 3 |
from fastapi.staticfiles import StaticFiles
|
| 4 |
+
from fastapi.responses import FileResponse
|
| 5 |
import uvicorn
|
| 6 |
import os
|
| 7 |
import shutil
|
| 8 |
import uuid
|
|
|
|
|
|
|
| 9 |
from continuity_agent.agent import app as continuity_graph
|
| 10 |
|
| 11 |
app = FastAPI(title="Continuity", description="AI Video Bridging Service")
|
| 12 |
|
|
|
|
| 13 |
app.add_middleware(
|
| 14 |
CORSMiddleware,
|
| 15 |
allow_origins=["*"],
|
|
|
|
| 18 |
allow_headers=["*"],
|
| 19 |
)
|
| 20 |
|
|
|
|
| 21 |
OUTPUT_DIR = "outputs"
|
| 22 |
os.makedirs(OUTPUT_DIR, exist_ok=True)
|
| 23 |
app.mount("/outputs", StaticFiles(directory=OUTPUT_DIR), name="outputs")
|
| 24 |
|
| 25 |
+
@app.get("/")
|
| 26 |
+
async def read_root():
|
| 27 |
+
return FileResponse("stitch_continuity_dashboard/code.html")
|
| 28 |
+
|
| 29 |
@app.post("/generate-transition")
|
| 30 |
async def generate_transition(
|
| 31 |
video_a: UploadFile = File(...),
|
|
|
|
| 33 |
prompt: str = Form("Cinematic transition")
|
| 34 |
):
|
| 35 |
try:
|
|
|
|
| 36 |
request_id = str(uuid.uuid4())
|
|
|
|
|
|
|
|
|
|
| 37 |
ext_a = os.path.splitext(video_a.filename)[1] or ".mp4"
|
| 38 |
ext_c = os.path.splitext(video_c.filename)[1] or ".mp4"
|
| 39 |
+
|
| 40 |
path_a = os.path.join(OUTPUT_DIR, f"{request_id}_a{ext_a}")
|
| 41 |
path_c = os.path.join(OUTPUT_DIR, f"{request_id}_c{ext_c}")
|
| 42 |
|
| 43 |
with open(path_a, "wb") as buffer:
|
| 44 |
shutil.copyfileobj(video_a.file, buffer)
|
|
|
|
| 45 |
with open(path_c, "wb") as buffer:
|
| 46 |
shutil.copyfileobj(video_c.file, buffer)
|
| 47 |
|
|
|
|
|
|
|
|
|
|
| 48 |
initial_state = {
|
| 49 |
+
"video_a_url": "local_upload",
|
| 50 |
+
"video_c_url": "local_upload",
|
| 51 |
"user_notes": prompt,
|
| 52 |
"veo_prompt": prompt,
|
| 53 |
"video_a_local_path": os.path.abspath(path_a),
|
|
|
|
| 56 |
"status": "started"
|
| 57 |
}
|
| 58 |
|
|
|
|
| 59 |
result = continuity_graph.invoke(initial_state)
|
|
|
|
|
|
|
|
|
|
| 60 |
gen_path = result.get("generated_video_url")
|
| 61 |
|
| 62 |
if not gen_path or "Error" in gen_path:
|
| 63 |
raise HTTPException(status_code=500, detail=f"Generation failed: {gen_path}")
|
| 64 |
|
|
|
|
| 65 |
final_filename = f"{request_id}_bridge.mp4"
|
| 66 |
final_output_path = os.path.join(OUTPUT_DIR, final_filename)
|
| 67 |
+
shutil.move(gen_path, final_output_path)
|
| 68 |
|
|
|
|
|
|
|
|
|
|
| 69 |
return {"video_url": f"/outputs/{final_filename}"}
|
|
|
|
| 70 |
except Exception as e:
|
| 71 |
print(f"Server Error: {e}")
|
| 72 |
raise HTTPException(status_code=500, detail=str(e))
|
| 73 |
|
| 74 |
if __name__ == "__main__":
|
| 75 |
+
uvicorn.run("server:app", host="0.0.0.0", port=7860, reload=False)
|
stitch_continuity_dashboard/code.html
CHANGED
|
@@ -173,7 +173,7 @@
|
|
| 173 |
Bridge</span>
|
| 174 |
</div>
|
| 175 |
<!-- Active Card Container -->
|
| 176 |
-
<div
|
| 177 |
class="relative w-full aspect-[4/3] rounded-2xl overflow-hidden shadow-neon border border-primary/30 group">
|
| 178 |
<!-- Placeholder Gradient Background -->
|
| 179 |
<div class="absolute inset-0 bg-gradient-to-br from-surface-dark to-[#0f0a16] z-0"></div>
|
|
@@ -218,7 +218,7 @@
|
|
| 218 |
</div>
|
| 219 |
<!-- SCENE C: Upload State -->
|
| 220 |
<div class="flex flex-col gap-4 flex-1 max-w-[320px] group">
|
| 221 |
-
<div class="flex items-center justify-
|
| 222 |
<span class="text-xs font-bold tracking-widest text-primary">SCENE C</span>
|
| 223 |
<span class="text-xs font-bold tracking-widest text-gray-400 uppercase">Target Source</span>
|
| 224 |
</div>
|
|
@@ -299,7 +299,7 @@
|
|
| 299 |
|
| 300 |
try {
|
| 301 |
// 3. Send to Server
|
| 302 |
-
const response = await fetch("
|
| 303 |
method: "POST",
|
| 304 |
body: formData
|
| 305 |
});
|
|
@@ -309,32 +309,13 @@
|
|
| 309 |
const data = await response.json();
|
| 310 |
|
| 311 |
// 4. Success: Inject Video into the Middle Card
|
| 312 |
-
|
| 313 |
-
const bridgeCard = document.querySelector(".aspect-\\[4\\/3\\]");
|
| 314 |
-
// Find the middle card specifically if possible, relying on the class is risky if multiple match.
|
| 315 |
-
// But in this layout, the middle one is distinct or we can target by structure.
|
| 316 |
-
// The middle card is the second .aspect-[4/3] roughly, but the user script uses querySelector which matches the first one?
|
| 317 |
-
// Wait, the Code.html has 3 cards.
|
| 318 |
-
// Scene A: .aspect-[4/3]
|
| 319 |
-
// Bridge: .aspect-[4/3] (line 148)
|
| 320 |
-
// Scene C: .aspect-[4/3]
|
| 321 |
-
|
| 322 |
-
// User script used document.querySelector(".aspect-\\[4\\/3\\]").
|
| 323 |
-
// This matches the first one (Scene A)! That's a bug in the user's snippet.
|
| 324 |
-
// I will fix it to target the BRIDGE card.
|
| 325 |
-
// The bridge card container has text "The Bridge".
|
| 326 |
-
// I'll add an ID to the bridge container or select by index.
|
| 327 |
-
// Safer to select the middle one: document.querySelectorAll(".aspect-\\[4\\/3\\]")[1]
|
| 328 |
|
| 329 |
-
|
| 330 |
-
|
| 331 |
-
if (cards.length >= 2) {
|
| 332 |
-
// Scene A is [0], Bridge is [1], Scene C is [2]
|
| 333 |
-
const bridgeCard = cards[1];
|
| 334 |
-
// Ensure we are replacing the inner content
|
| 335 |
bridgeCard.innerHTML = `
|
| 336 |
<video controls autoplay loop class="w-full h-full object-cover rounded-2xl border-2 border-primary shadow-neon">
|
| 337 |
-
<source src="
|
| 338 |
Your browser does not support the video tag.
|
| 339 |
</video>
|
| 340 |
`;
|
|
|
|
| 173 |
Bridge</span>
|
| 174 |
</div>
|
| 175 |
<!-- Active Card Container -->
|
| 176 |
+
<div id="bridge-card"
|
| 177 |
class="relative w-full aspect-[4/3] rounded-2xl overflow-hidden shadow-neon border border-primary/30 group">
|
| 178 |
<!-- Placeholder Gradient Background -->
|
| 179 |
<div class="absolute inset-0 bg-gradient-to-br from-surface-dark to-[#0f0a16] z-0"></div>
|
|
|
|
| 218 |
</div>
|
| 219 |
<!-- SCENE C: Upload State -->
|
| 220 |
<div class="flex flex-col gap-4 flex-1 max-w-[320px] group">
|
| 221 |
+
<div class="flex items-center justify-center px-1">
|
| 222 |
<span class="text-xs font-bold tracking-widest text-primary">SCENE C</span>
|
| 223 |
<span class="text-xs font-bold tracking-widest text-gray-400 uppercase">Target Source</span>
|
| 224 |
</div>
|
|
|
|
| 299 |
|
| 300 |
try {
|
| 301 |
// 3. Send to Server
|
| 302 |
+
const response = await fetch("/generate-transition", {
|
| 303 |
method: "POST",
|
| 304 |
body: formData
|
| 305 |
});
|
|
|
|
| 309 |
const data = await response.json();
|
| 310 |
|
| 311 |
// 4. Success: Inject Video into the Middle Card
|
| 312 |
+
const bridgeCard = document.getElementById("bridge-card");
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 313 |
|
| 314 |
+
if (bridgeCard) {
|
| 315 |
+
// Ensure we are replacing the inner content completely
|
|
|
|
|
|
|
|
|
|
|
|
|
| 316 |
bridgeCard.innerHTML = `
|
| 317 |
<video controls autoplay loop class="w-full h-full object-cover rounded-2xl border-2 border-primary shadow-neon">
|
| 318 |
+
<source src="${data.video_url}" type="video/mp4">
|
| 319 |
Your browser does not support the video tag.
|
| 320 |
</video>
|
| 321 |
`;
|
tests/create_assets.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import cv2
import numpy as np


def make_video(filename, color):
    """Write a 1-second, 30 fps solid-color test clip to *filename*.

    Args:
        filename: Output path; should end in ``.mp4`` to match the codec.
        color: BGR tuple (OpenCV channel order), e.g. ``(255, 0, 0)`` is blue.
    """
    height, width = 480, 640
    # Use 'mp4v' for .mp4 containers
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(filename, fourcc, 30.0, (width, height))
    try:
        # Create a frame with the solid color
        frame = np.zeros((height, width, 3), dtype=np.uint8)
        frame[:] = color  # BGR format

        # Write 30 frames (1 second at 30 fps)
        for _ in range(30):
            out.write(frame)
    finally:
        # Always release the writer so the container is finalized on disk,
        # even if a write fails partway through.
        out.release()
    # Fixed: previously printed the literal placeholder "(unknown)" instead
    # of the actual output path.
    print(f"Created {filename}")


if __name__ == "__main__":
    make_video('tests/scene_a.mp4', (255, 0, 0))  # Blue
    make_video('tests/scene_c.mp4', (0, 0, 255))  # Red
|
tests/test_api.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import requests
import os
import sys


def test_api():
    """Smoke-test the /generate-transition endpoint of a locally running server.

    Sends the two solid-color fixture clips produced by ``create_assets.py``
    and prints the server's response. Exits with status 1 when the fixtures
    are missing. The target port defaults to 8000 but can be overridden with
    the ``CONTINUITY_PORT`` environment variable (the deployed container
    listens on 7860).
    """
    port = os.environ.get("CONTINUITY_PORT", "8000")
    url = f"http://127.0.0.1:{port}/generate-transition"

    video_a_path = 'tests/scene_a.mp4'
    video_c_path = 'tests/scene_c.mp4'

    if not os.path.exists(video_a_path) or not os.path.exists(video_c_path):
        print("Error: Test videos not found. Run create_assets.py first.")
        sys.exit(1)

    # Open inside `with` so both handles are closed even when the request
    # fails (the original opened them inline and leaked them).
    with open(video_a_path, 'rb') as file_a, open(video_c_path, 'rb') as file_c:
        files = {
            'video_a': ('scene_a.mp4', file_a, 'video/mp4'),
            'video_c': ('scene_c.mp4', file_c, 'video/mp4')
        }
        data = {'prompt': 'Test transition'}

        print(f"Sending POST request to {url}...")
        try:
            response = requests.post(url, files=files, data=data)
            print(f"Status Code: {response.status_code}")
            if response.status_code == 200:
                print(f"Success! Response: {response.json()}")
            else:
                print(f"Failed. Response: {response.text}")
        except Exception as e:
            # Best-effort diagnostic script: report and keep going rather
            # than crash with a traceback.
            print(f"Connection Failed: {e}")
            print(f"Make sure the server is running on port {port}.")


if __name__ == "__main__":
    test_api()
|