Upload 10 files

- .gitattributes +1 -0
- 0TqQje61Hoo.3.1.mp4 +3 -0
- README.md +89 -19
- README2.md +95 -0
- app.py +128 -0
- download_video.py +18 -0
- get_videos.py +12 -0
- logo.png +0 -0
- movement_detector.py +49 -0
- requirements.txt +5 -3
- test_dataset.py +5 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+0TqQje61Hoo.3.1.mp4 filter=lfs diff=lfs merge=lfs -text
0TqQje61Hoo.3.1.mp4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e43dc88426637b14fd369705158d3eafbd186137cdd47a7df3c5ee37302d0ba0
+size 2052375
README.md
CHANGED
@@ -1,19 +1,89 @@
# 📷 Camera Movement Detector

This project was developed as part of the **ATP Core Talent AI Coder Challenge 2025**.
It is a Streamlit-based web application that analyzes uploaded videos and detects **significant camera movement** such as panning, tilting, or shaking.
It does **not** focus on object motion; instead, it detects changes in the global scene that indicate the camera itself moved.

---

## 📷 Screenshot


## 🚀 Live App

🔗 [Click here to try the app online](https://your-deployment-url.com)
(Replace this with your Streamlit or Hugging Face link after deployment.)

---

## 🎯 Features

- 📼 Upload a video (MP4, AVI, MOV)
- 🎞️ Extracts and analyzes frames
- 🎯 Detects significant camera movement (not object motion)
- 💡 Clean, modern UI with ATP branding

---

## 🧠 Approach

- **Frame Extraction:** Extracts all frames from the video using OpenCV.
- **Feature Detection & Matching:** An ORB feature detector compares consecutive frames.
- **Homography Transformation:** Estimates a transformation matrix between frames to measure global scene shift.
- **Movement Decision:** If the estimated shift exceeds a threshold, the frame is classified as camera movement.
- **Bonus Logic:** Attempts to filter out object motion by checking matched keypoints and transformation consistency.
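Condensed into a single illustrative function, the decision rule from `movement_detector.py` looks like this (see the module itself for the full frame loop):

```python
import cv2
import numpy as np

def is_camera_movement(prev_gray, curr_gray, threshold=15):
    """Condensed from movement_detector.py: compare two grayscale frames."""
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(prev_gray, None)
    kp2, des2 = orb.detectAndCompute(curr_gray, None)
    if des1 is None or des2 is None:
        return False
    matches = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True).match(des1, des2)
    if len(matches) < 10:  # too few matches to estimate a homography reliably
        return False
    src = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    H, _ = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
    if H is None:
        return False
    dx, dy = H[0, 2], H[1, 2]  # translation components of the homography
    return float(np.hypot(dx, dy)) > threshold
```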
---

## 🛠️ Tech Stack

- **Python 3.13**
- **OpenCV**
- **Streamlit** (for UI)
- **Datasets** from Hugging Face (for CameraBench demo data)
- **NumPy** for numerical processing

---

## 📦 Installation

To run locally:

```bash
git clone https://github.com/yourusername/camera-detector.git
cd camera-detector
pip install -r requirements.txt
streamlit run app.py
```
## 📁 File Structure

```bash
camera-detector/
├── app.py                 # Streamlit web UI
├── movement_detector.py   # Core detection logic
├── requirements.txt       # Dependencies
├── README.md              # This file
```

### 📸 Example Output

Input: Sample video with camera shake

#### Output:

```bash
📌 Detected camera movement at frame indices: [6, 7, 8, 10, 15, 22, 23, 24]
```

## 💬 Acknowledgements

* Hugging Face CameraBench Dataset
* ATP Core Talent Team for organizing the challenge

## 🧑‍💻 Developed by

* Saadet Elizaveta Babal
* Candidate – ATP Core Talent AI Coder Challenge 2025
README2.md
ADDED
@@ -0,0 +1,95 @@
# ATP Core Talent 2025
# Core Talent AI Coder Challenge: Camera Movement Detection

**Detecting Significant Camera Movement Using Image Recognition**

---

## Scenario

Imagine you are tasked with building a component for a smart camera system. Your goal is to detect **significant movement**: for example, if someone moves or tilts the camera, or if the entire camera is knocked or shifted. This is different from simply detecting moving objects in the scene.

---

## Requirements

1. **Input:**

   * A sequence of images or frames (at least 10-20), simulating a fixed camera, with some frames representing significant camera movement (tilt, pan, large translation) and others showing a static scene or minor background/object motion.
   * You may use public datasets, generate synthetic data, or simulate with your own webcam.
   * Example: [CameraBench Dataset on Hugging Face](https://huggingface.co/datasets/syCen/CameraBench)

2. **Task:**

   * Build an algorithm (**Python preferred**) that analyzes consecutive frames and detects when significant camera movement occurs.
   * Output a list of frames (by index/number) where significant movement is detected.

3. **Expected Features:**

   * **Basic:** Frame differencing or feature matching to detect large global shifts (e.g., using OpenCV's ORB/SIFT/SURF, optical flow, or homography); a minimal frame-differencing sketch follows this section.
   * **Bonus:** Distinguish between camera movement and object movement within the scene (e.g., use keypoint matching, estimate transformation matrices, etc.).

4. **Deployment:**

   * Wrap your solution in a small web app (**Streamlit, Gradio, or Flask**) that allows the user to upload a sequence of images (or a video), runs the detection, and displays the result.
   * Deploy the app on a public platform (**Vercel, Streamlit Cloud, Hugging Face Spaces**, etc.).

5. **Deliverables:**

   * Public app URL
   * GitHub repo (with code and requirements.txt)
   * README (explaining your approach, dataset, and how to use the app)

   * **Sample README Outline:**

     * Overview of your approach and movement detection logic
     * Any challenges or assumptions
     * How to run the app locally
     * Link to the live app
     * Example input/output screenshots
     * AI Prompts or Chat History (if used for support)

---
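For reference, a minimal frame-differencing baseline for the **Basic** feature could look like the sketch below. It is illustrative only: the threshold values are arbitrary assumptions, and plain differencing cannot tell camera motion from large object motion, which is exactly what the **Bonus** feature addresses.

```python
import cv2
import numpy as np

def naive_movement_frames(gray_frames, pixel_threshold=30, changed_ratio=0.5):
    """Flag frame i+1 when a large fraction of pixels changed versus frame i.

    gray_frames: list of grayscale frames (2-D uint8 arrays).
    pixel_threshold, changed_ratio: illustrative, untuned values.
    """
    flagged = []
    for i in range(len(gray_frames) - 1):
        diff = cv2.absdiff(gray_frames[i], gray_frames[i + 1])  # per-pixel change
        if np.mean(diff > pixel_threshold) > changed_ratio:     # fraction changed
            flagged.append(i + 1)
    return flagged
```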
## Evaluation Rubric

| Criteria           | Points | Details                                                                                     |
| ------------------ | ------ | ------------------------------------------------------------------------------------------- |
| **Correctness**    | 5      | Accurately detects significant camera movement; low false positives/negatives.              |
| **Implementation** | 5      | Clean code, good use of OpenCV or relevant libraries, modular structure.                     |
| **Deployment**     | 5      | App is online, easy to use, and functions as described.                                      |
| **Innovation**     | 3      | Advanced techniques (feature matching, transformation estimation, clear object vs. camera).  |
| **Documentation**  | 2      | Clear README, instructions, and concise explanation of method/logic.                        |

---

## Suggested Stack

* **Python** or **C#**
* **OpenCV** for computer vision
* **Streamlit**, **Gradio**, or a **shadcn-powered Vercel site** for a quick web UI
* **GitHub** for the code repo; **Streamlit Cloud**, **Hugging Face Spaces**, or **Vercel** for deployment

---

# 📋 Candidate Instructions

1. **Fork this repository** (or start your own repository with the same structure).
2. **Implement your movement detection algorithm** in `movement_detector.py`.
3. **Develop a simple web app** (`app.py`) that allows users to upload images/sequences and view detection results.
4. **Deploy your app** on a public platform (e.g., Streamlit Cloud, Hugging Face Spaces, Vercel, Heroku) and **share both your deployed app URL and GitHub repository link**.
5. **Document your work**: include a `README.md` that explains your approach, how to run your code, and sample results (with screenshots or example outputs).

---

**Deadline:**
🕓 **27.06.2025**

---

**Plagiarism Policy:**

* This must be **individual, AI-powered work**.
* You may use open-source libraries, but you **must cite** all external resources and code snippets.
* Do not submit work copied from others or from the internet without proper acknowledgment.

---

**Good luck! Show us your best hands-on AI skills!**
app.py
ADDED
@@ -0,0 +1,128 @@
import os

# Create the Streamlit config file in the user's home directory
# (needed on hosts that inject the serving port via the PORT env var).
config_dir = os.path.join(os.path.expanduser("~"), ".streamlit")
os.makedirs(config_dir, exist_ok=True)

config_path = os.path.join(config_dir, "config.toml")
with open(config_path, "w") as f:
    f.write(f"""
[server]
headless = true
port = {os.environ.get("PORT", "8501")}
enableCORS = false
""")
import streamlit as st
import tempfile
from movement_detector import extract_frames, detect_camera_movement

st.set_page_config(page_title="Camera Movement Detector", layout="centered")

# CSS + HTML (professional design)
st.markdown("""
<style>
body {
    font-family: 'Segoe UI', sans-serif;
}

.stApp {
    background-color: #f4f4f4;
    padding-top: 30px;
}

.logo-container {
    display: flex;
    justify-content: center;
    align-items: center;
    margin-bottom: 15px;
}

.logo-container img {
    width: 60px;
    height: auto;
    opacity: 0.9;
}

.card {
    background-color: white;
    border-radius: 12px;
    padding: 30px;
    max-width: 700px;
    margin: auto;
    box-shadow: 0 8px 20px rgba(0,0,0,0.08);
}

.title {
    text-align: center;
    font-size: 26px;
    color: #333;
    margin-bottom: 10px;
    font-weight: 600;
}

.description {
    text-align: center;
    font-size: 16px;
    color: #666;
    margin-bottom: 25px;
}

.stButton>button {
    background-color: #ff4b4b;
    color: white;
    border-radius: 6px;
    padding: 8px 20px;
    font-weight: bold;
}

.stFileUploader label {
    font-weight: 500;
    color: #444;
}

/* 🔧 Make the text in info and spinner boxes black */
.stAlert > div {
    color: black !important;
}

.stSpinner > div {
    color: black !important;
}

</style>
""", unsafe_allow_html=True)
# 🔹 Logo
st.markdown("""
<div class="logo-container">
    <img src="https://i.hizliresim.com/khnekdt.png" alt="ATP Logo">
</div>
""", unsafe_allow_html=True)

# 🔹 Content card
st.markdown('<div class="card">', unsafe_allow_html=True)

st.markdown('<div class="title">📷 Camera Movement Detector</div>', unsafe_allow_html=True)
st.markdown('<div class="description">Upload a video to detect <b>significant camera movement</b> (not object motion).</div>', unsafe_allow_html=True)

# 🔸 File uploader
uploaded_video = st.file_uploader("🎞️ Upload your video", type=["mp4", "avi", "mov"])

# 🔸 Analysis
if uploaded_video:
    # Persist the upload to a temporary file so OpenCV can open it by path
    with tempfile.NamedTemporaryFile(delete=False) as temp_file:
        temp_file.write(uploaded_video.read())
        temp_video_path = temp_file.name

    st.info("Extracting frames...")
    frames = extract_frames(temp_video_path)

    st.info("Analyzing for camera movement...")
    with st.spinner("Running detection..."):
        indices = detect_camera_movement(frames)

    st.success("✅ Detection complete.")
    st.markdown(f"<div class='description'>📌 Movement detected at frames:<br><code>{indices}</code></div>", unsafe_allow_html=True)

st.markdown('</div>', unsafe_allow_html=True)
download_video.py
ADDED
@@ -0,0 +1,18 @@
import requests

# Dataset path (the path obtained earlier)
video_path = "videos/0TqQje61Hoo.3.1.mp4"

# Hugging Face URL
base_url = "https://huggingface.co/datasets/syCen/CameraBench/resolve/main/"
video_url = base_url + video_path

# Filename to save locally
output_filename = video_path.split("/")[-1]

# Download
response = requests.get(video_url)
response.raise_for_status()  # fail loudly instead of saving an HTML error page
with open(output_filename, "wb") as f:
    f.write(response.content)

print(f"✅ Video downloaded: {output_filename}")
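As an aside, the same file could be fetched via `huggingface_hub` (a sketch; `huggingface_hub` is not listed in requirements.txt, though it is installed as a dependency of `datasets`):

```python
# Alternative sketch: resolve the same dataset file through huggingface_hub.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="syCen/CameraBench",
    repo_type="dataset",
    filename="videos/0TqQje61Hoo.3.1.mp4",
)
print(f"✅ Video downloaded: {local_path}")
```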
get_videos.py
ADDED
@@ -0,0 +1,12 @@
from datasets import load_dataset

# Load the dataset
ds = load_dataset("syCen/CameraBench")
test_data = ds["test"]

# Print info for the first 3 videos
for i in range(3):
    print(f"Caption: {test_data[i]['caption']}")
    print(f"Label: {test_data[i]['labels']}")
    print(f"Path: {test_data[i]['path']}")
    print()
logo.png
ADDED
movement_detector.py
ADDED
@@ -0,0 +1,49 @@
import cv2
import numpy as np

def extract_frames(video_path, resize=(640, 480)):
    """Read a video and return its frames as grayscale images."""
    cap = cv2.VideoCapture(video_path)
    frames = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if resize:
            frame = cv2.resize(frame, resize)
        frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
    cap.release()
    return frames

def detect_camera_movement(frames, threshold=15):
    """Return the indices of frames where significant camera movement is detected."""
    orb = cv2.ORB_create()
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    movement_indices = []

    for i in range(len(frames) - 1):
        kp1, des1 = orb.detectAndCompute(frames[i], None)
        kp2, des2 = orb.detectAndCompute(frames[i+1], None)

        if des1 is None or des2 is None:
            continue

        matches = bf.match(des1, des2)

        if len(matches) < 10:
            continue

        src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)

        # Compute the homography matrix between consecutive frames
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        if H is not None:
            # Estimate movement magnitude from the homography's translation
            # components (rotation and scale are ignored here)
            dx = H[0, 2]
            dy = H[1, 2]
            movement_magnitude = np.sqrt(dx**2 + dy**2)

            if movement_magnitude > threshold:
                movement_indices.append(i+1)

    return movement_indices
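A quick way to exercise the detector locally, assuming the sample clip committed above sits in the working directory:

```python
# Minimal end-to-end check against the sample clip added in this commit.
from movement_detector import extract_frames, detect_camera_movement

frames = extract_frames("0TqQje61Hoo.3.1.mp4")
indices = detect_camera_movement(frames, threshold=15)
print(f"📌 Detected camera movement at frame indices: {indices}")
```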
requirements.txt
CHANGED
@@ -1,3 +1,5 @@
+streamlit>=1.35.0
+opencv-python>=4.9.0
+numpy>=1.24.0
+Pillow>=10.0.0
+datasets>=3.6.0
test_dataset.py
ADDED
@@ -0,0 +1,5 @@
from datasets import load_dataset

# Download the CameraBench dataset from Hugging Face
ds = load_dataset("syCen/CameraBench")
print(ds)