| | import os |
| | import random |
| | import uuid |
| | import time |
| | import json |
| | import base64 |
| | import logging |
| | import atexit |
| | import numpy as np |
| | import requests |
| | from flask import Flask, render_template, request, redirect |
| | from threading import Thread |
| | from pymongo import MongoClient |
| | from google.oauth2.credentials import Credentials |
| | from google.auth.transport.requests import Request |
| | from googleapiclient.discovery import build |
| | from googleapiclient.http import MediaFileUpload |
| | from instagrapi import Client |
| | from moviepy.editor import VideoFileClip, ColorClip, CompositeVideoClip, ImageClip |
| | from moviepy.video.fx import resize |
| | from moviepy.editor import VideoFileClip, CompositeVideoClip, ColorClip, ImageClip, TextClip |
| | from PIL import Image, ImageDraw, ImageFont |
| | from datetime import datetime, timedelta, timezone |
| | from moviepy.editor import * |
| | import builtins |
| | import logging |
| | import re |
| |
|
| | import builtins |
| | import re |
| | import subprocess |
| |
|
| |
|
| | |
# Configure root logging once at import time: INFO level, timestamped
# records, mirrored to a local file and to the console.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[
        logging.FileHandler("app.log"),
        logging.StreamHandler()
    ]
)
# Module-level logger. NOTE(review): re-bound later (around the
# "reel_fetcher" section) to a differently-named logger.
logger = logging.getLogger(__name__)
| |
|
| | |
# Flask app serving the status page and the manual-trigger endpoint.
app = Flask(__name__)

# MongoDB connection (URI from the MONGO_URI env var). Two logical DBs:
#   shortttt.meta                 -> upload history / part counter
#   instagram_bot.reel_progress   -> per-username fetch progress + dedupe sets
client = MongoClient(os.getenv("MONGO_URI"))
db1 = client.shortttt
meta = db1.meta
db2 = client["instagram_bot"]
reel_progress = db2["reel_progress"]
| | |
# Caption pool; one entry is chosen at random per edited video.
# (Emoji characters appear mojibake-encoded in this file; preserved as-is.)
CAPTIONS = [
    "Wait for it π", "Watch till end π", "Try not to laugh π€£",
    "Don't skip this π₯", "You won't expect this! π", "Keep watching π",
    "Stay till end! π₯", "Funniest one yet"
]

# Keywords that disqualify a reel; matched against the lowercased page text.
BLOCKLIST = [
    "nsfw","18+", "xxx", "sexy", "adult", "porn", "onlyfans", "escort",
    "betting", "gambling", "iplwin", "1xbet", "winzo", "my11circle", "dream11",
    "rummy", "teenpatti", "fantasy", "casino", "promotion"
]

# Timestamps of today's uploads; cleared at NEXT_RESET.
# NOTE(review): this initial NEXT_RESET uses naive local time; auto_loop()
# immediately overwrites it with an IST-aware value, so this is a placeholder.
UPLOAD_TIMES = []
NEXT_RESET = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)
| |
|
| | |
def get_next_part():
    """Return the next sequential part number, starting at 1."""
    newest = meta.find_one(sort=[("part", -1)])
    if not newest:
        return 1
    return newest["part"] + 1
| |
|
def generate_description(title):
    """Build the YouTube description text for a given video title."""
    return "Watch this hilarious clip: " + title
| |
|
| | |
def get_next_fetch_index(username):
    """Return the saved grid resume index for *username* (0 when unknown)."""
    doc = reel_progress.find_one({"username": username})
    if doc and "last_index" in doc:
        return doc["last_index"]
    return 0
| |
|
def update_fetch_index(username, new_index):
    """Persist *new_index* as the user's resume position (upserting)."""
    query = {"username": username}
    change = {"$set": {"last_index": new_index}}
    reel_progress.update_one(query, change, upsert=True)
| |
|
def is_reel_fetched_or_skipped(username, reel_id):
    """True when *reel_id* was already downloaded or rejected for this user."""
    doc = reel_progress.find_one({"username": username})
    if not doc:
        return False
    return any(reel_id in doc.get(key, []) for key in ("fetched_ids", "skipped_ids"))
| |
|
def mark_reel_fetched(username, reel_id):
    """Record *reel_id* as downloaded for *username* (set-add, upserting)."""
    change = {"$addToSet": {"fetched_ids": reel_id}}
    reel_progress.update_one({"username": username}, change, upsert=True)
| |
|
def mark_reel_skipped(username, reel_id):
    """Record *reel_id* as rejected for *username* (set-add, upserting)."""
    change = {"$addToSet": {"skipped_ids": reel_id}}
    reel_progress.update_one({"username": username}, change, upsert=True)
| |
|
| | import os |
| | import json |
| | import base64 |
| | import random |
| | import time |
| | import uuid |
| | import requests |
| | import logging |
| | import pathlib |
| | import re |
| | from typing import Tuple, Optional |
| |
|
| | from instagrapi import Client |
| | import undetected_chromedriver as uc |
| | from selenium.webdriver.common.by import By |
| | from selenium.webdriver.common.keys import Keys |
| | from selenium.webdriver.support.ui import WebDriverWait |
| | from selenium.webdriver.support import expected_conditions as EC |
| |
|
| | logger = logging.getLogger("reel_fetcher") |
| | logger.setLevel(logging.INFO) |
| |
|
| | |
| | MAX_PER_USER = 1 |
| |
|
| | |
| |
|
| | |
| | |
| | |
| | import os |
| | import json |
| | import base64 |
| | import time |
| | import random |
| | import re |
| | from typing import Optional |
| | from instagrapi import Client |
| | import undetected_chromedriver as uc |
| | from selenium.webdriver.common.by import By |
| | from selenium.webdriver.common.keys import Keys |
| | from selenium.webdriver.support.ui import WebDriverWait |
| | from selenium.webdriver.support import expected_conditions as EC |
| |
|
| |
|
| | import os, json, base64, random, time, uuid, pathlib, re, logging |
| | from typing import Optional, Tuple |
| |
|
| | import requests |
| | from instagrapi import Client |
| | import undetected_chromedriver as uc |
| | from selenium.webdriver.common.by import By |
| | from selenium.webdriver.common.keys import Keys |
| | from selenium.webdriver.support.ui import WebDriverWait |
| | from selenium.webdriver.support import expected_conditions as EC |
| |
|
| | |
| | |
| |
|
| | BOT_TOKEN = os.getenv("TELEGRAM_BOT_TOKEN", "8035634204:AAGK5zgaAw63d93PRCaDP0dw2VNu_an_0Q4") |
| | CHAT_ID = os.getenv("TELEGRAM_CHAT_ID", "5873900195") |
| |
|
| | MIN_DUR = 40 |
| | MAX_DUR = 120 |
| | SCROLLS = 10 |
| | _reel_pat = re.compile(r"^/reel/[^/]+/$") |
| |
|
| | |
| | |
| | |
| |
|
def send_telegram(method: str, files=None, **params):
    """POST *params*/*files* to the given Telegram Bot API method.

    Best-effort: network or API errors are logged, never raised.
    """
    endpoint = "https://api.telegram.org/bot" + BOT_TOKEN + "/" + method
    try:
        response = requests.post(endpoint, data=params, files=files, timeout=15)
        response.raise_for_status()
    except Exception as e:
        logger.error(f"β Telegram {method} failed: {e}")
| |
|
| |
|
def send_telegram_text(text: str):
    """Send a plain text message to the configured chat."""
    send_telegram("sendMessage", text=text, chat_id=CHAT_ID)
| |
|
| |
|
def send_telegram_photo(photo_path: str, caption: str = ""):
    """Upload the image at *photo_path* to the configured chat."""
    with open(photo_path, "rb") as handle:
        send_telegram("sendPhoto", files={"photo": handle}, chat_id=CHAT_ID, caption=caption)
| |
|
| |
|
| | |
| | |
| | |
| |
|
def get_ig_cookies() -> dict:
    """Log in to Instagram via instagrapi and return the session cookies.

    Reuses a base64-encoded session from INSTAGRAM_SESSION_B64 when present
    (decode failures are logged and ignored), then authenticates with the
    INSTAGRAM_USERNAME / INSTAGRAM_PASSWORD env vars.
    """
    username = os.getenv("INSTAGRAM_USERNAME")
    password = os.getenv("INSTAGRAM_PASSWORD")
    encoded_session = os.getenv("INSTAGRAM_SESSION_B64", "")

    cl = Client()
    if encoded_session:
        try:
            cl.set_settings(json.loads(base64.b64decode(encoded_session).decode()))
        except Exception as e:
            logger.warning(f"β οΈ Failed to decode session: {e}")

    cl.login(username, password)
    settings = cl.get_settings()
    # Key name has varied between instagrapi versions; accept both.
    return settings.get("cookies") or settings.get("cookie") or {}
| |
|
| |
|
| | |
| | |
| | |
| |
|
def rnd(a: float, b: float) -> float:
    """Return a uniform random float in [a, b] (thin alias for random.uniform)."""
    return random.uniform(a, b)
| |
|
| |
|
def make_driver(cookies: dict) -> uc.Chrome:
    """Start a headless undetected-chromedriver session with IG cookies loaded."""
    options = uc.ChromeOptions()
    for flag in (
        "--headless=new",
        "--no-sandbox",
        "--disable-dev-shm-usage",
        "--window-size=1280,900",
        "--disable-blink-features=AutomationControlled",
    ):
        options.add_argument(flag)

    driver = uc.Chrome(options=options)

    # Cookies can only be attached once we're on the instagram.com origin.
    driver.get("https://www.instagram.com/")
    time.sleep(rnd(1, 2))
    for k, v in cookies.items():
        try:
            driver.add_cookie({"name": k, "value": v, "domain": ".instagram.com"})
        except Exception as e:
            logger.info(f"β οΈ cookie {k}: {e}")

    # Reload so the injected session takes effect.
    driver.refresh()
    time.sleep(rnd(2, 3))
    return driver
| |
|
| |
|
| | |
| | |
| | |
| |
|
def capture_screenshot(driver, label="error"):
    """Save a timestamped debug screenshot and forward it to Telegram.

    Best-effort: any failure is logged, never raised.
    """
    fname = f"debug_{label}_{int(time.time())}.png"
    try:
        driver.save_screenshot(fname)
        send_telegram_photo(fname, caption=f"πΈ Screenshot ({label})")
    except Exception as e:
        logger.error(f"β Could not capture/send screenshot: {e}")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def reel_links(driver, username: str) -> list[str]:
    """Collect reel URLs from *username*'s reels grid.

    Scrolls the grid SCROLLS times to trigger lazy loading, then extracts
    every anchor whose path matches "/reel/<id>/". Returns an ordered,
    de-duplicated list of absolute URLs, or [] when the grid cannot load.
    """
    url = f"https://www.instagram.com/{username}/reels/"
    logger.info(f"π Opening grid: {url}")
    try:
        driver.get(url)
        WebDriverWait(driver, 12).until(EC.presence_of_element_located((By.TAG_NAME, "article")))
    except Exception as e:
        logger.error(f"β Could not load reel grid: {e}")
        capture_screenshot(driver, f"grid_{username}")
        return []

    for _ in range(SCROLLS):
        try:
            driver.find_element(By.TAG_NAME, "body").send_keys(Keys.END)
            time.sleep(rnd(1, 2))
        except Exception as e:
            logger.warning(f"β οΈ Scroll error: {e}")
            capture_screenshot(driver, f"scroll_{username}")
            break

    links = []
    for a in driver.find_elements(By.TAG_NAME, "a"):
        href = a.get_attribute("href") or ""
        # href is absolute; keep path segments 3-4 -> "/reel/<id>/".
        path = "/" + "/".join(href.split("/", 4)[3:5]) + "/"
        if _reel_pat.match(path):
            links.append("https://www.instagram.com" + path)

    # FIX: the grid can render the same anchor more than once; de-duplicate
    # while keeping first-seen order so saved fetch indexes stay meaningful.
    links = list(dict.fromkeys(links))

    logger.info(f"π Collected {len(links)} reel URLs")
    return links
| |
|
| |
|
| | |
| | |
| | |
| |
|
def analyse_reel(driver, url: str) -> Optional[Tuple[str, float, str]]:
    """Open a reel page and extract (video_src, duration_seconds, page_text).

    The page text is lowercased for blocklist matching. Returns None when
    the page fails to load or exposes no video source.
    """
    logger.info(f"π Analyzing reel: {url}")
    try:
        driver.get(url)
        WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.TAG_NAME, "video")))
        driver.execute_script("window.scrollBy(0, 120);")
        time.sleep(rnd(1, 2))

        video = driver.find_element(By.TAG_NAME, "video")
        v_url = video.get_attribute("src")
        dur = float(driver.execute_script("return arguments[0].duration", video) or 0)
        body = driver.find_element(By.TAG_NAME, "body").text.lower()

        if v_url:
            return v_url, dur, body
        return None

    except Exception as e:
        logger.warning(f"β analyse_reel failed: {e}")
        capture_screenshot(driver, "analyse")
        return None
| |
|
| |
|
| | |
| | |
| | |
| |
|
def fetch_valid_reel() -> Tuple[Optional[str], Optional[str]]:
    """Find, validate and download one reel from the configured username pools.

    Pools come from REACTED_USERNAMES / RAW_USERNAMES (comma-separated env
    vars); each user's grid is resumed from the saved fetch index. A reel
    qualifies when its duration is within [MIN_DUR, MAX_DUR] and its page
    text contains no BLOCKLIST keyword.

    Returns:
        (local_mp4_path, pool_type) on success, where pool_type is
        "reacted" or "raw"; (None, None) when nothing qualifies.
    """
    reacted = [u.strip() for u in os.getenv("REACTED_USERNAMES", "").split(",") if u.strip()]
    raw = [u.strip() for u in os.getenv("RAW_USERNAMES", "").split(",") if u.strip()]
    pools = [(reacted, "reacted"), (raw, "raw")]
    pools = [(p, t) for p, t in pools if p]

    if not pools:
        logger.warning("β οΈ No usernames configured")
        return None, None

    cookies = get_ig_cookies()
    driver = make_driver(cookies)

    try:
        random.shuffle(pools)
        for usernames, pool_type in pools:
            random.shuffle(usernames)
            for user in usernames:
                fetch_idx = get_next_fetch_index(user)
                links = reel_links(driver, user)

                # Resume from the saved index; idx tracks absolute position.
                for idx, link in enumerate(links[fetch_idx:], start=fetch_idx):
                    if is_reel_fetched_or_skipped(user, link):
                        continue

                    time.sleep(rnd(3, 6))
                    analysed = analyse_reel(driver, link)
                    if not analysed:
                        continue

                    v_url, dur, txt = analysed

                    # Duration gate.
                    if not (MIN_DUR <= dur <= MAX_DUR):
                        mark_reel_skipped(user, link)
                        continue

                    # Content gate (NSFW / betting / promo keywords).
                    if any(w in txt for w in BLOCKLIST):
                        mark_reel_skipped(user, link)
                        continue

                    fname = f"reels/user_{uuid.uuid4().hex[:6]}.mp4"
                    pathlib.Path("reels").mkdir(exist_ok=True)
                    try:
                        with open(fname, "wb") as f:
                            f.write(requests.get(v_url, timeout=15).content)
                    except Exception as e:
                        logger.error(f"β Download failed: {e}")
                        capture_screenshot(driver, "download")
                        mark_reel_skipped(user, link)
                        continue

                    logger.info(f"[✅] Downloaded {fname} ({dur:.0f}s)")
                    send_telegram_text(f"✅ Downloaded {fname} from @{user}")
                    mark_reel_fetched(user, link)
                    update_fetch_index(user, idx + 1)
                    return fname, pool_type

                # FIX: the loop already starts at fetch_idx, so the exhausted
                # position is len(links) — the previous fetch_idx + len(links)
                # overshot and permanently skipped newly-posted reels.
                update_fetch_index(user, len(links))
                logger.info(f"π« No valid reel for @{user}")
                time.sleep(rnd(25, 40))

        logger.warning("β οΈ Exhausted all usernames, nothing fetched")
    finally:
        try:
            driver.quit()
        except Exception:
            pass

    return None, None
| |
|
def patch_moviepy():
    """Monkey-patch moviepy's resize effect for newer Pillow releases.

    Newer Pillow removed ``Image.ANTIALIAS``, which older moviepy resize code
    uses; this shim intercepts resize calls that pass an explicit ``newsize``
    keyword and resizes via ``Image.Resampling.LANCZOS`` instead. Calls
    without ``newsize`` fall through to the original implementation.
    """
    original_resizer = resize.resize

    def patched_resizer(clip, *args, **kwargs):
        # Only the keyword form is intercepted; a positional newsize (if any
        # caller uses one) still reaches the original resizer.
        newsize = kwargs.get("newsize", None)
        if newsize:
            newsize = tuple(map(int, newsize))
            # NOTE(review): fl_image passes frames to this lambda; calling
            # .resize(..., Image.Resampling.LANCZOS) assumes the frame object
            # supports the PIL resize API — verify against the moviepy
            # version in use.
            clip = clip.fl_image(lambda img: img.resize(newsize, Image.Resampling.LANCZOS))
        else:
            clip = original_resizer(clip, *args, **kwargs)
        return clip

    resize.resize = patched_resizer


# Apply the patch at import time, before any clip is resized.
patch_moviepy()
| |
|
| | import emoji |
| | from PIL import Image, ImageDraw, ImageFont |
| | import emoji |
| | import os |
| | import requests |
| |
|
| |
|
def _measure_text(draw, text, font):
    """Return (width, height) of *text*; works on Pillow with or without textsize."""
    try:
        left, top, right, bottom = draw.textbbox((0, 0), text, font=font)
        return right - left, bottom - top
    except AttributeError:
        # Pillow < 8 fallback (textsize removed in Pillow 10).
        return draw.textsize(text, font=font)


def create_text_image(text, width, height, font_size=None, align="center",
                      bg_color=(255, 255, 255), text_color=(0, 0, 0)):
    """Render *text* onto a solid bar, substituting Twemoji PNGs for emoji.

    Args:
        text: caption text; emoji are stripped from the drawn string and
            pasted as 60x60 images after it.
        width, height: bar dimensions in pixels.
        font_size: optional starting font size (defaults to 70); the size is
            reduced until the content fits, down to a minimum of 30.
        align: accepted for caller compatibility; only centered layout is
            implemented (as before).
        bg_color, text_color: RGB fill colors for the bar and the text.

    Returns:
        A PIL RGB Image of size (width, height).

    FIX: previously the signature was (text, width, height) only, so the
    edit_video_raw() call sites passing font_size/align/bg_color/text_color
    raised TypeError; also replaces the removed ImageDraw.textsize API.
    """
    img = Image.new("RGB", (width, height), color=bg_color)
    draw = ImageDraw.Draw(img)

    # Split emoji out of the text; they are rendered as images, not glyphs.
    emojis = emoji.emoji_list(text)
    pure_text = emoji.replace_emoji(text, replace='')

    # Shrink the font until text + emoji fit inside the bar (min size 30).
    max_font_size = font_size or 70
    while True:
        try:
            font = ImageFont.truetype("DejaVuSans-Bold.ttf", size=max_font_size)
        except Exception:
            font = ImageFont.load_default()
        text_width, text_height = _measure_text(draw, pure_text, font)
        total_width = text_width + (len(emojis) * 60) + 20
        if total_width <= width - 40 or max_font_size <= 30:
            break
        max_font_size -= 2

    # Center the combined text + emoji row.
    start_x = (width - total_width) // 2
    y = (height - text_height) // 2

    draw.text((start_x, y), pure_text, font=font, fill=text_color)

    # Paste each emoji PNG after the text, downloading it on first use.
    x = start_x + text_width + 10
    for e in emojis:
        hexcode = '-'.join(f"{ord(c):x}" for c in e['emoji'])
        emoji_path = f"emoji_pngs/{hexcode}.png"
        if not os.path.exists(emoji_path):
            download_emoji_png(e['emoji'])

        if os.path.exists(emoji_path):
            emoji_img = Image.open(emoji_path).convert("RGBA")
            emoji_img = emoji_img.resize((60, 60))
            img.paste(emoji_img, (x, y), emoji_img)
            x += 60 + 4

    return img
| |
|
| | from PIL import Image, ImageDraw, ImageFont |
| | import numpy as np |
| | from moviepy.editor import ImageClip |
| |
|
def generate_watermark_img(text, width, height=50):
    """Render *text* as a white, black-outlined watermark on a transparent strip.

    Returns an RGBA PIL Image of size (width, height) with the text anchored
    near the bottom-left corner.
    """
    img = Image.new("RGBA", (width, height), (0, 0, 0, 0))
    draw = ImageDraw.Draw(img)

    try:
        font = ImageFont.truetype("DejaVuSans-Bold.ttf", size=35)
    except Exception:
        font = ImageFont.load_default()

    # FIX: ImageDraw.textsize was removed in Pillow 10; measure via textbbox
    # when available and fall back to textsize on older Pillow.
    try:
        left, top, right, bottom = draw.textbbox((0, 0), text, font=font)
        text_width, text_height = right - left, bottom - top
    except AttributeError:
        text_width, text_height = draw.textsize(text, font=font)

    draw.text((5, height - text_height - 2), text, fill="white", font=font, stroke_width=1, stroke_fill="black")

    return img
| | |
def download_emoji_png(emoji_char):
    """Fetch the Twemoji 72x72 PNG for *emoji_char* into emoji_pngs/.

    Best-effort: failures are reported on stdout and never raised.
    """
    hexcode = '-'.join(f"{ord(c):x}" for c in emoji_char)
    url = f"https://github.com/twitter/twemoji/raw/master/assets/72x72/{hexcode}.png"
    os.makedirs("emoji_pngs", exist_ok=True)
    path = f"emoji_pngs/{hexcode}.png"
    try:
        # FIX: add a timeout so a stalled request cannot hang rendering forever.
        r = requests.get(url, timeout=15)
        if r.status_code == 200:
            with open(path, "wb") as f:
                f.write(r.content)
            # (Original success message was mojibake-garbled; restored.)
            print(f"✅ Downloaded emoji: {emoji_char} → {path}")
        else:
            print(f"β Failed to download emoji: {emoji_char}")
    except Exception as e:
        print(f"β οΈ Error downloading emoji {emoji_char}: {e}")
| |
|
| |
|
def edit_video(video_path):
    """Compose the "reacted" edit: caption bar stacked above the source clip.

    Layout (top to bottom): a 120px white caption bar, then the source video
    overlaid with a faint white tint and a bottom-left channel watermark.
    Writes the result to edited/<uuid>.mp4 and returns that path.
    """
    clip = VideoFileClip(video_path)

    video_width = clip.w
    video_height = clip.h
    bar_height = 120
    total_height = video_height + bar_height

    # White canvas sized to the final frame.
    final_bg = ColorClip(size=(video_width, total_height), color=(255, 255, 255), duration=clip.duration)

    # Random caption rendered as an image into the top bar.
    caption = random.choice(CAPTIONS)
    caption_img = create_text_image(caption, video_width, bar_height)
    caption_clip = ImageClip(np.array(caption_img)).set_duration(clip.duration).set_position((0, 0))

    # 10%-opacity white layer over the video area (softening tint).
    eye_protection = ColorClip(size=(video_width, video_height), color=(255, 255, 255), duration=clip.duration)
    eye_protection = eye_protection.set_opacity(0.1).set_position((0, bar_height))

    # Channel watermark pinned to the bottom-left of the video area.
    watermark_img = generate_watermark_img("@fulltosscomedy4u", video_width, height=50)
    watermark_clip = ImageClip(np.array(watermark_img)).set_duration(clip.duration).set_position(("left", bar_height + video_height - 50))

    # Source video shifted down below the caption bar.
    video_clip = clip.set_position((0, bar_height))

    # Layer order matters: background, caption, video, tint, watermark.
    final = CompositeVideoClip(
        [final_bg, caption_clip, video_clip, eye_protection, watermark_clip],
        size=(video_width, total_height)
    )

    os.makedirs("edited", exist_ok=True)
    output_path = f"edited/{uuid.uuid4().hex}.mp4"
    final.write_videofile(
        output_path,
        codec="libx264",
        audio_codec="aac",
        preset="slow",
        bitrate="12000k",
        verbose=False,
        logger=None
    )
    return output_path
| |
|
def edit_video_raw(video_path):
    """Compose the "raw" edit: laugh-meme intro plus two caption bars.

    Layout (top to bottom): a 120px caption bar, an 80px mid-text bar, then
    a laugh clip -> frozen frame -> laugh clip sequence followed by the
    source video, tinted and watermarked. All overlays run clip.duration + 6
    seconds to cover the added meme footage. Writes edited/<uuid>.mp4 and
    returns that path.

    NOTE(review): the create_text_image() calls below pass font_size/align/
    bg_color/text_color keyword arguments — verify the current definition of
    create_text_image accepts them, otherwise these calls raise TypeError.
    Also requires laugh/1.mp4 and laugh/2.mp4 to exist.
    """
    clip = VideoFileClip(video_path)
    video_width = clip.w
    video_height = clip.h
    top_bar_height = 120
    mid_bar_height = 80
    total_height = video_height + top_bar_height + mid_bar_height

    # White canvas; +6s accounts for the laugh intro/outro footage.
    final_bg = ColorClip(size=(video_width, total_height), color=(255, 255, 255), duration=clip.duration + 6)

    # Top caption bar (black text on white).
    caption = random.choice(CAPTIONS)
    caption_img = create_text_image(
        caption,
        width=video_width,
        height=top_bar_height,
        font_size=min(max(int(video_width * 0.08), 48), 80),
        align="center",
        bg_color=(255, 255, 255),
        text_color=(0, 0, 0)
    )
    caption_clip = ImageClip(np.array(caption_img)).set_duration(clip.duration + 6).set_position((0, 0))

    # Mid bar (white text on black) with a random hook line.
    mid_text = random.choice([
        "Pura 1 din laga tab ye reel mili π€£",
        "Ye miss mat kr dena π",
        "Kha thi ye reel ab tak π€¨π€",
        "Wait, ye dekh kr hi janna π₯π₯"
    ])
    mid_img = create_text_image(
        mid_text,
        width=video_width,
        height=mid_bar_height,
        font_size=min(max(int(video_width * 0.06), 40), 64),
        align="center",
        bg_color=(0, 0, 0),
        text_color=(255, 255, 255)
    )
    mid_caption_clip = ImageClip(np.array(mid_img)).set_duration(clip.duration + 6).set_position((0, top_bar_height))

    # 10%-opacity white tint over the video area.
    eye_protection = ColorClip(size=(video_width, video_height), color=(255, 255, 255), duration=clip.duration + 6)
    eye_protection = eye_protection.set_opacity(0.1).set_position((0, top_bar_height + mid_bar_height))

    # Channel watermark at the bottom-left of the frame.
    watermark_img = generate_watermark_img("@fulltosscomedy4u", video_width, height=50)
    watermark_clip = ImageClip(np.array(watermark_img)).set_duration(clip.duration + 6).set_position(("left", total_height - 50))

    # Laugh meme: 2s clip, a freeze-frame filler, then the 2s clip again.
    laugh_index = random.choice([1, 2])
    laugh_clip = VideoFileClip(f"laugh/{laugh_index}.mp4").resize(width=video_width).set_duration(2)
    freeze_frame = laugh_clip.to_ImageClip().set_duration(max(1, clip.duration - 4))
    meme_part = concatenate_videoclips([laugh_clip, freeze_frame, laugh_clip])

    # Meme sequence followed by the actual reel, placed below both bars.
    full_video = concatenate_videoclips([meme_part, clip])
    full_video = full_video.set_position((0, top_bar_height + mid_bar_height))

    # Layer order matters: background, bars, video, tint, watermark.
    final = CompositeVideoClip(
        [final_bg, caption_clip, mid_caption_clip, full_video, eye_protection, watermark_clip],
        size=(video_width, total_height)
    )

    os.makedirs("edited", exist_ok=True)
    output_path = f"edited/{uuid.uuid4().hex}.mp4"
    final.write_videofile(
        output_path,
        codec="libx264",
        audio_codec="aac",
        preset="slow",
        bitrate="12000k",
        threads=4,
        verbose=False,
        logger=None
    )
    return output_path
| | |
def upload_to_youtube(video_path, title, desc):
    """Upload *video_path* to YouTube as a public video and return its URL.

    Credentials come from the YT_REFRESH_TOKEN / YT_CLIENT_ID /
    YT_CLIENT_SECRET env vars; the access token is refreshed before upload.
    """
    creds = Credentials(
        token=None,
        refresh_token=os.getenv("YT_REFRESH_TOKEN"),
        token_uri="https://oauth2.googleapis.com/token",
        client_id=os.getenv("YT_CLIENT_ID"),
        client_secret=os.getenv("YT_CLIENT_SECRET"),
        scopes=["https://www.googleapis.com/auth/youtube.upload"],
    )
    creds.refresh(Request())

    snippet = {
        "title": title,
        "description": desc,
        "tags": ["funny", "memes", "comedy", "shorts"],
        "categoryId": "23",
    }
    status = {"privacyStatus": "public", "madeForKids": False}

    youtube = build("youtube", "v3", credentials=creds)
    res = youtube.videos().insert(
        part="snippet,status",
        body={"snippet": snippet, "status": status},
        media_body=MediaFileUpload(video_path),
    ).execute()

    link = f"https://youtube.com/watch?v={res['id']}"
    logger.info(f"Uploaded: {link}")
    return link
| |
|
# Declared for auto_loop()'s ``global`` statement; not otherwise read here.
first_run = True


def save_to_db(part, title, desc, link):
    """Persist one upload record (part number, metadata, link, timestamp)."""
    record = {
        "part": part,
        "title": title,
        "description": desc,
        "link": link,
        "uploaded": time.time(),
    }
    meta.insert_one(record)
| |
|
def auto_loop():
    """Main scheduler: fetch -> edit -> upload 3-5 reels/day, 8AM-8PM IST.

    Runs forever and is intended to be started in a daemon thread. Each day
    it picks a random upload target, spaces uploads 2-4 hours apart, and
    resets its counters at IST midnight. All exceptions are caught, logged,
    and retried after 60 seconds.
    """
    global UPLOAD_TIMES, NEXT_RESET, first_run

    ist = timezone(timedelta(hours=5, minutes=30))

    daily_upload_count = random.randint(3, 5)
    uploads_done_today = 0
    NEXT_RESET = datetime.now(ist).replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)

    # (Original log tag was mojibake-garbled across lines; restored.)
    logger.info(f"[📅] Today's upload target: {daily_upload_count} reels.")

    def wait_until(hour: int, minute: int = 0):
        # Block until the given IST time today; no-op when already past it.
        now = datetime.now(ist)
        target = now.replace(hour=hour, minute=minute, second=0, microsecond=0)
        if target < now:
            return
        logger.info(f"[π] Waiting until {target.strftime('%H:%M')} IST...")
        while datetime.now(ist) < target:
            time.sleep(10)

    wait_until(8)

    while True:
        try:
            now = datetime.now(ist)

            # Midnight rollover: clear today's state, pick a new target.
            if now >= NEXT_RESET:
                UPLOAD_TIMES.clear()
                NEXT_RESET = now.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)
                daily_upload_count = random.randint(3, 5)
                uploads_done_today = 0
                logger.info(f"[π] Reset for new day. New target: {daily_upload_count} uploads.")

            if uploads_done_today >= daily_upload_count:
                logger.info("[✅] Daily upload target reached.")
                time.sleep(60)
                continue

            # Only operate between 8AM and 8PM IST.
            if now.hour < 8 or now.hour >= 20:
                time.sleep(60)
                continue

            # Upload when it's the first of the day or 2-4h since the last.
            if not UPLOAD_TIMES or (now - UPLOAD_TIMES[-1]).total_seconds() >= random.randint(7200, 14400):
                video_path, reel_type = fetch_valid_reel()

                if not video_path:
                    logger.warning("[β οΈ] No valid reel found. Retrying...")
                    time.sleep(60)
                    continue

                # "reacted" reels get the plain caption edit; "raw" reels get
                # the laugh-meme edit.
                if reel_type == "reacted":
                    edited = edit_video(video_path)
                else:
                    edited = edit_video_raw(video_path)

                part = get_next_part()
                title = f"Try not to laugh || #{part} #funny #memes #comedy #shorts"
                desc = generate_description(title)
                link = upload_to_youtube(edited, title, desc)
                save_to_db(part, title, desc, link)

                logger.info(f"[π€] Uploaded #{part}: {link}")
                UPLOAD_TIMES.append(now)
                uploads_done_today += 1

                # FIX: was ``os.remove(video)`` — a NameError; the downloaded
                # file variable is ``video_path``.
                os.remove(video_path)
                os.remove(edited)

                if uploads_done_today < daily_upload_count:
                    gap_seconds = random.randint(7200, 14400)
                    next_time = datetime.now(ist) + timedelta(seconds=gap_seconds)
                    if next_time.hour >= 20:
                        logger.info("[π] Next upload would exceed 8PM. Skipping.")
                        continue
                    logger.info(f"[β³] Waiting ~{gap_seconds // 60} minutes before next upload.")
                    time.sleep(gap_seconds)
            else:
                # Not yet time for the next upload.
                time.sleep(60)

        except Exception as e:
            logger.error(f"Loop error: {e}")
            time.sleep(60)
| |
|
@app.route('/')
def home():
    """Status page showing the most recently uploaded video, if any."""
    last = meta.find_one(sort=[("uploaded", -1)])
    video_id = None
    if last and "link" in last:
        video_id = last["link"].split("v=")[-1]
    return render_template("index.html", time=time.ctime(), video_id=video_id)
| |
|
@app.route('/run-now', methods=["POST"])
def run_now():
    """Manually kick off the upload loop in a background thread."""
    worker = Thread(target=auto_loop, daemon=True)
    worker.start()
    return redirect("/")
| |
|
| | if __name__ == "__main__": |
| | Thread(target=auto_loop, daemon=True).start() |
| | app.run(host="0.0.0.0", port=7860) |
| |
|