| repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated |
|---|---|---|---|---|---|---|---|---|
andybrandt/mcp-simple-pubmed | https://github.com/andybrandt/mcp-simple-pubmed/blob/de245d350c456df353363a50051ed5547dedafc0/mcp_simple_pubmed/pubmed_fetch.py | mcp_simple_pubmed/pubmed_fetch.py | """
Full text fetching functionality for PubMed articles.
This module focuses solely on retrieving full text content from PMC
using Bio.Entrez.
"""
import logging
from typing import Optional
import xml.etree.ElementTree as ET
from Bio import Entrez, Medline
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("pubmed-fetch")
class PubMedFetch:
"""Client for fetching full text from PubMed Central."""
def _clean_text(self, text: Optional[str]) -> Optional[str]:
"""Clean text content.
Args:
text: Text to clean
Returns:
Cleaned text with normalized whitespace
"""
if text is None:
return None
return ' '.join(text.split())
def _extract_text_from_pmc_xml(self, xml_content: bytes) -> str:
"""Extract readable text content from PMC XML.
Args:
xml_content: PMC article XML
Returns:
Extracted text content
"""
try:
root = ET.fromstring(xml_content)
# Dictionary for text parts
parts = {}
# Get article title
title_elem = root.find(".//article-title")
if title_elem is not None and title_elem.text:
parts['title'] = self._clean_text(title_elem.text)
# Get abstract
abstract_parts = []
for abstract in root.findall(".//abstract//p"):
if abstract.text:
abstract_parts.append(self._clean_text(abstract.text))
if abstract_parts:
parts['abstract'] = " ".join(abstract_parts)
# Get main body text
body_parts = []
for section in root.findall(".//body//sec"):
# Get section title if available
title = section.find("title")
if title is not None and title.text:
body_parts.append(f"\n\n{title.text}\n")
# Get paragraphs in section
for p in section.findall(".//p"):
if p.text:
body_parts.append(self._clean_text(p.text))
if body_parts:
parts['body'] = "\n\n".join(body_parts)
# Combine all parts
text_parts = []
if 'title' in parts:
text_parts.append(parts['title'])
if 'abstract' in parts:
text_parts.append("\nABSTRACT\n" + parts['abstract'])
if 'body' in parts:
text_parts.append("\nMAIN TEXT\n" + parts['body'])
if not text_parts:
raise ValueError("No text content found in PMC XML")
return "\n\n".join(text_parts)
except ET.ParseError as e:
logger.error(f"Error parsing PMC XML: {str(e)}")
raise ValueError(f"Could not parse PMC XML content: {str(e)}")
except Exception as e:
logger.error(f"Error extracting text from PMC XML: {str(e)}")
raise ValueError(f"Error processing PMC content: {str(e)}")
async def get_full_text(self, pmid: str) -> str:
"""Get full text of an article if available.
Args:
pmid: PubMed ID of the article
Returns:
Full text content if available, otherwise an error message
explaining why the text is not available.
Raises:
ValueError: If there are issues accessing or parsing the content
"""
try:
# First get PMC ID if available
logger.info(f"Fetching article {pmid}")
handle = Entrez.efetch(db="pubmed", id=pmid, rettype="medline", retmode="text")
record = Medline.read(handle)
handle.close()
if 'PMC' in record:
pmc_id = record['PMC']
logger.info(f"Found PMC ID {pmc_id}, fetching full text")
# Get full text from PMC
pmc_handle = Entrez.efetch(db='pmc', id=pmc_id, rettype='full', retmode='xml')
xml_content = pmc_handle.read()
pmc_handle.close()
# Parse XML and extract text
return self._extract_text_from_pmc_xml(xml_content)
elif 'DOI' in record:
return f"Full text not available in PMC. Article has DOI {record['DOI']} - full text may be available through publisher"
else:
return "Full text not available - article is not in PMC and has no DOI"
except Exception as e:
logger.exception(f"Error getting full text for article {pmid}")
return f"Error retrieving full text: {str(e)}" | python | MIT | de245d350c456df353363a50051ed5547dedafc0 | 2026-01-05T07:12:06.493732Z | false |
tmb5cg/Fifa-Autobidder | https://github.com/tmb5cg/Fifa-Autobidder/blob/a68e84e070c73e8595ea238791e49c9e025a7cd4/src/run.py | src/run.py | import tkinter as tk
from gui import GUI
if __name__ == "__main__":
root = tk.Tk()
root.title("TMB's FIFA Autobidder")
# Set theme
root.tk.call("source", "azure.tcl")
root.tk.call("set_theme", "dark")
app = GUI(root)
app.pack(fill="both", expand=True)
# Set a minsize for the window, and place it in the middle
root.update()
root.minsize(root.winfo_width(), root.winfo_height())
x_coordinate = int((root.winfo_screenwidth() / 2) - (root.winfo_width() / 2))
y_coordinate = int((root.winfo_screenheight() / 2) - (root.winfo_height() / 2))
root.geometry("+{}+{}".format(x_coordinate, y_coordinate - 20))
root.mainloop() | python | MIT | a68e84e070c73e8595ea238791e49c9e025a7cd4 | 2026-01-05T07:11:03.703387Z | false |
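The centering arithmetic above generalizes to a small helper; a sketch under the same tkinter assumptions (`center_window` is a hypothetical name, not part of the repo):

```python
import tkinter as tk

def center_window(root: tk.Tk, y_offset: int = 0) -> None:
    # Mirrors run.py: realize the window, then offset by half the leftover space
    root.update()
    x = (root.winfo_screenwidth() - root.winfo_width()) // 2
    y = (root.winfo_screenheight() - root.winfo_height()) // 2
    root.geometry(f"+{x}+{y + y_offset}")

root = tk.Tk()
center_window(root, y_offset=-20)
root.mainloop()
```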
tmb5cg/Fifa-Autobidder | https://github.com/tmb5cg/Fifa-Autobidder/blob/a68e84e070c73e8595ea238791e49c9e025a7cd4/src/gui.py | src/gui.py | from datetime import datetime
import importlib
import queue
import threading
import time
import tkinter as tk
from tkinter import ttk
import configparser
from helpers import checkStartupFiles, create_driver, getFilters, log_event, setup_adblock, login, clearGUIstats
import autobidder
from autobidder import Autobidder
NORM_FONT = ("Helvetica", 10)
class GUI(ttk.Frame):
def __init__(self, parent):
ttk.Frame.__init__(self)
# Make the app responsive
for index in [0, 1, 2]:
self.columnconfigure(index=index, weight=1)
self.rowconfigure(index=index, weight=1)
# Create message queue
self.parentQueue = queue.Queue()
# Create value lists
self.sleep_time_list = [0, 1, 2, 3, 5, 8, 10]
self.num_cycles_list = [1, 2, 3, 4, 5, 6]
self.expiration_cutoff_mins_list = [2, 3, 4, 5, 6, 7, 10]
self.margin_list = [100, 150, 200, 250, 300, 350, 400, 450]
self.undercut_market_on_list_list = [0, 1]
self.undercut_market_on_relist_list = [1, 0, 2]
self.futbin_max_price_list = [800, 1000, 1200]
self.platform_list = ["Xbox", "Playstation", "PC"]
# Read initial config state on start, load into variables
self.config = configparser.ConfigParser(allow_no_value=True)
self.config.read("./data/settings.ini")
# Create variables for email password and futbin URL
self.emailVar = tk.StringVar(name="email")
self.emailVar.set(str(self.config.get("Logins", "email")))
self.passwordVar = tk.StringVar(name="password")
self.passwordVar.set(str(self.config.get("Logins", "password")))
self.futbinURLVar = tk.StringVar(name="futbinURL")
self.futbinURLVar.set(str(self.config.get("Other", "futbin_url")))
self.autoInputVar = tk.IntVar(name="autoinputVar")
self.autoInputVar.set(int(self.config.get("Other", "autoinput")))
# Create variables for bot statistics
stats_options = self.config.options("Statistics")
self.GUI_STATS_VARS = []
for useroption in stats_options:
var = tk.StringVar(name=str(useroption))
value = self.config.get("Statistics", useroption)
var.set(str(value))
self.GUI_STATS_VARS.append(var)
# Create variables for bot settings
stats_options = self.config.options("Settings")
self.GUI_SETTINGS_VARS = []
for useroption in stats_options:
# Create var
var = tk.StringVar(name=useroption)
value = self.config.get("Settings", useroption)
var.set(str(value))
self.GUI_SETTINGS_VARS.append(var)
# Create variables for bot filters
options = self.config.options("Other")
webapp_options = ['quality', 'rarity', 'league', 'club', 'country', 'position']
self.GUI_URL_VARS = []
for useroption in options:
if useroption in webapp_options:
value = self.config.get("Other", useroption)
labeltext = str(useroption) + ": "
labeltext = labeltext.lower()
var = tk.StringVar(name=str(useroption))
var.set(str(value))
self.GUI_URL_VARS.append(var)
# Create widgets :)
self.setup_widgets()
clearGUIstats()
checkStartupFiles()
self.update_settings()
self.initialize_driver()
def setup_widgets(self):
# - - - - - - - STATISTICS - - - - - - -
# Create a Frame
self.statistics_frame = ttk.LabelFrame(self, text="Statistics", padding=(5, 10))
self.statistics_frame.grid( row=0, column=0, padx=(20, 10), pady=(20, 10), sticky="nsew")
# Assign vars
self.NUM_STATISTICS = 0
for stat in self.GUI_STATS_VARS:
self.NUM_STATISTICS += 1
label_name = str(stat) + ": "
splitted = label_name.replace("_", " ").split(" ")
label_name = str(splitted[0].capitalize()) + " " + str(splitted[1].capitalize())
# Create left side label for the statistic using stringvars name
stat_label = ttk.Label(self.statistics_frame, text=label_name, font=NORM_FONT)
stat_label.grid(row=self.NUM_STATISTICS, column=0, padx=5, pady=3, sticky="nsew")
# Create label that tracks the stringvar, which we will pass to a list that function will update
value_label = ttk.Label(self.statistics_frame, textvariable=stat, font=NORM_FONT)
value_label.grid(row=self.NUM_STATISTICS, column=1, padx=5, pady=3, sticky="nsew")
# - - - - - - - SETTINGS - - - - - - -
# Create a Frame
self.settings_frame = ttk.LabelFrame(self, text="Settings", padding=(20, 10))
self.settings_frame.grid(row=0, column=1, padx=(20, 10), pady=(20, 10), sticky="nsew")
# Assign vars
self.NUM_SETTINGS = 0
for stat in self.GUI_SETTINGS_VARS:
self.NUM_SETTINGS += 1
label_name = str(stat) + ": "
splitted = label_name.replace("_", " ").split(" ")
final_label = ''
for word in splitted:
if word != "market":
final_label += (str(word.capitalize()) + " ")
# Create left side label for the statistic using stringvars name
stat_label = ttk.Label(self.settings_frame, text=final_label, font=NORM_FONT)
stat_label.grid(row=self.NUM_SETTINGS, column=0, padx=5, pady=3, sticky="nsew")
self.DROPDOWN = []
if str(stat) == "sleep_time": self.DROPDOWN = self.sleep_time_list
if str(stat) == "num_cycles": self.DROPDOWN = self.num_cycles_list
if str(stat) == "expiration_cutoff_mins": self.DROPDOWN = self.expiration_cutoff_mins_list
if str(stat) == "margin": self.DROPDOWN = self.margin_list
if str(stat) == "undercut_market_on_list": self.DROPDOWN = self.undercut_market_on_list_list
if str(stat) == "undercut_market_on_relist": self.DROPDOWN = self.undercut_market_on_relist_list
if str(stat) == "futbin_max_price": self.DROPDOWN = self.futbin_max_price_list
if str(stat) == "platform": self.DROPDOWN = self.platform_list
# Create right side option menu, assign stringvar
optionmenu = ttk.OptionMenu(self.settings_frame, stat, str(self.config["Settings"][str(stat)]), *self.DROPDOWN)
optionmenu.grid(row=self.NUM_SETTINGS, column=1, padx=5, pady=3, sticky="nsew")
# - - - - - - - FUTBIN URL - - - - - - -
# Create a Frame
self.filters_frame_top = ttk.LabelFrame(self, text="Filters Holder", padding=(20, 10))
self.filters_frame_top.grid( row=0, column=2, padx=(20, 10), pady=(20, 10), sticky="nsew")
self.filters_frame = ttk.LabelFrame(self.filters_frame_top, text="Current Filters:", padding=(20, 10))
self.filters_frame.grid( row=0, column=0, padx=(10, 5), pady=(6, 1), sticky="nsew")
# Entry
self.entry = ttk.Entry(self.filters_frame_top, textvariable=self.futbinURLVar)
self.entry.grid(row=1, column=0, padx=5, pady=(10, 10), sticky="ew")
# Auto enter filters input
self.autoinput = ttk.Checkbutton(self.filters_frame_top, text="Auto enter filters (beta)", style="Switch.TCheckbutton", variable=self.autoInputVar, onvalue=1, offvalue=0)
self.autoinput.grid(row=2, column=0, padx=5, pady=10, sticky="nsew")
# Assign vars
self.NUM_FILTERS = 0
for filter in self.GUI_URL_VARS:
self.NUM_FILTERS += 1
label_name = str(filter) + ": "
splitted = label_name.replace("_", " ").split(" ")
label_name = str(splitted[0].capitalize()) + " " + str(splitted[1].capitalize())
# Create left side label for the filter using stringvars name
filter_label = ttk.Label(self.filters_frame, text=label_name, font=NORM_FONT)
filter_label.grid(row=self.NUM_FILTERS, column=0, padx=5, pady=3, sticky="nsew")
# Create label that tracks the stringvar, which we will pass to a list that function will update
value_label = ttk.Label(self.filters_frame, textvariable=filter, font=NORM_FONT)
value_label.grid(row=self.NUM_FILTERS, column=1, padx=5, pady=3, sticky="nsew")
# - - - - - - - START BOT BUTTONS - - - - - - -
# Create a Frame
self.buttons_frame = ttk.LabelFrame(self, text="Buttons", padding=(20, 10))
self.buttons_frame.grid(row=0, column=3, padx=(20, 10), pady=(20, 10), sticky="nsew")
# Button Login
self.loginButton = ttk.Button(self.buttons_frame, text="Login", command=self.login)
self.loginButton.grid(row=0, column=0, columnspan=2, padx=5, pady=10, sticky="nsew")
# Button Start bot
self.startButton = ttk.Button(self.buttons_frame, text="Start Bot", command=self.startBot)
self.startButton.grid(row=1, column=0, columnspan=2, padx=5, pady=10, sticky="nsew")
# Button Discord
self.button = ttk.Button(self.buttons_frame, text="Join Discord")
self.button.grid(row=2, column=0, padx=5, columnspan=2, pady=10, sticky="nsew")
# Button Help
self.button = ttk.Button(self.buttons_frame, text="Help")
self.button.grid(row=3, column=0, columnspan=2, padx=5, pady=10, sticky="nsew")
# Entry email
self.emailEntryLabel = ttk.Label(self.buttons_frame, text="Email: ", font=NORM_FONT)
self.emailEntryLabel.grid(row=4, column=0, padx=5, pady=3, sticky="nsew")
self.emailEntry = ttk.Entry(self.buttons_frame, textvariable=self.emailVar)
self.emailEntry.grid(row=4, column=1, padx=5, pady=(10, 10), sticky="ew")
# Entry password
self.passwordEntryLabel = ttk.Label(self.buttons_frame, text="Password: ", font=NORM_FONT)
self.passwordEntryLabel.grid(row=5, column=0, padx=5, pady=3, sticky="nsew")
self.passwordEntry = ttk.Entry(self.buttons_frame, textvariable=self.passwordVar, show="*")
self.passwordEntry.grid(row=5, column=1, padx=5, pady=(10, 10), sticky="ew")
# - - - - - - - BID ROUNDS TABLE - - - - - - -
# Create a Frame
self.logs_frame = ttk.LabelFrame(self, text="Logs", padding=(20, 10))
self.logs_frame.grid(row=1, column=0, columnspan=4, padx=(20, 10), pady=10, sticky="nsew")
columns = ["Time", "Elapsed", "ID", "Won", "Lost", "Bids", "Requests", "Margin", "Sold", "Relisted", "Profit", "PPF"]
self.bidrounds_table = ttk.Treeview(self.logs_frame, columns=columns, show="headings", height=3)
for col in columns:
colwidth = 70
self.bidrounds_table.column(col, width=colwidth)
self.bidrounds_table.heading(col, text=col)
# LOAD IN TABLE
txt = open("./data/bid_rounds.txt", "r", encoding="utf8")
for aline in txt:
line = aline.strip("\n").split(",")
condensed_row_to_insert = []
for x in line:
condensed_row_to_insert.append(x)
self.bidrounds_table.insert('', 'end', values=condensed_row_to_insert)
txt.close()
self.bidrounds_table.grid(row=1,column=0, pady=5)
# - - - - - - - LOGS TABLE - - - - - - -
columns = ["Live Logs"]
self.logs_table = ttk.Treeview(self.logs_frame, columns=columns, show="headings", height=3)
for col in columns:
colwidth = 840
self.logs_table.column(col, width=colwidth)
self.logs_table.heading(col, text=col)
self.logs_table.grid(row=2, column=0)
def initialize_driver(self):
log_event(self.parentQueue, " - - - - Bot started - - - - ")
self.driver = create_driver()
setup_adblock(self.driver)
# Continuously update user settings
def update_settings(self):
try:
self.checkqueue()
except:
print("Error checking queue")
try:
self.config.read("./data/settings.ini")
for option in self.GUI_STATS_VARS:
# GET current stat value in config.ini
stat_value = self.config.get("Statistics", str(option))
# SET updated value to stringVar
option.set(str(stat_value))
for option in self.GUI_SETTINGS_VARS:
# Get DISPLAYED value pulled from Dropdown memory object
choice = option.get()
# WRITE displayed value in writing to config.ini
self.config.set("Settings", str(option), str(choice))
# Update email, pwd, Futbin URL
pwd_on_gui = self.passwordVar.get()
email_on_gui = self.emailVar.get()
self.config.set("Logins", "email", str(email_on_gui))
self.config.set("Logins", "password", str(pwd_on_gui))
# Check if URL has changed
url_on_gui = str(self.futbinURLVar.get())
url_on_disk = str(self.config.get("Other", "futbin_url"))
if (url_on_gui != url_on_disk):
# Run futbin function
filters = getFilters(url_on_gui)
for f in self.GUI_URL_VARS:
f.set("")
f_name = str(f)
self.config.set("Other", f_name, "")
if f_name in filters:
self.config.set("Other", f_name, str(filters[f_name]))
f.set(str(filters[f_name]))
log_event(self.parentQueue, "Successfully updated Futbin filters")
self.config.set("Other", "futbin_url", str(url_on_gui))
self.config.set("Other", "autoinput", str(self.autoInputVar.get()))
with open("./data/settings.ini", 'w') as configfile:
self.config.write(configfile)
# Update GUI labels every 3 seconds
self.after(3000, self.update_settings)
except:
print("Error updating GUI, restart")
def checkqueue(self):
while self.parentQueue.qsize():
try:
msg = self.parentQueue.get(0)
if (msg[1] == True):
# Send to table
msg = msg[0]
line_split_into_string = msg.strip("\n").split(",")
self.bidrounds_table.insert('', 'end', values=line_split_into_string)
self.bidrounds_table.yview_moveto(1)
hs = open("./data/bid_rounds.txt", "a", encoding="utf8")
hs.write(msg + "\n")
hs.close()
else:
self.write_logs_tofile(msg[0])
except queue.Empty:
pass
def write_logs_tofile(self, event):
file_object = open('./data/output.txt', 'a', encoding="utf8")
currentTime = datetime.now()
dt_string = currentTime.strftime("[%I:%M:%S %p] ")
full_log_print = str(dt_string + event + "\n")
print(str(dt_string + event))
msg_for_table = [event]
self.logs_table.insert('', 'end', values=msg_for_table)
self.logs_table.yview_moveto(1)
file_object.write(full_log_print)
file_object.close()
def periodiccall(self):
if self.thread.is_alive():
self.after(100, self.periodiccall)
else:
self.loginButton.config(state="active")
self.startButton.config(state="active")
self.startButton.config(style="", text="Start bot")
# self.master.configure(bg="grey")
def login(self):
log_event(self.parentQueue, "Logging in...")
self.loginButton.config(state="disabled")
self.thread = ThreadedClient(self.parentQueue, "login", self.driver)
self.thread.start()
self.periodiccall()
def startBot(self):
log_event(self.parentQueue, "Autobidder started")
self.startButton.config(state="disabled", style="Accent.TButton", text="RUNNING")
self.thread = ThreadedClient(self.parentQueue, "autobidder", self.driver)
self.thread.start()
self.periodiccall()
class ThreadedClient(threading.Thread):
def __init__(self, queue, action, driver):
threading.Thread.__init__(self)
self.queue = queue
self.action = action
self.driver = driver
def run(self):
if (self.action == "autobidder"):
importlib.reload(autobidder)
from autobidder import Autobidder
autobidder_obj = Autobidder(self.driver, self.queue)
autobidder_obj.run()
if (self.action == "login"):
time.sleep(5)
self.config = configparser.ConfigParser()
self.config.read("./data/settings.ini")
USER = {
"email": self.config.get("Logins", "email"),
"password": self.config.get("Logins", "password"),
}
login(self.queue, self.driver, USER) | python | MIT | a68e84e070c73e8595ea238791e49c9e025a7cd4 | 2026-01-05T07:11:03.703387Z | false |
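`GUI.checkqueue` and `ThreadedClient` above follow a standard tkinter threading pattern: worker threads never touch widgets, they only `put` messages on a `queue.Queue` that the main thread drains via `after()`. A standalone sketch of just that pattern (plain tkinter, no bot logic, all names hypothetical):

```python
import queue
import threading
import time
import tkinter as tk

def worker(q: queue.Queue) -> None:
    # Producer thread: the thread-safe queue is the only shared object
    for i in range(3):
        time.sleep(1)
        q.put(f"event {i}")

def poll(root: tk.Tk, q: queue.Queue, label: tk.Label) -> None:
    # Consumer: drain the queue on the Tk main thread, then reschedule
    try:
        while True:
            label["text"] = q.get_nowait()
    except queue.Empty:
        pass
    root.after(100, poll, root, q, label)

root = tk.Tk()
q = queue.Queue()
label = tk.Label(root, text="waiting...")
label.pack()
threading.Thread(target=worker, args=(q,), daemon=True).start()
poll(root, q, label)
root.mainloop()
```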
tmb5cg/Fifa-Autobidder | https://github.com/tmb5cg/Fifa-Autobidder/blob/a68e84e070c73e8595ea238791e49c9e025a7cd4/src/autobidder.py | src/autobidder.py | import csv
from os import path
import random
from csv import reader
from datetime import datetime
from datetime import date
from decimal import Decimal
from time import sleep
from selenium import webdriver
from selenium.common.exceptions import (
NoSuchElementException, TimeoutException, WebDriverException)
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support import ui
from selenium.webdriver.support.wait import WebDriverWait
import configparser
from helpers import getFilters, log_event
class Autobidder:
def __init__(self, driver, queue):
self.driver = driver
self.queue = queue
self.playerlist = []
self.players = []
# Get user config statistics, and GUI labels
self.config = configparser.ConfigParser()
self.config.read("./data/settings.ini")
# Load in frontend statistics on bot init
self.user_players_won = int(
self.config.get("Statistics", "players_won"))
self.user_watchlist_outbid = int(
self.config.get("Statistics", "players_lost"))
self.user_transferlist_sold = int(
self.config.get("Statistics", "players_sold"))
self.user_transferlist_relisted = int(
self.config.get("Statistics", "players_relisted"))
self.user_num_coins = int(
self.config.get("Statistics", "current_coins"))
self.user_projected_profit = int(
float(self.config.get("Statistics", "projected_profit")))
self.total_cycles = int(self.config.get("Statistics", "total_cycles"))
self.user_requests_made = int(
self.config.get("Statistics", "requests_made"))
self.user_bids_made = int(self.config.get("Statistics", "bids_made"))
self.user_transferlist_selling = int(
self.config.get("Statistics", "current_selling"))
# Assign frontend user config settings to memory on init
self.undercut_market_on_list, self.sleep_time, self.num_cycles, self.expiration_cutoff_mins, self.margin, self.undercut_market_on_relist, self.futbin_max_price, self.platform = self.getUserConfig()
# Session variables assigned on init
self.bids_made_this_round = 0
self.requests_made_this_round = 0
self.bidround_number = 0
self.players_sold_this_round = 0
self.players_expired_this_round = 0
self.players_won_this_round = 0
self.players_lost_this_round = 0
self.projected_profit_this_round = 0
self.profit_per_player_this_round = 0
self.start_time = 0
self.end_time = 0
self.LAST_UPDATED_CUTOFF = 80
self.user_blank_bids_softban_count = 0
self.popup_text = ""
self.transferlistInfiniteLoopCounter = 0
self.botRunning = True
# Keep PUSH_TO_GOOGLE set to False or the bot will break; this is left over from my personal version with cloud logging
self.PUSH_TO_GOOGLE = False
self.USE_FUTBIN_API = False
# Other global stuff to help user intervention
self.original_window = self.driver.window_handles[0]
self.current_tab_viewing = ""
self.current_tab_num = 0
self.cookies_accepted = False
# This is the main function
def run(self):
devmode = False
if devmode:
self.fetch_player_data()
else:
self.driver.switch_to.window(self.driver.window_handles[0])
state = self.checkState()
self.update_autobidder_logs()
if state == "transfer targets":
self.listPlayers()
elif state == "transfer list":
expiredplayers, players_sold, players_currently_listed, players_unlisted = self.getTransferlistInfo()
self.listExpired()
elif state == "search the transfer market":
for x in range(int(self.num_cycles)):
newstate = self.checkState()
if newstate == "search the transfer market":
if self.botRunning:
if (int(self.config["Other"]["autoinput"]) == 1):
self.enterFilters()
self.bidround_number = x
self.getFutbinList(
str(self.config["Other"]["futbin_url"]))
if (self.botRunning):
self.sleep_approx(3)
self.clickSearch()
self.bid()
self.total_cycles += 1
self.update_autobidder_logs()
# only sleep if is last bid round
if (x+1 < (int(self.num_cycles))):
if self.botRunning:
log_event(
self.queue, "Sleeping for " + str(self.sleep_time))
self.sleep_approx(int(self.sleep_time))
self.update_autobidder_logs()
log_event(self.queue, "- - - - FINISHED ALL BID ROUNDS - - - ")
elif state == "search results":
self.bid()
else:
log_event(
self.queue, "User error: user not on the 'Search the Transfer Market' page ")
log_event(self.queue, "Read the instructions on the GitHub repo")
if self.popup_text == "Connect to a network in order to use the app.":
log_event(
self.queue, "network connection lost detected -- insert function ehre to click OK and start over")
eventData = ["00000000000000", 0, 0, "error",
"error", "NetworkConnectionLost"]
self.log_event(
self.queue, "STOPPED at master end of Test main functio - no internet", eventData)
elif self.popup_text == "Unable to authenticate with the FUT servers. You will now be logged out of the application.":
print("Captcha -- insert function ehre to click OK and start over")
eventData = ["00000000000000", 0, 0,
"error", "error", "UnableToAuthenticate"]
self.log_event(
self.queue, "STOPPED at master end of Test main function -- unable to authenticate (doesn't auto mean captcha) ", eventData)
else:
eventData = ["00000000000000", 0, 0, "error",
"error", "GeneralUserInterventionOrBotBroke"]
self.log_event(self.queue, "Bot stopped", eventData)
def bid(self):
self.wait_for_visibility(
"/html/body/main/section/section/div[2]/div/div/section[1]/div/ul/li[1]")
players_to_use = self.getTargetListIDS()
num_eligible = 0
keepgoing = True
no_manual_user_intervention = True
redPopupVisible = False
watchlistFullPopup = False
# zero out
self.bids_made_this_round = 0
self.requests_made_this_round = 0
self.players_sold_this_round = 0
self.players_expired_this_round = 0
self.players_won_this_round = 0
self.players_lost_this_round = 0
self.projected_profit_this_round = 0
self.profit_per_player_this_round = 0
self.start_time = datetime.now()
self.end_time = 0
self.user_blank_bids_softban_count = 0
self.popup_text = ""
self.hasExceededTimeCutoff = False
reversePage = False
# and no_manual_user_intervention and not redPopupVisible:
while keepgoing and self.botRunning:
# try:
wait_for_shield_invisibility(self.driver)
if (redPopupVisible):
if (self.popup_text == "Item removed from Transfer Targets"):
log_event(
self.queue, "Red popup (all good): failed bid - item removed from transfer targets - all good")
self.popup_text = ""
redPopupVisible = False
elif (self.popup_text == "Bid status changed, auction data will be updated."):
log_event(
self.queue, "Red popup (all good): bid status changed, auction data will be upddated")
self.popup_text = ""
redPopupVisible = False
self.goNextPage() # go to next page bc of weird error
elif (self.popup_text == "Item added to Transfer Targets"):
print(
"Red popup (or white popup) (all good): item added to transfer targets lol wtf ")
self.popup_text = ""
redPopupVisible = False
elif (self.popup_text == "Cannot remove this item from your Transfer Targets."):
log_event(
self.queue, "Red popup (all good): can't remove item from transfer targets popup, gonna continue")
self.popup_text = ""
redPopupVisible = False
elif (self.popup_text == "Unable to authenticate with the FUT servers. You will now be logged out of the application."):
eventData = ["00000000000000", 0, 0, "error",
"error", "UnableToAuthenticatePopup"]
self.log_event(
self.queue, "STOPPED - unable to authenticate popup (this time is it in red pop up func, but i think its actually a popup window/box", eventData)
# kill bot
keepgoing = False
self.botRunning = False
elif (self.popup_text == "Too many actions have been taken, and use of this feature has been temporarily disabled."):
keepgoing = False
self.botRunning = False
eventData = ["00000000000000", 0, 0,
"error", "error", "TooManyActions"]
log_event(self.queue, "too many actions taken - this shouldve logged to google sheets that user is softbanned. NOW assume not using bot - go to transfer list and watchlist functions, ideally have a softban fixer method ")
self.log_event(
self.queue, "STOPPED - SOFTBAN - too many actions taken red popup! add method that fixes this", eventData)
else:
log_event(
self.queue, "OTHER POPUP MESSAGE DETECTED, stopping bot")
log_event(self.queue, self.popup_text)
print("check if it is network connection issue")
keepgoing = False
self.botRunning = False
elif (watchlistFullPopup):
self.popupText = self.getText(
"/html/body/div[4]/section/div/p")
if (self.popupText == "You are already the highest bidder. Are you sure you want to bid?"):
# click cancel
log_event(
self.queue, "PopupBox You are highest bidder box appeared")
self.clickButton(
"/html/body/div[4]/section/div/div/button[1]")
self.sleep_approx(5)
elif (self.popupText == "Your Transfer Targets list is full. Please try again later, or clear items from your Watched and Active list."):
log_event(
self.queue, "PopupBox Watchlist is full popup - clicked OK")
self.clickButton(
"/html/body/div[4]/section/div/div/button")
keepgoing = False
elif (self.popupText == "Connect to a network in order to use the app."):
eventData = ["00000000000000", 0, 0, "error",
"error", "InternetConnectionLost"]
self.log_event(
self.queue, "PopupBox STOPPED - u connect to network popup", eventData)
keepgoing = False
self.botRunning = False
elif (self.popupText == "You cannot unwatch an item you are bidding on."):
log_event(
self.queue, "PopupBox Can't unwatch item bidding on ")
self.clickButton(
"/html/body/div[4]/section/div/div/button")
self.popup_text = ""
elif (self.popupText == "Your bid must be higher than the current bid"):
log_event(
self.queue, "PopupBox your bid must be higher than current bid ")
self.clickButton(
"/html/body/div[4]/section/div/div/button")
self.popup_text = ""
elif (self.popupText == "Unable to authenticate with the FUT servers. You will now be logged out of the application."):
log_event(
self.queue, "STOPPED - PopupBox BAD - unable to authenticate (detected in popup function, not redpopup) should click OK")
# self.popup_text == ""
keepgoing = False
self.botRunning = False
else:
log_event(
self.queue, "STopping bot, Weird popup message is not any of above - should get text")
log_event(self.queue, self.popupText)
keepgoing = False
self.botRunning = False
elif self.requests_made_this_round > 50:
log_event(
self.queue, "Made over 50 requests, stopping, keepgoing = False")
keepgoing = False
elif (self.user_num_coins < 1000):
log_event(self.queue, "Coins too low, keepgoing = False")
keepgoing = False
elif (no_manual_user_intervention):
sleep(3)
players = self.getAllPlayerInfo2() # Re-load player list and cycle through them
num_eligible = 0
refresh = False
for p in players:
# print(p)
if refresh == False:
id = int(p[16])
if (id in players_to_use):
if ("expired" not in p[2]) and ("highest-bid" not in p[2]) and ("selected" not in p[2]):
if p[8] > 10: # time is greater than 10 secs
# time remaining is below the expiration cutoff
if p[8] < int(self.expiration_cutoff_mins * 60):
pid = p[16]
curbid = int(p[6])
position = p[1]
rating = p[3]
sell_quickily_price = self.getSellPrice(
pid)
if (sell_quickily_price > 1000):
breakevenprice = self.round_nearest(
0.95 * sell_quickily_price, 100)
else:
breakevenprice = self.round_nearest(
0.95*sell_quickily_price)
if (curbid > 1000):
bidprice = curbid + 100
else:
bidprice = curbid+50
margin = int(self.margin)
idealbid = self.round_nearest(
breakevenprice-margin)
if ((breakevenprice - margin) >= bidprice):
idealbid = self.round_nearest(
breakevenprice-margin)
if ((sell_quickily_price * 0.95) - idealbid) >= margin:
# ID = p[0]
nation = self.getText("/html/body/main/section/section/div[2]/div/div/section[1]/div/ul/li[" + str(
p[0]) + "]/div/div[1]/div[1]/div[8]/div[1]/span[2]")
league = self.getText("/html/body/main/section/section/div[2]/div/div/section[1]/div/ul/li[" + str(
p[0]) + "]/div/div[1]/div[1]/div[8]/div[2]/span[2]")
team = self.getText("/html/body/main/section/section/div[2]/div/div/section[1]/div/ul/li[" + str(
p[0]) + "]/div/div[1]/div[1]/div[8]/div[3]/span[2]")
eventData = [pid, p[4], curbid, idealbid, sell_quickily_price, (sell_quickily_price*.95), ((
sell_quickily_price*.95) - idealbid), nation, league, team, position, rating]
bidSuccesful = self.makebid_individualplayer2(
p[0], idealbid)
if (bidSuccesful == True):
self.log_event(self.queue, "BID " + str(self.bids_made_this_round + 1) + ": " + str(p[4]) + " - CurBid: " + str(curbid) + " -> Bid to make: " + str(idealbid) + " -> Sell price " + str(
sell_quickily_price) + " -> Minus EA tax: " + str(int(sell_quickily_price * .95)) + " -> Est. Profit: " + str(int((sell_quickily_price*.95) - idealbid)), eventData)
if self.user_blank_bids_softban_count > 15:
log_event(
self.queue, "im guessing this is a softban but red popup didn't show")
self.botRunning = False
num_eligible += 1
refresh = True
else:
self.hasExceededTimeCutoff = True
refresh = True
# Go to next page if no eligible players
if (num_eligible == 0) and (self.hasExceededTimeCutoff == False):
self.goNextPage()
watchlistFullPopup = self.check_exists_by_xpath(
"/html/body/div[4]/section/div/div/button")
no_manual_user_intervention = self.checkState("transfermarket")
redPopupVisible = self.check_exists_by_xpath(
"/html/body/div[5]/div")
if redPopupVisible:
self.popup_text = str(
self.getText("/html/body/div[5]/div"))
page = self.checkState("transfermarket")
if page == False:
self.botRunning = False
log_event(self.queue, "bot running set to false")
if self.hasExceededTimeCutoff:
log_event(self.queue, "Time cutoff exceeded - researching")
self.clickBack()
self.hasExceededTimeCutoff = False
self.sleep_approx(5)
self.clickSearch()
self.sleep_approx(5)
if (self.botRunning):
log_event(self.queue, "Total Bids made: " + str(self.bids_made_this_round) +
" Requests: " + str(self.requests_made_this_round))
log_event(self.queue, "Margin: " + str(self.margin))
self.sleep_approx(3)
self.listExpired()
else:
log_event(
self.queue, "self.botrunning was false, hopefully user intervention")
def listExpired(self):
# add this method to helpersv2
wait_for_shield_invisibility(self.driver)
self.go_to_transferlist() # note this is using old helper object
wait_for_shield_invisibility(self.driver)
self.sleep_approx(5)
wait_for_player_shield_invisibility(self.driver)
expiredplayers, players_sold, players_currently_listed, players_unlisted = self.getTransferlistInfo()
log_event(self.queue, "Players sold: " + str(players_sold))
log_event(self.queue, "Players expired: " + str(expiredplayers))
log_event(self.queue, "Players currently listed: " +
str(players_currently_listed))
log_event(self.queue, "Players unlisted: " + str(players_unlisted))
# FIRST Clear sold - first time
if players_sold > 0:
try:
self.clearSold()
self.user_transferlist_sold += players_sold
self.players_sold_this_round += players_sold
self.update_autobidder_logs()
players_sold = 0 # reassign players_sold
except Exception as e:
print(e)
log_event(self.queue, "clear sold error")
wait_for_shield_invisibility(self.driver)
expiredplayers, players_sold, players_currently_listed, players_unlisted = self.getTransferlistInfo()
exception_counter = 0
# SECOND start listing expired players, which can get complicated by sold
if (expiredplayers > 0):
wait_for_player_shield_invisibility(self.driver)
player_exists = self.check_exists_by_xpath(
"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]")
if (player_exists):
self.scrollIntoView(
"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]")
self.clickButton(
"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]")
wait_for_player_shield_invisibility(self.driver)
status = True
while status:
status = self.check_exists_by_xpath(
"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]")
try:
self.clickButton(
'/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]/div') # click player
wait_for_shield_invisibility(self.driver)
# click re-list
self.clickButton(
"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[2]/div[1]/button")
wait_for_shield_invisibility(self.driver)
if int(self.undercut_market_on_relist) == 0:
print("undercut market is 0, not gonna subtract")
elif int(self.undercut_market_on_relist) == 1:
# click minus button
self.clickButton(
"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[2]/div[2]/div[3]/div[2]/button[1]")
elif int(self.undercut_market_on_relist) == 2:
# click minus button
self.clickButton(
"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[2]/div[2]/div[3]/div[2]/button[1]")
self.sleep_approx(0.1)
# click minus button
self.clickButton(
"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[2]/div[2]/div[3]/div[2]/button[1]")
wait_for_shield_invisibility(self.driver)
rating = self.getText(
"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]/div/div[1]/div[1]/div[5]/div[2]/div[1]")
position = self.getText(
"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]/div/div[1]/div[1]/div[5]/div[2]/div[2]")
name = self.getText(
"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]/div/div[1]/div[2]")
pace = self.getText(
"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]/div/div[1]/div[3]/ul/li[1]/span[2]")
shooting = self.getText(
"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]/div/div[1]/div[3]/ul/li[2]/span[2]")
passing = self.getText(
"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]/div/div[1]/div[3]/ul/li[3]/span[2]")
dribbling = self.getText(
"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]/div/div[1]/div[3]/ul/li[4]/span[2]")
defending = self.getText(
"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]/div/div[1]/div[3]/ul/li[5]/span[2]")
physical = self.getText(
"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]/div/div[1]/div[3]/ul/li[6]/span[2]")
nation = self.getText(
"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]/div/div[1]/div[1]/div[8]/div[1]/span[2]")
league = self.getText(
"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]/div/div[1]/div[1]/div[8]/div[2]/span[2]")
team = self.getText(
"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]/div/div[1]/div[1]/div[8]/div[3]/span[2]")
player_data = [rating, pace, shooting,
passing, dribbling, defending, physical]
unique_player_id = ""
for x in player_data:
x = str(x)
unique_player_id += x
self.scrollIntoView(
"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[2]/div[2]/button")
relist_price = int(self.getInputBoxText(
"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[2]/div[2]/div[3]/div[2]/input"))
self.clickButton(
"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[2]/div[2]/button") # List player
text = "RELIST PID: " + str(unique_player_id) + " NAME: " + str(
name) + " RELISTPRICE: " + str(relist_price) + " POSITION: " + str(position)
eventData = [unique_player_id, name, relist_price,
position, rating, nation, league, team]
self.log_event(self.queue, text, eventData)
# this is actually the number of players that expired
self.user_transferlist_relisted += 1
self.players_expired_this_round += 1
self.user_projected_profit -= (0.95*50)
self.update_autobidder_logs()
# self.sleep_approx(3)
except Exception as e:
print(e)
exception_counter += 1
if exception_counter > 5:
status = False
self.sleep_approx(3)
expiredplayers, players_sold, players_currently_listed, players_unlisted = self.getTransferlistInfo()
# THIRD Clear sold - last time
if players_sold > 0:
try:
self.clearSold()
self.user_transferlist_sold += players_sold
self.players_sold_this_round += players_sold
self.update_autobidder_logs()
except:
log_event(self.queue, "clear sold error")
self.sleep_approx(3)
log_event(self.queue, "Going to watchlist")
# INSERT CHECK FOR POPUP HERE
print("todo: check for popup")
self.go_to_watchlist()
self.listPlayers()
def listPlayers(self):
self.sleep_approx(5)
projected_profit = 0
status = True
exception_counter = 0
players_won = 0
players_expired = 0
transferlist_full = False
while status and (transferlist_full == False):
try:
# every 10 seconds, check if bid is currently active and expiring etc
activeBid = self.check_exists_by_xpath(
"/html/body/main/section/section/div[2]/div/div/div/section[1]/ul/li")
wait_for_shield_invisibility(self.driver)
self.sleep_approx(10)
if activeBid == False:
self.sleep_approx(10)
print(
"active bid is gone - just finished sleeping 10 seconds, gonna clear expired")
self.clearExpired()
self.sleep_approx(2)
players_won, players_expired = self.getWatchlistInfo()
unlistedPlayers = True
counter = 0
while unlistedPlayers and (transferlist_full == False):
try:
wait_for_shield_invisibility(self.driver)
wait_for_player_shield_invisibility(self.driver)
pid = self.getPIDWatchlist(1)
playerPrice = int(self.getSellPrice(pid))
boughtprice = self.getText(
"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[1]/div[2]/div/span[2]")
rating = int(self.getText(
"/html/body/main/section/section/div[2]/div/div/section/div/div/div[1]/div/div[2]/div/div/div[1]/div/div[7]/div[2]/div[1]"))
position = self.getText(
"/html/body/main/section/section/div[2]/div/div/section/div/div/div[1]/div/div[2]/div/div/div[1]/div/div[7]/div[2]/div[2]")
if "," in boughtprice:
boughtprice = boughtprice.replace(",", "")
boughtprice = int(boughtprice)
if playerPrice == 0:
print(
"player id not found in list func, setting player price equal to boughprice plus margin")
playerPrice = boughtprice + int(self.margin)
if (self.undercut_market_on_list == 1):
if playerPrice > 1000:
playerPrice = playerPrice - 100
else:
playerPrice = playerPrice - 50
est_profit = ((playerPrice)*.95) - boughtprice
startBid = 0
if playerPrice > 1000:
startBid = playerPrice - 100
else:
| python | MIT | a68e84e070c73e8595ea238791e49c9e025a7cd4 | 2026-01-05T07:11:03.703387Z | true |
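The bid loop's pricing arithmetic, restated as a standalone sketch. `round_nearest` here is a hypothetical stand-in for the `Autobidder` method (defined in the truncated part of the file); the constants are assumed. FUT payouts lose 5% to EA tax, so break-even is 0.95 × sell price, and the ideal bid keeps at least `margin` coins of post-tax profit.

```python
def round_nearest(value: float, step: int = 50) -> int:
    # Hypothetical stand-in for Autobidder.round_nearest
    return int(round(value / step) * step)

sell_price = 2_000   # assumed quick-sell estimate for the player
margin = 200         # desired minimum profit per flip

breakeven = round_nearest(0.95 * sell_price, 100)  # 1900 coins after tax
ideal_bid = round_nearest(breakeven - margin)      # 1700 coins
profit = sell_price * 0.95 - ideal_bid             # 200.0 coins
print(breakeven, ideal_bid, profit)
```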
tmb5cg/Fifa-Autobidder | https://github.com/tmb5cg/Fifa-Autobidder/blob/a68e84e070c73e8595ea238791e49c9e025a7cd4/src/helpers.py | src/helpers.py | import configparser
from datetime import datetime
import json
import os
from os import path
import platform
import random
import re
from time import sleep
from selenium import webdriver
from selenium.common.exceptions import (
NoSuchElementException, TimeoutException, WebDriverException)
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support import ui
from selenium.webdriver.support.wait import WebDriverWait
import undetected_chromedriver as uc
def log_event(queue, event, bidroundOver=False):
"""
Sends log to queue, which GUI handles and writes to txt file for display on GUI.
The queue object lets us talk to the GUI from separate threads, which is cool.
This was a big breakthrough in functionality.
Parameters:
queue (queue): GUI's queue object
event (str): Event log to write to data/output.txt
bidroundOver (bool): True if the event marks the end of a bid round and should go to the bid-rounds table instead of the log
"""
event = str(event)
combined = [event, bidroundOver]
queue.put(combined)
def getFilters(url):
webapp_options = ['quality', 'rarity',
'league', 'club', 'country', 'position']
futbin_options = ['league', 'nation', 'club', 'version', 'position']
full_data = ""
# Opening JSON file
with open('./data/futbin_decoder.json', "r", encoding="utf8") as json_file:
data = json.load(json_file)
full_data = data
txt = url
results = re.findall("[^&?]*?=[^&?]*", txt)
webapp_filters_output = {}
if results:
for i in results:
temp = i.split("=")
param = temp[0]
value = temp[1]
param = param.strip()
value = value.strip()
# Extract valid futbin parameters
if param in futbin_options:
try:
if (param == "nation"):
param = "country"
full_data[param]  # raises KeyError (caught below) if the filter type is unknown
try:
output = full_data[param][value]
if (param != "version"):
webapp_filters_output[param] = output[param]
else:
webapp_filters_output["quality"] = output['quality']
webapp_filters_output["rarity"] = output['rarity']
except:
continue
except:
continue
else:
print("No match")
total_filters = len(webapp_filters_output)
return webapp_filters_output
def create_driver():
system = platform.system()
# print("SYSTEM IS: " + str(system))
if system == 'Darwin':
path = 'chrome_mac/chromedriver'
elif system == 'Linux':
path = 'chrome_linux/chromedriver'
elif system == 'Windows':
path = os.path.join(os.getcwd(), 'chrome_windows', 'chromedriver.exe')
# Shoutout to the dev who created this
use_undetected_chromedriver = True
if use_undetected_chromedriver:
options = uc.ChromeOptions()
options.add_argument('--profile-directory=Profile 8')
options.add_argument('--disable-popup-blocking') # allow for new tab
# options.add_extension("adblocker/uBlock-Origin.crx")
driver = uc.Chrome(options=options)
return driver
else:
options = webdriver.ChromeOptions()
# For older ChromeDriver under version 79.0.3945.16
options.add_argument("--ignore-certificate-error")
options.add_argument("--ignore-ssl-errors")
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_experimental_option(
"excludeSwitches", ["enable-automation"])
options.add_experimental_option('useAutomationExtension', False)
# Stop annoying windows logs
options.add_argument('--disable-logging')
options.add_argument("--log-level=3")
driver = webdriver.Chrome(executable_path=path, options=options)
return driver
def setup_adblock(driver):
driver.execute_script(
"alert('Install Adblocker after accepting this prompt. Without Adblocker, FUTBIN fetch will break (way too many advertisements). After 10 seconds, bot will automatically go to Webapp. ');")
alert_present = True
while alert_present:
try:
alert_present = WebDriverWait(driver, 1).until(
EC.alert_is_present(), 'Alert is gone')
except Exception as e:
# Alert is gone, now install adblock
alert_present = False
try:
driver.get(
"https://chrome.google.com/webstore/detail/ublock-origin/cjpalhdlnbpafiamejdnhcphjbkeiagm?hl=en")
WebDriverWait(driver, 10).until(EC.visibility_of_element_located(
(By.XPATH, "/html/body/div[5]/div[2]/div/div/div[2]/div[2]/div/div/div/div")))
WebDriverWait(driver, 20).until(EC.element_to_be_clickable(
(By.XPATH, "/html/body/div[5]/div[2]/div/div/div[2]/div[2]/div/div/div/div"))).click()
except Exception as e:
# print("User broke futbin fetch, self.botRunning false")
print("Issue installing adblocker, please install manually")
driver.switch_to.window(driver.window_handles[0])
sleep(14)
# installing = True
# infiniteCounter = 0
# while installing:
# try:
# elements = "/html/body/div[3]/div[2]/div/div/div[2]"
# page_content = driver.find_elements(By.XPATH, elements)
# for elem in page_content:
# text = str(elem.text)
# text = text.strip()
# # print(text)
# lowered = text.lower()
# if (text == "Remove from Chrome"):
# installing = False
# if (lowered == "remove from chrome"):
# installing = False
# if "remove" in lowered:
# installing = False
# break
# except:
# infiniteCounter += 1
# if infiniteCounter > 10:
# print("Issue installing adblocker, restart bot")
# break
driver.get("https://www.ea.com/fifa/ultimate-team/web-app/")
def login(queue, driver, user):
try:
WebDriverWait(driver, 15).until(
EC.visibility_of_element_located(
(By.XPATH, '//*[@class="ut-login-content"]//button'))
)
sleep(random.randint(2, 4))
driver.find_element(
By.XPATH, '//*[@class="ut-login-content"]//button').click()
WebDriverWait(driver, 10).until(
EC.visibility_of_element_located((By.ID, 'email'))
)
sleep(1)
driver.find_element(By.ID, 'email').send_keys(user["email"])
sleep(1)
driver.find_element(By.ID, 'password').send_keys(user["password"])
sleep(1)
driver.find_element(
By.XPATH, '/html/body/div[1]/div[2]/section/div[1]/form/div[6]/a').click()
sleep(3)
WebDriverWait(driver, 15).until(
EC.element_to_be_clickable(
(By.XPATH, '/html/body/div/form/div/section/a[2]'))
).click()
log_event(queue, "Continue login manually")
except:
log_event(queue, "Continue login manually")
def clearGUIstats():
config = configparser.ConfigParser()
config.read("./data/settings.ini")
user_settings_current_date = str(config.get("Other", "todays_date"))
today = datetime.today().strftime('%Y-%m-%d')
today_str = str(today)
if (user_settings_current_date != today_str):
# Set Date var to current date in file
config.read("./data/settings.ini")
config.set("Other", "todays_date", today_str)
with open("./data/settings.ini", 'w') as configfile:
config.write(configfile)
# Reset GUI stats to 0 since it is new day
config.read("./data/settings.ini")
val = "0"
options = config.options("Statistics")
for stat in options:
cur_stat = config.set("Statistics", stat, val)
with open("./data/settings.ini", 'w') as configfile:
config.write(configfile)
def checkStartupFiles():
# Ensure the data files the GUI expects exist, creating empty ones if missing
src_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(os.path.dirname(src_dir), "data")
for file_name in ["output.txt", "targetplayers.txt", "logs.csv"]:
completeName = os.path.join(data_dir, file_name)
if not os.path.exists(completeName):
file1 = open(completeName, "a+")
file1.close()
| python | MIT | a68e84e070c73e8595ea238791e49c9e025a7cd4 | 2026-01-05T07:11:03.703387Z | false |
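A quick illustration of the query-string extraction `getFilters` performs, using the same regex against a made-up Futbin-style URL (a production parser could equally use `urllib.parse.parse_qs`):

```python
import re

url = "https://www.futbin.com/players?league=13&nation=54&position=ST"
pairs = re.findall(r"[^&?]*?=[^&?]*", url)
params = dict(pair.split("=", 1) for pair in pairs)
print(params)  # {'league': '13', 'nation': '54', 'position': 'ST'}
```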
MichaelStott/KivMob | https://github.com/MichaelStott/KivMob/blob/1e6591541a1164cf9080f4d0e64c166f54bd4c55/setup.py | setup.py | from setuptools import setup
setup(
name="kivmob",
version="2.0",
description="Provides AdMob support for Kivy.",
url="http://github.com/MichaelStott/KivMob",
author="Michael Stott",
license="MIT",
py_modules=["kivmob"],
install_requires=["kivy"],
zip_safe=False,
)
| python | MIT | 1e6591541a1164cf9080f4d0e64c166f54bd4c55 | 2026-01-05T07:11:54.574300Z | false |
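Since setup.py declares a single `py_modules=["kivmob"]`, installing with `pip install .` from the repo root exposes the module directly; a minimal sketch (assumes Kivy is installed; on desktop the stub bridge logs a warning and shows no ads, as the kivmob module below makes clear):

```python
from kivmob import KivMob, TestIds

ads = KivMob(TestIds.APP)  # falls back to the no-op AdMobBridge off-device
```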
MichaelStott/KivMob | https://github.com/MichaelStott/KivMob/blob/1e6591541a1164cf9080f4d0e64c166f54bd4c55/kivmob.py | kivmob.py | from kivy.core.window import Window
from kivy.logger import Logger
from kivy.metrics import dp
from kivy.utils import platform
if platform == "android":
try:
from jnius import autoclass, cast, PythonJavaClass, java_method
from android.runnable import run_on_ui_thread
activity = autoclass("org.kivy.android.PythonActivity")
AdListener = autoclass("com.google.android.gms.ads.AdListener")
AdMobAdapter = autoclass("com.google.ads.mediation.admob.AdMobAdapter")
AdRequest = autoclass("com.google.android.gms.ads.AdRequest")
AdRequestBuilder = autoclass("com.google.android.gms.ads.AdRequest$Builder")
AdSize = autoclass("com.google.android.gms.ads.AdSize")
AdView = autoclass("com.google.android.gms.ads.AdView")
Bundle = autoclass("android.os.Bundle")
Gravity = autoclass("android.view.Gravity")
InterstitialAd = autoclass("com.google.android.gms.ads.interstitial.InterstitialAd")
LayoutParams = autoclass("android.view.ViewGroup$LayoutParams")
LinearLayout = autoclass("android.widget.LinearLayout")
MobileAds = autoclass("com.google.android.gms.ads.MobileAds")
RewardItem = autoclass("com.google.android.gms.ads.rewarded.RewardItem")
#RewardedVideoAd = autoclass("com.google.android.gms.ads.rewarded.RewardedVideoAd")
#RewardedVideoAdListener = autoclass("com.google.android.gms.ads.rewarded.RewardedVideoAdListener")
View = autoclass("android.view.View")
""" TODO since no more RewardedVideoAd
class AdMobRewardedVideoAdListener(PythonJavaClass):
__javainterfaces__ = (
"com.google.android.gms.ads.reward.RewardedVideoAdListener",
)
__javacontext__ = "app"
def __init__(self, listener):
self._listener = listener
@java_method("(Lcom/google/android/gms/ads/reward/RewardItem;)V")
def onRewarded(self, reward):
Logger.info("KivMob: onRewarded() called.")
self._listener.on_rewarded(
reward.getType(), reward.getAmount()
)
@java_method("()V")
def onRewardedVideoAdLeftApplication(self):
Logger.info(
"KivMob: onRewardedVideoAdLeftApplicaxtion() called."
)
self._listener.on_rewarded_video_ad_left_application()
@java_method("()V")
def onRewardedVideoAdClosed(self):
Logger.info("KivMob: onRewardedVideoAdClosed() called.")
self._listener.on_rewarded_video_ad_closed()
@java_method("(I)V")
def onRewardedVideoAdFailedToLoad(self, errorCode):
Logger.info("KivMob: onRewardedVideoAdFailedToLoad() called.")
# Logger.info("KivMob: ErrorCode " + str(errorCode))
self._listener.on_rewarded_video_ad_failed_to_load(errorCode)
@java_method("()V")
def onRewardedVideoAdLoaded(self):
Logger.info("KivMob: onRewardedVideoAdLoaded() called.")
self._listener.on_rewarded_video_ad_loaded()
@java_method("()V")
def onRewardedVideoAdOpened(self):
Logger.info("KivMob: onRewardedVideoAdOpened() called.")
self._listener.on_rewarded_video_ad_opened()
@java_method("()V")
def onRewardedVideoStarted(self):
Logger.info("KivMob: onRewardedVideoStarted() called.")
self._listener.on_rewarded_video_ad_started()
@java_method("()V")
def onRewardedVideoCompleted(self):
Logger.info("KivMob: onRewardedVideoCompleted() called.")
self._listener.on_rewarded_video_ad_completed()
"""
except BaseException:
Logger.error(
"KivMob: Cannot load AdMob classes. Check buildozer.spec."
)
else:
"""
class AdMobRewardedVideoAdListener:
pass
"""
def run_on_ui_thread(x):
# Identity stub so @run_on_ui_thread decorators are harmless off-device
return x
class TestIds:
""" Enum of test ad ids provided by AdMob. This allows developers to
test displaying ads without setting up an AdMob account.
"""
""" Test AdMob App ID """
APP = "ca-app-pub-3940256099942544~3347511713"
""" Test Banner Ad ID """
BANNER = "ca-app-pub-3940256099942544/6300978111"
""" Test Interstitial Ad ID """
INTERSTITIAL = "ca-app-pub-3940256099942544/1033173712"
""" Test Interstitial Video Ad ID """
INTERSTITIAL_VIDEO = "ca-app-pub-3940256099942544/8691691433"
""" Test Rewarded Video Ad ID """
REWARDED_VIDEO = "ca-app-pub-3940256099942544/5224354917"
class AdMobBridge:
def __init__(self, appID):
pass
def add_test_device(self, testID):
pass
def is_interstitial_loaded(self):
return False
def new_banner(self, unitID, top_pos=True):
pass
def new_interstitial(self, unitID):
pass
def request_banner(self, options):
pass
def request_interstitial(self, options):
pass
def show_banner(self):
pass
def show_interstitial(self):
pass
def destroy_banner(self):
pass
def destroy_interstitial(self):
pass
def hide_banner(self):
pass
def set_rewarded_ad_listener(self, listener):
pass
def load_rewarded_ad(self, unitID):
pass
def show_rewarded_ad(self):
pass
class RewardedListenerInterface:
""" Interface for objects that handle rewarded video ad
callback functions
"""
def on_rewarded(self, reward_name, reward_amount):
""" Called when the video completes
:type reward_name: string
:param reward_name: Name of the reward.
:type reward_amount: string
:param reward_amount: Amount of the reward.
"""
pass
def on_rewarded_video_ad_left_application(self):
""" Called when the user closes the application while
the video is playing.
"""
pass
def on_rewarded_video_ad_closed(self):
""" Called when the user manually closes the ad before completion.
"""
pass
def on_rewarded_video_ad_failed_to_load(self, error_code):
""" Called when the rewarded video ad fails to load.
:type error_code: int
:param error_code: Integer code that corresponds to the error.
"""
pass
def on_rewarded_video_ad_loaded(self):
""" Called when the rewarded ad finishes loading.
"""
pass
def on_rewarded_video_ad_opened(self):
""" Called when the rewarded ad is opened.
"""
pass
def on_rewarded_video_ad_started(self):
""" Called when the rewarded video ad starts.
"""
pass
def on_rewarded_video_ad_completed(self):
""" Called when the rewarded video ad completes.
"""
pass
class AndroidBridge(AdMobBridge):
@run_on_ui_thread
def __init__(self, appID):
self._loaded = False
try:
MobileAds.initialize(activity.mActivity, appID)
except ValueError as error:
print(error)
self._adview = AdView(activity.mActivity)
self._interstitial = InterstitialAd(activity.mActivity)
self._rewarded = MobileAds.getRewardedVideoAdInstance(
activity.mActivity
)
self._test_devices = []
@run_on_ui_thread
def add_test_device(self, testID):
self._test_devices.append(testID)
@run_on_ui_thread
def new_banner(self, unitID, top_pos=True):
self._adview = AdView(activity.mActivity)
self._adview.setAdUnitId(unitID)
self._adview.setAdSize(AdSize.SMART_BANNER)
self._adview.setVisibility(View.GONE)
adLayoutParams = LayoutParams(
LayoutParams.MATCH_PARENT, LayoutParams.WRAP_CONTENT
)
self._adview.setLayoutParams(adLayoutParams)
layout = LinearLayout(activity.mActivity)
if not top_pos:
layout.setGravity(Gravity.BOTTOM)
layout.addView(self._adview)
layoutParams = LayoutParams(
LayoutParams.MATCH_PARENT, LayoutParams.MATCH_PARENT
)
layout.setLayoutParams(layoutParams)
activity.mActivity.addContentView(layout, layoutParams)
@run_on_ui_thread
def request_banner(self, options=None):
self._adview.loadAd(self._get_builder(options).build())
@run_on_ui_thread
def show_banner(self):
self._adview.setVisibility(View.VISIBLE)
@run_on_ui_thread
def hide_banner(self):
self._adview.setVisibility(View.GONE)
@run_on_ui_thread
def new_interstitial(self, unitID):
self._interstitial.setAdUnitId(unitID)
@run_on_ui_thread
def request_interstitial(self, options=None):
self._interstitial.loadAd(self._get_builder(options).build())
@run_on_ui_thread
def _is_interstitial_loaded(self):
self._loaded = self._interstitial.isLoaded()
def is_interstitial_loaded(self):
self._is_interstitial_loaded()
return self._loaded
@run_on_ui_thread
def show_interstitial(self):
if self.is_interstitial_loaded():
self._interstitial.show()
@run_on_ui_thread
def set_rewarded_ad_listener(self, listener):
self._listener = AdMobRewardedVideoAdListener(listener)
self._rewarded.setRewardedVideoAdListener(self._listener)
@run_on_ui_thread
def load_rewarded_ad(self, unitID):
builder = self._get_builder(None)
self._rewarded.loadAd(unitID, builder.build())
@run_on_ui_thread
def show_rewarded_ad(self):
if self._rewarded.isLoaded():
self._rewarded.show()
@run_on_ui_thread
def destroy_banner(self):
self._adview.destroy()
@run_on_ui_thread
def destroy_interstitial(self):
self._interstitial.destroy()
@run_on_ui_thread
def destroy_rewarded_video_ad(self):
self._rewarded.destroy()
def _get_builder(self, options):
builder = AdRequestBuilder()
if options is not None:
if "children" in options:
builder.tagForChildDirectedTreatment(options["children"])
if "family" in options:
extras = Bundle()
extras.putBoolean(
"is_designed_for_families", options["family"]
)
builder.addNetworkExtrasBundle(AdMobAdapter, extras)
for test_device in self._test_devices:
builder.addTestDevice(test_device)
return builder
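# The options dict consumed above maps directly onto AdRequest.Builder; for
# example, request_banner({"children": True, "family": True}) tags the
# request for child-directed treatment and adds the is_designed_for_families
# network extra.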
class iOSBridge(AdMobBridge):
# TODO
pass
class KivMob:
""" Allows access to AdMob functionality on Android devices.
"""
def __init__(self, appID):
Logger.info("KivMob: __init__ called.")
self._banner_top_pos = True
if platform == "android":
Logger.info("KivMob: Android platform detected.")
self.bridge = AndroidBridge(appID)
elif platform == "ios":
Logger.warning("KivMob: iOS not yet supported.")
self.bridge = iOSBridge(appID)
else:
Logger.warning("KivMob: Ads will not be shown.")
self.bridge = AdMobBridge(appID)
def add_test_device(self, device):
""" Add test device ID, which will trigger test ads to be displayed on
that device
:type device: string
:param device: The test device ID of the Android device.
"""
Logger.info("KivMob: add_test_device() called.")
self.bridge.add_test_device(device)
def new_banner(self, unitID, top_pos=True):
""" Create a new mobile banner ad.
:type unitID: string
:param unitID: AdMob banner ID for mobile application.
:type top_pos: boolean
:param top_pos: Positions the banner at the top of the screen if True,
at the bottom otherwise.
"""
Logger.info("KivMob: new_banner() called.")
self.bridge.new_banner(unitID, top_pos)
def new_interstitial(self, unitID):
""" Create a new mobile interstitial ad.
:type unitID: string
:param unitID: AdMob interstitial ID for mobile application.
"""
Logger.info("KivMob: new_interstitial() called.")
self.bridge.new_interstitial(unitID)
def is_interstitial_loaded(self):
""" Check if the interstitial ad has loaded.
"""
Logger.info("KivMob: is_interstitial_loaded() called.")
return self.bridge.is_interstitial_loaded()
def request_banner(self, options=None):
""" Request a new banner ad from AdMob.
"""
Logger.info("KivMob: request_banner() called.")
self.bridge.request_banner(options)
def request_interstitial(self, options=None):
""" Request a new interstitial ad from AdMob.
"""
Logger.info("KivMob: request_interstitial() called.")
self.bridge.request_interstitial(options)
def show_banner(self):
""" Displays banner ad, if it has loaded.
"""
Logger.info("KivMob: show_banner() called.")
self.bridge.show_banner()
def show_interstitial(self):
""" Displays interstitial ad, if it has loaded.
"""
Logger.info("KivMob: show_interstitial() called.")
self.bridge.show_interstitial()
def destroy_banner(self):
""" Destroys current banner ad.
"""
Logger.info("KivMob: destroy_banner() called.")
self.bridge.destroy_banner()
def destroy_interstitial(self):
""" Destroys current interstitial ad.
"""
Logger.info("KivMob: destroy_interstitial() called.")
self.bridge.destroy_interstitial()
def hide_banner(self):
""" Hide current banner ad.
"""
Logger.info("KivMob: hide_banner() called.")
self.bridge.hide_banner()
def set_rewarded_ad_listener(self, listener):
""" Set listener object for rewarded video ads.
:type listener: AdMobRewardedVideoAdListener
:param listener: Handles callback functionality for
rewarded video ads.
"""
Logger.info("KivMob: set_rewarded_ad_listener() called.")
self.bridge.set_rewarded_ad_listener(listener)
def load_rewarded_ad(self, unitID):
""" Load rewarded video ad.
:type unitID: string
:param unitID: AdMob rewarded video ID for mobile application.
"""
Logger.info("KivMob: load_rewarded_ad() called.")
self.bridge.load_rewarded_ad(unitID)
def show_rewarded_ad(self):
""" Display rewarded video ad.
"""
Logger.info("KivMob: show_rewarded_ad() called.")
self.bridge.show_rewarded_ad()
def determine_banner_height(self):
""" Utility function for determining the height (dp) of the banner ad.
:return: Height of the banner ad in dp.
"""
height = dp(32)
upper_bound = dp(720)
if Window.height > upper_bound:
height = dp(90)
elif dp(400) < Window.height <= upper_bound:
height = dp(50)
return height
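# Example (sketch): reserve space so a bottom banner does not cover the UI,
# e.g. by padding the root layout. `root` and `app.ads` are assumed names,
# not part of KivMob itself; Kivy padding order is (left, top, right, bottom).
#
#     root.padding = (0, 0, 0, app.ads.determine_banner_height())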
if __name__ == "__main__":
print(
"\033[92m _ ___ __ __ _\n"
" | |/ (_)_ _| \\/ | ___ | |__\n"
" | ' /| \\ \\ / / |\\/| |/ _ \\| '_ \\\n"
" | . \\| |\\ V /| | | | (_) | |_) |\n"
" |_|\\_\\_| \\_/ |_| |_|\\___/|_.__/\n\033[0m"
)
print(" AdMob support for Kivy\n")
print(" Michael Stott, 2019\n")
| python | MIT | 1e6591541a1164cf9080f4d0e64c166f54bd4c55 | 2026-01-05T07:11:54.574300Z | false |
MichaelStott/KivMob | https://github.com/MichaelStott/KivMob/blob/1e6591541a1164cf9080f4d0e64c166f54bd4c55/docs/conf.py | docs/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = u"KivMob"
copyright = u"2018, Michael Stott"
author = u"Michael Stott"
# The short X.Y version
version = u""
# The full version, including alpha/beta/rc tags
release = u"2.0"
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.githubpages"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u"_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "KivMobdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"KivMob.tex",
u"KivMob Documentation",
u"Michael Stott",
"manual",
)
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "kivmob", u"KivMob Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"KivMob",
u"KivMob Documentation",
author,
"KivMob",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
| python | MIT | 1e6591541a1164cf9080f4d0e64c166f54bd4c55 | 2026-01-05T07:11:54.574300Z | false |
MichaelStott/KivMob | https://github.com/MichaelStott/KivMob/blob/1e6591541a1164cf9080f4d0e64c166f54bd4c55/demo/main.py | demo/main.py | from kivmob import KivMob, TestIds, RewardedListenerInterface
import kivy.utils
from kivymd.app import MDApp
from kivy.lang import Builder
from kivy.config import Config
from kivy.utils import platform
from kivy.core.window import Window
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.screenmanager import ScreenManager
from kivy.uix.image import Image
from kivy.properties import NumericProperty
if platform not in ("android", "ios"):
# Approximate dimensions of mobile phone.
Config.set("graphics", "resizable", "0")
Window.size = (400, 600)
__version__ = "1.0"
from kivymd.theming import ThemeManager
from kivymd.uix.list import ILeftBody
from kivymd.uix.list import OneLineListItem
from kivymd.uix.list import TwoLineListItem
from kivymd.uix.list import ThreeLineListItem
from kivymd.uix.snackbar import Snackbar
Builder.load_string(
"""
#:import kivy kivy
#:import Snackbar kivymd.uix.snackbar.Snackbar
#:import MDList kivymd.uix.list.MDList
#:import OneLineListItem kivymd.uix.list.OneLineListItem
#:import TwoLineListItem kivymd.uix.list.TwoLineListItem
#:import ThreeLineListItem kivymd.uix.list.ThreeLineListItem
#:import webbrowser webbrowser
<KivMobDemoUI>:
BoxLayout:
orientation: 'vertical'
MDToolbar:
id: toolbar
title: 'KivMob 2.0'
md_bg_color: app.theme_cls.primary_color
ScreenManager:
id: scr_mngr
Screen:
name: 'menu'
ScrollView:
do_scroll_x: False
MDList:
ThreeLineAvatarListItem:
type: "three-line"
text: "Banners"
secondary_text: "Rectangular image or text ads that occupy a spot within an app's layout."
on_press: app.root.switch_to_screen("banner", "Banners")
AvatarIconWidget:
source: './assets/banner.png'
ThreeLineAvatarListItem:
type: "three-line"
text: "Interstitial"
secondary_text: "Full-screen ads that cover the interface of an app until closed by the user."
on_press: app.root.switch_to_screen("interstitial", "Interstitial")
AvatarIconWidget:
source: './assets/interstitial.png'
ThreeLineAvatarListItem:
type: "three-line"
text: "Rewarded Video"
secondary_text: "Video ads that users may watch in exchange for in-app rewards."
on_press: app.root.switch_to_screen("rewarded", "Rewarded Video Ad")
AvatarIconWidget:
source: './assets/rewarded.png'
ThreeLineAvatarListItem:
type: "three-line"
text: "Documentation"
secondary_text: "Learn how to utilize KivMob within a mobile Kivy application."
on_press: webbrowser.open("https://kivmob.com")
AvatarIconWidget:
source: './assets/documentation.png'
ThreeLineAvatarListItem:
type: "three-line"
text: "Source Code"
secondary_text: "Checkout, fork, and follow the KivMob project on GitHub."
on_press: webbrowser.open("https://github.com/MichaelStott/KivMob")
AvatarIconWidget:
source: './assets/github.png'
ThreeLineAvatarListItem:
type: "three-line"
text: "About"
secondary_text: "Software licensing, credits, and other KivMob information."
on_press: webbrowser.open("https://github.com/MichaelStott/KivMob")
AvatarIconWidget:
source: './assets/about.png'
Screen:
name: "banner"
on_pre_leave:
app.ads.hide_banner()
app.show_banner = False
MDRaisedButton:
text: "Toggle Banner Ad"
elevation_normal: 2
opposite_colors: True
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
on_press: app.toggle_banner()
Screen:
name: "interstitial"
MDRaisedButton:
text: "Show Interstitial"
elevation_normal: 2
opposite_colors: True
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
on_press: app.ads.show_interstitial() if app.ads.is_interstitial_loaded() else app.root.show_interstitial_msg()
Screen:
name: 'rewarded'
BoxLayout:
MDLabel:
font_style: 'H1'
theme_text_color: 'Primary'
text: "Points: "+str(app.Points)
halign: 'center'
pos_hint: {'center_x': 0.5, 'center_y': 0.75}
MDFloatingActionButton:
icon: 'plus'
elevation_normal: 2
pos_hint: {'center_x': 0.5, 'center_y': 0.25}
on_press: app.ads.show_rewarded_ad()
"""
)
class AvatarIconWidget(ILeftBody, Image):
pass
class KivMobDemoUI(FloatLayout):
def switch_to_screen(self, name, title):
self.ids.toolbar.title = title
self.ids.toolbar.left_action_items = [
["chevron-left", lambda x: self.back_to_menu()]
]
self.ids.scr_mngr.transition.direction = "left"
self.ids.scr_mngr.current = name
self.interstitial_snack = Snackbar(
text="Interstitial has not yet loaded."
)
def back_to_menu(self):
self.ids.scr_mngr.transition.direction = "right"
self.ids.scr_mngr.current = "menu"
self.ids.toolbar.title = "KivMob 2.0"
self.ids.toolbar.left_action_items = []
def show_interstitial_msg(self):
self.interstitial_snack.show()
def hide_interstitial_msg(self):
self.interstitial_snack.hide()
def open_dialog(self):
pass
class KivMobDemo(MDApp):
def __init__(self, **kwargs):
# Initialise MDApp first so theme_cls exists, then apply the dark theme.
super().__init__(**kwargs)
self.theme_cls.theme_style = "Dark"
self.rewards = Rewards_Handler(self)
Points = NumericProperty(0)
show_banner = False
def build(self):
self.ads = KivMob(TestIds.APP)
self.ads.new_banner(TestIds.BANNER, False)
self.ads.new_interstitial(TestIds.INTERSTITIAL)
self.ads.request_banner()
self.ads.request_interstitial()
self.ads.set_rewarded_ad_listener(self.rewards)
self.ads.load_rewarded_ad(TestIds.REWARDED_VIDEO)
self.toggled = False
return KivMobDemoUI()
def toggle_banner(self):
self.show_banner = not self.show_banner
if self.show_banner:
self.ads.show_banner()
else:
self.ads.hide_banner()
def load_video(self):
self.ads.load_rewarded_ad(TestIds.REWARDED_VIDEO)
class Rewards_Handler(RewardedListenerInterface):
# Reward details cached from the most recent on_rewarded() callback.
Reward = "None"
Reward_Amount = "None"
def __init__(self, other):
self.AppObj = other
def on_rewarded(self, reward_name, reward_amount):
self.Reward = reward_name
self.Reward_Amount = reward_amount
self.AppObj.Points += int(reward_amount)
def on_rewarded_video_ad_completed(self):
self.on_rewarded(self.Reward,self.Reward_Amount)
def on_rewarded_video_ad_started(self):
self.AppObj.load_video()
def on_rewarded_video_ad_left_application(self):
# Intentionally a no-op: leaving the app mid-video grants no points.
self.AppObj.Points += 0
if __name__ == "__main__":
KivMobDemo().run()
| python | MIT | 1e6591541a1164cf9080f4d0e64c166f54bd4c55 | 2026-01-05T07:11:54.574300Z | false |
jacob-bd/notebooklm-mcp | https://github.com/jacob-bd/notebooklm-mcp/blob/1ca3bba360852de0534ca33e0ccf7258a0efd306/src/notebooklm_mcp/api_client.py | src/notebooklm_mcp/api_client.py | #!/usr/bin/env python3
"""NotebookLM MCP API client (notebooklm.google.com).
Internal API. See CLAUDE.md for full documentation.
"""
import json
import os
import re
import urllib.parse
from dataclasses import dataclass
from datetime import datetime, timezone
from typing import Any
import httpx
# Ownership constants (from metadata position 0)
OWNERSHIP_MINE = 1
OWNERSHIP_SHARED = 2
@dataclass
class ConversationTurn:
"""Represents a single turn in a conversation (query + response).
Used to track conversation history for follow-up queries.
NotebookLM requires the full conversation history in follow-up requests.
"""
query: str # The user's question
answer: str # The AI's response
turn_number: int # 1-indexed turn number in the conversation
def parse_timestamp(ts_array: list | None) -> str | None:
"""Convert [seconds, nanoseconds] timestamp array to ISO format string.
"""
if not ts_array or not isinstance(ts_array, list) or len(ts_array) < 1:
return None
try:
seconds = ts_array[0]
if not isinstance(seconds, (int, float)):
return None
# Convert to datetime
dt = datetime.fromtimestamp(seconds, tz=timezone.utc)
return dt.strftime("%Y-%m-%dT%H:%M:%SZ")
except (ValueError, OSError, OverflowError):
return None
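# Worked example: parse_timestamp([1700000000, 0]) returns
# "2023-11-14T22:13:20Z". The first element is a Unix epoch in seconds
# (interpreted as UTC); the nanoseconds element, when present, is ignored.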
@dataclass
class Notebook:
"""Represents a NotebookLM notebook."""
id: str
title: str
source_count: int
sources: list[dict]
is_owned: bool = True # True if owned by user, False if shared with user
is_shared: bool = False # True if shared with others (for owned notebooks)
created_at: str | None = None # ISO format timestamp
modified_at: str | None = None # ISO format timestamp
@property
def url(self) -> str:
return f"https://notebooklm.google.com/notebook/{self.id}"
@property
def ownership(self) -> str:
"""Return human-readable ownership status."""
if self.is_owned:
return "owned"
return "shared_with_me"
class NotebookLMClient:
"""Client for NotebookLM MCP internal API."""
BASE_URL = "https://notebooklm.google.com"
BATCHEXECUTE_URL = f"{BASE_URL}/_/LabsTailwindUi/data/batchexecute"
# Known RPC IDs
RPC_LIST_NOTEBOOKS = "wXbhsf"
RPC_GET_NOTEBOOK = "rLM1Ne"
RPC_CREATE_NOTEBOOK = "CCqFvf"
RPC_RENAME_NOTEBOOK = "s0tc2d"
RPC_DELETE_NOTEBOOK = "WWINqb"
RPC_ADD_SOURCE = "izAoDd" # Used for URL, text, and Drive sources
RPC_GET_SOURCE = "hizoJc" # Get source details
RPC_CHECK_FRESHNESS = "yR9Yof" # Check if Drive source is stale
RPC_SYNC_DRIVE = "FLmJqe" # Sync Drive source with latest content
RPC_DELETE_SOURCE = "tGMBJ" # Delete a source from notebook
RPC_GET_CONVERSATIONS = "hPTbtc"
RPC_PREFERENCES = "hT54vc"
RPC_SUBSCRIPTION = "ozz5Z"
RPC_SETTINGS = "ZwVcOc"
RPC_GET_SUMMARY = "VfAZjd" # Get notebook summary and suggested report topics
RPC_GET_SOURCE_GUIDE = "tr032e" # Get source guide (AI summary + keyword chips)
# Research RPCs (source discovery)
RPC_START_FAST_RESEARCH = "Ljjv0c" # Start Fast Research (Web or Drive)
RPC_START_DEEP_RESEARCH = "QA9ei" # Start Deep Research (Web only)
RPC_POLL_RESEARCH = "e3bVqc" # Poll research results
RPC_IMPORT_RESEARCH = "LBwxtb" # Import research sources
# Research source types
RESEARCH_SOURCE_WEB = 1
RESEARCH_SOURCE_DRIVE = 2
RESEARCH_MODE_FAST = 1
RESEARCH_MODE_DEEP = 5
RESULT_TYPE_WEB = 1
RESULT_TYPE_GOOGLE_DOC = 2
RESULT_TYPE_GOOGLE_SLIDES = 3
RESULT_TYPE_DEEP_REPORT = 5
RESULT_TYPE_GOOGLE_SHEETS = 8
RPC_CREATE_STUDIO = "R7cb6c" # Create Audio or Video Overview
RPC_POLL_STUDIO = "gArtLc" # Poll for studio content status
RPC_DELETE_STUDIO = "V5N4be" # Delete Audio or Video Overview
# Studio content types
STUDIO_TYPE_AUDIO = 1
STUDIO_TYPE_VIDEO = 3
AUDIO_FORMAT_DEEP_DIVE = 1
AUDIO_FORMAT_BRIEF = 2
AUDIO_FORMAT_CRITIQUE = 3
AUDIO_FORMAT_DEBATE = 4
# Audio Overview lengths
AUDIO_LENGTH_SHORT = 1
AUDIO_LENGTH_DEFAULT = 2
AUDIO_LENGTH_LONG = 3
VIDEO_FORMAT_EXPLAINER = 1
VIDEO_FORMAT_BRIEF = 2
# Video visual styles
VIDEO_STYLE_AUTO_SELECT = 1
VIDEO_STYLE_CUSTOM = 2
VIDEO_STYLE_CLASSIC = 3
VIDEO_STYLE_WHITEBOARD = 4
VIDEO_STYLE_KAWAII = 5
VIDEO_STYLE_ANIME = 6
VIDEO_STYLE_WATERCOLOR = 7
VIDEO_STYLE_RETRO_PRINT = 8
VIDEO_STYLE_HERITAGE = 9
VIDEO_STYLE_PAPER_CRAFT = 10
STUDIO_TYPE_REPORT = 2
STUDIO_TYPE_FLASHCARDS = 4 # Also used for Quiz (differentiated by options)
STUDIO_TYPE_INFOGRAPHIC = 7
STUDIO_TYPE_SLIDE_DECK = 8
STUDIO_TYPE_DATA_TABLE = 9
RPC_GENERATE_MIND_MAP = "yyryJe" # Generate mind map JSON from sources
RPC_SAVE_MIND_MAP = "CYK0Xb" # Save generated mind map to notebook
RPC_LIST_MIND_MAPS = "cFji9" # List existing mind maps
# Report format constants
REPORT_FORMAT_BRIEFING_DOC = "Briefing Doc"
REPORT_FORMAT_STUDY_GUIDE = "Study Guide"
REPORT_FORMAT_BLOG_POST = "Blog Post"
REPORT_FORMAT_CUSTOM = "Create Your Own"
# Flashcard difficulty codes (suspected values)
FLASHCARD_DIFFICULTY_EASY = 1
FLASHCARD_DIFFICULTY_MEDIUM = 2
FLASHCARD_DIFFICULTY_HARD = 3
FLASHCARD_COUNT_DEFAULT = 2
INFOGRAPHIC_ORIENTATION_LANDSCAPE = 1
INFOGRAPHIC_ORIENTATION_PORTRAIT = 2
INFOGRAPHIC_ORIENTATION_SQUARE = 3
INFOGRAPHIC_DETAIL_CONCISE = 1
INFOGRAPHIC_DETAIL_STANDARD = 2
INFOGRAPHIC_DETAIL_DETAILED = 3
SLIDE_DECK_FORMAT_DETAILED = 1
SLIDE_DECK_FORMAT_PRESENTER = 2
# Slide Deck length codes
SLIDE_DECK_LENGTH_SHORT = 1
SLIDE_DECK_LENGTH_DEFAULT = 3
# Chat configuration goal/style codes
CHAT_GOAL_DEFAULT = 1
CHAT_GOAL_CUSTOM = 2
CHAT_GOAL_LEARNING_GUIDE = 3
# Chat configuration response length codes
CHAT_RESPONSE_DEFAULT = 1
CHAT_RESPONSE_LONGER = 4
CHAT_RESPONSE_SHORTER = 5
# Source type constants (from metadata position 4)
# These represent the Google Workspace document type, NOT the source origin
SOURCE_TYPE_GOOGLE_DOCS = 1
SOURCE_TYPE_GOOGLE_OTHER = 2
SOURCE_TYPE_PASTED_TEXT = 4
# Query endpoint (different from batchexecute - streaming gRPC-style)
QUERY_ENDPOINT = "/_/LabsTailwindUi/data/google.internal.labs.tailwind.orchestration.v1.LabsTailwindOrchestrationService/GenerateFreeFormStreamed"
# Headers required for page fetch (must look like a browser navigation)
_PAGE_FETCH_HEADERS = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/143.0.0.0 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8",
"Accept-Language": "en-US,en;q=0.9",
"Sec-Fetch-Dest": "document",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-Site": "none",
"Sec-Fetch-User": "?1",
"sec-ch-ua": '"Google Chrome";v="143", "Chromium";v="143", "Not A(Brand";v="24"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"macOS"',
}
def __init__(self, cookies: dict[str, str], csrf_token: str = "", session_id: str = ""):
"""
Initialize the client.
Args:
cookies: Dict of Google auth cookies (SID, SSID, HSID, APISID, SAPISID, etc.)
csrf_token: CSRF token (optional - will be auto-extracted from page if not provided)
session_id: Session ID (optional - will be auto-extracted from page if not provided)
"""
self.cookies = cookies
self.csrf_token = csrf_token
self._client: httpx.Client | None = None
self._session_id = session_id
# Conversation cache for follow-up queries
# Key: conversation_id, Value: list of ConversationTurn objects
self._conversation_cache: dict[str, list[ConversationTurn]] = {}
# Request counter for _reqid parameter (required for query endpoint)
import random
self._reqid_counter = random.randint(100000, 999999)
# ALWAYS refresh CSRF token on initialization - they expire quickly (minutes)
# Even if a CSRF token was provided, it may be stale
self._refresh_auth_tokens()
def _refresh_auth_tokens(self) -> None:
"""
Refresh CSRF token and session ID by fetching the NotebookLM homepage.
This method fetches the NotebookLM page using the stored cookies and
extracts the CSRF token (SNlM0e) and session ID (FdrFJe) from the HTML.
Raises:
ValueError: If cookies are expired (redirected to login) or tokens not found
"""
# Build cookie header
cookie_header = "; ".join(f"{k}={v}" for k, v in self.cookies.items())
# Must use browser-like headers for page fetch
headers = {**self._PAGE_FETCH_HEADERS, "Cookie": cookie_header}
# Use a temporary client for the page fetch
with httpx.Client(headers=headers, follow_redirects=True, timeout=15.0) as client:
response = client.get(f"{self.BASE_URL}/")
# Check if redirected to login (cookies expired)
if "accounts.google.com" in str(response.url):
raise ValueError(
"Cookies have expired. Please re-authenticate by running 'notebooklm-mcp-auth'."
)
if response.status_code != 200:
raise ValueError(f"Failed to fetch NotebookLM page: HTTP {response.status_code}")
html = response.text
# Extract CSRF token (SNlM0e)
csrf_match = re.search(r'"SNlM0e":"([^"]+)"', html)
if not csrf_match:
# Save HTML for debugging
from pathlib import Path
debug_dir = Path.home() / ".notebooklm-mcp"
debug_dir.mkdir(exist_ok=True)
debug_path = debug_dir / "debug_page.html"
debug_path.write_text(html)
raise ValueError(
f"Could not extract CSRF token from page. "
f"Page saved to {debug_path} for debugging. "
f"The page structure may have changed."
)
self.csrf_token = csrf_match.group(1)
# Extract session ID (FdrFJe) - optional but helps
sid_match = re.search(r'"FdrFJe":"([^"]+)"', html)
if sid_match:
self._session_id = sid_match.group(1)
# Cache the extracted tokens to avoid re-fetching the page on next request
self._update_cached_tokens()
def _update_cached_tokens(self) -> None:
"""Update the cached auth tokens with newly extracted CSRF token and session ID.
This avoids re-fetching the NotebookLM page on every client initialization,
significantly improving performance for subsequent API calls.
"""
try:
import time
from .auth import AuthTokens, save_tokens_to_cache, load_cached_tokens
# Load existing cache or create new
cached = load_cached_tokens()
if cached:
# Update existing cache with new tokens
cached.csrf_token = self.csrf_token
cached.session_id = self._session_id
else:
# Create new cache entry
cached = AuthTokens(
cookies=self.cookies,
csrf_token=self.csrf_token,
session_id=self._session_id,
extracted_at=time.time(),
)
save_tokens_to_cache(cached, silent=True)
except Exception:
# Silently fail - caching is an optimization, not critical
pass
def _get_client(self) -> httpx.Client:
"""Get or create HTTP client."""
if self._client is None:
# Build cookie string
cookie_str = "; ".join(f"{k}={v}" for k, v in self.cookies.items())
self._client = httpx.Client(
headers={
"Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
"Origin": self.BASE_URL,
"Referer": f"{self.BASE_URL}/",
"Cookie": cookie_str,
"X-Same-Domain": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36",
},
timeout=30.0,
)
return self._client
def _build_request_body(self, rpc_id: str, params: Any) -> str:
"""Build the batchexecute request body."""
# The params need to be JSON-encoded, then wrapped in the RPC structure
# Use separators to match Chrome's compact format (no spaces)
params_json = json.dumps(params, separators=(',', ':'))
f_req = [[[rpc_id, params_json, None, "generic"]]]
f_req_json = json.dumps(f_req, separators=(',', ':'))
# URL encode (safe='' encodes all characters including /)
body_parts = [f"f.req={urllib.parse.quote(f_req_json, safe='')}"]
if self.csrf_token:
body_parts.append(f"at={urllib.parse.quote(self.csrf_token, safe='')}")
# Add trailing & to match NotebookLM's format
return "&".join(body_parts) + "&"
def _build_url(self, rpc_id: str, source_path: str = "/") -> str:
"""Build the batchexecute URL with query params."""
params = {
"rpcids": rpc_id,
"source-path": source_path,
"bl": os.environ.get("NOTEBOOKLM_BL", "boq_labs-tailwind-frontend_20251221.14_p0"),
"hl": "en",
"rt": "c",
}
if self._session_id:
params["f.sid"] = self._session_id
query = urllib.parse.urlencode(params)
return f"{self.BATCHEXECUTE_URL}?{query}"
def _parse_response(self, response_text: str) -> Any:
"""Parse the batchexecute response."""
# Response format:
# )]}'
# <byte_count>
# <json_array>
# Remove the anti-XSSI prefix
if response_text.startswith(")]}'"):
response_text = response_text[4:]
lines = response_text.strip().split("\n")
# Parse each chunk
results = []
i = 0
while i < len(lines):
line = lines[i].strip()
if not line:
i += 1
continue
# Try to parse as a chunk byte count (the value itself is unused)
try:
int(line)
# Next line(s) should be the JSON payload
i += 1
if i < len(lines):
json_str = lines[i]
try:
data = json.loads(json_str)
results.append(data)
except json.JSONDecodeError:
pass
i += 1
except ValueError:
# Not a byte count, try to parse as JSON
try:
data = json.loads(line)
results.append(data)
except json.JSONDecodeError:
pass
i += 1
return results
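# Example raw response handled above, with an anti-XSSI prefix followed by
# length-prefixed JSON chunks (values illustrative):
#
#     )]}'
#     123
#     [["wrb.fr","wXbhsf","[...]",null,null,null,"generic"]]
#
# The prefix is stripped, "123" parses as a byte count, and the JSON line
# becomes one entry in the returned results; _extract_rpc_result below then
# pulls the payload string out of the matching "wrb.fr" envelope.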
def _extract_rpc_result(self, parsed_response: list, rpc_id: str) -> Any:
"""Extract the result for a specific RPC ID from the parsed response."""
for chunk in parsed_response:
if isinstance(chunk, list):
for item in chunk:
if isinstance(item, list) and len(item) >= 3:
if item[0] == "wrb.fr" and item[1] == rpc_id:
result_str = item[2]
if isinstance(result_str, str):
try:
return json.loads(result_str)
except json.JSONDecodeError:
return result_str
return result_str
return None
def _call_rpc(
self,
rpc_id: str,
params: Any,
path: str = "/",
timeout: float | None = None,
) -> Any:
"""Execute an RPC call and return the extracted result."""
client = self._get_client()
body = self._build_request_body(rpc_id, params)
url = self._build_url(rpc_id, path)
if timeout:
response = client.post(url, content=body, timeout=timeout)
else:
response = client.post(url, content=body)
response.raise_for_status()
parsed = self._parse_response(response.text)
return self._extract_rpc_result(parsed, rpc_id)
# =========================================================================
# Conversation Management (for query follow-ups)
# =========================================================================
def _build_conversation_history(self, conversation_id: str) -> list | None:
"""Build the conversation history array for follow-up queries.
Chrome expects history in format: [[answer, null, 2], [query, null, 1], ...]
where type 1 = user message, type 2 = AI response.
The history includes ALL previous turns, not just the most recent one.
Turns are added in chronological order (oldest first).
Args:
conversation_id: The conversation ID to get history for
Returns:
List in Chrome's expected format, or None if no history exists
"""
turns = self._conversation_cache.get(conversation_id, [])
if not turns:
return None
history = []
# Add turns in chronological order (oldest first)
# Each turn adds: [answer, null, 2] then [query, null, 1]
for turn in turns:
history.append([turn.answer, None, 2])
history.append([turn.query, None, 1])
return history if history else None
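# Example: with two cached turns (Q1, A1) and (Q2, A2), the history sent to
# the server is, oldest turn first and each answer preceding its query:
#
#     [[A1, None, 2], [Q1, None, 1], [A2, None, 2], [Q2, None, 1]]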
def _cache_conversation_turn(
self, conversation_id: str, query: str, answer: str
) -> None:
"""Cache a conversation turn for future follow-up queries.
"""
if conversation_id not in self._conversation_cache:
self._conversation_cache[conversation_id] = []
turn_number = len(self._conversation_cache[conversation_id]) + 1
turn = ConversationTurn(query=query, answer=answer, turn_number=turn_number)
self._conversation_cache[conversation_id].append(turn)
def clear_conversation(self, conversation_id: str) -> bool:
"""Clear the conversation cache for a specific conversation.
"""
if conversation_id in self._conversation_cache:
del self._conversation_cache[conversation_id]
return True
return False
def get_conversation_history(self, conversation_id: str) -> list[dict] | None:
"""Get the conversation history for a specific conversation.
"""
turns = self._conversation_cache.get(conversation_id)
if not turns:
return None
return [
{"turn": t.turn_number, "query": t.query, "answer": t.answer}
for t in turns
]
# =========================================================================
# Notebook Operations
# =========================================================================
def list_notebooks(self, debug: bool = False) -> list[Notebook]:
"""List all notebooks."""
client = self._get_client()
# [null, 1, null, [2]] - params for list notebooks
params = [None, 1, None, [2]]
body = self._build_request_body(self.RPC_LIST_NOTEBOOKS, params)
url = self._build_url(self.RPC_LIST_NOTEBOOKS)
if debug:
print(f"[DEBUG] URL: {url}")
print(f"[DEBUG] Body: {body[:200]}...")
response = client.post(url, content=body)
response.raise_for_status()
if debug:
print(f"[DEBUG] Response status: {response.status_code}")
print(f"[DEBUG] Response length: {len(response.text)} chars")
parsed = self._parse_response(response.text)
result = self._extract_rpc_result(parsed, self.RPC_LIST_NOTEBOOKS)
if debug:
print(f"[DEBUG] Parsed chunks: {len(parsed)}")
print(f"[DEBUG] Result type: {type(result)}")
if result:
print(f"[DEBUG] Result length: {len(result) if isinstance(result, list) else 'N/A'}")
if isinstance(result, list) and len(result) > 0:
print(f"[DEBUG] First item type: {type(result[0])}")
print(f"[DEBUG] First item: {str(result[0])[:500]}...")
notebooks = []
if result and isinstance(result, list):
# [0] = "Title"
# [1] = [sources]
# [2] = "notebook-uuid"
# [3] = "emoji" or null
# [4] = null
# [5] = [metadata] where metadata[0] = ownership (1=mine, 2=shared_with_me)
notebook_list = result[0] if result and isinstance(result[0], list) else result
for nb_data in notebook_list:
if isinstance(nb_data, list) and len(nb_data) >= 3:
title = nb_data[0] if isinstance(nb_data[0], str) else "Untitled"
sources_data = nb_data[1] if len(nb_data) > 1 else []
notebook_id = nb_data[2] if len(nb_data) > 2 else None
is_owned = True # Default to owned
is_shared = False # Default to not shared
created_at = None
modified_at = None
if len(nb_data) > 5 and isinstance(nb_data[5], list) and len(nb_data[5]) > 0:
metadata = nb_data[5]
ownership_value = metadata[0]
# 1 = mine (owned), 2 = shared with me
is_owned = ownership_value == OWNERSHIP_MINE
# Check if shared (for owned notebooks)
# Based on observation: [1, true, true, ...] -> Shared
# [1, false, true, ...] -> Private
if len(metadata) > 1:
is_shared = bool(metadata[1])
# metadata[5] = [seconds, nanos] = last modified
# metadata[8] = [seconds, nanos] = created
if len(metadata) > 5:
modified_at = parse_timestamp(metadata[5])
if len(metadata) > 8:
created_at = parse_timestamp(metadata[8])
sources = []
if isinstance(sources_data, list):
for src in sources_data:
if isinstance(src, list) and len(src) >= 2:
# Source structure: [[source_id], title, metadata, ...]
src_ids = src[0] if src[0] else []
src_title = src[1] if len(src) > 1 else "Untitled"
# Extract the source ID (might be in a list)
src_id = src_ids[0] if isinstance(src_ids, list) and src_ids else src_ids
sources.append({
"id": src_id,
"title": src_title,
})
if notebook_id:
notebooks.append(Notebook(
id=notebook_id,
title=title,
source_count=len(sources),
sources=sources,
is_owned=is_owned,
is_shared=is_shared,
created_at=created_at,
modified_at=modified_at,
))
return notebooks
def get_notebook(self, notebook_id: str) -> dict | None:
"""Get notebook details."""
return self._call_rpc(
self.RPC_GET_NOTEBOOK,
[notebook_id, None, [2], None, 0],
f"/notebook/{notebook_id}",
)
def get_notebook_summary(self, notebook_id: str) -> dict[str, Any]:
"""Get AI-generated summary and suggested topics for a notebook."""
result = self._call_rpc(
self.RPC_GET_SUMMARY, [notebook_id, [2]], f"/notebook/{notebook_id}"
)
summary = ""
suggested_topics = []
if result and isinstance(result, list):
# Summary is at result[0][0]
if len(result) > 0 and isinstance(result[0], list) and len(result[0]) > 0:
summary = result[0][0]
# Suggested topics are at result[1][0]
if len(result) > 1 and result[1]:
topics_data = result[1][0] if isinstance(result[1], list) and len(result[1]) > 0 else []
for topic in topics_data:
if isinstance(topic, list) and len(topic) >= 2:
suggested_topics.append({
"question": topic[0],
"prompt": topic[1],
})
return {
"summary": summary,
"suggested_topics": suggested_topics,
}
def get_source_guide(self, source_id: str) -> dict[str, Any]:
"""Get AI-generated summary and keywords for a source."""
result = self._call_rpc(self.RPC_GET_SOURCE_GUIDE, [[[[source_id]]]], "/")
summary = ""
keywords = []
if result and isinstance(result, list):
if len(result) > 0 and isinstance(result[0], list):
if len(result[0]) > 0 and isinstance(result[0][0], list):
inner = result[0][0]
if len(inner) > 1 and isinstance(inner[1], list) and len(inner[1]) > 0:
summary = inner[1][0]
if len(inner) > 2 and isinstance(inner[2], list) and len(inner[2]) > 0:
keywords = inner[2][0] if isinstance(inner[2][0], list) else []
return {
"summary": summary,
"keywords": keywords,
}
def create_notebook(self, title: str = "") -> Notebook | None:
"""Create a new notebook."""
params = [title, None, None, [2], [1, None, None, None, None, None, None, None, None, None, [1]]]
result = self._call_rpc(self.RPC_CREATE_NOTEBOOK, params)
if result and isinstance(result, list) and len(result) >= 3:
notebook_id = result[2]
if notebook_id:
return Notebook(
id=notebook_id,
title=title or "Untitled notebook",
source_count=0,
sources=[],
)
return None
def rename_notebook(self, notebook_id: str, new_title: str) -> bool:
"""Rename a notebook."""
params = [notebook_id, [[None, None, None, [None, new_title]]]]
result = self._call_rpc(self.RPC_RENAME_NOTEBOOK, params, f"/notebook/{notebook_id}")
return result is not None
def configure_chat(
self,
notebook_id: str,
goal: str = "default",
custom_prompt: str | None = None,
response_length: str = "default",
) -> dict[str, Any]:
"""Configure chat goal/style and response length for a notebook."""
goal_map = {
"default": self.CHAT_GOAL_DEFAULT,
"learning_guide": self.CHAT_GOAL_LEARNING_GUIDE,
"custom": self.CHAT_GOAL_CUSTOM,
}
if goal not in goal_map:
raise ValueError(f"Invalid goal: {goal}. Must be one of: {list(goal_map.keys())}")
goal_code = goal_map[goal]
# Validate custom prompt
if goal == "custom":
if not custom_prompt:
raise ValueError("custom_prompt is required when goal='custom'")
if len(custom_prompt) > 10000:
raise ValueError(f"custom_prompt exceeds 10000 chars (got {len(custom_prompt)})")
# Map response length string to code
length_map = {
"default": self.CHAT_RESPONSE_DEFAULT,
"longer": self.CHAT_RESPONSE_LONGER,
"shorter": self.CHAT_RESPONSE_SHORTER,
}
if response_length not in length_map:
raise ValueError(f"Invalid response_length: {response_length}. Must be one of: {list(length_map.keys())}")
length_code = length_map[response_length]
if goal == "custom" and custom_prompt:
goal_setting = [goal_code, custom_prompt]
else:
goal_setting = [goal_code]
chat_settings = [goal_setting, [length_code]]
params = [notebook_id, [[None, None, None, None, None, None, None, chat_settings]]]
result = self._call_rpc(self.RPC_RENAME_NOTEBOOK, params, f"/notebook/{notebook_id}")
if result:
# Response format: [title, null, id, emoji, null, metadata, null, [[goal_code, prompt?], [length_code]]]
settings = result[7] if len(result) > 7 else None
return {
"status": "success",
"notebook_id": notebook_id,
"goal": goal,
"custom_prompt": custom_prompt if goal == "custom" else None,
"response_length": response_length,
"raw_settings": settings,
}
return {
"status": "error",
"error": "Failed to configure chat settings",
}
def delete_notebook(self, notebook_id: str) -> bool:
"""Delete a notebook permanently.
WARNING: This action is IRREVERSIBLE. The notebook and all its sources,
notes, and generated content will be permanently deleted.
Args:
notebook_id: The notebook UUID to delete
Returns:
True on success, False on failure
"""
client = self._get_client()
params = [[notebook_id], [2]]
body = self._build_request_body(self.RPC_DELETE_NOTEBOOK, params)
url = self._build_url(self.RPC_DELETE_NOTEBOOK)
response = client.post(url, content=body)
response.raise_for_status()
parsed = self._parse_response(response.text)
result = self._extract_rpc_result(parsed, self.RPC_DELETE_NOTEBOOK)
return result is not None
def check_source_freshness(self, source_id: str) -> bool | None:
"""Check if a Drive source is fresh (up-to-date with Google Drive).
"""
client = self._get_client()
params = [None, [source_id], [2]]
body = self._build_request_body(self.RPC_CHECK_FRESHNESS, params)
url = self._build_url(self.RPC_CHECK_FRESHNESS)
response = client.post(url, content=body)
response.raise_for_status()
parsed = self._parse_response(response.text)
result = self._extract_rpc_result(parsed, self.RPC_CHECK_FRESHNESS)
# true = fresh, false = stale
if result and isinstance(result, list) and len(result) > 0:
inner = result[0] if result else []
if isinstance(inner, list) and len(inner) >= 2:
return inner[1] # true = fresh, false = stale
return None
def sync_drive_source(self, source_id: str) -> dict | None:
"""Sync a Drive source with the latest content from Google Drive.
"""
client = self._get_client()
# Sync params: [null, ["source_id"], [2]]
params = [None, [source_id], [2]]
body = self._build_request_body(self.RPC_SYNC_DRIVE, params)
url = self._build_url(self.RPC_SYNC_DRIVE)
response = client.post(url, content=body)
response.raise_for_status()
parsed = self._parse_response(response.text)
result = self._extract_rpc_result(parsed, self.RPC_SYNC_DRIVE)
if result and isinstance(result, list) and len(result) > 0:
source_data = result[0] if result else []
if isinstance(source_data, list) and len(source_data) >= 3:
source_id_result = source_data[0][0] if source_data[0] else None
title = source_data[1] if len(source_data) > 1 else "Unknown"
metadata = source_data[2] if len(source_data) > 2 else []
synced_at = None
| python | MIT | 1ca3bba360852de0534ca33e0ccf7258a0efd306 | 2026-01-05T07:12:11.692365Z | true |
jacob-bd/notebooklm-mcp | https://github.com/jacob-bd/notebooklm-mcp/blob/1ca3bba360852de0534ca33e0ccf7258a0efd306/src/notebooklm_mcp/auth_cli.py | src/notebooklm_mcp/auth_cli.py | #!/usr/bin/env python3
"""CLI tool to authenticate with NotebookLM MCP.
This tool connects to Chrome via DevTools Protocol, navigates to NotebookLM,
and extracts authentication tokens. If the user is not logged in, it waits
for them to log in via the Chrome window.
Usage:
1. Start Chrome with remote debugging:
/Applications/Google\\ Chrome.app/Contents/MacOS/Google\\ Chrome --remote-debugging-port=9222
2. Or, if Chrome is already running, it may already have debugging enabled.
3. Run this tool:
notebooklm-mcp-auth
4. If not logged in, log in via the Chrome window
5. Tokens are cached to ~/.notebooklm-mcp/auth.json
"""
import json
import re
import sys
import time
from pathlib import Path
import httpx
from .auth import (
AuthTokens,
REQUIRED_COOKIES,
extract_csrf_from_page_source,
get_cache_path,
save_tokens_to_cache,
validate_cookies,
)
CDP_DEFAULT_PORT = 9222
NOTEBOOKLM_URL = "https://notebooklm.google.com/"
def get_chrome_user_data_dir() -> str | None:
"""Get the default Chrome user data directory."""
import platform
from pathlib import Path
system = platform.system()
home = Path.home()
if system == "Darwin":
return str(home / "Library/Application Support/Google/Chrome")
elif system == "Linux":
return str(home / ".config/google-chrome")
elif system == "Windows":
return str(home / "AppData/Local/Google/Chrome/User Data")
return None
def launch_chrome(port: int, headless: bool = False) -> bool:
"""Launch Chrome with remote debugging enabled.
Args:
port: The debugging port to use
headless: If True, launch in headless mode (no visible window)
Returns:
True if Chrome was launched, False if failed
"""
import platform
import subprocess
system = platform.system()
if system == "Darwin":
chrome_path = "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
elif system == "Linux":
chrome_path = "google-chrome"
elif system == "Windows":
chrome_path = r"C:\Program Files\Google\Chrome\Application\chrome.exe"
else:
print(f"Unsupported platform: {system}")
return False
# Chrome 136+ requires a non-default user-data-dir for remote debugging
# We use a persistent directory so Google login is remembered across runs
profile_dir = Path.home() / ".notebooklm-mcp" / "chrome-profile"
profile_dir.mkdir(parents=True, exist_ok=True)
args = [
chrome_path,
f"--remote-debugging-port={port}",
"--no-first-run",
"--no-default-browser-check",
"--disable-extensions", # Bypass extensions that may interfere (e.g., Antigravity IDE)
f"--user-data-dir={profile_dir}", # Persistent profile for login persistence
"--remote-allow-origins=*", # Allow WebSocket connections from any origin
]
if headless:
args.append("--headless=new")
try:
# Print the command for debugging
print(f"Running: {' '.join(args[:3])}...")
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
time.sleep(3) # Wait for Chrome to start
# Check if there was an immediate error
if process.poll() is not None:
_, stderr = process.communicate()
if stderr:
print(f"Chrome error: {stderr.decode()[:500]}")
# Chrome exited immediately, so the launch failed.
return False
return True
except Exception as e:
print(f"Failed to launch Chrome: {e}")
return False
def get_chrome_debugger_url(port: int = CDP_DEFAULT_PORT) -> str | None:
"""Get the WebSocket debugger URL for Chrome."""
try:
response = httpx.get(f"http://localhost:{port}/json/version", timeout=5)
data = response.json()
return data.get("webSocketDebuggerUrl")
except Exception:
return None
def get_chrome_pages(port: int = CDP_DEFAULT_PORT) -> list[dict]:
"""Get list of open pages in Chrome."""
try:
response = httpx.get(f"http://localhost:{port}/json", timeout=5)
return response.json()
except Exception:
return []
def find_or_create_notebooklm_page(port: int = CDP_DEFAULT_PORT) -> dict | None:
"""Find an existing NotebookLM page or create a new one."""
from urllib.parse import quote
pages = get_chrome_pages(port)
# Look for existing NotebookLM page
for page in pages:
url = page.get("url", "")
if "notebooklm.google.com" in url:
return page
# Create a new page - URL must be properly encoded
try:
encoded_url = quote(NOTEBOOKLM_URL, safe="")
response = httpx.put(
f"http://localhost:{port}/json/new?{encoded_url}",
timeout=15
)
if response.status_code == 200 and response.text.strip():
return response.json()
# Fallback: create blank page then navigate
response = httpx.put(f"http://localhost:{port}/json/new", timeout=10)
if response.status_code == 200 and response.text.strip():
page = response.json()
# Navigate to NotebookLM using the page's websocket
ws_url = page.get("webSocketDebuggerUrl")
if ws_url:
navigate_to_url(ws_url, NOTEBOOKLM_URL)
return page
print(f"Failed to create page: status={response.status_code}")
return None
except Exception as e:
print(f"Failed to create new page: {e}")
return None
def execute_cdp_command(ws_url: str, method: str, params: dict | None = None) -> dict:
"""Execute a CDP command via WebSocket."""
import websocket
ws = websocket.create_connection(ws_url, timeout=30)
try:
command = {
"id": 1,
"method": method,
"params": params or {}
}
ws.send(json.dumps(command))
# Wait for response
while True:
response = json.loads(ws.recv())
if response.get("id") == 1:
return response.get("result", {})
finally:
ws.close()
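# Usage sketch (mirrors the helpers below): enable the Runtime domain, then
# evaluate JavaScript on the page and unwrap the returned value.
#
#     execute_cdp_command(ws_url, "Runtime.enable")
#     result = execute_cdp_command(
#         ws_url, "Runtime.evaluate", {"expression": "document.title"}
#     )
#     title = result.get("result", {}).get("value", "")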
def get_page_cookies(ws_url: str) -> list[dict]:
"""Get all cookies for the page."""
result = execute_cdp_command(ws_url, "Network.getCookies")
return result.get("cookies", [])
def get_page_html(ws_url: str) -> str:
"""Get the page HTML to extract CSRF token."""
# Enable Runtime domain
execute_cdp_command(ws_url, "Runtime.enable")
# Execute JavaScript to get page HTML
result = execute_cdp_command(
ws_url,
"Runtime.evaluate",
{"expression": "document.documentElement.outerHTML"}
)
return result.get("result", {}).get("value", "")
def navigate_to_url(ws_url: str, url: str) -> None:
"""Navigate the page to a URL."""
execute_cdp_command(ws_url, "Page.enable")
execute_cdp_command(ws_url, "Page.navigate", {"url": url})
# Wait for page to load
time.sleep(3)
def get_current_url(ws_url: str) -> str:
"""Get the current page URL via CDP (cheap operation, no JS evaluation)."""
execute_cdp_command(ws_url, "Runtime.enable")
result = execute_cdp_command(
ws_url,
"Runtime.evaluate",
{"expression": "window.location.href"}
)
return result.get("result", {}).get("value", "")
def check_if_logged_in_by_url(url: str) -> bool:
"""Check login status by URL - much cheaper than parsing HTML.
If NotebookLM redirects to accounts.google.com, user is not logged in.
If URL stays on notebooklm.google.com, user is authenticated.
"""
if "accounts.google.com" in url:
return False
if "notebooklm.google.com" in url:
return True
# Unknown URL - assume not logged in
return False
def extract_session_id_from_html(html: str) -> str:
"""Extract session ID from page HTML."""
patterns = [
r'"FdrFJe":"(\d+)"',
r'f\.sid["\s:=]+["\']?(\d+)',
r'"cfb2h":"([^"]+)"',
]
for pattern in patterns:
match = re.search(pattern, html)
if match:
return match.group(1)
return ""
def is_chrome_profile_locked(profile_dir: str | None = None) -> bool:
"""Check if a Chrome profile is locked (Chrome is using it).
Args:
profile_dir: The profile directory to check. If None, checks our
notebooklm-mcp profile, NOT the default Chrome profile.
This is more reliable than process detection because:
- Works across all platforms
- Detects if Chrome is using the specific profile we need
- The lock file only exists while Chrome has the profile open
"""
if profile_dir is None:
# Check OUR profile, not the default Chrome profile
# We use a separate profile so we can run alongside the user's main Chrome
profile_dir = str(Path.home() / ".notebooklm-mcp" / "chrome-profile")
# Chrome creates a "SingletonLock" file when the profile is in use
lock_file = Path(profile_dir) / "SingletonLock"
return lock_file.exists()
def is_our_chrome_profile_in_use() -> bool:
"""Check if OUR Chrome profile is already in use.
We use a separate profile at ~/.notebooklm-mcp/chrome-profile
so we can run alongside the user's main Chrome browser.
This only checks if our specific profile is locked, NOT if Chrome
is running in general. Users can have their main Chrome open.
"""
return is_chrome_profile_locked() # Already checks our profile by default
def run_auth_flow(port: int = CDP_DEFAULT_PORT, auto_launch: bool = True) -> AuthTokens | None:
"""Run the authentication flow.
Args:
port: Chrome DevTools port
auto_launch: If True, automatically launch Chrome if not running
"""
print("NotebookLM MCP Authentication")
print("=" * 40)
print()
# Check if Chrome is running with debugging
debugger_url = get_chrome_debugger_url(port)
if not debugger_url and auto_launch:
# Check if our specific profile is already in use
if is_our_chrome_profile_in_use():
print("The NotebookLM auth profile is already in use.")
print()
print("This means a previous auth Chrome window is still open.")
print("Close that window and try again, or use file mode:")
print()
print(" notebooklm-mcp-auth --file")
print()
return None
# We can launch our separate Chrome profile even if user's main Chrome is open
print("Launching Chrome with NotebookLM auth profile...")
print("(First time: you'll need to log in to your Google account)")
print()
# Launch with visible window so user can log in
launch_chrome(port, headless=False)
time.sleep(3)
debugger_url = get_chrome_debugger_url(port)
if not debugger_url:
print(f"ERROR: Cannot connect to Chrome on port {port}")
print()
print("This can happen if:")
print(" - Chrome failed to start")
print(" - Another process is using port 9222")
print(" - Firewall is blocking the port")
print()
print("TRY: Use file mode instead (most reliable):")
print(" notebooklm-mcp-auth --file")
print()
return None
print(f"Connected to Chrome debugger")
# Find or create NotebookLM page
page = find_or_create_notebooklm_page(port)
if not page:
print("ERROR: Failed to find or create NotebookLM page")
return None
ws_url = page.get("webSocketDebuggerUrl")
if not ws_url:
print("ERROR: No WebSocket URL for page")
return None
print(f"Using page: {page.get('title', 'Unknown')}")
# Navigate to NotebookLM if needed
current_url = page.get("url", "")
if "notebooklm.google.com" not in current_url:
print("Navigating to NotebookLM...")
navigate_to_url(ws_url, NOTEBOOKLM_URL)
# Check login status by URL (cheap - no HTML parsing)
print("Checking login status...")
current_url = get_current_url(ws_url)
if not check_if_logged_in_by_url(current_url):
print()
print("=" * 40)
print("NOT LOGGED IN")
print("=" * 40)
print()
print("Please log in to NotebookLM in the Chrome window.")
print("This tool will wait for you to complete login...")
print()
print("(Press Ctrl+C to cancel)")
print()
# Wait for login - check URL every 5 seconds (cheap operation)
max_wait = 300 # 5 minutes
start_time = time.time()
while time.time() - start_time < max_wait:
time.sleep(5)
try:
current_url = get_current_url(ws_url)
if check_if_logged_in_by_url(current_url):
print("Login detected!")
break
except Exception as e:
print(f"Waiting... ({e})")
if not check_if_logged_in_by_url(current_url):
print("ERROR: Login timeout. Please try again.")
return None
# Extract cookies
print("Extracting cookies...")
cookies_list = get_page_cookies(ws_url)
cookies = {c["name"]: c["value"] for c in cookies_list}
if not validate_cookies(cookies):
print("ERROR: Missing required cookies. Please ensure you're fully logged in.")
print(f"Required: {REQUIRED_COOKIES}")
print(f"Found: {list(cookies.keys())}")
return None
# Get page HTML for CSRF extraction
html = get_page_html(ws_url)
# Extract CSRF token
print("Extracting CSRF token...")
csrf_token = extract_csrf_from_page_source(html)
if not csrf_token:
print("WARNING: Could not extract CSRF token from page.")
print("You may need to extract it manually from Network tab.")
csrf_token = ""
# Extract session ID
session_id = extract_session_id_from_html(html)
# Create tokens object
tokens = AuthTokens(
cookies=cookies,
csrf_token=csrf_token,
session_id=session_id,
extracted_at=time.time(),
)
# Save to cache
save_tokens_to_cache(tokens)
print()
print("=" * 40)
print("SUCCESS!")
print("=" * 40)
print()
print(f"Cookies: {len(cookies)} extracted")
print(f"CSRF Token: {'Yes' if csrf_token else 'No (will be auto-extracted)'}")
print(f"Session ID: {session_id or 'Will be auto-extracted'}")
print()
print(f"Tokens cached to: {get_cache_path()}")
print()
print("NEXT STEPS:")
print()
print(" 1. Add the MCP to your AI tool (if not already done):")
print()
print(" Claude Code:")
print(" claude mcp add notebooklm-mcp -- notebooklm-mcp")
print()
print(" Gemini CLI:")
print(" gemini mcp add notebooklm notebooklm-mcp")
print()
print(" Or add to settings.json manually:")
print(' "notebooklm-mcp": { "command": "notebooklm-mcp" }')
print()
print(" 2. Restart your AI assistant")
print()
print(" 3. Test by asking: 'List my NotebookLM notebooks'")
print()
return tokens
def run_file_cookie_entry(cookie_file: str | None = None) -> AuthTokens | None:
"""Read cookies from a file and save them.
This is the recommended way to authenticate - users save their cookies
to a text file to avoid terminal truncation issues.
Args:
cookie_file: Optional path to file. If not provided, shows instructions
and prompts for the path.
"""
print("NotebookLM MCP - Cookie File Import")
print("=" * 50)
print()
# If no file provided, show instructions and prompt for path
if not cookie_file:
print("Follow these steps to extract and save your cookies:")
print()
print(" 1. Open Chrome and go to: https://notebooklm.google.com")
print(" 2. Make sure you're logged in")
print(" 3. Press F12 (or Cmd+Option+I on Mac) to open DevTools")
print(" 4. Click the 'Network' tab")
print(" 5. In the filter box, type: batchexecute")
print(" 6. Click on any notebook to trigger a request")
print(" 7. Click on a 'batchexecute' request in the list")
print(" 8. In the right panel, find 'Request Headers'")
print(" 9. Find the line starting with 'cookie:'")
print(" 10. Right-click the cookie VALUE and select 'Copy value'")
print(" 11. Edit the 'cookies.txt' file in this repo (or create a new file)")
print(" 12. Paste the cookie string and save")
print()
print("TIP: If running from the repo directory, just edit 'cookies.txt'")
print(" and enter: cookies.txt")
print()
print("-" * 50)
print()
try:
cookie_file = input("Enter the path to your cookie file: ").strip()
except (EOFError, KeyboardInterrupt):
print("\nCancelled.")
return None
if not cookie_file:
print("ERROR: No file path provided.")
return None
# Expand ~ to home directory
cookie_file = str(Path(cookie_file).expanduser())
print()
print(f"Reading cookies from: {cookie_file}")
print()
try:
with open(cookie_file, "r") as f:
cookie_string = f.read().strip()
except FileNotFoundError:
print(f"ERROR: File not found: {cookie_file}")
return None
except Exception as e:
print(f"ERROR: Could not read file: {e}")
return None
# Strip comment lines (lines starting with #)
lines = cookie_string.split("\n")
cookie_lines = [line.strip() for line in lines if line.strip() and not line.strip().startswith("#")]
cookie_string = " ".join(cookie_lines)
if not cookie_string:
print("\nERROR: No cookie string found in file.")
print("Make sure you pasted your cookies and removed the instructions.")
return None
print()
print("Validating cookies...")
# Parse cookies from header format (key=value; key=value; ...)
cookies = {}
for cookie in cookie_string.split(";"):
cookie = cookie.strip()
if "=" in cookie:
key, value = cookie.split("=", 1)
cookies[key.strip()] = value.strip()
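    # For example (hypothetical values), the header string "SID=abc; HSID=def"
    # parses to {"SID": "abc", "HSID": "def"}.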
if not cookies:
print("\nERROR: Could not parse any cookies from input.")
print("Make sure you copied the cookie VALUE, not the header name.")
print()
print("Expected format: SID=xxx; HSID=xxx; SSID=xxx; ...")
return None
# Validate required cookies
if not validate_cookies(cookies):
print("\nWARNING: Some required cookies are missing!")
print(f"Required: {REQUIRED_COOKIES}")
print(f"Found: {list(cookies.keys())}")
print()
print("Continuing anyway...")
# Create tokens object (CSRF and session ID will be auto-extracted later)
tokens = AuthTokens(
cookies=cookies,
csrf_token="", # Will be auto-extracted
session_id="", # Will be auto-extracted
extracted_at=time.time(),
)
# Save to cache
print()
print("Saving cookies...")
save_tokens_to_cache(tokens)
print()
print("=" * 50)
print("SUCCESS!")
print("=" * 50)
print()
print(f"Cookies saved: {len(cookies)} cookies")
print(f"Cache location: {get_cache_path()}")
print()
print("NEXT STEPS:")
print()
print(" 1. Add the MCP to your AI tool (if not already done):")
print()
print(" Claude Code:")
print(" claude mcp add notebooklm-mcp -- notebooklm-mcp")
print()
print(" Gemini CLI:")
print(" gemini mcp add notebooklm notebooklm-mcp")
print()
print(" Or add to settings.json manually:")
print(' "notebooklm-mcp": { "command": "notebooklm-mcp" }')
print()
print(" 2. Restart your AI assistant")
print()
print(" 3. Test by asking: 'List my NotebookLM notebooks'")
print()
return tokens
def main():
"""Main entry point."""
import argparse
parser = argparse.ArgumentParser(
description="Authenticate with NotebookLM MCP",
epilog="""
This tool extracts authentication tokens from Chrome for use with the NotebookLM MCP.
TWO MODES:
1. FILE MODE (--file): Import cookies from a file (RECOMMENDED)
- Shows step-by-step instructions for extracting cookies
- Prompts you for the file path after you save the cookies
- No Chrome remote debugging required
2. AUTO MODE (default): Automatic extraction via Chrome DevTools
- Requires closing Chrome first
- Launches Chrome and extracts cookies automatically
- May not work on all systems
EXAMPLES:
notebooklm-mcp-auth --file # Guided file import (recommended)
notebooklm-mcp-auth --file ~/cookies.txt # Direct file import
notebooklm-mcp-auth # Auto mode (close Chrome first)
After authentication, start the MCP server with: notebooklm-mcp
"""
)
parser.add_argument(
"--file",
nargs="?",
const="", # When --file is used without argument, set to empty string
metavar="PATH",
help="Import cookies from file (recommended). Shows instructions if no path given."
)
parser.add_argument(
"--port",
type=int,
default=CDP_DEFAULT_PORT,
help=f"Chrome DevTools port (default: {CDP_DEFAULT_PORT})"
)
parser.add_argument(
"--show-tokens",
action="store_true",
help="Show cached tokens (for debugging)"
)
parser.add_argument(
"--no-auto-launch",
action="store_true",
help="Don't automatically launch Chrome (requires Chrome to be running with debugging)"
)
args = parser.parse_args()
if args.show_tokens:
cache_path = get_cache_path()
if cache_path.exists():
with open(cache_path) as f:
data = json.load(f)
print(json.dumps(data, indent=2))
else:
print("No cached tokens found.")
return 0
try:
if args.file is not None: # --file was used (with or without path)
# File-based cookie import
tokens = run_file_cookie_entry(cookie_file=args.file if args.file else None)
else:
# Automatic extraction via Chrome DevTools
tokens = run_auth_flow(args.port, auto_launch=not args.no_auto_launch)
return 0 if tokens else 1
except KeyboardInterrupt:
print("\nCancelled.")
return 1
except Exception as e:
print(f"ERROR: {e}")
import traceback
traceback.print_exc()
return 1
if __name__ == "__main__":
sys.exit(main())
| python | MIT | 1ca3bba360852de0534ca33e0ccf7258a0efd306 | 2026-01-05T07:12:11.692365Z | false |
jacob-bd/notebooklm-mcp | https://github.com/jacob-bd/notebooklm-mcp/blob/1ca3bba360852de0534ca33e0ccf7258a0efd306/src/notebooklm_mcp/__init__.py | src/notebooklm_mcp/__init__.py | """NotebookLM MCP Server.
This MCP provides access to NotebookLM (notebooklm.google.com)
using undocumented internal APIs. Tested with personal/free tier accounts.
May work with Google Workspace accounts but has not been tested.
WARNING: This uses undocumented internal APIs that may change at any time.
"""
__version__ = "0.1.0"
| python | MIT | 1ca3bba360852de0534ca33e0ccf7258a0efd306 | 2026-01-05T07:12:11.692365Z | false |
jacob-bd/notebooklm-mcp | https://github.com/jacob-bd/notebooklm-mcp/blob/1ca3bba360852de0534ca33e0ccf7258a0efd306/src/notebooklm_mcp/server.py | src/notebooklm_mcp/server.py | """NotebookLM MCP Server."""
from typing import Any
from fastmcp import FastMCP
from .api_client import NotebookLMClient, extract_cookies_from_chrome_export, parse_timestamp
# Initialize MCP server
mcp = FastMCP(
name="notebooklm",
instructions="""NotebookLM MCP - Access NotebookLM (notebooklm.google.com).
**Auth:** Use save_auth_tokens with cookies from Chrome DevTools. CSRF/session auto-extracted.
**Confirmation:** Tools with confirm param require user approval before setting confirm=True.
**Studio:** After creating audio/video/infographic/slides, poll studio_status for completion.""",
)
# Global state
_client: NotebookLMClient | None = None
def get_client() -> NotebookLMClient:
"""Get or create the API client.
Tries environment variables first, falls back to cached tokens from auth CLI.
"""
global _client
if _client is None:
import os
from .auth import load_cached_tokens
cookie_header = os.environ.get("NOTEBOOKLM_COOKIES", "")
csrf_token = os.environ.get("NOTEBOOKLM_CSRF_TOKEN", "")
session_id = os.environ.get("NOTEBOOKLM_SESSION_ID", "")
if cookie_header:
# Use environment variables
cookies = extract_cookies_from_chrome_export(cookie_header)
else:
# Try cached tokens from auth CLI
cached = load_cached_tokens()
if cached:
cookies = cached.cookies
csrf_token = csrf_token or cached.csrf_token
session_id = session_id or cached.session_id
else:
raise ValueError(
"No authentication found. Either:\n"
"1. Run 'notebooklm-mcp-auth' to authenticate via Chrome, or\n"
"2. Set NOTEBOOKLM_COOKIES environment variable manually"
)
_client = NotebookLMClient(
cookies=cookies,
csrf_token=csrf_token,
session_id=session_id,
)
return _client
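# A minimal sketch of the environment-variable auth path (hypothetical values).
# Only NOTEBOOKLM_COOKIES is strictly needed; CSRF token and session ID are
# auto-extracted when left empty:
#   export NOTEBOOKLM_COOKIES="SID=abc; HSID=def; SSID=ghi; APISID=jkl; SAPISID=mno"
#   export NOTEBOOKLM_CSRF_TOKEN=""   # optional
#   export NOTEBOOKLM_SESSION_ID=""   # optional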
@mcp.tool()
def notebook_list(max_results: int = 100) -> dict[str, Any]:
"""List all notebooks.
Args:
max_results: Maximum number of notebooks to return (default: 100)
"""
try:
client = get_client()
notebooks = client.list_notebooks()
# Count owned vs shared notebooks
owned_count = sum(1 for nb in notebooks if nb.is_owned)
shared_count = len(notebooks) - owned_count
# Count notebooks shared by me (owned + is_shared=True)
shared_by_me_count = sum(1 for nb in notebooks if nb.is_owned and nb.is_shared)
return {
"status": "success",
"count": len(notebooks),
"owned_count": owned_count,
"shared_count": shared_count,
"shared_by_me_count": shared_by_me_count,
"notebooks": [
{
"id": nb.id,
"title": nb.title,
"source_count": nb.source_count,
"url": nb.url,
"ownership": nb.ownership,
"is_shared": nb.is_shared,
"created_at": nb.created_at,
"modified_at": nb.modified_at,
}
for nb in notebooks[:max_results]
],
}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def notebook_create(title: str = "") -> dict[str, Any]:
"""Create a new notebook.
Args:
title: Optional title for the notebook
"""
try:
client = get_client()
notebook = client.create_notebook(title=title)
if notebook:
return {
"status": "success",
"notebook": {
"id": notebook.id,
"title": notebook.title,
"url": notebook.url,
},
}
return {"status": "error", "error": "Failed to create notebook"}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def notebook_get(notebook_id: str) -> dict[str, Any]:
"""Get notebook details with sources.
Args:
notebook_id: Notebook UUID
"""
try:
client = get_client()
result = client.get_notebook(notebook_id)
# Extract timestamps from metadata if available
# Result structure: [title, sources, id, emoji, null, metadata, ...]
# metadata[5] = modified_at, metadata[8] = created_at
created_at = None
modified_at = None
if result and isinstance(result, list) and len(result) > 5:
metadata = result[5]
if isinstance(metadata, list):
if len(metadata) > 5:
modified_at = parse_timestamp(metadata[5])
if len(metadata) > 8:
created_at = parse_timestamp(metadata[8])
return {
"status": "success",
"notebook": result,
"created_at": created_at,
"modified_at": modified_at,
}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def notebook_describe(notebook_id: str) -> dict[str, Any]:
"""Get AI-generated notebook summary with suggested topics.
Args:
notebook_id: Notebook UUID
Returns: summary (markdown), suggested_topics list
"""
try:
client = get_client()
result = client.get_notebook_summary(notebook_id)
return {
"status": "success",
**result, # Includes summary and suggested_topics
}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def source_describe(source_id: str) -> dict[str, Any]:
"""Get AI-generated source summary with keyword chips.
Args:
source_id: Source UUID
Returns: summary (markdown with **bold** keywords), keywords list
"""
try:
client = get_client()
result = client.get_source_guide(source_id)
return {
"status": "success",
**result, # Includes summary and keywords
}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def notebook_add_url(notebook_id: str, url: str) -> dict[str, Any]:
"""Add URL (website or YouTube) as source.
Args:
notebook_id: Notebook UUID
url: URL to add
"""
try:
client = get_client()
result = client.add_url_source(notebook_id, url=url)
if result:
return {
"status": "success",
"source": result,
}
return {"status": "error", "error": "Failed to add URL source"}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def notebook_add_text(
notebook_id: str,
text: str,
title: str = "Pasted Text",
) -> dict[str, Any]:
"""Add pasted text as source.
Args:
notebook_id: Notebook UUID
text: Text content to add
title: Optional title
"""
try:
client = get_client()
result = client.add_text_source(notebook_id, text=text, title=title)
if result:
return {
"status": "success",
"source": result,
}
return {"status": "error", "error": "Failed to add text source"}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def notebook_add_drive(
notebook_id: str,
document_id: str,
title: str,
doc_type: str = "doc",
) -> dict[str, Any]:
"""Add Google Drive document as source.
Args:
notebook_id: Notebook UUID
document_id: Drive document ID (from URL)
title: Display title
doc_type: doc|slides|sheets|pdf
"""
try:
mime_types = {
"doc": "application/vnd.google-apps.document",
"docs": "application/vnd.google-apps.document",
"slides": "application/vnd.google-apps.presentation",
"sheets": "application/vnd.google-apps.spreadsheet",
"pdf": "application/pdf",
}
mime_type = mime_types.get(doc_type.lower())
if not mime_type:
return {
"status": "error",
"error": f"Unknown doc_type '{doc_type}'. Use 'doc', 'slides', 'sheets', or 'pdf'.",
}
client = get_client()
result = client.add_drive_source(
notebook_id,
document_id=document_id,
title=title,
mime_type=mime_type,
)
if result:
return {
"status": "success",
"source": result,
}
return {"status": "error", "error": "Failed to add Drive source"}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def notebook_query(
notebook_id: str,
query: str,
source_ids: list[str] | None = None,
conversation_id: str | None = None,
) -> dict[str, Any]:
"""Ask AI about EXISTING sources already in notebook. NOT for finding new sources.
Use research_start instead for: deep research, web search, find new sources, Drive search.
Args:
notebook_id: Notebook UUID
query: Question to ask
source_ids: Source IDs to query (default: all)
conversation_id: For follow-up questions
"""
try:
client = get_client()
result = client.query(
notebook_id,
query_text=query,
source_ids=source_ids,
conversation_id=conversation_id,
)
if result:
return {
"status": "success",
"answer": result.get("answer", ""),
"conversation_id": result.get("conversation_id"),
}
return {"status": "error", "error": "Failed to query notebook"}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def notebook_delete(
notebook_id: str,
confirm: bool = False,
) -> dict[str, Any]:
"""Delete notebook permanently. IRREVERSIBLE. Requires confirm=True.
Args:
notebook_id: Notebook UUID
confirm: Must be True after user approval
"""
if not confirm:
return {
"status": "error",
"error": "Deletion not confirmed. You must ask the user to confirm "
"before deleting. Set confirm=True only after user approval.",
"warning": "This action is IRREVERSIBLE. The notebook and all its "
"sources will be permanently deleted.",
}
try:
client = get_client()
result = client.delete_notebook(notebook_id)
if result:
return {
"status": "success",
"message": f"Notebook {notebook_id} has been permanently deleted.",
}
return {"status": "error", "error": "Failed to delete notebook"}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def notebook_rename(
notebook_id: str,
new_title: str,
) -> dict[str, Any]:
"""Rename a notebook.
Args:
notebook_id: Notebook UUID
new_title: New title
"""
try:
client = get_client()
result = client.rename_notebook(notebook_id, new_title)
if result:
return {
"status": "success",
"notebook": {
"id": notebook_id,
"title": new_title,
},
}
return {"status": "error", "error": "Failed to rename notebook"}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def chat_configure(
notebook_id: str,
goal: str = "default",
custom_prompt: str | None = None,
response_length: str = "default",
) -> dict[str, Any]:
"""Configure notebook chat settings.
Args:
notebook_id: Notebook UUID
goal: default|learning_guide|custom
custom_prompt: Required when goal=custom (max 10000 chars)
response_length: default|longer|shorter
"""
try:
client = get_client()
result = client.configure_chat(
notebook_id=notebook_id,
goal=goal,
custom_prompt=custom_prompt,
response_length=response_length,
)
return result
except ValueError as e:
return {"status": "error", "error": str(e)}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def source_list_drive(notebook_id: str) -> dict[str, Any]:
"""List sources with types and Drive freshness status.
Use before source_sync_drive to identify stale sources.
Args:
notebook_id: Notebook UUID
"""
try:
client = get_client()
sources = client.get_notebook_sources_with_types(notebook_id)
# Separate sources by syncability
syncable_sources = []
other_sources = []
for src in sources:
if src.get("can_sync"):
# Check freshness for syncable sources (Drive docs and Gemini Notes)
is_fresh = client.check_source_freshness(src["id"])
src["is_fresh"] = is_fresh
src["needs_sync"] = is_fresh is False
syncable_sources.append(src)
else:
other_sources.append(src)
# Count stale sources
stale_count = sum(1 for s in syncable_sources if s.get("needs_sync"))
return {
"status": "success",
"notebook_id": notebook_id,
"summary": {
"total_sources": len(sources),
"syncable_sources": len(syncable_sources),
"stale_sources": stale_count,
"other_sources": len(other_sources),
},
"syncable_sources": syncable_sources,
"other_sources": [
{"id": s["id"], "title": s["title"], "type": s["source_type_name"]}
for s in other_sources
],
}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def source_sync_drive(
source_ids: list[str],
confirm: bool = False,
) -> dict[str, Any]:
"""Sync Drive sources with latest content. Requires confirm=True.
Call source_list_drive first to identify stale sources.
Args:
source_ids: Source UUIDs to sync
confirm: Must be True after user approval
"""
if not confirm:
return {
"status": "error",
"error": "Sync not confirmed. You must ask the user to confirm "
"before syncing. Set confirm=True only after user approval.",
"hint": "First call source_list_drive to show stale sources, "
"then ask user to confirm before syncing.",
}
if not source_ids:
return {
"status": "error",
"error": "No source_ids provided. Use source_list_drive to get source IDs.",
}
try:
client = get_client()
results = []
synced_count = 0
failed_count = 0
for source_id in source_ids:
try:
result = client.sync_drive_source(source_id)
if result:
results.append({
"source_id": source_id,
"status": "synced",
"title": result.get("title"),
})
synced_count += 1
else:
results.append({
"source_id": source_id,
"status": "failed",
"error": "Sync returned no result",
})
failed_count += 1
except Exception as e:
results.append({
"source_id": source_id,
"status": "failed",
"error": str(e),
})
failed_count += 1
return {
"status": "success" if failed_count == 0 else "partial",
"summary": {
"total": len(source_ids),
"synced": synced_count,
"failed": failed_count,
},
"results": results,
}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def source_delete(
source_id: str,
confirm: bool = False,
) -> dict[str, Any]:
"""Delete source permanently. IRREVERSIBLE. Requires confirm=True.
Args:
source_id: Source UUID to delete
confirm: Must be True after user approval
"""
if not confirm:
return {
"status": "error",
"error": "Deletion not confirmed. You must ask the user to confirm "
"before deleting. Set confirm=True only after user approval.",
"warning": "This action is IRREVERSIBLE. The source will be "
"permanently deleted from the notebook.",
}
try:
client = get_client()
result = client.delete_source(source_id)
if result:
return {
"status": "success",
"message": f"Source {source_id} has been permanently deleted.",
}
return {"status": "error", "error": "Failed to delete source"}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def research_start(
query: str,
source: str = "web",
mode: str = "fast",
notebook_id: str | None = None,
title: str | None = None,
) -> dict[str, Any]:
"""Deep research / fast research: Search web or Google Drive to FIND NEW sources.
Use this for: "deep research on X", "find sources about Y", "search web for Z", "search Drive".
Workflow: research_start -> poll research_status -> research_import.
Args:
query: What to search for (e.g. "quantum computing advances")
source: web|drive (where to search)
mode: fast (~30s, ~10 sources) | deep (~5min, ~40 sources, web only)
notebook_id: Existing notebook (creates new if not provided)
title: Title for new notebook
"""
try:
client = get_client()
# Validate mode + source combination early
if mode.lower() == "deep" and source.lower() == "drive":
return {
"status": "error",
"error": "Deep Research only supports Web sources. Use mode='fast' for Drive.",
}
# Create notebook if needed
if not notebook_id:
notebook_title = title or f"Research: {query[:50]}"
notebook = client.create_notebook(title=notebook_title)
if not notebook:
return {"status": "error", "error": "Failed to create notebook"}
notebook_id = notebook.id
created_notebook = True
else:
created_notebook = False
# Start research
result = client.start_research(
notebook_id=notebook_id,
query=query,
source=source,
mode=mode,
)
if result:
response = {
"status": "success",
"task_id": result["task_id"],
"notebook_id": notebook_id,
"notebook_url": f"https://notebooklm.google.com/notebook/{notebook_id}",
"query": query,
"source": result["source"],
"mode": result["mode"],
"created_notebook": created_notebook,
}
# Add helpful message based on mode
if result["mode"] == "deep":
response["message"] = (
"Deep Research started. This takes 3-5 minutes. "
"Call research_status to check progress."
)
else:
response["message"] = (
"Fast Research started. This takes about 30 seconds. "
"Call research_status to check progress."
)
return response
return {"status": "error", "error": "Failed to start research"}
except ValueError as e:
return {"status": "error", "error": str(e)}
except Exception as e:
return {"status": "error", "error": str(e)}
def _compact_research_result(result: dict) -> dict:
"""Compact research result to save tokens.
Truncates report to 500 chars and limits sources to first 10.
Users can query the notebook for full details.
"""
if not isinstance(result, dict):
return result
# Truncate report if present
if "report" in result and result["report"]:
report = result["report"]
if len(report) > 500:
result["report"] = report[:500] + f"\n\n... (truncated {len(report) - 500} characters. Query the notebook for full details)"
# Limit sources shown
if "sources" in result and isinstance(result["sources"], list):
total_sources = len(result["sources"])
if total_sources > 10:
result["sources"] = result["sources"][:10]
result["sources_truncated"] = f"Showing first 10 of {total_sources} sources. Set compact=False for all sources."
return result
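# Example of compaction (hypothetical data): a 600-character report is cut to
# 500 characters plus a truncation notice, and a 12-item source list is reduced
# to its first 10 entries:
#   result = _compact_research_result({"report": "x" * 600, "sources": [{}] * 12})
#   len(result["report"]) > 500   # True (truncation notice appended)
#   len(result["sources"])        # 10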
@mcp.tool()
def research_status(
notebook_id: str,
poll_interval: int = 30,
max_wait: int = 300,
compact: bool = True,
) -> dict[str, Any]:
"""Poll research progress. Blocks until complete or timeout.
Args:
notebook_id: Notebook UUID
poll_interval: Seconds between polls (default: 30)
max_wait: Max seconds to wait (default: 300, 0=single poll)
compact: If True (default), truncate report and limit sources shown to save tokens.
Use compact=False to get full details.
"""
import time
try:
client = get_client()
start_time = time.time()
polls = 0
while True:
polls += 1
result = client.poll_research(notebook_id)
if not result:
return {"status": "error", "error": "Failed to poll research status"}
# If completed or no research found, return immediately
if result.get("status") in ("completed", "no_research"):
result["polls_made"] = polls
result["wait_time_seconds"] = round(time.time() - start_time, 1)
# Compact mode: truncate to save tokens
if compact and result.get("status") == "completed":
result = _compact_research_result(result)
return {
"status": "success",
"research": result,
}
# Check if we should stop waiting
elapsed = time.time() - start_time
if max_wait == 0 or elapsed >= max_wait:
result["polls_made"] = polls
result["wait_time_seconds"] = round(elapsed, 1)
result["message"] = (
f"Research still in progress after {round(elapsed, 1)}s. "
f"Call research_status again to continue waiting."
)
# Compact mode even for in-progress
if compact:
result = _compact_research_result(result)
return {
"status": "success",
"research": result,
}
# Wait before next poll
time.sleep(poll_interval)
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def research_import(
notebook_id: str,
task_id: str,
source_indices: list[int] | None = None,
) -> dict[str, Any]:
"""Import discovered sources into notebook.
Call after research_status shows status="completed".
Args:
notebook_id: Notebook UUID
task_id: Research task ID
source_indices: Source indices to import (default: all)
"""
try:
client = get_client()
# First, get the current research results to get source details
poll_result = client.poll_research(notebook_id)
if not poll_result or poll_result.get("status") == "no_research":
return {
"status": "error",
"error": "No research found for this notebook. Run research_start first.",
}
if poll_result.get("status") != "completed":
return {
"status": "error",
"error": f"Research is still in progress (status: {poll_result.get('status')}). "
"Wait for completion before importing.",
}
# Get sources from poll result
all_sources = poll_result.get("sources", [])
report_content = poll_result.get("report", "")
if not all_sources:
return {
"status": "error",
"error": "No sources found in research results.",
}
        # Find the deep_report source (type 5) if present. Deep reports have no
        # URL and are imported separately as a text source below; the remaining
        # web/drive sources are imported normally.
        deep_report_source = None
        for src in all_sources:
            if src.get("result_type") == 5:
                deep_report_source = src
# Filter sources by indices if specified
if source_indices is not None:
sources_to_import = []
invalid_indices = []
for idx in source_indices:
if 0 <= idx < len(all_sources):
sources_to_import.append(all_sources[idx])
else:
invalid_indices.append(idx)
if invalid_indices:
return {
"status": "error",
"error": f"Invalid source indices: {invalid_indices}. "
f"Valid range is 0-{len(all_sources)-1}.",
}
else:
sources_to_import = all_sources
# Import web/drive sources (skip deep_report sources as they don't have URLs)
web_sources_to_import = [s for s in sources_to_import if s.get("result_type") != 5]
imported = client.import_research_sources(
notebook_id=notebook_id,
task_id=task_id,
sources=web_sources_to_import,
)
# If deep research with report, import the report as a text source
if deep_report_source and report_content:
try:
report_result = client.add_text_source(
notebook_id=notebook_id,
title=deep_report_source.get("title", "Deep Research Report"),
text=report_content,
)
if report_result:
imported.append({
"id": report_result.get("id"),
"title": report_result.get("title", "Deep Research Report"),
})
            except Exception:
                # Don't fail the entire import if report import fails
                pass
return {
"status": "success",
"imported_count": len(imported),
"total_available": len(all_sources),
"sources": imported,
"notebook_url": f"https://notebooklm.google.com/notebook/{notebook_id}",
}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def audio_overview_create(
notebook_id: str,
source_ids: list[str] | None = None,
format: str = "deep_dive",
length: str = "default",
language: str = "en",
focus_prompt: str = "",
confirm: bool = False,
) -> dict[str, Any]:
"""Generate audio overview. Requires confirm=True after user approval.
Args:
notebook_id: Notebook UUID
source_ids: Source IDs (default: all)
format: deep_dive|brief|critique|debate
length: short|default|long
language: BCP-47 code (en, es, fr, de, ja)
focus_prompt: Optional focus text
confirm: Must be True after user approval
"""
if not confirm:
return {
"status": "pending_confirmation",
"message": "Please confirm these settings before creating the audio overview:",
"settings": {
"notebook_id": notebook_id,
"format": format,
"length": length,
"language": language,
"focus_prompt": focus_prompt or "(none)",
"source_ids": source_ids or "all sources",
},
"note": "Set confirm=True after user approves these settings.",
}
try:
client = get_client()
# Map format string to code
format_codes = {
"deep_dive": 1,
"brief": 2,
"critique": 3,
"debate": 4,
}
format_code = format_codes.get(format.lower())
if format_code is None:
return {
"status": "error",
"error": f"Unknown format '{format}'. Use: deep_dive, brief, critique, or debate.",
}
# Map length string to code
length_codes = {
"short": 1,
"default": 2,
"long": 3,
}
length_code = length_codes.get(length.lower())
if length_code is None:
return {
"status": "error",
"error": f"Unknown length '{length}'. Use: short, default, or long.",
}
# Get source IDs if not provided
if source_ids is None:
sources = client.get_notebook_sources_with_types(notebook_id)
source_ids = [s["id"] for s in sources if s["id"]]
if not source_ids:
return {
"status": "error",
"error": "No sources found in notebook. Add sources before creating audio overview.",
}
result = client.create_audio_overview(
notebook_id=notebook_id,
source_ids=source_ids,
format_code=format_code,
length_code=length_code,
language=language,
focus_prompt=focus_prompt,
)
if result:
return {
"status": "success",
"artifact_id": result["artifact_id"],
"type": "audio",
"format": result["format"],
"length": result["length"],
"language": result["language"],
"generation_status": result["status"],
"message": "Audio generation started. Use studio_status to check progress.",
"notebook_url": f"https://notebooklm.google.com/notebook/{notebook_id}",
}
return {"status": "error", "error": "Failed to create audio overview"}
except Exception as e:
return {"status": "error", "error": str(e)}
@mcp.tool()
def video_overview_create(
notebook_id: str,
source_ids: list[str] | None = None,
format: str = "explainer",
visual_style: str = "auto_select",
language: str = "en",
focus_prompt: str = "",
confirm: bool = False,
) -> dict[str, Any]:
"""Generate video overview. Requires confirm=True after user approval.
Args:
notebook_id: Notebook UUID
source_ids: Source IDs (default: all)
format: explainer|brief
visual_style: auto_select|classic|whiteboard|kawaii|anime|watercolor|retro_print|heritage|paper_craft
language: BCP-47 code (en, es, fr, de, ja)
focus_prompt: Optional focus text
confirm: Must be True after user approval
"""
if not confirm:
return {
"status": "pending_confirmation",
"message": "Please confirm these settings before creating the video overview:",
"settings": {
"notebook_id": notebook_id,
"format": format,
"visual_style": visual_style,
"language": language,
"focus_prompt": focus_prompt or "(none)",
"source_ids": source_ids or "all sources",
},
"note": "Set confirm=True after user approves these settings.",
}
try:
client = get_client()
# Map format string to code
format_codes = {
"explainer": 1,
| python | MIT | 1ca3bba360852de0534ca33e0ccf7258a0efd306 | 2026-01-05T07:12:11.692365Z | true |
jacob-bd/notebooklm-mcp | https://github.com/jacob-bd/notebooklm-mcp/blob/1ca3bba360852de0534ca33e0ccf7258a0efd306/src/notebooklm_mcp/auth.py | src/notebooklm_mcp/auth.py | """Authentication helper for NotebookLM MCP.
Uses Chrome DevTools MCP to extract auth tokens from an authenticated browser session.
If the user is not logged in, prompts them to log in via the Chrome window.
"""
import json
import os
import time
from dataclasses import dataclass
from pathlib import Path
@dataclass
class AuthTokens:
"""Authentication tokens for NotebookLM.
Only cookies are required. CSRF token and session ID are optional because
they can be auto-extracted from the NotebookLM page when needed.
"""
cookies: dict[str, str]
csrf_token: str = "" # Optional - auto-extracted from page
session_id: str = "" # Optional - auto-extracted from page
extracted_at: float = 0.0
def to_dict(self) -> dict:
return {
"cookies": self.cookies,
"csrf_token": self.csrf_token,
"session_id": self.session_id,
"extracted_at": self.extracted_at,
}
@classmethod
def from_dict(cls, data: dict) -> "AuthTokens":
return cls(
cookies=data["cookies"],
csrf_token=data.get("csrf_token", ""), # May be empty
session_id=data.get("session_id", ""), # May be empty
extracted_at=data.get("extracted_at", 0),
)
def is_expired(self, max_age_hours: float = 168) -> bool:
"""Check if cookies are older than max_age_hours.
Default is 168 hours (1 week) since cookies are stable for weeks.
The CSRF token/session ID will be auto-refreshed regardless.
"""
age_seconds = time.time() - self.extracted_at
return age_seconds > (max_age_hours * 3600)
@property
def cookie_header(self) -> str:
"""Get cookies as a header string."""
return "; ".join(f"{k}={v}" for k, v in self.cookies.items())
def get_cache_path() -> Path:
"""Get the path to the auth cache file."""
cache_dir = Path.home() / ".notebooklm-mcp"
cache_dir.mkdir(exist_ok=True)
return cache_dir / "auth.json"
def load_cached_tokens() -> AuthTokens | None:
"""Load tokens from cache if they exist.
Note: We no longer reject tokens based on age. The functional check
(redirect to login during CSRF refresh) is the real validity test.
Cookies often last much longer than any arbitrary time limit.
"""
cache_path = get_cache_path()
if not cache_path.exists():
return None
try:
with open(cache_path) as f:
data = json.load(f)
tokens = AuthTokens.from_dict(data)
# Just warn if tokens are old, but still return them
# Let the API client's functional check determine validity
if tokens.is_expired():
print("Note: Cached tokens are older than 1 week. They may still work.")
return tokens
except (json.JSONDecodeError, KeyError, TypeError) as e:
print(f"Failed to load cached tokens: {e}")
return None
def save_tokens_to_cache(tokens: AuthTokens, silent: bool = False) -> None:
"""Save tokens to cache.
Args:
tokens: AuthTokens to save
silent: If True, don't print confirmation message (for auto-updates)
"""
cache_path = get_cache_path()
with open(cache_path, "w") as f:
json.dump(tokens.to_dict(), f, indent=2)
if not silent:
print(f"Auth tokens cached to {cache_path}")
def extract_tokens_via_chrome_devtools() -> AuthTokens | None:
"""
Extract auth tokens using Chrome DevTools.
This function assumes Chrome DevTools MCP is available and connected
to a Chrome browser. It will:
1. Navigate to notebooklm.google.com
2. Check if logged in
3. If not, wait for user to log in
4. Extract cookies and CSRF token
Returns:
AuthTokens if successful, None otherwise
"""
# This is a placeholder - the actual implementation would use
# Chrome DevTools MCP tools. Since we're inside an MCP server,
# we can't directly call another MCP's tools.
#
# Instead, we'll provide a CLI command that can be run separately
# to extract and cache the tokens.
raise NotImplementedError(
"Direct Chrome DevTools extraction not implemented. "
"Use the 'notebooklm-mcp-auth' CLI command instead."
)
def extract_csrf_from_page_source(html: str) -> str | None:
"""Extract CSRF token from page HTML.
The token is stored in WIZ_global_data.SNlM0e or similar structures.
"""
import re
# Try different patterns for CSRF token
patterns = [
r'"SNlM0e":"([^"]+)"', # WIZ_global_data.SNlM0e
r'at=([^&"]+)', # Direct at= value
r'"FdrFJe":"([^"]+)"', # Alternative location
]
for pattern in patterns:
match = re.search(pattern, html)
if match:
return match.group(1)
return None
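# A minimal sketch of what the first pattern matches (hypothetical HTML):
#   extract_csrf_from_page_source('window.WIZ_global_data = {"SNlM0e":"AbC123"}')
#   -> "AbC123"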
def extract_session_id_from_page(html: str) -> str | None:
"""Extract session ID from page HTML."""
import re
patterns = [
r'"FdrFJe":"([^"]+)"',
r'f\.sid=(\d+)',
]
for pattern in patterns:
match = re.search(pattern, html)
if match:
return match.group(1)
return None
# ============================================================================
# CLI Authentication Flow
# ============================================================================
#
# This is designed to be run as a separate command before starting the MCP.
# It uses Chrome DevTools MCP interactively to extract auth tokens.
#
# Usage:
# 1. Make sure Chrome is open with DevTools MCP connected
# 2. Run: notebooklm-mcp-auth
# 3. If not logged in, log in via the Chrome window
# 4. Tokens are cached to ~/.notebooklm-mcp/auth.json
# 5. Start the MCP server - it will use cached tokens
#
# The auth flow script is separate because:
# - MCP servers can't easily call other MCP tools
# - Interactive login needs user attention
# - Caching allows the MCP to start without browser interaction
def parse_cookies_from_chrome_format(cookies_list: list[dict]) -> dict[str, str]:
"""Parse cookies from Chrome DevTools format to simple dict."""
result = {}
for cookie in cookies_list:
name = cookie.get("name", "")
value = cookie.get("value", "")
if name:
result[name] = value
return result
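# Example (hypothetical Chrome DevTools entries):
#   parse_cookies_from_chrome_format([{"name": "SID", "value": "abc"}])
#   -> {"SID": "abc"}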
# Tokens that need to be present for auth to work
REQUIRED_COOKIES = ["SID", "HSID", "SSID", "APISID", "SAPISID"]
def validate_cookies(cookies: dict[str, str]) -> bool:
"""Check if required cookies are present."""
for required in REQUIRED_COOKIES:
if required not in cookies:
return False
return True
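# Minimal sketch (hypothetical values): validation passes only when every
# name in REQUIRED_COOKIES is present.
#   validate_cookies({name: "x" for name in REQUIRED_COOKIES})  # True
#   validate_cookies({"SID": "x"})                              # False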
| python | MIT | 1ca3bba360852de0534ca33e0ccf7258a0efd306 | 2026-01-05T07:12:11.692365Z | false |
eriklindernoren/PyTorch-Deep-Dream | https://github.com/eriklindernoren/PyTorch-Deep-Dream/blob/637de95ffca461d49ae49538d0d44f0e89ffdf0f/deep_dream.py | deep_dream.py | import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import argparse
import os
import tqdm
import scipy.ndimage as nd
from utils import deprocess, preprocess, clip
def dream(image, model, iterations, lr):
""" Updates the image to maximize outputs for n iterations """
    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
image = Variable(Tensor(image), requires_grad=True)
for i in range(iterations):
model.zero_grad()
out = model(image)
loss = out.norm()
loss.backward()
avg_grad = np.abs(image.grad.data.cpu().numpy()).mean()
norm_lr = lr / avg_grad
image.data += norm_lr * image.grad.data
image.data = clip(image.data)
image.grad.data.zero_()
return image.cpu().data.numpy()
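# Note on the update in dream(): dividing lr by the mean absolute gradient
# makes the applied step lr * grad / mean(|grad|), so the average pixel moves
# by about lr per iteration regardless of the raw gradient scale.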
def deep_dream(image, model, iterations, lr, octave_scale, num_octaves):
""" Main deep dream method """
image = preprocess(image).unsqueeze(0).cpu().data.numpy()
# Extract image representations for each octave
octaves = [image]
for _ in range(num_octaves - 1):
octaves.append(nd.zoom(octaves[-1], (1, 1, 1 / octave_scale, 1 / octave_scale), order=1))
detail = np.zeros_like(octaves[-1])
for octave, octave_base in enumerate(tqdm.tqdm(octaves[::-1], desc="Dreaming")):
if octave > 0:
# Upsample detail to new octave dimension
detail = nd.zoom(detail, np.array(octave_base.shape) / np.array(detail.shape), order=1)
# Add deep dream detail from previous octave to new base
input_image = octave_base + detail
# Get new deep dream image
dreamed_image = dream(input_image, model, iterations, lr)
# Extract deep dream details
detail = dreamed_image - octave_base
return deprocess(dreamed_image)
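# Worked example of the octave pyramid: with octave_scale=1.4 and
# num_octaves=3, a 700px-wide image is dreamed at roughly 357px, then 500px,
# then 700px, with the accumulated detail upsampled between octaves.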
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_image", type=str, default="images/supermarket.jpg", help="path to input image")
parser.add_argument("--iterations", default=20, help="number of gradient ascent steps per octave")
parser.add_argument("--at_layer", default=27, type=int, help="layer at which we modify image to maximize outputs")
parser.add_argument("--lr", default=0.01, help="learning rate")
parser.add_argument("--octave_scale", default=1.4, help="image scale between octaves")
parser.add_argument("--num_octaves", default=10, help="number of octaves")
args = parser.parse_args()
# Load image
image = Image.open(args.input_image)
# Define the model
network = models.vgg19(pretrained=True)
layers = list(network.features.children())
model = nn.Sequential(*layers[: (args.at_layer + 1)])
    if torch.cuda.is_available():
model = model.cuda()
print(network)
# Extract deep dream image
dreamed_image = deep_dream(
image,
model,
iterations=args.iterations,
lr=args.lr,
octave_scale=args.octave_scale,
num_octaves=args.num_octaves,
)
# Save and plot image
os.makedirs("outputs", exist_ok=True)
filename = args.input_image.split("/")[-1]
plt.figure(figsize=(20, 20))
plt.imshow(dreamed_image)
plt.imsave(f"outputs/output_{filename}", dreamed_image)
plt.show()
| python | MIT | 637de95ffca461d49ae49538d0d44f0e89ffdf0f | 2026-01-05T07:12:12.077056Z | false |
eriklindernoren/PyTorch-Deep-Dream | https://github.com/eriklindernoren/PyTorch-Deep-Dream/blob/637de95ffca461d49ae49538d0d44f0e89ffdf0f/utils.py | utils.py | import numpy as np
import torch
from torchvision import transforms
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
preprocess = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
def deprocess(image_np):
image_np = image_np.squeeze().transpose(1, 2, 0)
image_np = image_np * std.reshape((1, 1, 3)) + mean.reshape((1, 1, 3))
image_np = np.clip(image_np, 0.0, 255.0)
return image_np
def clip(image_tensor):
for c in range(3):
m, s = mean[c], std[c]
image_tensor[0, c] = torch.clamp(image_tensor[0, c], -m / s, (1 - m) / s)
return image_tensor
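# clip keeps each channel inside the normalized equivalent of [0, 1]: a pixel
# x in [0, 1] maps to (x - m) / s, whose range is [-m / s, (1 - m) / s].
# For the red channel (m=0.485, s=0.229) that is roughly [-2.118, 2.249].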
| python | MIT | 637de95ffca461d49ae49538d0d44f0e89ffdf0f | 2026-01-05T07:12:12.077056Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/eval_cfd.py | src/eval_cfd.py | import os
import platform
from argparse import ArgumentParser
from copy import deepcopy
from pathlib import Path
import wandb
import yaml
def parse_args():
parser = ArgumentParser()
parser.add_argument("--stage_id", type=str, required=True)
parser.add_argument("--checkpoint", type=str, default="best_model.loss.online.x_hat.E1")
parser.add_argument("--mode", type=str, default="correlation", choices=["gif", "loss", "correlation"])
parser.add_argument("--num_input_points", type=int)
parser.add_argument("--num_input_points_ratio", type=float)
parser.add_argument("--k", type=int)
parser.add_argument("--rollout_mode", type=str, choices=["image", "latent"])
parser.add_argument("--version", type=str)
parser.add_argument("--num_supernodes", type=int)
parser.add_argument("--resolution", type=int)
return vars(parser.parse_args())
def main(
stage_id,
checkpoint,
mode,
num_input_points,
num_input_points_ratio,
k,
rollout_mode,
version,
num_supernodes,
resolution,
):
# init args + W&B
print(f"stage_id: {stage_id}")
print(f"checkpoint: {checkpoint}")
print(f"mode: {mode} (gif or loss)")
print(f"num_input_points: {num_input_points}")
print(f"num_input_points_ratio: {num_input_points_ratio}")
print(f"num_supernodes: {num_supernodes}")
print(f"resolution: {resolution}")
wandb.login(host="https://api.wandb.ai/")
api = wandb.Api()
# generate yamls
out = Path("yamls_run")
out.mkdir(exist_ok=True)
print(stage_id)
run = api.run(f"jku-ssl/cvsim/{stage_id}")
name = "/".join(run.name.split("/")[:-1])
if num_input_points_ratio is not None:
name += f"-subsam{str(num_input_points_ratio).replace('.', '')}"
if k is not None:
name += f"-k{k}"
if "grid_resolution" in run.config["datasets"]["train"]:
if "standardize_query_pos" in run.config["datasets"]["train"]:
print(f"found grid_resolution and standardize_query_pos -> using interpolated template")
template_fname = "interpolated"
else:
print("found grid_resolution -> using gino template")
template_fname = "gino"
else:
print("found no grid_resolution -> using simformer template")
template_fname = "simformer"
template_uri = f"yamls/eval/cfd/{mode}/{template_fname}.yaml"
with open(template_uri) as f:
hp = yaml.safe_load(f)
# fetch mode
if rollout_mode is None:
rec_prev_x = run.config["trainer"].get("reconstruct_prev_x_weight", 0)
rec_dynamics = run.config["trainer"].get("reconstruct_dynamics_weight", 0)
if rec_prev_x > 0 or rec_dynamics > 0:
print(f"found reconstruction loss -> use mode=latent")
rollout_mode = "latent"
else:
print(f"no reconstruction losses found -> use mode=image")
rollout_mode = "image"
hp["vars"]["mode"] = rollout_mode
name += f"-{rollout_mode}"
name += f"-{checkpoint.split('_')[0]}"
if num_input_points is not None:
if num_input_points > 1000:
name += f"_in{num_input_points // 1000}k"
else:
name += f"_in{num_input_points}"
if num_supernodes is not None:
name += f"_{num_supernodes}supernodes"
if resolution is not None:
name += f"_{resolution}resolution"
# set other params
hp["vars"]["stage_id"] = stage_id
if checkpoint.isdigit():
hp["vars"]["checkpoint"] = dict(epoch=checkpoint)
elif checkpoint.startswith("E") and checkpoint[1:].isdigit():
hp["vars"]["checkpoint"] = dict(epoch=checkpoint[1:])
else:
hp["vars"]["checkpoint"] = checkpoint
if version is None:
hp["vars"]["version"] = run.config["datasets"]["train"]["version"]
else:
hp["vars"]["version"] = version
name += f"-{name}"
hp["vars"]["num_input_timesteps"] = run.config["datasets"]["train"]["num_input_timesteps"]
if num_input_points_ratio is None and "num_input_points_ratio" not in run.config["datasets"]["train"]:
hp["vars"]["num_input_points_ratio"] = None
else:
if num_input_points_ratio is not None:
hp["vars"]["num_input_points_ratio"] = num_input_points_ratio
else:
hp["vars"]["num_input_points_ratio"] = run.config["datasets"]["train"]["num_input_points_ratio"]
if num_input_points is None and "num_input_points" not in run.config["datasets"]["train"]:
hp["vars"]["num_input_points"] = None
else:
if num_input_points is not None:
hp["vars"]["num_input_points"] = num_input_points
else:
# wandb stores lists as dictionary with indices as keys
# if num_input_points is sampled -> force --num_input_points
if isinstance(run.config["datasets"]["train"]["num_input_points"], dict):
assert "test_rollout" in run.config["datasets"]
hp["vars"]["num_input_points"] = run.config["datasets"]["test_rollout"]["num_input_points"]
else:
hp["vars"]["num_input_points"] = run.config["datasets"]["train"]["num_input_points"]
if "radius_graph_r" in run.config["datasets"]["train"]:
hp["vars"]["radius_graph_r"] = run.config["datasets"]["train"]["radius_graph_r"]
radius_graph_max_num_neighbors = k or run.config["datasets"]["train"]["radius_graph_max_num_neighbors"]
hp["vars"]["radius_graph_max_num_neighbors"] = radius_graph_max_num_neighbors
else:
hp["vars"]["radius_graph_r"] = run.config["trainer"]["radius_graph_r"]
hp["vars"]["radius_graph_max_num_neighbors"] = k or run.config["trainer"]["radius_graph_max_num_neighbors"]
if "norm" in run.config["datasets"]["train"]:
hp["vars"]["norm"] = run.config["datasets"]["train"]["norm"]
else:
hp["vars"]["norm"] = "mean0std1"
if "clamp" in run.config["datasets"]["train"]:
hp["vars"]["clamp"] = run.config["datasets"]["train"]["clamp"]
else:
hp["vars"]["clamp"] = None
if "clamp_mode" in run.config["datasets"]["train"]:
hp["vars"]["clamp_mode"] = run.config["datasets"]["train"]["clamp_mode"]
else:
hp["vars"]["clamp_mode"] = "hard"
if "max_num_timesteps" in run.config["datasets"]["train"]:
hp["vars"]["max_num_timesteps"] = run.config["datasets"]["train"]["max_num_timesteps"]
else:
hp["vars"]["max_num_timesteps"] = None
hp["trainer"]["precision"] = run.config["trainer"]["precision"]
if "backup_precision" in run.config["trainer"]:
hp["trainer"]["backup_precision"] = run.config["trainer"]["backup_precision"]
if num_supernodes is None:
if "num_supernodes" in run.config["datasets"]["train"]:
hp["vars"]["num_supernodes"] = run.config["datasets"]["train"]["num_supernodes"]
elif "num_supernodes" in run.config["datasets"]["train"]["collators"]["0"]:
hp["vars"]["num_supernodes"] = run.config["datasets"]["train"]["collators"]["0"]["num_supernodes"]
else:
if "num_supernodes" in hp["vars"]:
hp["vars"]["num_supernodes"] = None
else:
hp["vars"]["num_supernodes"] = num_supernodes
if resolution is None:
if "grid_resolution" in run.config["datasets"]["train"]:
# wandb stores lists as dictionary with indices as keys
resolution = [
run.config["datasets"]["train"]["grid_resolution"]["0"],
run.config["datasets"]["train"]["grid_resolution"]["1"],
]
hp["vars"]["grid_resolution"] = resolution
else:
if isinstance(resolution, int):
resolution = [resolution, resolution]
hp["vars"]["grid_resolution"] = resolution
hp["name"] = hp["name"].replace("???", name)
fname = f"{stage_id}_rollout_{template_fname}_{mode}_{rollout_mode}_{checkpoint.split('_')[0]}"
if num_input_points_ratio is not None:
fname += f"_subsam{str(num_input_points_ratio).replace('.', '')}"
if num_input_points is not None:
if num_input_points > 1000:
fname += f"_in{num_input_points // 1000}k"
else:
fname += f"_in{num_input_points}"
if num_supernodes is not None:
fname += f"_{num_supernodes}supernodes"
if resolution is not None:
if isinstance(resolution, int):
fname += f"_{resolution}resolution"
else:
fname += f"_{resolution[0]}resolution"
if k is not None:
fname += f"_k{k}"
out_uri = out / f"{fname}.yaml"
with open(out_uri, "w") as f:
yaml.safe_dump(hp, f, sort_keys=False)
print(f"created '{out_uri.as_posix()}'")
wandb.finish()
if __name__ == "__main__":
main(**parse_args())
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/train_stage.py | src/train_stage.py | import logging
import os
from pathlib import Path
import kappaprofiler as kp
import yaml
from torch.distributed import broadcast_object_list
from wandb.util import generate_id
from callbacks.base.callback_base import CallbackBase
from configs.cli_args import CliArgs
from configs.static_config import StaticConfig
from configs.wandb_config import WandbConfig
from datasets import dataset_from_kwargs
from datasets.dummy_dataset import DummyDataset
from distributed.config import is_rank0, is_distributed, get_rank, log_distributed_config
from models import model_from_kwargs
from models.dummy_model import DummyModel
from providers.dataset_config_provider import DatasetConfigProvider
from providers.path_provider import PathProvider
from summarizers.stage_summarizers import stage_summarizer_from_kwargs
from summarizers.summary_summarizers import summary_summarizer_from_kwargs
from trainers import trainer_from_kwargs
from utils.commands import command_from_kwargs
from utils.data_container import DataContainer
from utils.kappaconfig.util import save_unresolved_hp, save_resolved_hp, log_stage_hp
from utils.logging_util import add_global_handlers
from utils.memory_leak_util import get_tensors_in_memory
from utils.seed import set_seed, get_random_int
from utils.system_info import log_system_info, get_cli_command
from utils.version_check import check_versions
from utils.wandb_utils import init_wandb, finish_wandb
def train_stage(stage_hp: dict, static_config: StaticConfig, cli_args: CliArgs, device: str):
# set environment variables
for key, value in stage_hp.get("env", {}).items():
os.environ[key] = value if isinstance(value, str) else str(value)
# resume
if cli_args.resume_stage_id is not None:
assert "initializer" not in stage_hp["trainer"]
if cli_args.resume_checkpoint is None:
checkpoint = "latest"
elif cli_args.resume_checkpoint.startswith("E"):
checkpoint = dict(epoch=int(cli_args.resume_checkpoint[1:]))
elif cli_args.resume_checkpoint.startswith("U"):
checkpoint = dict(update=int(cli_args.resume_checkpoint[1:]))
elif cli_args.resume_checkpoint.startswith("S"):
checkpoint = dict(sample=int(cli_args.resume_checkpoint[1:]))
else:
# any checkpoint (like cp=last or cp=best.accuracy1.test.main)
checkpoint = cli_args.resume_checkpoint
stage_hp["trainer"]["initializer"] = dict(
kind="resume_initializer",
stage_id=cli_args.resume_stage_id,
checkpoint=checkpoint,
)
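        # For example, --resume_checkpoint E10 resolves to dict(epoch=10),
        # U500 to dict(update=500), S10000 to dict(sample=10000), and anything
        # else (e.g. cp=last) is passed through as-is.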
# retrieve stage_id from hp (allows queueing up dependent stages by hardcoding stage_ids in the yamls) e.g.:
# - pretrain MAE with stageid abcdefgh
# - finetune MAE where the backbone is initialized with the backbone from stage_id abcdefgh
stage_id = stage_hp.get("stage_id", None)
# generate stage_id and sync across devices
if stage_id is None:
stage_id = generate_id()
if is_distributed():
object_list = [stage_id] if is_rank0() else [None]
broadcast_object_list(object_list)
stage_id = object_list[0]
stage_name = stage_hp.get("stage_name", "default_stage")
# initialize logging
path_provider = PathProvider(
output_path=static_config.output_path,
model_path=static_config.model_path,
stage_name=stage_name,
stage_id=stage_id,
temp_path=static_config.temp_path,
)
message_counter = add_global_handlers(log_file_uri=path_provider.logfile_uri)
# init seed
run_name = cli_args.name or stage_hp.pop("name", None)
seed = stage_hp.pop("seed", None)
if seed is None:
seed = 0
logging.info(f"no seed specified -> using seed={seed}")
# initialize wandb
wandb_config_uri = stage_hp.pop("wandb", None)
if wandb_config_uri == "disabled":
wandb_mode = "disabled"
else:
wandb_mode = cli_args.wandb_mode or static_config.default_wandb_mode
if wandb_mode == "disabled":
wandb_config_dict = {}
if cli_args.wandb_config is not None:
logging.warning(f"wandb_config is defined via CLI but mode is disabled -> wandb_config is not used")
if wandb_config_uri is not None:
logging.warning(f"wandb_config is defined via yaml but mode is disabled -> wandb_config is not used")
else:
# retrieve wandb config from yaml
if wandb_config_uri is not None:
wandb_config_uri = Path("wandb_configs") / wandb_config_uri
if cli_args.wandb_config is not None:
logging.warning(f"wandb_config is defined via CLI and via yaml -> wandb_config from yaml is used")
# retrieve wandb config from --wandb_config cli arg
elif cli_args.wandb_config is not None:
wandb_config_uri = Path("wandb_configs") / cli_args.wandb_config
# use default wandb_config file
else:
wandb_config_uri = Path("wandb_config.yaml")
with open(wandb_config_uri.with_suffix(".yaml")) as f:
wandb_config_dict = yaml.safe_load(f)
wandb_config = WandbConfig(mode=wandb_mode, **wandb_config_dict)
config_provider, summary_provider = init_wandb(
device=device,
run_name=run_name,
stage_hp=stage_hp,
wandb_config=wandb_config,
path_provider=path_provider,
account_name=static_config.account_name,
tags=stage_hp.pop("tags", None),
notes=stage_hp.pop("notes", None),
group=stage_hp.pop("group", None),
group_tags=stage_hp.pop("group_tags", None),
)
# log codebase "high-level" version name (git commit is logged anyway)
config_provider["code/mlp"] = "CVSim"
config_provider["code/tag"] = os.popen("git describe --abbrev=0").read().strip()
config_provider["code/name"] = "initial"
# log setup
logging.info("------------------")
logging.info(f"stage_id: {stage_id}")
logging.info(get_cli_command())
check_versions(verbose=True)
log_system_info()
static_config.log()
cli_args.log()
log_distributed_config()
log_stage_hp(stage_hp)
if is_rank0():
save_unresolved_hp(cli_args.hp, path_provider.stage_output_path / "hp_unresolved.yaml")
save_resolved_hp(stage_hp, path_provider.stage_output_path / "hp_resolved.yaml")
logging.info("------------------")
logging.info(f"training stage '{path_provider.stage_name}'")
if is_distributed():
# using a different seed for every rank to ensure that stochastic processes are different across ranks
# for large batch_sizes this shouldn't matter too much
# this is relevant for:
# - augmentations (augmentation parameters of sample0 of rank0 == augparams of sample0 of rank1 == ...)
# - the masks of a MAE are the same for every rank
# NOTE: DDP syncs the parameters in its __init__ method -> same initial parameters independent of seed
seed += get_rank()
logging.info(f"using different seeds per process (seed+rank) ")
set_seed(seed)
# init datasets
logging.info("------------------")
logging.info("initializing datasets")
datasets = {}
dataset_config_provider = DatasetConfigProvider(
global_dataset_paths=static_config.get_global_dataset_paths(),
local_dataset_path=static_config.get_local_dataset_path(),
data_source_modes=static_config.get_data_source_modes(),
)
if "datasets" not in stage_hp:
logging.info(f"no datasets found -> initialize dummy dataset")
datasets["train"] = DummyDataset(
size=256,
x_shape=(2,),
n_classes=2,
)
else:
for dataset_key, dataset_kwargs in stage_hp["datasets"].items():
logging.info(f"initializing {dataset_key}")
datasets[dataset_key] = dataset_from_kwargs(
dataset_config_provider=dataset_config_provider,
path_provider=path_provider,
**dataset_kwargs,
)
data_container_kwargs = {}
if "prefetch_factor" in stage_hp:
data_container_kwargs["prefetch_factor"] = stage_hp.pop("prefetch_factor")
if "max_num_workers" in stage_hp:
data_container_kwargs["max_num_workers"] = stage_hp.pop("max_num_workers")
data_container = DataContainer(
**datasets,
num_workers=cli_args.num_workers,
pin_memory=cli_args.pin_memory,
config_provider=config_provider,
seed=get_random_int(),
**data_container_kwargs,
)
# init trainer
logging.info("------------------")
logging.info("initializing trainer")
trainer_kwargs = {}
if "max_batch_size" in stage_hp:
trainer_kwargs["max_batch_size"] = stage_hp.pop("max_batch_size")
trainer = trainer_from_kwargs(
data_container=data_container,
device=device,
sync_batchnorm=cli_args.sync_batchnorm or static_config.default_sync_batchnorm,
config_provider=config_provider,
summary_provider=summary_provider,
path_provider=path_provider,
**stage_hp["trainer"],
**trainer_kwargs,
)
# register datasets of callbacks (e.g. for ImageNet-C the dataset never changes so its pointless to specify)
for callback in trainer.callbacks:
callback.register_root_datasets(
dataset_config_provider=dataset_config_provider,
is_mindatarun=cli_args.testrun or cli_args.mindatarun,
)
# init model
logging.info("------------------")
logging.info("creating model")
if "model" not in stage_hp:
logging.info(f"no model defined -> use dummy model")
model = DummyModel(
input_shape=trainer.input_shape,
output_shape=trainer.output_shape,
update_counter=trainer.update_counter,
path_provider=path_provider,
is_frozen=True,
)
else:
model = model_from_kwargs(
**stage_hp["model"],
input_shape=trainer.input_shape,
output_shape=trainer.output_shape,
update_counter=trainer.update_counter,
path_provider=path_provider,
data_container=data_container,
)
# logging.info(f"model architecture:\n{model}")
# moved to trainer as initialization on cuda is different than on cpu
# model = model.to(stage_config.run_config.device)
# train model
trainer.train_model(model)
# finish callbacks
CallbackBase.finish()
# summarize logvalues
logging.info("------------------")
logging.info(f"summarize logvalues")
summary_provider.summarize_logvalues()
# summarize stage
if "stage_summarizers" in stage_hp and is_rank0():
logging.info("------------------")
logging.info("summarize stage")
for kwargs in stage_hp["stage_summarizers"]:
summarizer = stage_summarizer_from_kwargs(
summary_provider=summary_provider,
path_provider=path_provider,
**kwargs,
)
summarizer.summarize()
# summarize summary
if "summary_summarizers" in stage_hp and is_rank0():
summary_provider.flush()
logging.info("------------------")
for kwargs in stage_hp["summary_summarizers"]:
summary_summarizer = summary_summarizer_from_kwargs(
summary_provider=summary_provider,
**kwargs,
)
summary_summarizer.summarize()
summary_provider.flush()
# add profiling times to summary_provider
def try_log_profiler_time(summary_key, profiler_query):
try:
summary_provider[summary_key] = kp.profiler.get_node(profiler_query).total_time
except AssertionError:
pass
try_log_profiler_time("profiler/train", "train")
try_log_profiler_time("profiler/train/iterator", "train.iterator")
try_log_profiler_time("profiler/train/data_loading", "train.data_loading")
try_log_profiler_time("profiler/train/update", "train.update")
try_log_profiler_time("profiler/train/to_device", "train.update.forward.to_device")
try_log_profiler_time("profiler/train/forward", "train.update.forward")
try_log_profiler_time("profiler/train/backward", "train.update.backward")
summary_provider.flush()
# log profiler times
logging.info(f"full profiling times:\n{kp.profiler.to_string()}")
kp.reset()
# execute commands
if "on_finish" in stage_hp and is_rank0():
logging.info("------------------")
logging.info("ON_FINISH COMMANDS")
for command in stage_hp["on_finish"]:
command = command_from_kwargs(**command, stage_id=stage_id)
# noinspection PyBroadException
try:
command.execute()
except:
logging.exception(f"failed to execute {command}")
# cleanup
logging.info("------------------")
logging.info(f"CLEANUP")
data_container.dispose()
message_counter.log()
finish_wandb(wandb_config)
# log how many tensors remain to be aware of potential memory leaks
all_tensors, cuda_tensors = get_tensors_in_memory()
logging.info("------------------")
logging.info(f"{len(all_tensors)} tensors remaining in memory (cpu+gpu)")
logging.info(f"{len(all_tensors) - len(cuda_tensors)} tensors remaining in memory (cpu)")
logging.info(f"{len(cuda_tensors)} tensors remaining in memory (gpu)")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/main_train.py | src/main_train.py | from utils.version_check import check_versions
check_versions(verbose=False)
import logging
import os
import kappaprofiler as kp
import torch
from configs.cli_args import parse_run_cli_args
from configs.static_config import StaticConfig
from distributed.config import barrier, get_rank, get_local_rank, get_world_size, is_managed
from distributed.run import run_single_or_multiprocess, run_managed
from train_stage import train_stage
from utils.kappaconfig.util import get_stage_hp
from utils.logging_util import add_global_handlers, log_from_all_ranks
from utils.pytorch_cuda_timing import cuda_start_event, cuda_end_event
def main_single(device):
cli_args = parse_run_cli_args()
static_config = StaticConfig(uri="static_config.yaml", datasets_were_preloaded=cli_args.datasets_were_preloaded)
add_global_handlers(log_file_uri=None)
with log_from_all_ranks():
logging.info(f"initialized process rank={get_rank()} local_rank={get_local_rank()} pid={os.getpid()}")
barrier()
logging.info(f"initialized {get_world_size()} processes")
# CUDA_LAUNCH_BLOCKING=1 for debugging
# os.environ["CUDA_LAUNCH_BLOCKING"] = str(1)
# cudnn
if cli_args.accelerator == "gpu":
if cli_args.cudnn_benchmark or static_config.default_cudnn_benchmark:
torch.backends.cudnn.benchmark = True
assert not static_config.default_cudnn_deterministic, "cudnn_benchmark can make things non-deterministic"
else:
logging.warning(f"disabled cudnn benchmark")
if static_config.default_cudnn_deterministic:
torch.backends.cudnn.deterministic = True
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
logging.warning(f"enabled cudnn deterministic")
# profiling
if cli_args.accelerator == "gpu":
if cli_args.cuda_profiling or static_config.default_cuda_profiling:
kp.setup_async(cuda_start_event, cuda_end_event)
logging.info(f"initialized profiler to call sync cuda")
else:
kp.setup_async_as_sync()
# load hyperparameters
stage_hp = get_stage_hp(
cli_args.hp,
template_path="zztemplates",
testrun=cli_args.testrun,
minmodelrun=cli_args.minmodelrun,
mindatarun=cli_args.mindatarun,
mindurationrun=cli_args.mindurationrun,
)
# train stage
train_stage(
stage_hp=stage_hp,
static_config=static_config,
cli_args=cli_args,
device=device,
)
def main():
# parse cli_args immediately for fast cli_args validation
cli_args = parse_run_cli_args()
static_config = StaticConfig(uri="static_config.yaml", datasets_were_preloaded=cli_args.datasets_were_preloaded)
    # initialize loggers for setup (separate id)
add_global_handlers(log_file_uri=None)
if is_managed():
run_managed(
accelerator=cli_args.accelerator,
devices=cli_args.devices,
main_single=main_single,
)
else:
run_single_or_multiprocess(
accelerator=cli_args.accelerator,
devices=cli_args.devices,
main_single=main_single,
master_port=cli_args.master_port or static_config.master_port,
mig_devices=static_config.mig_config,
)
if __name__ == "__main__":
main()
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/main_sbatch.py | src/main_sbatch.py | import os
import platform
import shlex
import sys
import uuid
from argparse import ArgumentParser
from datetime import datetime
from pathlib import Path
import yaml
import kappaconfig as kc
def get_parser():
parser = ArgumentParser()
# how many GPUs
gpus_group = parser.add_mutually_exclusive_group()
gpus_group.add_argument("--nodes", type=int)
gpus_group.add_argument("--gpus", type=int)
#
parser.add_argument("--time", type=str, required=True)
parser.add_argument("--account", type=str)
parser.add_argument("--qos", type=str)
parser.add_argument("--script", type=str, choices=["train", "run_folder"], default="train")
parser.add_argument("--preload", type=str)
# resume
parser.add_argument("--resume_stage_id", type=str)
parser.add_argument("--resume_checkpoint", type=str)
return parser
def main(nodes, gpus, time, account, qos, script, preload, resume_stage_id, resume_checkpoint):
# by default use 1 node
if nodes is None and gpus is None:
print(f"no --nodes and no --gpus defined -> use 1 node")
nodes = 1
# load template submit script
if nodes is not None:
with open("template_sbatch_nodes.sh") as f:
template = f.read()
elif gpus is not None:
with open("template_sbatch_gpus.sh") as f:
template = f.read()
else:
raise NotImplementedError
# load config
config = kc.DefaultResolver().resolve(kc.from_file_uri("sbatch_config.yaml"))
# check paths exist
chdir = Path(config["chdir"]).expanduser()
assert chdir.exists(), f"chdir {chdir} doesn't exist"
# default account
account = account or config["default_account"]
    # not every server has qos and qos doesn't need to be defined via CLI args
qos = qos or config.get("default_qos")
# get sbatch-only arguments
parser = get_parser()
args_to_filter = []
# noinspection PyProtectedMember
for action in parser._actions:
if action.dest == "help":
continue
        # currently only supports filtering out args with a "--" prefix
assert len(action.option_strings) == 1
assert action.option_strings[0].startswith("--")
args_to_filter.append(action.option_strings[0])
# filter out sbatch-only arguments
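    # NOTE: assumes every sbatch-only argument takes exactly one value (flag and value are skipped together)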
train_args = []
i = 0
while i < len(sys.argv[1:]):
arg = sys.argv[1 + i]
if arg.startswith("--") and arg in args_to_filter:
i += 2
else:
train_args.append(arg)
i += 1
cli_args_str = " ".join(map(shlex.quote, train_args))
# patch template
if preload is not None:
assert "{preload}" in template
config["preload"] = "true"
config["preload_yaml"] = preload
cli_args_str += " --datasets_were_preloaded"
else:
config["preload"] = "false"
config["preload_yaml"] = "nothing"
if script == "run_folder":
cli_args_str += " --devices 0"
if resume_stage_id is not None:
assert script == "train"
cli_args_str += f" --resume_stage_id {resume_stage_id}"
if resume_checkpoint is not None:
assert script == "train"
cli_args_str += f" --resume_checkpoint {resume_checkpoint}"
patched_template = template.format(
time=time,
nodes=nodes,
gpus=gpus,
account=account,
qos=qos,
script=script,
cli_args=cli_args_str,
**config,
)
print(patched_template)
# create a shell script
out_path = Path("submit")
out_path.mkdir(exist_ok=True)
fname = f"{datetime.now():%m.%d-%H.%M.%S}-{uuid.uuid4()}.sh"
with open(out_path / fname, "w") as f:
f.write(patched_template)
# execute the shell script
if os.name != "nt":
os.system(f"sbatch submit/{fname}")
if __name__ == "__main__":
main(**vars(get_parser().parse_known_args()[0]))
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/main_run_folder.py | src/main_run_folder.py | import argparse
import logging
import os
import random
import shutil
import subprocess
from pathlib import Path
from time import sleep
import yaml
from utils.logging_util import add_stdout_handler
from utils.version_check import check_versions
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--folder", type=str, default="yamls_run")
parser.add_argument("--devices", type=str, required=True)
parser.add_argument("--accelerator", type=str, choices=["cpu", "gpu"])
parser.add_argument("--wandb_config", type=str)
parser.add_argument("--wandb_mode", type=str)
parser.add_argument("--datasets_were_preloaded", action="store_true")
single_or_forever = parser.add_mutually_exclusive_group()
single_or_forever.add_argument("--forever", action="store_true")
single_or_forever.add_argument("--single", action="store_true")
parser.add_argument("--start_on_idle", action="store_true")
parser.add_argument("--num_workers", type=int)
pin_memory_group = parser.add_mutually_exclusive_group()
pin_memory_group.add_argument("--pin_memory", action="store_true")
pin_memory_group.add_argument("--no_pin_memory", action="store_false", dest="pin_memory")
pin_memory_group.set_defaults(pin_memory=None)
return vars(parser.parse_args())
def devices_are_idle(device_ids):
used_vram = os.popen("nvidia-smi --query-gpu=memory.used --format=csv,noheader").read().strip().split("\n")
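    # expected output is one line per GPU, e.g. "1234 MiB" (illustrative values)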
for device_id in device_ids:
split = used_vram[device_id].split(" ")
        # nvidia-smi should always report vram in MiB, but check to be safe
assert split[1] == "MiB"
value = int(split[0])
if value > 10:
logging.info(f"device {device_id} is in use")
return False
logging.info(f"devices {device_ids} are idle")
return True
def main(
folder,
devices,
accelerator,
wandb_config,
wandb_mode,
forever,
single,
start_on_idle,
datasets_were_preloaded,
num_workers,
pin_memory,
):
add_stdout_handler()
    # start when the devices have been idle for at least 30 minutes
if start_on_idle:
device_ids = yaml.safe_load(f"[{devices}]")
while True:
if devices_are_idle(device_ids):
# check frequently for the next 30 minutes if the device is still idle
for _ in range(30):
sleep(60)
if not devices_are_idle(device_ids):
                        # devices became busy again (probably a new run was started)
break
else:
                    # devices stayed idle for the full interval
break
else:
# wait 30 minutes for next check
sleep(30 * 60)
folder = Path(folder).expanduser()
assert folder.exists()
running_folder = folder / "running"
finished_folder = folder / "finished"
running_folder.mkdir(exist_ok=True)
finished_folder.mkdir(exist_ok=True)
counter = 0
while True:
        # fetch list of yamls to run
folder_content = [folder / name for name in os.listdir(folder)]
yaml_files = [entry for entry in folder_content if entry.is_file() and entry.name.endswith(".yaml")]
if len(yaml_files) == 0:
if forever:
logging.info(f"no yamls in {folder} -> wait a minute")
sleep(60)
continue
else:
logging.info(f"no yamls in {folder} -> terminate")
break
# sleep for a random interval to avoid race conditions
sleep(random.random())
# pick random yaml and move it to running folder
yaml_file = random.choice(yaml_files)
if not yaml_file.exists():
continue
running_yaml = running_folder / yaml_file.name
shutil.move(yaml_file, running_yaml)
logging.info(f"moved {yaml_file} to {running_yaml}")
# extract name from yaml (also implicitly checks if yaml is valid)
# noinspection PyBroadException
try:
with open(running_yaml) as f:
hp = yaml.safe_load(f)
name = hp.get("name", None)
except:
logging.info(f"couldnt load yaml {yaml_file}")
continue
# start
popen_arg_list = [
"python", "main_train.py",
"--hp", str(running_yaml),
"--devices", devices,
]
if name is None:
popen_arg_list += ["--name", running_yaml.name]
if wandb_config is not None:
popen_arg_list += ["--wandb_config", wandb_config]
if wandb_mode is not None:
popen_arg_list += ["--wandb_mode", wandb_mode]
if num_workers is not None:
popen_arg_list += ["--num_workers", str(num_workers)]
if pin_memory is not None:
if pin_memory:
popen_arg_list += ["--pin_memory"]
else:
popen_arg_list += ["--no_pin_memory"]
if accelerator is not None:
popen_arg_list += ["--accelerator", accelerator]
if datasets_were_preloaded:
popen_arg_list += ["--datasets_were_preloaded"]
process = subprocess.Popen(popen_arg_list)
logging.info(f"started {running_yaml.name}")
process.wait()
# move to finished folder
shutil.move(running_yaml, finished_folder / yaml_file.name)
counter += 1
if single:
break
logging.info(f"finished running {counter} yamls from {folder} (devices={devices})")
if __name__ == "__main__":
check_versions(verbose=False)
main(**parse_args())
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/initializers/default_initializer.py | src/initializers/default_initializer.py | from .base.initializer_base import InitializerBase
class DefaultInitializer(InitializerBase):
"""
implicitly applies the torch default initialization
useful e.g. when defining a list of initializers to sweep over
"""
def init_weights(self, model):
pass
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/initializers/functional.py | src/initializers/functional.py | import torch.nn as nn
ALL_BATCHNORMS = (
nn.BatchNorm1d,
nn.BatchNorm2d,
nn.BatchNorm3d,
nn.LazyBatchNorm1d,
nn.LazyBatchNorm2d,
nn.LazyBatchNorm3d,
nn.SyncBatchNorm,
)
_ALL_NORMS = (
*ALL_BATCHNORMS,
nn.LayerNorm,
nn.InstanceNorm1d,
nn.InstanceNorm2d,
nn.InstanceNorm3d,
nn.GroupNorm,
nn.LocalResponseNorm,
)
def initialize_norms_as_noaffine(m):
if isinstance(m, _ALL_NORMS):
if m.bias is not None:
nn.init.constant_(m.bias, 0.)
if m.weight is not None:
nn.init.constant_(m.weight, 1.)
def initialize_norms_as_identity(m):
if isinstance(m, _ALL_NORMS):
if m.bias is not None:
nn.init.constant_(m.bias, 0.)
if m.weight is not None:
nn.init.constant_(m.weight, 0.)
def initialize_layernorm_as_noaffine(m):
if isinstance(m, nn.LayerNorm):
if m.bias is not None:
nn.init.constant_(m.bias, 0.)
if m.weight is not None:
nn.init.constant_(m.weight, 1.)
def initialize_layernorm_as_identity(m):
if isinstance(m, nn.LayerNorm):
if m.bias is not None:
nn.init.constant_(m.bias, 0.)
if m.weight is not None:
nn.init.constant_(m.weight, 0.)
else:
raise NotImplementedError
def initialize_batchnorm_as_noaffine(m):
if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
if m.bias is not None:
nn.init.constant_(m.bias, 0.)
if m.weight is not None:
nn.init.constant_(m.weight, 1.)
def initialize_batchnorm_as_identity(m):
if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
if m.bias is not None:
nn.init.constant_(m.bias, 0.)
if m.weight is not None:
nn.init.constant_(m.weight, 0.)
else:
raise NotImplementedError
def initialize_linear_bias_to_zero(m):
if isinstance(m, nn.Linear):
if m.bias is not None:
nn.init.constant_(m.bias, 0.)
def initialize_conv_bias_to_zero(m):
if isinstance(m, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
if m.bias is not None:
nn.init.constant_(m.bias, 0.)
def initialize_xavier_uniform_zero_bias(m):
if isinstance(m, (nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d)):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0.)
def initialize_qkv_seperately(model):
# https://github.com/facebookresearch/moco-v3/blob/main/vits.py#L35
for full_name, module in model.named_modules():
last_name = full_name.split(".")[-1]
if last_name == "qkv":
# treat the weights of Q, K, V separately
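            # xavier_uniform bound sqrt(6 / (fan_in + fan_out)), computed as if Q, K and V
            # were three separate dim x dim projections (hence the fan_out // 3)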
val = (6 / (module.weight.shape[0] // 3 + module.weight.shape[1])) ** 0.5
nn.init.uniform_(module.weight, -val, val)
if last_name == "qkv_mlpin":
# treat the weights of Q, K, V and MLP-in separately
# only implemented for mlp_ratio=4
input_dim = module.weight.shape[1]
assert module.weight.shape[0] == 7 * input_dim
qkv_bound = (3 / input_dim) ** 0.5
mlpin_bound = (6 / (5 * input_dim)) ** 0.5
nn.init.uniform_(module.weight[:3 * input_dim], -qkv_bound, qkv_bound)
nn.init.uniform_(module.weight[3 * input_dim:], -mlpin_bound, mlpin_bound)
def initialize_modulation_seperately(model):
for full_name, module in model.named_modules():
last_name = full_name.split(".")[-1]
if last_name == "modulation":
            # a modulation produces a stack of vectors -> treat each vector separately
val = (6 / (module.weight.shape[0] // 2 + module.weight.shape[1])) ** 0.5
nn.init.uniform_(module.weight, -val, val)
def initialize_seperately(model, name, denominator):
for full_name, module in model.named_modules():
last_name = full_name.split(".")[-1]
if last_name == name:
val = (6 / (module.weight.shape[0] // denominator + module.weight.shape[1])) ** 0.5
nn.init.uniform_(module.weight, -val, val)
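# Usage sketch (illustrative, not part of the original module): these helpers take a
# single module `m` and are meant to be applied recursively via nn.Module.apply.
if __name__ == "__main__":
    demo = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
    # zero all linear biases, then reset the norm affine parameters to weight=1/bias=0
    demo.apply(initialize_linear_bias_to_zero)
    demo.apply(initialize_norms_as_noaffine)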
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/initializers/previous_run_initializer.py | src/initializers/previous_run_initializer.py | from .base.checkpoint_initializer import CheckpointInitializer
class PreviousRunInitializer(CheckpointInitializer):
"""
initializes a model from a checkpoint of a previous run (specified by the stage_id)
load_optim=False as this is usually used for frozen/pretrained models
"""
def __init__(
self,
load_optim=False,
keys_to_remove=None,
patterns_to_remove=None,
patterns_to_rename=None,
**kwargs,
):
super().__init__(load_optim=load_optim, **kwargs)
self.keys_to_remove = keys_to_remove or []
self.patterns_to_remove = patterns_to_remove or []
self.patterns_to_rename = patterns_to_rename or []
def init_weights(self, model):
sd, model_name, ckpt_uri = self.get_model_state_dict(model)
if len(self.keys_to_remove) > 0:
self.logger.info(f"removing keys {self.keys_to_remove} from {ckpt_uri}")
for key in self.keys_to_remove:
sd.pop(key)
if len(self.patterns_to_remove) > 0:
for pattern in self.patterns_to_remove:
self.logger.info(f"removing pattern {pattern} from {ckpt_uri}")
for key in list(sd.keys()):
if pattern in key:
self.logger.info(f"removing key {key}")
sd.pop(key)
if len(self.patterns_to_rename) > 0:
for pattern in self.patterns_to_rename:
src_pattern = pattern["src"]
dst_pattern = pattern["dst"]
self.logger.info(f"renaming pattern {src_pattern} to {dst_pattern} in {ckpt_uri}")
for key in list(sd.keys()):
if src_pattern in key:
new_value = sd.pop(key)
dst_key = key.replace(src_pattern, dst_pattern)
if dst_key in sd:
self.logger.info(f"overwriting key {dst_key} with {key}")
else:
self.logger.info(f"renaming key {key} to {dst_key}")
sd[dst_key] = new_value
model.load_state_dict(sd)
self.logger.info(f"loaded weights of {model_name} from {ckpt_uri}")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/initializers/resume_initializer.py | src/initializers/resume_initializer.py | import os
import torch
from models.base.composite_model_base import CompositeModelBase
from models.base.single_model_base import SingleModelBase
from utils.checkpoint import Checkpoint
from .base.checkpoint_initializer import CheckpointInitializer
class ResumeInitializer(CheckpointInitializer):
"""
initializes models/optims from a checkpoint ready for resuming training
load_optim=True as this is usually used to resume a training run
stage_name is provided by the trainer as it already knows the correct stage_name
"""
def __init__(self, load_optim=True, load_random_states=True, **kwargs):
super().__init__(load_optim=load_optim, model_name=None, **kwargs)
self.load_random_states = load_random_states
def init_weights(self, model):
self._init_weights(model.name, model)
def _init_weights(self, name, model):
if isinstance(model, SingleModelBase):
model_name, ckpt_uri = self._get_modelname_and_ckpturi(model=model, model_name=name, file_type="model")
sd = torch.load(ckpt_uri, map_location=model.device)
if "state_dict" in sd:
sd = sd["state_dict"]
model.load_state_dict(sd)
self.logger.info(f"loaded weights of {model_name} from {ckpt_uri}")
if isinstance(model, CompositeModelBase):
for submodel_name, submodel in model.submodels.items():
self._init_weights(name=f"{name}.{submodel_name}", model=submodel)
def init_optim(self, model):
self._init_optim(name=model.name, model=model)
def _init_optim(self, name, model):
if isinstance(model, SingleModelBase):
if model.optim is None:
# e.g. EMA target network doesn't have an optimizer
self.logger.info(
f"skip loading optim from checkpoint '{self.checkpoint}' for {model.name} "
f"(optim is None)"
)
elif model.is_frozen:
self.logger.info(
f"skip loading optim from checkpoint '{self.checkpoint}' for {model.name} "
f"(is_frozen)"
)
else:
model_name, ckpt_uri = self._get_modelname_and_ckpturi(model=model, model_name=name, file_type="optim")
sd = torch.load(ckpt_uri, map_location=model.device)
model.optim.load_state_dict(sd)
self.logger.info(f"loaded optimizer of {model_name} from {ckpt_uri}")
if isinstance(model, CompositeModelBase):
for submodel_name, submodel in model.submodels.items():
self._init_optim(name=f"{name}.{submodel_name}", model=submodel)
def _get_trainer_ckpt_file(self):
return self._get_ckpt_uri(prefix=f"trainer cp=", suffix=".th")
def get_start_checkpoint(self):
if isinstance(self.checkpoint, str):
trainer_ckpt_uri = self._get_trainer_ckpt_file()
if trainer_ckpt_uri.exists():
trainer_ckpt = torch.load(trainer_ckpt_uri)
trainer_ckpt_without_rng_states = {k: v for k, v in trainer_ckpt.items() if k != "random_states"}
self.logger.info(f"loaded checkpoint from trainer_state_dict: {trainer_ckpt_without_rng_states}")
return Checkpoint(
epoch=trainer_ckpt["epoch"],
update=trainer_ckpt["update"],
sample=trainer_ckpt["sample"],
)
else:
self.logger.warning("no trainer checkpoint found -> try to fetch start_checkpoint from a model ckpt")
# try to get any model checkpoint
ckpt_folder = self.path_provider.get_stage_checkpoint_path(
stage_name=self.stage_name,
stage_id=self.stage_id,
)
fnames = [
fname
for fname in sorted(os.listdir(ckpt_folder))
if "model" in fname and self.checkpoint in fname
]
assert len(fnames) > 0, "no trainer checkpoint and no valid model found to infer start_checkpoint"
fname = fnames[0]
self.logger.info(
f"no trainer checkpoint found but start_checkpoint "
f"can be inferred from model checkpoint '{fname}'"
)
model_sd = torch.load(ckpt_folder / fname, map_location="cpu")
abs_ckpt = model_sd["abs_ckpt"]
assert isinstance(abs_ckpt, dict)
return Checkpoint(**abs_ckpt)
else:
return Checkpoint.to_fully_specified_from_fnames(
ckpt_folder=self.path_provider.get_stage_checkpoint_path(
stage_name=self.stage_name,
stage_id=self.stage_id,
),
ckpt=self.checkpoint,
)
def init_trainer(self, trainer):
ckpt_uri = self._get_trainer_ckpt_file()
if not ckpt_uri.exists():
self.logger.warning(f"no trainer checkpoint found -> skip trainer initialization from checkpoint")
return
trainer.load_state_dict(torch.load(ckpt_uri), load_random_states=self.load_random_states)
self.logger.info(f"loaded trainer checkpoint {ckpt_uri}")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/initializers/__init__.py | src/initializers/__init__.py | from utils.factory import instantiate
def initializer_from_kwargs(kind, **kwargs):
return instantiate(
module_names=[f"initializers.{kind}"],
type_names=[kind.split(".")[-1]],
**kwargs
)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/initializers/pretrained_initializer.py | src/initializers/pretrained_initializer.py | from pathlib import Path
import torch
from models.ssl_heads.masked_decoder import MaskedDecoder
from .base.initializer_base import InitializerBase
class PretrainedInitializer(InitializerBase):
""" initialize with weights from an external, pretrained checkpoints (e.g. original facebook MAE checkpoints) """
def __init__(self, weights_file, root_key=None, key_mapping=None, **kwargs):
super().__init__(**kwargs)
self.weights_file = weights_file
self.weights_uri = Path(self.path_provider.model_path / weights_file).expanduser()
assert self.weights_uri.exists() and self.weights_uri.is_file(), self.weights_uri.as_posix()
self.key_mapping = key_mapping
self.root_key = root_key
def _get_model_kwargs(self):
self.logger.info(f"loading ckpt kwargs for '{self.weights_uri}'")
kwargs = dict(kind="vit.vit")
        # I-JEPA uses no CLS token
if "ijepa" in self.weights_file:
kwargs["cls_tokens"] = 0
# ViT dimensions
if "base16" in self.weights_file:
return dict(patch_size=16, dim=768, num_attn_heads=12, depth=12, **kwargs)
if "large16" in self.weights_file:
return dict(patch_size=16, dim=1024, num_attn_heads=16, depth=24, **kwargs)
if "huge16" in self.weights_file:
return dict(patch_size=16, dim=1280, num_attn_heads=16, depth=32, **kwargs)
if "huge14" in self.weights_file:
return dict(patch_size=14, dim=1280, num_attn_heads=16, depth=32, **kwargs)
sd = torch.load(self.weights_uri, map_location=torch.device("cpu"))
if "ctor_kwargs" in sd:
kwargs = sd["ctor_kwargs"]
else:
kwargs = {}
self.logger.info(f"found kwargs: {kwargs}")
return kwargs
def init_weights(self, model):
self.logger.info(f"loading weights from '{self.weights_uri}'")
sd = torch.load(self.weights_uri, map_location=torch.device("cpu"))
# unpack state_dict
# - MLPlayground stores weights in "state_dict" field
# - MAE stores weights in "model" field
if "state_dict" in sd:
sd = sd["state_dict"]
elif "model" in sd:
sd = sd["model"]
# select model (e.g. used when student/teacher is stored in same checkpoint)
if self.root_key is not None:
sd = sd[self.root_key]
#
if isinstance(model, MaskedDecoder) and self.weights_file in [
"mae_base16.pth", "mae_large16.pth", "mae_huge14.pth", # MAE
"mae_base16res448.pth", "mae_large16res448.pth", # long sequence MAE
"mae_base16res448e800.pth", "mae_large16res448e800.pth", # long sequence MAE
]:
            self.logger.info(f"checkpoint keys: {list(sd.keys())}")
sd = {k: v for k, v in sd.items() if "decoder" in k}
elif self.weights_file in [
"mae_base16.pth", "mae_large16.pth", "mae_huge14.pth", # MAE
"mae_base16res448.pth", "mae_large16res448.pth", # long sequence MAE
"mae_base16res448e800.pth", "mae_large16res448e800.pth", # long sequence MAE
]:
sd = {k: v for k, v in sd.items() if "decoder" not in k and k != "mask_token"}
elif "layergrafting" in self.weights_file:
sd = {
k.replace("module.momentum_encoder.", ""): v
for k, v in sd.items()
if k.startswith("module.momentum_encoder.") and "head" not in k
}
elif "mugs" in self.weights_file:
sd = {k: v for k, v in sd.items() if not k.startswith("relation_blocks")}
elif "ijepa" in self.weights_file:
sd = {k.replace("module.", ""): v for k, v in sd.items()}
# remap keys
if self.key_mapping is not None:
for old_key, new_key in self.key_mapping.items():
sd[new_key] = sd.pop(old_key)
model.load_state_dict(sd)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/initializers/base/initializer_base.py | src/initializers/base/initializer_base.py | import logging
from providers.path_provider import PathProvider
class InitializerBase:
def __init__(self, path_provider: PathProvider = None):
self.logger = logging.getLogger(type(self).__name__)
self.path_provider = path_provider
# check if children overwrite the correct method
assert type(self).get_model_kwargs == InitializerBase.get_model_kwargs
def init_weights(self, model):
raise NotImplementedError
def init_optim(self, model):
pass
def get_model_kwargs(self):
kwargs = self._get_model_kwargs()
# remove is_frozen/freezers
kwargs.pop("is_frozen", None)
kwargs.pop("freezers", None)
        # initializers shouldn't be loaded
kwargs.pop("initializers", None)
# model extractors should be explicitly defined -> avoids errors
kwargs.pop("extractors", None)
self.logger.info(f"loaded model kwargs: {kwargs}")
return kwargs
def _get_model_kwargs(self):
return {}
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/initializers/base/checkpoint_initializer.py | src/initializers/base/checkpoint_initializer.py | import torch
from initializers.base.initializer_base import InitializerBase
from models.base.single_model_base import SingleModelBase
from utils.checkpoint import Checkpoint
from utils.factory import create
class CheckpointInitializer(InitializerBase):
def __init__(self, stage_id, model_name, checkpoint, load_optim, model_info=None, stage_name=None, **kwargs):
super().__init__(**kwargs)
self.stage_id = stage_id
self.model_name = model_name
self.load_optim = load_optim
self.model_info = model_info
self.stage_name = stage_name or self.path_provider.stage_name
# checkpoint can be a string (e.g. "best_accuracy" for initializing from a model saved by BestModelLogger)
        # or a dictionary with epoch/update/sample values
if isinstance(checkpoint, str):
self.checkpoint = checkpoint
else:
self.checkpoint = create(checkpoint, Checkpoint)
assert self.checkpoint.is_minimally_specified or self.checkpoint.is_fully_specified
def init_weights(self, model):
raise NotImplementedError
def get_model_state_dict(self, model):
model_name, ckpt_uri = self._get_modelname_and_ckpturi(model=model, file_type="model")
sd = torch.load(ckpt_uri, map_location=model.device)
if "state_dict" in sd:
sd = sd["state_dict"]
return sd, model_name, ckpt_uri
def _get_model_kwargs(self):
model_name, ckpt_uri = self._get_modelname_and_ckpturi(file_type="model")
sd = torch.load(ckpt_uri, map_location=torch.device("cpu"))
kwargs = sd["ctor_kwargs"]
self.logger.info(f"loaded model kwargs from {ckpt_uri}")
if "abs_ckpt" in kwargs:
self.logger.info(f"absolute checkpoint: {kwargs['abs_ckpt']}")
return kwargs
def init_optim(self, model):
if not isinstance(model, SingleModelBase):
return
if not self.load_optim:
return
assert model.optim is not None
model_name, ckpt_uri = self._get_modelname_and_ckpturi(model=model, file_type="optim")
sd = torch.load(ckpt_uri, map_location=model.device)
model.optim.load_state_dict(sd)
self.logger.info(f"loaded optimizer of {model_name} from {ckpt_uri}")
def _get_modelname_and_ckpturi(self, file_type, model=None, model_name=None):
model_name = model_name or self.model_name
if model_name is None:
assert isinstance(model, SingleModelBase)
self.logger.info(f"no model_name provided -> using {model.name}")
model_name = model.name
# model_info is e.g. ema=0.99
model_info_str = "" if self.model_info is None else f" {self.model_info}"
ckpt_uri = self._get_ckpt_uri(prefix=f"{model_name} cp=", suffix=f"{model_info_str} {file_type}.th")
assert ckpt_uri.exists(), f"'{ckpt_uri}' doesn't exist"
return model_name, ckpt_uri
def _get_ckpt_uri(self, prefix, suffix):
ckpt_folder = self.path_provider.get_stage_checkpoint_path(
stage_name=self.stage_name,
stage_id=self.stage_id,
)
# find full checkpoint from minimal specification
if not isinstance(self.checkpoint, str) and not self.checkpoint.is_fully_specified:
ckpt = Checkpoint.to_fully_specified_from_fnames(
ckpt_folder=ckpt_folder,
ckpt=self.checkpoint,
prefix=prefix,
suffix=suffix,
)
else:
ckpt = self.checkpoint
return ckpt_folder / f"{prefix}{ckpt}{suffix}"
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/initializers/base/__init__.py | src/initializers/base/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/optimizers/optimizer_wrapper.py | src/optimizers/optimizer_wrapper.py | import logging
import torch
from kappaschedules import object_to_schedule
from torch.cuda.amp import GradScaler
from utils.bidict import Bidict
from utils.factory import create_collection, create
from utils.formatting_util import float_to_scientific_notation
from .lr_scalers import lr_scaler_from_kwargs
from .lr_scalers.linear_lr_scaler import LinearLrScaler
from .param_group_modifiers import param_group_modifier_from_kwargs
class OptimizerWrapper:
"""
wrapper for torch optimizers that also handles
- learning rate scaling (with batchsize)
- creating parameter groups (e.g. excluding bias/norm from weight decay, layerwise lr scaling)
- stateless learning rate scheduling
- gradient clipping
"""
def __init__(
self,
model,
torch_optim_ctor,
schedule=None,
weight_decay_schedule=None,
clip_grad_value=None,
clip_grad_norm=None,
param_group_modifiers=None,
exclude_bias_from_wd=True,
exclude_norm_from_wd=True,
add_model_specific_param_group_modifiers=True,
update_counter=None,
lr_scale_factor=None,
lr_scaler=None,
):
self.logger = logging.getLogger(type(self).__name__)
self.model = model
self.update_counter = update_counter
self.clip_grad_value = clip_grad_value
self.clip_grad_norm = clip_grad_norm
assert self.clip_grad_value is None or self.clip_grad_value > 0
assert self.clip_grad_norm is None or self.clip_grad_norm > 0
# scale lr
assert "lr" in torch_optim_ctor.keywords
lr_scaler = create(lr_scaler, lr_scaler_from_kwargs) or LinearLrScaler()
base_lr = torch_optim_ctor.keywords["lr"]
lr_scale_factor = lr_scale_factor or update_counter.effective_batch_size
scaled_lr = lr_scaler.scale_lr(base_lr=base_lr, lr_scale_factor=lr_scale_factor)
self.logger.info(f"base lr: {float_to_scientific_notation(base_lr, max_precision=2)}")
self.logger.info(f"scaled lr: {float_to_scientific_notation(scaled_lr, max_precision=2)}")
self.logger.info(f"lr_scaler={lr_scaler}")
self.logger.info(f"lr_scale_factor={lr_scale_factor}")
torch_optim_ctor.keywords["lr"] = scaled_lr
# create a param group for each parameter
param_group_modifiers = create_collection(param_group_modifiers, param_group_modifier_from_kwargs)
if add_model_specific_param_group_modifiers:
param_group_modifiers = model.get_model_specific_param_group_modifiers() + param_group_modifiers
param_groups = []
self.logger.info(
f"group modifiers exclude_bias_from_wd={exclude_bias_from_wd} exclude_norm_from_wd={exclude_norm_from_wd} "
f"add_model_specific_param_group_modifiers={add_model_specific_param_group_modifiers} "
f"[{' '.join(str(pgm) for pgm in param_group_modifiers)}]"
)
for name, param in model.named_parameters():
properties = {}
# excluding norm and bias params is very common for all models -> support with simple flag
            # bias has ndim == 1, so it needs to be checked before the ndim-based norm check below
# the bias of norm layers is considered a bias, not a norm parameter
if name.endswith(".bias") and exclude_bias_from_wd:
properties["weight_decay"] = 0.
# timm does it like this...not sure if other parameters can also have ndim <= 1
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/optim/optim_factory.py
elif param.ndim <= 1 and exclude_norm_from_wd:
properties["weight_decay"] = 0.
for param_group_modifier in param_group_modifiers:
for key, value in param_group_modifier.get_properties(model, name, param).items():
if key in properties and key == "lr_scale":
properties[key] *= value
else:
assert key not in properties
properties[key] = value
assert "param" not in properties
assert "name" not in properties
properties["name"] = name
properties["params"] = [param]
param_groups.append(properties)
# check that param group modifiers were successfully applied (e.g. check that param name was found in model)
for param_group_modifier in param_group_modifiers:
assert param_group_modifier.was_applied_successfully(), f"{param_group_modifier} failed"
# merge same groups with same parameters (useful for logging)
merged_groups = []
merged_groups_properties = []
merged_groups_paramnames = []
for param_group in param_groups:
param_name = param_group.pop("name")
properties = {k: v for k, v in param_group.items() if k != "params"}
matching_group_idx = None
for i, merged_group_properties in enumerate(merged_groups_properties):
if properties == merged_group_properties:
matching_group_idx = i
break
if matching_group_idx is None:
merged_groups.append(param_group)
merged_groups_properties.append(properties)
merged_groups_paramnames.append([param_name])
else:
merged_groups[matching_group_idx]["params"] += param_group["params"]
merged_groups_paramnames[matching_group_idx].append(param_name)
# add name to param_groups
for param_group in merged_groups:
names = []
for key, value in param_group.items():
if key == "params":
continue
if isinstance(value, float):
value_str = float_to_scientific_notation(value, max_precision=1, remove_plus=True)
else:
raise NotImplementedError
names.append(f"{key}={value_str}")
if len(names) > 0:
param_group["name"] = "&".join(names)
# log param groups
self.logger.info(f"using {len(merged_groups)} param groups:")
for param_group in merged_groups:
self.logger.info(
" ".join([
f"{key}={value}" for key, value in param_group.items()
if key not in ["params", "name"]
] + [f"len(params)={len(param_group['params'])}"])
)
# torch optimizer organizes parameters by enumerating them (not by name)
# so for loading an arbitrary optim state_dict an association from param_name to param_idx has to be stored
self.param_idx_to_name = Bidict()
idx = 0
for group_paramnames in merged_groups_paramnames:
for param_name in group_paramnames:
self.param_idx_to_name.set_forward(idx, param_name)
idx += 1
# initialize torch optim
self.torch_optim = torch_optim_ctor(merged_groups)
# for grad clipping all parameters of the optimizer are required
self.all_parameters = None
if self.clip_grad_value is not None or self.clip_grad_norm is not None:
self.all_parameters = list(model.parameters())
# scale lr (e.g. layerwise_lr_decay_modifier)
for param_group in self.torch_optim.param_groups:
if "lr_scale" in param_group:
assert "original_lr" not in param_group
param_group["original_lr"] = param_group["lr"]
# lr is float so inplace operation is fine
# this scaling is only relevant for logging and epoch based schedules
                # for update-based schedules the value is scaled again at the start of each update anyway
param_group["lr"] *= param_group["lr_scale"]
self.logger.info(
f"scaled lr of param_group '{param_group['name']}' "
f"from {float_to_scientific_notation(param_group['original_lr'], max_precision=2)} "
f"to {float_to_scientific_notation(param_group['lr'], max_precision=2)}"
)
# create schedules
self.schedule = object_to_schedule(
schedule,
batch_size=self.update_counter.effective_batch_size if self.update_counter is not None else None,
updates_per_epoch=self.update_counter.updates_per_epoch if self.update_counter is not None else None,
max_value=self.torch_optim.defaults["lr"]
)
self.weight_decay_schedule = object_to_schedule(
weight_decay_schedule,
batch_size=self.update_counter.effective_batch_size if self.update_counter is not None else None,
updates_per_epoch=self.update_counter.updates_per_epoch if self.update_counter is not None else None,
max_value=self.torch_optim.defaults["weight_decay"],
)
# store initial_lr/initial_wd in param_groups
# NOTE: torch optimizer broadcasts all values to all param groups (so every param_group has a weight_decay)
if self.weight_decay_schedule is not None:
for param_group in self.torch_optim.param_groups:
assert "exclude_from_wd" not in param_group
param_group["exclude_from_wd"] = param_group["weight_decay"] == 0.
def _has_param_with_grad(self):
for param_group in self.torch_optim.param_groups:
for p in param_group["params"]:
if p.grad is not None:
return True
return False
def step(self, grad_scaler):
        # grad_scaler doesn't support an update without gradients (e.g. GAN setting)
# Error: AssertionError: No inf checks were recorded for this optimizer
if isinstance(grad_scaler, GradScaler):
if not self._has_param_with_grad():
return
# NOTE: closure is not supported with GradScaler
if self.clip_grad_value is not None or self.clip_grad_norm is not None:
grad_scaler.unscale_(self.torch_optim)
# clip gradients
if self.clip_grad_value is not None:
torch.nn.utils.clip_grad_value_(self.all_parameters, self.clip_grad_value)
if self.clip_grad_norm is not None:
torch.nn.utils.clip_grad_norm_(self.all_parameters, self.clip_grad_norm)
# torch optim step with grad scaler
grad_scaler.step(self.torch_optim)
grad_scaler.update()
def schedule_step(self):
if self.schedule is not None:
lr_scale = self.schedule.get_value(
step=self.update_counter.cur_checkpoint.update,
total_steps=self.update_counter.end_checkpoint.update,
)
for param_group in self.torch_optim.param_groups:
if "lr_scale" in param_group:
# lr_scale -> current lr from schedule
# param_group["lr_scale"] -> scale form layer-wise lr decay
param_group["lr"] = param_group["lr_scale"] * lr_scale
else:
param_group["lr"] = lr_scale
if self.weight_decay_schedule is not None:
wd_scale = self.weight_decay_schedule.get_value(
step=self.update_counter.cur_checkpoint.update,
total_steps=self.update_counter.end_checkpoint.update,
)
for param_group in self.torch_optim.param_groups:
if not param_group["exclude_from_wd"]:
param_group["weight_decay"] = wd_scale
def zero_grad(self, set_to_none=True):
# set_to_none is True by default (unlike torch.optim.optimizer)
# because it has better performance (https://www.youtube.com/watch?v=9mS1fIYj1So)
self.torch_optim.zero_grad(set_to_none)
def state_dict(self):
sd = self.torch_optim.state_dict()
sd["param_idx_to_name"] = self.param_idx_to_name.to_forward()
return sd
def load_state_dict(self, state_dict_to_load):
# state_dict doesn't have to have the same param_groups as the current state_dict
# - add new parameters
# - change weight_decay
if "param_idx_to_name" in state_dict_to_load:
# torch optim stores:
# - a list of param_idxs in each param_group
# - a dict from param_idxs to state for the state of the param
# -> match the param_idxs and overwrite the state
loaded_param_idx_to_name = Bidict(forward=state_dict_to_load["param_idx_to_name"])
loaded_states = state_dict_to_load["state"]
cur_state_dict = self.torch_optim.state_dict()
cur_states = cur_state_dict["state"]
cur_param_groups = cur_state_dict["param_groups"]
for cur_param_group in cur_param_groups:
for cur_param_idx in cur_param_group["params"]:
param_name = self.param_idx_to_name.get_forward(cur_param_idx)
loaded_param_idx = loaded_param_idx_to_name.get_backward(param_name)
if loaded_param_idx not in loaded_states:
                        # if no optim step was done, no state exists -> don't load a state for this param
                        cur_states.pop(cur_param_idx, None)
else:
# overwrite state with loaded state
cur_states[cur_param_idx] = loaded_states[loaded_param_idx]
state_dict_to_load = dict(state=cur_states, param_groups=cur_param_groups)
self.torch_optim.load_state_dict(state_dict_to_load)
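# Minimal usage sketch (illustrative; assumes a plain torch module outside this codebase,
# hence add_model_specific_param_group_modifiers=False and an explicit lr_scale_factor
# instead of an update_counter):
if __name__ == "__main__":
    from functools import partial
    import torch.nn as nn
    net = nn.Sequential(nn.Linear(4, 4), nn.LayerNorm(4))
    optim = OptimizerWrapper(
        model=net,
        torch_optim_ctor=partial(torch.optim.AdamW, lr=1e-3, weight_decay=0.05),
        # with the default LinearLrScaler divisor of 256 this keeps scaled lr == base lr
        lr_scale_factor=256,
        add_model_specific_param_group_modifiers=False,
    )
    optim.zero_grad()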
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/optimizers/interleaved_optimizer.py | src/optimizers/interleaved_optimizer.py | import logging
from kappaschedules import object_to_schedule
class InterleavedOptimizer:
"""
selects an optimizer from a set of optimizers per update step according to a schedule
the schedule should return the index of the current optimizer
"""
def __init__(self, model, optim_ctors, schedule, update_counter):
self.logger = logging.getLogger(type(self).__name__)
self.optims = [optim_ctor(model, update_counter=update_counter) for optim_ctor in optim_ctors]
self.update_counter = update_counter
self.schedule = object_to_schedule(
schedule,
batch_size=self.update_counter.effective_batch_size if self.update_counter is not None else None,
updates_per_epoch=self.update_counter.updates_per_epoch if self.update_counter is not None else None,
)
def _get_optim_for_current_step(self):
index = self.schedule.get_value(
step=self.update_counter.cur_checkpoint.update,
total_steps=self.update_counter.end_checkpoint.update,
)
return self.optims[int(index)]
def get_optim_for_previous_step(self):
index = self.schedule.get_value(
step=self.update_counter.cur_checkpoint.update - 1,
total_steps=self.update_counter.end_checkpoint.update,
)
return self.optims[int(index)]
def step(self, grad_scaler):
self._get_optim_for_current_step().step(grad_scaler)
def schedule_step(self):
self._get_optim_for_current_step().schedule_step()
def zero_grad(self, set_to_none=True):
for optim in self.optims:
optim.zero_grad(set_to_none)
def state_dict(self):
return {i: optim.state_dict() for i, optim in enumerate(self.optims)}
def load_state_dict(self, state_dict_to_load):
for i in range(len(state_dict_to_load)):
self.optims[i].load_state_dict(state_dict_to_load[i])
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/optimizers/__init__.py | src/optimizers/__init__.py | from copy import deepcopy
from functools import partial
from optimizers.interleaved_optimizer import InterleavedOptimizer
from optimizers.optimizer_wrapper import OptimizerWrapper
from utils.factory import get_ctor
def optim_ctor_from_kwargs(kind, **kwargs):
kwargs = deepcopy(kwargs)
if kind == "interleaved_optimizer":
optim_ctors = [optim_ctor_from_kwargs(**optim) for optim in kwargs.pop("optims")]
return partial(InterleavedOptimizer, optim_ctors=optim_ctors, **kwargs)
# extract optimizer wrapper kwargs
wrapped_optim_kwargs = {}
wrapped_optim_kwargs_keys = [
"schedule",
"weight_decay_schedule",
"clip_grad_value",
"clip_grad_norm",
"exclude_bias_from_wd",
"exclude_norm_from_wd",
"param_group_modifiers",
"lr_scaler",
]
for key in wrapped_optim_kwargs_keys:
if key in kwargs:
wrapped_optim_kwargs[key] = kwargs.pop(key)
torch_optim_ctor = get_ctor(
module_names=["torch.optim", f"optimizers.custom.{kind}"],
type_names=[kind],
**kwargs,
)
return partial(_optimizer_wrapper_ctor, torch_optim_ctor=torch_optim_ctor, **wrapped_optim_kwargs)
def _optimizer_wrapper_ctor(model, torch_optim_ctor, **wrapped_optim_kwargs):
return OptimizerWrapper(model=model, torch_optim_ctor=torch_optim_ctor, **wrapped_optim_kwargs)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/optimizers/custom/lars.py | src/optimizers/custom/lars.py | # https://raw.githubusercontent.com/Lightning-AI/lightning-bolts/master/pl_bolts/optimizers/lars.py
import torch
from torch.optim.optimizer import Optimizer
class LARS(Optimizer):
def __init__(
self,
params,
lr,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
trust_coefficient=1e-3,
eps=1e-8,
):
if lr < 0.0:
raise ValueError(f"Invalid learning rate: {lr}")
if momentum < 0.0:
raise ValueError(f"Invalid momentum value: {momentum}")
if weight_decay < 0.0:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
defaults = dict(
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
trust_coefficient=trust_coefficient,
eps=eps,
)
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
# not sure why only nesterov is set here
for group in self.param_groups:
group.setdefault("nesterov", False)
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
# exclude scaling for params with 0 weight decay
for group in self.param_groups:
weight_decay = group["weight_decay"]
momentum = group["momentum"]
dampening = group["dampening"]
nesterov = group["nesterov"]
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad
p_norm = torch.norm(p.data)
g_norm = torch.norm(p.grad.data)
# lars scaling + weight decay part
if weight_decay != 0:
if p_norm != 0 and g_norm != 0:
lars_lr = group["trust_coefficient"] * p_norm / (g_norm + p_norm * weight_decay + group["eps"])
grad.add_(p, alpha=weight_decay)
grad.mul_(lars_lr)
# sgd part
if momentum != 0:
param_state = self.state[p]
if "momentum_buffer" not in param_state:
buf = param_state["momentum_buffer"] = torch.clone(grad).detach()
else:
buf = param_state["momentum_buffer"]
buf.mul_(momentum).add_(grad, alpha=1 - dampening)
if nesterov:
grad = grad.add(buf, alpha=momentum)
else:
grad = buf
p.add_(grad, alpha=-group["lr"])
return loss
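# Minimal usage sketch (illustrative parameters). NOTE: this implementation applies the
# trust-ratio scaling ||w|| / (||g|| + wd * ||w|| + eps) only when weight_decay != 0.
if __name__ == "__main__":
    import torch.nn as nn
    net = nn.Linear(4, 2)
    net(torch.randn(8, 4)).pow(2).mean().backward()
    opt = LARS(net.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
    opt.step()
    opt.zero_grad()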
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/optimizers/custom/lion.py | src/optimizers/custom/lion.py | import torch
from torch.optim.optimizer import Optimizer
class Lion(Optimizer):
""" https://raw.githubusercontent.com/lucidrains/lion-pytorch/main/lion_pytorch/lion_pytorch.py """
def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0.0):
assert lr > 0.
assert all([0. <= beta <= 1. for beta in betas])
defaults = dict(
lr=lr,
betas=betas,
weight_decay=weight_decay
)
super().__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in filter(lambda pp: pp.grad is not None, group["params"]):
grad = p.grad
lr = group["lr"]
wd = group["weight_decay"]
beta1, beta2 = group["betas"]
state = self.state[p]
# init state - exponential moving average of gradient values
if len(state) == 0:
state["exp_avg"] = torch.zeros_like(p)
exp_avg = state["exp_avg"]
                # stepweight decay (decoupled weight decay applied directly to the parameters)
p.data.mul_(1 - lr * wd)
# weight update
update = exp_avg.clone().mul_(beta1).add(grad, alpha=1 - beta1).sign_()
p.add_(update, alpha=-lr)
# decay the momentum running average coefficient
exp_avg.mul_(beta2).add_(grad, alpha=1 - beta2)
return loss
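# Minimal usage sketch (illustrative parameters): Lion takes the sign of an interpolated
# momentum as the update direction and applies decoupled ("stepweight") weight decay.
if __name__ == "__main__":
    import torch.nn as nn
    net = nn.Linear(4, 2)
    net(torch.randn(8, 4)).pow(2).mean().backward()
    opt = Lion(net.parameters(), lr=1e-4, betas=(0.9, 0.99), weight_decay=0.1)
    opt.step()
    opt.zero_grad()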
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/optimizers/custom/__init__.py | src/optimizers/custom/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/optimizers/lr_scalers/linear_lr_scaler.py | src/optimizers/lr_scalers/linear_lr_scaler.py | class LinearLrScaler:
def __init__(self, divisor=256):
super().__init__()
self.divisor = divisor
def __str__(self):
return f"{type(self).__name__}(divisor={self.divisor})"
def scale_lr(self, base_lr, lr_scale_factor):
return base_lr * lr_scale_factor / self.divisor
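# Worked example (illustrative numbers): with the default divisor of 256, base_lr=1e-3
# and an effective batch size of 1024 scale to 1e-3 * 1024 / 256 = 4e-3.
if __name__ == "__main__":
    print(LinearLrScaler().scale_lr(base_lr=1e-3, lr_scale_factor=1024))  # 0.004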
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/optimizers/lr_scalers/sqrt_lr_scaler.py | src/optimizers/lr_scalers/sqrt_lr_scaler.py | import math
class SqrtLrScaler:
def __init__(self, divisor=256):
super().__init__()
self.divisor = divisor
def __str__(self):
return f"{type(self).__name__}(divisor={self.divisor})"
def scale_lr(self, base_lr, lr_scale_factor):
return base_lr * math.sqrt(lr_scale_factor / self.divisor)
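# Worked example (illustrative numbers): the lr grows with the square root of the
# batch-size ratio, e.g. 1e-3 * sqrt(1024 / 256) = 2e-3.
if __name__ == "__main__":
    print(SqrtLrScaler().scale_lr(base_lr=1e-3, lr_scale_factor=1024))  # 0.002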
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/optimizers/lr_scalers/__init__.py | src/optimizers/lr_scalers/__init__.py | from utils.factory import instantiate
def lr_scaler_from_kwargs(kind, **kwargs):
return instantiate(
module_names=[f"optimizers.lr_scalers.{kind}"],
type_names=[kind],
**kwargs
)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/optimizers/param_group_modifiers/vit_norm_lr_scale_modifier.py | src/optimizers/param_group_modifiers/vit_norm_lr_scale_modifier.py | from .base.param_group_modifier import ParamGroupModifier
class VitNormLrScaleModifier(ParamGroupModifier):
def __init__(self, scale, start_block_index=None):
self.scale = scale
self.start_block_index = start_block_index
def get_properties(self, model, name, param):
assert self.start_block_index is not None
if self.start_block_index < 0:
start_idx = self.start_block_index + len(model.blocks)
else:
start_idx = self.start_block_index
block_indices = list(range(start_idx, len(model.blocks)))
if name.startswith("block") and int(name.split(".")[1]) in block_indices and "norm" in name:
return dict(lr_scale=self.scale)
return {}
def __str__(self):
return f"{type(self).__name__}(scale={self.scale},start_block_index={self.start_block_index})"
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/optimizers/param_group_modifiers/layerwise_lr_decay_modifier.py | src/optimizers/param_group_modifiers/layerwise_lr_decay_modifier.py | from .base.param_group_modifier import ParamGroupModifier
class LayerwiseLrDecayModifier(ParamGroupModifier):
def __init__(self, decay, skip_layers=None):
self.decay = decay
self.skip_layers = skip_layers
def get_properties(self, model, name, param):
# adapted from BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33
# this will split the model into len(blocks) + 2 "layers"
# stem (patch_embed, cls_token, pos_embed) -> blocks -> last norm
# this means that the last block will already be decayed
num_layers = len(model.blocks) + 1
scales = list(self.decay ** (num_layers - i) for i in range(num_layers))
# shift scales if layers are skipped (first layer that is not skipped has the max learning rate)
if self.skip_layers is not None:
scales = scales[self.skip_layers:] + [1.] * self.skip_layers
if name in ["cls_token", "mask_token", "pos_embed"] or name.startswith('patch_embed'):
return dict(lr_scale=scales[0])
elif name.startswith("block"):
layer = int(name.split('.')[1]) + 1
return dict(lr_scale=scales[layer])
elif name.startswith("norm."):
# last norm is not scaled (i.e. original learning rate)
return {}
elif name.startswith("head."):
# head is not scaled (i.e. original learning rate)
return {}
else:
raise NotImplementedError
def __str__(self):
return f"{type(self).__name__}(decay={self.decay})"
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
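A small numeric sketch of the decay schedule computed in get_properties above, with assumed values decay=0.65 and 12 blocks (13 "layers" counting the stem); the stem receives the smallest scale and the last block is already decayed once:

decay, num_blocks = 0.65, 12  # assumed example values
num_layers = num_blocks + 1   # the stem counts as its own layer
scales = [decay ** (num_layers - i) for i in range(num_layers)]
print(f"stem scale: {scales[0]:.4f}")         # decay ** 13, the smallest scale
print(f"last block scale: {scales[-1]:.2f}")  # decay ** 1, i.e. 0.65
# the last norm and the head are not in `scales` and keep the unscaled learning rate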
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/optimizers/param_group_modifiers/exclude_from_wd_by_name_modifier.py | src/optimizers/param_group_modifiers/exclude_from_wd_by_name_modifier.py | from .base.param_group_modifier import ParamGroupModifier
class ExcludeFromWdByNameModifier(ParamGroupModifier):
def __init__(self, name):
super().__init__()
self.name = name
self.param_was_found = False
def get_properties(self, model, name, param):
if name == self.name:
self.param_was_found = True
return dict(weight_decay=0.)
return {}
def __str__(self):
return f"{type(self).__name__}(name={self.name})"
def was_applied_successfully(self):
return self.param_was_found
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
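For illustration only (module and names invented): the dict(weight_decay=0.) returned above is a per-parameter property, and this is roughly how such an override typically takes effect once parameter groups are built from it:

import torch
import torch.nn as nn

model = nn.Linear(4, 4)
param_groups = [
    {"params": [model.weight], "weight_decay": 0.05},
    {"params": [model.bias], "weight_decay": 0.0},  # e.g. excluded via name "bias"
]
optimizer = torch.optim.AdamW(param_groups, lr=1e-3)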
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/optimizers/param_group_modifiers/__init__.py | src/optimizers/param_group_modifiers/__init__.py | from utils.factory import instantiate
def param_group_modifier_from_kwargs(kind, **kwargs):
return instantiate(
module_names=[f"optimizers.param_group_modifiers.{kind}"],
type_names=[kind],
**kwargs
)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/optimizers/param_group_modifiers/base/param_group_modifier.py | src/optimizers/param_group_modifiers/base/param_group_modifier.py | class ParamGroupModifier:
def get_properties(self, model, name, param):
raise NotImplementedError
def __repr__(self):
return str(self)
def __str__(self):
raise NotImplementedError
@staticmethod
def was_applied_successfully():
return True
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/optimizers/param_group_modifiers/base/__init__.py | src/optimizers/param_group_modifiers/base/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/summarizers/__init__.py | src/summarizers/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/summarizers/summary_summarizers/best_metric_summary_summarizer.py | src/summarizers/summary_summarizers/best_metric_summary_summarizer.py | import fnmatch
import numpy as np
from utils.infer_higher_is_better import higher_is_better_from_metric_key
from .base.summary_summarizer_base import SummarySummarizerBase
class BestMetricSummarySummarizer(SummarySummarizerBase):
def __init__(self, pattern, **kwargs):
super().__init__(**kwargs)
self.pattern = pattern
def summarize(self):
# usually summaries are also generated with corresponding checkpoint info -> filter them out
filtered_keys = [
key
for key in self.summary_provider.keys()
if "/update" not in key and "/key" not in key
]
matching_keys = []
# filter out irrelevant keys
for key in filtered_keys:
if "*" in self.pattern or "?" in self.pattern:
# pattern with * or ?
if not fnmatch.fnmatch(key, self.pattern):
continue
else:
# pattern with contains
if self.pattern not in key:
continue
# filter out target metrics "e.g. <target_key>/atbest/<source_key>"
if "/atbest/" in key:
continue
matching_keys.append(key)
assert len(matching_keys) > 0, f"no matching_keys found for pattern '{self.pattern}'"
# get best value
values = [self.summary_provider[key] for key in matching_keys]
higher_is_better = higher_is_better_from_metric_key(matching_keys[0])
assert all(higher_is_better == higher_is_better_from_metric_key(key) for key in matching_keys[1:])
best_value = np.max(values) if higher_is_better else np.min(values)
best_idxs = np.argwhere(values == best_value).squeeze(1)
if len(best_idxs) > 1:
self.logger.info(f"multiple best_idxs {best_idxs}")
best_idx = best_idxs[0]
best_key = matching_keys[best_idx]
self.logger.info(f"pattern={self.pattern} best_key='{best_key}' best_value={best_value}")
self.summary_provider[f"{self.pattern}/best"] = float(best_value)
self.summary_provider[f"{self.pattern}/best/key"] = best_key
# extract source_key from best_key
# TODO
# source_key = best_key.replace("best ", "")
# target_at_source_keys = [key for key in filtered_keys if f" at best {source_key}" in key]
# for target_at_source_key in target_at_source_keys:
# target_at_source_value = self.summary_provider[target_at_source_key]
# target_key = target_at_source_key.split(" ")[0]
# self.logger.info(f"'{target_at_source_key}' of best {source_key}: {target_at_source_value}")
# self.summary_provider[f"{target_key} at best {self.pattern}"] = target_at_source_value
# self.summary_provider[f"{target_key} at_best {self.pattern} key"] = target_at_source_key
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
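A self-contained sketch of the key selection above with made-up summary keys; fnmatch is only consulted when the pattern contains a wildcard, otherwise plain substring matching is used:

import fnmatch

keys = ["loss/valid", "loss/valid/update", "accuracy1/valid", "accuracy1/valid/atbest/loss"]
pattern = "accuracy*"
matching = [
    key for key in keys
    if "/update" not in key and "/key" not in key and "/atbest/" not in key
    and (fnmatch.fnmatch(key, pattern) if ("*" in pattern or "?" in pattern) else pattern in key)
]
assert matching == ["accuracy1/valid"]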
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/summarizers/summary_summarizers/__init__.py | src/summarizers/summary_summarizers/__init__.py | from utils.factory import instantiate
def summary_summarizer_from_kwargs(kind, **kwargs):
return instantiate(module_names=[f"summarizers.summary_summarizers.{kind}"], type_names=[kind], **kwargs)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/summarizers/summary_summarizers/base/summary_summarizer_base.py | src/summarizers/summary_summarizers/base/summary_summarizer_base.py | import logging
from providers.summary_providers.base.summary_provider_base import SummaryProviderBase
from providers.summary_providers.noop_summary_provider import NoopSummaryProvider
class SummarySummarizerBase:
def __init__(self, summary_provider: SummaryProviderBase):
self.logger = logging.getLogger(type(self).__name__)
self.summary_provider = summary_provider or NoopSummaryProvider()
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/summarizers/summary_summarizers/base/__init__.py | src/summarizers/summary_summarizers/base/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/summarizers/stage_summarizers/group_best_metric_summarizer.py | src/summarizers/stage_summarizers/group_best_metric_summarizer.py | from .base.stage_summarizer_base import StageSummarizerBase
from .best_metric_summarizer import BestMetricSummarizer
class GroupBestMetricSummarizer(StageSummarizerBase):
def __init__(self, source_group, target_group=None, **kwargs):
super().__init__(**kwargs)
self.source_group = source_group
self.target_group = target_group
self.kwargs = kwargs
def summarize(self):
source_tags = self._get_tags_of_group(self.source_group)
if self.target_group is not None:
target_tags = self._get_tags_of_group(self.target_group)
if len(target_tags) == 0:
self.logger.warning(f"couldn't find any tags of target_group {self.target_group}")
else:
target_tags = []
for i in range(len(source_tags)):
kwargs = dict(source_key=source_tags[i])
if i < len(target_tags):
kwargs["target_key"] = target_tags[i]
bms = BestMetricSummarizer(
**kwargs,
**self.kwargs,
)
bms.summarize()
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/summarizers/stage_summarizers/best_metric_summarizer.py | src/summarizers/stage_summarizers/best_metric_summarizer.py | import numpy as np
from utils.infer_higher_is_better import higher_is_better_from_metric_key
from .base.stage_summarizer_base import StageSummarizerBase
class BestMetricSummarizer(StageSummarizerBase):
"""
looks at the best source_key metric and logs the target_key metric at that global_step
e.g. source_key="accuracy/valid" target_key="accuracy/test" looks at the best validation accuracy and logs
the corresponding test accuracy
target groups can be used to summarize several metrics at once, e.g. with target_group=accuracy it will
log accuracy/train, accuracy/valid and accuracy/test
"""
def __init__(
self,
source_key,
target_key=None,
target_keys=None,
target_group=None,
target_groups=None,
**kwargs,
):
super().__init__(**kwargs)
self.source_key = source_key
self.source_higher_is_better = higher_is_better_from_metric_key(self.source_key)
# allow setting single or list of target_keys
self.target_keys = target_keys or []
if target_key is not None:
self.target_keys.append(target_key)
# allow setting single or list of target_groups
self.target_groups = target_groups or []
if target_group is not None:
self.target_groups.append(target_group)
def summarize(self):
# log best source metric
source_updates, source_values = zip(*self.all_log_entries[self.source_key].items())
best_source_value = np.max(source_values) if self.source_higher_is_better else np.min(source_values)
best_source_idxs = np.argwhere(source_values == best_source_value).squeeze(1)
best_source_checkpoints = [self._checkpoint_from_update(source_updates[idx]) for idx in best_source_idxs]
if len(best_source_checkpoints) > 1:
self.logger.info(f"multiple best_source_checkpoints {best_source_checkpoints}")
best_source_idx = best_source_idxs[0]
self.logger.info(
f"source_key={self.source_key} target_keys={self.target_keys} target_groups={self.target_groups}"
)
self.logger.info(f"best source metric at checkpoint(s) {best_source_checkpoints}: {best_source_value}")
for target_key in self.target_keys:
self._log_best_target_metric(target_key, source_updates, best_source_idx)
for target_group in self.target_groups:
tags = self._get_tags_of_group(target_group)
for tag in tags:
self._log_best_target_metric(tag, source_updates, best_source_idx)
def _log_best_target_metric(self, target_key, source_updates, best_source_idx):
target_updates, target_values = zip(*self.all_log_entries[target_key].items())
# check if source_checkpoints are equal to target_checkpoints (if different log intervals are used for
# source and target metric the source and target checkpoints won't match)
assert all(
self._checkpoint_from_update(source_update) == self._checkpoint_from_update(target_update)
for source_update, target_update in zip(source_updates, target_updates)
)
target_higher_is_better = higher_is_better_from_metric_key(target_key)
best_target_value = np.max(target_values) if target_higher_is_better else np.min(target_values)
best_target_idxs = np.argwhere(target_values == best_target_value).squeeze(1)
best_target_checkpoints = [self._checkpoint_from_update(target_updates[idx]) for idx in best_target_idxs]
if len(best_target_checkpoints) > 1:
self.logger.info(f"multiple best_target_checkpoints {best_target_checkpoints}")
# log target metric at best source metric
checkpoint = self._checkpoint_from_update(source_updates[best_source_idx])
target_value = target_values[best_source_idx]
self.logger.info(f"{target_key} at checkpoint {checkpoint}: {target_value}")
self.summary_provider[f"{target_key}/atbest/{self.source_key}"] = float(best_target_value)
self.summary_provider[f"{target_key}/atbest/{self.source_key}/update"] = checkpoint.update
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
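A toy illustration (values invented) of the selection logic in summarize and _log_best_target_metric above: the index of the best source metric is found with numpy and the target metric is read at that same index; ties resolve to the first occurrence:

import numpy as np

source_values = (0.81, 0.84, 0.84, 0.79)  # e.g. accuracy/valid, higher is better
target_values = (0.80, 0.83, 0.82, 0.78)  # e.g. accuracy/test, aligned by update
best_source_value = np.max(source_values)
best_idxs = np.argwhere(source_values == best_source_value).squeeze(1)  # -> [1, 2]
best_idx = best_idxs[0]  # ties -> the first checkpoint wins
assert target_values[best_idx] == 0.83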
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/summarizers/stage_summarizers/__init__.py | src/summarizers/stage_summarizers/__init__.py | from utils.factory import instantiate
def stage_summarizer_from_kwargs(kind, **kwargs):
return instantiate(module_names=[f"summarizers.stage_summarizers.{kind}"], type_names=[kind], **kwargs)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/summarizers/stage_summarizers/base/stage_summarizer_base.py | src/summarizers/stage_summarizers/base/stage_summarizer_base.py | import logging
import yaml
from providers.path_provider import PathProvider
from providers.summary_providers.base.summary_provider_base import SummaryProviderBase
from providers.summary_providers.noop_summary_provider import NoopSummaryProvider
from utils.checkpoint import Checkpoint
class StageSummarizerBase:
__all_log_entries = None
def __init__(self, path_provider: PathProvider, summary_provider: SummaryProviderBase):
self.logger = logging.getLogger(type(self).__name__)
self.path_provider = path_provider
self.summary_provider = summary_provider or NoopSummaryProvider()
@property
def all_log_entries(self):
if StageSummarizerBase.__all_log_entries is None:
with open(self.path_provider.primitive_output_path / "entries.yaml") as f:
StageSummarizerBase.__all_log_entries = yaml.safe_load(f)
return StageSummarizerBase.__all_log_entries
def summarize(self):
raise NotImplementedError
def _checkpoint_from_update(self, update):
epoch = self.all_log_entries["epoch"][update]
sample = self.all_log_entries["sample"][update]
if isinstance(epoch, float) and epoch.is_integer():
epoch = int(epoch)
return Checkpoint(epoch=epoch, update=update, sample=sample)
def _get_tags_of_group(self, group):
return [key for key in self.all_log_entries.keys() if key.startswith(group)]
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/summarizers/stage_summarizers/base/__init__.py | src/summarizers/stage_summarizers/base/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/losses/elementwise_loss.py | src/losses/elementwise_loss.py | import torch
import torch.nn as nn
from losses import basic_loss_fn_from_kwargs
from utils.factory import create
from utils.loss_utils import apply_reduction
class ElementwiseLoss(nn.Module):
def __init__(self, loss_function):
super().__init__()
self.loss_function = create(loss_function, basic_loss_fn_from_kwargs)
def forward(self, prediction, target, mask=None, reduction="mean"):
assert prediction.shape == target.shape
# unreduced loss
loss = self.loss_function(prediction, target, reduction="none")
# apply mask
if mask is not None:
assert mask.dtype == torch.bool and loss.shape == mask.shape
loss = loss[mask]
# apply reduction
loss = apply_reduction(loss, reduction=reduction)
return loss
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
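A hypothetical usage sketch (shapes invented, the repo's factory machinery bypassed) showing the masked reduction performed by ElementwiseLoss.forward: only elements where the mask is True contribute to the mean:

import torch
import torch.nn.functional as F

prediction = torch.randn(4, 8)
target = torch.randn(4, 8)
mask = torch.rand(4, 8) > 0.5  # boolean mask with the same shape as the unreduced loss

loss = F.mse_loss(prediction, target, reduction="none")  # unreduced, shape (4, 8)
loss = loss[mask].mean()  # mask first, then reduce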
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/losses/__init__.py | src/losses/__init__.py | from utils.factory import instantiate
def loss_fn_from_kwargs(kind, update_counter=None, **kwargs):
return instantiate(
module_names=[f"losses.{kind}", f"losses.basic.{kind}", "torch.nn"],
type_names=[kind.split(".")[-1]],
# pass update_counter to SchedulableLoss but not to e.g. torch.nn.MseLoss
optional_kwargs=dict(update_counter=update_counter),
**kwargs,
)
def basic_loss_fn_from_kwargs(kind, **kwargs):
return instantiate(
module_names=[f"losses.basic.{kind}"],
type_names=[kind.split(".")[-1]],
**kwargs,
)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/losses/basic/l1_loss.py | src/losses/basic/l1_loss.py | import torch.nn as nn
import torch.nn.functional as F
class L1Loss(nn.Module):
@staticmethod
def forward(pred, target, reduction="mean"):
return F.l1_loss(pred, target, reduction=reduction)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/losses/basic/mse_loss.py | src/losses/basic/mse_loss.py | import torch.nn as nn
import torch.nn.functional as F
class MseLoss(nn.Module):
@staticmethod
def forward(pred, target, reduction="mean"):
return F.mse_loss(pred, target, reduction=reduction)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/losses/basic/__init__.py | src/losses/basic/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/freezers/full_freezer.py | src/freezers/full_freezer.py | from .base.freezer_base import FreezerBase
class FullFreezer(FreezerBase):
def __str__(self):
return type(self).__name__
def _update_state(self, model, requires_grad):
model.eval()
for param in model.parameters():
param.requires_grad = requires_grad
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/freezers/__init__.py | src/freezers/__init__.py | from utils.factory import instantiate
def freezer_from_kwargs(kind, **kwargs):
return instantiate(
module_names=[f"freezers.{kind}"],
type_names=[kind.split(".")[-1]],
**kwargs
)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/freezers/base/freezer_base.py | src/freezers/base/freezer_base.py | import logging
from kappaschedules import object_to_schedule, PeriodicBoolSchedule
from utils.update_counter import UpdateCounter
class FreezerBase:
def __init__(self, update_counter: UpdateCounter = None, schedule=None):
self.logger = logging.getLogger(type(self).__name__)
self.update_counter = update_counter
self.schedule = object_to_schedule(
schedule,
batch_size=self.update_counter.effective_batch_size if self.update_counter is not None else None,
updates_per_epoch=self.update_counter.updates_per_epoch if self.update_counter is not None else None,
)
# remember current state for logging/callbacks when schedules are used
# this should not be used in inherited classes in order to keep them stateless
self.requires_grad = None
# check if children overwrite the correct method
assert type(self).before_accumulation_step == FreezerBase.before_accumulation_step
def __repr__(self):
return str(self)
def __str__(self):
raise NotImplementedError
def _update_state(self, model, requires_grad):
raise NotImplementedError
def after_weight_init(self, model):
if self.schedule is not None:
# state is updated before each accumulation step
return
self.logger.info(f"update state of {model.name}.{self} to requires_grad=False/is_frozen=True")
self._update_state(model, requires_grad=False)
def before_accumulation_step(self, model):
if self.schedule is None:
# state was set with after_weight_init and never changes
return
value = self.schedule.get_value(
step=self.update_counter.cur_checkpoint.update,
total_steps=self.update_counter.end_checkpoint.update,
)
if value == 1:
if self.requires_grad or self.requires_grad is None:
if not isinstance(self.schedule, PeriodicBoolSchedule):
self.logger.info(f"update state of {model.name}.{self} to requires_grad=False/is_frozen=True")
self.requires_grad = False
elif value == 0:
if not self.requires_grad or self.requires_grad is None:
if not isinstance(self.schedule, PeriodicBoolSchedule):
self.logger.info(f"update state of {model.name}.{self} to requires_grad=True/is_frozen=False")
self.requires_grad = True
else:
raise NotImplementedError
self._update_state(model, requires_grad=self.requires_grad)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
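A minimal standalone illustration (no kappaschedules, value hard-coded) of the convention used in before_accumulation_step above: a schedule value of 1 means frozen, 0 means trainable; this simplification skips the logging and the PeriodicBoolSchedule special-casing:

import torch.nn as nn

model = nn.Linear(4, 4)
schedule_value = 1  # assumed output of schedule.get_value(step, total_steps)
requires_grad = (schedule_value == 0)
if not requires_grad:
    model.eval()  # frozen modules are also put into eval mode (as in FullFreezer)
for param in model.parameters():
    param.requires_grad = requires_grad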
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/freezers/base/__init__.py | src/freezers/base/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/datasets/dummy_dataset.py | src/datasets/dummy_dataset.py | import torch
from torchvision.transforms.functional import to_pil_image
from .base.dataset_base import DatasetBase
class DummyDataset(DatasetBase):
def __init__(
self,
x_shape,
size=None,
n_classes=10,
n_abspos=10,
is_multilabel=False,
to_image=False,
semi_percent=None,
num_timesteps=10,
force_timestep_zero=False,
mode="on-the-fly",
**kwargs,
):
super().__init__(**kwargs)
self.size = size
self.x_shape = x_shape
self._n_classes = n_classes
self.n_abspos = n_abspos
self._is_multilabel = is_multilabel
self.to_image = to_image
self.semi_percent = semi_percent
self.num_timesteps = num_timesteps
self.force_timestep_zero = force_timestep_zero
self.mode = mode
assert semi_percent is None or 0. <= semi_percent <= 1.
assert mode in ["on-the-fly", "preloaded"]
if self.mode == "preloaded":
self.x = torch.randn(len(self), *self.x_shape, generator=torch.Generator().manual_seed(0))
self.y = torch.randint(
low=0,
high=max(2, self.getdim_class()),
size=(len(self),),  # one label per sample so that self.y[idx] is valid for every index
generator=torch.Generator().manual_seed(0),
).tolist()
else:
self.x = None
self.y = None
def __len__(self):
# return a large power of two to avoid having to specify a size when the dataset is only
# used for the eval_trainer to infer the input shapes
return self.size or 131072
def getitem_x(self, idx, ctx=None):
if self.x is not None:
return self.x[idx]
x = torch.randn(*self.x_shape, generator=torch.Generator().manual_seed(int(idx)))
if self.to_image:
x = to_pil_image(x)
return x
# noinspection PyUnusedLocal
def getitem_timestep(self, idx, ctx=None):
if self.force_timestep_zero:
return 0
max_timestep = self.num_timesteps - self.x_shape[0]
timestep = torch.randint(max_timestep, size=(1,), generator=torch.Generator().manual_seed(int(idx)))
return timestep
def getshape_timestep(self):
return self.num_timesteps,
def getshape_class(self):
return (self._n_classes,) if self._n_classes > 2 else (1,)
def getitem_class(self, idx, ctx=None):
if self.semi_percent is not None and (idx / len(self)) < self.semi_percent:
return -1
return self.getitem_class_all(idx, ctx=ctx)
# noinspection PyUnusedLocal
def getitem_class_all(self, idx, ctx=None):
if self.y is not None:
return self.y[idx]
return torch.randint(
low=0,
high=max(2, self.getdim_class()),
size=(1,),
generator=torch.Generator().manual_seed(int(idx)),
).item()
def getall_class(self):
return [self.getitem_class(i) for i in range(len(self))]
def getshape_abspos(self):
return self.n_abspos,
# noinspection PyUnusedLocal
def getitem_abspos(self, idx, ctx=None):
return torch.randint(
low=0,
high=self.n_abspos,
size=(1,),
generator=torch.Generator().manual_seed(int(idx)),
).item()
# noinspection PyUnusedLocal
def getitem_semseg(self, idx, ctx=None):
assert len(self.x_shape) == 3
return torch.randint(0, 10, size=self.x_shape[1:], generator=torch.Generator().manual_seed(int(idx)))
@staticmethod
def getshape_semseg():
return 10,
@property
def is_multilabel(self):
return self._is_multilabel
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
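The per-sample determinism trick used throughout DummyDataset, isolated for illustration: seeding a fresh generator with the sample index makes every getitem_* call reproducible without storing any data:

import torch

idx = 7
a = torch.randn(3, 32, 32, generator=torch.Generator().manual_seed(idx))
b = torch.randn(3, 32, 32, generator=torch.Generator().manual_seed(idx))
assert torch.equal(a, b)  # same idx -> same sample on every access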
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/datasets/lagrangian_dataset.py | src/datasets/lagrangian_dataset.py | import os
import json
import h5py
import wget
import zipfile
import torch
from torch_geometric.data import Data
from torch_geometric.nn.pool import radius_graph, radius
from torch_geometric.transforms import KNNGraph
from .base.dataset_base import DatasetBase
from kappadata.copying.image_folder import copy_imagefolder_from_global_to_local
from distributed.config import barrier, is_data_rank0
import einops
URLS = {
"tgv2d": "https://zenodo.org/records/10021926/files/2D_TGV_2500_10kevery100.zip",
"rpf2d": "https://zenodo.org/records/10021926/files/2D_RPF_3200_20kevery100.zip",
"ldc2d": "https://zenodo.org/records/10021926/files/2D_LDC_2708_10kevery100.zip",
"dam2d": "https://zenodo.org/records/10021926/files/2D_DAM_5740_20kevery100.zip",
"tgv3d": "https://zenodo.org/records/10021926/files/3D_TGV_8000_10kevery100.zip",
"rpf3d": "https://zenodo.org/records/10021926/files/3D_RPF_8000_10kevery100.zip",
"ldc3d": "https://zenodo.org/records/10021926/files/3D_LDC_8160_10kevery100.zip",
}
class LagrangianDataset(DatasetBase):
def __init__(
self,
name,
n_input_timesteps=6,
n_pushforward_timesteps=0,
graph_mode='radius_graph',
knn_graph_k=1,
radius_graph_r=0.05,
radius_graph_max_num_neighbors=int(1e10),
split="train",
test_mode='parts_traj',
n_supernodes=None,
num_points_range=None,
global_root=None,
local_root=None,
seed=None,
pos_scale=200,
**kwargs,
):
super().__init__(**kwargs)
assert name in URLS.keys(), f"Dataset {name} not available."
assert split in ["train", "valid", "test"], f"Split {split} not available."
assert n_input_timesteps > 1, "n_input_timesteps must be greater than 1 to calculate input velocities."
assert graph_mode in ['knn', 'radius_graph', 'radius_graph_with_supernodes'], f"graph_mode {graph_mode} not available."
assert test_mode in ['full_traj', 'parts_traj']
self.n_input_timesteps = n_input_timesteps
self.n_pushforward_timesteps = n_pushforward_timesteps
self.knn_graph_k = knn_graph_k
self.radius_graph_r = radius_graph_r
self.radius_graph_max_num_neighbors = radius_graph_max_num_neighbors
self.graph_mode = graph_mode
self.split = split
self.num_points_range = num_points_range
self.test_mode = test_mode
self.seed = seed
# TODO: Rewrite this for global and local
ds_name = os.path.splitext(URLS[name].split('/')[-1])[0]
global_root, local_root = self._get_roots(global_root, local_root, "lagrangian_dataset")
# TODO: Implement local dataloading
# load data from global_root
self.source_root = global_root / ds_name
# Check if dataset needs to be downloaded
if not os.path.isdir(self.source_root):
self.logger.info(f"downloading '{ds_name}'")
self.download(name, global_root)
self.logger.info(f"data_source (global): '{self.source_root}'")
assert self.source_root.exists(), f"'{self.source_root.as_posix()}' doesn't exist"
self.data = self.load_dataset(self.source_root, split)
self.metadata = self.load_metadata(self.source_root)
self.traj_keys = list(self.data.keys())
self.n_traj = len(self.traj_keys)
# Get number of particles
self.n_particles = self.metadata['num_particles_max']
if n_supernodes:
self.n_supernodes = n_supernodes
else:
self.n_supernodes = self.n_particles
# Normalization stats
self.vel_mean = torch.tensor(self.metadata['vel_mean'])
self.vel_std = torch.tensor(self.metadata['vel_std'])
self.acc_mean = torch.tensor(self.metadata['acc_mean'])
self.acc_std = torch.tensor(self.metadata['acc_std'])
# Check for PBC
if any(self.metadata['periodic_boundary_conditions']):
bounds = torch.tensor(self.metadata['bounds'])
self.box = bounds[:, 1] - bounds[:, 0]
# Scaling for positional embedding
# Positional embedding function is the same as for the ViT
# The range is from 0-197 -> we use as a max 200
self.pos_offset = bounds[:,0]
self.pos_scale = pos_scale / self.box
else:
raise NotImplementedError("only datasets with periodic boundary conditions are supported")
if self.split == "train":
self.n_seq = self.metadata['sequence_length_train']
# n_input_timesteps positions are needed as model input
# one additional position is needed as target (in the form of velocity or acceleration)
# n_pushforward_timesteps is the difference of timesteps between the input sequence and the output sequence
# the number of samples per trajectory is therefore:
self.n_per_traj = self.n_seq - self.n_input_timesteps - self.n_pushforward_timesteps
self.getter = self.get_window
else:
self.n_seq = self.metadata['sequence_length_test']
if self.test_mode == 'full_traj':
self.getter = self.get_full_trajectory
self.n_per_traj = 1
else:
self.n_sub = n_input_timesteps + n_pushforward_timesteps
self.n_per_traj = self.n_seq // self.n_sub
self.getter = self.get_trajectory
def load_dataset(self, path, split):
# Load dataset
data = h5py.File(os.path.join(path, split + '.h5'))
return data
def load_metadata(self, path):
# Load metadata
with open(os.path.join(path, "metadata.json"), "r") as f:
metadata = json.loads(f.read())
return metadata
def download(self, name, path):
url = URLS[name]
filename = os.path.split(url)[-1]
filepath = wget.download(url, out=os.path.join(path, filename))
# unzip the dataset
with zipfile.ZipFile(filepath, 'r') as zip_ref:
zip_ref.extractall(path)
# remove the zip file
os.remove(filepath)
def get_window(self, idx: int, ctx=None, downsample=False):
assert ctx is not None
if "window" in ctx:
if downsample and "perm" in ctx:
perm = ctx["perm"]
return ctx["window"][0][:,perm,:], ctx["window"][1][perm]
else:
return ctx["window"]
# Trajectory index
i_traj = idx // self.n_per_traj
traj = self.data[self.traj_keys[i_traj]]
# Index where to start in traj
start_idx = idx % self.n_per_traj
end_idx = start_idx+self.n_input_timesteps+self.n_pushforward_timesteps+1
ctx['time_idx'] = torch.arange(start_idx, end_idx)
ctx['traj_idx'] = i_traj
positions = traj['position'][ctx['time_idx']]
particle_types = traj['particle_type']
positions = torch.tensor(positions)
particle_types = torch.tensor(particle_types)
# Subsampling
if self.num_points_range:
if self.num_points_range[0] == self.num_points_range[1]:
# fixed num_points_range
end = self.num_points_range[1]
else:
lb, ub = self.num_points_range
num_points = torch.rand(size=(1,), generator=None).item() * (ub - lb) + lb
end = int(num_points)
# uniform sampling
perm = torch.randperm(self.n_particles, generator=None)[:end]
ctx["perm"] = perm
# ctx["window"] caches the full data without subsampling,
# so a single sample can serve both the downsampled and the full version
ctx["window"] = positions, particle_types
# Save the maximum number of particles to the ctx
ctx['max_particles'] = self.n_particles
if downsample and "perm" in ctx:
return ctx["window"][0][:,perm,:], ctx["window"][1][perm]
else:
return ctx["window"]
def get_trajectory(self, idx: int, ctx=None, downsample=False):
# Trajectory index
i_traj = idx // self.n_per_traj
traj = self.data[self.traj_keys[i_traj]]
# Index where to start in traj
start_idx = (idx % self.n_per_traj) * self.n_sub
end_idx = start_idx + self.n_sub
ctx['time_idx'] = torch.arange(start_idx, end_idx)
ctx['traj_idx'] = i_traj
positions = traj['position'][ctx['time_idx']]
particle_types = traj['particle_type']
return torch.tensor(positions), torch.tensor(particle_types)
def get_full_trajectory(self, idx: int, ctx=None, downsample=False):
# Trajectory index
i_traj = idx
traj = self.data[self.traj_keys[i_traj]]
ctx['time_idx'] = torch.arange(0, len(traj['position']))
ctx['traj_idx'] = i_traj
positions = traj['position'][ctx['time_idx']]
particle_types = traj['particle_type']
return torch.tensor(positions), torch.tensor(particle_types)
def get_velocities(self, positions):
velocities = positions[1:,:,:] - positions[:-1,:,:]
if self.box is not None:
# Calculation of PBC is done like in jax_md.space.periodic
velocities = (velocities + self.box * 0.5) % self.box - 0.5 * self.box
# Normalization
velocities = self.normalize_vel(velocities)
return velocities
def get_accelerations(self, positions):
next_velocities = positions[2:,:,:] - positions[1:-1,:,:]
current_velocities = positions[1:-1,:,:] - positions[0:-2,:,:]
if self.box is not None:
# Calculation of PBC is done like in jax_md.space.periodic
next_velocities = (next_velocities + self.box * 0.5) % self.box - 0.5 * self.box
current_velocities = (current_velocities + self.box * 0.5) % self.box - 0.5 * self.box
accelerations = next_velocities - current_velocities
# Normalization
accelerations = self.normalize_acc(accelerations)
return accelerations
def __len__(self):
return self.n_traj * self.n_per_traj
def __getitem__(self, idx):
if self.split == "train":
positions, particle_types = self.get_window(idx)
else:
positions, particle_types = self.get_trajectory(idx)
return positions, particle_types
def scale_pos(self, pos):
pos = pos - self.pos_offset.to(pos.device)
pos = pos * self.pos_scale.to(pos.device)
return pos
def unscale_pos(self, pos):
pos = pos / self.pos_scale.to(pos.device)
pos = pos + self.pos_offset.to(pos.device)
return pos
def normalize_vel(self, vel):
vel = vel - self.vel_mean.to(vel.device)
vel = vel / self.vel_std.to(vel.device)
return vel
def unnormalize_vel(self, vel):
vel = vel * self.vel_std.to(vel.device)
vel = vel + self.vel_mean.to(vel.device)
return vel
def normalize_acc(self, acc):
acc = acc - self.acc_mean.to(acc.device)
acc = acc / self.acc_std.to(acc.device)
return acc
def unnormalize_acc(self, acc):
acc = acc * self.acc_std.to(acc.device)
acc = acc + self.acc_mean.to(acc.device)
return acc
def _get_generator(self, idx):
if self.split == "valid" or self.split == "test":
assert self.seed is not None
if self.seed is not None:
return torch.Generator().manual_seed(int(idx) + self.seed)
return None
# Kappadata getters
def getitem_timestep(self, idx, ctx=None):
if self.split == 'test' or self.split == 'valid':
if self.test_mode == 'full_traj':
return self.n_input_timesteps - 1
else:
return (idx % self.n_per_traj) * self.n_sub + self.n_input_timesteps - 1
return idx % self.n_per_traj + self.n_input_timesteps - 1
def getshape_timestep(self):
return max(self.metadata['sequence_length_train'], self.metadata['sequence_length_test']),
def getitem_curr_pos(self, idx, ctx=None):
positions, _ = self.getter(idx, ctx, downsample=True)
input_positions = positions[:self.n_input_timesteps,:,:]
current_input_position = input_positions[-1,:,:]
# Scale position so it fits for the positional embedding
current_input_position = self.scale_pos(current_input_position)
return current_input_position
def getitem_curr_pos_full(self, idx, ctx=None):
positions, _ = self.getter(idx, ctx, downsample=False)
input_positions = positions[:self.n_input_timesteps,:,:]
current_input_position = input_positions[-1,:,:]
# Scale position so it fits for the positional embedding
current_input_position = self.scale_pos(current_input_position)
return current_input_position
def getitem_x(self, idx, ctx=None):
positions, _ = self.getter(idx, ctx, downsample=True)
# Calculate velocities
input_features = self.get_velocities(positions[:self.n_input_timesteps,:,:])
# Reshape for mesh_collator (num_input_timesteps, num_channels, num_points)
input_features = input_features.permute(0,2,1)
return input_features
def getitem_edge_index(self, idx, ctx=None, downsample=True):
positions, _ = self.getter(idx, ctx, downsample=True)
input_positions = positions[:self.n_input_timesteps,:,:]
current_input_position = input_positions[-1,:,:]
if self.graph_mode == 'knn':
knn = KNNGraph(k=self.knn_graph_k, loop=True, force_undirected=True)
edge_index = knn(Data(pos=current_input_position)).edge_index.T
elif self.graph_mode == 'radius_graph':
edge_index = radius_graph(x=current_input_position,
r=self.radius_graph_r,
max_num_neighbors=self.radius_graph_max_num_neighbors,
loop=True).T
elif self.graph_mode == 'radius_graph_with_supernodes':
# select supernodes
generator = self._get_generator(idx)
perm_supernodes = torch.randperm(current_input_position.shape[0], generator=generator)[:self.n_supernodes]
supernodes_pos = current_input_position[perm_supernodes]
# create edges: this can include self-loop or not depending on how many neighbors are found.
# if too many neighbors are found, neighbors are selected randomly which can discard the self-loop
edge_index = radius(
x=current_input_position,
y=supernodes_pos,
r=self.radius_graph_r,
max_num_neighbors=self.radius_graph_max_num_neighbors,
)
# correct supernode index
edge_index[0] = perm_supernodes[edge_index[0]]
edge_index = edge_index.T
return edge_index
def getitem_edge_index_target(self, idx, ctx=None, downsample=True):
positions, _ = self.getter(idx, ctx, downsample=True)
target_position = positions[-1,:,:]
if self.graph_mode == 'knn':
knn = KNNGraph(k=self.knn_graph_k, loop=True, force_undirected=True)
edge_index = knn(Data(pos=target_position)).edge_index.T
elif self.graph_mode == 'radius_graph':
edge_index = radius_graph(x=target_position,
r=self.radius_graph_r,
max_num_neighbors=self.radius_graph_max_num_neighbors,
loop=True).T
elif self.graph_mode == 'radius_graph_with_supernodes':
# select supernodes
generator = self._get_generator(idx)
perm_supernodes = torch.randperm(target_position.shape[0], generator=generator)[:self.n_supernodes]
supernodes_pos = target_position[perm_supernodes]
# create edges: this can include self-loop or not depending on how many neighbors are found.
# if too many neighbors are found, neighbors are selected randomly which can discard the self-loop
edge_index = radius(
x=target_position,
y=supernodes_pos,
r=self.radius_graph_r,
max_num_neighbors=self.radius_graph_max_num_neighbors,
)
# correct supernode index
edge_index[0] = perm_supernodes[edge_index[0]]
edge_index = edge_index.T
return edge_index
# Only used in GNS
def getitem_edge_features(self, idx, ctx=None, downsample=True):
edge_index = self.getitem_edge_index_target(idx, ctx, downsample=downsample)
positions, _ = self.getter(idx, ctx, downsample=True)
target_position = positions[-1,:,:]
relative_displacement = target_position[edge_index[:,0]] - target_position[edge_index[:,1]]
distance = relative_displacement.norm(dim=-1)
return torch.concat([relative_displacement, distance.unsqueeze(dim=-1)], dim=1)
def getitem_target_vel(self, idx, ctx=None):
positions, _ = self.getter(idx, ctx, downsample=False)
target_features = self.get_velocities(positions[-2:,:,:]).squeeze()
return target_features
def getitem_perm(self, idx, ctx=None):
if ctx and 'perm' in ctx:
return ctx['perm'], self.n_particles
else:
return torch.arange(self.n_particles), self.n_particles
def getitem_target_vel_large_t(self, idx, ctx=None):
positions, _ = self.getter(idx, ctx, downsample=False)
target_features = self.get_velocities(positions[-3:,:,:]).squeeze()
target_features = einops.rearrange(
target_features,
"num_input_timesteps n_particles dim -> n_particles (num_input_timesteps dim)"
)
return target_features
def getitem_target_acc(self, idx, ctx=None):
positions, _ = self.getter(idx, ctx, downsample=False)
target_features = self.get_accelerations(positions[-3:,:,:]).squeeze()
return target_features
def getitem_target_pos(self, idx, ctx=None):
positions, _ = self.getter(idx, ctx, downsample=False)
if self.split == 'test' or self.split == 'valid':
current_output_position = positions[self.n_input_timesteps:,:,:]
else:
current_output_position = positions[-1,:,:]
current_output_position = self.scale_pos(current_output_position)
return current_output_position
def getitem_target_pos_encode(self, idx, ctx=None):
positions, _ = self.getter(idx, ctx, downsample=True)
if self.split == 'test' or self.split == 'valid':
current_output_position = positions[self.n_input_timesteps:,:,:]
else:
current_output_position = positions[-1,:,:]
current_output_position = self.scale_pos(current_output_position)
return current_output_position
def getitem_last_but_one_pos(self, idx, ctx=None):
positions, _ = self.getter(idx, ctx, downsample=False)
current_output_position = positions[-2,:,:]
current_output_position = self.scale_pos(current_output_position)
return current_output_position
def getitem_particle_type(self, idx, ctx=None):
_, particle_type = self.getter(idx, ctx)
return particle_type
def getitem_prev_acc(self, idx, ctx=None):
positions, _ = self.getter(idx, ctx, downsample=False)
prev_acc = self.get_accelerations(positions[-4:,:,:]).squeeze()
return prev_acc[0,:,:]
def getitem_prev_pos(self, idx, ctx=None):
positions, _ = self.getter(idx, ctx, downsample=False)
input_positions = positions[:self.n_input_timesteps,:,:]
current_input_position = input_positions[-2,:,:]
# Scale position so it fits for the positional embedding
current_input_position = self.scale_pos(current_input_position)
return current_input_position
# Methods for rollout with large delta T
def getitem_all_pos(self, idx, ctx=None):
positions, _ = self.getter(idx, ctx, downsample=False)
positions = self.scale_pos(positions)
return positions
def getitem_all_vel(self, idx, ctx=None):
positions, _ = self.getter(idx, ctx, downsample=False)
velocities = self.get_velocities(positions)
return velocities
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
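A standalone check (numbers invented) of the periodic-boundary displacement used in get_velocities and get_accelerations above: (d + box/2) % box - box/2 maps a raw displacement to the shortest wrapped displacement, matching jax_md.space.periodic:

import torch

box = torch.tensor([1.0, 1.0])
prev_pos = torch.tensor([0.95, 0.50])
next_pos = torch.tensor([0.05, 0.55])  # the particle wrapped around in x
raw = next_pos - prev_pos              # tensor([-0.90, 0.05])
wrapped = (raw + 0.5 * box) % box - 0.5 * box
assert torch.allclose(wrapped, torch.tensor([0.10, 0.05]))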
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/datasets/__init__.py | src/datasets/__init__.py | import logging
from utils.factory import instantiate
def dataset_from_kwargs(
kind,
dataset_config_provider,
dataset_wrappers=None,
sample_wrappers=None,
**kwargs,
):
dataset = instantiate(
module_names=[f"datasets.{kind}", f"datasets.wrappers.{kind}"],
type_names=[kind],
dataset_config_provider=dataset_config_provider,
**kwargs,
)
if dataset_wrappers is not None:
assert isinstance(dataset_wrappers, list)
for dataset_wrapper_kwargs in dataset_wrappers:
dataset_wrapper_kind = dataset_wrapper_kwargs.pop("kind")
logging.info(f"instantiating dataset_wrapper {dataset_wrapper_kind}")
dataset = instantiate(
module_names=[
f"datasets.wrappers.dataset_wrappers.{dataset_wrapper_kind}",
f"kappadata.wrappers.dataset_wrappers.{dataset_wrapper_kind}"
],
type_names=[dataset_wrapper_kind],
dataset=dataset,
**dataset_wrapper_kwargs,
)
if sample_wrappers is not None:
assert isinstance(sample_wrappers, list)
for sample_wrapper_kwargs in sample_wrappers:
sample_wrapper_kind = sample_wrapper_kwargs.pop("kind")
logging.info(f"instantiating sample_wrapper {sample_wrapper_kind}")
dataset = instantiate(
module_names=[
f"datasets.sample_wrappers.{sample_wrapper_kind}",
f"kappadata.common.wrappers.sample_wrappers.{sample_wrapper_kind}",
f"kappadata.wrappers.sample_wrappers.{sample_wrapper_kind}",
],
type_names=[sample_wrapper_kind],
dataset=dataset,
**sample_wrapper_kwargs,
)
return dataset
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
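A conceptual sketch (hypothetical wrapper classes) of the chaining performed by dataset_from_kwargs above: each wrapper receives the previously built dataset, so the order of entries in the config list matters:

class BaseDataset:
    pass

class WrapperA:
    def __init__(self, dataset):
        self.dataset = dataset

class WrapperB:
    def __init__(self, dataset):
        self.dataset = dataset

dataset = BaseDataset()
for wrapper_cls in [WrapperA, WrapperB]:  # order taken from the config list
    dataset = wrapper_cls(dataset=dataset)
assert isinstance(dataset, WrapperB) and isinstance(dataset.dataset, WrapperA)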
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/datasets/cfd_dataset.py | src/datasets/cfd_dataset.py | import os
import einops
import numpy as np
import scipy
import torch
from kappadata.copying.image_folder import copy_imagefolder_from_global_to_local
from kappautils.param_checking import to_2tuple
from torch_geometric.nn.pool import radius, radius_graph
from distributed.config import barrier, is_data_rank0
from utils.num_worker_heuristic import get_fair_cpu_count
from .base.dataset_base import DatasetBase
class CfdDataset(DatasetBase):
def __init__(
self,
version,
num_input_timesteps,
radius_graph_r=None,
radius_graph_max_num_neighbors=None,
num_input_points=None,
num_input_points_ratio=None,
num_input_points_mode="uniform",
num_supernodes=None,
supernode_edge_mode="mesh_to_supernode",
num_query_points=None,
num_query_points_mode="input",
couple_query_with_input=False,
split="train",
standardize_query_pos=False,
global_root=None,
local_root=None,
grid_resolution=None,
max_num_sequences=None,
max_num_timesteps=None,
norm="mean0std1",
clamp=None,
clamp_mode="hard",
seed=None,
**kwargs,
):
super().__init__(**kwargs)
self.version = version
self.split = split
self.max_num_sequences = max_num_sequences
self.max_num_timesteps = max_num_timesteps
# radius graph
self.radius_graph_r = radius_graph_r
self.radius_graph_max_num_neighbors = radius_graph_max_num_neighbors or int(1e10)
if self.radius_graph_max_num_neighbors == float("inf"):
self.radius_graph_max_num_neighbors = int(1e10)
# query
self.num_query_points = num_query_points
self.num_query_points_mode = num_query_points_mode
self.couple_query_with_input = couple_query_with_input
if couple_query_with_input:
assert self.num_query_points is None, "couple_query_with_input requires 'num_query_points is None'"
# num input points
self.num_input_points = to_2tuple(num_input_points)
self.num_input_points_ratio = to_2tuple(num_input_points_ratio)
self.num_input_points_mode = num_input_points_mode
self.num_supernodes = num_supernodes
self.supernode_edge_mode = supernode_edge_mode
assert not (self.num_input_points is not None and self.num_input_points_ratio is not None)
# grid
assert grid_resolution is None or len(grid_resolution) == 2
self.grid_resolution = grid_resolution
# input timesteps
assert 0 < num_input_timesteps
self.num_input_timesteps = num_input_timesteps
# standardize query pos for interpolated (for torch.nn.functional.grid_sample)
self.standardize_query_pos = standardize_query_pos
self.seed = seed
self.norm = norm
self.clamp = clamp
self.clamp_mode = clamp_mode
self.num_input_points_cache = []
if norm == "none":
self.mean = torch.tensor([0., 0., 0.])
self.std = torch.tensor([1., 1., 1.])
elif version == "v1-1sim":
self.mean = torch.tensor([0.029124850407242775, 0.00255209649913013, 0.0010001148330047727])
self.std = torch.tensor([0.026886435225605965, 0.01963668502867222, 0.001666962169110775])
elif version == "v1-2sims":
# NOTE: copied from v1-1sim
self.mean = torch.tensor([0.029124850407242775, 0.00255209649913013, 0.0010001148330047727])
self.std = torch.tensor([0.026886435225605965, 0.01963668502867222, 0.001666962169110775])
elif version == "v1-1000sims":
self.mean = torch.tensor([0.036486752331256866, 2.509498517611064e-05, 0.000451924919616431])
self.std = torch.tensor([0.026924047619104385, 0.02058381214737892, 0.002078353427350521])
elif version == "v1-10000sims":
# crashed cases: case_6709 case_3580 case_3577 case_3578
# incomplete cases: case_3581
# no mesh data: case_3579
self.mean = torch.tensor([0.0152587890625, -1.7881393432617188e-06, 0.0003612041473388672])
self.std = torch.tensor([0.0233612060546875, 0.0184173583984375, 0.0019378662109375])
elif version == "v1-686sims-1object":
self.mean = torch.tensor([0.03460693359375, -3.236532211303711e-05, 7.647275924682617e-05])
self.std = torch.tensor([0.01055145263671875, 0.00829315185546875, 0.0004229545593261719])
elif version == "v1-1900sims":
self.mean = torch.tensor([0.03460693359375, -1.806020736694336e-05, 0.00010699033737182617])
self.std = torch.tensor([0.01363372802734375, 0.01102447509765625, 0.0006461143493652344])
elif version == "v2-2500sims":
if norm == "mean0std1q25":
# cfddataset_norm.py --q 0.25 --root /local00/bioinf/mesh_dataset/v2-2500sims --exclude_last 500
self.mean = torch.tensor([0.03450389206409454, -5.949020305706654e-06, 0.00010136327182408422])
self.std = torch.tensor([0.0031622101087123156, 0.0018765029963105917, 0.0001263884623767808])
else:
raise NotImplementedError
elif version == "v2-5000sims":
if norm == "mean0std1":
self.mean = torch.tensor([0.0258941650390625, -1.823902130126953e-05, 0.00012934207916259766])
self.std = torch.tensor([0.01482391357421875, 0.01200103759765625, 0.0007719993591308594])
elif norm == "mean0std1q05":
# cfddataset_norm.py with --q 0.05
self.mean = torch.tensor([0.035219039767980576, -2.1968364308122545e-05, 0.0001966722047654912])
self.std = torch.tensor([0.010309861041605473, 0.007318499963730574, 0.0005381687660701573])
elif norm == "mean0std1q1":
# cfddataset_norm.py with --q 0.1
self.mean = torch.tensor([0.036569397896528244, -2.364995816606097e-05, 0.00019191036699339747])
self.std = torch.tensor([0.00839781854301691, 0.005956545472145081, 0.0004608448361977935])
elif norm == "mean0std1q25":
# cfddataset_norm.py with --q 0.25
self.mean = torch.tensor([0.036188144236803055, -2.3106376829673536e-05, 0.0001511715818196535])
self.std = torch.tensor([0.0047589014284312725, 0.0034182844683527946, 0.00027269049314782023])
else:
raise NotImplementedError
elif version == "v2-6000sims":
if norm == "mean0std1q25":
# python cfddataset_norm.py --root .../v2-6000sims --q 0.25 --exclude_last 1000
self.mean = torch.tensor([0.026319274678826332, -1.2412071725975693e-07, 5.59896943741478e-05])
self.std = torch.tensor([0.0031868498772382736, 0.0021304511465132236, 0.000102771315141581])
else:
raise NotImplementedError
elif version == "v3-10000sims":
# crashed cases: case_1589 case_2188 case_2679 case_3021 case_5378 case_7508 case_7644 case_8035 case_8757
if norm == "mean0std1q25":
# cfddataset_norm.py --root .../v3-10000sims --q 0.25 --exclude_last 2000
self.mean = torch.tensor([0.03648518770933151, 1.927249059008318e-06, 0.000112384237581864])
self.std = torch.tensor([0.005249467678368092, 0.003499444341287017, 0.0002817418717313558])
else:
raise NotImplementedError
else:
raise NotImplementedError
# define spatial min/max of simulation
# x in [-0.5, 0.5], y in [-0.5, 1] -> scale by 200 -> x in [0, 200], y in [0, 300]
self.max_x_pos = 200
self.max_y_pos = 300
self.pos_scale = 200
self.sim_x_pos_min = -0.5
self.sim_y_pos_min = -0.5
# source_root
global_root, local_root = self._get_roots(global_root, local_root, "mesh_dataset")
if local_root is None:
# load data from global_root
self.source_root = global_root / version
self.logger.info(f"data_source (global): '{self.source_root}'")
else:
# load data from local_root
self.source_root = local_root / "mesh_dataset"
if is_data_rank0():
# copy data from global to local
self.logger.info(f"data_source (global): '{global_root}'")
self.logger.info(f"data_source (local): '{self.source_root}'")
# copy images
copy_imagefolder_from_global_to_local(
global_path=global_root,
local_path=self.source_root,
relative_path=version,
log_fn=self.logger.info,
num_workers=min(10, get_fair_cpu_count()),
)
self.source_root = self.source_root / version
barrier()
assert self.source_root.exists(), f"'{self.source_root.as_posix()}' doesn't exist"
# load name of sequences (name of folders)
seqnames = list(sorted([name for name in os.listdir(self.source_root) if (self.source_root / name).is_dir()]))
assert len(seqnames) > 0, f"couldnt find any sequences in '{self.source_root.as_posix()}'"
# filter out seqnames of split
seqnames = self._filter_split_seqnames(seqnames)
assert len(seqnames) > 0, f"filtered out all sequences of '{self.source_root.as_posix()}'"
# load filenames for each sequence
self.samples = []
for seqname in seqnames:
samples = [
fname
for fname in sorted(os.listdir(self.source_root / seqname))
if self._is_timestep_fname(fname)
]
self.samples.append((seqname, samples))
# check that all folders have equally many timesteps
seqlens = [len(fnames) for _, fnames in self.samples]
if not all(seqlens[0] == seqlen for seqlen in seqlens):
for seqname, fnames in self.samples:
self.logger.info(f"- {seqname} {len(fnames)}")
raise RuntimeError("not all sequencelengths are the same")
if self.max_num_timesteps is not None:
assert max_num_timesteps <= seqlens[0]
self.max_timestep = max_num_timesteps
else:
self.max_timestep = seqlens[0]
# first timestep cannot be predicted
self.max_timestep -= 1
def _filter_split_seqnames(self, seqnames):
if self.version in ["v1-1sim"]:
assert len(seqnames) == 1
return seqnames
if self.version in ["v1-2sims"]:
assert len(seqnames) == 2
return seqnames
if self.version in ["v1-1000sims"]:
assert self.max_num_sequences is None
# seqnames is e.g. "case_95"
seqname_to_caseidx = {seqname: int(seqname.split("_")[1]) for seqname in seqnames}
if self.split == "train":
return [seqname for seqname, idx in seqname_to_caseidx.items() if 10 <= idx]
if self.split == "test":
return [seqname for seqname, idx in seqname_to_caseidx.items() if idx < 10]
if self.split == "train-10sims":
return [seqname for seqname, idx in seqname_to_caseidx.items() if 10 <= idx][:10]
if self.version in ["v1-10000sims"]:
assert self.max_num_sequences is None
# seqnames is e.g. "case_95"
seqname_to_caseidx = {seqname: int(seqname.split("_")[1]) for seqname in seqnames}
if self.split == "train":
return [seqname for seqname, idx in seqname_to_caseidx.items() if idx <= 10005]
if self.split == "test":
return [seqname for seqname, idx in seqname_to_caseidx.items() if idx > 10005]
if self.split == "train-10sims":
return [seqname for seqname, idx in seqname_to_caseidx.items() if idx < 10]
if self.version in ["v1-686sims-1object"]:
caseidx_to_seqname = {int(seqname.split("_")[1]): seqname for seqname in seqnames}
sorted_caseidxs = list(sorted(caseidx_to_seqname.keys()))
num_train_sequences = 650
if self.split == "train":
split_seqnames = [caseidx_to_seqname[case_idx] for case_idx in sorted_caseidxs[:num_train_sequences]]
elif self.split == "test":
split_seqnames = [caseidx_to_seqname[case_idx] for case_idx in sorted_caseidxs[num_train_sequences:]]
else:
raise NotImplementedError
if self.max_num_sequences is not None:
split_seqnames = split_seqnames[:self.max_num_sequences]
return split_seqnames
if self.version in ["v1-1900sims"]:
caseidx_to_seqname = {int(seqname.split("_")[1]): seqname for seqname in seqnames}
sorted_caseidxs = list(sorted(caseidx_to_seqname.keys()))
num_train_sequences = 1900
if self.split == "train":
split_seqnames = [caseidx_to_seqname[case_idx] for case_idx in sorted_caseidxs[:num_train_sequences]]
elif self.split == "test":
split_seqnames = [caseidx_to_seqname[case_idx] for case_idx in sorted_caseidxs[num_train_sequences:]]
else:
raise NotImplementedError
if self.max_num_sequences is not None:
split_seqnames = split_seqnames[:self.max_num_sequences]
return split_seqnames
if self.version == "v2-5000sims":
# v2-5000sims is a subset of v2-10000sims
caseidx_to_seqname = {int(seqname.split("_")[1]): seqname for seqname in seqnames}
sorted_caseidxs = list(sorted(caseidx_to_seqname.keys()))
assert len(sorted_caseidxs) >= 5500
num_train_sequences = 5000
num_test_sequences = 500
num_val_sequences = 500
if self.split == "train":
split_seqnames = [caseidx_to_seqname[case_idx] for case_idx in sorted_caseidxs[:num_train_sequences]]
elif self.split == "test":
split_seqnames = [
caseidx_to_seqname[case_idx]
for case_idx in sorted_caseidxs[num_train_sequences:num_train_sequences + num_test_sequences]
]
elif self.split == "val":
start = num_train_sequences + num_test_sequences
end = start + num_val_sequences
split_seqnames = [
caseidx_to_seqname[case_idx]
for case_idx in sorted_caseidxs[start:end]
]
else:
raise NotImplementedError
if self.max_num_sequences is not None:
split_seqnames = split_seqnames[:self.max_num_sequences]
return split_seqnames
if self.version == "v2-10000sims":
caseidx_to_seqname = {int(seqname.split("_")[1]): seqname for seqname in seqnames}
sorted_caseidxs = list(sorted(caseidx_to_seqname.keys()))
assert len(sorted_caseidxs) >= 10000
num_train_sequences = 9500
num_test_sequences = 500
if self.split == "train":
split_seqnames = [caseidx_to_seqname[case_idx] for case_idx in sorted_caseidxs[:num_train_sequences]]
elif self.split == "test":
split_seqnames = [
caseidx_to_seqname[case_idx]
for case_idx in sorted_caseidxs[num_train_sequences:num_train_sequences + num_test_sequences]
]
else:
raise NotImplementedError
if self.max_num_sequences is not None:
split_seqnames = split_seqnames[:self.max_num_sequences]
return split_seqnames
if self.version == "v2-2500sims":
# v2-2500sims is a subset of v2-10000sims of simulations that contain only 1 object
caseidx_to_seqname = {int(seqname.split("_")[1]): seqname for seqname in seqnames}
sorted_caseidxs = list(sorted(caseidx_to_seqname.keys()))
assert len(sorted_caseidxs) >= 2500
num_train_sequences = 2000
num_test_sequences = 500
if self.split == "train":
split_seqnames = [caseidx_to_seqname[case_idx] for case_idx in sorted_caseidxs[:num_train_sequences]]
elif self.split == "test":
split_seqnames = [
caseidx_to_seqname[case_idx]
for case_idx in sorted_caseidxs[num_train_sequences:num_train_sequences + num_test_sequences]
]
else:
raise NotImplementedError
if self.max_num_sequences is not None:
split_seqnames = split_seqnames[:self.max_num_sequences]
return split_seqnames
if self.version == "v2-6000sims":
# v2-6000sims is subset of v2-10000sims of simulations with 0.01 < v < 0.04
caseidx_to_seqname = {int(seqname.split("_")[1]): seqname for seqname in seqnames}
sorted_caseidxs = list(sorted(caseidx_to_seqname.keys()))
assert len(sorted_caseidxs) == 6000
num_train_sequences = 5000
num_test_sequences = 1000
if self.split == "train":
split_seqnames = [caseidx_to_seqname[case_idx] for case_idx in sorted_caseidxs[:num_train_sequences]]
elif self.split == "test":
split_seqnames = [
caseidx_to_seqname[case_idx]
for case_idx in sorted_caseidxs[num_train_sequences:num_train_sequences + num_test_sequences]
]
else:
raise NotImplementedError
if self.max_num_sequences is not None:
split_seqnames = split_seqnames[:self.max_num_sequences]
return split_seqnames
if self.version == "v3-10000sims":
caseidx_to_seqname = {int(seqname.split("_")[1]): seqname for seqname in seqnames}
sorted_caseidxs = list(sorted(caseidx_to_seqname.keys()))
assert len(sorted_caseidxs) == 10000
num_train_sequences = 8000
num_valid_sequences = 1000
num_test_sequences = 1000
if self.split == "train":
split_seqnames = [caseidx_to_seqname[case_idx] for case_idx in sorted_caseidxs[:num_train_sequences]]
assert len(split_seqnames) == num_train_sequences
elif self.split == "valid":
split_seqnames = [
caseidx_to_seqname[case_idx]
for case_idx in sorted_caseidxs[num_train_sequences:num_train_sequences + num_valid_sequences]
]
assert len(split_seqnames) == num_valid_sequences
elif self.split == "test":
split_seqnames = [
caseidx_to_seqname[case_idx]
for case_idx in sorted_caseidxs[num_train_sequences + num_valid_sequences:]
]
assert len(split_seqnames) == num_test_sequences
else:
raise NotImplementedError
if self.max_num_sequences is not None:
split_seqnames = split_seqnames[:self.max_num_sequences]
return split_seqnames
raise NotImplementedError
@staticmethod
def _is_timestep_fname(fname):
if not fname.endswith(".th"):
return False
if fname in ["object_mask.th", "U_init.th", "x.th", "y.th", "movement_per_position.th", "num_objects.th"]:
return False
if fname.startswith("edge_index"):
return False
if fname.startswith("sampling_weights"):
return False
assert fname.endswith("_mesh.th") and fname[:-len("_mesh.th")].isdigit()
return True
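    # behaviour sketch of _is_timestep_fname (derived from the checks above):
    #   "0_mesh.th", "42_mesh.th"          -> True (per-timestep mesh files)
    #   "object_mask.th", "U_init.th"      -> False (static per-sequence files)
    #   names starting with "edge_index" or "sampling_weights" -> False (precomputed graph data)
    #   any other ".th" name trips the assert, so unexpected files fail loudly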
def __len__(self):
if self.num_input_timesteps == float("inf"):
# dataset for rollout: all samples of a sequence are returned
return len(self.samples)
return self.max_timestep * len(self.samples)
# noinspection PyUnusedLocal
def getitem_timestep(self, idx, ctx=None):
return idx % self.max_timestep
def getshape_timestep(self):
return self.max_timestep,
def denormalize(self, data, inplace=False):
assert data.size(1) == len(self.mean)
shape = [1] * data.ndim
shape[1] = len(self.mean)
mean = self.mean.view(*shape).to(data.device)
std = self.std.view(*shape).to(data.device)
if inplace:
data.mul_(std).add_(mean)
else:
data = data * std + mean
return data
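    # usage sketch (hypothetical names): undo the channel-wise normalization of a model output
    # pred = model(batch)                        # (batch_size, num_channels, ...) in normalized scale
    # pred_physical = dataset.denormalize(pred)  # out-of-place by default; pass inplace=True to avoid the copy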
def _get_sim_name(self, idx):
if self.num_input_timesteps == float("inf"):
# dataset for rollout -> return full trajectory
sim_name, _ = self.samples[idx]
else:
# dataset for training -> predict random timestep
seqidx = idx // self.max_timestep
sim_name, _ = self.samples[seqidx]
return sim_name
# noinspection PyUnusedLocal
def getitem_geometry2d(self, idx, ctx=None):
sim_name = self._get_sim_name(idx)
return torch.load(self.source_root / sim_name / "object_mask.th")
def getshape_geometry2d(self):
# none=0
# is_obstacle=1
shape = self.getitem_geometry2d(0).shape
return 2, *shape
# noinspection PyUnusedLocal
def getitem_num_objects(self, idx, ctx=None):
sim_name = self._get_sim_name(idx)
        return torch.load(self.source_root / sim_name / "num_objects.th")
# noinspection PyUnusedLocal
def getitem_velocity(self, idx, ctx=None):
if self.version in ["v1-1sim"]:
return 0
sim_name = self._get_sim_name(idx)
if self.version in ["v1-2sims"]:
if idx < len(self) // 2:
return 0
return 1
        # v is sampled from U[0.01, 0.06]
        v = torch.load(self.source_root / sim_name / "U_init.th")
# convert to [0, 200] for sincos embedding
v.sub_(0.01).div_(0.05).mul_(200)
return v
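    # worked example for the rescaling above: v = 0.035 (the midpoint of U[0.01, 0.06])
    # -> (0.035 - 0.01) / 0.05 * 200 = 100, i.e. the center of the [0, 200] embedding range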
def _get_generator(self, idx):
if self.num_input_timesteps == float("inf"):
# deterministically downsample for evaluation
return torch.Generator().manual_seed(int(idx) + (self.seed or 0))
if self.split == "test":
assert self.seed is not None
if self.seed is not None:
return torch.Generator().manual_seed(int(idx) + self.seed)
return None
def _downsample_input(self, data, idx=None, ctx=None):
if self.num_input_points_ratio is None and self.num_input_points is None:
return data
assert ctx is not None
if "input_perm" in ctx:
perm = ctx["input_perm"]
else:
generator = self._get_generator(idx)
if self.num_input_points is not None:
if self.num_input_points[0] == self.num_input_points[1]:
# fixed num_input_points
end = self.num_input_points[0]
else:
# variable num_input_points
# make sure each batch has the same number of points to avoid heavy memory fluctuations
                    # this is achieved by making the numbers of points in 2 consecutive samples sum to lb + ub
if len(self.num_input_points_cache) > 0:
assert len(self.num_input_points_cache) == 1
end = self.num_input_points_cache.pop()
else:
                        assert generator is None, "variable num_input_points doesn't support seed"
lb, ub = self.num_input_points
midpoint = torch.randint(ub - lb, size=(1,)).item()
end = ub - midpoint
self.num_input_points_cache.append(lb + midpoint)
elif self.num_input_points_ratio is not None:
if self.num_input_points_ratio[0] == self.num_input_points_ratio[1]:
# fixed num_input_points_ratio
end = int(len(data) * self.num_input_points_ratio[0])
else:
# variable num_points_ratio
lb, ub = self.num_input_points_ratio
num_points_ratio = torch.rand(size=(1,), generator=generator).item() * (ub - lb) + lb
end = int(len(data) * num_points_ratio)
else:
raise NotImplementedError
if self.num_input_points_mode == "uniform":
# uniform sampling
perm = torch.randperm(len(data), generator=generator)[:end]
else:
# weighted sampling
sim_name = self._get_sim_name(idx)
weights = torch.load(self.source_root / sim_name / f"sampling_weights_{self.num_input_points_mode}.th")
perm = torch.multinomial(weights.float(), num_samples=end, replacement=False, generator=generator)
ctx["input_perm"] = perm
return data[perm]
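    # sketch of the paired sampling above with num_input_points = (lb, ub) = (100, 300):
    # sample 1: midpoint = randint(200) -> e.g. 60, end = 300 - 60 = 240, cache = [100 + 60] = [160]
    # sample 2: cache is non-empty -> end = 160, so two consecutive samples sum to lb + ub = 400 points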
def _downsample_query(self, data, idx=None, ctx=None):
        # rollout needs the same permutation for input and query because the prediction is used as the next input
        # if the rollout goes through latent space this is not strictly needed, but that edge case is not considered
if self.num_input_timesteps == float("inf"):
assert self.num_query_points is None
return self._downsample_input(data, idx=idx, ctx=ctx)
# use input perm also for query (required for consistency losses)
if self.couple_query_with_input:
assert self.num_query_points is None
return self._downsample_input(data, idx=idx, ctx=ctx)
if self.num_query_points is None:
return data
if "query_perm" in ctx:
perm = ctx["query_perm"]
else:
if self.num_query_points_mode == "input":
# use the same permutation as for downsampling the input
# -> the points that are used as input are always used as target
perm = ctx["input_perm"]
assert len(perm) >= self.num_query_points
elif self.num_query_points_mode == "arbitrary":
# generate new permutation -> any points can be used as target
generator = self._get_generator(idx)
perm = torch.randperm(len(data), generator=generator)
else:
raise NotImplementedError
perm = perm[:self.num_query_points]
ctx["query_perm"] = perm
return data[perm]
def _downsample_reconstruction_output(self, data, idx=None, ctx=None):
        # rollout needs the same permutation for input and query because the prediction is used as the next input
        # if the rollout goes through latent space this is not strictly needed, but that edge case is not considered
assert not self.num_input_timesteps == float("inf")
assert not self.couple_query_with_input
if self.num_query_points is None:
return data
if "rec_perm" in ctx:
perm = ctx["rec_perm"]
else:
if self.num_query_points_mode == "input":
# use the same permutation as for downsampling the input
# -> the points that are used as input are always used as target
raise NotImplementedError
elif self.num_query_points_mode == "arbitrary":
# generate new permutation -> any points can be used as target
generator = self._get_generator(idx)
perm = torch.randperm(len(data), generator=generator)
else:
raise NotImplementedError
perm = perm[:self.num_query_points]
ctx["rec_perm"] = perm
return data[perm]
def _load_xy(self, case_uri):
# swap from simulation format (width=x height=y) to torch format (height=x width=y)
        x = torch.load(case_uri / "y.th").float()
        y = torch.load(case_uri / "x.th").float()
# shift positions to start from 0 and scale by 200
# x in [-0.5, 0.5] y in [-0.5, 1]
x.sub_(self.sim_x_pos_min).mul_(self.pos_scale)
y.sub_(self.sim_y_pos_min).mul_(self.pos_scale)
        assert torch.all(0 <= x), f"error in {case_uri.name} x.min={x.min().item()}"
        assert torch.all(x < self.max_x_pos), f"error in {case_uri.name} x.max={x.max().item()}"
        assert torch.all(0 <= y), f"error in {case_uri.name} y.min={y.min().item()}"
        assert torch.all(y < self.max_y_pos), f"error in {case_uri.name} y.max={y.max().item()}"
# stack
all_pos = torch.stack([x, y], dim=1)
return all_pos
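    # worked example for the shift/scale above, assuming sim_x_pos_min = -0.5 and pos_scale = 200
    # (consistent with the "x in [-0.5, 0.5]" comment): x = -0.5 -> 0, x = 0.0 -> 100, x -> 200 as x -> 0.5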
def getitem_all_pos(self, idx, ctx=None):
if ctx is not None and "all_pos" in ctx:
return ctx["all_pos"]
sim_name = self._get_sim_name(idx)
all_pos = self._load_xy(self.source_root / sim_name)
# cache
if ctx is not None:
assert "all_pos" not in ctx
ctx["all_pos"] = all_pos
return all_pos
def getitem_mesh_pos(self, idx, ctx=None):
if ctx is not None and "mesh_pos" in ctx:
return ctx["mesh_pos"]
mesh_pos = self.getitem_all_pos(idx, ctx=ctx)
mesh_pos = self._downsample_input(mesh_pos, idx=idx, ctx=ctx)
if ctx is not None:
assert "mesh_pos" not in ctx
ctx["mesh_pos"] = mesh_pos
return mesh_pos
def getitem_query_pos(self, idx, ctx=None):
if ctx is not None and "query_pos" in ctx:
return ctx["query_pos"]
query_pos = self.getitem_all_pos(idx, ctx=ctx)
query_pos = self._downsample_query(query_pos, idx=idx, ctx=ctx)
if self.standardize_query_pos:
query_pos = query_pos / (torch.tensor([self.max_x_pos, self.max_y_pos])[None, :] / 2) - 1
if ctx is not None:
assert "query_pos" not in ctx
ctx["query_pos"] = query_pos
return query_pos
def getitem_reconstruction_pos(self, idx, ctx=None):
if ctx is not None and "rec_pos" in ctx:
return ctx["rec_pos"]
rec_pos = self.getitem_all_pos(idx, ctx=ctx)
rec_pos = self._downsample_reconstruction_output(rec_pos, idx=idx, ctx=ctx)
if ctx is not None:
assert "rec_pos" not in ctx
ctx["rec_pos"] = rec_pos
return rec_pos
# noinspection PyUnusedLocal
def getitem_grid_pos(self, idx=None, ctx=None):
if ctx is not None and "grid_pos" in ctx:
return ctx["grid_pos"]
# generate positions for a regular grid (e.g. for GINO encoder)
assert self.grid_resolution is not None
x_linspace = torch.linspace(0, self.max_x_pos, self.grid_resolution[0])
y_linspace = torch.linspace(0, self.max_y_pos, self.grid_resolution[1])
# generate positions (grid_resolution[0] * grid_resolution[1], 2)
grid_pos = torch.stack(torch.meshgrid(x_linspace, y_linspace, indexing="ij")).flatten(start_dim=1).T
#
if ctx is not None:
assert "grid_pos" not in ctx
ctx["grid_pos"] = grid_pos
return grid_pos
# noinspection PyUnusedLocal
def getitem_mesh_edges(self, idx, ctx=None):
assert self.grid_resolution is None
if self.radius_graph_r is None:
# radius graph is created on GPU
return None
sim_name = self._get_sim_name(idx)
# load positions
mesh_pos = self.getitem_mesh_pos(idx, ctx=ctx)
if self.supernode_edge_mode == "mesh_to_mesh":
# generate mesh_to_supernode edges by creating mesh_to_mesh edges and filtering them
# this makes sure to include a self connection but leads to dataloading bottlenecks on slow CPUs
# mesh to mesh interactions -> scales quadratically O(num_mesh_points^2)
if self.num_supernodes is None:
# normal flow direction
flow = "source_to_target"
else:
# inverted flow direction is required to have sorted dst_indices
flow = "target_to_source"
edges = radius_graph(
x=mesh_pos,
r=self.radius_graph_r,
max_num_neighbors=self.radius_graph_max_num_neighbors,
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | true |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/datasets/shapenet_car.py | src/datasets/shapenet_car.py | import einops
import scipy
import os
import shutil
import meshio
import numpy as np
import torch
from kappautils.param_checking import to_3tuple, to_2tuple
from torch_geometric.nn.pool import radius, radius_graph
from distributed.config import barrier, is_data_rank0
from .base.dataset_base import DatasetBase
class ShapenetCar(DatasetBase):
# generated with torch.randperm(889, generator=torch.Generator().manual_seed(0))[:189]
TEST_INDICES = {
550, 592, 229, 547, 62, 464, 798, 836, 5, 732, 876, 843, 367, 496,
142, 87, 88, 101, 303, 352, 517, 8, 462, 123, 348, 714, 384, 190,
505, 349, 174, 805, 156, 417, 764, 788, 645, 108, 829, 227, 555, 412,
854, 21, 55, 210, 188, 274, 646, 320, 4, 344, 525, 118, 385, 669,
113, 387, 222, 786, 515, 407, 14, 821, 239, 773, 474, 725, 620, 401,
546, 512, 837, 353, 537, 770, 41, 81, 664, 699, 373, 632, 411, 212,
678, 528, 120, 644, 500, 767, 790, 16, 316, 259, 134, 531, 479, 356,
641, 98, 294, 96, 318, 808, 663, 447, 445, 758, 656, 177, 734, 623,
216, 189, 133, 427, 745, 72, 257, 73, 341, 584, 346, 840, 182, 333,
218, 602, 99, 140, 809, 878, 658, 779, 65, 708, 84, 653, 542, 111,
129, 676, 163, 203, 250, 209, 11, 508, 671, 628, 112, 317, 114, 15,
723, 746, 765, 720, 828, 662, 665, 399, 162, 495, 135, 121, 181, 615,
518, 749, 155, 363, 195, 551, 650, 877, 116, 38, 338, 849, 334, 109,
580, 523, 631, 713, 607, 651, 168,
}
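    # reproduction sketch for the fixed split above:
    # regenerated = set(torch.randperm(889, generator=torch.Generator().manual_seed(0))[:189].tolist())
    # assert regenerated == ShapenetCar.TEST_INDICES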
def __init__(
self,
split,
radius_graph_r=None,
radius_graph_max_num_neighbors=None,
num_input_points_ratio=None,
num_query_points_ratio=None,
grid_resolution=None,
num_supernodes=None,
standardize_query_pos=False,
concat_pos_to_sdf=False,
global_root=None,
local_root=None,
seed=None,
**kwargs,
):
super().__init__(**kwargs)
self.split = split
self.radius_graph_r = radius_graph_r
self.radius_graph_max_num_neighbors = radius_graph_max_num_neighbors or int(1e10)
self.num_supernodes = num_supernodes
self.seed = seed
if num_input_points_ratio is None:
self.num_input_points_ratio = None
else:
self.num_input_points_ratio = to_2tuple(num_input_points_ratio)
self.num_query_points_ratio = num_query_points_ratio
if grid_resolution is not None:
self.grid_resolution = to_3tuple(grid_resolution)
else:
self.grid_resolution = None
# define spatial min/max of simulation (for normalizing to [0, 1] and then scaling to [0, 200] for pos_embed)
# min: [-1.7978, -0.7189, -4.2762]
# max: [1.8168, 4.3014, 5.8759]
self.domain_min = torch.tensor([-2.0, -1.0, -4.5])
self.domain_max = torch.tensor([2.0, 4.5, 6.0])
self.scale = 200
self.standardize_query_pos = standardize_query_pos
self.concat_pos_to_sdf = concat_pos_to_sdf
# mean/std for normalization (calculated on the 700 train samples)
# import torch
# from datasets.shapenet_car import ShapenetCar
# ds = ShapenetCar(global_root="/local00/bioinf/shapenet_car", split="train")
# targets = [ds.getitem_pressure(i) for i in range(len(ds))]
# targets = torch.stack(targets)
# targets.mean()
# targets.std()
self.mean = torch.tensor(-36.3099)
self.std = torch.tensor(48.5743)
# source_root
global_root, local_root = self._get_roots(global_root, local_root, "shapenet_car")
if local_root is None:
# load data from global_root
self.source_root = global_root / "preprocessed"
self.logger.info(f"data_source (global): '{self.source_root}'")
else:
# load data from local_root
self.source_root = local_root / "shapenet_car"
if is_data_rank0():
# copy data from global to local
self.logger.info(f"data_source (global): '{global_root}'")
self.logger.info(f"data_source (local): '{self.source_root}'")
if not self.source_root.exists():
self.logger.info(
f"copying {(global_root / 'preprocessed').as_posix()} "
f"to {(self.source_root / 'preprocessed').as_posix()}"
)
shutil.copytree(global_root / "preprocessed", self.source_root / "preprocessed")
self.source_root = self.source_root / "preprocessed"
barrier()
assert self.source_root.exists(), f"'{self.source_root.as_posix()}' doesn't exist"
assert self.source_root.name == "preprocessed", f"'{self.source_root.as_posix()}' is not preprocessed folder"
# discover uris
self.uris = []
for i in range(9):
param_uri = self.source_root / f"param{i}"
for name in sorted(os.listdir(param_uri)):
sample_uri = param_uri / name
if sample_uri.is_dir():
self.uris.append(sample_uri)
assert len(self.uris) == 889, f"found {len(self.uris)} uris instead of 889"
# split into train/test uris
if split == "train":
train_idxs = [i for i in range(len(self.uris)) if i not in self.TEST_INDICES]
self.uris = [self.uris[train_idx] for train_idx in train_idxs]
assert len(self.uris) == 700
elif split == "test":
self.uris = [self.uris[test_idx] for test_idx in self.TEST_INDICES]
assert len(self.uris) == 189
else:
raise NotImplementedError
def __len__(self):
return len(self.uris)
# noinspection PyUnusedLocal
def getitem_pressure(self, idx, ctx=None):
p = torch.load(self.uris[idx] / "pressure.th")
p -= self.mean
p /= self.std
return p
# noinspection PyUnusedLocal
def getitem_grid_pos(self, idx=None, ctx=None):
if ctx is not None and "grid_pos" in ctx:
return ctx["grid_pos"]
# generate positions for a regular grid (e.g. for GINO encoder)
assert self.grid_resolution is not None
x_linspace = torch.linspace(0, self.scale, self.grid_resolution[0])
y_linspace = torch.linspace(0, self.scale, self.grid_resolution[1])
z_linspace = torch.linspace(0, self.scale, self.grid_resolution[2])
        # generate positions (grid_resolution[0] * grid_resolution[1] * grid_resolution[2], 3)
meshgrid = torch.meshgrid(x_linspace, y_linspace, z_linspace, indexing="ij")
grid_pos = torch.stack(meshgrid).flatten(start_dim=1).T
#
if ctx is not None:
assert "grid_pos" not in ctx
ctx["grid_pos"] = grid_pos
return grid_pos
def getitem_mesh_to_grid_edges(self, idx, ctx=None):
assert self.grid_resolution is not None
assert self.radius_graph_r is not None
mesh_pos = self.getitem_mesh_pos(idx, ctx=ctx)
grid_pos = self.getitem_grid_pos(idx, ctx=ctx)
# create graph between mesh and regular grid points
edges = radius(
x=mesh_pos,
y=grid_pos,
r=self.radius_graph_r,
max_num_neighbors=self.radius_graph_max_num_neighbors,
).T
        # edges is (num_edges, 2)
return edges
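    # edge semantics sketch: torch_geometric's radius(x=mesh_pos, y=grid_pos, r=...) stacks indices with
    # row 0 indexing y (the grid) and row 1 indexing x (the mesh), so after .T each edge is (grid_idx, mesh_idx);
    # the collators rely on this ordering when adding the running grid/mesh offsets to columns 0/1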
def getitem_grid_to_query_edges(self, idx, ctx=None):
assert self.grid_resolution is not None
assert self.radius_graph_r is not None
query_pos = self.getitem_query_pos(idx, ctx=ctx)
grid_pos = self.getitem_grid_pos(idx, ctx=ctx)
# create graph between mesh and regular grid points
edges = radius(
x=grid_pos,
y=query_pos,
r=self.radius_graph_r,
max_num_neighbors=int(1e10),
).T
        # edges is (num_edges, 2)
return edges
def getitem_mesh_pos(self, idx, ctx=None):
if ctx is not None and "mesh_pos" in ctx:
return ctx["mesh_pos"]
mesh_pos = self.getitem_all_pos(idx, ctx=ctx)
# sample mesh points
if self.num_input_points_ratio is not None:
if self.split == "test":
assert self.seed is not None
if self.seed is not None:
# deterministically downsample for evaluation
generator = torch.Generator().manual_seed(self.seed + int(idx))
else:
generator = None
# get number of samples
if self.num_input_points_ratio[0] == self.num_input_points_ratio[1]:
# fixed num_input_points_ratio
end = int(len(mesh_pos) * self.num_input_points_ratio[0])
else:
# variable num_input_points_ratio
lb, ub = self.num_input_points_ratio
num_input_points_ratio = torch.rand(size=(1,), generator=generator).item() * (ub - lb) + lb
end = int(len(mesh_pos) * num_input_points_ratio)
# uniform sampling
perm = torch.randperm(len(mesh_pos), generator=generator)[:end]
mesh_pos = mesh_pos[perm]
if ctx is not None:
ctx["mesh_pos"] = mesh_pos
return mesh_pos
def getitem_all_pos(self, idx, ctx=None):
if ctx is not None and "all_pos" in ctx:
return ctx["all_pos"]
all_pos = torch.load(self.uris[idx] / "mesh_points.th")
# rescale for sincos positional embedding
all_pos.sub_(self.domain_min).div_(self.domain_max - self.domain_min).mul_(self.scale)
assert torch.all(0 < all_pos)
assert torch.all(all_pos < self.scale)
if ctx is not None:
ctx["all_pos"] = all_pos
return all_pos
def getitem_query_pos(self, idx, ctx=None):
if ctx is not None and "query_pos" in ctx:
return ctx["query_pos"]
query_pos = self.getitem_all_pos(idx, ctx=ctx)
# sample query points
if self.num_query_points_ratio is not None:
if self.split == "test":
assert self.seed is not None
if self.seed is not None:
# deterministically downsample for evaluation
generator = torch.Generator().manual_seed(self.seed + int(idx))
else:
generator = None
# get number of samples
end = int(len(query_pos) * self.num_query_points_ratio)
# uniform sampling
perm = torch.randperm(len(query_pos), generator=generator)[:end]
query_pos = query_pos[perm]
# shift query_pos to [-1, 1] (required for torch.nn.functional.grid_sample)
if self.standardize_query_pos:
query_pos = query_pos / (self.scale / 2) - 1
if ctx is not None:
ctx["query_pos"] = query_pos
return query_pos
def _get_generator(self, idx):
if self.split == "test":
return torch.Generator().manual_seed(int(idx) + (self.seed or 0))
if self.seed is not None:
return torch.Generator().manual_seed(int(idx) + self.seed)
return None
# noinspection PyUnusedLocal
def getitem_mesh_edges(self, idx, ctx=None):
assert self.radius_graph_r is not None
# load mesh positions
mesh_pos = self.getitem_mesh_pos(idx, ctx=ctx)
if self.num_supernodes is None:
# create graph
edges = radius_graph(
x=mesh_pos,
r=self.radius_graph_r,
max_num_neighbors=self.radius_graph_max_num_neighbors,
loop=True,
)
else:
# select supernodes
generator = self._get_generator(idx)
perm = torch.randperm(len(mesh_pos), generator=generator)[:self.num_supernodes]
supernodes_pos = mesh_pos[perm]
            # create edges: these may or may not include a self-loop depending on how many neighbors are found;
            # if too many neighbors are found, neighbors are selected randomly, which can discard the self-loop
edges = radius(
x=mesh_pos,
y=supernodes_pos,
r=self.radius_graph_r,
max_num_neighbors=self.radius_graph_max_num_neighbors,
)
            # map the local supernode indices (into supernodes_pos) back to global mesh indices via perm
edges[0] = perm[edges[0]]
return edges.T
# noinspection PyUnusedLocal
def getitem_sdf(self, idx, ctx=None):
assert self.grid_resolution is not None
assert all(self.grid_resolution[0] == grid_resolution for grid_resolution in self.grid_resolution[1:])
sdf = torch.load(self.uris[idx] / f"sdf_res{self.grid_resolution[0]}.th")
# check that sdf features were generated with correct positions by checking the distance to the nearest point
# from the domain minimum/maximum
# mesh_pos = torch.load(self.uris[idx] / "mesh_points.th")
# minpoint_dists = (self.domain_min[None, :] - mesh_pos).norm(p=2, dim=1)
# maxpoint_dists = (self.domain_max[None, :] - mesh_pos).norm(p=2, dim=1)
# assert torch.allclose(sdf[0, 0, 0], minpoint_dists.min()), f"{sdf[0, 0, 0]} != {minpoint_dists.min()}"
# assert torch.allclose(sdf[-1, -1, -1], maxpoint_dists.min()), f"{sdf[-1, -1, -1]} != {maxpoint_dists.min()}"
if self.concat_pos_to_sdf:
# add position to sdf (GINO uses this for interpolated FNO model)
x_linspace = torch.linspace(-1, 1, self.grid_resolution[0])
y_linspace = torch.linspace(-1, 1, self.grid_resolution[1])
z_linspace = torch.linspace(-1, 1, self.grid_resolution[2])
grid_pos = torch.meshgrid(x_linspace, y_linspace, z_linspace, indexing="ij")
# stack features (models expect dim_last format)
sdf = torch.stack([sdf, *grid_pos], dim=-1)
else:
sdf = sdf.unsqueeze(-1)
return sdf
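    # resulting shapes (sketch): with grid_resolution = (r, r, r) the returned sdf is (r, r, r, 1) by default
    # and (r, r, r, 4) with concat_pos_to_sdf (sdf value plus the x/y/z grid coordinates in [-1, 1])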
def getitem_interpolated(self, idx, ctx=None):
assert self.grid_resolution is not None
assert self.standardize_query_pos
mesh_pos = self.getitem_mesh_pos(idx, ctx=ctx)
        # generate grid positions (different from getitem_grid_pos because the interpolation uses xy indexing)
        # indexing=ij would give the same result, since the mapping and its inverse account for the indexing order,
        # but xy was chosen for consistency with scipy.interpolate
x_linspace = torch.linspace(0, self.scale, self.grid_resolution[0])
y_linspace = torch.linspace(0, self.scale, self.grid_resolution[1])
z_linspace = torch.linspace(0, self.scale, self.grid_resolution[2])
grid_pos = torch.meshgrid(x_linspace, y_linspace, z_linspace, indexing="xy")
grid = torch.from_numpy(
scipy.interpolate.griddata(
mesh_pos.unbind(1),
torch.ones_like(mesh_pos),
grid_pos,
method="linear",
fill_value=0.,
),
).float()
# check for correctness of interpolation
# import matplotlib.pyplot as plt
# import os
# os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# plt.scatter(mesh_pos[:, 0], mesh_pos[:, 1])
# plt.show()
# plt.clf()
# plt.imshow(grid.sum(dim=2).sum(dim=2), origin="lower")
# plt.show()
# plt.clf()
# import torch.nn.functional as F
# grid = einops.rearrange(grid, "h w d dim -> 1 dim h w d")
# query_pos = self.getitem_query_pos(idx, ctx=ctx)
# query_pos = einops.rearrange(query_pos, "num_points ndim -> 1 num_points 1 1 ndim")
# mesh_values = F.grid_sample(input=grid, grid=query_pos, align_corners=False).squeeze(-1)
# plt.scatter(*query_pos.squeeze().unbind(1), c=mesh_values[0, 0, :, 0])
# plt.show()
# plt.clf()
return grid | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/datasets/base/dataset_base.py | src/datasets/base/dataset_base.py | from kappadata.datasets import KDDataset
from providers.dataset_config_provider import DatasetConfigProvider
from providers.path_provider import PathProvider
from utils.collator_from_kwargs import collator_from_kwargs
from utils.factory import create_collection
from utils.param_checking import to_path
class DatasetBase(KDDataset):
def __init__(
self,
collators=None,
dataset_config_provider: DatasetConfigProvider = None,
path_provider: PathProvider = None,
**kwargs,
):
collators = create_collection(collators, collator_from_kwargs)
super().__init__(collators=collators, **kwargs)
self.dataset_config_provider = dataset_config_provider
self.path_provider = path_provider
def _get_roots(self, global_root, local_root, dataset_identifier):
# automatically populate global_root/local_root if they are not defined explicitly
global_root = self._get_global_root(global_root, dataset_identifier)
if local_root is None:
if self.dataset_config_provider is not None:
source_mode = self.dataset_config_provider.get_data_source_mode(dataset_identifier)
# use local by default
if source_mode in [None, "local"]:
local_root = self.dataset_config_provider.get_local_dataset_path()
else:
local_root = to_path(local_root)
return global_root, local_root
def _get_global_root(self, global_root, dataset_identifier):
if global_root is None:
global_root = self.dataset_config_provider.get_global_dataset_path(dataset_identifier)
else:
global_root = to_path(global_root)
return global_root
@staticmethod
def _to_consistent_split(split, has_train=True, has_val=True, has_test=True):
if has_train and split in ["train", "training"]:
return "train"
if has_val and split in ["val", "valid", "validation"]:
return "val"
if has_test and split in ["test", "testing"]:
return "test"
raise NotImplementedError(
f"invalid split '{split}' "
f"(has_train={has_train} has_val={has_val} has_test={has_test})"
)
def __repr__(self):
return str(self)
def __str__(self):
return type(self).__name__
def __len__(self):
raise NotImplementedError
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/datasets/base/__init__.py | src/datasets/base/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/datasets/collators/rans_interpolated_collator.py | src/datasets/collators/rans_interpolated_collator.py | import einops
import torch
from kappadata.collators import KDSingleCollator
from kappadata.wrappers import ModeWrapper
from torch.utils.data import default_collate
class RansInterpolatedCollator(KDSingleCollator):
def collate(self, batch, dataset_mode, ctx=None):
# make sure that batch was not collated
assert isinstance(batch, (tuple, list)) and isinstance(batch[0], tuple)
batch, ctx = zip(*batch)
# properties in context can have variable shapes (e.g. perm) -> delete ctx
ctx = {}
collated_batch = {}
# to sparse tensor: batch_size * (num_mesh_points, ndim) -> (batch_size * num_mesh_points, ndim)
query_pos = []
query_lens = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="query_pos")
query_lens.append(len(item))
query_pos.append(item)
collated_batch["query_pos"] = torch.concat(query_pos)
# to sparse tensor: batch_size * (num_mesh_points,) -> (batch_size * num_mesh_points, 1)
pressure = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="pressure")
assert len(item) == query_lens[i]
pressure.append(item)
collated_batch["pressure"] = torch.concat(pressure).unsqueeze(1)
# create batch_idx tensor
batch_idx = torch.empty(sum(query_lens), dtype=torch.long)
start = 0
cur_batch_idx = 0
for i in range(len(query_lens)):
end = start + query_lens[i]
batch_idx[start:end] = cur_batch_idx
start = end
cur_batch_idx += 1
ctx["batch_idx"] = batch_idx
# create query_batch_idx tensor (required for test loss)
query_batch_idx = torch.empty(sum(query_lens), dtype=torch.long)
start = 0
cur_query_batch_idx = 0
for i in range(len(query_lens)):
end = start + query_lens[i]
query_batch_idx[start:end] = cur_query_batch_idx
start = end
cur_query_batch_idx += 1
ctx["query_batch_idx"] = query_batch_idx
# normal collation for other properties (timestep, velocity, geometry2d)
result = []
for item in dataset_mode.split(" "):
if item in collated_batch:
result.append(collated_batch[item])
else:
result.append(
default_collate([
ModeWrapper.get_item(mode=dataset_mode, batch=sample, item=item)
for sample in batch
])
)
return tuple(result), ctx
@property
def default_collate_mode(self):
raise RuntimeError
def __call__(self, batch):
raise NotImplementedError("wrap KDSingleCollator with KDSingleCollatorWrapper")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/datasets/collators/cfd_interpolated_collator.py | src/datasets/collators/cfd_interpolated_collator.py | import einops
import torch
from kappadata.collators import KDSingleCollator
from kappadata.wrappers import ModeWrapper
from torch.utils.data import default_collate
from torch.nn.utils.rnn import pad_sequence
class CfdInterpolatedCollator(KDSingleCollator):
def collate(self, batch, dataset_mode, ctx=None):
# make sure that batch was not collated
assert isinstance(batch, (tuple, list)) and isinstance(batch[0], tuple)
batch, ctx = zip(*batch)
# properties in context can have variable shapes (e.g. perm) -> delete ctx
ctx = {}
# collect collated properties
collated_batch = {}
# query_pos to sparse tensor: batch_size * (num_mesh_points, ndim) -> (batch_size * num_mesh_points, ndim)
# target to sparse tensor: batch_size * (num_mesh_points, dim) -> (batch_size * num_mesh_points, dim)
query_pos = []
query_lens = []
target = []
for i in range(len(batch)):
query_pos_item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="query_pos")
target_item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="target")
assert len(query_pos_item) == len(target_item)
query_lens.append(len(query_pos_item))
query_pos.append(query_pos_item)
target.append(target_item)
assert all(query_lens[0] == query_len for query_len in query_lens[1:])
collated_batch["query_pos"] = pad_sequence(query_pos, batch_first=True)
collated_batch["target"] = torch.concat(target)
# normal collation for other properties (timestep, velocity, geometry2d)
result = []
for item in dataset_mode.split(" "):
if item in collated_batch:
result.append(collated_batch[item])
else:
result.append(
default_collate([
ModeWrapper.get_item(mode=dataset_mode, batch=sample, item=item)
for sample in batch
])
)
return tuple(result), ctx
@property
def default_collate_mode(self):
raise RuntimeError
def __call__(self, batch):
raise NotImplementedError("wrap KDSingleCollator with KDSingleCollatorWrapper")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/datasets/collators/lagrangian_simformer_collator.py | src/datasets/collators/lagrangian_simformer_collator.py | import einops
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import default_collate
from kappadata.collators import KDSingleCollator
from kappadata.wrappers import ModeWrapper
from torch_geometric.data import Data
from torch_geometric.transforms import KNNGraph
class LagrangianSimformerCollator(KDSingleCollator):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __call__(self, batch):
raise NotImplementedError("wrap KDSingleCollator with KDSingleCollatorWrapper")
def collate(self, batch, dataset_mode, ctx=None):
# make sure that batch was not collated
assert isinstance(batch, (tuple, list)) and isinstance(batch[0], tuple)
batch, ctx = zip(*batch)
# extract necessary things from context before deleting it
num_grid_points = ctx[0].get("num_grid_points", 0)
for i in range(1, len(ctx)):
assert ctx[i].get("num_grid_points", 0) == num_grid_points
time_idx = torch.stack([ctx[i]['time_idx'] for i in range(len(batch))])
traj_idx = torch.tensor([ctx[i]['traj_idx'] for i in range(len(batch))])
# properties in context can have variable shapes (e.g. perm) -> delete ctx
ctx = {}
# Add idx ctx
ctx['time_idx'] = time_idx
ctx['traj_idx'] = traj_idx
collated_batch = {}
lens = None
if ModeWrapper.has_item(mode=dataset_mode, item="x"):
# create x
# batch_size * (num_input_timesteps + 1, num_channels, num_points) ->
# (batch_size * num_points, num_input_timesteps + 1, num_channels)
x = [ModeWrapper.get_item(mode=dataset_mode, batch=sample, item="x") for sample in batch]
if lens is None:
lens = [xx.size(2) for xx in x]
x_flat = einops.rearrange(torch.concat(x, dim=2), "timesteps channels flat -> flat timesteps channels")
collated_batch["x"] = x_flat
else:
raise NotImplementedError
pos_items = ("curr_pos", "target_pos_encode")
for pos_item in pos_items:
if ModeWrapper.has_item(mode=dataset_mode, item=pos_item):
# create flat_pos
# batch_size * (num_points, ndim) -> (batch_size * num_points, ndim) where num_points is variable
pos = [ModeWrapper.get_item(mode=dataset_mode, batch=sample, item=pos_item) for sample in batch]
if lens is None:
lens = [c.size(0) for c in pos]
flat_pos = torch.concat(pos)
collated_batch[pos_item] = flat_pos
assert ModeWrapper.has_item(mode=dataset_mode, item="edge_index")
# flatten edge_index
# batch_size * (num_points, ndim) -> (batch_size * num_points, ndim) where num_points is variable
edge_index = []
edge_index_offset = 0
for i in range(len(batch)):
idx = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="edge_index") + edge_index_offset
edge_index.append(idx)
edge_index_offset += lens[i]
collated_batch["edge_index"] = torch.concat(edge_index)
if ModeWrapper.has_item(mode=dataset_mode, item="edge_index_target"):
edge_index_target = []
edge_index_offset = 0
for i in range(len(batch)):
idx = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="edge_index_target") + edge_index_offset
edge_index_target.append(idx)
edge_index_offset += lens[i]
collated_batch["edge_index_target"] = torch.concat(edge_index_target)
if ModeWrapper.has_item(mode=dataset_mode, item="edge_features"):
edge_features = []
edge_index_offset = 0
for i in range(len(batch)):
idx = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="edge_features") + edge_index_offset
edge_features.append(idx)
edge_index_offset += lens[i]
collated_batch["edge_features"] = torch.concat(edge_features)
if ModeWrapper.has_item(mode=dataset_mode, item="perm"):
perm_batch = []
for i in range(len(batch)):
perm, n_particles = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="perm")
perm_batch.append(perm + i*n_particles)
perm_batch = torch.concat(perm_batch)
collated_batch["perm"] = perm_batch
if lens is not None and ctx is not None:
            batch_size = len(lens)
# create batch_idx tensor
batch_idx = torch.empty(sum(lens), dtype=torch.long)
start = 0
cur_batch_idx = 0
for i in range(len(lens)):
end = start + lens[i]
batch_idx[start:end] = cur_batch_idx
start = end
cur_batch_idx += 1
ctx["batch_idx"] = batch_idx
            # the unbatch index is built from the full target lengths (recomputed below from target_acc)
target = [ModeWrapper.get_item(mode=dataset_mode, batch=sample, item='target_acc') for sample in batch]
lens = [xx.size(0) for xx in target]
maxlen = max(lens)
unbatch_idx = torch.empty(maxlen * batch_size, dtype=torch.long)
unbatch_select = []
unbatch_start = 0
cur_unbatch_idx = 0
for i in range(len(lens)):
unbatch_end = unbatch_start + lens[i]
unbatch_idx[unbatch_start:unbatch_end] = cur_unbatch_idx
unbatch_select.append(cur_unbatch_idx)
cur_unbatch_idx += 1
unbatch_start = unbatch_end
padding = maxlen - lens[i]
if padding > 0:
unbatch_end = unbatch_start + padding
unbatch_idx[unbatch_start:unbatch_end] = cur_unbatch_idx
cur_unbatch_idx += 1
unbatch_start = unbatch_end
unbatch_select = torch.tensor(unbatch_select)
ctx["unbatch_idx"] = unbatch_idx
ctx["unbatch_select"] = unbatch_select
# normal collation for other properties (timestep, velocity, geometry2d)
result = []
for item in dataset_mode.split(" "):
if item in collated_batch:
result.append(collated_batch[item])
else:
result.append(
default_collate([
ModeWrapper.get_item(mode=dataset_mode, batch=sample, item=item)
for sample in batch
])
)
return tuple(result), ctx
@property
def default_collate_mode(self):
raise RuntimeError | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/datasets/collators/rans_baseline_collator.py | src/datasets/collators/rans_baseline_collator.py | import einops
import torch
from kappadata.collators import KDSingleCollator
from kappadata.wrappers import ModeWrapper
from torch.utils.data import default_collate
class RansBaselineCollator(KDSingleCollator):
def collate(self, batch, dataset_mode, ctx=None):
# make sure that batch was not collated
assert isinstance(batch, (tuple, list)) and isinstance(batch[0], tuple)
batch, ctx = zip(*batch)
# properties in context can have variable shapes (e.g. perm) -> delete ctx
ctx = {}
collated_batch = {}
# to sparse tensor: batch_size * (num_mesh_points, ndim) -> (batch_size * num_mesh_points, ndim)
mesh_pos = []
mesh_lens = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="mesh_pos")
mesh_lens.append(len(item))
mesh_pos.append(item)
collated_batch["mesh_pos"] = torch.concat(mesh_pos)
# to sparse tensor: batch_size * (num_grid_points, ndim) -> (batch_size * num_grid_points, ndim)
grid_pos = []
grid_lens = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="grid_pos")
grid_lens.append(len(item))
grid_pos.append(item)
collated_batch["grid_pos"] = torch.concat(grid_pos)
# to sparse tensor: batch_size * (num_mesh_points, ndim) -> (batch_size * num_mesh_points, ndim)
query_pos = []
query_lens = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="query_pos")
query_lens.append(len(item))
query_pos.append(item)
collated_batch["query_pos"] = torch.concat(query_pos)
# sparse mesh_to_grid_edges: batch_size * (num_points, ndim) -> (batch_size * num_points, ndim)
mesh_to_grid_edges = []
mesh_offset = 0
grid_offset = 0
for i in range(len(batch)):
idx = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="mesh_to_grid_edges")
idx[:, 0] += grid_offset
idx[:, 1] += mesh_offset
mesh_to_grid_edges.append(idx)
mesh_offset += mesh_lens[i]
grid_offset += grid_lens[i]
collated_batch["mesh_to_grid_edges"] = torch.concat(mesh_to_grid_edges)
# sparse grid_to_query_edges: batch_size * (num_points, ndim) -> (batch_size * num_points, ndim)
grid_to_query_edges = []
mesh_offset = 0
grid_offset = 0
for i in range(len(batch)):
idx = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="grid_to_query_edges")
idx[:, 0] += mesh_offset
idx[:, 1] += grid_offset
grid_to_query_edges.append(idx)
mesh_offset += mesh_lens[i]
grid_offset += grid_lens[i]
collated_batch["grid_to_query_edges"] = torch.concat(grid_to_query_edges)
# to sparse tensor: batch_size * (num_mesh_points,) -> (batch_size * num_mesh_points, 1)
pressure = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="pressure")
assert len(item) == query_lens[i]
pressure.append(item)
collated_batch["pressure"] = torch.concat(pressure).unsqueeze(1)
# create batch_idx tensor
batch_idx = torch.empty(sum(query_lens), dtype=torch.long)
start = 0
cur_batch_idx = 0
for i in range(len(query_lens)):
end = start + query_lens[i]
batch_idx[start:end] = cur_batch_idx
start = end
cur_batch_idx += 1
ctx["batch_idx"] = batch_idx
# create query_batch_idx tensor (required for test loss)
query_batch_idx = torch.empty(sum(query_lens), dtype=torch.long)
start = 0
cur_query_batch_idx = 0
for i in range(len(query_lens)):
end = start + query_lens[i]
query_batch_idx[start:end] = cur_query_batch_idx
start = end
cur_query_batch_idx += 1
ctx["query_batch_idx"] = query_batch_idx
# normal collation for other properties (timestep, velocity, geometry2d)
result = []
for item in dataset_mode.split(" "):
if item in collated_batch:
result.append(collated_batch[item])
else:
result.append(
default_collate([
ModeWrapper.get_item(mode=dataset_mode, batch=sample, item=item)
for sample in batch
])
)
return tuple(result), ctx
@property
def default_collate_mode(self):
raise RuntimeError
def __call__(self, batch):
raise NotImplementedError("wrap KDSingleCollator with KDSingleCollatorWrapper")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/datasets/collators/rans_simformer_nognn_collator.py | src/datasets/collators/rans_simformer_nognn_collator.py | import torch
from kappadata.collators import KDSingleCollator
from kappadata.wrappers import ModeWrapper
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import default_collate
class RansSimformerNognnCollator(KDSingleCollator):
def collate(self, batch, dataset_mode, ctx=None):
# make sure that batch was not collated
assert isinstance(batch, (tuple, list)) and isinstance(batch[0], tuple)
batch, ctx = zip(*batch)
# properties in context can have variable shapes (e.g. perm) -> delete ctx
ctx = {}
# dict to hold collated items
collated_batch = {}
# sparse mesh_pos: batch_size * (num_points, ndim) -> (batch_size * num_points, ndim)
mesh_pos = []
mesh_lens = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="mesh_pos")
mesh_lens.append(len(item))
mesh_pos.append(item)
collated_batch["mesh_pos"] = torch.concat(mesh_pos)
# dense_query_pos: batch_size * (num_points, ndim) -> (batch_size, max_num_points, ndim)
# sparse target (decoder output is converted to sparse format before loss)
pressures = [ModeWrapper.get_item(mode=dataset_mode, batch=sample, item="pressure") for sample in batch]
# predict all positions -> pad
query_pos = []
query_lens = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="query_pos")
assert len(item) == len(pressures[i])
query_lens.append(len(item))
query_pos.append(item)
collated_batch["query_pos"] = pad_sequence(query_pos, batch_first=True)
collated_batch["pressure"] = torch.concat(pressures).unsqueeze(1)
# create batch_idx tensor
batch_size = len(mesh_lens)
batch_idx = torch.empty(sum(mesh_lens), dtype=torch.long)
start = 0
cur_batch_idx = 0
for i in range(len(mesh_lens)):
end = start + mesh_lens[i]
batch_idx[start:end] = cur_batch_idx
start = end
cur_batch_idx += 1
ctx["batch_idx"] = batch_idx
# create query_batch_idx tensor (required for test loss)
query_batch_idx = torch.empty(sum(query_lens), dtype=torch.long)
start = 0
cur_query_batch_idx = 0
for i in range(len(query_lens)):
end = start + query_lens[i]
query_batch_idx[start:end] = cur_query_batch_idx
start = end
cur_query_batch_idx += 1
ctx["query_batch_idx"] = query_batch_idx
# create unbatch_idx tensors (unbatch via torch_geometrics.utils.unbatch)
# e.g. batch_size=2, num_points=[2, 3] -> unbatch_idx=[0, 0, 1, 2, 2, 2] unbatch_select=[0, 2]
# then unbatching can be done via unbatch(dense, unbatch_idx)[unbatch_select]
maxlen = max(query_lens)
unbatch_idx = torch.empty(maxlen * batch_size, dtype=torch.long)
unbatch_select = []
unbatch_start = 0
cur_unbatch_idx = 0
for i in range(len(query_lens)):
unbatch_end = unbatch_start + query_lens[i]
unbatch_idx[unbatch_start:unbatch_end] = cur_unbatch_idx
unbatch_select.append(cur_unbatch_idx)
cur_unbatch_idx += 1
unbatch_start = unbatch_end
padding = maxlen - query_lens[i]
if padding > 0:
unbatch_end = unbatch_start + padding
unbatch_idx[unbatch_start:unbatch_end] = cur_unbatch_idx
cur_unbatch_idx += 1
unbatch_start = unbatch_end
unbatch_select = torch.tensor(unbatch_select)
ctx["unbatch_idx"] = unbatch_idx
ctx["unbatch_select"] = unbatch_select
# normal collation for other properties
result = []
for item in dataset_mode.split(" "):
if item in collated_batch:
result.append(collated_batch[item])
else:
result.append(
default_collate([
ModeWrapper.get_item(mode=dataset_mode, batch=sample, item=item)
for sample in batch
])
)
return tuple(result), ctx
@property
def default_collate_mode(self):
raise RuntimeError
def __call__(self, batch):
raise NotImplementedError("wrap KDSingleCollator with KDSingleCollatorWrapper")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/datasets/collators/cfd_simformer_collator.py | src/datasets/collators/cfd_simformer_collator.py | import einops
import torch
from kappadata.collators import KDSingleCollator
from kappadata.wrappers import ModeWrapper
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import default_collate
class CfdSimformerCollator(KDSingleCollator):
def __init__(self, num_supernodes=None, **kwargs):
super().__init__(**kwargs)
self.num_supernodes = num_supernodes
def collate(self, batch, dataset_mode, ctx=None):
# make sure that batch was not collated
assert isinstance(batch, (tuple, list)) and isinstance(batch[0], tuple)
batch, ctx = zip(*batch)
# properties in context can have variable shapes (e.g. perm) -> delete ctx
ctx = {}
# collect collated properties
collated_batch = {}
# to sparse tensor: batch_size * (num_mesh_points, ndim) -> (batch_size * num_mesh_points, ndim)
mesh_pos = []
mesh_lens = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="mesh_pos")
mesh_lens.append(len(item))
mesh_pos.append(item)
collated_batch["mesh_pos"] = torch.concat(mesh_pos)
# select supernodes
if self.num_supernodes is not None:
supernodes_offset = 0
supernode_idxs = []
for i in range(len(mesh_lens)):
perm = torch.randperm(len(mesh_pos[i]))[:self.num_supernodes] + supernodes_offset
supernode_idxs.append(perm)
supernodes_offset += mesh_lens[i]
ctx["supernode_idxs"] = torch.concat(supernode_idxs)
# create batch_idx tensor
batch_idx = torch.empty(sum(mesh_lens), dtype=torch.long)
start = 0
cur_batch_idx = 0
for i in range(len(mesh_lens)):
end = start + mesh_lens[i]
batch_idx[start:end] = cur_batch_idx
start = end
cur_batch_idx += 1
ctx["batch_idx"] = batch_idx
# batch_size * (num_mesh_points, num_input_timesteps * num_channels) ->
# (batch_size * num_mesh_points, num_input_timesteps * num_channels)
x = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="x")
assert len(item) == mesh_lens[i]
x.append(item)
collated_batch["x"] = torch.concat(x)
# to sparse tensor: batch_size * (num_grid_points, ndim) -> (batch_size * num_grid_points, ndim)
grid_pos = []
grid_lens = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="mesh_pos")
grid_lens.append(len(item))
grid_pos.append(item)
collated_batch["grid_pos"] = torch.concat(grid_pos)
# create batch_idx tensor
batch_idx = torch.empty(sum(mesh_lens), dtype=torch.long)
start = 0
cur_batch_idx = 0
for i in range(len(mesh_lens)):
end = start + mesh_lens[i]
batch_idx[start:end] = cur_batch_idx
start = end
cur_batch_idx += 1
ctx["batch_idx"] = batch_idx
# query_pos to sparse tensor: batch_size * (num_mesh_points, ndim) -> (batch_size * num_mesh_points, ndim)
# target to sparse tensor: batch_size * (num_mesh_points, dim) -> (batch_size * num_mesh_points, dim)
query_pos = []
query_lens = []
target = []
for i in range(len(batch)):
query_pos_item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="query_pos")
target_item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="target")
assert len(query_pos_item) == len(target_item)
query_lens.append(len(query_pos_item))
query_pos.append(query_pos_item)
target.append(target_item)
collated_batch["query_pos"] = pad_sequence(query_pos, batch_first=True)
collated_batch["target"] = torch.concat(target)
# create unbatch_idx tensors (unbatch via torch_geometrics.utils.unbatch)
# e.g. batch_size=2, num_points=[2, 3] -> unbatch_idx=[0, 0, 1, 2, 2, 2] unbatch_select=[0, 2]
# then unbatching can be done via unbatch(dense, unbatch_idx)[unbatch_select]
batch_size = len(query_lens)
maxlen = max(query_lens)
unbatch_idx = torch.empty(maxlen * batch_size, dtype=torch.long)
unbatch_select = []
unbatch_start = 0
cur_unbatch_idx = 0
for i in range(len(query_lens)):
unbatch_end = unbatch_start + query_lens[i]
unbatch_idx[unbatch_start:unbatch_end] = cur_unbatch_idx
unbatch_select.append(cur_unbatch_idx)
cur_unbatch_idx += 1
unbatch_start = unbatch_end
padding = maxlen - query_lens[i]
if padding > 0:
unbatch_end = unbatch_start + padding
unbatch_idx[unbatch_start:unbatch_end] = cur_unbatch_idx
cur_unbatch_idx += 1
unbatch_start = unbatch_end
unbatch_select = torch.tensor(unbatch_select)
ctx["unbatch_idx"] = unbatch_idx
ctx["unbatch_select"] = unbatch_select
# sparse mesh_edges: batch_size * (num_points, ndim) -> (batch_size * num_points, ndim)
mesh_edges = []
mesh_edges_offset = 0
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="mesh_edges")
# if None -> create graph on GPU
if item is None:
break
idx = item + mesh_edges_offset
mesh_edges.append(idx)
mesh_edges_offset += mesh_lens[i]
if len(mesh_edges) > 0:
# noinspection PyTypedDict
collated_batch["mesh_edges"] = torch.concat(mesh_edges)
else:
collated_batch["mesh_edges"] = None
# normal collation for other properties (timestep, velocity, geometry2d)
result = []
for item in dataset_mode.split(" "):
if item in collated_batch:
result.append(collated_batch[item])
else:
result.append(
default_collate([
ModeWrapper.get_item(mode=dataset_mode, batch=sample, item=item)
for sample in batch
])
)
return tuple(result), ctx
@property
def default_collate_mode(self):
raise RuntimeError
def __call__(self, batch):
raise NotImplementedError("wrap KDSingleCollator with KDSingleCollatorWrapper")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/datasets/collators/__init__.py | src/datasets/collators/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/datasets/collators/cfd_baseline_collator.py | src/datasets/collators/cfd_baseline_collator.py | import einops
import torch
from kappadata.collators import KDSingleCollator
from kappadata.wrappers import ModeWrapper
from torch.utils.data import default_collate
class CfdBaselineCollator(KDSingleCollator):
def collate(self, batch, dataset_mode, ctx=None):
# make sure that batch was not collated
assert isinstance(batch, (tuple, list)) and isinstance(batch[0], tuple)
batch, ctx = zip(*batch)
# properties in context can have variable shapes (e.g. perm) -> delete ctx
ctx = {}
# collect collated properties
collated_batch = {}
# to sparse tensor: batch_size * (num_mesh_points, ndim) -> (batch_size * num_mesh_points, ndim)
mesh_pos = []
mesh_lens = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="mesh_pos")
mesh_lens.append(len(item))
mesh_pos.append(item)
collated_batch["mesh_pos"] = torch.concat(mesh_pos)
# create batch_idx tensor
batch_idx = torch.empty(sum(mesh_lens), dtype=torch.long)
start = 0
cur_batch_idx = 0
for i in range(len(mesh_lens)):
end = start + mesh_lens[i]
batch_idx[start:end] = cur_batch_idx
start = end
cur_batch_idx += 1
ctx["batch_idx"] = batch_idx
# batch_size * (num_mesh_points, num_input_timesteps * num_channels) ->
# (batch_size * num_mesh_points, num_input_timesteps * num_channels)
x = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="x")
assert len(item) == mesh_lens[i]
x.append(item)
collated_batch["x"] = torch.concat(x)
# to sparse tensor: batch_size * (num_grid_points, ndim) -> (batch_size * num_grid_points, ndim)
grid_pos = []
grid_lens = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="grid_pos")
grid_lens.append(len(item))
grid_pos.append(item)
collated_batch["grid_pos"] = torch.concat(grid_pos)
# query_pos to sparse tensor: batch_size * (num_mesh_points, ndim) -> (batch_size * num_mesh_points, ndim)
# target to sparse tensor: batch_size * (num_mesh_points, dim) -> (batch_size * num_mesh_points, dim)
query_pos = []
query_lens = []
target = []
for i in range(len(batch)):
query_pos_item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="query_pos")
target_item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="target")
assert len(query_pos_item) == len(target_item)
query_lens.append(len(query_pos_item))
query_pos.append(query_pos_item)
target.append(target_item)
collated_batch["query_pos"] = torch.concat(query_pos)
collated_batch["target"] = torch.concat(target)
# to sparse tensor batch_size * (num_points, ndim) -> (batch_size * num_points, ndim)
mesh_to_grid_edges = []
mesh_offset = 0
grid_offset = 0
for i in range(len(batch)):
idx = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="mesh_to_grid_edges")
# if None -> create graph on GPU
if idx is None:
break
idx[:, 0] += grid_offset
idx[:, 1] += mesh_offset
mesh_to_grid_edges.append(idx)
mesh_offset += mesh_lens[i]
grid_offset += grid_lens[i]
if len(mesh_to_grid_edges) > 0:
# noinspection PyTypedDict
collated_batch["mesh_to_grid_edges"] = torch.concat(mesh_to_grid_edges)
else:
collated_batch["mesh_to_grid_edges"] = None
# sparse grid_to_query_edges: batch_size * (num_points, ndim) -> (batch_size * num_points, ndim)
grid_to_query_edges = []
query_offset = 0
grid_offset = 0
for i in range(len(batch)):
idx = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="grid_to_query_edges")
# if None -> create graph on GPU
if idx is None:
break
idx[:, 0] += query_offset
idx[:, 1] += grid_offset
grid_to_query_edges.append(idx)
query_offset += query_lens[i]
grid_offset += grid_lens[i]
if len(grid_to_query_edges) > 0:
# noinspection PyTypedDict
collated_batch["grid_to_query_edges"] = torch.concat(grid_to_query_edges)
else:
collated_batch["grid_to_query_edges"] = None
# normal collation for other properties (timestep, velocity, geometry2d)
result = []
for item in dataset_mode.split(" "):
if item in collated_batch:
result.append(collated_batch[item])
else:
result.append(
default_collate([
ModeWrapper.get_item(mode=dataset_mode, batch=sample, item=item)
for sample in batch
])
)
return tuple(result), ctx
@property
def default_collate_mode(self):
raise RuntimeError
def __call__(self, batch):
raise NotImplementedError("wrap KDSingleCollator with KDSingleCollatorWrapper")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/datasets/collators/rans_gino_encdec_sdf_collator.py | src/datasets/collators/rans_gino_encdec_sdf_collator.py | import einops
import torch
from kappadata.collators import KDSingleCollator
from kappadata.wrappers import ModeWrapper
from torch.utils.data import default_collate
class RansGinoEncdecSdfCollator(KDSingleCollator):
def collate(self, batch, dataset_mode, ctx=None):
# make sure that batch was not collated
assert isinstance(batch, (tuple, list)) and isinstance(batch[0], tuple)
batch, ctx = zip(*batch)
# properties in context can have variable shapes (e.g. perm) -> delete ctx
ctx = {}
collated_batch = {}
# to sparse tensor: batch_size * (num_mesh_points, ndim) -> (batch_size * num_mesh_points, ndim)
mesh_pos = []
mesh_lens = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="mesh_pos")
mesh_lens.append(len(item))
mesh_pos.append(item)
collated_batch["mesh_pos"] = torch.concat(mesh_pos)
# to sparse tensor: batch_size * (num_grid_points, ndim) -> (batch_size * num_grid_points, ndim)
grid_pos = []
grid_lens = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="grid_pos")
grid_lens.append(len(item))
grid_pos.append(item)
collated_batch["grid_pos"] = torch.concat(grid_pos)
        # to sparse tensor: batch_size * (num_query_points, ndim) -> (batch_size * num_query_points, ndim)
query_pos = []
query_lens = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="query_pos")
query_lens.append(len(item))
query_pos.append(item)
collated_batch["query_pos"] = torch.concat(query_pos)
        # sparse mesh_to_grid_edges: batch_size * (num_edges, 2) -> (batch_size * num_edges, 2)
mesh_to_grid_edges = []
mesh_offset = 0
grid_offset = 0
for i in range(len(batch)):
idx = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="mesh_to_grid_edges")
idx[:, 0] += grid_offset
idx[:, 1] += mesh_offset
mesh_to_grid_edges.append(idx)
mesh_offset += mesh_lens[i]
grid_offset += grid_lens[i]
collated_batch["mesh_to_grid_edges"] = torch.concat(mesh_to_grid_edges)
        # sparse grid_to_query_edges: batch_size * (num_edges, 2) -> (batch_size * num_edges, 2)
grid_to_query_edges = []
        query_offset = 0
        grid_offset = 0
        for i in range(len(batch)):
            idx = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="grid_to_query_edges")
            # column 0 indexes into query_pos, column 1 into grid_pos
            idx[:, 0] += query_offset
            idx[:, 1] += grid_offset
            grid_to_query_edges.append(idx)
            query_offset += query_lens[i]
            grid_offset += grid_lens[i]
collated_batch["grid_to_query_edges"] = torch.concat(grid_to_query_edges)
        # to sparse tensor: batch_size * (num_query_points,) -> (batch_size * num_query_points, 1)
pressure = []
for i in range(len(batch)):
item = ModeWrapper.get_item(mode=dataset_mode, batch=batch[i], item="pressure")
assert len(item) == query_lens[i]
pressure.append(item)
collated_batch["pressure"] = torch.concat(pressure).unsqueeze(1)
# create batch_idx tensor
batch_idx = torch.empty(sum(query_lens), dtype=torch.long)
start = 0
cur_batch_idx = 0
for i in range(len(query_lens)):
end = start + query_lens[i]
batch_idx[start:end] = cur_batch_idx
start = end
cur_batch_idx += 1
ctx["batch_idx"] = batch_idx
# create query_batch_idx tensor (required for test loss)
query_batch_idx = torch.empty(sum(query_lens), dtype=torch.long)
start = 0
cur_query_batch_idx = 0
for i in range(len(query_lens)):
end = start + query_lens[i]
query_batch_idx[start:end] = cur_query_batch_idx
start = end
cur_query_batch_idx += 1
ctx["query_batch_idx"] = query_batch_idx
        # normal collation for other properties (e.g. sdf)
result = []
for item in dataset_mode.split(" "):
if item in collated_batch:
result.append(collated_batch[item])
else:
result.append(
default_collate([
ModeWrapper.get_item(mode=dataset_mode, batch=sample, item=item)
for sample in batch
])
)
return tuple(result), ctx
@property
def default_collate_mode(self):
raise RuntimeError
def __call__(self, batch):
raise NotImplementedError("wrap KDSingleCollator with KDSingleCollatorWrapper")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/metrics/__init__.py | src/metrics/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/cfd_interpolated_trainer.py | src/trainers/cfd_interpolated_trainer.py | from functools import cached_property
import einops
from kappadata.wrappers import ModeWrapper
from torch import nn
from callbacks.online_callbacks.update_output_callback import UpdateOutputCallback
from datasets.collators.cfd_interpolated_collator import CfdInterpolatedCollator
from losses import loss_fn_from_kwargs
from utils.factory import create
from .base.sgd_trainer import SgdTrainer
class CfdInterpolatedTrainer(SgdTrainer):
def __init__(self, loss_function, max_batch_size=None, **kwargs):
# automatic batchsize is not supported with mesh data
disable_gradient_accumulation = max_batch_size is None
super().__init__(
max_batch_size=max_batch_size,
disable_gradient_accumulation=disable_gradient_accumulation,
**kwargs,
)
self.loss_function = create(loss_function, loss_fn_from_kwargs, update_counter=self.update_counter)
@cached_property
def input_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="x")
assert dataset.root_dataset.num_query_points is not None
assert isinstance(collator.collator, CfdInterpolatedCollator)
input_shape = dataset.getshape_x()
self.logger.info(f"input_shape: {input_shape}")
return input_shape
@cached_property
def output_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="target")
assert isinstance(collator.collator, CfdInterpolatedCollator)
output_shape = dataset.getshape_target()
self.logger.info(f"output_shape: {output_shape}")
return output_shape
@cached_property
def dataset_mode(self):
return "interpolated query_pos timestep velocity target"
def get_trainer_model(self, model):
return self.Model(model=model, trainer=self)
class Model(nn.Module):
def __init__(self, model, trainer):
super().__init__()
self.model = model
self.trainer = trainer
def to_device(self, item, batch, dataset_mode):
data = ModeWrapper.get_item(mode=dataset_mode, item=item, batch=batch)
data = data.to(self.model.device, non_blocking=True)
return data
def prepare(self, batch, dataset_mode=None):
dataset_mode = dataset_mode or self.trainer.dataset_mode
batch, ctx = batch
data = dict(
x=self.to_device(item="interpolated", batch=batch, dataset_mode=dataset_mode),
query_pos=self.to_device(item="query_pos", batch=batch, dataset_mode=dataset_mode),
timestep=self.to_device(item="timestep", batch=batch, dataset_mode=dataset_mode),
velocity=self.to_device(item="velocity", batch=batch, dataset_mode=dataset_mode),
target=self.to_device(item="target", batch=batch, dataset_mode=dataset_mode),
)
return data
def forward(self, batch, reduction="mean"):
data = self.prepare(batch)
target = data.pop("target")
# forward pass
model_outputs = self.model(**data)
losses = dict(
x_hat=self.trainer.loss_function(
prediction=model_outputs["x_hat"],
target=target,
reduction=reduction,
),
)
if reduction == "mean_per_sample":
raise NotImplementedError("reduce with query_batch_idx")
return dict(total=losses["x_hat"], **losses), {}
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/rans_gino_encdec_sdf_trainer.py | src/trainers/rans_gino_encdec_sdf_trainer.py | from functools import cached_property
import torch
from kappadata.wrappers import ModeWrapper
from torch import nn
from torch_scatter import segment_csr
from callbacks.online_callbacks.update_output_callback import UpdateOutputCallback
from datasets.collators.rans_gino_encdec_sdf_collator import RansGinoEncdecSdfCollator
from losses import loss_fn_from_kwargs
from utils.factory import create
from .base.sgd_trainer import SgdTrainer
class RansGinoEncdecSdfTrainer(SgdTrainer):
def __init__(self, loss_function, max_batch_size=None, **kwargs):
# automatic batchsize is not supported with mesh data
disable_gradient_accumulation = max_batch_size is None
super().__init__(
max_batch_size=max_batch_size,
disable_gradient_accumulation=disable_gradient_accumulation,
**kwargs,
)
self.loss_function = create(loss_function, loss_fn_from_kwargs, update_counter=self.update_counter)
def get_trainer_callbacks(self, model=None):
return [
UpdateOutputCallback(
keys=["degree/input"],
every_n_updates=self.track_every_n_updates,
every_n_samples=self.track_every_n_samples,
**self.get_default_callback_kwargs(),
),
UpdateOutputCallback(
keys=["degree/input"],
**self.get_default_callback_intervals(),
**self.get_default_callback_kwargs(),
),
]
@cached_property
def input_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="mesh_pos")
assert isinstance(collator.collator, RansGinoEncdecSdfCollator)
mesh_pos, _ = dataset[0]
# mesh_pos has shape (num_points, ndim)
assert mesh_pos.ndim == 2 and 2 <= mesh_pos.size(1) <= 3
return None, mesh_pos.size(1)
@cached_property
def output_shape(self):
# pressure is predicted
return None, 1
@cached_property
def dataset_mode(self):
return "pressure mesh_pos sdf grid_pos query_pos mesh_to_grid_edges grid_to_query_edges"
def get_trainer_model(self, model):
return self.Model(model=model, trainer=self)
class Model(nn.Module):
def __init__(self, model, trainer):
super().__init__()
self.model = model
self.trainer = trainer
def to_device(self, item, batch):
data = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item=item, batch=batch)
data = data.to(self.model.device, non_blocking=True)
return data
def prepare(self, batch):
batch, ctx = batch
return dict(
mesh_pos=self.to_device(item="mesh_pos", batch=batch),
sdf=self.to_device(item="sdf", batch=batch),
grid_pos=self.to_device(item="grid_pos", batch=batch),
query_pos=self.to_device(item="query_pos", batch=batch),
mesh_to_grid_edges=self.to_device(item="mesh_to_grid_edges", batch=batch),
grid_to_query_edges=self.to_device(item="grid_to_query_edges", batch=batch),
target=self.to_device(item="pressure", batch=batch),
)
def forward(self, batch, reduction="mean"):
data = self.prepare(batch)
target = data.pop("target")
# forward pass
model_outputs = self.model(**data)
loss = self.trainer.loss_function(
prediction=model_outputs["x_hat"],
target=target,
reduction=reduction,
)
# accumulate losses of points
if reduction == "mean_per_sample":
_, ctx = batch
query_batch_idx = ctx["query_batch_idx"].to(self.model.device, non_blocking=True)
                # indptr is a tensor of indices between which to aggregate
                # i.e. an indptr of [0, 2, 5] with reduce="sum" would result in [src[0] + src[1], src[2] + src[3] + src[4]]
indices, counts = query_batch_idx.unique(return_counts=True)
# first index has to be 0
padded_counts = torch.zeros(len(indices) + 1, device=counts.device, dtype=counts.dtype)
padded_counts[indices + 1] = counts
indptr = padded_counts.cumsum(dim=0)
loss = segment_csr(src=loss, indptr=indptr, reduce="mean")
            # calculate degree of graph (average number of connections per node)
infos = {
"degree/input": len(data["mesh_to_grid_edges"]) / len(data["grid_pos"]),
"degree/output": len(data["grid_to_query_edges"]) / len(target),
}
return dict(total=loss, x_hat=loss), infos
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/rans_simformer_nognn_sdf_trainer.py | src/trainers/rans_simformer_nognn_sdf_trainer.py | from functools import cached_property
import torch
from kappadata.wrappers import ModeWrapper
from torch import nn
from torch_scatter import segment_csr
from callbacks.online_callbacks.update_output_callback import UpdateOutputCallback
from datasets.collators.rans_simformer_nognn_collator import RansSimformerNognnCollator
from losses import loss_fn_from_kwargs
from utils.factory import create
from .base.sgd_trainer import SgdTrainer
class RansSimformerNognnSdfTrainer(SgdTrainer):
def __init__(self, loss_function, max_batch_size=None, **kwargs):
# automatic batchsize is not supported with mesh data
disable_gradient_accumulation = max_batch_size is None
super().__init__(
max_batch_size=max_batch_size,
disable_gradient_accumulation=disable_gradient_accumulation,
**kwargs,
)
self.loss_function = create(loss_function, loss_fn_from_kwargs, update_counter=self.update_counter)
@cached_property
def input_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="mesh_pos")
assert isinstance(collator.collator, RansSimformerNognnCollator)
mesh_pos, _ = dataset[0]
# mesh_pos has shape (num_points, ndim)
assert mesh_pos.ndim == 2 and 2 <= mesh_pos.size(1) <= 3
return None, mesh_pos.size(1)
@cached_property
def output_shape(self):
# pressure is predicted
return None, 1
@cached_property
def dataset_mode(self):
return "pressure mesh_pos sdf query_pos"
def get_trainer_model(self, model):
return self.Model(model=model, trainer=self)
class Model(nn.Module):
def __init__(self, model, trainer):
super().__init__()
self.model = model
self.trainer = trainer
def to_device(self, item, batch):
data = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item=item, batch=batch)
data = data.to(self.model.device, non_blocking=True)
return data
def prepare(self, batch):
batch, ctx = batch
return dict(
mesh_pos=self.to_device(item="mesh_pos", batch=batch),
sdf=self.to_device(item="sdf", batch=batch),
query_pos=self.to_device(item="query_pos", batch=batch),
batch_idx=ctx["batch_idx"].to(self.model.device, non_blocking=True),
unbatch_idx=ctx["unbatch_idx"].to(self.model.device, non_blocking=True),
unbatch_select=ctx["unbatch_select"].to(self.model.device, non_blocking=True),
target=self.to_device(item="pressure", batch=batch),
)
def forward(self, batch, reduction="mean"):
data = self.prepare(batch)
target = data.pop("target")
# forward pass
model_outputs = self.model(**data)
loss = self.trainer.loss_function(
prediction=model_outputs["x_hat"],
target=target,
reduction=reduction,
)
# accumulate losses of points
if reduction == "mean_per_sample":
_, ctx = batch
query_batch_idx = ctx["query_batch_idx"].to(self.model.device, non_blocking=True)
                # indptr is a tensor of indices between which to aggregate
                # i.e. an indptr of [0, 2, 5] with reduce="sum" would result in [src[0] + src[1], src[2] + src[3] + src[4]]
indices, counts = query_batch_idx.unique(return_counts=True)
# first index has to be 0
padded_counts = torch.zeros(len(indices) + 1, device=counts.device, dtype=counts.dtype)
padded_counts[indices + 1] = counts
indptr = padded_counts.cumsum(dim=0)
loss = segment_csr(src=loss, indptr=indptr, reduce="mean")
return dict(total=loss, x_hat=loss), {}
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/gns_trainer.py | src/trainers/gns_trainer.py | import kappamodules.utils.tensor_cache as tc
import os
from functools import cached_property
import torch
import einops
from torch import nn
from kappadata.wrappers import ModeWrapper
from losses import loss_fn_from_kwargs
from utils.factory import create
from .base.sgd_trainer import SgdTrainer
from datasets.collators.lagrangian_simformer_collator import LagrangianSimformerCollator
from callbacks.online_callbacks.update_output_callback import UpdateOutputCallback
class GnsTrainer(SgdTrainer):
def __init__(
self,
loss_function,
forward_kwargs=None,
max_batch_size=None,
**kwargs
):
# automatic batchsize is not supported with mesh data
disable_gradient_accumulation = max_batch_size is None
super().__init__(
max_batch_size=max_batch_size,
disable_gradient_accumulation=disable_gradient_accumulation,
**kwargs,
)
self.loss_function = create(loss_function, loss_fn_from_kwargs, update_counter=self.update_counter)
self.forward_kwargs = forward_kwargs or {}
def get_trainer_callbacks(self, model=None):
return [
UpdateOutputCallback(
keys=["degree/input"],
every_n_updates=self.track_every_n_updates,
every_n_samples=self.track_every_n_samples,
**self.get_default_callback_kwargs(),
),
UpdateOutputCallback(
keys=["degree/input"],
**self.get_default_callback_intervals(),
**self.get_default_callback_kwargs(),
),
]
@cached_property
def input_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="x")
sample, _ = dataset[0]
        # num_input_timesteps are concatenated along the channel dimension
input_shape = list(sample.shape[1:])
input_shape[0] *= sample.size(0)
if collator is not None:
assert isinstance(collator.collator, LagrangianSimformerCollator)
assert len(input_shape) == 2
input_shape[1] = None
self.logger.info(f"input_shape: {tuple(input_shape)}")
return tuple(input_shape)
@cached_property
def output_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="target_acc")
sample, _ = dataset[0]
output_shape = list(sample.shape)[::-1]
if collator is not None:
assert isinstance(collator.collator, LagrangianSimformerCollator)
assert len(output_shape) == 2
output_shape[1] = None
self.logger.info(f"output_shape: {tuple(output_shape)}")
return tuple(output_shape)
@cached_property
def dataset_mode(self):
return "x curr_pos curr_pos_full edge_index timestep target_acc target_pos prev_pos prev_acc edge_features"
def get_trainer_model(self, model):
return self.Model(model=model, trainer=self)
class Model(nn.Module):
def __init__(self, model, trainer):
super().__init__()
self.model = model
self.trainer = trainer
def forward(self, batch, reduction="mean"):
# prepare data
batch, ctx = batch
x = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="x", batch=batch)
x = x.to(self.model.device, non_blocking=True)
timestep = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="timestep", batch=batch)
timestep = timestep.to(self.model.device, non_blocking=True)
target_acc = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="target_acc", batch=batch)
target_acc = target_acc.to(self.model.device, non_blocking=True)
edge_features = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="edge_features", batch=batch)
edge_features = edge_features.to(self.model.device, non_blocking=True)
curr_pos = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="curr_pos", batch=batch)
curr_pos = curr_pos.to(self.model.device, non_blocking=True)
curr_pos_full = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="curr_pos_full", batch=batch)
curr_pos_full = curr_pos_full.to(self.model.device, non_blocking=True)
prev_pos = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="prev_pos", batch=batch)
prev_pos = prev_pos.to(self.model.device, non_blocking=True)
prev_acc = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="prev_acc", batch=batch)
prev_acc = prev_acc.to(self.model.device, non_blocking=True)
edge_index = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="edge_index", batch=batch)
edge_index = edge_index.to(self.model.device, non_blocking=True)
batch_idx = ctx["batch_idx"].to(self.model.device, non_blocking=True)
unbatch_idx = ctx["unbatch_idx"].to(self.model.device, non_blocking=True)
unbatch_select = ctx["unbatch_select"].to(self.model.device, non_blocking=True)
# inputs are the velocities of all timesteps
x = einops.rearrange(
x,
"bs num_input_timesteps num_points -> bs (num_input_timesteps num_points)",
)
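            # e.g. (hypothetical sizes) x of shape (bs=2, num_input_timesteps=3, num_points=100)
            # becomes (2, 300): the per-timestep features are flattened into one feature vector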
target_acc = einops.rearrange(
target_acc,
"bs n_particles n_dim -> (bs n_particles) n_dim",
)
prev_acc = einops.rearrange(
prev_acc,
"bs n_particles n_dim -> (bs n_particles) n_dim",
)
# forward pass
model_outputs = self.model(
x,
timestep=timestep,
curr_pos=curr_pos,
curr_pos_decode=curr_pos_full,
prev_pos_decode=prev_pos,
edge_index=edge_index,
edge_features=edge_features,
batch_idx=batch_idx,
unbatch_idx=unbatch_idx,
unbatch_select=unbatch_select,
**self.trainer.forward_kwargs,
)
# next timestep loss
losses = dict(
a_hat=self.trainer.loss_function(
prediction=model_outputs,
target=target_acc,
reduction=reduction,
),
)
# weight losses
total_loss = losses["a_hat"]
infos = {
                # calculate degree of graph (average number of connections per node)
"degree/input": len(edge_index) / len(x)
}
return dict(total=total_loss, **losses), infos
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/cfd_baseline_trainer.py | src/trainers/cfd_baseline_trainer.py | from torch_scatter import segment_csr
import torch
from functools import cached_property
import einops
from kappadata.wrappers import ModeWrapper
from torch import nn
from callbacks.online_callbacks.update_output_callback import UpdateOutputCallback
from datasets.collators.cfd_baseline_collator import CfdBaselineCollator
from losses import loss_fn_from_kwargs
from utils.factory import create
from .base.sgd_trainer import SgdTrainer
from torch_geometric.nn.pool import radius
class CfdBaselineTrainer(SgdTrainer):
def __init__(
self,
loss_function,
radius_graph_r=None,
radius_graph_max_num_neighbors=None,
max_batch_size=None,
**kwargs
):
# automatic batchsize is not supported with mesh data
disable_gradient_accumulation = max_batch_size is None
super().__init__(
max_batch_size=max_batch_size,
disable_gradient_accumulation=disable_gradient_accumulation,
**kwargs,
)
self.radius_graph_r = radius_graph_r
self.radius_graph_max_num_neighbors = radius_graph_max_num_neighbors
self.loss_function = create(loss_function, loss_fn_from_kwargs, update_counter=self.update_counter)
def get_trainer_callbacks(self, model=None):
return [
UpdateOutputCallback(
keys=["degree/input", "degree/output"],
every_n_updates=self.track_every_n_updates,
every_n_samples=self.track_every_n_samples,
**self.get_default_callback_kwargs(),
),
UpdateOutputCallback(
keys=["degree/input", "degree/output"],
**self.get_default_callback_intervals(),
**self.get_default_callback_kwargs(),
),
]
@cached_property
def input_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="x")
assert dataset.root_dataset.num_query_points is not None
assert isinstance(collator.collator, CfdBaselineCollator)
input_shape = dataset.getshape_x()
self.logger.info(f"input_shape: {input_shape}")
return input_shape
@cached_property
def output_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="target")
assert isinstance(collator.collator, CfdBaselineCollator)
output_shape = dataset.getshape_target()
self.logger.info(f"output_shape: {output_shape}")
return output_shape
@cached_property
def dataset_mode(self):
return "x mesh_pos grid_pos query_pos mesh_to_grid_edges grid_to_query_edges timestep velocity target"
def get_trainer_model(self, model):
return self.Model(model=model, trainer=self)
class Model(nn.Module):
def __init__(self, model, trainer):
super().__init__()
self.model = model
self.trainer = trainer
def to_device(self, item, batch, dataset_mode):
data = ModeWrapper.get_item(mode=dataset_mode, item=item, batch=batch)
data = data.to(self.model.device, non_blocking=True)
return data
def prepare(self, batch, dataset_mode=None):
dataset_mode = dataset_mode or self.trainer.dataset_mode
batch, ctx = batch
batch_idx = ctx["batch_idx"].to(self.model.device, non_blocking=True)
data = dict(
x=self.to_device(item="x", batch=batch, dataset_mode=dataset_mode),
mesh_pos=self.to_device(item="mesh_pos", batch=batch, dataset_mode=dataset_mode),
grid_pos=self.to_device(item="grid_pos", batch=batch, dataset_mode=dataset_mode),
query_pos=self.to_device(item="query_pos", batch=batch, dataset_mode=dataset_mode),
timestep=self.to_device(item="timestep", batch=batch, dataset_mode=dataset_mode),
velocity=self.to_device(item="velocity", batch=batch, dataset_mode=dataset_mode),
target=self.to_device(item="target", batch=batch, dataset_mode=dataset_mode),
batch_idx=batch_idx,
)
mesh_to_grid_edges = ModeWrapper.get_item(item="mesh_to_grid_edges", batch=batch, mode=dataset_mode)
grid_to_query_edges = ModeWrapper.get_item(item="grid_to_query_edges", batch=batch, mode=dataset_mode)
batch_size = len(data["timestep"])
if mesh_to_grid_edges is None or grid_to_query_edges is None:
assert len(data["grid_pos"]) % batch_size == 0
num_grid_points = len(data["grid_pos"]) // batch_size
grid_batch_idx = torch.arange(batch_size, device=self.model.device).repeat_interleave(num_grid_points)
else:
grid_batch_idx = None
# mesh_to_grid_edges
if mesh_to_grid_edges is None:
# create on GPU
assert self.trainer.radius_graph_r is not None
assert self.trainer.radius_graph_max_num_neighbors is not None
mesh_to_grid_edges = radius(
x=data["mesh_pos"],
y=data["grid_pos"],
batch_x=batch_idx,
batch_y=grid_batch_idx,
r=self.trainer.radius_graph_r,
max_num_neighbors=self.trainer.radius_graph_max_num_neighbors,
).T
else:
assert self.trainer.radius_graph_r is None
assert self.trainer.radius_graph_max_num_neighbors is None
mesh_to_grid_edges = mesh_to_grid_edges.to(self.model.device, non_blocking=True)
data["mesh_to_grid_edges"] = mesh_to_grid_edges
# grid_to_query_edges
if grid_to_query_edges is None:
# create on GPU
assert self.trainer.radius_graph_r is not None
assert self.trainer.radius_graph_max_num_neighbors is not None
assert len(data["query_pos"]) % batch_size == 0
num_query_pos = len(data["query_pos"]) // batch_size
query_batch_idx = torch.arange(batch_size, device=self.model.device).repeat_interleave(num_query_pos)
grid_to_query_edges = radius(
x=data["grid_pos"],
y=data["query_pos"],
batch_x=grid_batch_idx,
batch_y=query_batch_idx,
r=self.trainer.radius_graph_r,
max_num_neighbors=self.trainer.radius_graph_max_num_neighbors,
).T
else:
assert self.trainer.radius_graph_r is None
assert self.trainer.radius_graph_max_num_neighbors is None
grid_to_query_edges = grid_to_query_edges.to(self.model.device, non_blocking=True)
data["grid_to_query_edges"] = grid_to_query_edges
return data
def forward(self, batch, reduction="mean"):
data = self.prepare(batch)
target = data.pop("target")
# forward pass
model_outputs = self.model(**data)
x_hat_loss = self.trainer.loss_function(
prediction=model_outputs["x_hat"],
target=target,
reduction="none",
)
losses = {}
if reduction == "mean":
losses["x_hat"] = x_hat_loss.mean()
if reduction == "mean_per_sample":
batch_index = data["batch_idx"]
batch_size = batch_index.max() + 1
#num_zero_pos = (data["query_pos"] == 0).sum()
#assert num_zero_pos == 0, f"padded query_pos not supported {num_zero_pos}"
query_pos_len = data["query_pos"].size(1)
query_batch_idx = torch.arange(batch_size, device=self.model.device).repeat_interleave(query_pos_len)
#query_batch_idx = ctx["query_batch_idx"].to(self.model.device, non_blocking=True)
                # indptr is a tensor of indices between which to aggregate
                # i.e. an indptr of [0, 2, 5] with reduce="sum" would result in [src[0] + src[1], src[2] + src[3] + src[4]]
indices, counts = query_batch_idx.unique(return_counts=True)
# first index has to be 0
padded_counts = torch.zeros(len(indices) + 1, device=counts.device, dtype=counts.dtype)
padded_counts[indices + 1] = counts
indptr = padded_counts.cumsum(dim=0)
losses["x_hat"] = segment_csr(src=x_hat_loss.mean(dim=1), indptr=indptr, reduce="mean")
# infos
infos = {
"degree/input": len(data["mesh_to_grid_edges"]) / len(data["grid_pos"]),
"degree/output": len(data["grid_to_query_edges"]) / len(target),
}
return dict(total=losses["x_hat"], **losses), infos
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/lagrangian_simformer_trainer.py | src/trainers/lagrangian_simformer_trainer.py | import kappamodules.utils.tensor_cache as tc
import os
from functools import cached_property
import torch
import einops
from torch import nn
from kappadata.wrappers import ModeWrapper
from losses import loss_fn_from_kwargs
from utils.factory import create
from .base.sgd_trainer import SgdTrainer
from datasets.collators.lagrangian_simformer_collator import LagrangianSimformerCollator
from callbacks.online_callbacks.update_output_callback import UpdateOutputCallback
class LagrangianSimformerTrainer(SgdTrainer):
def __init__(
self,
loss_function,
forward_kwargs=None,
max_batch_size=None,
**kwargs
):
# automatic batchsize is not supported with mesh data
disable_gradient_accumulation = max_batch_size is None
super().__init__(
max_batch_size=max_batch_size,
disable_gradient_accumulation=disable_gradient_accumulation,
**kwargs,
)
self.loss_function = create(loss_function, loss_fn_from_kwargs, update_counter=self.update_counter)
self.forward_kwargs = forward_kwargs or {}
def get_trainer_callbacks(self, model=None):
return [
UpdateOutputCallback(
keys=["degree/input"],
every_n_updates=self.track_every_n_updates,
every_n_samples=self.track_every_n_samples,
**self.get_default_callback_kwargs(),
),
UpdateOutputCallback(
keys=["degree/input"],
**self.get_default_callback_intervals(),
**self.get_default_callback_kwargs(),
),
]
@cached_property
def input_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="x")
sample, _ = dataset[0]
        # num_input_timesteps are concatenated along the channel dimension
input_shape = list(sample.shape[1:])
input_shape[0] *= sample.size(0)
if collator is not None:
assert isinstance(collator.collator, LagrangianSimformerCollator)
assert len(input_shape) == 2
input_shape[1] = None
self.logger.info(f"input_shape: {tuple(input_shape)}")
return tuple(input_shape)
@cached_property
def output_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="target_acc")
sample, _ = dataset[0]
output_shape = list(sample.shape)[::-1]
if collator is not None:
assert isinstance(collator.collator, LagrangianSimformerCollator)
assert len(output_shape) == 2
output_shape[1] = None
self.logger.info(f"output_shape: {tuple(output_shape)}")
return tuple(output_shape)
@cached_property
def dataset_mode(self):
return "x curr_pos curr_pos_full edge_index edge_index_target timestep target_acc target_pos prev_pos prev_acc target_pos_encode perm target_vel"
def get_trainer_model(self, model):
return self.Model(model=model, trainer=self)
class Model(nn.Module):
def __init__(self, model, trainer):
super().__init__()
self.model = model
self.trainer = trainer
def forward(self, batch, reduction="mean"):
# prepare data
batch, ctx = batch
x = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="x", batch=batch)
x = x.to(self.model.device, non_blocking=True)
timestep = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="timestep", batch=batch)
timestep = timestep.to(self.model.device, non_blocking=True)
target_acc = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="target_acc", batch=batch)
target_acc = target_acc.to(self.model.device, non_blocking=True)
target_vel = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="target_vel", batch=batch)
target_vel = target_vel.to(self.model.device, non_blocking=True)
curr_pos = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="curr_pos", batch=batch)
curr_pos = curr_pos.to(self.model.device, non_blocking=True)
curr_pos_full = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="curr_pos_full", batch=batch)
curr_pos_full = curr_pos_full.to(self.model.device, non_blocking=True)
prev_pos = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="prev_pos", batch=batch)
prev_pos = prev_pos.to(self.model.device, non_blocking=True)
prev_acc = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="prev_acc", batch=batch)
prev_acc = prev_acc.to(self.model.device, non_blocking=True)
edge_index = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="edge_index", batch=batch)
edge_index = edge_index.to(self.model.device, non_blocking=True)
batch_idx = ctx["batch_idx"].to(self.model.device, non_blocking=True)
unbatch_idx = ctx["unbatch_idx"].to(self.model.device, non_blocking=True)
unbatch_select = ctx["unbatch_select"].to(self.model.device, non_blocking=True)
target_pos_encode = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="target_pos_encode", batch=batch)
target_pos_encode = target_pos_encode.to(self.model.device, non_blocking=True)
perm = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="perm", batch=batch)
perm = perm.to(self.model.device, non_blocking=True)
edge_index_target = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="edge_index_target", batch=batch)
edge_index_target = edge_index_target.to(self.model.device, non_blocking=True)
# Flatten input
x = einops.rearrange(
x,
"bs num_input_timesteps num_points -> bs (num_input_timesteps num_points)",
)
# Targets are predicted for all particles of all batches simultaneously
target_acc = einops.rearrange(
target_acc,
"bs n_particles n_dim -> (bs n_particles) n_dim",
)
target_vel = einops.rearrange(
target_vel,
"bs n_particles n_dim -> (bs n_particles) n_dim",
)
prev_acc = einops.rearrange(
prev_acc,
"bs n_particles n_dim -> (bs n_particles) n_dim",
)
# forward pass
model_outputs = self.model(
x,
timestep=timestep,
curr_pos=curr_pos,
curr_pos_decode=curr_pos_full,
prev_pos_decode=prev_pos,
edge_index=edge_index,
batch_idx=batch_idx,
unbatch_idx=unbatch_idx,
unbatch_select=unbatch_select,
                edge_index_target=edge_index_target,
target_pos_encode=target_pos_encode,
perm_batch=perm,
**self.trainer.forward_kwargs,
)
# Target is either next velocity or next acceleration
            if self.trainer.forward_kwargs["predict_velocity"]:
target = target_vel
else:
target = target_acc
# next timestep loss
losses = dict(
target=self.trainer.loss_function(
prediction=model_outputs["target"],
target=target,
reduction=reduction,
),
)
# input_reconstruction losses
if "prev_target" in model_outputs:
prev_target_loss = self.trainer.loss_function(
prediction=model_outputs["prev_target"],
target=prev_acc,
reduction=reduction,
)
losses["prev_target"] = prev_target_loss
if "pred_dynamics" in model_outputs:
pred_dynamics_loss = self.trainer.loss_function(
prediction=model_outputs["pred_dynamics"],
target=model_outputs["dynamics"],
reduction=reduction,
)
losses["pred_dynamics"] = pred_dynamics_loss
# weight losses
total_loss = losses["target"]
if "prev_target" in losses:
total_loss = total_loss + losses["prev_target"]
if "pred_dynamics" in losses:
total_loss = total_loss + losses["pred_dynamics"]
infos = {
                # calculate degree of graph (average number of connections per node)
"degree/input": len(edge_index) / len(x)
}
return dict(total=total_loss, **losses), infos
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/rans_baseline_trainer.py | src/trainers/rans_baseline_trainer.py | from functools import cached_property
import torch
from kappadata.wrappers import ModeWrapper
from torch import nn
from torch_scatter import segment_csr
from callbacks.online_callbacks.update_output_callback import UpdateOutputCallback
from datasets.collators.rans_baseline_collator import RansBaselineCollator
from losses import loss_fn_from_kwargs
from utils.factory import create
from .base.sgd_trainer import SgdTrainer
class RansBaselineTrainer(SgdTrainer):
def __init__(
self,
loss_function,
max_batch_size=None,
**kwargs
):
# automatic batchsize is not supported with mesh data
disable_gradient_accumulation = max_batch_size is None
super().__init__(
max_batch_size=max_batch_size,
disable_gradient_accumulation=disable_gradient_accumulation,
**kwargs,
)
self.loss_function = create(loss_function, loss_fn_from_kwargs, update_counter=self.update_counter)
def get_trainer_callbacks(self, model=None):
return [
UpdateOutputCallback(
keys=["degree/input"],
every_n_updates=self.track_every_n_updates,
every_n_samples=self.track_every_n_samples,
**self.get_default_callback_kwargs(),
),
UpdateOutputCallback(
keys=["degree/input"],
**self.get_default_callback_intervals(),
**self.get_default_callback_kwargs(),
),
]
@cached_property
def input_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="mesh_pos")
assert isinstance(collator.collator, RansBaselineCollator)
mesh_pos, _ = dataset[0]
# mesh_pos has shape (num_points, ndim)
assert mesh_pos.ndim == 2 and 2 <= mesh_pos.size(1) <= 3
return None, mesh_pos.size(1)
@cached_property
def output_shape(self):
# pressure is predicted
return None, 1
@cached_property
def dataset_mode(self):
return "pressure mesh_pos grid_pos query_pos mesh_to_grid_edges grid_to_query_edges"
def get_trainer_model(self, model):
return self.Model(model=model, trainer=self)
class Model(nn.Module):
def __init__(self, model, trainer):
super().__init__()
self.model = model
self.trainer = trainer
def to_device(self, item, batch):
data = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item=item, batch=batch)
data = data.to(self.model.device, non_blocking=True)
return data
def prepare(self, batch):
batch, ctx = batch
return dict(
mesh_pos=self.to_device(item="mesh_pos", batch=batch),
grid_pos=self.to_device(item="grid_pos", batch=batch),
query_pos=self.to_device(item="query_pos", batch=batch),
mesh_to_grid_edges=self.to_device(item="mesh_to_grid_edges", batch=batch),
grid_to_query_edges=self.to_device(item="grid_to_query_edges", batch=batch),
target=self.to_device(item="pressure", batch=batch),
)
def forward(self, batch, reduction="mean"):
data = self.prepare(batch)
target = data.pop("target")
# forward pass
model_outputs = self.model(**data)
loss = self.trainer.loss_function(
prediction=model_outputs["x_hat"],
target=target,
reduction=reduction,
)
# accumulate losses of points
if reduction == "mean_per_sample":
_, ctx = batch
query_batch_idx = ctx["query_batch_idx"].to(self.model.device, non_blocking=True)
                # indptr is a tensor of indices between which to aggregate
                # i.e. an indptr of [0, 2, 5] with reduce="sum" would result in [src[0] + src[1], src[2] + src[3] + src[4]]
indices, counts = query_batch_idx.unique(return_counts=True)
# first index has to be 0
padded_counts = torch.zeros(len(indices) + 1, device=counts.device, dtype=counts.dtype)
padded_counts[indices + 1] = counts
indptr = padded_counts.cumsum(dim=0)
loss = segment_csr(src=loss, indptr=indptr, reduce="mean")
            # calculate degree of graph (average number of connections per node)
infos = {
"degree/input": len(data["mesh_to_grid_edges"]) / len(data["grid_pos"]),
"degree/output": len(data["grid_to_query_edges"]) / len(target),
}
return dict(total=loss, x_hat=loss), infos
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/rans_interpolated_trainer.py | src/trainers/rans_interpolated_trainer.py | from functools import cached_property
import torch
from kappadata.wrappers import ModeWrapper
from torch import nn
from torch_scatter import segment_csr
from callbacks.online_callbacks.update_output_callback import UpdateOutputCallback
from datasets.collators.rans_interpolated_collator import RansInterpolatedCollator
from losses import loss_fn_from_kwargs
from utils.factory import create
from .base.sgd_trainer import SgdTrainer
class RansInterpolatedTrainer(SgdTrainer):
def __init__(
self,
loss_function,
use_sdf=True,
max_batch_size=None,
**kwargs
):
# has to be before super() because dataset_mode uses it
self.use_sdf = use_sdf
# automatic batchsize is not supported with mesh data
disable_gradient_accumulation = max_batch_size is None
super().__init__(
max_batch_size=max_batch_size,
disable_gradient_accumulation=disable_gradient_accumulation,
**kwargs,
)
self.loss_function = create(loss_function, loss_fn_from_kwargs, update_counter=self.update_counter)
@cached_property
def input_shape(self):
mode = "sdf" if self.use_sdf else "interpolated"
dataset, collator = self.data_container.get_dataset("train", mode=mode)
assert isinstance(collator.collator, RansInterpolatedCollator)
sdf, _ = dataset[0]
# sdf has shape (*resolution, dim) -> omit resolution dims to be consistent with mesh input shapes
return None, sdf.size(-1)
@cached_property
def output_shape(self):
# pressure is predicted
return None, 1
@cached_property
def dataset_mode(self):
return f"{'sdf' if self.use_sdf else 'interpolated'} pressure query_pos"
def get_trainer_model(self, model):
return self.Model(model=model, trainer=self)
class Model(nn.Module):
def __init__(self, model, trainer):
super().__init__()
self.model = model
self.trainer = trainer
def to_device(self, item, batch):
data = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item=item, batch=batch)
data = data.to(self.model.device, non_blocking=True)
return data
def prepare(self, batch):
batch, ctx = batch
return dict(
x=self.to_device(item="sdf" if self.trainer.use_sdf else "interpolated", batch=batch),
query_pos=self.to_device(item="query_pos", batch=batch),
target=self.to_device(item="pressure", batch=batch),
)
def forward(self, batch, reduction="mean"):
data = self.prepare(batch)
target = data.pop("target")
# forward pass
model_outputs = self.model(**data)
loss = self.trainer.loss_function(
prediction=model_outputs["x_hat"],
target=target,
reduction=reduction,
)
# accumulate losses of points
if reduction == "mean_per_sample":
_, ctx = batch
query_batch_idx = ctx["query_batch_idx"].to(self.model.device, non_blocking=True)
                # indptr is a tensor of indices between which to aggregate
                # i.e. an indptr of [0, 2, 5] with reduce="sum" would result in [src[0] + src[1], src[2] + src[3] + src[4]]
indices, counts = query_batch_idx.unique(return_counts=True)
# first index has to be 0
padded_counts = torch.zeros(len(indices) + 1, device=counts.device, dtype=counts.dtype)
padded_counts[indices + 1] = counts
indptr = padded_counts.cumsum(dim=0)
loss = segment_csr(src=loss, indptr=indptr, reduce="mean")
return dict(total=loss, x_hat=loss), {}
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/cfd_simformer_trainer.py | src/trainers/cfd_simformer_trainer.py | from functools import cached_property
import kappamodules.utils.tensor_cache as tc
import torch
from kappadata.wrappers import ModeWrapper
from torch import nn
from torch_geometric.nn.pool import radius_graph
from torch_scatter import segment_csr
from callbacks.online_callbacks.update_output_callback import UpdateOutputCallback
from datasets.collators.cfd_simformer_collator import CfdSimformerCollator
from losses import loss_fn_from_kwargs
from utils.checkpoint import Checkpoint
from utils.factory import create
from .base.sgd_trainer import SgdTrainer
class CfdSimformerTrainer(SgdTrainer):
def __init__(
self,
loss_function,
detach_reconstructions=False,
reconstruct_from_target=False,
reconstruct_prev_x_weight=0,
reconstruct_dynamics_weight=0,
radius_graph_r=None,
radius_graph_max_num_neighbors=None,
max_batch_size=None,
mask_loss_start_checkpoint=None,
mask_loss_threshold=None,
**kwargs
):
# automatic batchsize is not supported with mesh data
disable_gradient_accumulation = max_batch_size is None
super().__init__(
max_batch_size=max_batch_size,
disable_gradient_accumulation=disable_gradient_accumulation,
**kwargs,
)
self.loss_function = create(loss_function, loss_fn_from_kwargs, update_counter=self.update_counter)
self.detach_reconstructions = detach_reconstructions
self.reconstruct_from_target = reconstruct_from_target
self.reconstruct_prev_x_weight = reconstruct_prev_x_weight
self.reconstruct_dynamics_weight = reconstruct_dynamics_weight
self.radius_graph_r = radius_graph_r
self.radius_graph_max_num_neighbors = radius_graph_max_num_neighbors
self.mask_loss_start_checkpoint = create(mask_loss_start_checkpoint, Checkpoint)
if self.mask_loss_start_checkpoint is not None:
assert self.mask_loss_start_checkpoint.is_minimally_specified
self.mask_loss_start_checkpoint = self.mask_loss_start_checkpoint.to_fully_specified(
updates_per_epoch=self.update_counter.updates_per_epoch,
effective_batch_size=self.update_counter.effective_batch_size,
)
self.mask_loss_threshold = mask_loss_threshold
self.num_supernodes = None
def get_trainer_callbacks(self, model=None):
keys = ["degree/input"]
patterns = ["loss_stats", "tensor_stats"]
return [
UpdateOutputCallback(
keys=keys,
patterns=patterns,
every_n_updates=self.track_every_n_updates,
every_n_samples=self.track_every_n_samples,
**self.get_default_callback_kwargs(),
),
UpdateOutputCallback(
keys=keys,
patterns=patterns,
**self.get_default_callback_intervals(),
**self.get_default_callback_kwargs(),
),
]
@cached_property
def input_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="x")
assert isinstance(collator.collator, CfdSimformerCollator)
self.num_supernodes = collator.collator.num_supernodes
input_shape = dataset.getshape_x()
self.logger.info(f"input_shape: {input_shape}")
if self.reconstruct_prev_x_weight > 0 or self.reconstruct_dynamics_weight > 0:
# make sure query is coupled with input
assert dataset.couple_query_with_input
else:
if self.end_checkpoint.is_zero:
                # eval run -> doesn't matter
pass
else:
                # check that num_query_points is used if no latent rollout losses are used
                # without reconstruction losses there is no reason not to subsample query points
assert dataset.root_dataset.num_query_points is not None
return input_shape
@cached_property
def output_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="x")
assert isinstance(collator.collator, CfdSimformerCollator)
output_shape = dataset.getshape_target()
self.logger.info(f"output_shape: {output_shape}")
return output_shape
@cached_property
def dataset_mode(self):
return "x mesh_pos query_pos mesh_edges geometry2d timestep velocity target"
def get_trainer_model(self, model):
return self.Model(model=model, trainer=self)
class Model(nn.Module):
def __init__(self, model, trainer):
super().__init__()
self.model = model
self.trainer = trainer
# self.counter = 0
def to_device(self, item, batch, dataset_mode):
data = ModeWrapper.get_item(mode=dataset_mode, item=item, batch=batch)
data = data.to(self.model.device, non_blocking=True)
return data
def prepare(self, batch, dataset_mode=None):
dataset_mode = dataset_mode or self.trainer.dataset_mode
batch, ctx = batch
mesh_pos = self.to_device(item="mesh_pos", batch=batch, dataset_mode=dataset_mode)
batch_idx = ctx["batch_idx"].to(self.model.device, non_blocking=True)
data = dict(
x=self.to_device(item="x", batch=batch, dataset_mode=dataset_mode),
geometry2d=self.to_device(item="geometry2d", batch=batch, dataset_mode=dataset_mode),
timestep=self.to_device(item="timestep", batch=batch, dataset_mode=dataset_mode),
velocity=self.to_device(item="velocity", batch=batch, dataset_mode=dataset_mode),
query_pos=self.to_device(item="query_pos", batch=batch, dataset_mode=dataset_mode),
mesh_pos=mesh_pos,
batch_idx=batch_idx,
unbatch_idx=ctx["unbatch_idx"].to(self.model.device, non_blocking=True),
unbatch_select=ctx["unbatch_select"].to(self.model.device, non_blocking=True),
target=self.to_device(item="target", batch=batch, dataset_mode=dataset_mode),
)
mesh_edges = ModeWrapper.get_item(item="mesh_edges", batch=batch, mode=dataset_mode)
if mesh_edges is None:
# create mesh edges on GPU
assert self.trainer.radius_graph_r is not None
assert self.trainer.radius_graph_max_num_neighbors is not None
if self.trainer.num_supernodes is None:
# normal flow direction
flow = "source_to_target"
supernode_idxs = None
else:
# inverted flow direction is required to have sorted dst_indices
flow = "target_to_source"
supernode_idxs = ctx["supernode_idxs"].to(self.model.device, non_blocking=True)
mesh_edges = radius_graph(
x=mesh_pos,
r=self.trainer.radius_graph_r,
max_num_neighbors=self.trainer.radius_graph_max_num_neighbors,
batch=batch_idx,
loop=True,
flow=flow,
)
if supernode_idxs is not None:
is_supernode_edge = torch.isin(mesh_edges[0], supernode_idxs)
mesh_edges = mesh_edges[:, is_supernode_edge]
mesh_edges = mesh_edges.T
else:
assert self.trainer.radius_graph_r is None
assert self.trainer.radius_graph_max_num_neighbors is None
assert self.trainer.num_supernodes is None
mesh_edges = mesh_edges.to(self.model.device, non_blocking=True)
data["mesh_edges"] = mesh_edges
return data
def forward(self, batch, reduction="mean"):
data = self.prepare(batch=batch)
x = data.pop("x")
target = data.pop("target")
batch_idx = data["batch_idx"]
batch_size = batch_idx.max() + 1
# forward pass
forward_kwargs = {}
if self.trainer.reconstruct_from_target:
forward_kwargs["target"] = target
model_outputs = self.model(
x,
**data,
**forward_kwargs,
detach_reconstructions=self.trainer.detach_reconstructions,
reconstruct_prev_x=self.trainer.reconstruct_prev_x_weight > 0,
reconstruct_dynamics=self.trainer.reconstruct_dynamics_weight > 0,
)
infos = {}
losses = {}
# next timestep loss
x_hat_loss = self.trainer.loss_function(
prediction=model_outputs["x_hat"],
target=target,
reduction="none",
)
infos.update(
{
"loss_stats/x_hat/min": x_hat_loss.min(),
"loss_stats/x_hat/max": x_hat_loss.max(),
"loss_stats/x_hat/gt1": (x_hat_loss > 1).sum() / x_hat_loss.numel(),
"loss_stats/x_hat/eq0": (x_hat_loss == 0).sum() / x_hat_loss.numel(),
}
)
# mask high values after some time to avoid instabilities
if self.trainer.mask_loss_start_checkpoint is not None:
if self.trainer.mask_loss_start_checkpoint > self.trainer.update_counter.cur_checkpoint:
x_hat_loss_mask = x_hat_loss > self.trainer.mask_loss_threshold
x_hat_loss = x_hat_loss[x_hat_loss_mask]
infos["loss_stats/x_hat/gt_loss_threshold"] = x_hat_loss_mask.sum() / x_hat_loss_mask.numel()
if reduction == "mean":
losses["x_hat"] = x_hat_loss.mean()
elif reduction == "mean_per_sample":
_, ctx = batch
num_zero_pos = (data["query_pos"] == 0).sum()
assert num_zero_pos == 0, f"padded query_pos not supported {num_zero_pos}"
query_pos_len = data["query_pos"].size(1)
query_batch_idx = torch.arange(batch_size, device=self.model.device).repeat_interleave(query_pos_len)
#query_batch_idx = ctx["query_batch_idx"].to(self.model.device, non_blocking=True)
                # indptr is a tensor of indices between which to aggregate
                # i.e. an indptr of [0, 2, 5] with reduce="sum" would result in [src[0] + src[1], src[2] + src[3] + src[4]]
indices, counts = query_batch_idx.unique(return_counts=True)
# first index has to be 0
padded_counts = torch.zeros(len(indices) + 1, device=counts.device, dtype=counts.dtype)
padded_counts[indices + 1] = counts
indptr = padded_counts.cumsum(dim=0)
losses["x_hat"] = segment_csr(src=x_hat_loss.mean(dim=1), indptr=indptr, reduce="mean")
else:
raise NotImplementedError
total_loss = losses["x_hat"]
# num_objects = self.to_device(item="num_objects", batch=batch[0], dataset_mode=self.trainer.dataset_mode),
# out = self.trainer.path_provider.stage_output_path / f"tensors"
# out.mkdir(exist_ok=True)
# torch.save(num_objects, out / f"{self.counter:04d}_numobjects.th")
# torch.save(x, out / f"{self.counter:04d}_x.th")
# torch.save(target, out / f"{self.counter:04d}_target.th")
# torch.save(data["timestep"], out / f"{self.counter:04d}_timestep.th")
# torch.save(data["velocity"], out / f"{self.counter:04d}_velocity.th")
# torch.save(data["query_pos"], out / f"{self.counter:04d}_querypos.th")
# torch.save(data["mesh_pos"], out / f"{self.counter:04d}_meshpos.th")
# torch.save(data["batch_idx"], out / f"{self.counter:04d}_batchidx.th")
# torch.save(data["mesh_edges"], out / f"{self.counter:04d}_meshedges.th")
# torch.save(model_outputs["x_hat"], out / f"{self.counter:04d}_xhat.th")
# self.counter += 1
# input_reconstruction losses
if self.trainer.reconstruct_prev_x_weight > 0:
num_channels = model_outputs["prev_x_hat"].size(1)
prev_x_hat_loss = self.trainer.loss_function(
prediction=model_outputs["prev_x_hat"],
target=x[:, -num_channels:],
reduction="none",
)
if reduction == "mean":
# mask out reconstruction for timestep==0
timestep = data["timestep"]
timestep_per_point = torch.gather(timestep, dim=0, index=batch_idx)
prev_x_hat_loss = prev_x_hat_loss[timestep_per_point != 0]
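                    # e.g. (hypothetical sizes) timestep = [0, 3], batch_idx = [0, 0, 1, 1, 1]
                    # -> timestep_per_point = [0, 0, 3, 3, 3], so the points of the sample
                    # with timestep 0 are excluded from the reconstruction loss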
if self.trainer.mask_loss_start_checkpoint is not None:
if self.trainer.mask_loss_start_checkpoint > self.trainer.update_counter.cur_checkpoint:
prev_x_hat_loss_mask = prev_x_hat_loss > self.trainer.mask_loss_threshold
prev_x_hat_loss = prev_x_hat_loss[prev_x_hat_loss_mask]
infos["loss_stats/prev_x_hat/gt_loss_threshold"] = \
prev_x_hat_loss_mask.sum() / prev_x_hat_loss_mask.numel()
prev_x_hat_loss = prev_x_hat_loss.mean()
elif reduction == "mean_per_sample":
raise NotImplementedError
# prev_x_hat_loss = prev_x_hat_loss.flatten(start_dim=1).mean(dim=1)
# # set loss for timestep==0 to 0
# prev_x_hat_loss[is_timestep0] = 0.
else:
raise NotImplementedError
losses["prev_x_hat"] = prev_x_hat_loss
total_loss = total_loss + self.trainer.reconstruct_prev_x_weight * prev_x_hat_loss
# dynamics reconstruction losses
if self.trainer.reconstruct_dynamics_weight > 0:
dynamics_hat_loss = self.trainer.loss_function(
prediction=model_outputs["dynamics_hat"],
target=model_outputs["dynamics"],
reduction="none",
)
max_timestep = self.model.conditioner.num_total_timesteps - 1
timestep = data["timestep"]
if reduction == "mean":
# mask out reconstruction for timestep==T
dynamics_hat_mask = timestep != max_timestep
if dynamics_hat_mask.sum() > 0:
dynamics_hat_loss = dynamics_hat_loss[dynamics_hat_mask].mean()
else:
dynamics_hat_loss = tc.zeros(size=(1,), device=timestep.device)
elif reduction == "mean_per_sample":
                    # set loss for timestep==max_timestep to 0
dynamics_hat_loss[timestep == max_timestep] = 0.
# average per sample
dynamics_hat_loss = dynamics_hat_loss.flatten(start_dim=1).mean(dim=1)
else:
raise NotImplementedError
losses["dynamics_hat"] = dynamics_hat_loss
total_loss = total_loss + self.trainer.reconstruct_dynamics_weight * dynamics_hat_loss
infos.update(
{
# "tensor_stats/x/absmax": x.abs().max(),
# "tensor_stats/x/absmin": x.abs().max(),
# "tensor_stats/x/mean": x.mean(),
# "tensor_stats/x/absmean": x.abs().mean(),
# "tensor_stats/x/std": x.std(),
# "tensor_stats/target/absmax": target.abs().max(),
# "tensor_stats/target/absmin": target.abs().max(),
# "tensor_stats/target/mean": target.mean(),
# "tensor_stats/target/absmean": target.abs().mean(),
# "tensor_stats/target/std": target.std(),
# "tensor_stats/timestep/max": data["timestep"].max(),
# "tensor_stats/timestep/min": data["timestep"].min(),
# "tensor_stats/timestep/mean": data["timestep"].float().mean(),
# "tensor_stats/timestep/std": data["timestep"].float().std(),
# "tensor_stats/velocity/max": data["velocity"].max(),
# "tensor_stats/velocity/min": data["velocity"].min(),
# "tensor_stats/velocity/mean": data["velocity"].float().mean(),
# "tensor_stats/velocity/std": data["velocity"].float().std(),
# "tensor_stats/x_hat/absmax": model_outputs["x_hat"].abs().max(),
# "tensor_stats/x_hat/absmin": model_outputs["x_hat"].abs().min(),
# "tensor_stats/x_hat/mean": model_outputs["x_hat"].mean(),
# "tensor_stats/x_hat/absmean": model_outputs["x_hat"].abs().mean(),
# "tensor_stats/x_hat/std": model_outputs["x_hat"].abs().std(),
},
)
            # calculate degree of graph (average number of connections per node)
# TODO: degree is incorrectly calculated if num_supernodes is handled by dataset and not by collator
if self.trainer.num_supernodes is None:
infos["degree/input"] = len(data["mesh_edges"]) / len(x)
else:
infos["degree/input"] = len(data["mesh_edges"]) / (self.trainer.num_supernodes * batch_size)
return dict(total=total_loss, **losses), infos
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/lagrangian_large_t_simformer_trainer.py | src/trainers/lagrangian_large_t_simformer_trainer.py | import kappamodules.utils.tensor_cache as tc
import os
from functools import cached_property
import torch
import einops
from torch import nn
from kappadata.wrappers import ModeWrapper
from losses import loss_fn_from_kwargs
from utils.factory import create
from .base.sgd_trainer import SgdTrainer
from datasets.collators.lagrangian_simformer_collator import LagrangianSimformerCollator
from callbacks.online_callbacks.update_output_callback import UpdateOutputCallback
class LagrangianLargeTSimformerTrainer(SgdTrainer):
def __init__(
self,
loss_function,
forward_kwargs=None,
max_batch_size=None,
**kwargs
):
# automatic batchsize is not supported with mesh data
disable_gradient_accumulation = max_batch_size is None
super().__init__(
max_batch_size=max_batch_size,
disable_gradient_accumulation=disable_gradient_accumulation,
**kwargs,
)
self.loss_function = create(loss_function, loss_fn_from_kwargs, update_counter=self.update_counter)
self.forward_kwargs = forward_kwargs or {}
def get_trainer_callbacks(self, model=None):
return [
UpdateOutputCallback(
keys=["degree/input"],
every_n_updates=self.track_every_n_updates,
every_n_samples=self.track_every_n_samples,
**self.get_default_callback_kwargs(),
),
UpdateOutputCallback(
keys=["degree/input"],
**self.get_default_callback_intervals(),
**self.get_default_callback_kwargs(),
),
]
@cached_property
def input_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="x")
sample, _ = dataset[0]
        # num_input_timesteps are concatenated along the channel dimension
input_shape = list(sample.shape[1:])
input_shape[0] *= sample.size(0)
if collator is not None:
assert isinstance(collator.collator, LagrangianSimformerCollator)
assert len(input_shape) == 2
input_shape[1] = None
self.logger.info(f"input_shape: {tuple(input_shape)}")
return tuple(input_shape)
@cached_property
def output_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="target_vel_large_t")
sample, _ = dataset[0]
output_shape = list(sample.shape)[::-1]
if collator is not None:
assert isinstance(collator.collator, LagrangianSimformerCollator)
assert len(output_shape) == 2
output_shape[1] = None
self.logger.info(f"output_shape: {tuple(output_shape)}")
return tuple(output_shape)
@cached_property
def dataset_mode(self):
return "x curr_pos curr_pos_full edge_index edge_index_target timestep target_vel_large_t target_acc all_pos all_vel target_pos target_pos_encode perm"
def get_trainer_model(self, model):
return self.Model(model=model, trainer=self)
class Model(nn.Module):
def __init__(self, model, trainer):
super().__init__()
self.model = model
self.trainer = trainer
def forward(self, batch, reduction="mean"):
# prepare data
batch, ctx = batch
x = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="x", batch=batch)
x = x.to(self.model.device, non_blocking=True)
timestep = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="timestep", batch=batch)
timestep = timestep.to(self.model.device, non_blocking=True)
target_vel_large_t = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="target_vel_large_t", batch=batch)
target_vel_large_t = target_vel_large_t.to(self.model.device, non_blocking=True)
curr_pos = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="curr_pos", batch=batch)
curr_pos = curr_pos.to(self.model.device, non_blocking=True)
target_pos = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="target_pos", batch=batch)
target_pos = target_pos.to(self.model.device, non_blocking=True)
target_pos_encode = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="target_pos_encode", batch=batch)
target_pos_encode = target_pos_encode.to(self.model.device, non_blocking=True)
all_pos = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="all_pos", batch=batch)
all_pos = all_pos.to(self.model.device, non_blocking=True)
all_vel = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="all_vel", batch=batch)
all_vel = all_vel.to(self.model.device, non_blocking=True)
perm = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="perm", batch=batch)
perm = perm.to(self.model.device, non_blocking=True)
edge_index = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="edge_index", batch=batch)
edge_index = edge_index.to(self.model.device, non_blocking=True)
edge_index_target = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item="edge_index_target", batch=batch)
edge_index_target = edge_index_target.to(self.model.device, non_blocking=True)
batch_idx = ctx["batch_idx"].to(self.model.device, non_blocking=True)
unbatch_idx = ctx["unbatch_idx"].to(self.model.device, non_blocking=True)
unbatch_select = ctx["unbatch_select"].to(self.model.device, non_blocking=True)
n_input_timesteps = x.shape[1]
# Flatten input
x = einops.rearrange(
x,
"a num_input_timesteps dim -> a (num_input_timesteps dim)",
)
# Targets are predicted for all particles of all batches simultaneously
target_vel_large_t = einops.rearrange(
target_vel_large_t,
"bs n_particles b -> (bs n_particles) b",
)
# Get current position for decoding
prev_pos_decode = all_pos[:, n_input_timesteps, :, :]
# forward pass
model_outputs = self.model.forward_large_t(
x,
timestep=timestep,
curr_pos=curr_pos,
curr_pos_decode=target_pos,
prev_pos_decode=prev_pos_decode,
edge_index=edge_index,
batch_idx=batch_idx,
unbatch_idx=unbatch_idx,
unbatch_select=unbatch_select,
edge_index_target=edge_index_target,
target_pos_encode=target_pos_encode,
perm_batch=perm,
**self.trainer.forward_kwargs,
)
# next timestep loss
losses = dict(
target=self.trainer.loss_function(
prediction=model_outputs["target"],
target=target_vel_large_t,
reduction=reduction,
),
)
if "prev_target" in model_outputs:
# prev_target covers the first n_input_timesteps timesteps
target = einops.rearrange(
all_vel[:, :n_input_timesteps, :, :],
"bs time n_particles dim -> (bs n_particles) (time dim)"
)
prev_target_loss = self.trainer.loss_function(
prediction=model_outputs["prev_target"],
target=target,
reduction=reduction,
)
losses["prev_target_loss"] = prev_target_loss
if "pred_dynamics" in model_outputs:
dynamics_loss = self.trainer.loss_function(
prediction=model_outputs["pred_dynamics"],
target=model_outputs["dynamics"],
reduction='mean',
)
losses["dynamics_loss"] = dynamics_loss
total_loss = losses["target"]
if "prev_target" in model_outputs:
total_loss = total_loss + losses["prev_target_loss"]
if "pred_dynamics" in model_outputs:
total_loss = total_loss + losses["dynamics_loss"]
infos = {
# calculate degree of graph (average number of connections per node)
"degree/input": len(edge_index) / len(x)
}
return dict(total=total_loss, **losses), infos
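# A minimal sketch (shapes are assumed for illustration, not taken from the
# original file) of the two einops rearranges used in forward above: input
# timesteps are folded into the channel dimension, and per-sample particle
# targets are flattened so all particles of all samples are predicted at once.
def _demo_rearranges():
    import einops
    import torch
    x = torch.randn(5, 3, 2)  # 5 particles, 3 input timesteps, 2D
    x_flat = einops.rearrange(x, "a num_input_timesteps dim -> a (num_input_timesteps dim)")
    assert x_flat.shape == (5, 6)
    target = torch.randn(4, 5, 2)  # 4 samples, 5 particles, 2D
    target_flat = einops.rearrange(target, "bs n_particles dim -> (bs n_particles) dim")
    assert target_flat.shape == (20, 2)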
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/__init__.py | src/trainers/__init__.py | from utils.factory import instantiate
def trainer_from_kwargs(kind, **kwargs):
if "eval" in kind:
return instantiate(module_names=[f"trainers.eval.{kind}"], type_names=[kind], **kwargs)
return instantiate(module_names=[f"trainers.{kind}"], type_names=[kind.split(".")[-1]], **kwargs)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/rans_simformer_nognn_trainer.py | src/trainers/rans_simformer_nognn_trainer.py | from functools import cached_property
import torch
from kappadata.wrappers import ModeWrapper
from torch import nn
from torch_scatter import segment_csr
from callbacks.online_callbacks.update_output_callback import UpdateOutputCallback
from datasets.collators.rans_simformer_nognn_collator import RansSimformerNognnCollator
from losses import loss_fn_from_kwargs
from utils.factory import create
from .base.sgd_trainer import SgdTrainer
class RansSimformerNognnTrainer(SgdTrainer):
def __init__(self, loss_function, max_batch_size=None, **kwargs):
# automatic batchsize is not supported with mesh data
disable_gradient_accumulation = max_batch_size is None
super().__init__(
max_batch_size=max_batch_size,
disable_gradient_accumulation=disable_gradient_accumulation,
**kwargs,
)
self.loss_function = create(loss_function, loss_fn_from_kwargs, update_counter=self.update_counter)
@cached_property
def input_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="mesh_pos")
assert isinstance(collator.collator, RansSimformerNognnCollator)
mesh_pos, _ = dataset[0]
# mesh_pos has shape (num_points, ndim)
assert mesh_pos.ndim == 2 and 2 <= mesh_pos.size(1) <= 3
return None, mesh_pos.size(1)
@cached_property
def output_shape(self):
# pressure is predicted
return None, 1
@cached_property
def dataset_mode(self):
return "pressure mesh_pos query_pos"
def get_trainer_model(self, model):
return self.Model(model=model, trainer=self)
class Model(nn.Module):
def __init__(self, model, trainer):
super().__init__()
self.model = model
self.trainer = trainer
def to_device(self, item, batch):
data = ModeWrapper.get_item(mode=self.trainer.dataset_mode, item=item, batch=batch)
data = data.to(self.model.device, non_blocking=True)
return data
def prepare(self, batch):
batch, ctx = batch
return dict(
mesh_pos=self.to_device(item="mesh_pos", batch=batch),
query_pos=self.to_device(item="query_pos", batch=batch),
batch_idx=ctx["batch_idx"].to(self.model.device, non_blocking=True),
unbatch_idx=ctx["unbatch_idx"].to(self.model.device, non_blocking=True),
unbatch_select=ctx["unbatch_select"].to(self.model.device, non_blocking=True),
target=self.to_device(item="pressure", batch=batch),
)
def forward(self, batch, reduction="mean"):
data = self.prepare(batch)
target = data.pop("target")
# forward pass
model_outputs = self.model(**data)
loss = self.trainer.loss_function(
prediction=model_outputs["x_hat"],
target=target,
reduction=reduction,
)
# accumulate losses of points
if reduction == "mean_per_sample":
_, ctx = batch
query_batch_idx = ctx["query_batch_idx"].to(self.model.device, non_blocking=True)
# indptr is a tensor of indices between which to aggregate
# i.e. a tensor of [0, 2, 5] would result in [src[0] + src[1], src[2] + src[3] + src[4]]
indices, counts = query_batch_idx.unique(return_counts=True)
# first index has to be 0
padded_counts = torch.zeros(len(indices) + 1, device=counts.device, dtype=counts.dtype)
padded_counts[indices + 1] = counts
indptr = padded_counts.cumsum(dim=0)
loss = segment_csr(src=loss, indptr=indptr, reduce="mean")
return dict(total=loss, x_hat=loss), {}
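# A minimal sketch (made-up tensors; assumes torch_scatter is installed) of the
# indptr construction and segment_csr reduction used in forward above: indptr
# [0, 2, 5] averages src[0:2] for sample 0 and src[2:5] for sample 1.
def _demo_mean_per_sample():
    import torch
    from torch_scatter import segment_csr
    loss = torch.tensor([1.0, 3.0, 2.0, 4.0, 6.0])  # per-point losses
    query_batch_idx = torch.tensor([0, 0, 1, 1, 1])  # sample index of each point
    indices, counts = query_batch_idx.unique(return_counts=True)
    padded_counts = torch.zeros(len(indices) + 1, dtype=counts.dtype)
    padded_counts[indices + 1] = counts
    indptr = padded_counts.cumsum(dim=0)  # tensor([0, 2, 5])
    per_sample = segment_csr(src=loss, indptr=indptr, reduce="mean")
    assert torch.allclose(per_sample, torch.tensor([2.0, 4.0]))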
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/cfd_hybrid_trainer.py | src/trainers/cfd_hybrid_trainer.py | import torch
from functools import cached_property
import einops
from kappadata.wrappers import ModeWrapper
from torch import nn
from callbacks.online_callbacks.update_output_callback import UpdateOutputCallback
from datasets.collators.cfd_hybrid_collator import CfdHybridCollator
from losses import loss_fn_from_kwargs
from utils.factory import create
from .base.sgd_trainer import SgdTrainer
from torch_geometric.nn.pool import radius
class CfdHybridTrainer(SgdTrainer):
def __init__(
self,
loss_function,
radius_graph_r=None,
radius_graph_max_num_neighbors=None,
max_batch_size=None,
**kwargs
):
# automatic batchsize is not supported with mesh data
disable_gradient_accumulation = max_batch_size is None
super().__init__(
max_batch_size=max_batch_size,
disable_gradient_accumulation=disable_gradient_accumulation,
**kwargs,
)
self.radius_graph_r = radius_graph_r
self.radius_graph_max_num_neighbors = radius_graph_max_num_neighbors
self.loss_function = create(loss_function, loss_fn_from_kwargs, update_counter=self.update_counter)
def get_trainer_callbacks(self, model=None):
return [
UpdateOutputCallback(
keys=["degree/input"],
every_n_updates=self.track_every_n_updates,
every_n_samples=self.track_every_n_samples,
**self.get_default_callback_kwargs(),
),
UpdateOutputCallback(
keys=["degree/input"],
**self.get_default_callback_intervals(),
**self.get_default_callback_kwargs(),
),
]
@cached_property
def input_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="x")
assert dataset.root_dataset.num_query_points is not None
assert isinstance(collator.collator, CfdHybridCollator)
input_shape = dataset.getshape_x()
self.logger.info(f"input_shape: {input_shape}")
return input_shape
@cached_property
def output_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="x")
assert isinstance(collator.collator, CfdHybridCollator)
output_shape = dataset.getshape_target()
self.logger.info(f"output_shape: {output_shape}")
return output_shape
@cached_property
def dataset_mode(self):
return "x mesh_pos grid_pos query_pos mesh_to_grid_edges timestep velocity target"
def get_trainer_model(self, model):
return self.Model(model=model, trainer=self)
class Model(nn.Module):
def __init__(self, model, trainer):
super().__init__()
self.model = model
self.trainer = trainer
def to_device(self, item, batch, dataset_mode):
data = ModeWrapper.get_item(mode=dataset_mode, item=item, batch=batch)
data = data.to(self.model.device, non_blocking=True)
return data
def prepare(self, batch, dataset_mode=None):
dataset_mode = dataset_mode or self.trainer.dataset_mode
batch, ctx = batch
data = dict(
x=self.to_device(item="x", batch=batch, dataset_mode=dataset_mode),
mesh_pos=self.to_device(item="mesh_pos", batch=batch, dataset_mode=dataset_mode),
grid_pos=self.to_device(item="grid_pos", batch=batch, dataset_mode=dataset_mode),
timestep=self.to_device(item="timestep", batch=batch, dataset_mode=dataset_mode),
velocity=self.to_device(item="velocity", batch=batch, dataset_mode=dataset_mode),
query_pos=self.to_device(item="query_pos", batch=batch, dataset_mode=dataset_mode),
unbatch_idx=ctx["unbatch_idx"].to(self.model.device, non_blocking=True),
unbatch_select=ctx["unbatch_select"].to(self.model.device, non_blocking=True),
target=self.to_device(item="target", batch=batch, dataset_mode=dataset_mode),
)
mesh_to_grid_edges = ModeWrapper.get_item(item="mesh_to_grid_edges", batch=batch, mode=dataset_mode)
batch_size = len(data["timestep"])
if mesh_to_grid_edges is None:
assert len(data["grid_pos"]) % batch_size == 0
num_grid_points = len(data["grid_pos"]) // batch_size
grid_batch_idx = torch.arange(batch_size, device=self.model.device).repeat_interleave(num_grid_points)
else:
grid_batch_idx = None
# mesh_to_grid_edges
if mesh_to_grid_edges is None:
# create on GPU
assert self.trainer.radius_graph_r is not None
assert self.trainer.radius_graph_max_num_neighbors is not None
mesh_to_grid_edges = radius(
x=data["mesh_pos"],
y=data["grid_pos"],
batch_x=ctx["batch_idx"].to(self.model.device, non_blocking=True),
batch_y=grid_batch_idx,
r=self.trainer.radius_graph_r,
max_num_neighbors=self.trainer.radius_graph_max_num_neighbors,
).T
else:
assert self.trainer.radius_graph_r is None
assert self.trainer.radius_graph_max_num_neighbors is None
mesh_to_grid_edges = mesh_to_grid_edges.to(self.model.device, non_blocking=True)
data["mesh_to_grid_edges"] = mesh_to_grid_edges
return data
def forward(self, batch, reduction="mean"):
data = self.prepare(batch)
target = data.pop("target")
# forward pass
model_outputs = self.model(**data)
losses = dict(
x_hat=self.trainer.loss_function(
prediction=model_outputs["x_hat"],
target=target,
reduction=reduction,
),
)
if reduction == "mean_per_sample":
raise NotImplementedError("reduce with query_batch_idx")
# infos
infos = {"degree/input": len(data["mesh_to_grid_edges"]) / len(data["grid_pos"])}
return dict(total=losses["x_hat"], **losses), infos
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/base/sgd_trainer.py | src/trainers/base/sgd_trainer.py | import logging
from functools import partial
import kappaprofiler as kp
import torch
import torch.nn as nn
from kappadata.wrappers import KDMultiViewWrapper, XRepeatWrapper
from torch.cuda.amp import GradScaler
from torch.distributed import all_gather_object
from torch.nn.parallel import DistributedDataParallel
from callbacks import callback_from_kwargs
from callbacks.base.callback_base import CallbackBase
from callbacks.base.periodic_callback import PeriodicCallback
from callbacks.default_callbacks.copy_previous_config_callback import CopyPreviousConfigCallback
from callbacks.default_callbacks.copy_previous_summary_callback import CopyPreviousSummaryCallback
from callbacks.default_callbacks.dataset_stats_callback import DatasetStatsCallback
from callbacks.default_callbacks.eta_callback import EtaCallback
from callbacks.default_callbacks.freezer_callback import FreezerCallback
from callbacks.default_callbacks.lr_callback import LrCallback
from callbacks.default_callbacks.online_loss_callback import OnlineLossCallback
from callbacks.default_callbacks.param_count_callback import ParamCountCallback
from callbacks.default_callbacks.progress_callback import ProgressCallback
from callbacks.default_callbacks.train_time_callback import TrainTimeCallback
from distributed.config import is_distributed, get_world_size
from distributed.config import is_managed, get_rank, is_rank0
from distributed.gather import all_gather_nograd
from initializers import initializer_from_kwargs
from initializers.resume_initializer import ResumeInitializer
from providers.config_providers.base.config_provider_base import ConfigProviderBase
from providers.config_providers.noop_config_provider import NoopConfigProvider
from providers.path_provider import PathProvider
from providers.summary_providers.base.summary_provider_base import SummaryProviderBase
from providers.summary_providers.noop_summary_provider import NoopSummaryProvider
from trainers.early_stoppers import early_stopper_from_kwargs
from utils.amp_utils import get_supported_precision, get_grad_scaler_and_autocast_context
from utils.checkpoint import Checkpoint
from utils.data_container import DataContainer
from utils.factory import create
from utils.factory import create_collection
from utils.model_utils import get_paramnames_with_no_gradient
from utils.model_utils import get_trainable_param_count
from utils.seed import get_random_states
from utils.seed import set_random_states
from utils.update_counter import UpdateCounter
from .functional import (
calculate_effective_batch_size_per_device,
calculate_batch_size_and_accumulation_steps,
calculate_automatic_max_batch_size,
)
class SgdTrainer(nn.Module):
def __init__(
self,
data_container: DataContainer,
device: str,
precision,
effective_batch_size: int = None,
effective_labeled_batch_size: int = None,
max_epochs=None,
max_updates=None,
max_samples=None,
start_at_epoch=None,
stop_at_epoch=None,
stop_at_update=None,
stop_at_sample=None,
add_default_callbacks: bool = True,
add_trainer_callbacks: bool = True,
callbacks: list = None,
backup_precision: str = None,
log_every_n_epochs=None,
log_every_n_updates=None,
log_every_n_samples=None,
track_every_n_updates=50,
track_every_n_samples=None,
early_stopper=None,
exit_on_nan_loss=True,
initializer: ResumeInitializer = None,
disable_gradient_accumulation: bool = False,
max_batch_size: int = None,
sync_batchnorm: bool = True,
# find_unused_params should not be set to true if it is not needed (to avoid overhead)
# but sometimes it is required (e.g. when dynamically freezing/unfreezing parameters)
# when find_unused_params setting static_graph to true can bring speedup
find_unused_params: bool = False,
static_graph: bool = False,
use_torch_compile: bool = False,
# kwargs
main_sampler_kwargs: dict = None,
# providers
config_provider: ConfigProviderBase = None,
summary_provider: SummaryProviderBase = None,
path_provider: PathProvider = None,
**kwargs,
):
super().__init__(**kwargs)
self.logger = logging.getLogger(type(self).__name__)
self.data_container = data_container
self.config_provider = config_provider or NoopConfigProvider()
self.summary_provider = summary_provider or NoopSummaryProvider()
self.path_provider = path_provider
self.device: torch.device = torch.device(device)
if effective_batch_size is not None:
assert effective_labeled_batch_size is None
self.effective_batch_size = effective_batch_size
else:
assert "num_unlabeled_per_labeled" in main_sampler_kwargs
factor = 1 + main_sampler_kwargs["num_unlabeled_per_labeled"]
self.effective_batch_size = effective_labeled_batch_size * factor
self.effective_labeled_batch_size = effective_labeled_batch_size
self.end_checkpoint = Checkpoint(max_epochs, max_updates, max_samples)
self.stop_at_epoch = stop_at_epoch
self.stop_at_update = stop_at_update
self.stop_at_sample = stop_at_sample
self.add_default_callbacks = add_default_callbacks
self.add_trainer_callbacks = add_trainer_callbacks
self.precision = get_supported_precision(
desired_precision=precision,
backup_precision=backup_precision,
device=self.device,
)
self.logger.info(f"using precision: {self.precision} (desired={precision} backup={backup_precision})")
self.grad_scaler, self.autocast_context = get_grad_scaler_and_autocast_context(self.precision, self.device)
self.log_every_n_epochs = log_every_n_epochs
self.log_every_n_updates = log_every_n_updates
self.log_every_n_samples = log_every_n_samples
self.track_every_n_updates = track_every_n_updates
self.track_every_n_samples = track_every_n_samples
self.early_stopper = create(early_stopper, early_stopper_from_kwargs)
self.main_sampler_kwargs = main_sampler_kwargs or {}
self.train_dataset, self.main_collator = self.data_container.get_dataset("train", mode=self.dataset_mode)
self.main_sampler = self.data_container.get_main_sampler(
train_dataset=self.train_dataset,
**self.main_sampler_kwargs,
)
eff_len = self.main_sampler.effective_length
assert eff_len >= self.effective_batch_size, f"{eff_len}<{self.effective_batch_size}"
self.updates_per_epoch = int(eff_len / self.effective_batch_size)
self.max_batch_size = max_batch_size
self.disable_gradient_accumulation = disable_gradient_accumulation
self.sync_batchnorm = sync_batchnorm
self.find_unused_params = find_unused_params
self.static_graph = static_graph
self.use_torch_compile = use_torch_compile
self.exit_on_nan_loss = exit_on_nan_loss
self.initializer = create(
initializer,
initializer_from_kwargs,
path_provider=self.path_provider,
)
if self.initializer is None:
if start_at_epoch is not None:
start_epoch = start_at_epoch
start_update = self.updates_per_epoch * start_epoch
start_sample = start_update * self.effective_batch_size
else:
start_epoch = 0
start_update = 0
start_sample = 0
self.start_checkpoint = Checkpoint(epoch=start_epoch, update=start_update, sample=start_sample)
else:
assert start_at_epoch is None
self.start_checkpoint = self.initializer.get_start_checkpoint()
self._update_counter = UpdateCounter(
start_checkpoint=self.start_checkpoint,
end_checkpoint=self.end_checkpoint,
updates_per_epoch=self.updates_per_epoch,
effective_batch_size=self.effective_batch_size,
)
self.callbacks = create_collection(
callbacks,
callback_from_kwargs,
data_container=self.data_container,
config_provider=self.config_provider,
summary_provider=self.summary_provider,
path_provider=self.path_provider,
update_counter=self.update_counter,
)
# check that children only override their implementation methods
assert type(self).train == SgdTrainer.train
assert type(self).wrap_model == SgdTrainer.wrap_model
@property
def update_counter(self):
return self._update_counter
@property
def input_shape(self):
dataset, collator = self.data_container.get_dataset("train", mode="x")
sample, _ = dataset[0]
if collator is not None:
self.logger.warning(
"infering input_shape with a collator is not supported yet -> "
"collator is ignored"
)
multi_view_wrappers = [
w for w in self.train_dataset.all_wrappers
if isinstance(w, (KDMultiViewWrapper, XRepeatWrapper))
]
if len(multi_view_wrappers) > 1:
raise NotImplementedError
elif len(multi_view_wrappers) == 1:
input_shape = sample[0].shape
else:
input_shape = sample.shape
self.logger.info(f"input_shape: {tuple(input_shape)}")
return tuple(input_shape)
def get_all_callbacks(self, model=None):
# no default/trainer callbacks needed for eval runs
if self.end_checkpoint.epoch == 0 or self.end_checkpoint.update == 0 or self.end_checkpoint.sample == 0:
return self.callbacks
# add default/trainer callbacks
callbacks = []
if self.add_default_callbacks:
callbacks += self.get_default_callbacks()
if self.add_trainer_callbacks:
callbacks += self.get_trainer_callbacks(model=model)
callbacks += self.callbacks
return callbacks
@staticmethod
def get_trainer_callbacks(model=None):
return []
def get_default_callback_kwargs(self):
return dict(
data_container=self.data_container,
config_provider=self.config_provider,
summary_provider=self.summary_provider,
path_provider=self.path_provider,
update_counter=self.update_counter,
)
def get_default_callback_intervals(self):
return dict(
every_n_epochs=self.log_every_n_epochs,
every_n_updates=self.log_every_n_updates,
every_n_samples=self.log_every_n_samples,
)
def get_default_callbacks(self):
default_kwargs = self.get_default_callback_kwargs()
default_intervals = self.get_default_callback_intervals()
# statistic callbacks
default_callbacks = [
DatasetStatsCallback(**default_kwargs),
ParamCountCallback(**default_kwargs),
]
# copy config/summary/entries
default_callbacks += [
CopyPreviousConfigCallback(**default_kwargs),
# CopyPreviousEntriesCallback(**default_kwargs),
CopyPreviousSummaryCallback(**default_kwargs),
]
# add default training loggers (not needed for eval runs)
if not self.update_counter.is_finished:
# periodic callbacks
default_callbacks += [
ProgressCallback(**default_kwargs, **default_intervals),
TrainTimeCallback(**default_kwargs, **default_intervals),
OnlineLossCallback(**default_kwargs, **default_intervals, verbose=True),
]
# EtaCallback is pointless in managed runs
# - managed runs don't have an interactive console
if not is_managed() and is_rank0():
default_callbacks = [EtaCallback(**default_kwargs, **default_intervals)] + default_callbacks
default_callbacks += [
LrCallback(**default_kwargs, every_n_updates=self.track_every_n_updates),
FreezerCallback(**default_kwargs, every_n_updates=self.track_every_n_updates),
OnlineLossCallback(**default_kwargs, every_n_updates=self.track_every_n_updates, verbose=False)
]
for callback in default_callbacks:
self.logger.info(f"added default {callback}")
return default_callbacks
def _calculate_batch_size_and_accumulation_steps(self, model, ddp_model):
self.logger.info(
f"calculating batch_size and accumulation_steps "
f"(effective_batch_size={self.effective_batch_size})"
)
# calculate effective_batch_size_per_device
assert self.effective_batch_size % get_world_size() == 0, \
f"effective_batch_size ({self.effective_batch_size}) needs to be multiple of " \
f"world_size ({get_world_size()})"
effective_batch_size_per_device = calculate_effective_batch_size_per_device(self.effective_batch_size)
if model.is_batch_size_dependent:
self.logger.info("model is batch_size dependent -> disabled possible gradient accumulation")
return effective_batch_size_per_device, 1
if self.disable_gradient_accumulation:
self.logger.info(f"gradient accumulation disabled")
return effective_batch_size_per_device, 1
self.logger.info(f"effective_batch_size: {self.effective_batch_size}")
if is_distributed():
self.logger.info(f"effective_batch_size_per_device: {effective_batch_size_per_device}")
self.logger.info(f"world_size: {get_world_size()}")
if self.max_batch_size is None:
# calculate max_batch_size
self.logger.info("calculating automatic max_batch_size")
max_batch_size = calculate_automatic_max_batch_size(
train_dataset=self.train_dataset,
collator=self.main_collator,
# optim step is only taken on (iter_step + 1) % accumulation_steps == 0
train_step_fn=partial(
self.update,
model,
iter_step=0,
accumulation_steps=1,
ddp_model=ddp_model,
),
effective_batch_size_per_device=effective_batch_size_per_device,
device=model.device,
model=model,
)
self.logger.info(f"automatic max_batch_size: {max_batch_size}")
if is_distributed():
# check if all devices have the same max_batch_size
max_batch_sizes = all_gather_nograd(max_batch_size)
assert all(max_batch_size == mbs for mbs in max_batch_sizes)
else:
max_batch_size = calculate_effective_batch_size_per_device(self.max_batch_size)
self.logger.info(f"using provided max_batch_size {self.max_batch_size} ({max_batch_size} per device)")
# calculate batch_size and accumulation_steps
batch_size, accumulation_steps = calculate_batch_size_and_accumulation_steps(
effective_batch_size_per_device=effective_batch_size_per_device,
max_batch_size=max_batch_size,
)
self.logger.info(f"batch_size: {batch_size}")
self.logger.info(f"accumulation_steps: {accumulation_steps}")
return batch_size, accumulation_steps
def state_dict(self, *args, **kwargs):
state_dict = dict(state_dict=super().state_dict(*args, **kwargs))
if is_distributed():
random_states_per_device = [None for _ in range(get_world_size())]
all_gather_object(random_states_per_device, get_random_states())
else:
random_states_per_device = [get_random_states()]
callback_state_dicts = [callback.state_dict() for callback in self.callbacks]
state_dict.update(
random_states=random_states_per_device,
epoch=self.update_counter.cur_checkpoint.epoch,
update=self.update_counter.cur_checkpoint.update,
sample=self.update_counter.cur_checkpoint.sample,
callback_state_dicts=callback_state_dicts,
)
if isinstance(self.grad_scaler, GradScaler):
state_dict["grad_scaler"] = self.grad_scaler.state_dict()
return state_dict
def load_state_dict(self, state_dict, load_random_states=True):
# shallow copy
state_dict = {k: v for k, v in state_dict.items()}
# load random states
random_states = state_dict.pop("random_states")
if load_random_states:
if len(random_states) != get_world_size():
# if world_size is different than in the checkpoint the whole resuming run will not be deterministic
# so don't bother to load any random states
self.logger.warning(
f"trainer checkpoint has different world_size (ckpt_world_size={len(random_states)} "
f"world_size={get_world_size()}) -> can't load random states"
)
else:
cur_rank_random_state = random_states[get_rank()]
set_random_states(**cur_rank_random_state)
else:
self.logger.info(f"random states are NOT loaded")
# load callback state_dicts
callback_state_dicts = state_dict.pop("callback_state_dicts")
for callback, sd in zip(self.callbacks, callback_state_dicts):
callback.load_state_dict(sd)
# load grad_scaler
grad_scaler_state_dict = state_dict.pop("grad_scaler", None)
if isinstance(self.grad_scaler, GradScaler):
if grad_scaler_state_dict is None:
self.logger.warning(
f"trainer checkpoint has no grad_scaler but current trainer uses {self.precision} precision"
)
else:
self.grad_scaler.load_state_dict(grad_scaler_state_dict)
# load registered nn.Modules of trainer
return super().load_state_dict(state_dict=state_dict["state_dict"])
@property
def lr_scale_factor(self):
return self.effective_batch_size
def _prepare_model(self, model):
model = model.to(self.device)
model.initialize(lr_scale_factor=self.lr_scale_factor)
self.apply_resume_initializer(model)
return model
def apply_resume_initializer(self, model):
# initialize model to state where it was resumed from
if self.initializer is not None:
self.logger.info("------------------")
self.logger.info("loading trainer/model state for resuming")
assert isinstance(self.initializer, ResumeInitializer)
self.logger.info(
f"loading state from checkpoint {self.initializer.stage_id}/"
f"{self.initializer.stage_name}/{self.initializer.checkpoint}"
)
self.initializer.init_trainer(self)
self.initializer.init_weights(model)
self.initializer.init_optim(model)
def get_data_loader(self, periodic_callbacks, batch_size):
self.logger.info(f"initializing dataloader")
configs = []
for c in periodic_callbacks:
cur_configs, _ = c.register_sampler_configs(self)
for cur_config in cur_configs:
if hasattr(cur_config.sampler, "data_source"):
dataset_mode = cur_config.sampler.data_source.mode
else:
dataset_mode = "unknown"
self.logger.info(f"{c} registered {cur_config} dataset_mode='{dataset_mode}'")
configs += cur_configs
kwargs = {}
if self.start_checkpoint.epoch != 0:
kwargs["start_epoch"] = self.start_checkpoint.epoch
return self.data_container.get_data_loader(
main_sampler=self.main_sampler,
main_collator=self.main_collator,
batch_size=batch_size,
epochs=self.end_checkpoint.epoch,
updates=self.end_checkpoint.update,
samples=self.end_checkpoint.sample,
configs=configs,
**kwargs,
)
def wrap_model(self, model):
assert model.is_initialized, "Model needs to be initialized before DDP wrapping as DDP broadcasts params"
model = self._wrap_model(model=model)
trainer_model = self.get_trainer_model(model)
ddp_model = self.wrap_ddp(trainer_model)
ddp_model = self.wrap_compile(ddp_model)
return model, trainer_model, ddp_model
def get_trainer_model(self, model):
raise NotImplementedError
def _wrap_model(self, model):
return model
def wrap_ddp(self, model):
if is_distributed():
if get_trainable_param_count(model) > 0:
if self.find_unused_params:
self.logger.info(f"using find_unused_params=True")
if self.static_graph:
self.logger.info(f"using static_graph=True")
else:
assert not self.static_graph
model = DistributedDataParallel(
model,
find_unused_parameters=self.find_unused_params,
static_graph=self.static_graph,
)
if model.device != torch.device("cpu") and self.sync_batchnorm:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
else:
# DDP broadcasts weights from rank0 to other ranks but raises an error if no param requires grad
# workaround: temporarily unfreeze one parameter if all parameters are frozen to broadcast weights
self.logger.info(f"not wrapping into DDP (no trainable parameters) -> only broadcast parameters")
first_param = next(model.parameters())
first_param.requires_grad = True
DistributedDataParallel(model)
first_param.requires_grad = False
return model
def wrap_compile(self, ddp_model):
if not self.use_torch_compile:
self.logger.info(f"torch.compile not used (use_torch_compile == False)")
return ddp_model
if is_distributed():
if self.static_graph:
self.logger.info(f"torch.compile static_graph=True is not supported -> disable torch.compile")
return ddp_model
self.logger.info(f"wrapping model with torch.compile")
return torch.compile(ddp_model)
def before_training(self):
pass
@kp.profile
def train_model(self, model, callbacks=None):
model = self._prepare_model(model)
callbacks = callbacks or self.get_all_callbacks(model=model)
periodic_callbacks = [callback for callback in callbacks if isinstance(callback, PeriodicCallback)]
self.before_training()
model, trainer_model, ddp_model = self.wrap_model(model)
batch_size, accumulation_steps, train_batches_per_epoch = self._prepare_batch_size(model, ddp_model)
assert trainer_model.model == model
# TODO model is moved to GPU separately from trainer_model because of initializers
# -> trainer_model should be moved all at once
trainer_model = trainer_model.to(model.device)
data_loader = self.get_data_loader(periodic_callbacks=periodic_callbacks, batch_size=batch_size)
self.call_before_training(trainer_model=trainer_model, batch_size=batch_size, callbacks=callbacks)
self._train(
model=model,
trainer_model=trainer_model,
ddp_model=ddp_model,
batch_size=batch_size,
accumulation_steps=accumulation_steps,
data_loader=data_loader,
train_batches_per_epoch=train_batches_per_epoch,
periodic_callbacks=periodic_callbacks,
)
self.call_after_training(trainer_model=trainer_model, callbacks=callbacks)
def _train(
self,
model,
trainer_model,
ddp_model,
batch_size,
accumulation_steps,
data_loader,
train_batches_per_epoch,
periodic_callbacks
):
self.logger.info("------------------")
self.logger.info(f"START TRAINING")
self.logger.info("initializing dataloader workers")
with kp.named_profile("iterator"):
data_iter = iter(data_loader)
self.logger.info("initialized dataloader workers")
if self.update_counter.is_finished:
if not model.is_frozen:
self.logger.warning("model has optimizer which is not used for evaluation")
# eval run
for callback in periodic_callbacks:
callback.after_epoch(
update_counter=self.update_counter,
effective_batch_size=self.effective_batch_size,
batch_size=batch_size,
trainer=self,
model=model,
trainer_model=trainer_model,
data_iter=data_iter,
)
for callback in periodic_callbacks:
callback.after_update(
update_counter=self.update_counter,
effective_batch_size=self.effective_batch_size,
batch_size=batch_size,
trainer=self,
model=model,
trainer_model=trainer_model,
data_iter=data_iter,
)
CallbackBase.flush()
else:
# train run
is_first_update = True
while True:
iter_step = -1
data_time = 0.
update_time = 0.
while True:
# check end of epoch
remaining_batches = train_batches_per_epoch - (iter_step + 1)
if remaining_batches < accumulation_steps:
# InterleavedSampler already has the next batches preloaded -> skip them
for _ in range(remaining_batches):
_ = next(data_iter)
break
is_last_update_in_epoch = remaining_batches - accumulation_steps < accumulation_steps
for callback in periodic_callbacks:
callback.before_every_update(update_counter=self.update_counter, model=model)
for _ in range(accumulation_steps):
# load next batch
with kp.named_profile("data_loading"):
batch = next(data_iter)
iter_step += 1
if iter_step % accumulation_steps == 0:
model.optim_schedule_step()
data_time = 0.
update_time = 0.
data_time += kp.profiler.last_node.last_time
for callback in periodic_callbacks:
callback.before_every_accumulation_step(model=model)
trainer_model.train()
# update contains implicit cuda synchronization points (.detach().cpu(), .item())
with kp.named_profile("update"):
losses, update_outputs = self.update(
batch=batch,
iter_step=iter_step,
model=model,
ddp_model=ddp_model,
accumulation_steps=accumulation_steps,
periodic_callbacks=periodic_callbacks,
is_first_update=is_first_update,
)
update_time += kp.profiler.last_node.last_time
for callback in periodic_callbacks:
callback.track_after_accumulation_step(
update_counter=self.update_counter,
trainer=self,
model=model,
losses=losses,
update_outputs=update_outputs,
accumulation_steps=accumulation_steps,
)
# free references to tensors
# noinspection PyUnusedLocal
update_outputs = None
is_first_update = False
# advance counter
self.update_counter.add_samples(self.effective_batch_size)
self.update_counter.next_update()
if is_last_update_in_epoch:
self.update_counter.next_epoch()
trainer_model.eval()
times = dict(data_time=data_time, update_time=update_time)
for callback in periodic_callbacks:
callback.track_after_update_step(
update_counter=self.update_counter,
trainer=self,
model=model,
times=times,
)
for callback in periodic_callbacks:
callback.after_update(
update_counter=self.update_counter,
effective_batch_size=self.effective_batch_size,
batch_size=batch_size,
trainer=self,
model=model,
trainer_model=trainer_model,
data_iter=data_iter,
)
# check end of training
if self.update_counter.is_finished:
# skip preloaded batches after training when accumulation steps > 1
if data_loader.batch_sampler.sampler.epochs is not None:
for _ in range(remaining_batches - accumulation_steps):
_ = next(data_iter)
if data_loader.batch_sampler.sampler.samples is not None:
total_batches = int(data_loader.batch_sampler.sampler.samples / batch_size)
for _ in range(total_batches % accumulation_steps):
_ = next(data_iter)
break
# no end of epoch -> flush logs from call_after_update
if not is_last_update_in_epoch:
CallbackBase.flush()
# check update/sample based early stopping
if self.early_stopper is not None:
should_stop_after_update = self.early_stopper.should_stop_after_update(
self.update_counter.cur_checkpoint,
)
if should_stop_after_update:
return
should_stop_after_sample = self.early_stopper.should_stop_after_sample(
self.update_counter.cur_checkpoint,
effective_batch_size=self.effective_batch_size,
)
if should_stop_after_sample:
return
# update based premature stopping
if self.stop_at_update is not None:
if self.stop_at_update <= self.update_counter.update:
self.logger.info(f"reached stop_at_update (={self.stop_at_update}) -> stop training")
return
# sample based premature stopping
if self.stop_at_sample is not None:
if self.stop_at_sample <= self.update_counter.sample:
self.logger.info(f"reached stop_at_sample (={self.stop_at_sample}) -> stop training")
return
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | true |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/base/functional.py | src/trainers/base/functional.py | import logging
import einops
import torch
from torch.utils.data import DataLoader
from distributed.config import get_world_size
from utils.functional import get_powers_of_two, is_power_of_two
def calculate_effective_batch_size_per_device(effective_batch_size, world_size=None):
world_size = world_size or get_world_size()
assert effective_batch_size % world_size == 0, \
f"effective_batch_size ({effective_batch_size}) needs to be multiple of world_size ({world_size})"
return int(effective_batch_size / world_size)
def calculate_batch_size_and_accumulation_steps(effective_batch_size_per_device, max_batch_size=None):
# calculate batch_size and accumulation_steps
if max_batch_size is None:
batch_size = effective_batch_size_per_device
accumulation_steps = 1
else:
if effective_batch_size_per_device <= max_batch_size:
# fits into memory
batch_size = effective_batch_size_per_device
accumulation_steps = 1
else:
# multiple accumulation steps
msg = "effective_batch_size_per_device needs to be multiple of max_batch_size"
assert effective_batch_size_per_device % max_batch_size == 0, msg
accumulation_steps = int(effective_batch_size_per_device / max_batch_size)
batch_size = int(effective_batch_size_per_device / accumulation_steps)
return batch_size, accumulation_steps
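# A minimal worked example (sizes are made up) of the split above: an effective
# batch size of 512 on 4 devices gives 128 per device; with max_batch_size=32
# this yields batch_size=32 and accumulation_steps=4 (32 * 4 = 128).
def _demo_batch_size_split():
    per_device = calculate_effective_batch_size_per_device(512, world_size=4)
    assert per_device == 128
    batch_size, accumulation_steps = calculate_batch_size_and_accumulation_steps(
        effective_batch_size_per_device=per_device,
        max_batch_size=32,
    )
    assert (batch_size, accumulation_steps) == (32, 4)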
def calculate_automatic_max_batch_size(
train_dataset,
train_step_fn,
effective_batch_size_per_device,
device,
model,
collator=None,
):
if str(device) == "cpu":
return effective_batch_size_per_device
# batchsizes that are not a power of two are not supported
if not is_power_of_two(effective_batch_size_per_device):
return effective_batch_size_per_device
# backup state_dict (state_dict doesn't clone tensors -> call .clone on every tensor in the state dict)
model_state_dict = {k: v.clone() for k, v in model.state_dict().items()}
optim_state_dicts = {}
for name, submodel in model.submodels.items():
if submodel.optim is None:
continue
sd = submodel.optim.state_dict()
cloned = {}
for key in sd.keys():
if key == "state":
cloned["state"] = {
idx_key: {k: v.clone() if v is not None else v for k, v in idx_dict.items()}
for idx_key, idx_dict in sd["state"].items()
}
elif key == "param_groups":
cloned["param_groups"] = [{k: v for k, v in group.items()} for group in sd["param_groups"]]
elif key == "param_idx_to_name":
cloned["param_idx_to_name"] = {k: v for k, v in sd["param_idx_to_name"].items()}
else:
raise NotImplementedError
optim_state_dicts[name] = cloned
# compose batch_sizes to try (start from 2 because some models do batchnorm during training [e.g. barlow twins])
batch_sizes = get_powers_of_two(2, effective_batch_size_per_device)
# make a train_step with decreasing batch_sizes (faster when batchsize is actually correct)
# NOTE: if stochastic transforms are used, this makes runs deterministic only per device:
# one sample is loaded from the dataset with its stochastic transforms applied, and since
# those transforms are not seeded here, their random generator is advanced by this probing
sample, sample_ctx = next(iter(DataLoader(train_dataset, batch_size=1, collate_fn=collator)))
max_batch_size = 1
for batch_size in reversed(batch_sizes):
logging.info(f"trying batch_size {batch_size}")
# scale batch_size by repeating the sample
if isinstance(sample, (list, tuple)):
data = []
for item in sample:
if isinstance(item, (list, tuple)):
data.append([einops.repeat(entry, "1 ... -> bs ...", bs=batch_size) for entry in item])
else:
data.append(einops.repeat(item, "1 ... -> bs ...", bs=batch_size))
else:
data = einops.repeat(sample, "1 ... -> bs ...", bs=batch_size)
# wrap into tuple
if isinstance(data, list):
data = tuple(data)
# scale batch_size of ctx
ctx = {
k: einops.repeat(v, "1 ... -> bs ...", bs=batch_size) if torch.is_tensor(v) else v
for k, v in sample_ctx.items()
}
# try update step
try:
train_step_fn(batch=(data, ctx))
max_batch_size = batch_size
break
except RuntimeError as e:
if not str(e).startswith("CUDA out of memory"):
raise e
model.clear_buffers()
# restore state_dict
model.load_state_dict(model_state_dict)
for name, submodel in model.submodels.items():
if submodel.optim is None:
continue
submodel.optim.load_state_dict(optim_state_dicts[name])
# clear buffers if models track something during the forward pass --> e.g. NnclrQueue
model.clear_buffers()
return max_batch_size
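# A minimal sketch of the helper semantics assumed above; the real implementations
# live in utils.functional and may differ. is_power_of_two uses the usual bit
# trick, get_powers_of_two(lo, hi) enumerates the powers of two in [lo, hi].
def _assumed_is_power_of_two(n: int) -> bool:
    return n > 0 and (n & (n - 1)) == 0

def _assumed_get_powers_of_two(lo: int, hi: int) -> list:
    return [2 ** i for i in range(hi.bit_length()) if lo <= 2 ** i <= hi]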
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/base/__init__.py | src/trainers/base/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/early_stoppers/metric_early_stopper.py | src/trainers/early_stoppers/metric_early_stopper.py | from callbacks.base.callback_base import CallbackBase
from utils.infer_higher_is_better import higher_is_better_from_metric_key
from .base.early_stopper_base import EarlyStopperBase
class MetricEarlyStopper(EarlyStopperBase):
def __init__(self, metric_key, tolerance, **kwargs):
super().__init__(**kwargs)
self.metric_key = metric_key
self.higher_is_better = higher_is_better_from_metric_key(self.metric_key)
assert tolerance is not None and tolerance >= 1, "tolerance has to be >= 1"
self.tolerance = tolerance
self.tolerance_counter = 0
self.best_metric = -float("inf") if self.higher_is_better else float("inf")
def _metric_improved(self, cur_metric):
if self.higher_is_better:
return cur_metric > self.best_metric
return cur_metric < self.best_metric
def _should_stop(self):
writer = CallbackBase.log_writer_singleton
assert writer is not None
assert self.metric_key in writer.log_cache, (
f"couldn't find metric_key {self.metric_key} (valid metric_keys={writer.log_cache.keys()}) -> "
"make sure every_n_epochs/every_n_updates/every_n_samples is aligned with the corresponding callback"
)
cur_metric = writer.log_cache[self.metric_key]
if self._metric_improved(cur_metric):
self.logger.info(f"{self.metric_key} improved: {self.best_metric} --> {cur_metric}")
self.best_metric = cur_metric
self.tolerance_counter = 0
else:
self.tolerance_counter += 1
cmp_str = "<=" if self.higher_is_better else ">="
stop_training_str = " --> stop training" if self.tolerance_counter >= self.tolerance else ""
self.logger.info(
f"{self.metric_key} stagnated: {self.best_metric} {cmp_str} {cur_metric} "
f"({self.tolerance_counter}/{self.tolerance}){stop_training_str}"
)
return self.tolerance_counter >= self.tolerance
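# A minimal sketch (made-up metric stream) of the stopping rule above: with
# tolerance=2 and a higher-is-better metric, two consecutive non-improving
# evaluations trigger the stop.
def _demo_metric_tolerance():
    best, tolerance, counter = -float("inf"), 2, 0
    for cur in [0.5, 0.7, 0.6, 0.65]:  # improves, improves, stagnates, stagnates
        if cur > best:
            best, counter = cur, 0
        else:
            counter += 1
    return counter >= tolerance  # True -> stop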
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/early_stoppers/__init__.py | src/trainers/early_stoppers/__init__.py | from utils.factory import instantiate
def early_stopper_from_kwargs(kind, **kwargs):
return instantiate(module_names=[f"trainers.early_stoppers.{kind}"], type_names=[kind], **kwargs)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/trainers/early_stoppers/loss_divergence_early_stopper.py | src/trainers/early_stoppers/loss_divergence_early_stopper.py | from collections import deque
import numpy as np
from callbacks.base.callback_base import CallbackBase
from .base.early_stopper_base import EarlyStopperBase
class LossDivergenceEarlyStopper(EarlyStopperBase):
"""
stop training if loss diverges
organize losses into [...] + [reference_window] + [tolerance_window]
if avg(tolerance_window) > avg(reference_window) * tolerance_factor -> stop
"""
def __init__(self, reference_window, tolerance_window, tolerance_factor, **kwargs):
super().__init__(**kwargs)
assert reference_window is not None and 1 <= reference_window, f"reference_window < 1 ({reference_window})"
assert tolerance_window is not None and 1 <= tolerance_window, f"tolerance_window < 1 ({tolerance_window})"
assert tolerance_factor is not None and 1. <= tolerance_factor, f"tolerance_factor < 1 ({tolerance_factor})"
self.reference_window = reference_window
self.tolerance_window = tolerance_window
self.tolerance_factor = tolerance_factor
self.losses = deque([], maxlen=reference_window + tolerance_window)
self.window_size = reference_window + tolerance_window
def _should_stop(self):
writer = CallbackBase.log_writer_singleton
assert writer is not None
# track loss
loss = writer.log_cache[f"loss/online/total/{self.to_short_interval_string()}"]
assert isinstance(loss, float)
self.losses.append(loss)
# don't stop if training is just getting started
if len(self.losses) < self.window_size:
return False
window = list(self.losses)
reference_window = window[:self.reference_window]
tolerance_window = window[self.reference_window:]
assert len(tolerance_window) == self.tolerance_window
reference_mean = np.mean(reference_window)
tolerance_mean = np.mean(tolerance_window)
reference_mean_with_tolerance = reference_mean * self.tolerance_factor
if tolerance_mean >= reference_mean_with_tolerance:
self.logger.info(f"loss diverged -> stop training")
self.logger.info(f"reference_window={self.reference_window} reference_mean={reference_mean}")
self.logger.info(f"tolerance_window={self.tolerance_window} tolerance_mean={tolerance_mean}")
self.logger.info(
f"tolerance_factor={self.tolerance_factor} "
f"reference_mean_with_tolerance={reference_mean_with_tolerance}"
)
return True
return False
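# A minimal numeric sketch (made-up losses) of the divergence rule above: with
# reference_window=3, tolerance_window=2 and tolerance_factor=1.5, the losses
# [1.0, 1.0, 1.0, 2.0, 2.0] give reference mean 1.0 and tolerance mean 2.0;
# since 2.0 >= 1.0 * 1.5, training would stop.
def _demo_divergence_rule():
    import numpy as np
    losses = [1.0, 1.0, 1.0, 2.0, 2.0]
    return np.mean(losses[3:]) >= np.mean(losses[:3]) * 1.5  # True -> stop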
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |