repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/dataset/konect.py | xflow/dataset/konect.py | import os
import networkx as nx
import requests
import random
import tarfile
import ndlib.models.ModelConfig as mc
def create_folder(folder_name):
    """Create *folder_name* (and parents) if it does not already exist.

    Uses ``exist_ok=True`` so the check-then-create race (folder created by
    another process between the exists() test and makedirs()) cannot raise.
    """
    os.makedirs(folder_name, exist_ok=True)
def download_konect_dataset(url, filename):
    """Download *url* and write the body to *filename*.

    Raises:
        ValueError: if the server answers with a non-200 status.
    """
    # A timeout keeps an unresponsive mirror from hanging the caller forever.
    response = requests.get(url, timeout=60)
    if response.status_code == 200:
        with open(filename, 'wb') as f:
            f.write(response.content)
    else:
        raise ValueError(f"Failed to download the dataset from {url}")
def check_and_download(url, filename):
    """Download *url* to *filename* unless the file is already present."""
    create_folder("konect_datasets")
    if not os.path.exists(filename):
        # Interpolate the actual filename; the previous f-strings printed a
        # literal placeholder instead of the file being downloaded.
        print(f"Downloading {filename}...")
        download_konect_dataset(url, filename)
    else:
        print(f"{filename} already exists.")
def extract_tar_bz2(filename, extract_path):
    """Extract the .tar.bz2 archive *filename* into *extract_path* once.

    If *extract_path* already exists the archive is assumed to be extracted
    and nothing is done.
    """
    if not os.path.exists(extract_path):
        # Print the archive name (the old f-string had no placeholder).
        print(f"Extracting {filename}...")
        with tarfile.open(filename, 'r:bz2') as tar:
            # Use the 'data' extraction filter (Python 3.12+) to block path
            # traversal from malicious archive members; fall back on older
            # interpreters where extractall() has no filter parameter.
            try:
                tar.extractall(path=extract_path, filter="data")
            except TypeError:
                tar.extractall(path=extract_path)
    else:
        print(f"{extract_path} already exists.")
def add_edge_weights(G, min_weight, max_weight):
    """Give every edge of G a random weight in [min_weight, max_weight].

    Returns the mutated graph plus an ndlib Configuration mirroring each
    weight as the edge's 'threshold'.
    """
    config = mc.Configuration()
    for u, v in G.edges():
        w = round(random.uniform(min_weight, max_weight), 2)
        config.add_edge_configuration("threshold", (u, v), w)
        G[u][v]['weight'] = w
    return G, config
def load_graph(filename):
    """Parse a KONECT edge-list file into a weighted directed graph."""
    G = nx.DiGraph()
    with open(filename, 'rb') as fh:
        for raw in fh:
            text = raw.decode('utf-8', errors='ignore')
            if text.startswith('%'):
                continue  # KONECT header/comment line
            fields = text.strip().split()
            if len(fields) >= 2:
                G.add_edge(fields[0], fields[1])
    return add_edge_weights(G, 0.1, 0.5)
def chesapeake_bay():
    """Load the KONECT DIMACS10 Chesapeake Bay graph (downloading if needed)."""
    url = "http://www.konect.cc/files/download.tsv.dimacs10-chesapeake.tar.bz2"
    archive = "konect_datasets/dimacs10-chesapeake.tar.bz2"
    extract_dir = "konect_datasets/dimacs10-chesapeake"
    check_and_download(url, archive)
    extract_tar_bz2(archive, extract_dir)
    edge_file = os.path.join(extract_dir, "dimacs10-chesapeake/out.dimacs10-chesapeake")
    return load_graph(edge_file)
def infectious():
    """Load the KONECT SocioPatterns Infectious graph (downloading if needed)."""
    url = "http://www.konect.cc/files/download.tsv.infectious.tar.bz2"
    archive = "konect_datasets/infectious.tar.bz2"
    extract_dir = "konect_datasets/infectious"
    check_and_download(url, archive)
    extract_tar_bz2(archive, extract_dir)
    edge_file = os.path.join(extract_dir, "sociopatterns-infectious/out.sociopatterns-infectious")
    return load_graph(edge_file)
# def main():
# g_chesapeake, config_chesapeake = chesapeake_bay()
# print("Chesapeake Bay: Nodes = {}, Edges = {}".format(len(g_chesapeake.nodes()), len(g_chesapeake.edges())))
# g_infectious, config_infectious = infectious()
# print("Infectious: Nodes = {}, Edges = {}".format(len(g_infectious.nodes()), len(g_infectious.edges())))
# if __name__ == "__main__":
# main()
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/dataset/eurostat.py | xflow/dataset/eurostat.py | import pandas as pd
import networkx as nx
import requests
from io import BytesIO, StringIO
import gzip
def eurostat_road_go_ta_tg():
    """Build a directed graph from the Eurostat ``road_go_ta_tg`` dataset.

    Downloads the gzip-compressed TSV, reshapes it to long format and adds
    one weighted edge per (geo, year, value) row.

    NOTE(review): edges run from a country code ('geo') to a *year* label,
    so the result is a bipartite country->year graph, not country->country —
    confirm this modeling is intended.
    """
    # URL of the Eurostat TSV file (compressed)
    eurostat_url = "https://ec.europa.eu/eurostat/api/dissemination/sdmx/2.1/data/road_go_ta_tg/?format=TSV&compressed=true"
    # Download the TSV file (payload is gzip-compressed)
    response = requests.get(eurostat_url)
    data = gzip.decompress(response.content).decode('utf-8')
    data_io = StringIO(data)
    # Read the TSV file into a DataFrame; malformed rows are skipped
    eurostat_df = pd.read_csv(data_io, delimiter='\t', on_bad_lines='skip')
    # Inspect the dataframe to understand its structure
    print(eurostat_df.head())
    print(eurostat_df.columns)
    # The first column packs 'freq,tra_type,nst07,unit,geo' — split it out
    metadata_columns = eurostat_df.iloc[:, 0].str.split(',', expand=True)
    metadata_columns.columns = ['freq', 'tra_type', 'nst07', 'unit', 'geo']
    # Combine metadata columns with the per-year data columns
    eurostat_df = pd.concat([metadata_columns, eurostat_df.iloc[:, 1:]], axis=1)
    # Melt to long format: one (metadata..., year, value) row per cell
    eurostat_df = eurostat_df.melt(id_vars=['freq', 'tra_type', 'nst07', 'unit', 'geo'],
                                   var_name='year',
                                   value_name='value')
    # Filter out rows with missing values
    eurostat_df.dropna(subset=['value'], inplace=True)
    # Year column headers carry stray whitespace in the raw TSV; strip it
    eurostat_df['year'] = eurostat_df['year'].str.strip()
    # Display the cleaned DataFrame
    print(eurostat_df.head())
    # Initialize a directed graph
    G = nx.DiGraph()
    # Example: 'geo' as origin, 'year' as destination, 'value' as weight.
    # Duplicate (geo, year) pairs overwrite the previous edge weight.
    for index, row in eurostat_df.iterrows():
        origin = row['geo']
        destination = row['year']
        weight = row['value']
        # Add edge with weight
        G.add_edge(origin, destination, weight=weight)
    print(f"eurostat_road_go_ta_tg has {G.number_of_nodes()} nodes and {G.number_of_edges()} edges.")
    return G
# # Example usage
# if __name__ == "__main__":
# G = eurostat_road_go_ta_tg()
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/dataset/nx.py | xflow/dataset/nx.py | import networkx as nx
import random
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
def connSW(n, beta=None):
    """Connected Watts–Strogatz small-world graph with per-edge thresholds.

    Args:
        n: number of nodes (k=10 neighbors, rewire p=0.1).
        beta: optional fixed weight for every edge; if None, each edge gets a
            random weight in [0.40, 0.79].

    Returns:
        (graph, ndlib Configuration carrying each edge's 'threshold')
    """
    g = nx.connected_watts_strogatz_graph(n, 10, 0.1)
    config = mc.Configuration()
    for a, b in g.edges():
        weight = round(random.randrange(40, 80) / 100, 2)
        # Compare against None (not truthiness) so an explicit beta of 0
        # is honored instead of being silently ignored.
        if beta is not None:
            weight = beta
        g[a][b]['weight'] = weight
        config.add_edge_configuration("threshold", (a, b), weight)
    return g, config
def BA():
    """Barabási–Albert graph (1000 nodes, m=5) with random edge weights."""
    g = nx.barabasi_albert_graph(1000, 5)
    config = mc.Configuration()
    for u, v in g.edges():
        w = round(random.randrange(40, 80) / 100, 2)
        g[u][v]['weight'] = w
        config.add_edge_configuration("threshold", (u, v), w)
    return g, config
def ER():
    """Erdős–Rényi graph (5000 nodes, p=0.002), resampled until connected."""
    g = nx.erdos_renyi_graph(5000, 0.002)
    while not nx.is_connected(g):
        g = nx.erdos_renyi_graph(5000, 0.002)
    config = mc.Configuration()
    for u, v in g.edges():
        w = round(random.randrange(40, 80) / 100, 2)
        config.add_edge_configuration("threshold", (u, v), w)
        g[u][v]['weight'] = w
    return g, config
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/dataset/snap.py | xflow/dataset/snap.py | import os
import networkx as nx
import requests
import random
import ndlib.models.ModelConfig as mc
import gzip
# TODO add CAIDA
# https://snap.stanford.edu/data/as-caida.html
def create_folder(folder_name):
    """Create *folder_name* (and parents) if missing.

    ``exist_ok=True`` avoids the race between the exists() check and
    makedirs() that the previous two-step version had.
    """
    os.makedirs(folder_name, exist_ok=True)
def download_snap_dataset(url, filename):
    """Download *url* to *filename*.

    Raises:
        ValueError: on a non-200 HTTP status (consistent with the KONECT
            downloader elsewhere in this package; the old version silently
            wrote the error body to disk).
    """
    # timeout prevents an unresponsive server from hanging the caller forever
    response = requests.get(url, timeout=60)
    if response.status_code == 200:
        with open(filename, 'wb') as f:
            f.write(response.content)
    else:
        raise ValueError(f"Failed to download the dataset from {url}")
def check_and_download(url, filename):
    """Ensure *filename* exists and is a readable gzip file.

    Downloads it when absent and re-downloads once when the gzip stream is
    unreadable (truncated/corrupt).
    """
    create_folder("snap_datasets")  # Ensure the folder is created
    if not os.path.exists(filename):
        # Interpolate the actual filename; the old f-string printed a
        # literal placeholder.
        print(f"Downloading {filename}...")
        download_snap_dataset(url, filename)
    # Sanity-check the gzip header by reading one character.
    try:
        with gzip.open(filename, 'rt') as f:
            f.read(1)
    except (OSError, gzip.BadGzipFile):
        print(f"{filename} is corrupted. Re-downloading...")
        download_snap_dataset(url, filename)
def add_edge_weights(G, min_weight, max_weight):
    """Assign each edge a random weight in [min_weight, max_weight].

    Returns (G, ndlib Configuration with matching per-edge 'threshold').
    """
    config = mc.Configuration()
    for u, v in G.edges():
        w = round(random.uniform(min_weight, max_weight), 2)
        config.add_edge_configuration("threshold", (u, v), w)
        G[u][v]['weight'] = w
    return G, config
def load_graph(filename):
    """Read an edge list (plain or gzipped) and attach random edge weights."""
    graph = nx.read_edgelist(filename)
    return add_edge_weights(graph, 0.1, 0.5)
def soc_epinions1():
    """SNAP soc-Epinions1 who-trusts-whom graph with random edge weights."""
    dataset_url = "https://snap.stanford.edu/data/soc-Epinions1.txt.gz"
    local_file = "snap_datasets/soc-Epinions1.txt.gz"
    check_and_download(dataset_url, local_file)
    return load_graph(local_file)
def soc_livejournal1():
    """SNAP soc-LiveJournal1 friendship graph with random edge weights."""
    dataset_url = "https://snap.stanford.edu/data/soc-LiveJournal1.txt.gz"
    local_file = "snap_datasets/soc-LiveJournal1.txt.gz"
    check_and_download(dataset_url, local_file)
    return load_graph(local_file)
def wiki_vote():
    """SNAP wiki-Vote adminship-election graph with random edge weights."""
    dataset_url = "https://snap.stanford.edu/data/wiki-Vote.txt.gz"
    local_file = "snap_datasets/wiki-Vote.txt.gz"
    check_and_download(dataset_url, local_file)
    return load_graph(local_file)
def email_euall():
    """SNAP email-EuAll graph with random edge weights."""
    dataset_url = "https://snap.stanford.edu/data/email-EuAll.txt.gz"
    local_file = "snap_datasets/email-EuAll.txt.gz"
    check_and_download(dataset_url, local_file)
    return load_graph(local_file)
def email_enron():
    """SNAP email-Enron graph with random edge weights."""
    dataset_url = "https://snap.stanford.edu/data/email-Enron.txt.gz"
    local_file = "snap_datasets/email-Enron.txt.gz"
    check_and_download(dataset_url, local_file)
    return load_graph(local_file)
def wiki_talk():
    """SNAP wiki-Talk graph with random edge weights."""
    dataset_url = "https://snap.stanford.edu/data/wiki-Talk.txt.gz"
    local_file = "snap_datasets/wiki-Talk.txt.gz"
    check_and_download(dataset_url, local_file)
    return load_graph(local_file)
def cit_hepph():
    """SNAP cit-HepPh citation graph with random edge weights."""
    dataset_url = "https://snap.stanford.edu/data/cit-HepPh.txt.gz"
    local_file = "snap_datasets/cit-HepPh.txt.gz"
    check_and_download(dataset_url, local_file)
    return load_graph(local_file)
def cit_hepth():
    """SNAP cit-HepTh citation graph with random edge weights."""
    dataset_url = "https://snap.stanford.edu/data/cit-HepTh.txt.gz"
    local_file = "snap_datasets/cit-HepTh.txt.gz"
    check_and_download(dataset_url, local_file)
    return load_graph(local_file)
def cit_patents():
    """SNAP cit-Patents citation graph with random edge weights."""
    dataset_url = "https://snap.stanford.edu/data/cit-Patents.txt.gz"
    local_file = "snap_datasets/cit-Patents.txt.gz"
    check_and_download(dataset_url, local_file)
    return load_graph(local_file)
def preprocess_stackoverflow(filename):
    """Strip timestamps from the sx-stackoverflow temporal edge list.

    Reads the gzipped file and writes 'src dst' lines to
    snap_datasets/stackoverflow_preprocessed.txt. Lines that do not have
    exactly three whitespace-separated fields are copied through unchanged.
    """
    # Stream line-by-line instead of readlines(): the raw file is large and
    # the previous version held the entire decompressed text in memory.
    with gzip.open(filename, 'rt') as src_file, \
            open("snap_datasets/stackoverflow_preprocessed.txt", 'w') as out:
        for line in src_file:
            parts = line.split()
            if len(parts) == 3:
                src, dst, _timestamp = parts
                out.write(f"{src} {dst}\n")
            else:
                out.write(line)
def sx_stackoverflow():
    """SNAP sx-stackoverflow interaction graph (timestamps stripped first)."""
    dataset_url = "https://snap.stanford.edu/data/sx-stackoverflow.txt.gz"
    local_file = "snap_datasets/sx-stackoverflow.txt.gz"
    check_and_download(dataset_url, local_file)
    preprocess_stackoverflow(local_file)
    return load_graph("snap_datasets/stackoverflow_preprocessed.txt")
def preprocess_temporal(filename, output_filename):
    """Reduce a gzipped temporal edge list to plain 'src dst' lines.

    Lines with at least two fields keep their first two tokens; shorter
    lines are copied through unchanged.
    """
    # Stream rather than readlines() — avoids holding the whole
    # decompressed file in memory for the large SNAP temporal datasets.
    with gzip.open(filename, 'rt') as src_file, open(output_filename, 'w') as out:
        for line in src_file:
            parts = line.split()
            if len(parts) >= 2:
                src, dst = parts[:2]
                out.write(f"{src} {dst}\n")
            else:
                out.write(line)
def sx_mathoverflow():
    """SNAP sx-mathoverflow interaction graph (timestamps stripped)."""
    dataset_url = "https://snap.stanford.edu/data/sx-mathoverflow.txt.gz"
    local_file = "snap_datasets/sx-mathoverflow.txt.gz"
    check_and_download(dataset_url, local_file)
    preprocess_temporal(local_file, "snap_datasets/mathoverflow_preprocessed.txt")
    return load_graph("snap_datasets/mathoverflow_preprocessed.txt")
def sx_superuser():
    """SNAP sx-superuser interaction graph (timestamps stripped)."""
    dataset_url = "https://snap.stanford.edu/data/sx-superuser.txt.gz"
    local_file = "snap_datasets/sx-superuser.txt.gz"
    check_and_download(dataset_url, local_file)
    preprocess_temporal(local_file, "snap_datasets/superuser_preprocessed.txt")
    return load_graph("snap_datasets/superuser_preprocessed.txt")
def sx_askubuntu():
    """SNAP sx-askubuntu interaction graph (timestamps stripped)."""
    dataset_url = "https://snap.stanford.edu/data/sx-askubuntu.txt.gz"
    local_file = "snap_datasets/sx-askubuntu.txt.gz"
    check_and_download(dataset_url, local_file)
    preprocess_temporal(local_file, "snap_datasets/askubuntu_preprocessed.txt")
    return load_graph("snap_datasets/askubuntu_preprocessed.txt")
def wiki_talk_temporal():
    """SNAP wiki-talk-temporal graph (timestamps stripped)."""
    dataset_url = "https://snap.stanford.edu/data/wiki-talk-temporal.txt.gz"
    local_file = "snap_datasets/wiki-talk-temporal.txt.gz"
    check_and_download(dataset_url, local_file)
    preprocess_temporal(local_file, "snap_datasets/wiki_talk_temporal_preprocessed.txt")
    return load_graph("snap_datasets/wiki_talk_temporal_preprocessed.txt")
def email_eu_core_temporal():
    """SNAP email-Eu-core-temporal graph (timestamps stripped)."""
    dataset_url = "https://snap.stanford.edu/data/email-Eu-core-temporal.txt.gz"
    local_file = "snap_datasets/email-Eu-core-temporal.txt.gz"
    check_and_download(dataset_url, local_file)
    preprocess_temporal(local_file, "snap_datasets/email_eu_core_temporal_preprocessed.txt")
    return load_graph("snap_datasets/email_eu_core_temporal_preprocessed.txt")
def college_msg():
    """SNAP CollegeMsg message graph (timestamps stripped)."""
    dataset_url = "https://snap.stanford.edu/data/CollegeMsg.txt.gz"
    local_file = "snap_datasets/CollegeMsg.txt.gz"
    check_and_download(dataset_url, local_file)
    preprocess_temporal(local_file, "snap_datasets/CollegeMsg_preprocessed.txt")
    return load_graph("snap_datasets/CollegeMsg_preprocessed.txt")
# def main():
# g_epinions, config_epinions = soc_epinions1()
# print("Epinions: Nodes = {}, Edges = {}".format(len(g_epinions.nodes()), len(g_epinions.edges())))
# g_livejournal, config_livejournal = soc_livejournal1()
# print("LiveJournal: Nodes = {}, Edges = {}".format(len(g_livejournal.nodes()), len(g_livejournal.edges())))
# g_wiki_vote, config_wiki_vote = wiki_vote()
# print("Wiki Vote: Nodes = {}, Edges = {}".format(len(g_wiki_vote.nodes()), len(g_wiki_vote.edges())))
# g_email_euall, config_email_euall = email_euall()
# print("Email EU All: Nodes = {}, Edges = {}".format(len(g_email_euall.nodes()), len(g_email_euall.edges())))
# g_email_enron, config_email_enron = email_enron()
# print("Email Enron: Nodes = {}, Edges = {}".format(len(g_email_enron.nodes()), len(g_email_enron.edges())))
# g_wiki_talk, config_wiki_talk = wiki_talk()
# print("Wiki Talk: Nodes = {}, Edges = {}".format(len(g_wiki_talk.nodes()), len(g_wiki_talk.edges())))
# g_cit_hepph, config_cit_hepph = cit_hepph()
# print("Citations HepPh: Nodes = {}, Edges = {}".format(len(g_cit_hepph.nodes()), len(g_cit_hepph.edges())))
# g_cit_hepth, config_cit_hepth = cit_hepth()
# print("Citations HepTh: Nodes = {}, Edges = {}".format(len(g_cit_hepth.nodes()), len(g_cit_hepth.edges())))
# g_cit_patents, config_cit_patents = cit_patents()
# print("Citations Patents: Nodes = {}, Edges = {}".format(len(g_cit_patents.nodes()), len(g_cit_patents.edges())))
# g_stackoverflow, config_stackoverflow = sx_stackoverflow()
# print("Stack Overflow: Nodes = {}, Edges = {}".format(len(g_stackoverflow.nodes()), len(g_stackoverflow.edges())))
# g_mathoverflow, config_mathoverflow = sx_mathoverflow()
# print("Math Overflow: Nodes = {}, Edges = {}".format(len(g_mathoverflow.nodes()), len(g_mathoverflow.edges())))
# g_superuser, config_superuser = sx_superuser()
# print("Super User: Nodes = {}, Edges = {}".format(len(g_superuser.nodes()), len(g_superuser.edges())))
# g_askubuntu, config_askubuntu = sx_askubuntu()
# print("Ask Ubuntu: Nodes = {}, Edges = {}".format(len(g_askubuntu.nodes()), len(g_askubuntu.edges())))
# g_wiki_talk_temporal, config_wiki_talk_temporal = wiki_talk_temporal()
# print("Wiki Talk Temporal: Nodes = {}, Edges = {}".format(len(g_wiki_talk_temporal.nodes()), len(g_wiki_talk_temporal.edges())))
# g_email_eu_core_temporal, config_email_eu_core_temporal = email_eu_core_temporal()
# print("Email EU Core Temporal: Nodes = {}, Edges = {}".format(len(g_email_eu_core_temporal.nodes()), len(g_email_eu_core_temporal.edges())))
# g_college_msg, config_college_msg = college_msg()
# print("College Msg: Nodes = {}, Edges = {}".format(len(g_college_msg.nodes()), len(g_college_msg.edges())))
# if __name__ == "__main__":
# main()
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/dataset/faf.py | xflow/dataset/faf.py | import pandas as pd
import networkx as nx
import requests
from zipfile import ZipFile
from io import BytesIO
def faf5_6():
    """Build a directed freight-flow graph from ORNL's FAF5.6 dataset.

    Downloads the FAF5.6 ZIP into the working directory, reads FAF5.6.csv,
    and adds one weighted edge per row using 2017 tonnage.
    """
    # URL of the CSV file within the ZIP archive
    zip_url = "https://faf.ornl.gov/faf5/data/download_files/FAF5.6.zip"
    # Download the ZIP file
    response = requests.get(zip_url)
    zip_content = response.content
    # Save the ZIP file to disk (optional)
    with open("FAF5.6.zip", "wb") as zip_file:
        zip_file.write(zip_content)
    # Open the saved ZIP file
    with ZipFile("FAF5.6.zip", 'r') as zip_file:
        # Extract the relevant CSV file from the ZIP archive
        csv_file = zip_file.open("FAF5.6.csv")
        # Read the CSV file into a DataFrame — done inside the with-block so
        # the archive handle is still open while pandas reads from it
        faf_df = pd.read_csv(csv_file)
    # Inspect the dataframe to understand its structure
    print(faf_df.head())
    print(faf_df.columns)
    # Adjust column names based on actual data structure
    origin_col = 'dms_orig'  # Origin column
    destination_col = 'dms_dest'  # Destination column
    weight_col = 'tons_2017'  # Weight column for the year 2017
    # TODO (adjust year as needed)
    # Initialize a directed graph
    G = nx.DiGraph()
    # Add edges to the graph from the DataFrame. Repeated (origin, dest)
    # pairs overwrite the previous weight — DiGraph keeps a single edge.
    for index, row in faf_df.iterrows():
        origin = row[origin_col]
        destination = row[destination_col]
        weight = row[weight_col]
        # Add edge with weight
        G.add_edge(origin, destination, weight=weight)
    print(f"faf5_6 has {G.number_of_nodes()} nodes and {G.number_of_edges()} edges.")
    return G
# # Example usage
# G = faf5_6()
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/dataset/pyg.py | xflow/dataset/pyg.py | import networkx as nx
import numpy as np
import torch_geometric.datasets as ds
import random
import ndlib
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
import torch_geometric
from torch_geometric.datasets import Planetoid, EmailEUCore, MyketDataset, BitcoinOTC, PolBlogs, KarateClub
from torch_geometric.utils import to_networkx
print(torch_geometric.__version__)
def convert_to_graph(dataset):
    """Undirected graph built from the first sample's edge_index."""
    edge_index = dataset[0].edge_index
    return nx.from_edgelist(edge_index.numpy().T.tolist())
def convert_temporal_to_graph(dataset):
    """Directed graph from a temporal dataset's src/dst event streams."""
    graph = nx.DiGraph()  # temporal events imply direction
    sample = dataset[0]
    # Assumes the sample exposes 'src' and 'dst' tensors (JODIE-style data)
    graph.add_edges_from(zip(sample.src.numpy(), sample.dst.numpy()))
    return graph
def convert_temporal_to_graph_attr(data):
    """Directed graph from a Data object exposing src/dst or edge_index.

    Raises:
        AttributeError: when neither edge representation is present.
    """
    graph = nx.DiGraph()  # temporal data often implies direction
    if hasattr(data, 'src') and hasattr(data, 'dst'):
        edge_iter = zip(data.src.numpy(), data.dst.numpy())
    elif hasattr(data, 'edge_index') and data.edge_index is not None:
        edge_iter = zip(data.edge_index[0].numpy(), data.edge_index[1].numpy())
    else:
        raise AttributeError("The dataset does not have expected edge attributes.")
    graph.add_edges_from(edge_iter)
    return graph
def add_edge_weights(G, min_weight, max_weight):
    """Assign each edge a random weight in [min_weight, max_weight].

    Returns (G, ndlib Configuration mirroring the weights as 'threshold').
    """
    config = mc.Configuration()
    for u, v in G.edges():
        w = round(random.uniform(min_weight, max_weight), 2)
        config.add_edge_configuration("threshold", (u, v), w)
        G[u][v]['weight'] = w
    return G, config
def CiteSeer():
    """Largest connected component of Planetoid CiteSeer, randomly weighted."""
    dataset = Planetoid(root='./Planetoid', name='CiteSeer')  # Cora, CiteSeer, PubMed
    edge_list = dataset[0].edge_index.numpy().T.tolist()
    full_graph = nx.from_edgelist(edge_list)
    giant = max(nx.connected_components(full_graph), key=len)
    g = full_graph.subgraph(giant).copy()
    config = mc.Configuration()
    for u, v in g.edges():
        w = round(random.randrange(40, 80) / 100, 2)
        config.add_edge_configuration("threshold", (u, v), w)
        g[u][v]['weight'] = w
    return g, config
def PubMed():
    """Largest connected component of Planetoid PubMed, randomly weighted."""
    dataset = Planetoid(root='./Planetoid', name='PubMed')  # Cora, CiteSeer, PubMed
    edge_list = dataset[0].edge_index.numpy().T.tolist()
    full_graph = nx.from_edgelist(edge_list)
    giant = max(nx.connected_components(full_graph), key=len)
    g = full_graph.subgraph(giant).copy()
    config = mc.Configuration()
    for u, v in g.edges():
        w = round(random.randrange(40, 80) / 100, 2)
        config.add_edge_configuration("threshold", (u, v), w)
        g[u][v]['weight'] = w
    return g, config
def Cora():
    """Largest connected component of Planetoid Cora, randomly weighted."""
    dataset = Planetoid(root='./Planetoid', name='Cora')  # Cora, CiteSeer, PubMed
    edge_list = dataset[0].edge_index.numpy().T.tolist()
    full_graph = nx.from_edgelist(edge_list)
    giant = max(nx.connected_components(full_graph), key=len)
    g = full_graph.subgraph(giant).copy()
    config = mc.Configuration()
    for u, v in g.edges():
        w = round(random.randrange(40, 80) / 100, 2)
        config.add_edge_configuration("threshold", (u, v), w)
        g[u][v]['weight'] = w
    return g, config
def photo():
    """Amazon Photo co-purchase graph, integer-relabeled, randomly weighted."""
    dataset = ds.Amazon(root='./geo', name='Photo')
    edge_list = dataset[0].edge_index.numpy().T.tolist()
    raw_graph = nx.from_edgelist(edge_list)
    g = nx.convert_node_labels_to_integers(raw_graph, first_label=0, ordering='default', label_attribute=None)
    config = mc.Configuration()
    for u, v in g.edges():
        w = round(random.randrange(5, 20) / 100, 2)
        config.add_edge_configuration("threshold", (u, v), w)
        g[u][v]['weight'] = w
    return g, config
def coms():
    """Amazon Computers co-purchase graph, integer-relabeled, randomly weighted."""
    dataset = ds.Amazon(root='./geo', name='Computers')
    edge_list = dataset[0].edge_index.numpy().T.tolist()
    raw_graph = nx.from_edgelist(edge_list)
    g = nx.convert_node_labels_to_integers(raw_graph, first_label=0, ordering='default', label_attribute=None)
    config = mc.Configuration()
    for u, v in g.edges():
        w = round(random.randrange(5, 20) / 100, 2)
        config.add_edge_configuration("threshold", (u, v), w)
        g[u][v]['weight'] = w
    return g, config
def email_eu_core():
    """EmailEUCore graph relabeled to 0..n-1 with random weights in [0.1, 0.5]."""
    dataset = EmailEUCore(root='./EmailEUCore')
    graph = convert_to_graph(dataset)
    graph = nx.convert_node_labels_to_integers(graph, first_label=0)
    return add_edge_weights(graph, 0.1, 0.5)
def reddit():
    """JODIE Reddit temporal graph as a weighted DiGraph."""
    dataset = ds.JODIEDataset(root='./JODIE', name='Reddit')
    graph = convert_temporal_to_graph(dataset)
    graph = nx.convert_node_labels_to_integers(graph, first_label=0)
    return add_edge_weights(graph, 0.1, 0.5)
def last_fm():
    """JODIE LastFM temporal graph as a weighted DiGraph."""
    dataset = ds.JODIEDataset(root='./JODIE', name='LastFM')
    graph = convert_temporal_to_graph(dataset)
    graph = nx.convert_node_labels_to_integers(graph, first_label=0)
    return add_edge_weights(graph, 0.1, 0.5)
def bitcoin_otc():
    """BitcoinOTC trust graph as a weighted DiGraph.

    Raises:
        ValueError: if the dataset sample carries no edge_index.
    """
    dataset = BitcoinOTC(root='./BitcoinOTC')
    data = dataset[0]
    if data.edge_index is None:
        raise ValueError("The edge_index is None for BitcoinOTC dataset")
    graph = convert_temporal_to_graph_attr(data)
    graph = nx.convert_node_labels_to_integers(graph, first_label=0)
    return add_edge_weights(graph, 0.1, 0.5)
def polblogs():
    """PolBlogs hyperlink graph, integer-relabeled, with random edge weights.

    Returns (graph, ndlib Configuration) like the other loaders here.
    """
    dataset = PolBlogs(root='./PolBlogs')
    # (removed an unused `data = dataset[0]` local — convert_to_graph
    # indexes the dataset itself)
    G = convert_to_graph(dataset)
    G = nx.convert_node_labels_to_integers(G, first_label=0)
    G, config = add_edge_weights(G, 0.1, 0.5)
    return G, config
def myket():
    """Myket app-installation temporal graph as a weighted DiGraph."""
    dataset = MyketDataset(root='./Myket')
    graph = convert_temporal_to_graph(dataset)
    graph = nx.convert_node_labels_to_integers(graph, first_label=0)
    return add_edge_weights(graph, 0.1, 0.5)
def karate_club():
    """Zachary's karate club as an undirected, randomly weighted graph."""
    data = KarateClub()[0]
    graph = to_networkx(data, to_undirected=True)
    return add_edge_weights(graph, 0.1, 0.5)
# def main():
# # Generate and configure graphs for different datasets
# g_citeseer, config_citeseer = CiteSeer()
# g_pubmed, config_pubmed = PubMed()
# g_cora, config_cora = Cora()
# g_photo, config_photo = photo()
# g_coms, config_coms = coms()
# g_bitcoin_otc, config_bitcoin_otc = bitcoin_otc()
# g_email_eu_core, config_email_eu_core = email_eu_core()
# g_polblogs, config_polblogs = polblogs()
# g_reddit, config_reddit = reddit()
# g_last_fm, config_last_fm = last_fm()
# g_myket, config_myket = myket()
# g_karate, config_karate = karate_club()
# # Print the number of nodes and edges in each graph as a simple verification
# print("CiteSeer: Nodes = {}, Edges = {}".format(len(g_citeseer.nodes()), len(g_citeseer.edges())))
# print("PubMed: Nodes = {}, Edges = {}".format(len(g_pubmed.nodes()), len(g_pubmed.edges())))
# print("Cora: Nodes = {}, Edges = {}".format(len(g_cora.nodes()), len(g_cora.edges())))
# print("Photo: Nodes = {}, Edges = {}".format(len(g_photo.nodes()), len(g_photo.edges())))
# print("Computers: Nodes = {}, Edges = {}".format(len(g_coms.nodes()), len(g_coms.edges())))
# print("Bitcoin OTC: Nodes = {}, Edges = {}".format(len(g_bitcoin_otc.nodes()), len(g_bitcoin_otc.edges())))
# print("Email EU Core: Nodes = {}, Edges = {}".format(len(g_email_eu_core.nodes()), len(g_email_eu_core.edges())))
# print("PolBlogs: Nodes = {}, Edges = {}".format(len(g_polblogs.nodes()), len(g_polblogs.edges())))
# print("Reddit: Nodes = {}, Edges = {}".format(len(g_reddit.nodes()), len(g_reddit.edges())))
# print("LastFM: Nodes = {}, Edges = {}".format(len(g_last_fm.nodes()), len(g_last_fm.edges())))
# print("Myket: Nodes = {}, Edges = {}".format(len(g_myket.nodes()), len(g_myket.edges())))
# print("Karate Club: Nodes = {}, Edges = {}".format(len(g_karate.nodes()), len(g_karate.edges())))
# if __name__ == "__main__":
# main() | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/dataset/__init__.py | xflow/dataset/__init__.py | __all__ = ['connSW', 'BA', 'ER', 'CiteSeer', 'PubMed', 'Cora', 'photo', 'coms']
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/IBM/graph_generation.py | xflow/IBM/graph_generation.py | import networkx as nx
import torch_geometric.datasets as ds
import random
import ndlib
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
from torch_geometric.datasets import Planetoid
def connSW(n, beta=None):
    """Connected Watts–Strogatz small-world graph with per-edge thresholds.

    Args:
        n: number of nodes (k=10 neighbors, rewire p=0.1).
        beta: optional fixed weight for every edge; if None, each edge gets
            a random weight in [0.40, 0.79].

    Returns:
        (graph, ndlib Configuration carrying each edge's 'threshold')
    """
    g = nx.connected_watts_strogatz_graph(n, 10, 0.1)
    config = mc.Configuration()
    for a, b in g.edges():
        weight = round(random.randrange(40, 80) / 100, 2)
        # Compare against None (not truthiness) so an explicit beta of 0
        # is honored instead of being silently ignored.
        if beta is not None:
            weight = beta
        g[a][b]['weight'] = weight
        config.add_edge_configuration("threshold", (a, b), weight)
    return g, config
def BA():
    """Barabási–Albert graph (1000 nodes, m=5) with random edge weights."""
    g = nx.barabasi_albert_graph(1000, 5)
    config = mc.Configuration()
    for u, v in g.edges():
        w = round(random.randrange(40, 80) / 100, 2)
        g[u][v]['weight'] = w
        config.add_edge_configuration("threshold", (u, v), w)
    return g, config
def ER():
    """Erdős–Rényi graph (5000 nodes, p=0.002), resampled until connected."""
    g = nx.erdos_renyi_graph(5000, 0.002)
    while not nx.is_connected(g):
        g = nx.erdos_renyi_graph(5000, 0.002)
    config = mc.Configuration()
    for u, v in g.edges():
        w = round(random.randrange(40, 80) / 100, 2)
        config.add_edge_configuration("threshold", (u, v), w)
        g[u][v]['weight'] = w
    return g, config
def CiteSeer():
    """Largest connected component of Planetoid CiteSeer, randomly weighted."""
    dataset = Planetoid(root='./Planetoid', name='CiteSeer')  # Cora, CiteSeer, PubMed
    edge_list = dataset[0].edge_index.numpy().T.tolist()
    full_graph = nx.from_edgelist(edge_list)
    giant = max(nx.connected_components(full_graph), key=len)
    g = full_graph.subgraph(giant).copy()
    config = mc.Configuration()
    for u, v in g.edges():
        w = round(random.randrange(40, 80) / 100, 2)
        config.add_edge_configuration("threshold", (u, v), w)
        g[u][v]['weight'] = w
    return g, config
def PubMed():
    """Largest connected component of Planetoid PubMed, randomly weighted."""
    dataset = Planetoid(root='./Planetoid', name='PubMed')  # Cora, CiteSeer, PubMed
    edge_list = dataset[0].edge_index.numpy().T.tolist()
    full_graph = nx.from_edgelist(edge_list)
    giant = max(nx.connected_components(full_graph), key=len)
    g = full_graph.subgraph(giant).copy()
    config = mc.Configuration()
    for u, v in g.edges():
        w = round(random.randrange(40, 80) / 100, 2)
        config.add_edge_configuration("threshold", (u, v), w)
        g[u][v]['weight'] = w
    return g, config
def Cora():
    """Largest connected component of Planetoid Cora, randomly weighted."""
    dataset = Planetoid(root='./Planetoid', name='Cora')  # Cora, CiteSeer, PubMed
    edge_list = dataset[0].edge_index.numpy().T.tolist()
    full_graph = nx.from_edgelist(edge_list)
    giant = max(nx.connected_components(full_graph), key=len)
    g = full_graph.subgraph(giant).copy()
    config = mc.Configuration()
    for u, v in g.edges():
        w = round(random.randrange(40, 80) / 100, 2)
        config.add_edge_configuration("threshold", (u, v), w)
        g[u][v]['weight'] = w
    return g, config
def photo():
    """Amazon Photo co-purchase graph, integer-relabeled, randomly weighted."""
    dataset = ds.Amazon(root='./geo', name='Photo')
    edge_list = dataset[0].edge_index.numpy().T.tolist()
    raw_graph = nx.from_edgelist(edge_list)
    g = nx.convert_node_labels_to_integers(raw_graph, first_label=0, ordering='default', label_attribute=None)
    config = mc.Configuration()
    for u, v in g.edges():
        w = round(random.randrange(5, 20) / 100, 2)
        config.add_edge_configuration("threshold", (u, v), w)
        g[u][v]['weight'] = w
    return g, config
def coms():
dataset = ds.Amazon(root='./geo', name = 'Computers')
data = dataset[0]
edges = (data.edge_index.numpy()).T.tolist()
G = nx.from_edgelist(edges)
g = nx.convert_node_labels_to_integers(G, first_label=0, ordering='default', label_attribute=None)
config = mc.Configuration()
for a, b in g.edges():
weight = random.randrange(5,20)
weight = round(weight / 100, 2)
config.add_edge_configuration("threshold", (a, b), weight)
g[a][b]['weight'] = weight
return g, config | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/IBM/main.py | xflow/IBM/main.py | from graph_generation import *
from IBM_baselines import *
from evaluation import *
import time
# ---------------------------------------------------------------------------
# Experiment 1: fixed graph (connSW, n=1000) and fixed seed set; sweep the
# blocker budget and compare five node-selection baselines (greedy / eigen /
# degree / sigma / pi) on runtime and SI blocking effect.
# NOTE(review): connSW(1000, 0.1) passes 0.1 as connSW's `beta` edge-weight
# override, so every edge threshold is 0.1 — confirm that is intended.
# ---------------------------------------------------------------------------
print('exp 1')

g, config = connSW(1000, 0.1)
print('connSW is on.')

# 10 infection seed nodes, sampled uniformly without replacement
seeds = random.sample(list(g.nodes()), 10)
print('seeds: ', seeds)

beta = 0.1  # SI infection rate used by greedySI and blocking_effect_SI

for budget in [5, 10, 15, 20, 25, 30]:
    print('budget: ', budget)

    # greedy
    start = time.time()
    selected = greedySI(g, config, budget, seeds, beta=beta)
    end = time.time()
    print('time: ', end - start)
    print('greedy: ', selected)
    mean, std = blocking_effect_SI(g, config, seeds, selected, beta=beta)
    print('blocked: ', mean, '+-', std)

    # eigen
    start = time.time()
    selected = eigen(g, config, budget)
    end = time.time()
    print('time: ', end - start)
    print('eigen: ', selected)
    mean, std = blocking_effect_SI(g, config, seeds, selected, beta=beta)
    print('blocked: ', mean, '+-', std)

    # degree
    start = time.time()
    selected = degree(g, config, budget)
    end = time.time()
    print('time: ', end - start)
    print('degree: ', selected)
    mean, std = blocking_effect_SI(g, config, seeds, selected, beta=beta)
    print('blocked: ', mean, '+-', std)

    # sigma
    start = time.time()
    selected = sigma(g, config, budget)
    end = time.time()
    print('time: ', end - start)
    print('sigma: ', selected)
    mean, std = blocking_effect_SI(g, config, seeds, selected, beta=beta)
    print('blocked: ', mean, '+-', std)

    # pi
    start = time.time()
    selected = pi(g, config, budget)
    end = time.time()
    print('time: ', end - start)
    print('pi: ', selected)
    mean, std = blocking_effect_SI(g, config, seeds, selected, beta=beta)
    print('blocked: ', mean, '+-', std)
####################################################################################################
# ---------------------------------------------------------------------------
# Experiment 2: fresh connSW graph and seed set; sweep the SI infection
# rate `beta` and compare the same five baselines.
# NOTE(review): `budget` is never assigned in this experiment — it reuses
# the leftover value (30) from experiment 1's loop. Confirm intended.
# ---------------------------------------------------------------------------
print('exp 2')

g, config = connSW(1000, 0.1)
print('connSW is on.')

seeds = random.sample(list(g.nodes()), 10)
print('seeds: ', seeds)

for beta in [0.1,0.2,0.3,0.4,0.5]:
    print('beta: ', beta)

    # greedy
    start = time.time()
    selected = greedySI(g, config, budget, seeds, beta=beta)
    end = time.time()
    print('time: ', end - start)
    print('greedy: ', selected)
    mean, std = blocking_effect_SI(g, config, seeds, selected, beta=beta)
    print('blocked: ', mean, '+-', std)

    # eigen
    start = time.time()
    selected = eigen(g, config, budget)
    end = time.time()
    print('time: ', end - start)
    print('eigen: ', selected)
    mean, std = blocking_effect_SI(g, config, seeds, selected, beta=beta)
    print('blocked: ', mean, '+-', std)

    # degree
    start = time.time()
    selected = degree(g, config, budget)
    end = time.time()
    print('time: ', end - start)
    print('degree: ', selected)
    mean, std = blocking_effect_SI(g, config, seeds, selected, beta=beta)
    print('blocked: ', mean, '+-', std)

    # sigma
    start = time.time()
    selected = sigma(g, config, budget)
    end = time.time()
    print('time: ', end - start)
    print('sigma: ', selected)
    mean, std = blocking_effect_SI(g, config, seeds, selected, beta=beta)
    print('blocked: ', mean, '+-', std)

    # pi
    start = time.time()
    selected = pi(g, config, budget)
    end = time.time()
    print('time: ', end - start)
    print('pi: ', selected)
    mean, std = blocking_effect_SI(g, config, seeds, selected, beta=beta)
    print('blocked: ', mean, '+-', std)
####################################################################################################
# ---------------------------------------------------------------------------
# Experiment 3: sweep the graph size n with fixed budget=5 and beta=0.1;
# a fresh graph and seed set are drawn for every n.
# ---------------------------------------------------------------------------
print('exp 3')

for n in [200,400,600,800,1000]:
    g, config = connSW(n, 0.1)
    print('connSW is on.')
    print('n: ', n)

    seeds = random.sample(list(g.nodes()), 10)
    print('seeds: ', seeds)

    budget = 5
    beta = 0.1

    # greedy
    start = time.time()
    selected = greedySI(g, config, budget, seeds, beta=beta)
    end = time.time()
    print('time: ', end - start)
    print('greedy: ', selected)
    mean, std = blocking_effect_SI(g, config, seeds, selected, beta=beta)
    print('blocked: ', mean, '+-', std)

    # eigen
    start = time.time()
    selected = eigen(g, config, budget)
    end = time.time()
    print('time: ', end - start)
    print('eigen: ', selected)
    mean, std = blocking_effect_SI(g, config, seeds, selected, beta=beta)
    print('blocked: ', mean, '+-', std)

    # degree
    start = time.time()
    selected = degree(g, config, budget)
    end = time.time()
    print('time: ', end - start)
    print('degree: ', selected)
    mean, std = blocking_effect_SI(g, config, seeds, selected, beta=beta)
    print('blocked: ', mean, '+-', std)

    # sigma
    start = time.time()
    selected = sigma(g, config, budget)
    end = time.time()
    print('time: ', end - start)
    print('sigma: ', selected)
    mean, std = blocking_effect_SI(g, config, seeds, selected, beta=beta)
    print('blocked: ', mean, '+-', std)

    # pi
    start = time.time()
    selected = pi(g, config, budget)
    end = time.time()
    print('time: ', end - start)
    print('pi: ', selected)
    mean, std = blocking_effect_SI(g, config, seeds, selected, beta=beta)
    print('blocked: ', mean, '+-', std)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/IBM/evaluation.py | xflow/IBM/evaluation.py | import statistics as s
from IBM_baselines import IC, LT, SI
def blocking_effect_IC(g, config, seeds, selected_to_block):
    """Mean and stdev of how many IC infections are prevented by deleting
    `selected_to_block` from `g` before simulating (same seeds both times)."""
    blocked_graph = g.__class__()
    blocked_graph.add_nodes_from(g)
    blocked_graph.add_edges_from(g.edges)
    # carry the per-edge activation thresholds over to the copy
    for u, v in blocked_graph.edges():
        blocked_graph[u][v]['weight'] = config.config["edges"]['threshold'][(u, v)]
    for blocked in selected_to_block:
        blocked_graph.remove_node(blocked)
    spread_after = IC(blocked_graph, config, seeds)
    spread_before = IC(g, config, seeds)
    gains = [before - after for before, after in zip(spread_before, spread_after)]
    return s.mean(gains), s.stdev(gains)
def blocking_effect_SI(g, config, seeds, selected_to_block, beta=0.1):
    """Mean and stdev of the reduction in SI spread (infection rate `beta`)
    obtained by deleting `selected_to_block` from `g` before simulating."""
    blocked_graph = g.__class__()
    blocked_graph.add_nodes_from(g)
    blocked_graph.add_edges_from(g.edges)
    # carry the per-edge activation thresholds over to the copy
    for u, v in blocked_graph.edges():
        blocked_graph[u][v]['weight'] = config.config["edges"]['threshold'][(u, v)]
    for blocked in selected_to_block:
        blocked_graph.remove_node(blocked)
    spread_after = SI(blocked_graph, config, seeds, beta=beta)
    spread_before = SI(g, config, seeds, beta=beta)
    gains = [before - after for before, after in zip(spread_before, spread_after)]
    return s.mean(gains), s.stdev(gains)
def blocking_effect_LT(g, config, seeds, selected_to_block):
    # Reduction in LT spread achieved by removing `selected_to_block`:
    # simulate on a blocked copy and on the original, return per-run deltas
    # as (mean, stdev) via the fused return line below.
    g_block = g.__class__()
    g_block.add_nodes_from(g)
    g_block.add_edges_from(g.edges)
    # copy per-edge activation thresholds onto the blocked graph
    for a, b in g_block.edges():
        weight = config.config["edges"]['threshold'][(a, b)]
        g_block[a][b]['weight'] = weight
    for node in selected_to_block:
        g_block.remove_node(node)
    after = LT(g_block, config, seeds)
    before = LT(g, config, seeds)
    blocking_effect = []
    for i in range(len(before)):
        blocking_effect.append(before[i] - after[i])
return s.mean(blocking_effect), s.stdev(blocking_effect) | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/IBM/IBM_baselines.py | xflow/IBM/IBM_baselines.py | import networkx as nx
import numpy as np
import ndlib
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
import statistics as s
import random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
import random
# random
# baselines: simulation based
# greedy
def greedySI(g, config, budget, seeds, beta=0.1):
    """Greedy blocker selection under the SI model.

    Repeatedly picks the node whose removal (together with the nodes already
    selected) minimises the mean simulated SI spread from `seeds`.

    Returns:
        list of `budget` selected node ids.
    """
    selected = []
    candidates = list(g.nodes())
    for _ in range(budget):
        best_spread = float('inf')
        best_node = None
        for candidate in candidates:
            g_greedy = g.__class__()
            g_greedy.add_nodes_from(g)
            g_greedy.add_edges_from(g.edges)
            for a, b in g_greedy.edges():
                g_greedy[a][b]['weight'] = config.config["edges"]['threshold'][(a, b)]
            # Remove already-selected blockers plus the current candidate.
            # FIX: the original reused the loop variable `node` here, clobbering
            # the candidate loop variable — it only worked by accident.
            for removed_node in selected + [candidate]:
                g_greedy.remove_node(removed_node)
            result = SI(g_greedy, config, seeds, beta=beta)
            if s.mean(result) < best_spread:
                best_spread = s.mean(result)
                best_node = candidate
        selected.append(best_node)
        candidates.remove(best_node)
    return selected
def greedyIC(g, config, budget, seeds):
    """Greedy blocker selection under Independent Cascades.

    At each step picks the node whose removal (with those already selected)
    minimises the mean simulated IC spread from `seeds`.

    Returns:
        list of `budget` selected node ids.
    """
    selected = []
    candidates = list(g.nodes())
    for _ in range(budget):
        best_spread = float('inf')
        best_node = None
        for candidate in candidates:
            g_greedy = g.__class__()
            g_greedy.add_nodes_from(g)
            g_greedy.add_edges_from(g.edges)
            for a, b in g_greedy.edges():
                g_greedy[a][b]['weight'] = config.config["edges"]['threshold'][(a, b)]
            # FIX: distinct name here — the original reused `node` for this
            # loop, shadowing the candidate loop variable.
            for removed_node in selected + [candidate]:
                g_greedy.remove_node(removed_node)
            result = IC(g_greedy, config, seeds)
            if s.mean(result) < best_spread:
                best_spread = s.mean(result)
                best_node = candidate
        selected.append(best_node)
        candidates.remove(best_node)
    return selected
# baselines: proxy based
# eigen centrality
def eigen(g, config, budget):
    """Iteratively pick the `budget` nodes with highest eigenvector
    centrality, deleting each winner before recomputing."""
    work = g.__class__()
    work.add_nodes_from(g)
    work.add_edges_from(g.edges)
    for u, v in work.edges():
        work[u][v]['weight'] = config.config["edges"]['threshold'][(u, v)]
    chosen = []
    for _ in range(budget):
        centrality = nx.eigenvector_centrality_numpy(work)
        top = max(centrality, key=centrality.get)
        chosen.append(top)
        work.remove_node(top)
    return chosen
# degree
def degree(g, config, budget):
    """Iteratively pick the `budget` nodes with highest degree centrality,
    deleting each winner before recomputing."""
    work = g.__class__()
    work.add_nodes_from(g)
    work.add_edges_from(g.edges)
    for u, v in work.edges():
        work[u][v]['weight'] = config.config["edges"]['threshold'][(u, v)]
    chosen = []
    for _ in range(budget):
        centrality = nx.centrality.degree_centrality(work)
        top = max(centrality, key=centrality.get)
        chosen.append(top)
        work.remove_node(top)
    return chosen
# pi
def pi(g, config, budget):
    """Rank nodes by an approximate activation-probability ('pi') score over
    walks of length <= 5 and greedily pick `budget` of them, deleting each
    winner before re-scoring."""
    g_greedy = g.__class__()
    g_greedy.add_nodes_from(g)
    g_greedy.add_edges_from(g.edges)
    # copy per-edge thresholds onto the working graph
    for a, b in g_greedy.edges():
        weight = config.config["edges"]['threshold'][(a, b)]
        g_greedy[a][b]['weight'] = weight
    result = []
    for k in range(budget):
        n = g_greedy.number_of_nodes()
        I = np.ones((n, 1))
        C = np.ones((n, n))
        N = np.ones((n, n))
        A = nx.convert_matrix.to_numpy_array(g_greedy, nodelist=list(g_greedy.nodes()))
        # N accumulates the elementwise product of (1 - A**i) for i=1..5,
        # so P = 1 - N approximates the probability of activation over any
        # of the first 5 "hops".
        # NOTE(review): np.power is the elementwise (Hadamard) power, not a
        # matrix power — presumably intentional per the PI heuristic; confirm
        # against the reference paper.
        for i in range(5):
            B = np.power(A, i + 1)
            D = C - B
            N = np.multiply(N, D)
        P = C - N
        pi = np.matmul(P, I)
        # map scores back to node labels and take the top node
        value = {}
        for i in range(n):
            value[list(g_greedy.nodes())[i]] = pi[i, 0]
        selected = sorted(value, key=value.get, reverse=True)[0]
        result.append(selected)
        g_greedy.remove_node(selected)
    return result
# sigma
def sigma(g, config, budget):
    """Rank nodes by a truncated 'sigma' spread proxy and greedily pick
    `budget` of them, deleting each winner before re-scoring.

    Fixes: the adjacency matrix is now built from the working copy
    `g_greedy` (the original used `g`, inconsistent with `pi` once nodes
    are removed, though numerically equivalent) and the unused array `F`
    was dropped.
    """
    g_greedy = g.__class__()
    g_greedy.add_nodes_from(g)
    g_greedy.add_edges_from(g.edges)
    for a, b in g_greedy.edges():
        g_greedy[a][b]['weight'] = config.config["edges"]['threshold'][(a, b)]
    result = []
    for k in range(budget):
        n = g_greedy.number_of_nodes()
        I = np.ones((n, 1))
        A = nx.convert_matrix.to_numpy_array(g_greedy, nodelist=list(g_greedy.nodes()))
        # sigma = I + sum_{i=1..5} (A**i) @ I, with elementwise powers as in `pi`
        sigma_vec = I.copy()
        for i in range(5):
            B = np.power(A, i + 1)
            sigma_vec = sigma_vec + np.matmul(B, I)
        nodes = list(g_greedy.nodes())
        value = {nodes[i]: sigma_vec[i, 0] for i in range(n)}
        selected = max(value, key=value.get)
        result.append(selected)
        g_greedy.remove_node(selected)
    return result
def Netshield(g, config, budget):
    """NetShield node selection: greedily pick `budget` nodes whose removal
    most reduces the leading eigenvalue of the adjacency matrix.

    Fix: the first iteration previously sliced an empty sparse column block
    (`A[:, []]`) and multiplied it elementwise, which breaks; the
    shield-interaction term is now zero until nodes have been selected,
    matching the working implementation in IM_baselines.
    """
    g_ns = g.__class__()
    g_ns.add_nodes_from(g)
    g_ns.add_edges_from(g.edges)
    for a, b in g_ns.edges():
        g_ns[a][b]['weight'] = config.config["edges"]['threshold'][(a, b)]
    A = nx.adjacency_matrix(g_ns)
    lam, u = np.linalg.eigh(A.toarray())
    lam = lam[-1]                             # eigh sorts ascending; take largest
    u = np.abs(np.real(u[:, -1]).flatten())   # leading eigenvector, made nonnegative
    v = (2 * lam * np.ones(len(u))) * np.power(u, 2)
    nodes = []
    for _ in range(budget):
        if nodes:
            B = A[:, nodes].toarray()
            b = np.dot(B, u[nodes])
        else:
            # nothing picked yet: no interaction penalty
            b = np.zeros_like(u)
        score = v - 2 * b * u
        score[nodes] = -1                     # never re-pick a selected node
        nodes.append(np.argmax(score))
    return nodes
# diffusion models
def IC(g, config, seed):
    """Run 100 Monte-Carlo Independent Cascades simulations seeded at `seed`;
    return the per-run cumulative infected counts over 5 iterations."""
    # number of Monte Carlo simulations to be run for the IC model
    runs = 100
    spreads = []
    for _ in range(runs):
        model = ep.IndependentCascadesModel(g)
        run_config = mc.Configuration()
        run_config.add_model_initial_configuration('Infected', seed)
        for u, v in g.edges():
            run_config.add_edge_configuration('threshold', (u, v), config.config["edges"]['threshold'][(u, v)])
        model.set_initial_status(run_config)
        iterations = model.iteration_bunch(5)
        spreads.append(sum(step['node_count'][1] for step in iterations))
    return spreads
def LT(g, config, seed):
    """Run 100 Monte-Carlo Linear Threshold simulations seeded at `seed`;
    node thresholds are redrawn uniformly from {0.01..0.19} each run.
    Returns the infected count after 5 iterations of every run."""
    runs = 100
    spreads = []
    for _ in range(runs):
        model = ep.ThresholdModel(g)
        run_config = mc.Configuration()
        run_config.add_model_initial_configuration('Infected', seed)
        for u, v in g.edges():
            run_config.add_edge_configuration('threshold', (u, v), config.config["edges"]['threshold'][(u, v)])
        for node in g.nodes():
            run_config.add_node_configuration("threshold", node, round(random.randrange(1, 20) / 100, 2))
        model.set_initial_status(run_config)
        iterations = model.iteration_bunch(5)
        spreads.append(iterations[4]['node_count'][1])
    return spreads
def SI(g, config, seeds, rounds=100, beta=0.1):
    # Run `rounds` SI simulations with infection rate `beta`, recording the
    # infected count after 5 iterations of each run into `result`
    # (returned by the fused line below).
    result = []
    for iter in range(rounds):
        model_temp = ep.SIModel(g)
        config_temp = mc.Configuration()
        config_temp.add_model_initial_configuration('Infected', seeds)
        config_temp.add_model_parameter('beta', beta)
        # mirror the shared per-edge thresholds into this run's config
        for a, b in g.edges():
            weight = config.config["edges"]['threshold'][(a, b)]
            config_temp.add_edge_configuration('threshold', (a, b), weight)
        model_temp.set_initial_status(config_temp)
        iterations = model_temp.iteration_bunch(5)
        result.append(iterations[4]['node_count'][1])
return result | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/IM/graph_generation.py | xflow/IM/graph_generation.py | import networkx as nx
import torch_geometric.datasets as ds
import random
import ndlib
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
from torch_geometric.datasets import Planetoid
def connSW(n, beta=None):
    """Connected Watts-Strogatz small-world graph with random edge weights.

    Args:
        n: number of nodes (k=10 neighbours, rewiring p=0.1).
        beta: if given (including 0.0), used as the uniform edge weight;
            otherwise each weight is drawn uniformly from {0.40..0.79}.

    Returns:
        (graph, ndlib Configuration holding per-edge 'threshold' values)
    """
    g = nx.connected_watts_strogatz_graph(n, 10, 0.1)
    config = mc.Configuration()
    for a, b in g.edges():
        weight = round(random.randrange(40, 80) / 100, 2)
        # FIX: `is not None` instead of truthiness, so beta=0.0 is honoured
        if beta is not None:
            weight = beta
        g[a][b]['weight'] = weight
        config.add_edge_configuration("threshold", (a, b), weight)
    return g, config
def BA(n=1000, m=5):
    """Barabasi-Albert preferential-attachment graph with random edge weights
    in {0.40..0.79}. Generalized: size `n` and attachment `m` are now
    parameters whose defaults preserve the old fixed behaviour.

    Returns:
        (graph, ndlib Configuration holding per-edge 'threshold' values)
    """
    g = nx.barabasi_albert_graph(n, m)
    config = mc.Configuration()
    for a, b in g.edges():
        weight = round(random.randrange(40, 80) / 100, 2)
        g[a][b]['weight'] = weight
        config.add_edge_configuration("threshold", (a, b), weight)
    return g, config
def ER(n=5000, p=0.002):
    """Connected Erdos-Renyi graph: resample until connected, then assign
    random edge weights in {0.40..0.79}. Generalized: `n` and `p` are now
    parameters whose defaults preserve the old fixed G(5000, 0.002).

    Returns:
        (graph, ndlib Configuration holding per-edge 'threshold' values)
    """
    g = nx.erdos_renyi_graph(n, p)
    # resample until we get a connected instance
    while not nx.is_connected(g):
        g = nx.erdos_renyi_graph(n, p)
    config = mc.Configuration()
    for a, b in g.edges():
        weight = round(random.randrange(40, 80) / 100, 2)
        config.add_edge_configuration("threshold", (a, b), weight)
        g[a][b]['weight'] = weight
    return g, config
def CiteSeer():
    """Largest connected component of the Planetoid CiteSeer citation graph,
    with random edge weights in {0.40..0.79} mirrored into an ndlib config."""
    data = Planetoid(root='./Planetoid', name='CiteSeer')[0]  # Cora, CiteSeer, PubMed
    full_graph = nx.from_edgelist(data.edge_index.numpy().T.tolist())
    giant = max(nx.connected_components(full_graph), key=len)
    g = full_graph.subgraph(giant).copy()
    config = mc.Configuration()
    for u, v in g.edges():
        weight = round(random.randrange(40, 80) / 100, 2)
        config.add_edge_configuration("threshold", (u, v), weight)
        g[u][v]['weight'] = weight
    return g, config
def PubMed():
    """Largest connected component of the Planetoid PubMed citation graph,
    with random edge weights in {0.40..0.79} mirrored into an ndlib config."""
    data = Planetoid(root='./Planetoid', name='PubMed')[0]  # Cora, CiteSeer, PubMed
    full_graph = nx.from_edgelist(data.edge_index.numpy().T.tolist())
    giant = max(nx.connected_components(full_graph), key=len)
    g = full_graph.subgraph(giant).copy()
    config = mc.Configuration()
    for u, v in g.edges():
        weight = round(random.randrange(40, 80) / 100, 2)
        config.add_edge_configuration("threshold", (u, v), weight)
        g[u][v]['weight'] = weight
    return g, config
def Cora():
    """Largest connected component of the Planetoid Cora citation graph,
    with random edge weights in {0.40..0.79} mirrored into an ndlib config."""
    data = Planetoid(root='./Planetoid', name='Cora')[0]  # Cora, CiteSeer, PubMed
    full_graph = nx.from_edgelist(data.edge_index.numpy().T.tolist())
    giant = max(nx.connected_components(full_graph), key=len)
    g = full_graph.subgraph(giant).copy()
    config = mc.Configuration()
    for u, v in g.edges():
        weight = round(random.randrange(40, 80) / 100, 2)
        config.add_edge_configuration("threshold", (u, v), weight)
        g[u][v]['weight'] = weight
    return g, config
def photo():
    """Amazon-Photo co-purchase graph with nodes relabelled to 0..n-1 and
    random edge weights in {0.05..0.19} mirrored into an ndlib config."""
    data = ds.Amazon(root='./geo', name='Photo')[0]
    raw = nx.from_edgelist(data.edge_index.numpy().T.tolist())
    g = nx.convert_node_labels_to_integers(raw, first_label=0, ordering='default', label_attribute=None)
    config = mc.Configuration()
    for u, v in g.edges():
        weight = round(random.randrange(5, 20) / 100, 2)
        config.add_edge_configuration("threshold", (u, v), weight)
        g[u][v]['weight'] = weight
    return g, config
def coms():
    # Amazon-Computers co-purchase graph; nodes relabelled to 0..n-1 and each
    # edge given a random weight in {0.05..0.19}, mirrored into the ndlib
    # config (returned by the fused line below).
    dataset = ds.Amazon(root='./geo', name = 'Computers')
    data = dataset[0]
    edges = (data.edge_index.numpy()).T.tolist()
    G = nx.from_edgelist(edges)
    g = nx.convert_node_labels_to_integers(G, first_label=0, ordering='default', label_attribute=None)
    config = mc.Configuration()
    for a, b in g.edges():
        weight = random.randrange(5,20)
        weight = round(weight / 100, 2)
        config.add_edge_configuration("threshold", (a, b), weight)
        g[a][b]['weight'] = weight
return g, config | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/IM/IM_baselines.py | xflow/IM/IM_baselines.py | import networkx as nx
import numpy as np
import ndlib
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
import statistics as s
import random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
from random import uniform, seed
from collections import Counter
import operator
import copy
from random import uniform, seed
# random
# baselines: simulation based
# greedy
def greedy(g, config, budget, rounds=100, model='SI', beta=0.1):
    """Greedy influence maximisation: at each step add the candidate whose
    inclusion maximises the mean simulated spread under `model`
    ('IC', 'LT' or 'SI'). Prints and returns the selected seed set."""
    model = model.upper()
    selected = []
    candidates = list(g.nodes())
    for _ in range(budget):
        max_spread = 0
        index = -1
        for node in candidates:
            trial_seeds = selected + [node]
            if model == "IC":
                spreads = IC(g, config, trial_seeds, rounds)
            elif model == "LT":
                spreads = LT(g, config, trial_seeds, rounds)
            elif model == "SI":
                spreads = SI(g, config, trial_seeds, rounds, beta=beta)
            else:
                raise ValueError(f"Unknown model: {model}")
            avg = s.mean(spreads)
            if avg > max_spread:
                max_spread = avg
                index = node
        if index == -1:
            raise ValueError("No valid node found to select. Check the model implementation and input graph.")
        selected.append(index)
        candidates.remove(index)
    print(selected)
    return selected
def celf(g, config, budget, rounds=100, model='SI', beta=0.1):
    """CELF (lazy greedy) influence maximisation.

    Bug fixes vs. the original:
      * `s.mean(...)` was called with a second positional argument
        (`s.mean(IC(...), rounds)`) — a TypeError; `rounds` now goes to the
        diffusion function where it belongs.
      * the initial SI gains passed `beta` positionally into the `rounds`
        slot (`SI(g, config, [node], beta)`).
      * the initial IC/LT gains ignored the caller's `rounds`.
    """
    model = model.upper()
    candidates = list(g.nodes())
    # Step 1: marginal gain of every node on its own.
    if model == "IC":
        marg_gain = [s.mean(IC(g, config, [node], rounds)) for node in candidates]
    elif model == "LT":
        marg_gain = [s.mean(LT(g, config, [node], rounds)) for node in candidates]
    elif model == "SI":
        marg_gain = [s.mean(SI(g, config, [node], rounds, beta=beta)) for node in candidates]
    else:
        raise ValueError(f"Unknown model: {model}")
    # Sorted (node, gain) queue, best first.
    Q = sorted(zip(candidates, marg_gain), key=lambda x: x[1], reverse=True)
    # Select the first node and remove it from the queue.
    selected, spread, Q = [Q[0][0]], Q[0][1], Q[1:]
    # Find the next budget-1 nodes with the CELF lazy re-evaluation.
    for _ in range(budget - 1):
        check = False
        while not check:
            current = Q[0][0]
            # Re-evaluate only the current top candidate's marginal gain.
            if model == "IC":
                Q[0] = (current, s.mean(IC(g, config, selected + [current], rounds)) - spread)
            elif model == "LT":
                Q[0] = (current, s.mean(LT(g, config, selected + [current], rounds)) - spread)
            elif model == "SI":
                Q[0] = (current, s.mean(SI(g, config, selected + [current], rounds, beta=beta)) - spread)
            Q = sorted(Q, key=lambda x: x[1], reverse=True)
            # Accept once the re-evaluated node stays on top.
            check = Q[0][0] == current
        selected.append(Q[0][0])
        # NOTE(review): this sets `spread` to the last *marginal* gain (the
        # original did the same; cf. celfpp which accumulates) — kept as-is.
        spread = Q[0][1]
        Q = Q[1:]
    print(selected)
    return selected
def celfpp(g, config, budget, rounds=100, model='SI', beta=0.1):
    """CELF++-style lazy greedy influence maximisation.

    Like CELF, but additionally reuses a node's cached marginal gain without
    re-simulating when the seed set has not grown since it was computed
    (tracked via `last_seed`).
    """
    model = model.upper()
    # Compute marginal gain for each node on its own
    candidates = list(g.nodes())
    if (model == "IC"):
        marg_gain = [s.mean(IC(g, config, [node], rounds)) for node in candidates]
    elif (model == "LT"):
        marg_gain = [s.mean(LT(g, config, [node], rounds)) for node in candidates]
    elif (model == "SI"):
        marg_gain = [s.mean(SI(g, config, [node], rounds, beta)) for node in candidates]
    # Create the sorted list of nodes and their marginal gain
    Q = sorted(zip(candidates, marg_gain), key = lambda x: x[1], reverse=True)
    # Select the first node and remove from candidate list
    selected, spread, Q = [Q[0][0]], Q[0][1], Q[1:]
    # Initialize last_seed as the first selected node
    last_seed = selected[0]
    # Find the next budget-1 nodes using the CELF++ procedure
    for _ in range(budget - 1):
        check = False
        while not check:
            # Get current node and its previously computed marginal gain
            current, old_gain = Q[0][0], Q[0][1]
            # Check if the last added seed has changed
            if current != last_seed:
                # Seed set grew since this gain was cached: recompute it
                if (model == "IC"):
                    new_gain = s.mean(IC(g, config, selected+[current], rounds)) - spread
                elif (model == "LT"):
                    new_gain = s.mean(LT(g, config, selected+[current], rounds)) - spread
                elif (model == "SI"):
                    new_gain = s.mean(SI(g, config, selected+[current], rounds, beta)) - spread
            else:
                # If the last added seed hasn't changed, the marginal gain remains the same
                new_gain = old_gain
            # Update the marginal gain of the current node
            Q[0] = (current, new_gain)
            # Re-sort the list
            Q = sorted(Q, key = lambda x: x[1], reverse=True)
            # Check if previous top node stayed on top after the sort
            check = Q[0][0] == current
        # Select the next node
        selected.append(Q[0][0])
        spread += Q[0][1]  # Update the cumulative spread estimate
        last_seed = Q[0][0]  # Update the last added seed
        # Remove the selected node from the list
        Q = Q[1:]
    print(selected)
    return selected
# baselines: proxy based
# eigen centrality
def eigen(g, config, budget):
    """Pick `budget` nodes by repeatedly taking the highest eigenvector
    centrality node and deleting it before recomputing. Prints the picks."""
    work = g.__class__()
    work.add_nodes_from(g)
    work.add_edges_from(g.edges)
    for u, v in work.edges():
        work[u][v]['weight'] = config.config["edges"]['threshold'][(u, v)]
    chosen = []
    for _ in range(budget):
        centrality = nx.eigenvector_centrality_numpy(work)
        top = max(centrality, key=centrality.get)
        chosen.append(top)
        work.remove_node(top)
    print(chosen)
    return chosen
# degree
def degree(g, config, budget):
    """Pick `budget` nodes by repeatedly taking the highest degree-centrality
    node and deleting it before recomputing. Prints the picks."""
    work = g.__class__()
    work.add_nodes_from(g)
    work.add_edges_from(g.edges)
    for u, v in work.edges():
        work[u][v]['weight'] = config.config["edges"]['threshold'][(u, v)]
    chosen = []
    for _ in range(budget):
        centrality = nx.centrality.degree_centrality(work)
        top = max(centrality, key=centrality.get)
        chosen.append(top)
        work.remove_node(top)
    print(chosen)
    return chosen
# pi
def pi(g, config, budget):
    """Rank nodes by an approximate activation-probability ('pi') score over
    walks of length <= 5 and greedily pick `budget` of them, deleting each
    winner before re-scoring. Prints the picks."""
    g_greedy = g.__class__()
    g_greedy.add_nodes_from(g)
    g_greedy.add_edges_from(g.edges)
    # copy per-edge thresholds onto the working graph
    for a, b in g_greedy.edges():
        weight = config.config["edges"]['threshold'][(a, b)]
        g_greedy[a][b]['weight'] = weight
    result = []
    for k in range(budget):
        n = g_greedy.number_of_nodes()
        I = np.ones((n, 1))
        C = np.ones((n, n))
        N = np.ones((n, n))
        A = nx.convert_matrix.to_numpy_array(g_greedy, nodelist=list(g_greedy.nodes()))
        # N accumulates the elementwise product of (1 - A**i) for i=1..5;
        # P = 1 - N then approximates activation over the first 5 hops.
        # NOTE(review): np.power is elementwise, not a matrix power —
        # presumably intentional per the PI heuristic; confirm vs. the paper.
        for i in range(5):
            B = np.power(A, i + 1)
            D = C - B
            N = np.multiply(N, D)
        P = C - N
        pi = np.matmul(P, I)
        # map scores back to node labels and take the top node
        value = {}
        for i in range(n):
            value[list(g_greedy.nodes())[i]] = pi[i, 0]
        selected = sorted(value, key=value.get, reverse=True)[0]
        result.append(selected)
        g_greedy.remove_node(selected)
    print(result)
    return result
# sigma
def sigma(g, config, budget):
    """Rank nodes by a truncated 'sigma' spread proxy and greedily pick
    `budget` of them, deleting each winner before re-scoring. Prints the
    picks.

    Fixes: the adjacency matrix is now built from the working copy
    `g_greedy` (the original used `g`, inconsistent with `pi` once nodes
    are removed, though numerically equivalent) and the unused array `F`
    was dropped.
    """
    g_greedy = g.__class__()
    g_greedy.add_nodes_from(g)
    g_greedy.add_edges_from(g.edges)
    for a, b in g_greedy.edges():
        g_greedy[a][b]['weight'] = config.config["edges"]['threshold'][(a, b)]
    result = []
    for k in range(budget):
        n = g_greedy.number_of_nodes()
        I = np.ones((n, 1))
        A = nx.convert_matrix.to_numpy_array(g_greedy, nodelist=list(g_greedy.nodes()))
        # sigma = I + sum_{i=1..5} (A**i) @ I, with elementwise powers as in `pi`
        sigma_vec = I.copy()
        for i in range(5):
            B = np.power(A, i + 1)
            sigma_vec = sigma_vec + np.matmul(B, I)
        nodes = list(g_greedy.nodes())
        value = {nodes[i]: sigma_vec[i, 0] for i in range(n)}
        selected = max(value, key=value.get)
        result.append(selected)
        g_greedy.remove_node(selected)
    print(result)
    return result
def Netshield(g, config, budget):
    """NetShield: greedily select `budget` nodes maximising the 'shield
    value', i.e. whose removal most lowers the leading eigenvalue of the
    adjacency matrix."""
    g_greedy = g.__class__()
    g_greedy.add_nodes_from(g)
    g_greedy.add_edges_from(g.edges)
    for a, b in g_greedy.edges():
        weight = config.config["edges"]['threshold'][(a, b)]
        g_greedy[a][b]['weight'] = weight
    A = nx.adjacency_matrix(g_greedy)
    # Leading eigenpair of the (densified) adjacency matrix.
    lam, u = np.linalg.eigh(A.toarray())
    lam = list(lam)
    lam = lam[-1]  # eigh returns ascending eigenvalues; take the largest
    u = u[:, -1]
    u = np.abs(np.real(u).flatten())
    v = (2 * lam * np.ones(len(u))) * np.power(u, 2)
    nodes = []
    for i in range(budget):
        if nodes:
            B = A[:, nodes].toarray()
            b = np.dot(B, u[nodes])
        else:
            # nothing picked yet: shield-interaction term is zero
            b = np.zeros_like(u)
        score = v - 2 * b * u
        score[nodes] = -1  # never re-pick a selected node
        nodes.append(np.argmax(score))
    return nodes
####################################
# IMRank
# https://github.com/Braylon1002/IMTool
def IMRank(g, config, budget):
    """
    IMRank algorithm to rank the nodes based on their influence.

    Iterates LFA until the influence ranking stops changing, then returns
    the top-`budget` node indices.
    """
    # Obtain adjacency matrix from the graph
    adjacency_matrix = nx.adjacency_matrix(g).todense()
    # Row-normalise the adjacency matrix so each row sums to 1
    row_sums = adjacency_matrix.sum(axis=1)
    # Check for zero entries in row_sums (which could correspond to isolated nodes)
    # and replace them with 1 to prevent division by zero errors
    row_sums[row_sums == 0] = 1
    adjacency_matrix = adjacency_matrix / row_sums
    # NOTE(review): `todense()` yields np.matrix, whose chained indexing
    # (`matrix[j][i]` inside LFA) behaves differently from ndarray indexing —
    # verify LFA receives the scalar it expects here.
    start = time.perf_counter()
    t = 0
    r0 = [i for i in range(len(adjacency_matrix))]
    r = [0 for i in range(len(adjacency_matrix))]
    # Loop until the ranks converge
    while True:
        t = t + 1
        r = LFA(adjacency_matrix)
        r = np.argsort(-np.array(r))  # node indices, descending by score
        if operator.eq(list(r0), list(r)):
            break
        r0 = copy.copy(r)
    # Select top nodes up to the budget
    selected = r[:budget].tolist()
    print(selected)
    return selected
# baselines: sketch based
#RIS
# https://github.com/Braylon1002/IMTool
def RIS(g, config, budget, rounds=100):
    """Reverse Influence Sampling: repeatedly take the node appearing in the
    most random reverse-reachable sets, drop the sets it covers, and top the
    pool back up to `rounds` sets each step."""
    # NOTE(review): relies on a helper `get_RRS(g, config)` that is not
    # defined in this module — confirm it is provided elsewhere before use.
    # Generate `rounds` RRSs
    R = [get_RRS(g, config) for _ in range(rounds)]
    selected = []
    for _ in range(budget):
        # Collect all nodes from all RRSs
        flat_map = [item for subset in R for item in subset]
        # Only proceed if there are nodes in the flat_map
        if flat_map:
            seed = Counter(flat_map).most_common()[0][0]
            selected.append(seed)
            # Discard every RR set already covered by the chosen seed
            R = [rrs for rrs in R if seed not in rrs]
            # For every removed RRS, generate a new one
            while len(R) < rounds:
                R.append(get_RRS(g, config))
    print(selected)
    return (selected)
def LFA(matrix):
    """
    Linear Feedback Algorithm to update the ranks of the nodes.
    """
    n = len(matrix)
    Mr = [1 for _ in range(n)]   # current influence estimate per node
    Mr_next = Mr.copy()
    # Back-propagate influence from the last column towards the first.
    for i_ in range(1, n):
        i = n - i_
        for j in range(0, i + 1):
            Mr_next[j] = Mr_next[j] + matrix[j][i] * Mr[i]
            # NOTE(review): indentation in the extracted source was ambiguous;
            # this in-loop rescaling of Mr_next[i] follows the upstream IMTool
            # code — confirm it belongs inside the j-loop.
            Mr_next[i] = (1 - matrix[j][i]) * Mr_next[i]
        Mr = Mr_next.copy()
    return Mr
############### IMM ################
# https://github.com/snowgy/Influence_Maximization/blob/master/IMP.py
import torch
import random
import time
import sys
import math
def sampling(epsoid, l, graph, node_num, seed_size, model):
    # IMM sampling phase: estimate a lower bound LB on the optimal influence
    # and generate enough reverse-reachable (RR) sets for the (epsoid, l)
    # approximation guarantee. Returns the list of RR sets.
    R = []
    LB = 1
    n = node_num
    k = seed_size
    epsoid_p = epsoid * math.sqrt(2)
    # Phase 1: halve the guess x of OPT each round until the coverage of a
    # greedy selection certifies the lower bound.
    for i in range(1, int(math.log2(n-1))+1):
        s = time.time()
        x = n/(math.pow(2, i))
        # theta: number of RR sets required at this guess of OPT (= x)
        lambda_p = ((2+2*epsoid_p/3)*(logcnk(n, k) + l*math.log(n) + math.log(math.log2(n)))*n)/pow(epsoid_p, 2)
        theta = lambda_p/x
        for _ in range(int(theta) - len(R)):
            v = random.randint(0, node_num - 1)
            rr = generate_rr(v, graph, node_num, model)
            R.append(rr)
        end = time.time()
        print('time to find rr', end - s)
        start = time.time()
        Si, f = node_selection(R, k, node_num)
        print(f)
        end = time.time()
        print('node selection time', time.time() - start)
        # f is the covered fraction; stop once it certifies the bound.
        if n * f >= (1 + epsoid_p) * x:
            LB = n * f / (1 + epsoid_p)
            break
    # Phase 2: top R up to the final theta derived from LB.
    alpha = math.sqrt(l * math.log(n) + math.log(2))
    beta = math.sqrt((1 - 1 / math.e) * (logcnk(n, k) + l * math.log(n) + math.log(2)))
    lambda_aster = 2 * n * pow(((1 - 1 / math.e) * alpha + beta), 2) * pow(epsoid, -2)
    theta = lambda_aster / LB
    length_r = len(R)
    diff = int(theta - length_r)
    if diff > 0:
        for _ in range(diff):
            v = random.randint(0, node_num - 1)
            rr = generate_rr(v, graph, node_num, model)
            R.append(rr)
    return R
def generate_rr(v, graph, node_num, model):
    """Generate one reverse-reachable set rooted at `v` under the given
    diffusion model ('IC', 'LT' or 'SI'); unknown models yield None."""
    dispatch = {
        'IC': generate_rr_ic,
        'LT': generate_rr_lt,
        'SI': generate_rr_si,
    }
    handler = dispatch.get(model)
    if handler is None:
        return None  # matches the original implicit None for unknown models
    return handler(v, graph)
def node_selection(R, k, node_num):
    """Greedy max-coverage over the RR-set collection `R`.

    Picks, k times, the node covering the most not-yet-covered RR sets.
    Returns (selected nodes, fraction of RR sets covered).
    """
    coverage = [0] * node_num   # live RR sets containing each node
    containing = {}             # node -> indices of RR sets containing it
    for idx, rr_set in enumerate(R):
        for member in rr_set:
            coverage[member] += 1
            containing.setdefault(member, []).append(idx)
    chosen = []
    covered = 0
    for _ in range(k):
        winner = coverage.index(max(coverage))
        chosen.append(winner)
        covered += len(containing[winner])
        # Retire every RR set the winner covers.
        for idx in list(containing[winner]):
            for member in R[idx]:
                coverage[member] -= 1
                containing[member].remove(idx)
    return chosen, covered / len(R)
def generate_rr_ic(node, graph):
    """BFS-style IC sampling: starting from `node`, each edge fires
    independently with probability equal to its 'weight' attribute
    (default 1.0). Returns the list of reached nodes."""
    reached = [node]
    frontier = [node]
    while frontier:
        next_frontier = []
        for source in frontier:
            for neighbor in graph.neighbors(source):
                prob = graph.edges[source, neighbor].get('weight', 1.0)
                # short-circuit keeps the RNG draw order identical to the
                # original: no draw for already-reached neighbours
                if neighbor not in reached and random.random() < prob:
                    reached.append(neighbor)
                    next_frontier.append(neighbor)
        frontier = next_frontier
    return reached
def generate_rr_lt(node, graph):
    """LT-style random walk: repeatedly hop to one uniformly chosen
    neighbour, stopping at a dead end or upon revisiting a node.
    Returns the visited nodes in order."""
    visited = [node]
    current = node
    while current != -1:
        neighbors = list(graph.neighbors(current))
        if not neighbors:
            break
        step = random.sample(neighbors, 1)[0]
        if step in visited:
            current = -1          # revisit: walk terminates
        else:
            visited.append(step)
            current = step
    return visited
def generate_rr_si(node, graph):
    """SI sampling degenerates to plain BFS reachability (every contact
    eventually infects); returns all nodes reachable from `node`."""
    reached = [node]
    frontier = [node]
    while frontier:
        next_frontier = []
        for source in frontier:
            for neighbor in graph.neighbors(source):
                if neighbor not in reached:
                    reached.append(neighbor)
                    next_frontier.append(neighbor)
        frontier = next_frontier
    return reached
def logcnk(n, k):
    """Natural log of the binomial coefficient C(n, k), computed entirely in
    log space to avoid overflow for large n."""
    log_c = 0
    # log of n! / (n-k)!
    for value in range(n - k + 1, n + 1):
        log_c += math.log(value)
    # minus log of k!
    for value in range(1, k + 1):
        log_c -= math.log(value)
    return log_c
def IMM(graph, config, seed_size, model):
    """IMM seed selection: sample reverse-reachable sets, then greedily pick
    the `seed_size` nodes covering the most of them."""
    model = model.upper()
    epsoid = 0.5
    n = graph.number_of_nodes()
    # scale l so the approximation guarantee holds with probability 1 - 1/n
    l = 1 * (1 + math.log(2) / math.log(n))
    R = sampling(epsoid, l, graph, n, seed_size, model)
    seeds, _coverage = node_selection(R, seed_size, n)
    return seeds
####################
# diffusion models
def IC(g, config, seed, rounds=100):
    """Run `rounds` Monte-Carlo Independent Cascades simulations seeded at
    `seed`; return the cumulative infected count over 5 iterations per run."""
    spreads = []
    for _ in range(rounds):
        model = ep.IndependentCascadesModel(g)
        run_config = mc.Configuration()
        run_config.add_model_initial_configuration('Infected', seed)
        for u, v in g.edges():
            run_config.add_edge_configuration('threshold', (u, v), config.config["edges"]['threshold'][(u, v)])
        model.set_initial_status(run_config)
        iterations = model.iteration_bunch(5)
        spreads.append(sum(step['node_count'][1] for step in iterations))
    return spreads
def LT(g, config, seed, rounds=100):
    """Run `rounds` Monte-Carlo Linear Threshold simulations seeded at
    `seed`; node thresholds are redrawn uniformly from {0.01..0.19} each run.
    Returns the infected count after 5 iterations of every run."""
    spreads = []
    for _ in range(rounds):
        model = ep.ThresholdModel(g)
        run_config = mc.Configuration()
        run_config.add_model_initial_configuration('Infected', seed)
        for u, v in g.edges():
            run_config.add_edge_configuration('threshold', (u, v), config.config["edges"]['threshold'][(u, v)])
        for node in g.nodes():
            run_config.add_node_configuration("threshold", node, round(random.randrange(1, 20) / 100, 2))
        model.set_initial_status(run_config)
        iterations = model.iteration_bunch(5)
        spreads.append(iterations[4]['node_count'][1])
    return spreads
# Zonghan's code
def SI(g, config, seeds, rounds=100, beta=0.1):
    # Run `rounds` SI simulations with infection rate `beta`, recording the
    # infected count after 5 iterations of each run into `result`
    # (returned by the fused line below).
    result = []
    for iter in range(rounds):
        model_temp = ep.SIModel(g)
        config_temp = mc.Configuration()
        config_temp.add_model_initial_configuration('Infected', seeds)
        config_temp.add_model_parameter('beta', beta)
        # mirror the shared per-edge thresholds into this run's config
        for a, b in g.edges():
            weight = config.config["edges"]['threshold'][(a, b)]
            config_temp.add_edge_configuration('threshold', (a, b), weight)
        model_temp.set_initial_status(config_temp)
        iterations = model_temp.iteration_bunch(5)
        result.append(iterations[4]['node_count'][1])
return result
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/IM/main.py | xflow/IM/main.py | import networkx as nx
from time import time
from graph_generation import Cora, CiteSeer, PubMed, connSW, ER, coms, photo
from IM_baselines import eigen, degree, pi, sigma, greedy, celf, celfpp, IMRank, RIS
from evaluation import effectSI
def analyze(seed, beta, size):
    """Benchmark a seed-selection baseline on a connected small-world graph.

    Args:
        seed: seed-set size (budget) passed to the selection method.
        beta: SI infection rate; also used as the uniform edge weight.
        size: number of nodes in the generated graph.

    Prints the wall-clock selection time and the simulated influence effect
    (mean +- stdev) for the `pi` proxy baseline. The other baselines
    (degree, eigen, RIS, celfpp, IMRank, IMM) previously kept here as
    commented-out blocks can be benchmarked identically by swapping in the
    corresponding selector from IM_baselines.
    """
    g, config = connSW(size, beta)
    print('beta', beta)
    print('seed', seed)
    print('size', size)
    print('------------------------------------------------')
    print('pi')
    start = time()
    # FIX: renamed from `set`, which shadowed the builtin
    selected = pi(g, config, seed)
    end = time()
    print("time: ", end - start)
    ie, var = effectSI(g, config, selected, beta)
    print('IE:', ie, " +_ ", var)
# for chart 1
print("seed = [5, 10, 15, 20, 25, 30]; beta = 0.1; size = 1000")
analyze(5, 0.1, 1000)
analyze(10, 0.1, 1000)
analyze(15, 0.1, 1000)
analyze(20, 0.1, 1000)
analyze(25, 0.1, 1000)
analyze(30, 0.1, 1000)
# for chart 2
print("seed = 5; beta = [0.1, 0.2, 0.3, 0.4, 0.5]; size = 1000")
analyze(5, 0.1, 1000)
analyze(5, 0.2, 1000)
analyze(5, 0.3, 1000)
analyze(5, 0.4, 1000)
analyze(5, 0.5, 1000)
# # for chart 3
print("seed = 5; beta = 0.1; size = [200, 400, 600, 800, 1000]")
analyze(5, 0.1, 200)
analyze(5, 0.1, 400)
analyze(5, 0.1, 600)
analyze(5, 0.1, 800)
analyze(5, 0.1, 1000)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/IM/evaluation.py | xflow/IM/evaluation.py | import networkx as nx
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
from torch_geometric.nn.inits import reset
import random
import numpy as np
from torch_geometric import utils
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
import statistics as s
def effectIC(g, config, result):
    """Influence effect of seed set `result` under Independent Cascades.

    Runs 1000 simulations on fresh copies of `g` and returns the mean and
    standard deviation of the cumulative infected counts over 5 iterations.
    (Accumulator renamed from `input`, which shadowed the builtin; an unused
    `build_trends` call was dropped.)
    """
    totals = []
    for i in range(1000):
        g_mid = g.__class__()
        g_mid.add_nodes_from(g)
        g_mid.add_edges_from(g.edges)
        model_mid = ep.IndependentCascadesModel(g_mid)
        config_mid = mc.Configuration()
        config_mid.add_model_initial_configuration('Infected', result)
        for a, b in g_mid.edges():
            weight = config.config["edges"]['threshold'][(a, b)]
            g_mid[a][b]['weight'] = weight
            config_mid.add_edge_configuration('threshold', (a, b), weight)
        model_mid.set_initial_status(config_mid)
        iterations = model_mid.iteration_bunch(5)
        total_no = 0
        for j in range(5):
            total_no += iterations[j]['node_count'][1]
        totals.append(total_no)
    return s.mean(totals), s.stdev(totals)
def effectLT(g, config, result):
input = []
for i in range(1000):
g_mid = g.__class__()
g_mid.add_nodes_from(g)
g_mid.add_edges_from(g.edges)
model_mid = ep.ThresholdModel(g_mid)
config_mid = mc.Configuration()
config_mid.add_model_initial_configuration('Infected', result)
for a, b in g_mid.edges():
weight = config.config["edges"]['threshold'][(a, b)]
g_mid[a][b]['weight'] = weight
config_mid.add_edge_configuration('threshold', (a, b), weight)
for i in g.nodes():
threshold = random.randrange(1, 20)
threshold = round(threshold / 100, 2)
config_mid.add_node_configuration("threshold", i, threshold)
model_mid.set_initial_status(config_mid)
iterations = model_mid.iteration_bunch(5)
trends = model_mid.build_trends(iterations)
total_no = iterations[4]['node_count'][1]
input.append(total_no)
e = s.mean(input)
v = s.stdev((input))
return e,v
def effectSI(g, config, result, beta=0.01):
input = []
for i in range(1000):
g_mid = g.__class__()
g_mid.add_nodes_from(g)
g_mid.add_edges_from(g.edges)
model_mid = ep.SIModel(g_mid)
config_mid = mc.Configuration()
config_mid.add_model_initial_configuration('Infected', result)
config_mid.add_model_parameter('beta', beta) # set beta parameter
for a, b in g_mid.edges():
weight = config.config["edges"]['threshold'][(a, b)]
g_mid[a][b]['weight'] = weight
config_mid.add_edge_configuration('threshold', (a, b), weight)
model_mid.set_initial_status(config_mid)
iterations = model_mid.iteration_bunch(5)
trends = model_mid.build_trends(iterations)
total_no = iterations[4]['node_count'][1]
input.append(total_no)
e = s.mean(input)
v = s.stdev((input))
return e,v
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/graph_generation.py | xflow/SL/graph_generation.py | import networkx as nx
import torch_geometric.datasets as ds
import ndlib.models.ModelConfig as mc
import numpy as np
import random
from torch_geometric.datasets import Planetoid
def CiteSeer():
dataset = Planetoid(root='./Planetoid', name='CiteSeer') # Cora, CiteSeer, PubMed
data = dataset[0]
edges = (data.edge_index.numpy()).T.tolist()
G = nx.from_edgelist(edges)
c = max(nx.connected_components(G), key=len)
g = G.subgraph(c).copy()
g = nx.convert_node_labels_to_integers(g, first_label=0, ordering='default', label_attribute=None)
return g
def PubMed():
dataset = Planetoid(root='./Planetoid', name='PubMed') # Cora, CiteSeer, PubMed
data = dataset[0]
edges = (data.edge_index.numpy()).T.tolist()
G = nx.from_edgelist(edges)
c = max(nx.connected_components(G), key=len)
g = G.subgraph(c).copy()
g = nx.convert_node_labels_to_integers(g, first_label=0, ordering='default', label_attribute=None)
return g
def Cora():
dataset = Planetoid(root='./Planetoid', name='Cora') # Cora, CiteSeer, PubMed
data = dataset[0]
edges = (data.edge_index.numpy()).T.tolist()
G = nx.from_edgelist(edges)
c = max(nx.connected_components(G), key=len)
g = G.subgraph(c).copy()
g = nx.convert_node_labels_to_integers(g, first_label=0, ordering='default', label_attribute=None)
return g
def photo():
dataset = ds.Amazon(root='./geo', name = 'Photo')
data = dataset[0]
edges = (data.edge_index.numpy()).T.tolist()
G = nx.from_edgelist(edges)
c = max(nx.connected_components(G), key=len)
g = G.subgraph(c).copy()
g = nx.convert_node_labels_to_integers(g, first_label=0, ordering='default', label_attribute=None)
return g
def coms():
dataset = ds.Amazon(root='./geo', name = 'Computers')
data = dataset[0]
edges = (data.edge_index.numpy()).T.tolist()
G = nx.from_edgelist(edges)
c = max(nx.connected_components(G), key=len)
g = G.subgraph(c).copy()
g = nx.convert_node_labels_to_integers(g, first_label=0, ordering='default', label_attribute=None)
return g
def connSW(n, k, p):
# n The number of nodes
# k Each node is joined with its k nearest neighbors in a ring topology.
# p The probability of rewiring each edge
g = nx.connected_watts_strogatz_graph(n, k, p)
while nx.is_connected(g) == False:
g = nx.connected_watts_strogatz_graph(n,k, p)
return g
def rand(n, p, seed):
# n The number of nodes
# p Probability for edge creation
# seed Seed for random number generator (default=None)
random.seed(seed)
np.random.seed(seed)
g = nx.fast_gnp_random_graph(n, p, seed=seed)
return g
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/Gaussian.py | xflow/SL/Gaussian.py | import networkx as nx
from time import time
from graphGeneration import Cora, CiteSeer, PubMed, connSW, ER, coms, photo
from IM import eigen, degree, pi, sigma, Netshield, Soboldeg, Soboleigen, SobolPi, SobolSigma, SobolNS, greedyIC, degreeDis,SoboldegreeDis
from score import effectIC
import ndlib.models.ModelConfig as mc
import ndlib.models.epidemics as ep
import matplotlib.pyplot as plt
import pandas as pd
g = nx.karate_club_graph()
config = mc.Configuration()
for a, b in g.edges():
weight = 0.1
g[a][b]['weight'] = weight
config.add_edge_configuration("threshold", (a, b), weight)
seeds = degree(g,config,2)
print(seeds)
g_mid = g.__class__()
g_mid.add_nodes_from(g)
g_mid.add_edges_from(g.edges)
model_mid = ep.SIModel(g_mid) # Model SI
config_mid = mc.Configuration()
config_mid.add_model_initial_configuration('Infected', seeds)
config_mid.add_model_parameter('beta', 0.1) # Beta
for a, b in g_mid.edges():
weight = config.config["edges"]['threshold'][(a, b)]
g_mid[a][b]['weight'] = weight
config_mid.add_edge_configuration('threshold', (a, b), weight)
model_mid.set_initial_status(config_mid)
iterations = model_mid.iteration_bunch(5)
trends = model_mid.build_trends(iterations)
result = []
for item in seeds:
result.append(item)
for j in range(1, 5):
snapshot = list(iterations[j]['status'].keys())
for item in snapshot:
result.append(item)
observation = []
for node in range(g.number_of_nodes()):
if node in result:
observation.append(1)
else:
observation.append(0)
print(observation)
overlaps = []
observations = []
for i in range(10000):
g_mid = g.__class__()
g_mid.add_nodes_from(g)
g_mid.add_edges_from(g.edges)
model_mid = ep.SIModel(g_mid) # Model SI
config_mid = mc.Configuration()
config_mid.add_model_initial_configuration('Infected', seeds)
config_mid.add_model_parameter('beta', 0.1) # Beta
for a, b in g_mid.edges():
weight = config.config["edges"]['threshold'][(a, b)]
g_mid[a][b]['weight'] = weight
config_mid.add_edge_configuration('threshold', (a, b), weight)
model_mid.set_initial_status(config_mid)
iterations = model_mid.iteration_bunch(10) # 10 time steps
trends = model_mid.build_trends(iterations)
result = []
for item in seeds:
result.append(item)
for j in range(1, 10):
snapshot = list(iterations[j]['status'].keys())
for item in snapshot:
result.append(item)
obs = []
for node in range(g.number_of_nodes()):
if node in result:
obs.append(1)
else:
obs.append(0)
result.sort()
overlap = 0
for node in range(g.number_of_nodes()):
if obs[node] == observation[node]:
overlap += 1
overlaps.append(overlap / g.number_of_nodes())
observations.append(obs)
plt.hist(overlaps)
plt.show() | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/main.py | xflow/SL/main.py | import networkx as nx
import cosasi
import random
import numpy as np
from graph_generation import CiteSeer, PubMed, Cora, coms, photo, connSW, rand
from time import time
import tracemalloc
import logging
# from memory_profiler import profile
tracemalloc.start()
# Create a logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Create a file handler
file_handler = logging.FileHandler('output.log')
file_handler.setLevel(logging.INFO)
# Create a console handler
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
# Create a formatter and set it for both handlers
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)
# Add the handlers to the logger
logger.addHandler(file_handler)
logger.addHandler(console_handler)
# Now, logging.info() etc. will write to both the file and the console:
# logger.info("This is an info message.")
def get_result(sims, true_source):
# start = time()
# Rank nodes by score
# logger.info('Rank : %s', sims.rank())
# Returns the top n item indices by rank
# logger.info('Top n item : %s', sims.topn(n=5))
# Finds the rank of the true source, by the algorithm's scoring protocol.
# logger.info('Evaluate solution rank : %s', sims.evaluate_solution_rank(true_source))
# Finds the shortest path length between each node in the solution set and the true souce.
# logger.info('Shortest Distance : %s', sims.evaluate_distance(true_source))
# Runs evaluation algorithms and returns a dictionary of results
evals = sims.evaluate(true_source)
# logger.info('evals', evals)
# solution rank
# Where feasible, cosasi enhances localization algorithms to execute ranking across multiple hypotheses. This approach enables us to rank all hypotheses based on the algorithm's inherent scoring criteria and provide the rank of the actual source amongst all hypotheses. This resembles the commonly used "precision at k" metric in the field of information retrieval.
#Therefore, according to this algorithm, the real source was the nth most probable hypothesis.
# logger.info('solution rank of nth : %s', evals["rank"])
# rank / len(self.data["scores"])
# print('evals rank % :', evals["rank %"])
# a metric that assesses the minimum graph distance between vertex sets that may be of different sizes
# The top-scoring hypothesis was close to the true source.
top_dis= evals["distance"]["top score's distance"]
logger.info('top score distance : %s', top_dis)
# evaluate the distance from true source of all computed hypotheses
# distances = evals["distance"]["all distances"].values()
# logger.info('all distances', distances)
# the distance from true source of these hypotheses ranged from min to max
# logger.info('min distances : %s',min(distances))
# logger.info('max distances : %s',max(distances))
# todo: add more metrics
# end = time()
# logger.info('time : %s', end - start)
# @profile
def analyze_graph(G, seed):
# record memory usage
# snapshot = tracemalloc.take_snapshot()
# top_stats = snapshot.statistics('lineno')
# for stat in top_stats[:10]:
# logger.info(stat)
random.seed(seed)
np.random.seed(seed)
contagion = cosasi.StaticNetworkContagion(
G=G,
model="si",
infection_rate=0.1,
# recovery_rate=0.005, # for SIS/SIR models
number_infected = 3,
seed=seed
)
contagion.forward(steps = 16)
step = 15
# This obtains the indices of all vertices in the infected category at the 15th step of the simulation.
I = contagion.get_infected_subgraph(step=step)
logger.info('Infected Subgraph : %s',I)
# #benchmark
# benchmark = cosasi.BenchmarkFromSimulation(
# contagion=contagion,
# information_type="single snapshot",
# t=step
# )
# logger.info('benchmark for sims : %s', benchmark)
# results = benchmark.go()
# logger.info('benchmark result for sims : %s',results)
# benchmark = cosasi.BenchmarkFromDetails(
# true_source=true_source,
# G=G,
# I=I,
# t=step,
# number_sources=len(true_source),
# information_type="single snapshot"
# )
# logger.info('benchmark : %s', benchmark)
# results = benchmark.go()
# logger.info('benchmark result : %s', results)
# estimate the number of sources
# number_sources = cosasi.utils.estimators.number_sources(I=I, number_sources=None, return_source_subgraphs=False, number_sources_method="eigengap")
# logger.info('estimated number of sources : %s', number_sources)
# alternative estimate the number of sources
# alt_number_sources = cosasi.utils.estimators.number_sources(I=I, number_sources=None, return_source_subgraphs=False, number_sources_method="netsleuth", G=G)
# logger.info('alternative estimated number of sources : %s', alt_number_sources)
# estimate the number of sources and return the source subgraphs
# opt_number_sources, subgraphs = cosasi.utils.estimators.number_sources(I=I, number_sources=None, return_source_subgraphs=True)
# logger.info('optional estimated number of sources : %s', opt_number_sources)
# logger.info('subgraphs', subgraphs)
# The most frequently encountered data type in literature concerning source inference is known as a "snapshot." This refers to a comprehensive set of infection data provided for a specific point in time.
# infected_indices = contagion.get_infected_indices(step=step)
# logger.info('Infected indices', infected_indices)
# On the other hand, certain algorithms utilize "observers," a select group of vertices designated to keep track of their infection status. In cosasi's implementation, the user determines the number of these observers, and their specific selection is carried out in a uniformly random manner.
# observers = contagion.get_observers(observers=5)
# logger.info('Observers', observers)
# Some algorithms make use of an analytical concept known as the infection frontier, which represents the collection of infected vertices that have infected neighbors. This concept is particularly relevant to the Susceptible-Infected (SI) epidemic model, where vertices do not recover from the infection. Under these circumstances, the frontier set is comprised of nodes that could have been the most recently infected by the given point in time.
# frontier = contagion.get_frontier(step=step)
# logger.info('Frontier',frontier)
# Objects of the StaticNetworkContagion class store their compartmental histories. These histories can be accessed through their 'history' attribute.
# logger.info('history', contagion.history)
true_source = contagion.get_source()
logger.info('True Source : %s',true_source)
# implementation of NETSLEUTH providing no information about the number of sources
start = time()
logger.info('multisource netsleuth hypotheses_per_step=1')
sims = cosasi.source_inference.multiple_source.netsleuth(I=I, G=G, hypotheses_per_step=1)
get_result(sims, true_source)
end = time()
logger.info('time : %s', end - start)
# logger.info('multisource netsleuth hypotheses_per_step=2')
# sims = cosasi.source_inference.multiple_source.netsleuth(I=I, G=G, hypotheses_per_step=2)
# get_result(sims, true_source)
# logger.info('multisource netsleuth hypotheses_per_step=3')
# sims = cosasi.source_inference.multiple_source.netsleuth(I=I, G=G, hypotheses_per_step=3)
# get_result(sims, true_source)
# implementation of LISN providing no information about the number of sources
# logger.info('fast multisource lisn')
# sims = cosasi.source_inference.multiple_source.fast_multisource_lisn(I=I, G=G, t=step)
# get_result(sims, true_source)
# unofficial implementation of NETSLEUTH providing no information about the number of sources
# logger.info('fast multisource netsleuth')
# sims = cosasi.multiple_source.fast_multisource_netsleuth(I=I, G=G)
# get_result(sims, true_source)
# implementation of jordan centrality providing no information about the number of sources
# logger.info('fast multisource jordan centrality')
# sims = cosasi.multiple_source.fast_multisource_jordan_centrality(I=I, G=G)
# get_result(sims, true_source)
###############################
# unofficial implementation of NETSLEUTH assuming 2 sources
start = time()
logger.info('fast multisource netsleuth assuming 2 sources')
sims = cosasi.source_inference.multiple_source.fast_multisource_netsleuth(I=I, G=G, number_sources=2)
get_result(sims, true_source)
end = time()
logger.info('time : %s', end - start)
# implementation of jordan centrality assuming 2 sources
start = time()
logger.info('fast multisource jordan centrality assuming 2 sources')
sims = cosasi.source_inference.multiple_source.fast_multisource_jordan_centrality(I=I, G=G, number_sources=2)
get_result(sims, true_source)
end = time()
logger.info('time : %s', end - start)
# implementation of LISN assuming 3 sources
start = time()
logger.info('fast multisource lisn assuming 2 sources')
sims = cosasi.source_inference.multiple_source.fast_multisource_lisn(I=I, G=G, t=step, number_sources=2)
get_result(sims, true_source)
end = time()
logger.info('time : %s', end - start)
contagion.reset_sim()
for i in range(5, 10):
# logger.info('------------------------------------------------')
# logger.info('Analyzing CiteSeer')
# logger.info('round : %s', i+1)
# seed = 10 * (i+1)
# logger.info('seed : %s', seed)
# G = CiteSeer()
# analyze_graph(G, seed)
# logger.info('------------------------------------------------')
# logger.info('Analyzing Cora')
# logger.info('round : %s', i+1)
# seed = 10 * (i+1)
# logger.info('seed : %s', seed)
# G = Cora()
# analyze_graph(G, seed)
logger.info('------------------------------------------------')
logger.info('Analyzing connected small world')
logger.info('round : %s', i+1)
seed = 10 * (i+1)
logger.info('seed : %s', seed)
G = connSW(1000, 20, 0.1)
analyze_graph(G, seed)
# logger.info('------------------------------------------------')
# logger.info('Analyzing random')
# logger.info('round : %s', i+1)
# seed = 10 * (i+1)
# logger.info('seed : %s', seed)
# G = rand(1000, 0.25, seed)
# analyze_graph(G, seed)
# logger.info('------------------------------------------------')
# logger.info('Analyzing PubMed')
# logger.info('round : %s', i+1)
# seed = 10 * (i+1)
# logger.info('seed : %s', seed)
# G = PubMed()
# analyze_graph(G, seed)
# logger.info('------------------------------------------------')
# logger.info('Analyzing coms')
# logger.info('round : %s', i+1)
# seed = 10 * (i+1)
# logger.info('seed : %s', seed)
# G = coms()
# analyze_graph(G, seed)
# logger.info('------------------------------------------------')
# logger.info('Analyzing photo')
# logger.info('round : %s', i+1)
# seed = 10 * (i+1)
# logger.info('seed : %s', seed)
# G = photo()
# analyze_graph(G, seed)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/__init__.py | xflow/SL/cosasi/__init__.py | from .contagion import *
from .source_inference import *
from .benchmark import *
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/benchmark/benchmark.py | xflow/SL/cosasi/benchmark/benchmark.py | import random
import os, sys
import json
sys.path.insert(0, os.getcwd())
import numpy as np
import networkx as nx
import cosasi
MODULE_PATH = __file__[: -len("benchmark.py")]
MODULE_PATH = (
MODULE_PATH
if len(MODULE_PATH) > 0 and (MODULE_PATH[-1] == "/" or MODULE_PATH[-1] == "\\")
else MODULE_PATH + "/"
)
ALGORITHMS_PATH = MODULE_PATH[: -len("benchmark/")] + "source_inference/"
def _get_relevant_namespaces(
source_type=None, information_type="single snapshot", epidemic_model=None
):
"""Retrieves the functional names of all applicable source inference algorithms.
Parameters
----------
source_type : str or None (optional)
one of None, "single-source", or "multi-source"
If None, we consider any source type
information_type : str
describes the information the source inference algorithm receives
e.g. "single snapshot"
epidemic_model : str or None (optional)
specifies the epidemic model, e.g. SI, SIS, SIR
if None, ignores this constraint
"""
valid_namespaces = []
algorithms_dict = json.load(open(ALGORITHMS_PATH + "algorithm_details.json"))
if isinstance(source_type, type(None)):
source_type_iter = list(algorithms_dict.keys())
else:
source_type_iter = [source_type]
for source_type in source_type_iter:
for alg_name in algorithms_dict[source_type]:
if not isinstance(epidemic_model, type(None)):
if (
epidemic_model.lower()
not in algorithms_dict[source_type][alg_name]["epidemic model"]
):
continue
if (
algorithms_dict[source_type][alg_name]["information type"]
== information_type
and algorithms_dict[source_type][alg_name]["status"] == "complete"
):
valid_namespaces.append(
eval(algorithms_dict[source_type][alg_name]["namespace"])
)
return valid_namespaces
def _get_namespace_params(name, return_defaults=True):
"""Retrieves the names of the parameters and their default values.
Parameters
----------
name : function
function namespace
return_defaults : bool
if True, also includes
"""
arg_num = name.__code__.co_argcount
param_names = name.__code__.co_varnames[:arg_num]
if not return_defaults:
return param_names
params = {}
if isinstance(name.__defaults__, type(None)):
defaults = []
else:
defaults = list(name.__defaults__)[::-1]
param_names = param_names[::-1]
for i in range(len(param_names)):
if i < len(defaults):
arg = defaults[i]
else:
arg = ""
params[param_names[i]] = arg
return params
def _execute_algorithm_from_namespace(name, what_we_know):
"""Runs a source inference algorithm, passing what we know as arguments.
Parameters
----------
name : function
function namespace
what_we_know : dict
dictionary of arguments we want to pass to the algorithm
"""
function_args = _get_namespace_params(name=name, return_defaults=True)
for param in what_we_know:
if param in function_args:
function_args[param] = what_we_know[param]
if "" in function_args.values():
raise ValueError(
"Insufficient arguments provided.",
function_args,
what_we_know,
name,
name.__defaults__,
)
return name(**function_args)
class BenchmarkFromDetails:
"""Benchmarking tool using provided class args to pass to algorithms when available.
Parameters
----------
true_source : node or tuple of nodes
the true source of the diffusion process
G : NetworkX Graph
The original graph the infection process was run on.
I is a subgraph of G induced by infected vertices at observation time.
information_type : str
describes the information the source inference algorithm receives
e.g. "single snapshot"
I : NetworkX Graph
The infection subgraph observed at a particular time step
t : int
the timestep corresponding to I
observer_dict : dict or None (optional)
takes a dict of observers and the timestamps at which they become infected.
epidemic_model : str or None (optional)
specifies the epidemic model, e.g. SI, SIS, SIR
if None, ignores this constraint
number_sources : int or None (optional)
if int, this is the hypothesized number of infection sources
if None, estimates the number of sources
infection_rate : float or None (optional)
Inter-node infection efficiency. If a float, must be in [0,1]
if None, ignores this parameter
"""
def __init__(
self,
true_source,
G,
information_type,
I=None,
t=None,
observer_dict=None,
epidemic_model=None,
number_sources=None,
infection_rate=None,
):
"""Benchmarking tool using provided class args to pass to algorithms when available.
Parameters
----------
true_source : node or tuple of nodes
the true source of the diffusion process
G : NetworkX Graph
The original graph the infection process was run on.
I is a subgraph of G induced by infected vertices at observation time.
information_type : str
describes the information the source inference algorithm receives
e.g. "single snapshot"
I : NetworkX Graph
The infection subgraph observed at a particular time step
t : int
the timestep corresponding to I
observer_dict : dict or None (optional)
takes a dict of observers and the timestamps at which they become infected.
epidemic_model : str or None (optional)
specifies the epidemic model, e.g. SI, SIS, SIR
if None, ignores this constraint
number_sources : int or None (optional)
if int, this is the hypothesized number of infection sources
if None, estimates the number of sources
infection_rate : float or None (optional)
Inter-node infection efficiency. If a float, must be in [0,1]
if None, ignores this parameter
"""
self.epidemic_model = epidemic_model
self.number_sources = number_sources
self.information_type = information_type
if isinstance(t, (int, float, type(None))):
self.t = t
else:
raise ValueError("Time parameter must be an integer or float or None")
self.observer_dict = observer_dict
if information_type == "single snapshot" and (
isinstance(I, type(None)) or isinstance(t, type(None))
):
raise ValueError(
"If information type is single snapshot, we need the infection subgraph and its corresponding timestep"
)
if information_type == "observers" and (isinstance(observer_dict, type(None))):
raise ValueError(
"If the information type is observers, we need the observer_dict"
)
if isinstance(G, nx.classes.graph.Graph):
self.G = G
else:
raise ValueError("G must be a NetworkX graph.")
if all(v in G for v in true_source):
self.true_source = true_source
elif true_source in G:
self.true_source = true_source
else:
raise ValueError("All members of true_source must be in G.")
if isinstance(I, (nx.classes.graph.Graph, type(None))):
self.I = I
else:
raise ValueError("I must be a NetworkX graph.")
if (
isinstance(infection_rate, float) and 0.0 <= infection_rate <= 1.0
) or isinstance(infection_rate, type(None)):
self.infection_rate = infection_rate
else:
raise ValueError("Infection rate must be a float between 0 and 1.")
self.namespaces = self.get_namespaces()
return None
def get_namespaces(self):
"""Finds all source localization algorithms applicable to the contagion task
specified in the class constructor.
"""
if isinstance(self.number_sources, type(None)):
source_type = None
elif self.number_sources > 1:
source_type = "multi-source"
elif self.number_sources == 1:
source_type = "single-source"
else:
raise NotImplementedError
namespaces = _get_relevant_namespaces(
source_type=source_type,
information_type=self.information_type,
epidemic_model=self.epidemic_model,
)
return namespaces
def go(self):
"""Runs all available algorithms with the information we have on hand."""
result_dict = {}
what_we_know = {
"G": self.G,
"I": self.I,
"observer_dict": self.observer_dict,
"t": self.t,
"number_sources": self.number_sources,
}
for alg in self.namespaces:
result = _execute_algorithm_from_namespace(
name=alg, what_we_know=what_we_know
)
inference_method = result.data["inference method"]["name"]
source_type = result.data["inference method"]["source_type"]
result_dict[source_type + " " + inference_method] = {
"source result": result,
"evaluation": result.evaluate(true_source=self.true_source),
}
return result_dict
class BenchmarkFromSimulation:
"""Benchmarking tool using provided simulation object to pass to algorithms when available.
Parameters
----------
contagion : cosasi.contagion.static_network_contagion.StaticNetworkContagion
an already-run contagion object
t : int
the timestep corresponding to I
information_type : str or None (optional)
describes the information the source inference algorithm receives
e.g. "single snapshot"
observers : int or list
If int, observers specifies the number of observation nodes
If list, observers specifies the observation nodes directly
"""
def __init__(self, contagion, t=None, information_type=None, observers=None):
"""Benchmarking tool using provided simulation object to pass to algorithms when available.
Parameters
----------
contagion : cosasi.contagion.static_network_contagion.StaticNetworkContagion
an already-run contagion object
t : int
the timestep corresponding to I
information_type : str or None (optional)
describes the information the source inference algorithm receives
e.g. "single snapshot"
observers : int or list
If int, observers specifies the number of observation nodes
If list, observers specifies the observation nodes directly
"""
true_source = contagion.get_source()
if information_type == "single snapshot":
if isinstance(t, type(None)):
raise ValueError("If information type is snapshot, t is required")
if not isinstance(t, int):
raise ValueError("t must be an int")
self.benchmarker = BenchmarkFromDetails(
true_source=true_source,
G=contagion.G,
I=contagion.get_infected_subgraph(step=t),
t=t,
epidemic_model=contagion.model,
number_sources=len(true_source),
information_type=information_type,
infection_rate=contagion.infection_rate,
)
elif information_type == "observers":
if isinstance(observers, type(None)):
raise ValueError(
"If information type is observers, the number of observers is required"
)
if not isinstance(observers, (int, list)):
raise ValueError("observers must be an int or a list")
self.benchmarker = BenchmarkFromDetails(
true_source=true_source,
G=contagion.G,
observer_dict=contagion.get_observers(observers=observers),
epidemic_model=contagion.model,
number_sources=len(true_source),
information_type=information_type,
infection_rate=contagion.infection_rate,
)
else:
raise NotImplementedError
return None
def go(self):
"""Runs all available algorithms with the information we have on hand."""
return self.benchmarker.go()
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/benchmark/__init__.py | xflow/SL/cosasi/benchmark/__init__.py | from .benchmark import *
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/benchmark/tests/__init__.py | xflow/SL/cosasi/benchmark/tests/__init__.py | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false | |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/benchmark/tests/test_benchmark.py | xflow/SL/cosasi/benchmark/tests/test_benchmark.py | import os, sys
sys.path.insert(0, os.getcwd())
import pytest
from unittest import TestCase
import networkx as nx
import numpy as np
import cosasi
class Test_BenchmarkFromSimulation(TestCase):
def setUp(self):
self.number_infected_init = 3
self.sim_steps = 100
self.G = nx.fast_gnp_random_graph(100, 0.25)
self.contagion = cosasi.StaticNetworkContagion(
G=self.G,
model="si",
infection_rate=0.01,
number_infected=self.number_infected_init,
)
self.contagion.forward(self.sim_steps)
self.t = 15
return None
def test_inputs_contagion(self):
with pytest.raises((AttributeError, ValueError)):
cosasi.BenchmarkFromSimulation(
contagion="BAD INPUT", information_type="single snapshot", t=self.t
)
benchmark = cosasi.BenchmarkFromSimulation(
contagion=self.contagion, information_type="single snapshot", t=self.t
)
benchmark.go()
assert True
def test_inputs_information_type(self):
with pytest.raises(NotImplementedError):
cosasi.BenchmarkFromSimulation(
contagion=self.contagion, information_type="BAD INPUT", t=self.t
)
with pytest.raises(ValueError):
cosasi.BenchmarkFromSimulation(
contagion=self.contagion, information_type="observers", t=self.t
)
with pytest.raises(ValueError):
cosasi.BenchmarkFromSimulation(
contagion=self.contagion,
information_type="observers",
t=self.t,
observers="BAD INPUT",
)
benchmark = cosasi.BenchmarkFromSimulation(
contagion=self.contagion,
information_type="observers",
t=self.t,
observers=2,
)
# benchmark.go()
assert True
benchmark = cosasi.BenchmarkFromSimulation(
contagion=self.contagion,
information_type="observers",
t=self.t,
observers=[0, 1],
)
# benchmark.go()
assert True
benchmark = cosasi.BenchmarkFromSimulation(
contagion=self.contagion, information_type="single snapshot", t=self.t
)
benchmark.go()
assert True
def test_inputs_t(self):
with pytest.raises(ValueError):
cosasi.BenchmarkFromSimulation(
contagion=self.contagion,
information_type="single snapshot",
t="BAD INPUT",
)
with pytest.raises(ValueError):
# invalid step
cosasi.BenchmarkFromSimulation(
contagion=self.contagion,
information_type="single snapshot",
t=self.sim_steps + 1,
)
benchmark = cosasi.BenchmarkFromSimulation(
contagion=self.contagion, information_type="single snapshot", t=self.t
)
benchmark.go()
assert True
def test_go_output(self):
    """go() returns a dict of str -> dict, each holding a source result object."""
    bench = cosasi.BenchmarkFromSimulation(
        contagion=self.contagion, information_type="single snapshot", t=self.t
    )
    results = bench.go()
    assert isinstance(results, dict)
    keys = results.keys()
    # Every algorithm label is a string mapping to a result record...
    assert all(isinstance(k, str) for k in keys)
    assert all(isinstance(results[k], dict) for k in keys)
    # ...and every record carries a single- or multi-source result object.
    expected_types = (
        cosasi.source_inference.source_results.SingleSourceResult,
        cosasi.source_inference.source_results.MultiSourceResult,
    )
    assert all(
        isinstance(results[k]["source result"], expected_types) for k in keys
    )
class Test_BenchmarkFromDetails(TestCase):
    """Tests for cosasi.BenchmarkFromDetails: input validation, namespace
    selection by source count, and the shape of go() output."""

    def setUp(self):
        """Run an SI contagion on a G(100, 0.25) graph and record the
        infected subgraph and true source at step t=15."""
        self.number_infected_init = 3
        self.sim_steps = 100
        self.G = nx.fast_gnp_random_graph(100, 0.25)
        self.contagion = cosasi.StaticNetworkContagion(
            G=self.G,
            model="si",
            infection_rate=0.01,
            number_infected=self.number_infected_init,
        )
        self.contagion.forward(self.sim_steps)
        self.t = 15
        self.I = self.contagion.get_infected_subgraph(step=self.t)
        self.true_source = self.contagion.get_source()
        return None

    def test_inputs_true_source(self):
        """A non-source true_source raises ValueError; a valid one runs."""
        with pytest.raises(ValueError):
            benchmark = cosasi.BenchmarkFromDetails(
                true_source="BAD INPUT",
                G=self.G,
                I=self.I,
                t=self.t,
                number_sources=self.number_infected_init,
                information_type="single snapshot",
            )
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=self.true_source,
            G=self.G,
            I=self.I,
            t=self.t,
            number_sources=self.number_infected_init,
            information_type="single snapshot",
        )
        benchmark.go()
        assert True

    def test_inputs_G(self):
        """A non-graph G raises ValueError; a valid graph runs."""
        with pytest.raises(ValueError):
            benchmark = cosasi.BenchmarkFromDetails(
                true_source=self.true_source,
                G="BAD INPUT",
                I=self.I,
                t=self.t,
                number_sources=self.number_infected_init,
                information_type="single snapshot",
            )
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=self.true_source,
            G=self.G,
            I=self.I,
            t=self.t,
            number_sources=self.number_infected_init,
            information_type="single snapshot",
        )
        benchmark.go()
        assert True

    def test_inputs_I(self):
        """A non-graph infection subgraph I raises ValueError; a valid one runs."""
        with pytest.raises(ValueError):
            benchmark = cosasi.BenchmarkFromDetails(
                true_source=self.true_source,
                G=self.G,
                I="BAD INPUT",
                t=self.t,
                number_sources=self.number_infected_init,
                information_type="single snapshot",
            )
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=self.true_source,
            G=self.G,
            I=self.I,
            t=self.t,
            number_sources=self.number_infected_init,
            information_type="single snapshot",
        )
        benchmark.go()
        assert True

    def test_inputs_t(self):
        """A non-numeric t raises ValueError; a valid t runs."""
        with pytest.raises(ValueError):
            benchmark = cosasi.BenchmarkFromDetails(
                true_source=self.true_source,
                G=self.G,
                I=self.I,
                t="BAD INPUT",
                number_sources=self.number_infected_init,
                information_type="single snapshot",
            )
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=self.true_source,
            G=self.G,
            I=self.I,
            t=self.t,
            number_sources=self.number_infected_init,
            information_type="single snapshot",
        )
        benchmark.go()
        assert True

    def test_inputs_number_sources(self):
        """A non-integer number_sources raises TypeError; a valid count runs."""
        with pytest.raises(TypeError):
            benchmark = cosasi.BenchmarkFromDetails(
                true_source=self.true_source,
                G=self.G,
                I=self.I,
                t=self.t,
                number_sources="BAD INPUT",
                information_type="single snapshot",
            )
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=self.true_source,
            G=self.G,
            I=self.I,
            t=self.t,
            number_sources=self.number_infected_init,
            information_type="single snapshot",
        )
        benchmark.go()
        assert True

    def test_get_namespaces(self):
        """Namespace selection: multisource algorithms appear only when
        number_sources > 1."""
        # single source should not have any multisource algorithms
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=self.true_source,
            G=self.G,
            I=self.I,
            t=self.t,
            number_sources=1,
            information_type="single snapshot",
        )
        namespaces = [n.__name__ for n in benchmark.get_namespaces()]
        for n in namespaces:
            if "fast_multisource" in n:
                assert False
        assert True
        # multi-source should have multisource algorithms
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=self.true_source,
            G=self.G,
            I=self.I,
            t=self.t,
            number_sources=3,
            information_type="single snapshot",
        )
        namespaces = [n.__name__ for n in benchmark.get_namespaces()]
        temp = False
        for n in namespaces:
            if "fast_multisource" in n:
                temp = True
                break
        assert temp

    def test_go_output(self):
        """go() with number_sources=None returns str-keyed dicts, each holding
        a single- or multi-source result object."""
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=self.true_source,
            G=self.G,
            I=self.I,
            t=self.t,
            number_sources=None,
            information_type="single snapshot",
        )
        results = benchmark.go()
        assert isinstance(results, dict)
        results_keys = results.keys()
        assert all(isinstance(k, str) for k in results_keys)
        assert all(isinstance(results[k], dict) for k in results_keys)
        assert all(
            isinstance(
                results[k]["source result"],
                (
                    cosasi.source_inference.source_results.SingleSourceResult,
                    cosasi.source_inference.source_results.MultiSourceResult,
                ),
            )
            for k in results_keys
        )

    def test_go_with_observers(self):
        """Observer-based benchmarking includes observer algorithms and
        excludes snapshot-only ones."""
        G = nx.fast_gnp_random_graph(100, 0.25)
        contagion = cosasi.StaticNetworkContagion(
            G=G, model="si", infection_rate=0.01, number_infected=1
        )
        contagion.forward(100)
        I = contagion.get_infected_subgraph(step=25)
        observers = contagion.get_observers(10)
        true_source = contagion.get_source()
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=true_source,
            G=G,
            I=I,
            t=15,
            number_sources=len(true_source),
            information_type="observers",
            observer_dict=observers,
        )
        results = benchmark.go()
        assert "single-source earliest infection first" in results.keys()
        assert "single-source lisn" not in results.keys()
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/utils/estimators.py | xflow/SL/cosasi/utils/estimators.py | import math
import random
import warnings
import scipy
import numpy as np
import networkx as nx
from sklearn.cluster import SpectralClustering
from .helpers import attack_degree, attack_degree_partition
from ..source_inference.multiple_source import netsleuth
def source_subgraphs(I, number_sources=2):
    """Partition the infection subgraph into the specified number of pieces
    via spectral clustering on its adjacency matrix.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    number_sources : int
        The hypothesized number of infection sources
    """
    warnings.filterwarnings("ignore", category=UserWarning)
    warnings.filterwarnings("ignore", category=FutureWarning)
    nodes = list(I.nodes)
    adjacency = nx.adjacency_matrix(I)
    # Cluster directly on the (precomputed) adjacency structure.
    clusterer = SpectralClustering(number_sources, affinity="precomputed", n_init=100)
    clusterer.fit(scipy.sparse.csr_matrix(adjacency))
    labels = clusterer.labels_
    # One induced subgraph per cluster label.
    return [
        I.subgraph([nodes[j] for j in np.where(labels == label)[0]])
        for label in set(labels)
    ]
def number_sources(
    I,
    number_sources=None,
    return_source_subgraphs=True,
    number_sources_method="eigengap",
    G=None,
):
    """Manages source subgraph estimation, mostly via spectral analysis and clustering.
    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    number_sources : int or None (optional)
        if int, this is the hypothesized number of infection sources
        if None, estimates the number of sources via Eigengap heuristic
    return_source_subgraphs : bool
        if True, returns subgraphs of I corresponding to each hypothesized infection source
        if False, does not return subgraphs of I corresponding to each hypothesized infection source
    number_sources_method : str
        method for estimating the number of sources. one of the following options:
            - "eigengap" : uses the Eigengap of the normalized graph Laplacian to estimate the number of clusters
            - "netsleuth" : runs the multi-source NETSLEUTH algorithm and reports the number of seeds
            - "chatter" : invokes a spectral method based on the Chatter algorithm
        if number_sources != None, this doesn't do anything
    G : NetworkX Graph (optional)
        the original network the contagion process was run on
        generally optional (e.g. not needed for eigengap), occassionally required (e.g. needed for netsleuth)
    Notes
    -----
    If the diffusion process is brief or observation is early, and infection sources
    are sufficiently sparse, then the infected subgraphs corresponding to each infection
    source may be the connected components of the input graph. This is described in
    Section 2.6 of [1]_.
    We estimate the number of infection sources by the minimum of the number of connected
    components and the Eigengap heuristic of the provided graph. The Eigengap heuristic
    is described in [2]_.
    With a hypothesized number of infection sources in hand, we partition the graph via
    spectral clustering to provide a list of subgraphs corresponding to each infection
    source [3]_.
    References
    ----------
    .. [1] L. Ying and K. Zhu,
        "Diffusion Source Localization in Large Networks"
        Synthesis Lectures on Communication Networks, 2018
    .. [2] U. von Luxburg,
        "A Tutorial on Spectral Clustering"
        Statistics and Computing, 2007
        https://link.springer.com/article/10.1007/s11222-007-9033-z
    .. [3] A. Damle and V. Minden and L. Ying
        "Simple, direct and efficient multi-way spectral clustering"
        Information and Inference: A Journal of the IMA, 2019
        https://academic.oup.com/imaiai/article/8/1/181/5045955
    """
    # Caller pinned the source count: pass it through, optionally with a
    # matching spectral partition of I.
    if isinstance(number_sources, int):
        if return_source_subgraphs:
            return number_sources, source_subgraphs(I, number_sources=number_sources)
        else:
            return number_sources
    elif isinstance(number_sources, type(None)):
        # No count provided: estimate it with the requested heuristic.
        if number_sources_method.lower() == "eigengap":
            m = eigengap(I)
        elif number_sources_method.lower() == "netsleuth":
            if isinstance(G, type(None)):
                raise ValueError("Need `G` for NETSLEUTH method.")
            netsleuth_result = netsleuth(I=I, G=G, hypotheses_per_step=1)
            # seed-set size of NETSLEUTH's top-ranked hypothesis
            m = len(netsleuth_result.topn(1)[0])
        elif number_sources_method.lower() == "chatter":
            if isinstance(G, type(None)):
                raise ValueError("Need `G` for chatter method.")
            m = chatter(I, G)
        else:
            raise NotImplementedError
        if m <= nx.number_connected_components(I):
            # Estimate does not exceed the component count: treat each
            # connected component as its own source subgraph, and snap the
            # count to the number of components.
            subgraphs = [I.subgraph(c) for c in nx.connected_components(I)]
            m = len(subgraphs)
        else:
            subgraphs = source_subgraphs(I, number_sources=m)
        if return_source_subgraphs:
            return m, subgraphs
        else:
            return m
    else:
        raise ValueError("number_sources not recognized: must be an integer or None.")
def chatter(I, G):
    """Estimate the number of sources of a graph diffusion process via the
    Chatter algorithm's message-frequency spectrum.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The graph the diffusion process was originally run on
    """
    frequency = chatter_frequency(I)
    # Self-chatter is ignored.
    np.fill_diagonal(frequency, 0)
    eigvals, _ = np.linalg.eig(frequency)
    transformed = (1 / (eigvals + 1))[1 : len(I)]
    return int(np.argmax(transformed) + 1)
def eigengap(G):
    """Estimate the number of clusters of G via the Eigengap heuristic on the
    normalized graph Laplacian: the index of the largest gap between
    consecutive sorted eigenvalues.

    Parameters
    ----------
    G : NetworkX Graph
        The graph to analyze
    Notes
    -----
    The Eigengap heuristic is described in [1]_.
    References
    ----------
    .. [1] U. von Luxburg,
        "A Tutorial on Spectral Clustering"
        Statistics and Computing, 2007
        https://link.springer.com/article/10.1007/s11222-007-9033-z
    """
    warnings.filterwarnings("ignore", category=FutureWarning)
    laplacian = nx.normalized_laplacian_matrix(G).toarray()
    spectrum, _ = np.linalg.eig(laplacian)
    spectrum.sort()
    # Largest gap between consecutive eigenvalues marks the cluster count.
    return np.argmax(np.diff(spectrum)) + 1
def bits_encode_integer(n):
    """Estimate the number of bits in a universal code for an integer n >= 1:
    log(2.865064) plus the iterated logarithm sum log(n) + log(log(n)) + ...
    over the positive terms.

    Parameters
    ----------
    n : int
        an integer at least 1
    Notes
    -----
    Calculation is from Section 4.1 of [1]_.
    References
    ----------
    .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
        "Efficiently spotting the starting points of an epidemic in a large graph"
        Knowledge and Information Systems, 2013
        https://link.springer.com/article/10.1007/s10115-013-0671-5
    """
    if n < 1:
        raise ValueError("n must be at least 1")
    total = math.log(2.865064)
    term = math.log(n)
    # Accumulate the iterated-log series while terms stay positive.
    while term > 0:
        total += term
        term = math.log(term)
    return total
def bits_encode_seed(s, G):
    """Bits required to identify a seed set (hypothesized infection sources):
    the cost of encoding its size plus log of the number of possible sets of
    that size.

    Parameters
    ----------
    s : array-like
        seed set
    G : NetworkX Graph
        The original graph the infection process was run on.
    Notes
    -----
    Calculation is from Section 4.1 of [1]_.
    References
    ----------
    .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
        "Efficiently spotting the starting points of an epidemic in a large graph"
        Knowledge and Information Systems, 2013
        https://link.springer.com/article/10.1007/s10115-013-0671-5
    """
    seed_size = len(s)
    return bits_encode_integer(seed_size) + math.log(math.comb(len(G), seed_size))
def bits_encode_ripple(s, G, beta=0.01):
    """Total description length of a seed set and its corresponding maximum likelihood
    propagation ripple.
    Parameters
    ----------
    s : array-like
        seed set (not modified by this function)
    G : NetworkX Graph
        The original graph the infection process was run on.
    beta : float
        infection probability
    Notes
    -----
    Calculation is from Section 4.3 of [1]_.
    References
    ----------
    .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
        "Efficiently spotting the starting points of an epidemic in a large graph"
        Knowledge and Information Systems, 2013
        https://link.springer.com/article/10.1007/s10115-013-0671-5
    """

    def probability_infection(m_d, f_d, d):
        """Probability of m_d nodes being infected in a subset of the frontier.
        Parameters
        ----------
        m_d : int
            number of nodes infected
        f_d :
            number of nodes in a frontier subset F_d
        d : int
            degree
        Notes
        -----
        Calculation is from Section 4.2.3 of [1]_.
        References
        ----------
        .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
            "Efficiently spotting the starting points of an epidemic in a large graph"
            Knowledge and Information Systems, 2013
            https://link.springer.com/article/10.1007/s10115-013-0671-5
        """
        p_d = 1 - (1 - beta) ** d  # attack probability in the set
        return math.comb(f_d, m_d) * (p_d**m_d) * (1 - p_d) ** (f_d - m_d)

    def l_frontier(f, infected):
        """Calculates the code length for encoding the infectious in the frontier
        set at a snapshot of time.
        Parameters
        ----------
        f : array-like
            frontier set
        infected : array-like
            infected nodes
        Notes
        -----
        Calculation is Equation 3 from Section 4.2.4 of [1]_.
        References
        ----------
        .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
            "Efficiently spotting the starting points of an epidemic in a large graph"
            Knowledge and Information Systems, 2013
            https://link.springer.com/article/10.1007/s10115-013-0671-5
        """
        l = 0
        partition = attack_degree_partition(f, infected, G)
        for d in partition:
            f_d = len(partition[d])
            # Bug fix: p_d must be the attack probability for *this* degree d.
            # Previously this closure read a stale p_d left over from the last
            # iteration of the ripple loop below.
            p_d = 1 - (1 - beta) ** d
            m_d = int(min(math.floor(p_d * (f_d + 1)), f_d))
            if m_d == 0 or f_d == 0:
                continue
            l -= (
                math.log(probability_infection(m_d, f_d, d))
                + m_d * math.log(m_d / f_d)
                + (f_d - m_d) * math.log(1 - m_d / f_d)
            )
        return l

    # Bug fix: copy the seed set. The original aliased `infected = s` and then
    # extended it in place, mutating the caller's list.
    infected = list(s)
    frontier = set([j for i in infected for j in G.neighbors(i) if j not in infected])
    bits_ripple = 0
    t = 0  # index starts at 0 per p. 42 / Section 4.2.2
    while len(frontier) > 0 and len(infected) < len(G):
        # ripple step, get new frontier
        partition = attack_degree_partition(frontier, infected, G)
        for d in partition:
            f_d = len(partition[d])
            p_d = 1 - (1 - beta) ** d  # attack probability in the set
            # NOTE(review): the paper's MLE count is floor(p_d * (f_d + 1));
            # the (f_d / beta + 1) factor here looks suspicious — confirm
            # against Section 4.2.3 before changing.
            n_d = math.floor((f_d / beta + 1) * p_d)
            infected += random.sample(partition[d], min(n_d, f_d))
        frontier = set(
            [j for i in infected for j in G.neighbors(i) if j not in infected]
        )
        infected = list(set(infected))
        bits_ripple += l_frontier(frontier, infected)
        t += 1
    return bits_encode_integer(t) + bits_ripple
def description_length(s, G, beta=0.01):
    """Implements a greedy heuristic to estimate the two-part minimal infection
    description length of a proposed set of infection sources.
    Parameters
    ----------
    s : array-like
        seed set
    G : NetworkX Graph
        The original graph the infection process was run on.
    beta : float
        infection probability, forwarded to the ripple encoding
    Notes
    -----
    The minimal description length, as applied to source localization, is introduced
    in [1]_.
    References
    ----------
    .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
        "Efficiently spotting the starting points of an epidemic in a large graph"
        Knowledge and Information Systems, 2013
        https://link.springer.com/article/10.1007/s10115-013-0671-5
    """
    # Bug fix: forward the caller's beta. Previously this hard-coded
    # beta=0.01, silently ignoring the parameter.
    return bits_encode_seed(s, G) + bits_encode_ripple(s=s, G=G, beta=beta)
def chatter_frequency(G, t=None):
    """Implements the Chatter Algorithm described in Notes.
    Parameters
    ----------
    G : NetworkX Graph
        The graph to analyze
    t : int or None (optional)
        number of rounds to complete
        if None, the algorithm runs until every node's message is received by
        every other node at least 5 times.
    Notes
    -----
    Each node starts with a message bank consisting of its own ID.
    For `t` many rounds, each node broadcasts its message bank to its neighbors,
    and all nodes receiving messages append them to their own message bank.
    message_frequency[i][j] is the number of times i received j's message.
    A "naive"/pure message-passing formulation of this would be along the lines of:
    .. code-block:: python
        def chatter_distance_slow(G, t):
            messages = {i:[i] for i in G}
            for _ in range(t):
                new_messages = copy.deepcopy(messages)
                for i in range(len(G)):
                    for j in G.neighbors(i):
                        new_messages[j] += messages[i]
                messages = new_messages
            return messages
    where messages[i].count(j) is the number of times i received j's message. But
    this is very slow and easily re-written as matrix multiplication, as is done
    here.
    """
    warnings.filterwarnings("ignore", category=FutureWarning)
    A = nx.adjacency_matrix(G).toarray()
    # Start from the identity: each node initially holds only its own message.
    message_frequency = scipy.sparse.identity(len(G)).toarray()
    if isinstance(t, type(None)):
        if not nx.is_connected(G):
            # A disconnected graph can never satisfy the "everyone heard
            # everyone >= 5 times" stopping rule; fall back to len(G) rounds.
            return chatter_frequency(G, t=len(G))
        while np.min(message_frequency) < 5:
            # NOTE(review): rows are updated sequentially, so later rows see
            # earlier rows' updates within the same sweep — confirm this
            # in-place ordering is intended rather than a synchronous update.
            for i in range(len(G)):
                message_frequency[i] += A.dot(message_frequency[i])
    else:
        # Fixed number of rounds; same sequential row-update scheme as above.
        for _ in range(t):
            for i in range(len(G)):
                message_frequency[i] += A.dot(message_frequency[i])
    return message_frequency
def chatter_distance(G, t, u=None, v=None, normalized=True):
    """Invokes the Chatter Algorithm/chatter frequency to obtain chatter distance,
    a graph topology metric.
    Parameters
    ----------
    G :NetworkX Graph
        The graph to analyze
    t : int
        number of rounds to complete
    u : node (optional)
        starting node. if not provided, we return an array of distances
    v : node (optional)
        end node. if not provided, we return an array of distances
    normalized : bool
        if True, all distances are scaled to have a max value of 1
    Notes
    -----
    The chatter distance between nodes `u` and `v` reflects the difficulty node `u`
    is expected to have in transmitting a message to node `v`.
    """
    message_frequency = chatter_frequency(G, t)
    # Distance is the reciprocal of how often messages get through.
    distance = 1 / message_frequency
    if normalized:
        distance /= np.max(distance)
    if isinstance(u, type(None)) and isinstance(v, type(None)):
        return distance
    if isinstance(v, type(None)):
        # all distances from u
        return distance[u]
    if isinstance(u, type(None)):
        # Bug fix: all distances *to* v is column v. The original
        # `distance[:][v]` copies the matrix and then selects row v.
        return distance[:, v]
    return distance[u][v]
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/utils/helpers.py | xflow/SL/cosasi/utils/helpers.py | import operator
import functools
import numpy as np
import networkx as nx
def list_product(l):
    """Multiply the elements of `l` together; an empty list yields 1.

    Parameters
    ----------
    l : list
        list of elements you want to multiply
    """
    product = 1
    for element in l:
        product = product * element
    return product
def longest_list(l):
    """Return the longest list contained in `l` (first one wins on ties).

    Parameters
    ----------
    l : list or array-like
        stores the lists of interest
    """
    return max(l, key=lambda item: len(item))
def longest_list_len(l):
    """Return the length of the longest list in an array-like of lists.

    Parameters
    ----------
    l : list or array-like
        stores the lists of interest
    """
    return max(len(item) for item in l)
def soft_eccentricity(G, v):
    """Vertex eccentricity with graceful handling of degenerate graphs.

    Parameters
    ----------
    G : NetworkX graph
        A graph
    v : node
        Return value of specified node
    Notes
    -----
    Regular eccentricity on a connected multi-node graph; 1 for a single-node
    graph; infinity when G is disconnected.
    """
    # A one-node graph is trivially connected; define its eccentricity as 1.
    if len(G) == 1:
        return 1
    if nx.number_connected_components(G) > 1:
        return np.inf
    return nx.eccentricity(G, v=v)
def attack_degree(infected, G, v):
    """Count how many of v's neighbors in G are currently infected.

    Parameters
    ----------
    infected : array-like
        infected nodes in G at a particular time step
    G : NetworkX graph
        A graph
    v : node
        Return value of specified node
    Notes
    -----
    Attack degree is defined in Section 4.2.2 of [1]_.
    References
    ----------
    .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
        "Efficiently spotting the starting points of an epidemic in a large graph"
        Knowledge and Information Systems, 2013
        https://link.springer.com/article/10.1007/s10115-013-0671-5
    """
    return sum(1 for neighbor in G.neighbors(v) if neighbor in infected)
def attack_degree_partition(node_set, infected, G):
    """Group the nodes of `node_set` into disjoint buckets keyed by their
    attack degree.

    Parameters
    ----------
    node_set : array-like
        nodes to partition, e.g. a frontier set
    infected : array-like
        infected nodes in G at a particular time step
    G : NetworkX graph
        A graph
    Notes
    -----
    Attack degree and this partitioning method are outlined in Section 4.2.2 of [1]_.
    References
    ----------
    .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
        "Efficiently spotting the starting points of an epidemic in a large graph"
        Knowledge and Information Systems, 2013
        https://link.springer.com/article/10.1007/s10115-013-0671-5
    """
    partitions = {}
    for node in node_set:
        degree = attack_degree(infected, G, node)
        partitions.setdefault(degree, []).append(node)
    return partitions
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/utils/__init__.py | xflow/SL/cosasi/utils/__init__.py | from .helpers import *
from . import estimators
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/utils/tests/test_estimators.py | xflow/SL/cosasi/utils/tests/test_estimators.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import numpy as np
import cosasi
class TestEstimators(TestCase):
    """Unit tests for cosasi.utils.estimators against a random SI contagion."""

    def setUp(self):
        """Simulate an SI process on a G(50, 0.2) graph; snapshot at step 25."""
        # Bug fix: was `self.G = self.G = nx.gnp_random_graph(50, 0.2)`,
        # a redundant chained self-assignment typo.
        self.G = nx.gnp_random_graph(50, 0.2)
        contagion = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        contagion.forward(30)
        self.t = 25
        self.I = contagion.get_infected_subgraph(self.t)
        return None

    def test_source_subgraphs(self):
        """source_subgraphs returns exactly the requested number of subgraphs."""
        for i in range(1, 10):
            subgraphs = cosasi.utils.estimators.source_subgraphs(
                self.G, number_sources=i
            )
            assert len(subgraphs) == i

    def test_number_sources(self):
        """number_sources honors fixed counts, estimates when None, and
        rejects bad inputs for every estimation method."""
        for method in ["eigengap", "netsleuth", "chatter"]:
            # check that number of sources matches when provided
            for number_sources in range(1, 5):
                n, subgraphs = cosasi.utils.estimators.number_sources(
                    I=self.I,
                    number_sources=number_sources,
                    return_source_subgraphs=True,
                    number_sources_method=method,
                    G=self.G,
                )
                for g in subgraphs:
                    assert type(g) == nx.Graph
                assert n == number_sources
            # estimating numbers as expected
            n, subgraphs = cosasi.utils.estimators.number_sources(
                I=self.I,
                number_sources=None,
                return_source_subgraphs=True,
                number_sources_method=method,
                G=self.G,
            )
            m = cosasi.utils.estimators.number_sources(
                I=self.I,
                number_sources=None,
                return_source_subgraphs=False,
                number_sources_method=method,
                G=self.G,
            )
            assert n == m and (isinstance(n, np.int64) or isinstance(n, int))
            # just return number_sources back
            n = 2
            m = cosasi.utils.estimators.number_sources(
                I=self.I,
                number_sources=n,
                return_source_subgraphs=False,
                number_sources_method=method,
                G=self.G,
            )
            assert n == m
        # check error-handling
        with pytest.raises(NotImplementedError):
            assert cosasi.utils.estimators.number_sources(
                self.I,
                number_sources=None,
                return_source_subgraphs=False,
                number_sources_method="BAD INPUT",
            )
        with pytest.raises(ValueError):
            # Need `G` for NETSLEUTH method
            assert cosasi.utils.estimators.number_sources(
                G=None,
                I=self.I,
                number_sources=None,
                return_source_subgraphs=False,
                number_sources_method="netsleuth",
            )
        with pytest.raises(ValueError):
            # Need `G` for chatter method
            assert cosasi.utils.estimators.number_sources(
                G=None,
                I=self.I,
                number_sources=None,
                return_source_subgraphs=False,
                number_sources_method="chatter",
            )
        with pytest.raises(ValueError):
            # number_sources must be an int or None
            assert cosasi.utils.estimators.number_sources(
                G=self.G,
                I=self.I,
                number_sources="BAD INPUT",
                return_source_subgraphs=False,
                number_sources_method="chatter",
            )

    def test_eigengap(self):
        """eigengap returns an integer count; two disjoint cliques give 2."""
        assert isinstance(cosasi.utils.estimators.eigengap(self.G), np.int64)
        # two disjoint complete graphs should have a spectral gap of 2
        K = nx.complete_graph(10)
        H = nx.disjoint_union(K, K)
        assert cosasi.utils.estimators.eigengap(H) == 2

    def test_bits_encode_integer(self):
        """Code length grows monotonically in n and rejects n < 1."""
        last = 0
        for i in range(-10, 10):
            if i < 1:
                with pytest.raises(ValueError):
                    cosasi.utils.estimators.bits_encode_integer(i)
            else:
                # number of bits should increase w/ integer
                bits = cosasi.utils.estimators.bits_encode_integer(i)
                assert bits > last
                last = bits
        assert cosasi.utils.estimators.bits_encode_integer(1) == pytest.approx(1.05259)

    def test_bits_encode_seed(self):
        """Encoding a seed set costs more than encoding its size alone."""
        seed = [1, 2, 3]
        assert cosasi.utils.estimators.bits_encode_seed(
            seed, self.G
        ) > cosasi.utils.estimators.bits_encode_integer(len(seed))

    def test_bits_encode_ripple(self):
        """Ripple encodings are strictly positive."""
        bits_ripple_1 = cosasi.utils.estimators.bits_encode_ripple(
            list(range(1)), self.G
        )
        bits_ripple_2 = cosasi.utils.estimators.bits_encode_ripple(
            list(range(5)), self.G
        )
        assert 0 < min(bits_ripple_1, bits_ripple_2)

    def test_description_length(self):
        """Description lengths are strictly positive."""
        seed = [1, 2, 3]
        assert (
            min(
                cosasi.utils.estimators.description_length(seed, self.G),
                cosasi.utils.estimators.description_length(seed, self.G),
            )
            > 0
        )

    def test_chatter_frequency(self):
        """chatter_frequency returns a non-negative n-by-n matrix."""
        message_frequency = cosasi.utils.estimators.chatter_frequency(self.G, 5)
        assert message_frequency.size == len(self.G) * len(self.G)
        assert np.min(message_frequency) >= 0

    def test_chatter_distance(self):
        """chatter_distance: non-negative, normalized to [0, 1], symmetric,
        and consistent between scalar and array forms."""
        for t in [1, 5, None]:
            for G in [self.G, nx.disjoint_union(self.G, self.G)]:
                # distances are non-negative
                assert (
                    np.min(
                        cosasi.utils.estimators.chatter_distance(
                            G=G, t=t, normalized=False
                        )
                    )
                    >= 0
                )
                dist = cosasi.utils.estimators.chatter_distance(
                    G=G, t=t, normalized=True
                )
                if (
                    dist[0][1] != dist[0][1]
                    or np.max(dist) != np.max(dist)
                    or np.min(dist) != np.min(dist)
                ):
                    # NaN entries (x != x): skip the value comparisons
                    pass
                else:
                    assert dist[0][1] == cosasi.utils.estimators.chatter_distance(
                        G=G, t=t, u=0, v=1, normalized=True
                    )
                    # check normalization
                    assert 1 >= np.max(dist) >= np.min(dist) >= 0
                # array ops work right
                assert len(
                    cosasi.utils.estimators.chatter_distance(G=G, t=t, u=0)
                ) == len(G)
                assert len(
                    cosasi.utils.estimators.chatter_distance(self.G, 5, v=0)
                ) == len(G)
                # should be symmetric
                for i in range(len(G)):
                    for j in range(len(G)):
                        assert cosasi.utils.estimators.chatter_distance(
                            G=G, t=t, u=i, v=j, normalized=False
                        ) == cosasi.utils.estimators.chatter_distance(
                            G=G, t=t, u=j, v=i, normalized=False
                        )

    def test_chatter(self):
        """chatter returns a positive int bounded by the subgraph size."""
        result = cosasi.utils.estimators.chatter(self.I, self.G)
        assert isinstance(result, int)
        assert result > 0
        assert result <= len(self.I)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/utils/tests/__init__.py | xflow/SL/cosasi/utils/tests/__init__.py | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false | |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/utils/tests/test_helpers.py | xflow/SL/cosasi/utils/tests/test_helpers.py | import os, sys
sys.path.insert(0, os.getcwd())
import pytest
import networkx as nx
import numpy as np
import random
from cosasi import utils
def test_list_product():
    """list_product handles identity, positives, negatives, and zero."""
    values = [1]
    assert utils.list_product(values) == 1
    values += [2]
    assert utils.list_product(values) == 2
    values += [-3]
    assert utils.list_product(values) == -6
    values += [0]
    assert utils.list_product(values) == 0
def test_longest_list():
    """longest_list picks the list with the most elements."""
    n = 10
    nested = [list(range(size)) for size in range(n)]
    assert utils.longest_list(nested) == list(range(n - 1))
def test_longest_list_len():
    """longest_list_len reports the maximum sublist length."""
    n = 10
    nested = [list(range(size)) for size in range(n)]
    assert utils.longest_list_len(nested) == n - 1
def test_soft_eccentricity():
    """Finite on a connected graph, infinite when disconnected, 1 on a singleton."""
    clique = nx.complete_graph(4)
    assert utils.soft_eccentricity(clique, 1) < np.inf
    two_cliques = nx.disjoint_union(clique, clique)
    assert utils.soft_eccentricity(two_cliques, 1) == np.inf
    singleton = nx.complete_graph(1)
    assert utils.soft_eccentricity(singleton, 0) == 1
def test_attack_degree():
    """In K4 with one infected node, every other node has attack degree 1."""
    clique = nx.complete_graph(4)
    infected = [3]
    for node in range(3):
        assert utils.attack_degree(infected, clique, node) == 1
def test_attack_degree_partition():
    """Partition covers exactly the node set, with degrees bounded by the
    graph's maximum degree.

    Bug fixes: the function was named `attack_degree_partition` (no `test_`
    prefix), so pytest never collected it; and `max(G.degree())` compared
    (node, degree) tuples — picking the largest node label, not the largest
    degree — so the bound assertion could flake.
    """
    G = nx.gnp_random_graph(50, 0.2)
    node_set = [1, 3, 4]
    infected = [1, 16, 17, 19, 24, 34, 36, 41, 43, 49]
    partition = utils.attack_degree_partition(node_set, infected, G)
    vals = []
    for v in partition.values():
        vals += v
    assert sorted(vals) == sorted(node_set)
    # compare degrees directly rather than (node, degree) tuples
    assert max(partition) <= max(d for _, d in G.degree())
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/contagion/static_network_contagion.py | xflow/SL/cosasi/contagion/static_network_contagion.py | import random
import numpy as np
import operator
import networkx as nx
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
class StaticNetworkContagion:
"""A stochastic epidemic process defined on a static network.
Parameters
----------
G : NetworkX Graph
The network for the diffusion process to run on
model : str
Specifies the epidemic model. Currently handles the following diffusion models:
- SI (susceptible-infected)
- SIS (susceptible-infected-susceptible)
- SIR (susceptible-infected-recovered)
infection_rate : float
Inter-node infection efficiency
must be in [0, 1]
recovery_rate : float or None
The recovery rate
must be in [0, 1] (or None if diffusion model is SI)
fraction_infected : float or None
fraction of nodes to initialize as infected (selected uniformly at random)
if both fraction_infected and number_infected are None, initializes with 1 infected node
number_infected : float or None
number of nodes to initialize as infected (selected uniformly at random)
if both fraction_infected and number_infected are None, initializes with 1 infected node
seed : integer, random_state, or None (default)
random number generation state.
Notes
-----
A wrapper for `ndlib` with convenience utilities added.
"""
def __init__(
    self,
    G,
    model="si",
    infection_rate=0.01,
    recovery_rate=None,
    fraction_infected=None,
    number_infected=None,
    seed=None,
    model_config=None  # added by Zhiqian
):
    """A stochastic epidemic process defined on a static network.
    Parameters
    ----------
    G : NetworkX Graph
        The network for the diffusion process to run on
    model : str
        Specifies the epidemic model. Currently handles the following diffusion models:
            SI
            SIS
            SIR
    infection_rate : float
        Inter-node infection efficiency
        must be in [0, 1]
    recovery_rate : float or None
        The recovery rate
        must be in [0, 1] (or None if diffusion model is SI)
    fraction_infected : float or None
        fraction of nodes to initialize as infected (selected uniformly at random)
        if both fraction_infected and number_infected are None, initializes with 1 infected node
    number_infected : float or None
        number of nodes to initialize as infected (selected uniformly at random)
        if both fraction_infected and number_infected are None, initializes with 1 infected node
    seed : integer, random_state, or None (default)
        random number generation state.
    model_config : ndlib ModelConfig or None (optional)
        pre-built configuration to use instead of a fresh one
    Notes
    -----
    A wrapper for `ndlib` with convenience utilities added.
    """
    self.model = model.lower()
    self.seed = seed
    # fall back to a fresh ndlib configuration when none is supplied
    self.model_config = model_config if model_config else mc.Configuration()  # added by Zhiqian
    if not isinstance(self.seed, type(None)):
        # seed both RNGs so initial-infection sampling is reproducible
        random.seed(self.seed)
        np.random.seed(self.seed)
    if isinstance(G, nx.classes.graph.Graph):
        self.G = G
    else:
        raise ValueError("G must be a NetworkX instance.")
    if isinstance(infection_rate, float) and 0.0 <= infection_rate <= 1.0:
        self.infection_rate = infection_rate
    else:
        raise ValueError("Infection rate must be a float between 0 and 1.")
    # recovery_rate may be falsy (None/0) for SI; otherwise must be a valid float
    if not recovery_rate or (
        isinstance(recovery_rate, float) and 0.0 <= recovery_rate <= 1.0
    ):
        self.recovery_rate = recovery_rate
    else:
        raise ValueError("Recovery rate must be a float between 0 and 1.")
    # the two initial-infection specifications are mutually exclusive
    if fraction_infected and number_infected:
        raise ValueError(
            "User can only provide one of fraction_infected, number_infected."
        )
    elif not fraction_infected and not number_infected:
        # default: start with exactly one infected node
        self.fraction_infected = fraction_infected
        self.number_infected = 1
    else:
        self.fraction_infected = fraction_infected
        self.number_infected = number_infected
    self._init_sim()
    self.history = []
    return None
def _init_sim(self):
"""Initializes the diffusion process properties and initial infectivity."""
config = self.model_config # added by Zhiqian
config.add_model_parameter("beta", self.infection_rate)
if self.model == "sir":
self.sim = ep.SIRModel(graph=self.G, seed=self.seed)
if not self.recovery_rate:
raise ValueError("Recovery rate must be defined for SIR model.")
config.add_model_parameter("gamma", self.recovery_rate)
elif self.model == "si":
self.sim = ep.SIModel(graph=self.G, seed=self.seed)
elif self.model == "sis":
self.sim = ep.SISModel(graph=self.G, seed=self.seed)
if not self.recovery_rate:
raise ValueError("Recovery rate must be defined for SIS model.")
config.add_model_parameter("lambda", self.recovery_rate)
else:
raise NotImplementedError("Diffusion model not recognized.")
if self.number_infected:
if not isinstance(self.seed, type(None)):
random.seed(self.seed)
infected = random.sample(range(len(self.G)), self.number_infected)
config.add_model_initial_configuration("Infected", infected)
elif self.fraction_infected:
config.add_model_parameter("fraction_infected", self.fraction_infected)
elif self.mc: # added by Zhiqian
config = self.model_config
else:
raise NotImplementedError
self.sim.set_initial_status(config)
return None
def forward(self, steps=100, verbose=False):
"""Executes specified number of diffusion process steps. Records simulation history.
Parameters
----------
steps : int
Number of simulation steps.
verbose : bool (default False)
Specifies whether to return the simulation history.
Notes
-----
Can be run more than once; this just adds steps to the simulation history.
"""
self.history += self.sim.iteration_bunch(steps)
if verbose:
return self.history
return None
def reset_sim(self):
"""Resets the simulation to its initialized states. Does not preserve compartmental histories."""
self.history = []
self.sim.reset()
return None
def get_infected_indices(self, step=0):
"""Retrieves the indices of all vertices in the infected compartment at the provided step.
Parameters
----------
step : int
Iteration step
Returns
-------
list
"""
nodes = list(self.G)
def status_to_delta(status):
"""Converts the history's status to a vector representing movement in
(+1) and out (-1) of the infected compartment
Parameters
----------
status : dict
status dictionary from history, e.g. self.history[step]["status"]
"""
delta = np.zeros(len(self.G))
for idx in status:
s = status[idx]
if s == 1:
# node became infected this step
delta[idx] = 1
if s == 2:
# node became removed this step
delta[idx] = -1
return delta
if step >= len(self.history):
raise ValueError(
"Invalid step. Continue the simulation to reach this step."
)
infected = np.zeros(len(self.G))
for s in range(step + 1):
infected += status_to_delta(self.history[s]["status"])
return [nodes[i] for i in np.where(infected == 1)[0]]
def get_infected_subgraph(self, step=0):
"""Returns the subgraph of the contact network whose vertices are marked infected.
Parameters
----------
step : int
Iteration step
Returns
-------
NetworkX Graph
Notes
-----
This is only guaranteed to be connected in the SI model.
"""
infected_indices = self.get_infected_indices(step=step)
not_infected_indices = set(self.G.nodes) - set(infected_indices)
H = self.G.copy()
H.remove_nodes_from(not_infected_indices)
return H
def get_observers(self, observers=1):
"""Observers record the step number when they become infected. For a specified number
or list of observers, returns a dict of observers and the timestamps at which they
become infected.
Parameters
----------
observers : int or list
If int, observers specifies the number of observation nodes
If list, observers specifies the observation nodes directly
Notes
-----
If self.model == "sis", nodes may be reinfected, so observers record a list of the timestamps
at which they are infected. Otherwise, observers record one timestamp (step number) only.
If an observer is not infected during the simulation history, its corresponding infection
timestamp is recorded as infinity.
"""
if not self.history:
raise ValueError(
"Simulation must be run before retrieving observer information."
)
timestamp_placeholder = np.inf if self.model == "si" else list()
if isinstance(observers, int):
if not isinstance(self.seed, type(None)):
random.seed(self.seed)
np.random.seed(self.seed)
observer_dict = {
i: timestamp_placeholder for i in random.sample(self.G.nodes, observers)
}
elif isinstance(observers, list):
observer_dict = {i: timestamp_placeholder for i in observers}
else:
raise NotImplementedError
for i in range(len(self.history)):
status = self.history[i]["status"]
if self.model == "sis":
for j in observer_dict:
if j in status and status[j] == 1:
observer_dict[j].append(i)
else:
for j in observer_dict:
if j in status and status[j] == 1:
observer_dict[j] = i
return observer_dict
def get_source(self, return_subgraph=False):
"""Returns the vertices marked infected at initialization.
Parameters
----------
return_subgraph : bool
If True, returns a subgraph of infected vertices.
If False, returns a list of indices.
Returns
-------
list or NetworkX Graph
"""
if not isinstance(return_subgraph, bool):
raise ValueError("return_subgraph param must be a bool")
if return_subgraph:
return self.get_infected_subgraph(step=0)
return self.get_infected_indices(step=0)
def get_frontier(self, step=0):
"""Retrieves the frontier set of a given step. This is the set of infected nodes
with an uninfected neighbor.
Parameters
----------
step : int
Iteration step
Returns
-------
NetworkX Graph
Notes
-----
In the SI model, the frontier set consists of nodes likely to have been
infected last, by the given timestep.
"""
T = self.get_infected_indices(step=step)
S = [v for v in self.G if v not in T]
frontier = nx.node_boundary(G=self.G, nbunch1=S, nbunch2=T)
return frontier
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/contagion/__init__.py | xflow/SL/cosasi/contagion/__init__.py | from .static_network_contagion import *
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/contagion/tests/__init__.py | xflow/SL/cosasi/contagion/tests/__init__.py | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false | |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/contagion/tests/test_static_network_contagion.py | xflow/SL/cosasi/contagion/tests/test_static_network_contagion.py | import os, sys
import collections
sys.path.insert(0, os.getcwd())
import pytest
from unittest import TestCase
import networkx as nx
import numpy as np
import cosasi
class Test_StaticNetworkContagion(TestCase):
def setUp(self):
self.number_infected_init = 10
self.sim_steps = 10
self.G = nx.fast_gnp_random_graph(200, 0.25)
self.contagion = cosasi.StaticNetworkContagion(
G=self.G,
model="si",
infection_rate=0.1,
number_infected=self.number_infected_init,
)
self.contagion.forward(self.sim_steps)
return None
def test_argument_exceptions(self):
with pytest.raises(ValueError):
# G must be a NetworkX graph
cosasi.StaticNetworkContagion(G="BAD INPUT", model="si", infection_rate=0.1)
with pytest.raises(NotImplementedError):
# model must be "sir", "si", or "sis"
cosasi.StaticNetworkContagion(
G=self.G, model="BAD INPUT", infection_rate=0.1
)
with pytest.raises(ValueError):
# infection_rate must be between 0 and 1
cosasi.StaticNetworkContagion(G=self.G, model="si", infection_rate=10)
with pytest.raises(ValueError):
# recovery_rate must be between 0 and 1
cosasi.StaticNetworkContagion(
G=self.G, model="si", infection_rate=0.1, recovery_rate=10
)
with pytest.raises(ValueError):
# can only provide one of fraction_infected, number_infected
cosasi.StaticNetworkContagion(
G=self.G,
model="si",
infection_rate=0.1,
fraction_infected=0.1,
number_infected=self.number_infected_init,
)
for m in ["sir", "sis"]:
with pytest.raises(ValueError):
# requires recovery rate
cosasi.StaticNetworkContagion(G=self.G, model=m, infection_rate=0.1)
cosasi.StaticNetworkContagion(
G=self.G, model=m, infection_rate=0.1, recovery_rate=0.05
)
assert True
def test_fraction_infected(self):
contagion = cosasi.StaticNetworkContagion(
G=self.G,
model="si",
infection_rate=0.1,
fraction_infected=self.number_infected_init / len(self.G),
)
contagion.forward(5)
assert self.number_infected_init == len(contagion.get_source())
def test_get_infected_indices(self):
assert len(self.contagion.get_infected_indices()) == self.number_infected_init
temp_contagion = cosasi.StaticNetworkContagion(
G=self.G,
model="si",
infection_rate=0.1,
fraction_infected=None,
number_infected=None,
)
temp_contagion.forward()
assert len(temp_contagion.get_infected_indices()) == 1
return None
def test_forward(self):
assert len(self.contagion.history) == self.sim_steps
return None
def test_reset_sim(self):
self.contagion.reset_sim()
assert len(self.contagion.history) == 0
self.contagion.forward(self.sim_steps)
return None
def test_get_infected_subgraph(self):
sg = self.contagion.get_infected_subgraph(step=self.sim_steps - 1)
assert isinstance(sg, nx.Graph)
assert len(sg) == len(
self.contagion.get_infected_indices(step=self.sim_steps - 1)
)
assert set(sg.nodes) == set(
self.contagion.get_infected_indices(step=self.sim_steps - 1)
) # sets are unordered
return None
def test_get_observers(self):
num_observers = 5
self.contagion.forward(steps=100)
observers = self.contagion.get_observers(observers=num_observers)
assert len(observers) == num_observers # check size
for i in observers.keys():
assert i in self.G.nodes
# check types
assert isinstance(observers[i], (int, float, type(None)))
if isinstance(observers[i], float):
observers[i] == np.inf
return None
def test_get_source(self):
source_verts = self.contagion.get_source()
assert isinstance(source_verts, list)
assert len(source_verts) <= len(self.G)
source_graph = self.contagion.get_source(return_subgraph=True)
assert isinstance(source_graph, nx.Graph)
assert set(source_graph.nodes) == set(source_verts)
return None
def test_get_frontier(self):
s = 15
G = nx.fast_gnp_random_graph(100, 0.25)
contagion = cosasi.StaticNetworkContagion(
G=G, model="si", infection_rate=0.01, number_infected=3
)
contagion.forward(500)
I = contagion.get_infected_subgraph(step=s)
frontier = contagion.get_frontier(step=s)
assert isinstance(frontier, (list, set)) # basic type checking
assert all(
[v in I for v in frontier]
) # frontier is a subset of infection subgraph
beyond_frontier = []
for i in I:
if i not in frontier:
# every node not in the frontier has all neighbors in I
assert all([v in I for v in G.neighbors(v)])
# frontier at step 0 should just be the initially-infected indices
frontier_0 = contagion.get_frontier(step=0)
assert collections.Counter(frontier_0) == collections.Counter(
contagion.get_source()
)
# when the graph is saturated/maximally-infected, frontier should be empty
largest_cc_size = len(max(nx.connected_components(G), key=len))
while len(contagion.get_infected_indices(step=s)) < largest_cc_size:
s += 1
assert contagion.get_frontier(step=s) == set()
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/__init__.py | xflow/SL/cosasi/source_inference/__init__.py | from . import single_source
from . import multiple_source
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/source_results.py | xflow/SL/cosasi/source_inference/source_results.py | """Generic objects for the result of single-source and multi-source localization.
All inference algorithms should return an instance of one of these classes.
"""
import json
from collections import Counter
from collections.abc import Iterable
import itertools
import numpy as np
import networkx as nx
MODULE_PATH = __file__[: -len("source_results.py")]
MODULE_PATH = (
MODULE_PATH
if len(MODULE_PATH) > 0 and (MODULE_PATH[-1] == "/" or MODULE_PATH[-1] == "\\")
else MODULE_PATH + "/"
)
def node_set_distance(s1, s2, G):
"""Implements a distance measure between vertex sets (of possibly different sizes).
Parameters
----------
s1 : array-like
first vertex set
s2 : array-like
second vertex set
G : NetworkX Graph
graph to search on
"""
perm_scores = {}
if isinstance(s1, Iterable):
s1 = list(s1)
else:
s1 = [s1]
if isinstance(s2, Iterable):
s2 = list(s2)
else:
s2 = [s2]
for s2_perm in itertools.permutations(s2):
perm_scores[s2_perm] = 0
for i in range(min(len(s1), len(s2))):
perm_scores[s2_perm] += nx.shortest_path_length(
G, source=s1[i], target=s2_perm[i]
)
if len(s1) > len(s2):
for j in range(i, len(s1)):
min_add = np.inf
for s in s2_perm:
d = nx.shortest_path_length(G, source=s1[j], target=s)
if d < min_add:
min_add = d
perm_scores[s2_perm] += min_add
if len(s2) > len(s1):
for j in range(i, len(s2_perm)):
min_add = np.inf
for s in s1:
d = nx.shortest_path_length(G, source=s2_perm[j], target=s)
if d < min_add:
min_add = d
perm_scores[s2_perm] += min_add
return min(perm_scores.values())
class SourceResult:
"""Abstract class outlining response object for the result of a source inference algorithm.
Parameters
----------
source_type : str
either "single-source" or "multi-source"
inference_method : str
name of the source localization algorithm used
scores : dict
per-item scores for ranking, retrieval, etc.
G : NetworkX Graph
The original graph the infection process was run on.
I is a subgraph of G induced by infected vertices at observation time.
algorithm_details : bool
if True, includes relevant information about the source
inference algorithm used
reverse : bool (default True)'
if True, ranks items from highest score to lowest
if False, ranks items from lowest score to highest
"""
def __init__(
self,
source_type,
inference_method,
scores,
G,
algorithm_details=True,
reverse=True,
):
"""Abstract class outlining response object for the result of a source inference algorithm.
Parameters
----------
source_type : str
either "single-source" or "multi-source"
inference_method : str
name of the source localization algorithm used
scores : dict
per-item scores for ranking, retrieval, etc.
G : NetworkX Graph
The original graph the infection process was run on.
I is a subgraph of G induced by infected vertices at observation time.
algorithm_details : bool
if True, includes relevant information about the source
inference algorithm used
reverse : bool (default True)'
if True, ranks items from highest score to lowest
if False, ranks items from lowest score to highest
"""
if not isinstance(G, nx.classes.graph.Graph):
raise ValueError("G must be a NetworkX graph.")
else:
self.G = G
source_type = source_type.lower()
if source_type not in ["single-source", "multi-source"]:
raise ValueError("Source type must be single- or multi-source.")
self.data = {
"scores": scores,
"inference method": {"name": inference_method, "source_type": source_type},
"G": G,
}
if algorithm_details:
algorithms = json.load(open(MODULE_PATH + "algorithm_details.json"))
for k in algorithms[source_type][inference_method]:
self.data["inference method"][k] = algorithms[source_type][
inference_method
][k]
self.reverse = reverse
return None
def rank(self):
"""Rank nodes by score.
Returns
-------
list of item indices
"""
scores = self.data["scores"]
return sorted(scores, key=scores.get, reverse=self.reverse)
def topn(self, n=1):
"""Returns the top n item indices by rank.
Rank can be highest-first (reverse==True) or lowest-first (reverse==False)
Parameters
----------
n : int
number of item indices to return
Returns
-------
list of item indices
"""
if not isinstance(n, int):
raise ValueError("n must be an integer.")
rank = self.rank()
return rank[:n]
def evaluate_solution_rank(self, true_source):
"""Finds the rank of the true source, by the algorithm's scoring protocol.
Parameters
----------
true_source : graph index - str, int, etc.
the actual source node
"""
single_source = len(self.topn(n=1)) == 1
if isinstance(true_source, (list, tuple)) and len(true_source) == 1:
true_source = true_source[0]
return self.get_rank(true_source, soft_rank=True)
def evaluate_distance(self, true_source):
"""Finds the shortest path length between each node in the solution set and the
true souce.
Parameters
----------
true_source : tuple
the actual source set
"""
eval_scores = {h: np.inf for h in self.data["scores"]}
for s in eval_scores.keys():
eval_scores[s] = node_set_distance(G=self.G, s1=s, s2=true_source)
return eval_scores
def evaluate(self, true_source):
"""Runs evaluation algorithms and returns a dictionary of results.
Parameters
----------
true_source : graph index - str, int, etc.
the actual source node
"""
dist = self.evaluate_distance(true_source=true_source)
top_sol = self.topn(n=1)[0]
rank = self.evaluate_solution_rank(true_source=true_source)
evaluation_results = {
"true source": true_source,
"distance": {
"top score's distance": {top_sol: dist[top_sol]},
"all distances": dist,
},
"rank": rank,
"rank %": rank / len(self.data["scores"]),
}
return evaluation_results
class SingleSourceResult(SourceResult):
"""Response object for the result of single-source inference.
Parameters
----------
inference_method : str
name of the source localization algorithm used
scores : dict
per-node scores for ranking, retrieval, etc.
algorithm_details : bool
if True, includes relevant information about the source
inference algorithm used
reverse : bool (default True)'
if True, ranks items from highest score to lowest
if False, ranks items from lowest score to highest
"""
def __init__(self, *args, **kwargs):
"""Response object for the result of single-source inference.
Parameters
----------
inference_method : str
name of the source localization algorithm used
scores : dict
per-node scores for ranking, retrieval, etc.
algorithm_details : bool
if True, includes relevant information about the source
inference algorithm used
reverse : bool (default True)'
if True, ranks items from highest score to lowest
if False, ranks items from lowest score to highest
"""
super().__init__(*args, **kwargs)
return None
def get_rank(self, v, soft_rank=False):
"""Returns the rank of vertex (1 = "best")
Parameters
----------
v : graph index - str, int, etc.
vertex of interest
soft_rank : bool
if True and v is not in the list of hypotheses, returns 1 more
than the number of hypotheses
Returns
-------
int
"""
rank = self.rank()
if soft_rank and v not in rank:
return len(rank) + 1
return rank.index(v) + 1
class MultiSourceResult(SourceResult):
"""Response object for the result of mutli-source inference.
Parameters
----------
inference_method : str
name of the source localization algorithm used
scores : dict
per-item scores for ranking, retrieval, etc.
algorithm_details : bool
if True, includes relevant information about the source
inference algorithm used
reverse : bool (default True)'
if True, ranks items from highest score to lowest
if False, ranks items from lowest score to highest
"""
def __init__(self, *args, **kwargs):
"""Response object for the result of mutli-source inference.
Parameters
----------
inference_method : str
name of the source localization algorithm used
scores : dict
per-item scores for ranking, retrieval, etc.
algorithm_details : bool
if True, includes relevant information about the source
inference algorithm used
reverse : bool (default True)'
if True, ranks items from highest score to lowest
if False, ranks items from lowest score to highest
"""
super().__init__(*args, **kwargs)
return None
def get_rank(self, s, soft_rank=False):
"""Returns the rank of the provided node set (1 = "best")
Parameters
----------
s : list
node set of graph indices
soft_rank : bool
if True and v is not in the list of hypotheses, returns 1 more
than the number of hypotheses
Returns
-------
int
"""
rank = self.rank()
r = 1
for q in rank:
if Counter(s) == Counter(q):
break
r += 1
if not soft_rank and r > len(self.data["scores"]):
raise ValueError("Proposed source set not found among top hypotheses.")
return r
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/tests/test_source_results.py | xflow/SL/cosasi/source_inference/tests/test_source_results.py | import os, sys
import pytest
import itertools
import random
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import networkx as nx
import numpy as np
import cosasi
from ..source_results import SourceResult, SingleSourceResult, MultiSourceResult, node_set_distance
def test_node_set_distance():
G = nx.karate_club_graph()
# sets are individual nodes
assert node_set_distance(5, 2, G) == 2
# mixed
assert node_set_distance(5, [2, 3], G) == 6
assert node_set_distance([5, 6, 7], 2, G) == 7
# bother iterable
assert node_set_distance([5, 6], [2, 3], G) == 4
# overlapping
assert node_set_distance([5, 6], [5, 6], G) == 0
assert node_set_distance([5, 6], [2, 6], G) == node_set_distance(5, 2, G) == 2
class Test_SourceResult(TestCase):
def setUp(self):
self.inference_method = "rumor centrality"
self.scores = {1: 1, 2: 10, 3: 0.025}
self.G = nx.fast_gnp_random_graph(50, 0.25)
self.result = SourceResult(
inference_method=self.inference_method,
scores=self.scores,
source_type="single-source",
G=self.G,
)
return None
def test_rank(self):
ranked = self.result.rank()
assert self.result.rank() == [2, 1, 3]
def test_topn(self):
with pytest.raises(ValueError):
assert self.result.topn("BAD INPUT")
assert self.result.topn(n=1) == [2]
def test_bad_graph_input(self):
with pytest.raises(ValueError):
assert SourceResult(
inference_method=self.inference_method,
scores=self.scores,
source_type="single-source",
G="BAD INPUT",
)
def test_bad_source_type_input(self):
with pytest.raises(ValueError):
assert SourceResult(
inference_method=self.inference_method,
scores=self.scores,
source_type="BAD INPUT",
G=self.G,
)
class Test_SingleSourceResult(TestCase):
def setUp(self):
self.inference_method = "rumor centrality"
self.scores = {1: 1, 2: 10, 3: 0.025}
self.G = nx.fast_gnp_random_graph(50, 0.25)
self.result = SingleSourceResult(
inference_method=self.inference_method,
scores=self.scores,
source_type="single-source",
G=self.G,
)
return None
def test_get_rank(self):
assert self.result.get_rank(2) == 1
assert self.result.get_rank(3) == 3
with pytest.raises(ValueError):
assert self.result.get_rank("BAD INPUT")
def test_bad_graph_input(self):
with pytest.raises(ValueError):
assert SingleSourceResult(
inference_method=self.inference_method,
scores=self.scores,
source_type="single-source",
G="BAD INPUT",
)
class Test_MultiSourceResult(TestCase):
def setUp(self):
self.inference_method = "fast multi-source jordan centrality"
self.scores = {}
self.G = nx.fast_gnp_random_graph(50, 0.25)
for s in list(itertools.combinations(range(5), 2)):
self.scores[s] = random.random()
self.result = MultiSourceResult(
inference_method=self.inference_method,
scores=self.scores,
source_type="multi-source",
G=self.G,
)
return None
def test_get_rank(self):
max_key = max(self.scores, key=self.scores.get)
assert self.result.get_rank(max_key) == 1
def test_bad_graph_input(self):
with pytest.raises(ValueError):
assert MultiSourceResult(
inference_method=self.inference_method,
scores=self.scores,
source_type="single-source",
G="BAD INPUT",
)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/tests/__init__.py | xflow/SL/cosasi/source_inference/tests/__init__.py | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false | |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/multiple_source/lisn.py | xflow/SL/cosasi/source_inference/multiple_source/lisn.py | import itertools
import networkx as nx
import numpy as np
from ..source_results import MultiSourceResult
from ...utils import estimators
from .. import single_source
def fast_multisource_lisn(I, G, t, number_sources=None):
"""Greedily runs single-source LISN algorithm on each estimated infection
subgraph attributable to each of the hypothesized number of sources.
Parameters
----------
I : NetworkX Graph
The infection subgraph observed at a particular time step
G : NetworkX Graph
The original graph the infection process was run on.
I is a subgraph of G induced by infected vertices at observation time.
t : int
the observation timestep corresponding to I
number_sources : int or None (optional)
if int, this is the hypothesized number of infection sources
if None, estimates the number of sources
Notes
-----
The Jordan infection center is the vertex with minimum infection eccentricity.
This is described in [1]_ and [2]_.
Examples
--------
>>> result = cosasi.multiple_source.fast_multisource_jordan_centrality(I, G)
References
----------
.. [1] L. Ying and K. Zhu,
"On the Universality of Jordan Centers for Estimating Infection Sources in Tree Networks"
IEEE Transactions of Information Theory, 2017
.. [2] L. Ying and K. Zhu,
"Diffusion Source Localization in Large Networks"
Synthesis Lectures on Communication Networks, 2018
"""
if not number_sources:
number_sources, subgraphs = estimators.number_sources(
I, return_source_subgraphs=True
)
else:
number_sources, subgraphs = estimators.number_sources(
I, number_sources=number_sources, return_source_subgraphs=True
)
sources_scores = [
{
k: v
for k, v in single_source.lisn(I=subgraphs[i], G=G, t=t)
.data["scores"]
.items()
if v != -np.inf
}
for i in range(number_sources)
]
data = [list(d.keys()) for d in sources_scores]
product_scores = {}
for item in itertools.product(*data):
idx = tuple(item)
product_scores[idx] = 0
for i in range(len(idx)):
product_scores[idx] += sources_scores[i][idx[i]]
result = MultiSourceResult(
source_type="multi-source",
inference_method="fast multi-source lisn",
scores=product_scores,
G=G,
)
return result
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/multiple_source/jordan.py | xflow/SL/cosasi/source_inference/multiple_source/jordan.py | import itertools
import networkx as nx
import numpy as np
from ..source_results import MultiSourceResult
from ...utils import estimators
from .. import single_source
def fast_multisource_jordan_centrality(I, G, number_sources=None):
"""Greedily runs single-source Jordan centrality on each estimated infection
subgraph attributable to each of the hypothesized number of sources.
Parameters
----------
I : NetworkX Graph
The infection subgraph observed at a particular time step
G : NetworkX Graph
The original graph the infection process was run on.
I is a subgraph of G induced by infected vertices at observation time.
number_sources : int or None (optional)
if int, this is the hypothesized number of infection sources
if None, estimates the number of sources
Notes
-----
The Jordan infection center is the vertex with minimum infection eccentricity.
This is described in [1]_ and [2]_.
Examples
--------
>>> result = cosasi.multiple_source.fast_multisource_jordan_centrality(I, G)
References
----------
.. [1] L. Ying and K. Zhu,
"On the Universality of Jordan Centers for Estimating Infection Sources in Tree Networks"
IEEE Transactions of Information Theory, 2017
.. [2] L. Ying and K. Zhu,
"Diffusion Source Localization in Large Networks"
Synthesis Lectures on Communication Networks, 2018
"""
if not number_sources:
number_sources, subgraphs = estimators.number_sources(
I, return_source_subgraphs=True
)
else:
number_sources, subgraphs = estimators.number_sources(
I, number_sources=number_sources, return_source_subgraphs=True
)
sources_scores = [
{
k: v
for k, v in single_source.jordan_centrality(subgraphs[i], G)
.data["scores"]
.items()
if v != -np.inf
}
for i in range(number_sources)
]
data = [list(d.keys()) for d in sources_scores]
product_scores = {}
for item in itertools.product(*data):
idx = tuple(item)
product_scores[idx] = 0
for i in range(len(idx)):
product_scores[idx] += sources_scores[i][idx[i]]
result = MultiSourceResult(
source_type="multi-source",
inference_method="fast multi-source jordan centrality",
scores=product_scores,
G=G,
)
return result
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/multiple_source/netsleuth.py | xflow/SL/cosasi/source_inference/multiple_source/netsleuth.py | import itertools
import networkx as nx
import numpy as np
from ..source_results import MultiSourceResult
from ...utils import estimators
from .. import single_source
def netsleuth(I, G, hypotheses_per_step=1):
"""Implements the multi-source NETSLEUTH algorithm to score combinations
of nodes in G.
Parameters
----------
I : NetworkX Graph
The infection subgraph observed at a particular time step
G : NetworkX Graph
The original graph the infection process was run on.
I is a subgraph of G induced by infected vertices at observation time.
hypotheses_per_step : int (default 1)
number of candidate sources to be kept per iteration of NETSLEUTH.
Particular usage is described in greater detail in `Notes` section.
Notes
-----
The number of source hypotheses returned will be hypotheses_per_step*[number of seed nodes],
the latter of which is automatically determined via minimum description length
calculations.
NETSLEUTH is described in [1]_ and [2]_.
NETSLEUTH has linear complexity with the number of edges of the infected subgraph,
edges of the frontier set, and vertices of the infected subgraph.
The standard n-source version of NETSLEUTH operates as follows:
1. Obtain Source 1 via single-source method
2. Delete Source 1 from infection subgraph; obtain Source 2 via single-source method
...
n. Delete Source n-1 from infection subgraph; obtain Source n via single-source method.
This does not lend itself to ranking alternative hypotheses, so we implement a
more general variant:
1. Obtain top ``hypotheses_per_step``-many candidates for Source 1 via single-source
method; each corresponds to one hypothesis source set, each of size 1
2. For each hypothesis source set, delete these nodes from a copy of the infection subgraph,
then obtain top ``hypotheses_per_step``-many candidates for Source 2 via single-source
method; construct ``|source sets| * hypotheses_per_step`` new source sets to replace the old
source sets, each of size 2
...
n. For each hypothesis source set, delete these nodes from a copy of the infection subgraph,
then obtain top ``hypotheses_per_step``-many candidates for Source n via single-source
method; construct |source sets|*``hypotheses_per_step`` new source sets to replace the old
source sets, each of size n
Examples
--------
>>> result = cosasi.multiple_source.netsleuth(I, G, number_sources=3, hypotheses_per_step=3)
References
----------
.. [1] B. Prakash, J. Vreeken, C. Faloutsos,
"Spotting Culprits in Epidemics: How Many and Which Ones?"
IEEE 12th International Conference on Data Mining, 2012
https://ieeexplore.ieee.org/document/6413787
.. [2] L. Ying and K. Zhu,
"Diffusion Source Localization in Large Networks"
Synthesis Lectures on Communication Networks, 2018
"""
multisource_scores = {}
mdl_decreasing = True
this_mdl = np.inf
last_mdl = np.inf
i = 1
while mdl_decreasing:
if i == 1:
step_result = single_source.netsleuth(I, G)
for s in step_result.topn(hypotheses_per_step):
multisource_scores[(s)] = estimators.description_length([s], G)
else:
new_multisource_scores = {}
for j in multisource_scores.keys():
H = I.copy()
if i == 2:
H.remove_nodes_from([j])
else:
H.remove_nodes_from(j)
step_result = single_source.netsleuth(H, G)
for s in step_result.topn(hypotheses_per_step):
if i == 2:
new_s = tuple([j] + [s])
else:
new_s = tuple(list(j) + [s])
new_multisource_scores[new_s] = estimators.description_length(
list(new_s), G
)
multisource_scores = new_multisource_scores
# update mdl tracker
last_mdl = this_mdl
this_mdl = min(multisource_scores.values())
mdl_decreasing = this_mdl < last_mdl
i += 1
result = MultiSourceResult(
source_type="multi-source",
inference_method="netsleuth",
scores=multisource_scores,
G=G,
reverse=False,
)
return result
def fast_multisource_netsleuth(I, G, number_sources=None):
    """Score multi-source hypotheses by combining per-subgraph single-source NETSLEUTH runs.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
    number_sources : int or None (optional)
        if int, this is the hypothesized number of infection sources
        if None, estimates the number of sources

    Examples
    --------
    >>> result = cosasi.multiple_source.fast_multisource_netsleuth(I, G)

    Notes
    -----
    Unofficial variant of multisource NETSLEUTH intended for fast computation and ranking,
    because the typical multisource version does not lend itself to scoring many possible
    source sets.
    NETSLEUTH is described in [1]_ and [2]_. More authoritative implementation is found in
    `multisource.netsleuth`.

    References
    ----------
    .. [1] B. Prakash, J. Vreeken, C. Faloutsos,
        "Spotting Culprits in Epidemics: How Many and Which Ones?"
        IEEE 12th International Conference on Data Mining, 2012
        https://ieeexplore.ieee.org/document/6413787
    .. [2] L. Ying and K. Zhu,
        "Diffusion Source Localization in Large Networks"
        Synthesis Lectures on Communication Networks, 2018
    """
    # Partition the infection subgraph into one candidate subgraph per source,
    # estimating the source count when the caller did not supply one.
    if not number_sources:
        number_sources, subgraphs = estimators.number_sources(
            I, return_source_subgraphs=True
        )
    else:
        number_sources, subgraphs = estimators.number_sources(
            I, number_sources=number_sources, return_source_subgraphs=True
        )
    # Score each subgraph independently with single-source NETSLEUTH,
    # dropping nodes scored at negative infinity (non-candidates).
    per_source_scores = []
    for idx in range(number_sources):
        raw_scores = single_source.netsleuth(subgraphs[idx], G).data["scores"]
        per_source_scores.append(
            {node: score for node, score in raw_scores.items() if score != -np.inf}
        )
    # Every combination of one candidate per subgraph is a source-set
    # hypothesis; its score is the sum of the member candidates' scores.
    product_scores = {
        combo: sum(scores[node] for scores, node in zip(per_source_scores, combo))
        for combo in itertools.product(*(d.keys() for d in per_source_scores))
    }
    result = MultiSourceResult(
        source_type="multi-source",
        inference_method="fast multi-source netsleuth",
        scores=product_scores,
        G=G,
    )
    return result
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/multiple_source/__init__.py | xflow/SL/cosasi/source_inference/multiple_source/__init__.py | from .netsleuth import *
from .jordan import *
from .lisn import *
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/multiple_source/tests/test_netsleuth.py | xflow/SL/cosasi/source_inference/multiple_source/tests/test_netsleuth.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import numpy as np
import random
import math
import cosasi
class TestNETSLEUTH(TestCase):
    """Unit tests for the multi-source NETSLEUTH localizers."""

    def setUp(self):
        # Run an SI contagion on a complete graph and snapshot it mid-run.
        self.G = nx.complete_graph(n=100)
        contagion = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        contagion.forward(50)
        self.t = 25
        self.I = contagion.get_infected_subgraph(self.t)
        return None

    def test_fast_multisource_netsleuth(self):
        result = cosasi.source_inference.multiple_source.fast_multisource_netsleuth(
            self.I, self.G, number_sources=3
        )
        assert isinstance(
            result, cosasi.source_inference.source_results.MultiSourceResult
        )
        top5 = result.topn(5)
        # BUG FIX: ``assert [len(i) == 3 for i in top5]`` asserted a non-empty
        # list, which is always truthy — it could never fail. Check each element.
        assert all(len(i) == 3 for i in top5)
        result = cosasi.source_inference.multiple_source.fast_multisource_netsleuth(
            self.I, self.G
        )
        # Every hypothesized source set should have the same cardinality.
        l = None
        for k in result.data["scores"].keys():
            if not l:
                l = len(k)
            else:
                assert len(k) == l

    def test_netsleuth(self):
        result = cosasi.source_inference.multiple_source.netsleuth(self.I, self.G)
        assert isinstance(
            result, cosasi.source_inference.source_results.MultiSourceResult
        )
        result = cosasi.source_inference.multiple_source.netsleuth(self.I, self.G)
        # Source-set hypotheses are equal-sized; description lengths are positive.
        l = None
        for k in result.data["scores"].keys():
            if not l:
                l = len(k)
            else:
                assert len(k) == l
            assert result.data["scores"][k] > 0
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/multiple_source/tests/test_lisn.py | xflow/SL/cosasi/source_inference/multiple_source/tests/test_lisn.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import cosasi
class TestLISN(TestCase):
    """Unit tests for the fast multi-source LISN localizer."""

    def setUp(self):
        # Run an SI contagion on a complete graph and snapshot it mid-run.
        self.G = nx.complete_graph(n=100)
        contagion = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        contagion.forward(50)
        self.t = 25
        self.I = contagion.get_infected_subgraph(self.t)
        return None

    def test_fast_multisource_lisn(self):
        result = cosasi.source_inference.multiple_source.fast_multisource_lisn(
            self.I, self.G, self.t, 3
        )
        assert isinstance(
            result, cosasi.source_inference.source_results.MultiSourceResult
        )
        top5 = result.topn(5)
        # BUG FIX: ``assert [len(i) == 3 for i in top5]`` asserted a non-empty
        # list, which is always truthy — it could never fail. Check each element.
        assert all(len(i) == 3 for i in top5)
        result = cosasi.source_inference.multiple_source.fast_multisource_lisn(
            self.I, self.G, self.t
        )
        # Every hypothesized source set should have the same cardinality.
        l = None
        for k in result.data["scores"].keys():
            if not l:
                l = len(k)
            else:
                assert len(k) == l
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/multiple_source/tests/__init__.py | xflow/SL/cosasi/source_inference/multiple_source/tests/__init__.py | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false | |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/multiple_source/tests/test_jordan.py | xflow/SL/cosasi/source_inference/multiple_source/tests/test_jordan.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import cosasi
class TestJordan(TestCase):
    """Unit tests for the fast multi-source Jordan-centrality localizer."""

    def setUp(self):
        # Run an SI contagion on a complete graph and snapshot it mid-run.
        self.G = nx.complete_graph(n=100)
        contagion = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        contagion.forward(50)
        self.t = 25
        self.I = contagion.get_infected_subgraph(self.t)
        return None

    def test_fast_multisource_jordan_centrality(self):
        result = (
            cosasi.source_inference.multiple_source.fast_multisource_jordan_centrality(
                self.I, self.G, 3
            )
        )
        assert isinstance(
            result, cosasi.source_inference.source_results.MultiSourceResult
        )
        top5 = result.topn(5)
        # BUG FIX: ``assert [len(i) == 3 for i in top5]`` asserted a non-empty
        # list, which is always truthy — it could never fail. Check each element.
        assert all(len(i) == 3 for i in top5)
        result = (
            cosasi.source_inference.multiple_source.fast_multisource_jordan_centrality(
                self.I, self.G
            )
        )
        # Every hypothesized source set should have the same cardinality.
        l = None
        for k in result.data["scores"].keys():
            if not l:
                l = len(k)
            else:
                assert len(k) == l
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/single_source/lisn.py | xflow/SL/cosasi/source_inference/single_source/lisn.py | import math
import networkx as nx
import numpy as np
import scipy as sp
from ..source_results import SingleSourceResult
def lisn(I, G, t=None, infection_rate=0.1):
    """Score every node of G as a candidate source via the LISN algorithm [1]_.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
    t : int (optional)
        the observation timestep corresponding to I
    infection_rate : float (optional)
        Inter-node infection efficiency from the original contagion process
        must be in [0, 1]

    Notes
    -----
    Because the probabilities can be quite small, we report the log-score, rather
    than the raw score itself.
    To our knowledge, this algorithm has no official name; it is referred to as
    "Algorithm 1" in its corresponding publication [1]_. We dub it LISN, the acronym
    of the publication title (Localizing the Information Source in a Network).
    Nodes outside the infection subgraph receive a score of negative infinity.

    Examples
    --------
    >>> result = cosasi.single_source.lisn(I, G)

    References
    ----------
    .. [1] G. Nie and C. Quinn,
        "Localizing the Information Source in a Network"
        TrueFact 2019: KDD 2019 Workshop on Truth Discovery and Fact Checking: Theory and Practice, 2019
    """
    # Non-infected nodes cannot be the source.
    scores = {node: -np.inf for node in G.nodes}
    for candidate in I.nodes:
        log_likelihood = 0
        for other in G.nodes:
            if other == candidate:
                continue
            # Likelihood of `other`'s observed state given `candidate` is the
            # source depends only on their hop distance.
            hops = nx.shortest_path_length(G, candidate, other)
            reach_prob = distance_prob(t, hops, infection_rate)
            if other in I.nodes:
                log_likelihood += math.log(reach_prob)
            else:
                log_likelihood += math.log(1 - reach_prob)
        scores[candidate] = log_likelihood
    result = SingleSourceResult(
        source_type="single-source", inference_method="lisn", scores=scores, G=G
    )
    return result
def distance_prob(t, n, infection_rate=0.1):
    """Approximate the probability that a node n hops away receives the contagion by time t.

    Parameters
    ----------
    t : int
        the observation timestep corresponding to I
    n : int
        shortest path distance
    infection_rate : float (optional)
        Inter-node infection efficiency from the original contagion process
        must be in [0, 1]

    Notes
    -----
    Computes the regularized lower incomplete gamma function
    gamma(n, infection_rate * t) / Gamma(n), per Section 3 of [1]_.

    References
    ----------
    .. [1] G. Nie and C. Quinn,
        "Localizing the Information Source in a Network"
        TrueFact 2019: KDD 2019 Workshop on Truth Discovery and Fact Checking: Theory and Practice, 2019
    """
    upper_limit = infection_rate * t

    def integrand(x):
        # Integrand of the lower incomplete gamma function gamma(n, .).
        return x ** (n - 1) * math.e ** (-x)

    lower_incomplete, _ = sp.integrate.quad(integrand, 0, upper_limit)
    return lower_incomplete / sp.special.gamma(n)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/single_source/jordan.py | xflow/SL/cosasi/source_inference/single_source/jordan.py | import networkx as nx
import numpy as np
from ...utils import soft_eccentricity
from ..source_results import SingleSourceResult
def jordan_centrality(I, G):
    """Score nodes by inverse infection eccentricity; the Jordan center scores highest.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.

    Notes
    -----
    The Jordan infection center is the vertex with minimum infection eccentricity,
    described in [1]_ and [2]_. Returning the inverse eccentricity makes the Jordan
    center the highest-scoring node.
    Nodes outside the infection subgraph receive a score of negative infinity.

    Examples
    --------
    >>> result = cosasi.single_source.jordan_centrality(I, G)

    References
    ----------
    .. [1] L. Ying and K. Zhu,
        "On the Universality of Jordan Centers for Estimating Infection Sources in Tree Networks"
        IEEE Transactions of Information Theory, 2017
    .. [2] L. Ying and K. Zhu,
        "Diffusion Source Localization in Large Networks"
        Synthesis Lectures on Communication Networks, 2018
    """
    scores = {}
    for node in G.nodes:
        if node in I.nodes:
            # Invert eccentricity so the Jordan center receives the top score.
            scores[node] = 1 / soft_eccentricity(I, v=node)
        else:
            # Uninfected nodes are impossible sources.
            scores[node] = -np.inf
    result = SingleSourceResult(
        source_type="single-source",
        inference_method="jordan centrality",
        scores=scores,
        G=G,
        reverse=True,
    )
    return result
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/single_source/netsleuth.py | xflow/SL/cosasi/source_inference/single_source/netsleuth.py | import networkx as nx
import numpy as np
import warnings
from ..source_results import SingleSourceResult
def netsleuth(I, G):
    """Implements the single-source NETSLEUTH algorithm to score all nodes in G.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.

    Notes
    -----
    NETSLEUTH is described in [1]_. General idea is that, under mean field
    approximation, the probability of observing an infection subgraph given a
    particular source s is proportional to the sth entry of the largest eigenvector
    of the infection subgraph Laplacian. The implementation below is described in
    [2]_.
    Nodes outside the infection subgraph (i.e. the frontier set) receive a score of
    negative infinity.
    NETSLEUTH has linear complexity with the number of edges of the infected subgraph,
    edges of the frontier set, and vertices of the infected subgraph.

    Examples
    --------
    >>> result = cosasi.single_source.netsleuth(I, G)

    References
    ----------
    .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
        "Efficiently spotting the starting points of an epidemic in a large graph"
        Knowledge and Information Systems, 2013
        https://link.springer.com/article/10.1007/s10115-013-0671-5
    .. [2] L. Ying and K. Zhu,
        "Diffusion Source Localization in Large Networks"
        Synthesis Lectures on Communication Networks, 2018
    """
    # BUG FIX: the pattern must be a raw string — "networkx\..*" contains the
    # invalid escape sequence "\." (a SyntaxWarning on Python 3.12+).
    warnings.filterwarnings("ignore", module=r"networkx\..*")
    L = nx.laplacian_matrix(G).toarray()
    # NOTE(review): indexing L by node labels assumes G's nodes are the integers
    # 0..n-1 in Laplacian row order — TODO confirm for relabeled graphs.
    infection_indices = list(I.nodes)
    # Restrict the Laplacian to the infected vertices.
    L_I = L[np.ix_(infection_indices, infection_indices)]
    eigenvalues, eigenvectors = np.linalg.eig(L_I)
    largest_eigenvalue = max(eigenvalues)
    largest_eigenvector = eigenvectors[:, list(eigenvalues).index(largest_eigenvalue)]
    # Infected nodes are scored by their entry in the leading eigenvector;
    # frontier/uninfected nodes are impossible sources.
    scores = {
        v: largest_eigenvector[infection_indices.index(v)]
        if v in infection_indices
        else -np.inf
        for v in G.nodes
    }
    result = SingleSourceResult(
        source_type="single-source", inference_method="netsleuth", scores=scores, G=G
    )
    return result
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/single_source/__init__.py | xflow/SL/cosasi/source_inference/single_source/__init__.py | from .rumor_centrality import *
from .short_fat_tree import *
from .netsleuth import *
from .jordan import *
from .lisn import *
from .earliest_infection_first import *
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/single_source/short_fat_tree.py | xflow/SL/cosasi/source_inference/single_source/short_fat_tree.py | import math
import random
import networkx as nx
import numpy as np
from ...utils import longest_list_len
from ..source_results import SingleSourceResult
def short_fat_tree(I, G, infection_rate=0.1):
    """Implements the Short-Fat-Tree (SFT) algorithm to score all nodes in G.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
    infection_rate : float (optional)
        Inter-node infection efficiency from the original contagion process
        must be in [0, 1]

    Examples
    --------
    >>> result = cosasi.single_source.short_fat_tree(I, G)

    Notes
    -----
    Algorithm attempts to find infection center by identifying the vertex with
    largest weighted boundary node degree. The algorithm was introduced in [1]_.
    Nodes outside the infection subgraph receive a score of negative infinity.

    References
    ----------
    .. [1] K. Zhu and L. Ying,
        "Information source detection in the SIR model: A sample-path-based approach."
        IEEE/ACM Transactions on Networking, 2014
        https://ieeexplore.ieee.org/document/6962907
    """
    N = len(I)
    # Synchronous flooding simulation over I: every node starts by knowing only
    # its own ID and rebroadcasts newly learned IDs each round.
    # each node receives its own node ID at time 0
    t_messages = {i: list() for i in I.nodes}  # timestep t
    t_minus_messages = {i: [i] for i in I.nodes}  # timestep t-1
    earlier_messages = {i: set() for i in I.nodes}  # timesteps earlier than t-1
    all_messages = {i: {i} for i in I.nodes}  # full history
    t = 1
    # Keep flooding until some node has heard from all N infected nodes.
    while longest_list_len(all_messages.values()) < N:
        for v in I.nodes:
            # IDs v learned in the last round and hasn't broadcast yet.
            new_ids = set(t_minus_messages[v]) - earlier_messages[v]
            if new_ids:  # v received new node IDs in t-1 time slot
                for u in I.neighbors(v):
                    # v broadcasts the new node IDs to its neighbors
                    # NOTE(review): appending a set to a list yields arbitrary
                    # element order; harmless here since only set membership and
                    # counts are used downstream.
                    t_messages[u] += new_ids
        t += 1
        # update message history
        earlier_messages = {
            i: earlier_messages[i].union(t_minus_messages[i]) for i in I.nodes
        }
        all_messages = {
            i: all_messages[i].union(t_minus_messages[i]).union(t_messages[i])
            for i in I.nodes
        }
        # push back recent message record
        t_minus_messages = t_messages
        t_messages = {i: list() for i in I.nodes}
    # S keys are the set of nodes that receive |I| distinct node IDs
    # (i.e. the nodes that "won" the flooding race); everyone else scores -inf.
    S = {
        v: weighted_boundary_node_degree(I=I, G=G, v=v, infection_rate=infection_rate)
        if v in I.nodes and len(all_messages[v]) >= N
        else -np.inf
        for v in G.nodes
    }
    result = SingleSourceResult(
        source_type="single-source", inference_method="short-fat-tree", scores=S, G=G
    )
    return result
def weighted_boundary_node_degree(I, G, v, infection_rate=0.01, return_boundary=False):
    """Compute the weighted boundary node degree (WBND) of v w.r.t. the infected set I.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
    v : graph index - str, int, etc.
        node whose WBND is computed
    infection_rate : float (optional)
        Inter-node infection efficiency from the original contagion process
        must be in [0, 1]
    return_boundary : bool
        if True, you get both the weighted boundary node degree and the involved boundary nodes
        if False, you only get the weighted boundary node degree

    Notes
    -----
    Based on the WBND Algorithm, described in Algorithm 2.2 on p. 10 of [1]_.

    References
    ----------
    .. [1] L. Ying and K. Zhu,
        "Diffusion Source Localization in Large Networks"
        Synthesis Lectures on Communication Networks, 2018
    """
    eccentricity_v = nx.eccentricity(I, v=v)
    # Boundary of v: infected nodes whose hop distance from v (measured in G)
    # equals v's infection eccentricity.
    boundary = [
        node
        for node in I.nodes
        if nx.shortest_path_length(G, source=v, target=node) == eccentricity_v
    ]
    boundary_size = len(boundary)
    # Degrees in G, discounted by the boundary size, weighted by |log(1 - beta)|.
    degree_excess = sum(G.degree(node) - boundary_size for node in boundary)
    wbnd = degree_excess * abs(math.log(1 - infection_rate))
    if return_boundary:
        return wbnd, boundary
    return wbnd
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/single_source/rumor_centrality.py | xflow/SL/cosasi/source_inference/single_source/rumor_centrality.py | import math
import random
import networkx as nx
from ...utils import list_product
from ..source_results import SingleSourceResult
def rumor_centrality_root(I, v, return_all_values=True):
    """Computes rumor centrality for all nodes, assuming a spanning tree rooted at v.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    v : graph index - str, int, etc.
        The vertex rooting
    return_all_values : bool
        Specifies whether you want the full rumor centrality dict.
        If False, returns only the value for node v

    Notes
    -----
    Rumor centrality was introduced in the seminal work [1]_. This is a more "literal"
    interpretation of their algorithm. `rumor_centrality` averages these results over all
    possible BFS rooting schemes.

    References
    ----------
    .. [1] S. Devavrat and T. Zaman,
        "Rumors in a network: Who's the culprit?."
        IEEE Transactions on Informatidon Theory, 2011
        https://devavrat.mit.edu/wp-content/uploads/2017/10/Rumors-in-a-network-whos-the-culprit.pdf
    """
    N = len(I)
    # Orient I as a BFS tree rooted at v; edges point away from the root.
    G = nx.bfs_tree(I, v)
    # sort nodes by depth from leaves to root
    depths = nx.shortest_path_length(G, v)
    nodes_by_depth = sorted(depths, key=depths.get, reverse=True)
    # message-passing data objects; indexing is dict[destination][source]
    t = {
        i: {j: 0 for j in nodes_by_depth} for i in nodes_by_depth
    }  # subtree size messages
    p = {
        i: {j: 0 for j in nodes_by_depth} for i in nodes_by_depth
    }  # subtree product messages
    r = {i: 0 for i in nodes_by_depth}  # rumor centrality values
    # Upward pass (leaves -> root): each node sends its subtree size and
    # subtree product to its parent.
    for u in nodes_by_depth:
        children_u = [e[1] for e in G.out_edges(u)]
        if u != v:
            parent_u = list(G.in_edges(u))[0][0]
        if G.out_degree(u) == 0:
            # u is a leaf
            # NOTE(review): the `"parent_u" in locals()` guard is a hack —
            # parent_u persists from earlier loop iterations, so after the
            # first non-root node it is always bound (possibly stale if u is
            # the root). Preserved as-is; confirm intent before refactoring.
            if "parent_u" in locals():
                t[parent_u][u] = 1
                p[parent_u][u] = 1
        else:
            if u != v:
                # u is not root
                if "parent_u" in locals():
                    t[parent_u][u] = 1 + sum([t[u][j] for j in children_u])
                    p[parent_u][u] = t[parent_u][u] * list_product(
                        [p[u][j] for j in children_u]
                    )
    # Downward pass (root -> leaves): compute rumor centrality from the
    # root's closed form, then propagate to children via the size ratio.
    for u in nodes_by_depth[::-1]:
        children_u = [e[1] for e in G.out_edges(u)]
        if u == v:
            # u is root
            r[u] = math.factorial(N) / list_product([p[u][j] for j in children_u])
        else:
            parent_u = list(G.in_edges(u))[0][0]
            r[u] = r[parent_u] * t[parent_u][u] / (N - t[parent_u][u])
    # Normalize by the tree size.
    for u in nodes_by_depth:
        r[u] /= len(G)
    if not return_all_values:
        return r[v]
    return r
def rumor_centrality(I, G=None, v=None, normalize=True, only_roots=False):
    """Computes rumor centrality for all nodes in G.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph (optional)
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
        This is not actually used, but exists to match the format of other algorithms
    v : graph index - str, int, etc. (optional)
        if provided, returns the rumor centrality of v only.
    normalize : bool
        If True, scales all rumor centrality values to between 0 and 1
    only_roots : bool
        Aggregation strategy, as we compute rumor_centrality_root over all possible
        root nodes.
        If True, we only keep the rumor_centrality_root value for the root node
        If False, we keep the rumor_centrality_root values for all nodes

    Notes
    -----
    Rumor centrality was introduced in the seminal work [1]_. `rumor_centrality_root` is a
    more "literal" interpretation of their algorithm. `rumor_centrality` (this function)
    averages these results over all possible BFS rooting schemes.

    Examples
    --------
    >>> result = cosasi.single_source.rumor_centrality(I, G)

    References
    ----------
    .. [1] S., Devavrat and T. Zaman,
        "Rumors in a network: Who's the culprit?."
        IEEE Transactions on Information Theory, 2011
        https://devavrat.mit.edu/wp-content/uploads/2017/10/Rumors-in-a-network-whos-the-culprit.pdf
    """
    # BUG FIX: the original checks were `if v and ...` / `if v:`, which treat
    # a falsy-but-valid node label (e.g. node 0) as "no node supplied" —
    # skipping both the membership check and the scalar return. Compare to
    # None explicitly instead.
    if v is not None and v not in I:
        raise ValueError("Provided node is not in I.")
    # iterate over possible roots, and average over spanning trees
    rumor_centrality_dict = {i: 0 for i in I.nodes}
    for root in rumor_centrality_dict:
        if only_roots:
            rumor_centrality_dict[root] = rumor_centrality_root(
                I, root, return_all_values=False
            )
        else:
            r = rumor_centrality_root(I, root, return_all_values=True)
            for node in I.nodes:
                if node in r:
                    rumor_centrality_dict[node] += r[node]
    # Average over the |I| rooting schemes.
    for node in rumor_centrality_dict:
        rumor_centrality_dict[node] /= len(I)
    if normalize:
        max_val = max(rumor_centrality_dict.values())
        for node in rumor_centrality_dict:
            rumor_centrality_dict[node] /= max_val
    if v is not None:
        return rumor_centrality_dict[v]
    result = SingleSourceResult(
        source_type="single-source",
        inference_method="rumor centrality",
        scores=rumor_centrality_dict,
        G=G,
    )
    return result
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/single_source/earliest_infection_first.py | xflow/SL/cosasi/source_inference/single_source/earliest_infection_first.py | import random
import networkx as nx
import numpy as np
from ...utils import soft_eccentricity
from ..source_results import SingleSourceResult
def earliest_infection_first(I, G, observer_dict):
    """Score nodes of I via the Earliest Infection First (EIF) algorithm.

    Useful when partial infection timestamp information is available.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
    observer_dict : dict
        observers dictionary, a la contagion.get_observers()

    Raises
    ------
    ValueError
        if G is not connected

    Notes
    -----
    Greedy algorithm from Section 3 of [1]_: each candidate root is scored by the
    cost of its greedy EIF spreading tree ("cost-based ranking", Section 4 of [1]_).
    Lower cost is better (``reverse=False``); nodes outside I score +infinity.

    References
    ----------
    .. [1] K. Zhu, Z. Chen, L. Ying,
        "Locating Contagion Sources in Networks with Partial Timestamps"
        Data Mining and Knowledge Discovery, 2016
        https://link.springer.com/article/10.1007/s10618-015-0435-9
    """
    if not nx.is_connected(G):
        raise ValueError("G must be connected for EIF algorithm.")
    observers = observer_dict.copy()
    # Collapse SIR-style [time, ...] lists down to a single SI infection time.
    for node, stamp in observers.items():
        if isinstance(stamp, list):
            observers[node] = stamp[0]
    mu = _estimate_mu(G, observers)
    # Infected observers, ordered from earliest to latest infection time.
    alpha = [
        node
        for node, _ in sorted(observers.items(), key=lambda item: item[1])
        if node in I
    ]
    scores = {node: np.inf for node in G}
    for candidate in I:
        scores[candidate] = eif_root(
            root=candidate,
            I=I,
            G=G,
            observers=observers,
            mu=mu,
            alpha=alpha,
            only_return_cost=True,
        )
    result = SingleSourceResult(
        source_type="single-source",
        inference_method="earliest infection first",
        scores=scores,
        G=G,
        reverse=False,
    )
    return result
def eif_root(root, I, G, observers, mu, alpha, only_return_cost=True):
    """Computes the cost of a greedy EIF spreading tree whose "patient zero" is root.

    Parameters
    ----------
    root : graph index - str, int, etc.
        The vertex rooting
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
    observers : dict
        observers dictionary, a la contagion.get_observers()
    mu : float
        a constant, estimated by _estimate_mu()
    alpha : list
        list of vertices in observers dictionary, sorted from earliest to latest timestamp-wise
    only_return_cost : bool
        if True, only returns the calculated spreading tree's cost

    Notes
    -----
    This is the greedy algorithm outlined in Section 3 of [1]_.

    References
    ----------
    .. [1] K. Zhu, Z. Chen, L. Ying,
        "Locating Contagion Sources in Networks with Partial Timestamps"
        Data Mining and Knowledge Discovery, 2016
        https://link.springer.com/article/10.1007/s10618-015-0435-9
    """
    # Work on a private copy of the timestamps; the root gets a timestamp one
    # expected hop-delay (mu) before the earliest observation if unobserved.
    timestamps = observers.copy()
    if root not in timestamps:
        timestamps[root] = min(timestamps.values()) - mu
    spreading_tree = nx.Graph()
    spreading_tree.add_nodes_from([root])
    spreading_tree_cost = 0
    # Attach observers to the tree one at a time, earliest first, each via the
    # cheapest "modified shortest path" from any node already in the tree.
    for a in alpha:
        path = None
        path_cost = np.inf
        for m in spreading_tree:
            # find a modified shortest path (msp) from m to a
            # The surrogate graph excludes other observers and other tree
            # nodes so the path cannot shortcut through them.
            surrogate = G.copy()
            to_remove = [v for v in alpha if v != a] + [
                v for v in spreading_tree if v != m
            ]
            surrogate.remove_nodes_from(to_remove)
            try:
                msp = nx.shortest_path(surrogate, source=m, target=a)
            except:
                # no msp exists
                # NOTE(review): bare except also swallows non-networkx errors;
                # presumably only NetworkXNoPath/NodeNotFound are expected.
                continue
            # calculate msp's cost
            # Quadratic cost: deviation of the per-hop delay from mu.
            msp_len = len(msp)
            msp_cost = msp_len * (
                (((timestamps[a] - timestamps[m]) / msp_len) - mu) ** 2
            )
            # compare cost to existing minimum path cost
            if msp_cost < path_cost:
                path = msp
                path_cost = msp_cost
        if isinstance(path, type(None)):
            continue
        # add path to spreading tree
        for i in range(len(path) - 1):
            spreading_tree.add_edge(path[i], path[i + 1])
        # update observers/timestamps
        # Interpolate timestamps linearly along the newly attached path.
        path_len_iter = 1
        path_factor = (timestamps[path[-1]] - timestamps[path[0]]) / len(path)
        for g in path:
            timestamps[g] = timestamps[path[0]] + (path_len_iter - 1) * path_factor
            path_len_iter += 1
        # update tree cost
        spreading_tree_cost += path_cost
    # add remaining nodes
    # Attach every leftover node of G to its first tree neighbor; these edges
    # incur zero additional cost. The loop terminates when no progress is made
    # (new_len == old_len guards against disconnected leftovers).
    not_in_tree = [v for v in G if v not in spreading_tree]
    new_len = len(not_in_tree)
    while new_len > 0:
        for v in not_in_tree:
            breaker = False
            for p in G.neighbors(v):
                if breaker:
                    break
                if p in spreading_tree:
                    spreading_tree.add_edge(v, p)
                    timestamps[v] = (
                        timestamps[p] + mu
                    )  # cost does not change in this step
                    breaker = True
        old_len = new_len
        not_in_tree = [v for v in G if v not in spreading_tree]
        new_len = len(not_in_tree)
        if new_len == old_len:
            break
    if only_return_cost:
        return spreading_tree_cost
    return spreading_tree, spreading_tree_cost, timestamps
def _estimate_mu(G, observers):
    """Estimates the constant mu from the quadratic tree cost function.

    Parameters
    ----------
    G : NetworkX Graph
        The network for the diffusion process to run on
    observers : dict
        observers dictionary, a la contagion.get_observers()

    Notes
    -----
    The mu parameter is introduced in Equation 2 of [1]_: the total pairwise
    timestamp gap between observers divided by their total pairwise hop count.
    Each unordered pair is visited twice (both orders), which cancels in the ratio.

    References
    ----------
    .. [1] K. Zhu, Z. Chen, L. Ying,
        "Locating Contagion Sources in Networks with Partial Timestamps"
        Data Mining and Knowledge Discovery, 2016
        https://link.springer.com/article/10.1007/s10618-015-0435-9
    """
    # NOTE(review): the `!= np.inf` guards compare node *labels* to infinity,
    # not infection times — presumably observers[v] was intended; confirm
    # against how non-observers are encoded upstream.
    pairs = [
        (v, w)
        for v in observers
        for w in observers
        if v != w and v != np.inf and w != np.inf
    ]
    timestamp_gap = sum(abs(observers[v] - observers[w]) for v, w in pairs)
    hop_count = sum(nx.shortest_path_length(G, source=v, target=w) for v, w in pairs)
    return timestamp_gap / hop_count
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/single_source/tests/test_netsleuth.py | xflow/SL/cosasi/source_inference/single_source/tests/test_netsleuth.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import numpy as np
import random
import math
import cosasi
class TestNETSLEUTH(TestCase):
    """Unit tests for the single-source NETSLEUTH scorer."""

    def setUp(self):
        # Simulate an SI contagion on a random tree and grab a mid-run snapshot.
        self.G = nx.random_tree(n=500, seed=0)
        simulation = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        simulation.forward(50)
        self.t = 25
        self.I = simulation.get_infected_subgraph(self.t)
        return None

    def test_netsleuth(self):
        result = cosasi.source_inference.single_source.netsleuth(self.I, self.G)
        assert isinstance(
            result, cosasi.source_inference.source_results.SingleSourceResult
        )
        # Exactly the infected nodes should receive finite scores.
        finite_scores = [
            score for score in result.data["scores"].values() if score != -np.inf
        ]
        assert len(finite_scores) == len(self.I)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/single_source/tests/test_lisn.py | xflow/SL/cosasi/source_inference/single_source/tests/test_lisn.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import numpy as np
import random
import cosasi
class TestLISN(TestCase):
    """Unit tests for the single-source LISN scorer and its distance model."""

    def setUp(self):
        # Simulate an SI contagion on a random tree and grab a mid-run snapshot.
        self.G = nx.random_tree(n=500, seed=0)
        contagion = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        contagion.forward(50)
        self.t = 25
        self.I = contagion.get_infected_subgraph(self.t)
        return None

    def test_distance_prob(self):
        # for constant distance, probability should weakly increase with time
        last_prob = -np.inf
        n = random.randint(1, 10)
        for t in range(1, 10):
            prob = cosasi.source_inference.single_source.distance_prob(t, n, 0.05)
            assert prob >= last_prob
            last_prob = prob
        # for constant time, probability should weakly decrease with distance
        last_prob = np.inf
        t = random.randint(1, 10)
        for n in range(1, 10):
            prob = cosasi.source_inference.single_source.distance_prob(t, n, 0.05)
            assert prob <= last_prob
            last_prob = prob

    def test_lisn(self):
        result = cosasi.source_inference.single_source.lisn(self.I, self.G, self.t)
        # type check
        assert isinstance(
            result, cosasi.source_inference.source_results.SingleSourceResult
        )
        # -inf only for nodes outside infection subgraph
        vals = list(result.data["scores"].values())
        noninf_vals = [i for i in result.data["scores"].values() if i != -np.inf]
        assert len(noninf_vals) == len(self.I) and len(vals) == len(self.G)
        # scores are log probabilities
        # BUG FIX: the original chained ``max(...) <= max(...)`` — a tautology.
        # The intended bound check is min <= max <= 0.
        assert min(noninf_vals) <= max(noninf_vals) <= 0
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/single_source/tests/test_earliest_infection_first.py | xflow/SL/cosasi/source_inference/single_source/tests/test_earliest_infection_first.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import numpy as np
import random
import cosasi
class TestEarliestInfectionFirst(TestCase):
    def setUp(self):
        """Run an SI epidemic on a dense random graph; keep infected subgraph and observers."""
        self.G = nx.fast_gnp_random_graph(100, 0.25)
        sim = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.01, number_infected=1
        )
        sim.forward(50)
        self.t = 20
        self.I = sim.get_infected_subgraph(self.t)
        self.observers = sim.get_observers(10)

    def test_earliest_infection_first_disconnected(self):
        """A disconnected background graph must be rejected with a ValueError."""
        H = nx.disjoint_union(self.G, self.G)
        with pytest.raises(ValueError):
            cosasi.single_source.earliest_infection_first(
                I=self.G, G=H, observer_dict=self.observers
            )

    def test_earliest_infection_first(self):
        """Result maps graph nodes to strictly positive numeric scores."""
        result = cosasi.source_inference.single_source.earliest_infection_first(
            I=self.I, G=self.G, observer_dict=self.observers
        )
        assert isinstance(
            result, cosasi.source_inference.source_results.SingleSourceResult
        )
        scores = result.data["scores"]
        assert isinstance(scores, dict)
        for node, score in scores.items():
            assert node in self.G.nodes()
            assert isinstance(score, (float, int)) and score > 0
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/single_source/tests/test_short_fat_tree.py | xflow/SL/cosasi/source_inference/single_source/tests/test_short_fat_tree.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import numpy as np
import random
import math
import cosasi
class TestShortFatTree(TestCase):
    def setUp(self):
        """Simulate an SI epidemic on a random tree and capture its infected subgraph."""
        self.G = nx.random_tree(n=500, seed=0)
        sim = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        sim.forward(50)
        self.t = 25
        self.I = sim.get_infected_subgraph(self.t)

    def test_weighted_boundary_node_degree(self):
        """Type-check wbnd on a random node, then verify a small worked example."""
        # basic type check
        wbnd = cosasi.source_inference.single_source.weighted_boundary_node_degree(
            self.I, self.G, random.choice(list(self.I.nodes()))
        )
        assert isinstance(wbnd, (int, float))
        # hand-built example: infected core {1..5} inside a small tree
        G = nx.Graph()
        edges = [
            (1, 2), (2, 5), (2, 6), (2, 7),
            (1, 4), (4, 8), (4, 9), (4, 10),
            (1, 3), (3, 11),
        ]
        G.add_edges_from(edges)
        I = G.subgraph([1, 2, 3, 4, 5])
        wbnd_1 = cosasi.source_inference.single_source.weighted_boundary_node_degree(
            I, G, 1, abs(math.log(0.5))
        )
        assert wbnd_1 == 0
        wbnd_2, v_boundary = cosasi.source_inference.single_source.weighted_boundary_node_degree(
            I, G, 2, abs(math.log(0.5)), True
        )
        assert sorted(v_boundary) == [3, 4]
        assert wbnd_2 > wbnd_1

    def test_short_fat_tree(self):
        """short_fat_tree assigns finite scores to at most the infected nodes."""
        result = cosasi.source_inference.single_source.short_fat_tree(self.I, self.G)
        assert isinstance(
            result, cosasi.source_inference.source_results.SingleSourceResult
        )
        # score vals are wbnd; those are checked in test_weighted_boundary_node_degree
        finite = [s for s in result.data["scores"].values() if s != -np.inf]
        assert len(finite) <= len(self.I)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/single_source/tests/test_rumor_centrality.py | xflow/SL/cosasi/source_inference/single_source/tests/test_rumor_centrality.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import numpy as np
import random
import cosasi
class TestRumorCentrality(TestCase):
    def setUp(self):
        """Simulate an SI epidemic on a random tree and capture its infected subgraph."""
        self.G = nx.random_tree(n=500, seed=0)
        sim = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        sim.forward(50)
        self.t = 25
        self.I = sim.get_infected_subgraph(self.t)

    def test_rumor_centrality_root(self):
        """Rooted rumor centrality is positive in both scalar and per-node form."""
        for _ in range(5):
            root = random.choice(list(self.I.nodes()))
            scalar = cosasi.single_source.rumor_centrality_root(self.I, root, False)
            assert isinstance(scalar, (int, float)) and scalar > 0
            per_node = cosasi.single_source.rumor_centrality_root(self.I, root, True)
            for node, score in per_node.items():
                assert node in self.I.nodes()
                assert score > 0

    def test_rumor_centrality(self):
        """Rejects invalid input, then scores every graph node positively."""
        with pytest.raises(ValueError):
            cosasi.single_source.rumor_centrality(self.I, self.G, "BAD INPUT")
        result = cosasi.single_source.rumor_centrality(
            self.I, self.G, None, False, False
        )
        assert isinstance(
            result, cosasi.source_inference.source_results.SingleSourceResult
        )
        scores = result.data["scores"]
        assert isinstance(scores, dict)
        for node, score in scores.items():
            assert node in self.G.nodes()
            assert isinstance(score, (float, int)) and score > 0

    def test_rumor_centrality_root_example(self):
        """Verifies the worked example from Section III.A of [1]_.

        References
        ----------
        .. [1] S., Devavrat and T. Zaman,
           "Rumors in a network: Who's the culprit?."
           IEEE Transactions on Information Theory, 2011
           https://devavrat.mit.edu/wp-content/uploads/2017/10/Rumors-in-a-network-whos-the-culprit.pdf
        """
        I = nx.Graph()
        I.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5)])
        assert (
            cosasi.single_source.rumor_centrality_root(I, 1, False)
            == cosasi.single_source.rumor_centrality_root(I, 1, True)[1]
            == 8
        )
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/single_source/tests/__init__.py | xflow/SL/cosasi/source_inference/single_source/tests/__init__.py | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false | |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/SL/cosasi/source_inference/single_source/tests/test_jordan.py | xflow/SL/cosasi/source_inference/single_source/tests/test_jordan.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import numpy as np
import cosasi
class TestJordan(TestCase):
    def setUp(self):
        """Simulate an SI epidemic on a random tree and capture its infected subgraph."""
        self.G = nx.random_tree(n=500, seed=0)
        sim = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        sim.forward(50)
        self.t = 25
        self.I = sim.get_infected_subgraph(self.t)

    def test_jordan_centrality(self):
        """Scores are -inf or in [0, 1]; top-scoring nodes equal the infection center."""
        result = cosasi.source_inference.single_source.jordan_centrality(self.I, self.G)
        # type check
        assert isinstance(
            result, cosasi.source_inference.source_results.SingleSourceResult
        )
        scores = result.data["scores"]
        # soft eccentricity values should either be -inf or in [0, 1]
        finite = [v for v in scores.values() if v != -np.inf]
        assert all(0 <= v <= 1 for v in finite)
        # nodes achieving the best score must coincide with the infection graph center
        best = max(scores.values())
        top_nodes = [node for node in scores if scores[node] == best]
        assert sorted(nx.center(self.I)) == sorted(top_nodes)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/create_safe_traffic_lights.py | SUMOxPyPSA/create_safe_traffic_lights.py | #!/usr/bin/env python3
"""
Create safe traffic light logic with proper coordination and random offsets
"""
import gzip
import xml.etree.ElementTree as ET
import os
import random
def create_safe_traffic_light_logic(signal_count, tl_id, junction_offset=0):
    """Build a safe static traffic-light program for one junction.

    Only one signal group is green at a time (pairs of opposing movements
    for the 4/6/8-signal layouts), and every green phase is followed by a
    yellow phase and an all-red buffer, so conflicting movements are never
    green simultaneously.

    Parameters
    ----------
    signal_count : int
        Number of controlled signal positions (length of each state string).
    tl_id : str
        Traffic-light id. Unused here; kept for interface compatibility.
    junction_offset : int
        Program offset in seconds, used to desynchronize junctions.

    Returns
    -------
    dict
        SUMO tlLogic-style program: 'type', 'programID', 'offset' and
        'phases' (list of {'duration', 'state'} dicts).
    """
    # Base durations (same for all traffic lights initially)
    green_duration = 30
    yellow_duration = 3
    red_buffer_duration = 2

    def _green_states():
        """Return one state string per green phase ('G' = green, 'r' = red)."""
        if signal_count == 2:
            # simple alternation between two opposing movements
            return ['Gr', 'rG']
        if signal_count == 3:
            # T-intersection: cycle through the three approaches
            return ['Grr', 'rGr', 'rrG']
        if signal_count == 4:
            # cross intersection: North-South together, then East-West
            return ['GGrr', 'rrGG']
        if signal_count == 6:
            # complex layout: N straight+left, S straight+left, then E/W
            return ['GGrrrr', 'rrGGrr', 'rrrrGG']
        if signal_count == 8:
            # straight+left pairs for each of N, S, E, W in turn
            return ['GGrrrrrr', 'rrGGrrrr', 'rrrrGGrr', 'rrrrrrGG']
        # generic fallback: release one quarter of the signals at a time
        quarter = max(1, signal_count // 4)
        states = []
        for phase_idx in range(4):
            start = phase_idx * quarter
            # the last quarter absorbs any remainder
            width = quarter if phase_idx < 3 else signal_count - 3 * quarter
            state = 'r' * start + 'G' * width + 'r' * (signal_count - start - width)
            # normalize defensively to exactly signal_count characters
            # (only matters for degenerate, very small signal counts)
            states.append((state + 'r' * signal_count)[:signal_count])
        return states

    # Assemble the full cycle: each green phase is followed by its yellow
    # phase (same positions, G -> y) and an all-red clearance buffer.
    phases = []
    for green_state in _green_states():
        phases.append({'duration': green_duration, 'state': green_state})
        phases.append({'duration': yellow_duration, 'state': green_state.replace('G', 'y')})
        phases.append({'duration': red_buffer_duration, 'state': 'r' * signal_count})

    return {
        'type': 'static',
        'programID': '1',
        'offset': str(junction_offset),
        'phases': phases,
    }
def create_safe_traffic_lights_for_city(city_dir):
    """Create safe traffic-light programs for every tlLogic in a city's network.

    Reads ``osm.net.xml.gz`` under *city_dir*, builds a safe replacement
    program per traffic light via ``create_safe_traffic_light_logic``,
    prints summary statistics, and returns the programs.

    Parameters:
        city_dir: directory containing the SUMO network file ``osm.net.xml.gz``.

    Returns:
        dict mapping traffic-light id -> program dict, or ``None`` when the
        network file is missing.
    """
    net_file = os.path.join(city_dir, 'osm.net.xml.gz')
    if not os.path.exists(net_file):
        print(f"Network file not found: {net_file}")
        return None
    print(f"\nProcessing {city_dir}...")
    # Create safe traffic lights
    traffic_lights = {}
    junction_offsets = {}
    with gzip.open(net_file, 'rt', encoding='utf-8') as f:
        tree = ET.parse(f)
        root = tree.getroot()
        for tl in root.findall('tlLogic'):
            tl_id = tl.get('id')
            if tl_id is None:
                continue
            phases = tl.findall('phase')
            if not phases:
                continue
            # Signal count = width of the first phase's state string.
            signal_count = len(phases[0].get('state'))
            # Generate random offset for this junction (0-300 seconds)
            # Use tl_id to ensure consistent offset for the same junction
            # NOTE(review): hash() of a str is salted per process
            # (PYTHONHASHSEED), so "consistent" holds only within one run;
            # a stable digest (e.g. zlib.crc32) would make runs reproducible.
            random.seed(hash(tl_id) % 10000)
            junction_offset = random.randint(0, 300)
            # Create safe logic for this traffic light
            traffic_lights[tl_id] = create_safe_traffic_light_logic(signal_count, tl_id, junction_offset)
            junction_offsets[tl_id] = junction_offset
    print(f" Created {len(traffic_lights)} safe traffic lights")
    # Generate statistics
    offset_distribution = {}
    for offset in junction_offsets.values():
        if offset not in offset_distribution:
            offset_distribution[offset] = 0
        offset_distribution[offset] += 1
    print(f" Junction offset distribution: {len(offset_distribution)} unique offsets")
    # Show safety verification
    safety_verified = 0
    for tl_id, tl_info in traffic_lights.items():
        phases = tl_info['phases']
        is_safe = True
        # Check that no phase has more than 25% green signals (except for 2-4 signal cases)
        signal_count = len(phases[0]['state'])
        if signal_count > 4:
            for phase in phases:
                green_count = phase['state'].count('G')
                if green_count > signal_count * 0.25:
                    is_safe = False
                    break
        if is_safe:
            safety_verified += 1
    print(f" Safety verified: {safety_verified}/{len(traffic_lights)} traffic lights")
    return traffic_lights
def generate_safe_traffic_lights_xml(traffic_lights, output_file):
    """Serialize traffic-light programs into a SUMO additional-file.

    Writes one <tlLogic> element (with its <phase> children) per entry of
    *traffic_lights* to *output_file* and returns how many were written.
    """
    additional = ET.Element('additional')
    for tl_id, program in traffic_lights.items():
        logic = ET.SubElement(additional, 'tlLogic')
        logic.set('id', tl_id)
        logic.set('type', program['type'])
        logic.set('programID', program['programID'])
        logic.set('offset', program['offset'])
        for phase in program['phases']:
            phase_node = ET.SubElement(logic, 'phase')
            phase_node.set('duration', str(phase['duration']))
            phase_node.set('state', phase['state'])
    # Prepend the XML declaration ourselves and serialize the tree as text.
    document = '<?xml version="1.0" encoding="UTF-8"?>\n' + ET.tostring(
        additional, encoding='unicode'
    )
    with open(output_file, 'w', encoding='utf-8') as f:
        f.write(document)
    return len(traffic_lights)
def main():
    """Generate safe traffic-light programs for each configured city.

    For every city directory found, builds coordinated tlLogic programs,
    writes them to ``traffic_lights_safe.add.xml``, prints an example
    program, and finishes with a summary.
    """
    cities = ['miami', 'los_angeles', 'new_york']
    print("Creating Safe Traffic Light Logic")
    print("=" * 60)
    print("Safety-first approach:")
    print("1. Synchronize all lights with proper coordination")
    print("2. Ensure opposing movements are red when one is green")
    print("3. Add random offsets to desynchronize junctions")
    print("4. Maintain safety within each junction")
    print("=" * 60)
    for city in cities:
        city_dir = os.path.join('.', city)
        if not os.path.exists(city_dir):
            print(f"Skipping {city}: directory not found")
            continue
        # Create safe traffic lights for this city
        traffic_lights = create_safe_traffic_lights_for_city(city_dir)
        if traffic_lights is None:
            continue
        # Generate the safe add file
        output_file = os.path.join(city_dir, 'traffic_lights_safe.add.xml')
        count = generate_safe_traffic_lights_xml(traffic_lights, output_file)
        print(f" Generated {output_file} with {count} traffic lights")
        # Show example: peek at the first program to illustrate the timing
        if traffic_lights:
            first_id = list(traffic_lights.keys())[0]
            first_tl = traffic_lights[first_id]
            signal_count = len(first_tl['phases'][0]['state'])
            print(f" Example: {first_id} - {signal_count} signals, {len(first_tl['phases'])} phases")
            print(f" Offset: {first_tl['offset']}s")
            print(f" Green duration: {first_tl['phases'][0]['duration']}s")
            print(f" Yellow duration: {first_tl['phases'][1]['duration']}s")
            print(f" Phase 1: {first_tl['phases'][0]['state']}")
            if len(first_tl['phases']) > 3:
                print(f" Phase 4: {first_tl['phases'][3]['state']}")
    print("\n" + "=" * 60)
    print("Summary:")
    print("- Created safe traffic light logic for all cities")
    print("- Ensured proper coordination within junctions")
    print("- Added random offsets to desynchronize junctions")
    print("- Generated traffic_lights_safe.add.xml files")
    print("- Ready to update SUMO configurations")
if __name__ == "__main__":
main() | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/fix_intersection_timing.py | SUMOxPyPSA/fix_intersection_timing.py | #!/usr/bin/env python3
"""
Script to fix synchronized traffic lights at intersections by creating opposing logic
"""
import gzip
import xml.etree.ElementTree as ET
import os
import re
def analyze_intersection_traffic_lights(netfile):
    """Group traffic lights by the junction (intersection) they control.

    Parses the gzipped SUMO network *netfile*, records each tlLogic's
    controlled connections and phases, then maps junctions to the traffic
    lights whose first controlled 'from' edge feeds into them.

    Returns:
        (intersections, traffic_lights): dict junction_id -> [tl_id, ...]
        and dict tl_id -> {'controlled_links': [...], 'phases': [...]}.
    """
    intersections = {}
    with gzip.open(netfile, 'rt', encoding='utf-8') as f:
        tree = ET.parse(f)
        root = tree.getroot()
        # First, get all traffic lights and their controlled links
        traffic_lights = {}
        for tl in root.findall('tlLogic'):
            tl_id = tl.get('id')
            if tl_id is None:
                continue
            # Get controlled links for this traffic light
            controlled_links = []
            for connection in root.findall('connection'):
                if connection.get('tl') == tl_id:
                    controlled_links.append({
                        'from': connection.get('from'),
                        'to': connection.get('to'),
                        'fromLane': connection.get('fromLane'),
                        'toLane': connection.get('toLane')
                    })
            traffic_lights[tl_id] = {
                'controlled_links': controlled_links,
                'phases': []
            }
            # Get phases
            for phase in tl.findall('phase'):
                duration = phase.get('duration')
                state = phase.get('state')
                if duration and state:
                    traffic_lights[tl_id]['phases'].append({
                        'duration': int(duration),
                        'state': state
                    })
        # Group traffic lights by intersection (junction)
        for tl_id, tl_info in traffic_lights.items():
            if not tl_info['controlled_links']:
                continue
            # Find the junction this traffic light controls
            # We'll use the 'from' edge to find the junction
            from_edge = tl_info['controlled_links'][0]['from']
            # Find the junction that connects this edge
            for junction in root.findall('junction'):
                junction_id = junction.get('id')
                if junction_id:
                    # Check if this junction has the from_edge as an incoming edge
                    # NOTE(review): SUMO .net.xml normally stores incoming lanes
                    # in the junction's 'incLanes' *attribute*, not <incLane>
                    # child elements — confirm against the generated network;
                    # if there are no <incLane> children this findall matches
                    # nothing and no junctions are ever grouped.
                    for inc in junction.findall('incLane'):
                        edge_id = inc.get('id').split('_')[0]  # Remove lane suffix
                        if edge_id == from_edge:
                            if junction_id not in intersections:
                                intersections[junction_id] = []
                            intersections[junction_id].append(tl_id)
                            break
                    # NOTE(review): the break above exits only the incLane loop;
                    # the junction scan continues, so a single traffic light can
                    # be appended to several junctions — confirm intended.
    return intersections, traffic_lights
def create_opposing_traffic_light_logic(traffic_lights, intersections):
    """Build phase programs so lights at one intersection run on opposing timing.

    For every junction controlling at least two traffic lights, produce a
    6-phase program per light (green/yellow/all-red, twice) where the first
    light and the remaining lights use mirrored green halves, plus a 50%
    cycle offset for every second light.
    """
    opposing_logic = {}
    for junction_id, tl_ids in intersections.items():
        # Single-light junctions need no opposition logic.
        if len(tl_ids) < 2:
            continue
        print(f"Processing intersection {junction_id} with {len(tl_ids)} traffic lights")
        opposing_logic[junction_id] = {}
        for position, tl_id in enumerate(tl_ids):
            if tl_id not in traffic_lights:
                continue
            original_phases = traffic_lights[tl_id]['phases']
            if not original_phases:
                continue
            # Offset each traffic light by 50% of the cycle.
            offset = (position * 50) % 100
            state_length = len(original_phases[0]['state'])
            half = state_length // 2
            all_red = 'r' * state_length
            if position == 0:
                # First traffic light (e.g. North/South): first half leads.
                first_green = 'G' * half + 'r' * half
                first_yellow = 'y' * half + 'r' * half
                second_green = 'r' * half + 'G' * half
                second_yellow = 'r' * half + 'y' * half
            else:
                # Remaining lights (e.g. East/West): mirrored timing.
                first_green = 'r' * half + 'G' * half
                first_yellow = 'r' * half + 'y' * half
                second_green = 'G' * half + 'r' * half
                second_yellow = 'y' * half + 'r' * half
            opposing_logic[junction_id][tl_id] = {
                'offset': offset,
                'phases': [
                    {'duration': 15, 'state': first_green},
                    {'duration': 3, 'state': first_yellow},
                    {'duration': 2, 'state': all_red},
                    {'duration': 15, 'state': second_green},
                    {'duration': 3, 'state': second_yellow},
                    {'duration': 2, 'state': all_red},
                ],
            }
    return opposing_logic
def generate_opposing_traffic_lights_xml(opposing_logic, output_file):
    """Write opposing tlLogic programs to a SUMO additional-file.

    Flattens the junction -> traffic-light mapping into <tlLogic> elements
    and returns the total number of traffic lights written.
    """
    additional = ET.Element('additional')
    for tl_logics in opposing_logic.values():
        for tl_id, program in tl_logics.items():
            logic = ET.SubElement(additional, 'tlLogic')
            logic.set('id', tl_id)
            logic.set('type', 'static')
            logic.set('programID', '0')
            logic.set('offset', str(program['offset']))
            for phase in program['phases']:
                phase_node = ET.SubElement(logic, 'phase')
                phase_node.set('duration', str(phase['duration']))
                phase_node.set('state', phase['state'])
    document = '<?xml version="1.0" encoding="UTF-8"?>\n' + ET.tostring(
        additional, encoding='unicode'
    )
    with open(output_file, 'w', encoding='utf-8') as f:
        f.write(document)
    return sum(len(tl_logics) for tl_logics in opposing_logic.values())
def main():
    """Generate opposing traffic-light programs for each configured city.

    For every city directory with a network file, analyzes which lights
    share an intersection, builds opposing-phase programs, and writes them
    to ``traffic_lights_opposing.add.xml``.
    """
    current_dir = os.getcwd()
    print(f"Current working directory: {current_dir}")
    cities = ['miami', 'los_angeles', 'new_york']
    print("Traffic Light Opposition Generator")
    print("=" * 50)
    for city in cities:
        city_dir = os.path.join(current_dir, city)
        print(f"Checking directory: {city_dir}")
        if not os.path.exists(city_dir):
            print(f"Skipping {city}: directory not found at {city_dir}")
            continue
        net_file = os.path.join(city_dir, 'osm.net.xml.gz')
        if not os.path.exists(net_file):
            print(f"Skipping {city}: network file not found at {net_file}")
            continue
        print(f"\nProcessing {city}...")
        # Analyze intersections
        intersections, traffic_lights = analyze_intersection_traffic_lights(net_file)
        print(f" Found {len(intersections)} intersections")
        print(f" Found {len(traffic_lights)} traffic lights")
        # Create opposing logic
        opposing_logic = create_opposing_traffic_light_logic(traffic_lights, intersections)
        # Generate the add file
        output_file = os.path.join(city_dir, 'traffic_lights_opposing.add.xml')
        count = generate_opposing_traffic_lights_xml(opposing_logic, output_file)
        print(f" Generated {output_file} with {count} opposing traffic lights")
        # Show example: first junction handled and its traffic lights
        if opposing_logic:
            first_junction = list(opposing_logic.keys())[0]
            first_tls = list(opposing_logic[first_junction].keys())
            print(f" Example intersection {first_junction}: {first_tls}")
    print("\n" + "=" * 50)
    print("Summary:")
    print("- Generated opposing traffic light logic for intersections")
    print("- Traffic lights at same intersection now have different phase offsets")
    print("- Created traffic_lights_opposing.add.xml files")
    print("- Use these files instead of the synchronized ones")
if __name__ == "__main__":
main() | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/compress_net.py | SUMOxPyPSA/compress_net.py | import gzip
import shutil
import os
import sys
def compress_file(input_file):
    """Gzip-compress *input_file* into '<input_file>.gz'.

    Prints a status message and returns True on success, False when the
    input file does not exist or the compression fails.
    """
    if not os.path.exists(input_file):
        print(f"Error: File {input_file} does not exist")
        return False
    output_file = input_file + '.gz'
    try:
        with open(input_file, 'rb') as f_in, gzip.open(output_file, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
        print(f"Successfully compressed {input_file} to {output_file}")
    except Exception as e:
        print(f"Error compressing file: {e}")
        return False
    return True
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: python compress_net.py <input_file>")
sys.exit(1)
input_file = sys.argv[1]
compress_file(input_file) | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/fix_traffic_synchronization.py | SUMOxPyPSA/fix_traffic_synchronization.py | #!/usr/bin/env python3
"""
Script to fix traffic light synchronization by adding random offsets and varying phase durations
"""
import gzip
import xml.etree.ElementTree as ET
import os
import random
def create_desynchronized_traffic_light_logic(signal_count, tl_id):
    """Create a static tlLogic program with per-light offset and durations.

    Randomizes the offset (0-300 s), green (25-35 s), yellow (2-4 s) and
    all-red buffer (1-3 s) durations per traffic light so neighbouring
    junctions drift out of sync, while keeping phases within a junction
    safely separated (green -> yellow -> all-red for each signal group).

    The RNG is seeded from a CRC32 of the traffic-light id, so the same id
    always yields the same program — including across interpreter runs.
    (BUG FIX: the previous implementation seeded from ``hash(tl_id)``,
    which is salted per process via PYTHONHASHSEED, so the promised
    "consistent but varied behavior" only held within a single run.)

    Parameters
    ----------
    signal_count : int
        Number of controlled signal positions (length of each state string).
    tl_id : str
        Traffic-light id; used only to derive the deterministic seed.

    Returns
    -------
    dict
        SUMO tlLogic-style program: 'type', 'programID', 'offset', 'phases'.
    """
    import zlib

    # Deterministic, process-independent seed derived from the id.
    random.seed(zlib.crc32(tl_id.encode('utf-8')) % 10000)
    # Random offset between 0 and 300 seconds (granular for good spread).
    offset = random.randint(0, 300)
    # Slightly varied durations per light.
    green_duration = random.randint(25, 35)
    yellow_duration = random.randint(2, 4)
    red_buffer_duration = random.randint(1, 3)

    def _green_states():
        """Return one state string per green phase ('G' = green, 'r' = red)."""
        if signal_count == 2:
            # alternate between the two signals
            return ['Gr', 'rG']
        if signal_count == 3:
            # cycle through the three signals
            return ['Grr', 'rGr', 'rrG']
        if signal_count == 4:
            # opposing movements: two signals green at a time
            return ['GGrr', 'rrGG']
        # 5+ signals: release one quarter of the signals at a time
        quarter = max(1, signal_count // 4)
        states = []
        for phase_idx in range(4):
            start = phase_idx * quarter
            # the last quarter absorbs any remainder
            width = quarter if phase_idx < 3 else signal_count - 3 * quarter
            state = 'r' * start + 'G' * width + 'r' * (signal_count - start - width)
            # normalize defensively to exactly signal_count characters
            states.append((state + 'r' * signal_count)[:signal_count])
        return states

    # Each green phase is followed by its yellow (G -> y) and an all-red buffer.
    phases = []
    for green_state in _green_states():
        phases.append({'duration': green_duration, 'state': green_state})
        phases.append({'duration': yellow_duration, 'state': green_state.replace('G', 'y')})
        phases.append({'duration': red_buffer_duration, 'state': 'r' * signal_count})

    return {
        'type': 'static',
        'programID': '1',
        'offset': str(offset),
        'phases': phases,
    }
def fix_traffic_synchronization_for_city(city_dir):
    """Fix traffic light synchronization for a specific city.

    Parses the city's gzipped SUMO network, builds one desynchronized
    tlLogic program per traffic light (via
    create_desynchronized_traffic_light_logic), prints summary statistics,
    and returns the programs keyed by traffic-light id — or None when the
    network file is missing.
    """
    net_file = os.path.join(city_dir, 'osm.net.xml.gz')
    if not os.path.exists(net_file):
        print(f"Network file not found: {net_file}")
        return None
    print(f"\nProcessing {city_dir}...")
    # Build one desynchronized program per tlLogic element in the network.
    traffic_lights = {}
    with gzip.open(net_file, 'rt', encoding='utf-8') as handle:
        net_root = ET.parse(handle).getroot()
        for logic in net_root.findall('tlLogic'):
            logic_id = logic.get('id')
            if logic_id is None:
                continue
            phase_elems = logic.findall('phase')
            if not phase_elems:
                continue
            # Signal count = width of the first phase's state string.
            n_signals = len(phase_elems[0].get('state'))
            traffic_lights[logic_id] = create_desynchronized_traffic_light_logic(n_signals, logic_id)
    print(f" Created {len(traffic_lights)} desynchronized traffic lights")
    # Summarize how varied the generated offsets / first green durations are.
    offset_distribution = {}
    duration_distribution = {}
    for tl_info in traffic_lights.values():
        offset_val = int(tl_info['offset'])
        offset_distribution[offset_val] = offset_distribution.get(offset_val, 0) + 1
        first_green = tl_info['phases'][0]['duration']
        duration_distribution[first_green] = duration_distribution.get(first_green, 0) + 1
    print(f" Offset distribution (0-300s): {len(offset_distribution)} different offsets")
    print(f" Duration distribution: {len(duration_distribution)} different green durations")
    return traffic_lights
def generate_desynchronized_traffic_lights_xml(traffic_lights, output_file):
    """Serialize desynchronized tlLogic programs into an additional-file.

    *traffic_lights* maps a traffic-light id to a dict with ``type``,
    ``programID``, ``offset`` (already a string) and a ``phases`` list of
    ``{'duration', 'state'}`` dicts.  Returns the number of lights written.
    """
    doc_root = ET.Element('additional')
    for logic_id, info in traffic_lights.items():
        # Attribute insertion order matches the original serializer.
        logic_elem = ET.SubElement(doc_root, 'tlLogic', {
            'id': logic_id,
            'type': info['type'],
            'programID': info['programID'],
            'offset': info['offset'],
        })
        for phase in info['phases']:
            ET.SubElement(logic_elem, 'phase', {
                'duration': str(phase['duration']),
                'state': phase['state'],
            })
    payload = '<?xml version="1.0" encoding="UTF-8"?>\n' + ET.tostring(doc_root, encoding='unicode')
    with open(output_file, 'w', encoding='utf-8') as out:
        out.write(payload)
    return len(traffic_lights)
def main():
    """Desynchronize traffic lights for every known city.

    For each city directory found under the current directory, rebuilds the
    traffic-light programs with randomized timing and writes them to
    ``traffic_lights_desync.add.xml`` inside that directory.
    """
    cities = ['miami', 'los_angeles', 'new_york']
    print("Fixing Traffic Light Synchronization")
    print("=" * 60)
    print("Breaking city-wide synchronization:")
    print("- Random offsets (0-300 seconds)")
    print("- Varied green durations (25-35 seconds)")
    print("- Varied yellow durations (2-4 seconds)")
    print("- Varied red buffer durations (1-3 seconds)")
    print("=" * 60)
    for city in cities:
        city_dir = os.path.join('.', city)
        if not os.path.exists(city_dir):
            print(f"Skipping {city}: directory not found")
            continue
        # Fix traffic synchronization for this city
        traffic_lights = fix_traffic_synchronization_for_city(city_dir)
        if traffic_lights is None:
            # Network file was missing; nothing to write for this city.
            continue
        # Generate the desynchronized add file
        output_file = os.path.join(city_dir, 'traffic_lights_desync.add.xml')
        count = generate_desynchronized_traffic_lights_xml(traffic_lights, output_file)
        print(f" Generated {output_file} with {count} traffic lights")
        # Show example (first traffic light's program) for a quick sanity check
        if traffic_lights:
            first_id = list(traffic_lights.keys())[0]
            first_tl = traffic_lights[first_id]
            signal_count = len(first_tl['phases'][0]['state'])
            print(f" Example: {first_id} - {signal_count} signals, {len(first_tl['phases'])} phases")
            print(f" Offset: {first_tl['offset']}s")
            print(f" Green duration: {first_tl['phases'][0]['duration']}s")
            print(f" Yellow duration: {first_tl['phases'][1]['duration']}s")
            print(f" Phase 1: {first_tl['phases'][0]['state']}")
            if len(first_tl['phases']) > 3:
                print(f" Phase 4: {first_tl['phases'][3]['state']}")
    print("\n" + "=" * 60)
    print("Summary:")
    print("- Fixed traffic light synchronization across all cities")
    print("- Added random offsets to break city-wide coordination")
    print("- Varied phase durations for realistic behavior")
    print("- Generated traffic_lights_desync.add.xml files")
    print("- Ready to update SUMO configurations")
if __name__ == "__main__":
    main()
# Source: XGraph-Team/XFlow / SUMOxPyPSA/build.py
#!/usr/bin/env python3
"""
Unified build script for SUMO network generation
"""
import os
import sys
import subprocess
from sumo_config import SUMO_COMMON_CONFIG, CITY_CONFIGS
def run_command(cmd, cwd=None):
    """Run *cmd* (an argv list), echoing its combined stdout/stderr live.

    Returns the process exit code.
    """
    print(f"Running: {' '.join(cmd)}")
    # Merge stderr into stdout so the caller sees one ordered stream.
    proc = subprocess.Popen(
        cmd,
        cwd=cwd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    )
    for out_line in proc.stdout:
        print(out_line, end='')
    proc.wait()
    return proc.returncode
def build_city(city):
    """Build the SUMO network for *city* using its netconvert settings.

    Returns True on success; False when the city is unknown, its directory
    is missing, or netconvert exits non-zero.  Interface (single positional
    ``city`` argument, bool return) is unchanged.
    """
    if city not in CITY_CONFIGS:
        print(f"Unknown city: {city}")
        return False
    city_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), city)
    if not os.path.exists(city_dir):
        print(f"City directory not found: {city_dir}")
        return False
    cfg = CITY_CONFIGS[city]['netccfg']
    # Assemble the netconvert command.  Value-taking options are appended as
    # flag/value PAIRS, and only when the configured value is non-empty.
    # (The previous approach filtered empty strings out of a flat list,
    # which could leave a dangling flag like "--type-files" that swallowed
    # the NEXT flag as its value.)
    netconvert_cmd = [
        "netconvert",
        "--osm-files", cfg['input']['osm-files'],
        "--output-file", cfg['output']['output-file'],
    ]
    type_files = cfg['input'].get('type-files', '')
    if type_files:
        netconvert_cmd += ["--type-files", type_files]
    # --lefthand is a boolean switch: present or absent, no value.
    if cfg['processing'].get('lefthand') == 'true':
        netconvert_cmd.append("--lefthand")
    keep_vclass = cfg['edge_removal'].get('keep-edges.by-vclass', '')
    if keep_vclass:
        netconvert_cmd += ["--keep-edges.by-vclass", keep_vclass]
    remove_vclass = cfg['edge_removal'].get('remove-edges.by-vclass', '')
    if remove_vclass:
        netconvert_cmd += ["--remove-edges.by-vclass", remove_vclass]
    if run_command(netconvert_cmd, cwd=city_dir) != 0:
        print(f"Failed to build network for {city}")
        return False
    print(f"Successfully built network for {city}")
    return True
def main():
    """CLI entry point: build the network for the city named in argv[1].

    Returns the process exit status (0 on success, 1 on failure/bad usage).
    """
    if len(sys.argv) != 2:
        print("Usage: python build.py [manchester|newyork]")
        return 1
    target = sys.argv[1].lower()
    # Exit status mirrors build_city's boolean result.
    return 0 if build_city(target) else 1
if __name__ == "__main__":
    sys.exit(main())
# Source: XGraph-Team/XFlow / SUMOxPyPSA/desynchronize_traffic_lights.py
#!/usr/bin/env python3
"""
Script to desynchronize traffic lights by adding random phase offsets
"""
import gzip
import xml.etree.ElementTree as ET
import os
import random
def extract_and_desynchronize_traffic_lights(netfile):
    """Read every tlLogic from a gzipped SUMO net file and attach a random
    0-40 s offset to each, so neighbouring lights fall out of step.

    Returns a dict keyed by traffic-light id holding type/programID/offset
    and the parsed phase list; lights without usable phases are skipped.
    Uses the module-level (unseeded) ``random`` generator.
    """
    result = {}
    with gzip.open(netfile, 'rt', encoding='utf-8') as handle:
        net_root = ET.parse(handle).getroot()
        for logic in net_root.findall('tlLogic'):
            logic_id = logic.get('id')
            if logic_id is None:
                continue
            parsed_phases = []
            for phase_elem in logic.findall('phase'):
                dur = phase_elem.get('duration')
                state = phase_elem.get('state')
                # Keep only fully-specified phases.
                if dur and state:
                    parsed_phases.append({'duration': int(dur), 'state': state})
            if not parsed_phases:
                continue
            result[logic_id] = {
                'type': logic.get('type', 'static'),
                'programID': logic.get('programID', '0'),
                # Random start offset breaks city-wide synchronization.
                'offset': random.randint(0, 40),
                'phases': parsed_phases,
            }
    return result
def fix_traffic_light_phases(phases):
    """Return *phases* with a 2 s all-red phase inserted after every yellow
    phase that is immediately followed by a green phase.

    The input list is not modified; an empty list is returned unchanged.
    The all-red state width is taken from the first phase's state string.
    """
    if not phases:
        return phases
    signal_width = len(phases[0]['state'])
    result = []
    last = len(phases) - 1
    for idx, current in enumerate(phases):
        result.append(current)
        # Yellow -> green transitions get a short all-red safety buffer.
        if idx < last and 'y' in current['state'].lower() and 'g' in phases[idx + 1]['state'].lower():
            result.append({'duration': 2, 'state': 'r' * signal_width})
    return result
def create_opposing_phases_for_intersection(phases):
    """Replace *phases* with a fixed six-phase opposing-direction program.

    The signal string is split in half — the first half is treated as one
    direction (e.g. north/south), the second half as the opposing one —
    and the halves alternate green.  Only the width of the first input
    phase is used; an empty list passes through unchanged.
    """
    if not phases:
        return phases
    width = len(phases[0]['state'])
    first = width // 2
    second = width - first
    ns_green = 'G' * first + 'r' * second
    ns_yellow = 'y' * first + 'r' * second
    all_red = 'r' * width
    ew_green = 'r' * first + 'G' * second
    ew_yellow = 'r' * first + 'y' * second
    # Green (15 s) -> yellow (3 s) -> all-red (2 s), once per direction.
    return [
        {'duration': 15, 'state': ns_green},
        {'duration': 3, 'state': ns_yellow},
        {'duration': 2, 'state': all_red},
        {'duration': 15, 'state': ew_green},
        {'duration': 3, 'state': ew_yellow},
        {'duration': 2, 'state': all_red},
    ]
def generate_desynchronized_traffic_lights_xml(traffic_lights, output_file, use_opposing=False):
    """Write *traffic_lights* to *output_file* as an <additional> document.

    Each light's phases are transformed with
    create_opposing_phases_for_intersection when *use_opposing* is true,
    otherwise with fix_traffic_light_phases.  Returns the number of
    traffic lights written.
    """
    doc_root = ET.Element('additional')
    for logic_id, info in traffic_lights.items():
        logic_elem = ET.SubElement(doc_root, 'tlLogic')
        logic_elem.set('id', logic_id)
        logic_elem.set('type', info['type'])
        logic_elem.set('programID', info['programID'])
        logic_elem.set('offset', str(info['offset']))
        # Pick the phase post-processing strategy.
        if use_opposing:
            phase_list = create_opposing_phases_for_intersection(info['phases'])
        else:
            phase_list = fix_traffic_light_phases(info['phases'])
        for phase in phase_list:
            entry = ET.SubElement(logic_elem, 'phase')
            entry.set('duration', str(phase['duration']))
            entry.set('state', phase['state'])
    payload = '<?xml version="1.0" encoding="UTF-8"?>\n' + ET.tostring(doc_root, encoding='unicode')
    with open(output_file, 'w', encoding='utf-8') as out:
        out.write(payload)
    return len(traffic_lights)
def main():
    """Generate desynchronized and opposing traffic-light files per city.

    Reads each city's compressed network, extracts its traffic lights with
    randomized offsets, and writes two additional-files next to the network:
    ``traffic_lights_desync.add.xml`` and ``traffic_lights_opposing.add.xml``.
    """
    current_dir = os.getcwd()
    print(f"Current working directory: {current_dir}")
    cities = ['miami', 'los_angeles', 'new_york']
    print("Traffic Light Desynchronization Generator")
    print("=" * 50)
    for city in cities:
        city_dir = os.path.join(current_dir, city)
        print(f"Checking directory: {city_dir}")
        if not os.path.exists(city_dir):
            print(f"Skipping {city}: directory not found at {city_dir}")
            continue
        net_file = os.path.join(city_dir, 'osm.net.xml.gz')
        if not os.path.exists(net_file):
            print(f"Skipping {city}: network file not found at {net_file}")
            continue
        print(f"\nProcessing {city}...")
        # Extract and desynchronize traffic lights
        traffic_lights = extract_and_desynchronize_traffic_lights(net_file)
        if not traffic_lights:
            print(f" No traffic lights found in {city}")
            continue
        print(f" Found {len(traffic_lights)} traffic lights")
        # Show first few IDs as examples
        ids_list = list(traffic_lights.keys())
        print(f" Example IDs: {ids_list[:5]}")
        # Variant 1: original phases (plus all-red buffers) with random offsets.
        output_file = os.path.join(city_dir, 'traffic_lights_desync.add.xml')
        count = generate_desynchronized_traffic_lights_xml(traffic_lights, output_file, use_opposing=False)
        print(f" Generated {output_file} with {count} desynchronized traffic lights")
        # Variant 2: synthetic opposing-direction phase programs.
        output_file_opposing = os.path.join(city_dir, 'traffic_lights_opposing.add.xml')
        count_opposing = generate_desynchronized_traffic_lights_xml(traffic_lights, output_file_opposing, use_opposing=True)
        print(f" Generated {output_file_opposing} with {count_opposing} opposing traffic lights")
        # Show example offsets
        if traffic_lights:
            first_id = ids_list[0]
            first_tl = traffic_lights[first_id]
            print(f" Example: {first_id} - offset: {first_tl['offset']}s")
    print("\n" + "=" * 50)
    print("Summary:")
    print("- Generated desynchronized traffic light logic")
    print("- Added random phase offsets to prevent synchronization")
    print("- Created traffic_lights_desync.add.xml (with offsets)")
    print("- Created traffic_lights_opposing.add.xml (with opposing phases)")
    print("- Use these files to replace the synchronized ones")
if __name__ == "__main__":
    main()
# Source: XGraph-Team/XFlow / SUMOxPyPSA/generate_traffic_lights.py
#!/usr/bin/env python3
"""
Script to extract all traffic light IDs from network file and generate matching traffic_lights.add.xml
"""
import gzip
import xml.etree.ElementTree as ET
import os
def extract_traffic_light_info(netfile):
    """Collect every tlLogic program from a gzipped SUMO network file.

    Returns a dict mapping traffic-light id to its type, programID and
    offset (as read from the file, with the usual defaults) plus a list of
    parsed phases.  Lights without a usable phase are omitted.
    """
    collected = {}
    with gzip.open(netfile, 'rt', encoding='utf-8') as handle:
        net_root = ET.parse(handle).getroot()
        for logic in net_root.findall('tlLogic'):
            logic_id = logic.get('id')
            if logic_id is None:
                continue
            phase_list = []
            for phase_elem in logic.findall('phase'):
                dur = phase_elem.get('duration')
                state = phase_elem.get('state')
                # Keep only fully-specified phases.
                if dur and state:
                    phase_list.append({'duration': int(dur), 'state': state})
            if not phase_list:
                continue
            collected[logic_id] = {
                'type': logic.get('type', 'static'),
                'programID': logic.get('programID', '0'),
                'offset': logic.get('offset', '0'),
                'phases': phase_list,
            }
    return collected
def fix_traffic_light_phases(phases):
    """Insert a 2-second all-red clearance phase after each yellow phase
    that leads directly into a green phase.

    Returns a new list; an empty input comes back unchanged.  The all-red
    state width comes from the first phase's state string.
    """
    if not phases:
        return phases
    width = len(phases[0]['state'])
    out = []
    for pos, phase in enumerate(phases):
        out.append(phase)
        nxt = phases[pos + 1] if pos + 1 < len(phases) else None
        if nxt is not None and 'y' in phase['state'].lower() and 'g' in nxt['state'].lower():
            # Safety buffer between conflicting movements.
            out.append({'duration': 2, 'state': 'r' * width})
    return out
def generate_traffic_lights_add_xml(traffic_lights, output_file):
    """Generate a traffic_lights.add.xml file with fixed traffic light logic.

    Each light's phases are passed through fix_traffic_light_phases() to
    insert all-red safety buffers before serialization.  Returns the number
    of traffic lights written.
    """
    # Create XML structure
    root = ET.Element('additional')
    for tl_id, tl_info in traffic_lights.items():
        # Create tlLogic element
        tl_elem = ET.SubElement(root, 'tlLogic')
        tl_elem.set('id', tl_id)
        tl_elem.set('type', tl_info['type'])
        tl_elem.set('programID', tl_info['programID'])
        tl_elem.set('offset', tl_info['offset'])
        # Fix the phases (adds all-red buffers between yellow -> green)
        fixed_phases = fix_traffic_light_phases(tl_info['phases'])
        # Add phases
        for phase in fixed_phases:
            phase_elem = ET.SubElement(tl_elem, 'phase')
            phase_elem.set('duration', str(phase['duration']))
            phase_elem.set('state', phase['state'])
    # Serialize directly with ET.tostring; the previously-built (and never
    # used) ET.ElementTree local has been removed as dead code.
    xml_str = '<?xml version="1.0" encoding="UTF-8"?>\n'
    xml_str += ET.tostring(root, encoding='unicode')
    with open(output_file, 'w', encoding='utf-8') as f:
        f.write(xml_str)
    return len(traffic_lights)
def main():
    """Extract traffic lights from each city's network and regenerate
    ``traffic_lights.add.xml`` with corrected phase cycling."""
    # Get the current working directory
    current_dir = os.getcwd()
    print(f"Current working directory: {current_dir}")
    cities = ['miami', 'los_angeles', 'new_york']
    print("Traffic Light ID Extractor and Generator")
    print("=" * 50)
    for city in cities:
        city_dir = os.path.join(current_dir, city)
        print(f"Checking directory: {city_dir}")
        if not os.path.exists(city_dir):
            print(f"Skipping {city}: directory not found at {city_dir}")
            continue
        net_file = os.path.join(city_dir, 'osm.net.xml.gz')
        if not os.path.exists(net_file):
            print(f"Skipping {city}: network file not found at {net_file}")
            continue
        print(f"\nProcessing {city}...")
        # Extract traffic light information
        traffic_lights = extract_traffic_light_info(net_file)
        if not traffic_lights:
            print(f" No traffic lights found in {city}")
            continue
        print(f" Found {len(traffic_lights)} traffic lights")
        # Show first few IDs as examples
        ids_list = list(traffic_lights.keys())
        print(f" Example IDs: {ids_list[:5]}")
        # Generate the add file
        output_file = os.path.join(city_dir, 'traffic_lights.add.xml')
        count = generate_traffic_lights_add_xml(traffic_lights, output_file)
        print(f" Generated {output_file} with {count} traffic lights")
        # Show phase information for first traffic light
        if traffic_lights:
            first_id = ids_list[0]
            first_tl = traffic_lights[first_id]
            original_phases = len(first_tl['phases'])
            # Re-run the fixer just to report how many phases were added.
            fixed_phases = len(fix_traffic_light_phases(first_tl['phases']))
            print(f" Example: {first_id} - {original_phases} phases -> {fixed_phases} phases")
    print("\n" + "=" * 50)
    print("Summary:")
    print("- Generated traffic_lights.add.xml files for each city")
    print("- Fixed traffic light phases to ensure proper green-yellow-red-green cycling")
    print("- Added all-red safety phases between direction changes")
    print("- All traffic light IDs now match the network files")
if __name__ == "__main__":
    main()
# Source: XGraph-Team/XFlow / SUMOxPyPSA/modify_traffic_lights.py
#!/usr/bin/env python3
"""
Script to modify existing traffic light logic to separate straight and left-turn signals
"""
import gzip
import xml.etree.ElementTree as ET
import os
import copy
def analyze_traffic_light_structure(netfile):
    """Parse a gzipped SUMO net file into traffic-light and connection maps.

    Returns ``(traffic_lights, connections)``: the first maps each tlLogic
    id to its program (type/programID/offset/phases), the second maps a
    traffic-light id to the list of connections it controls (from/to,
    lanes, direction code, linkIndex).
    """
    lights = {}
    conns_by_tl = {}
    with gzip.open(netfile, 'rt', encoding='utf-8') as handle:
        net_root = ET.parse(handle).getroot()
        # Collect every traffic-light program.
        for logic in net_root.findall('tlLogic'):
            phase_list = [
                {'duration': int(p.get('duration')), 'state': p.get('state')}
                for p in logic.findall('phase')
            ]
            lights[logic.get('id')] = {
                'type': logic.get('type', 'static'),
                'programID': logic.get('programID', '0'),
                'offset': logic.get('offset', '0'),
                'phases': phase_list,
            }
        # Collect the connections each traffic light controls.
        for conn in net_root.findall('connection'):
            owner = conn.get('tl')
            if not owner:
                continue
            conns_by_tl.setdefault(owner, []).append({
                'from': conn.get('from'),
                'to': conn.get('to'),
                'fromLane': conn.get('fromLane'),
                'toLane': conn.get('toLane'),
                'dir': conn.get('dir', 's'),  # default: straight
                'linkIndex': conn.get('linkIndex'),
            })
    return lights, conns_by_tl
def separate_straight_and_left_signals(traffic_lights, connections):
    """Separate straight and left-turn signals for each traffic light.

    For every light that controls at least one connection, rebuilds its
    phase states treating the state string as three equal "thirds":
    straight movements, left turns, and right turns (the right third is
    forced green in every phase).  Returns a dict keyed by traffic-light
    id; the per-direction connection lists are included in each entry.

    NOTE(review): this assumes each light's link indices are laid out so
    the first/second/last thirds of the state string correspond to
    straight/left/right movements — confirm against the network's
    linkIndex ordering before relying on the output.
    """
    modified_tls = {}
    for tl_id, tl_info in traffic_lights.items():
        if tl_id not in connections:
            continue
        conns = connections[tl_id]
        if not conns:
            continue
        # Group connections by their direction code from the net file.
        straight_conns = [c for c in conns if c['dir'] in ['s', 't']]  # straight/through
        left_conns = [c for c in conns if c['dir'] == 'l']  # left turn
        right_conns = [c for c in conns if c['dir'] == 'r']  # right turn (usually always green)
        # Get the original state length (assumes at least one phase exists).
        original_state_length = len(tl_info['phases'][0]['state'])
        # Create new phases with the same number of signal groups
        new_phases = []
        for phase in tl_info['phases']:
            original_state = phase['state']
            # Create new state with the same length but reorganized logic
            new_state = list(original_state)
            # Determine which movements should be green in this phase by
            # sampling each third of the original state string.
            has_straight = 'G' in original_state[:original_state_length//3]
            has_left = 'G' in original_state[original_state_length//3:2*original_state_length//3]
            has_right = 'G' in original_state[2*original_state_length//3:]  # computed but unused below
            # Reorganize the signal groups:
            # First third: straight movements
            # Second third: left turn movements
            # Last third: right turn movements (usually always green)
            # Set straight signals (first third)
            for i in range(original_state_length // 3):
                if has_straight:
                    new_state[i] = 'G'
                else:
                    new_state[i] = 'r'
            # Set left turn signals (second third)
            for i in range(original_state_length // 3, 2 * original_state_length // 3):
                if has_left:
                    new_state[i] = 'G'
                else:
                    new_state[i] = 'r'
            # Set right turn signals (last third) - usually always green
            for i in range(2 * original_state_length // 3, original_state_length):
                new_state[i] = 'G'  # Right turns are usually always allowed
            new_phases.append({
                'duration': phase['duration'],
                'state': ''.join(new_state)
            })
        modified_tls[tl_id] = {
            'type': tl_info['type'],
            'programID': '1',  # Use programID '1' to avoid conflicts
            'offset': tl_info['offset'],
            'phases': new_phases,
            'straight_connections': straight_conns,
            'left_connections': left_conns,
            'right_connections': right_conns
        }
    return modified_tls
def update_connections_for_separate_signals(connections, modified_tls):
    """Rewrite connection linkIndex values for the separated signal groups.

    Straight connections get linkIndex '0', left turns '1', right turns
    '2'.  Only traffic lights present in *connections* are processed, and
    the input dicts are left untouched (shallow copies are returned).
    """
    rewritten = []
    # (bucket key in modified_tls entry, signal-group index) in output order.
    group_map = [
        ('straight_connections', '0'),
        ('left_connections', '1'),
        ('right_connections', '2'),
    ]
    for tl_id, tl_info in modified_tls.items():
        if tl_id not in connections:
            continue
        for bucket, link_index in group_map:
            for conn in tl_info[bucket]:
                updated = dict(conn)
                updated['linkIndex'] = link_index
                rewritten.append(updated)
    return rewritten
def generate_modified_traffic_lights_xml(modified_tls, output_file):
    """Write the reorganized tlLogic programs to *output_file*.

    Serializes each entry's type/programID/offset and phases under an
    <additional> root.  Returns the number of traffic lights written.
    """
    doc_root = ET.Element('additional')
    for logic_id, info in modified_tls.items():
        logic_elem = ET.SubElement(doc_root, 'tlLogic')
        logic_elem.set('id', logic_id)
        logic_elem.set('type', info['type'])
        logic_elem.set('programID', info['programID'])
        logic_elem.set('offset', str(info['offset']))
        for phase in info['phases']:
            entry = ET.SubElement(logic_elem, 'phase')
            entry.set('duration', str(phase['duration']))
            entry.set('state', phase['state'])
    document = '<?xml version="1.0" encoding="UTF-8"?>\n' + ET.tostring(doc_root, encoding='unicode')
    with open(output_file, 'w', encoding='utf-8') as out:
        out.write(document)
    return len(modified_tls)
def main():
    """Separate straight/left/right signal groups for every city network
    and write ``traffic_lights_separated.add.xml`` into each city dir."""
    cities = ['miami', 'los_angeles', 'new_york']
    print("Traffic Light Signal Separation")
    print("=" * 50)
    for city in cities:
        city_dir = os.path.join('.', city)
        if not os.path.exists(city_dir):
            print(f"Skipping {city}: directory not found")
            continue
        net_file = os.path.join(city_dir, 'osm.net.xml.gz')
        if not os.path.exists(net_file):
            print(f"Skipping {city}: network file not found")
            continue
        print(f"\nProcessing {city}...")
        # Analyze current structure
        traffic_lights, connections = analyze_traffic_light_structure(net_file)
        print(f" Found {len(traffic_lights)} traffic lights")
        print(f" Found {len(connections)} traffic lights with connections")
        # Separate signals into straight/left/right groups
        modified_tls = separate_straight_and_left_signals(traffic_lights, connections)
        print(f" Modified {len(modified_tls)} traffic lights")
        # Generate new traffic lights file
        output_file = os.path.join(city_dir, 'traffic_lights_separated.add.xml')
        count = generate_modified_traffic_lights_xml(modified_tls, output_file)
        print(f" Generated {output_file} with {count} traffic lights")
        # Show example (first modified light) for a quick sanity check
        if modified_tls:
            first_id = list(modified_tls.keys())[0]
            first_tl = modified_tls[first_id]
            print(f" Example: {first_id} - {len(first_tl['phases'])} phases")
            print(f" Signal groups: [straight, left, right]")
            print(f" Straight connections: {len(first_tl['straight_connections'])}")
            print(f" Left connections: {len(first_tl['left_connections'])}")
            print(f" Right connections: {len(first_tl['right_connections'])}")
    print("\n" + "=" * 50)
    print("Summary:")
    print("- Separated straight and left-turn signals")
    print("- Each direction now has at most 2 lights (straight + left)")
    print("- Right turns are always green (signal group 2)")
    print("- Vehicles will only react to their relevant signal")
    print("- Generated traffic_lights_separated.add.xml files")
if __name__ == "__main__":
    main()
# Source: XGraph-Team/XFlow / SUMOxPyPSA/randomize_traffic_lights.py
#!/usr/bin/env python3
"""
Script to randomize traffic light timing to break synchronization
"""
import gzip
import xml.etree.ElementTree as ET
import os
import random
def randomize_traffic_lights(netfile):
    """Extract traffic lights from a gzipped net file and randomize timing.

    Each light gets: phase durations jittered by +/-2 s (floored at 1 s), a
    random 0-60 s offset, and a rotated phase order so the program starts
    on a red / yellow / green phase with ~40/30/30 probability.  Returns a
    dict keyed by traffic-light id; ``initial_state`` records the state the
    rotated program starts in.  Uses the module-level (unseeded) ``random``
    generator, so results differ between runs.
    """
    traffic_lights = {}
    with gzip.open(netfile, 'rt', encoding='utf-8') as f:
        tree = ET.parse(f)
        root = tree.getroot()
        for tl in root.findall('tlLogic'):
            tl_id = tl.get('id')
            if tl_id is None:
                continue
            phases = []
            for phase in tl.findall('phase'):
                duration = phase.get('duration')
                state = phase.get('state')
                if duration and state:
                    # Add slight randomization to phase duration (±2 seconds)
                    original_duration = int(duration)
                    randomized_duration = max(1, original_duration + random.randint(-2, 2))
                    phases.append({
                        'duration': randomized_duration,
                        'state': state
                    })
            if phases:
                # Add random offset (0-60 seconds) to break synchronization
                offset = random.randint(0, 60)
                # Choose the starting phase class: ~40% red, 30% yellow, 30% green.
                rand_val = random.random()
                if rand_val < 0.4:  # 40% start with red
                    # "Red" here means red present and no green at all.
                    red_phases = [i for i, p in enumerate(phases) if 'r' in p['state'].lower() and 'g' not in p['state'].lower()]
                    if red_phases:
                        phase_shift = random.choice(red_phases)
                    else:
                        # If no red phase, start at a random phase
                        phase_shift = random.randint(0, len(phases) - 1)
                elif rand_val < 0.7:  # 30% start with yellow
                    # Find a yellow phase
                    yellow_phases = [i for i, p in enumerate(phases) if 'y' in p['state'].lower()]
                    if yellow_phases:
                        phase_shift = random.choice(yellow_phases)
                    else:
                        # If no yellow phase, start at a random phase
                        phase_shift = random.randint(0, len(phases) - 1)
                else:  # 30% start with green
                    # Find a green phase
                    green_phases = [i for i, p in enumerate(phases) if 'g' in p['state'].lower()]
                    if green_phases:
                        phase_shift = random.choice(green_phases)
                    else:
                        # If no green phase, start at a random phase
                        phase_shift = random.randint(0, len(phases) - 1)
                # Rotate the phase list so the chosen phase comes first.
                shifted_phases = phases[phase_shift:] + phases[:phase_shift]
                traffic_lights[tl_id] = {
                    'type': tl.get('type', 'static'),
                    'programID': tl.get('programID', '0'),
                    'offset': offset,
                    'phases': shifted_phases,
                    'initial_state': shifted_phases[0]['state'] if shifted_phases else 'unknown'
                }
    return traffic_lights
def fix_traffic_light_phases(phases):
    """Return a copy of *phases* with a 2 s all-red clearance phase added
    after every yellow phase whose successor contains a green signal.

    Empty input is returned unchanged; the original list is not mutated.
    """
    if not phases:
        return phases
    red_state = 'r' * len(phases[0]['state'])
    augmented = []
    for position, entry in enumerate(phases):
        augmented.append(entry)
        if position + 1 >= len(phases):
            continue
        follower = phases[position + 1]['state'].lower()
        # Buffer yellow -> green transitions with a short all-red phase.
        if 'y' in entry['state'].lower() and 'g' in follower:
            augmented.append({'duration': 2, 'state': red_state})
    return augmented
def generate_randomized_traffic_lights_xml(traffic_lights, output_file):
    """Serialize the randomized traffic-light programs to *output_file*.

    Programs are written under programID "1" so they do not clash with the
    network's built-in program; phases receive all-red buffers via
    fix_traffic_light_phases().  Returns the number of lights written.
    """
    doc_root = ET.Element('additional')
    for logic_id, info in traffic_lights.items():
        logic_elem = ET.SubElement(doc_root, 'tlLogic')
        logic_elem.set('id', logic_id)
        logic_elem.set('type', info['type'])
        logic_elem.set('programID', '1')  # avoid conflicts with existing logic
        logic_elem.set('offset', str(info['offset']))
        # Insert all-red safety buffers between yellow and green phases.
        for phase in fix_traffic_light_phases(info['phases']):
            entry = ET.SubElement(logic_elem, 'phase')
            entry.set('duration', str(phase['duration']))
            entry.set('state', phase['state'])
    payload = '<?xml version="1.0" encoding="UTF-8"?>\n' + ET.tostring(doc_root, encoding='unicode')
    with open(output_file, 'w', encoding='utf-8') as out:
        out.write(payload)
    return len(traffic_lights)
def main():
    """Generate ``traffic_lights_randomized.add.xml`` for every city.

    Extracts each city's traffic lights with randomized offsets, durations
    and starting phases, writes the add-file, and reports the resulting
    initial-state distribution.
    """
    current_dir = os.getcwd()
    print(f"Current working directory: {current_dir}")
    cities = ['miami', 'los_angeles', 'new_york']
    print("Traffic Light Randomization Generator")
    print("=" * 50)
    for city in cities:
        city_dir = os.path.join(current_dir, city)
        print(f"Checking directory: {city_dir}")
        if not os.path.exists(city_dir):
            print(f"Skipping {city}: directory not found at {city_dir}")
            continue
        net_file = os.path.join(city_dir, 'osm.net.xml.gz')
        if not os.path.exists(net_file):
            print(f"Skipping {city}: network file not found at {net_file}")
            continue
        print(f"\nProcessing {city}...")
        # Randomize traffic lights
        traffic_lights = randomize_traffic_lights(net_file)
        if not traffic_lights:
            print(f" No traffic lights found in {city}")
            continue
        print(f" Found {len(traffic_lights)} traffic lights")
        # Show first few IDs as examples
        ids_list = list(traffic_lights.keys())
        print(f" Example IDs: {ids_list[:5]}")
        # Generate randomized add file
        output_file = os.path.join(city_dir, 'traffic_lights_randomized.add.xml')
        count = generate_randomized_traffic_lights_xml(traffic_lights, output_file)
        print(f" Generated {output_file} with {count} randomized traffic lights")
        # Show example randomization
        if traffic_lights:
            first_id = ids_list[0]
            first_tl = traffic_lights[first_id]
            print(f" Example: {first_id} - offset: {first_tl['offset']}s, phases: {len(first_tl['phases'])}")
            # Show phase durations
            durations = [phase['duration'] for phase in first_tl['phases']]
            print(f" Phase durations: {durations}")
        # Tally which state class each light's rotated program starts in.
        red_count = sum(1 for tl in traffic_lights.values() if 'r' in tl['initial_state'].lower() and 'g' not in tl['initial_state'].lower())
        yellow_count = sum(1 for tl in traffic_lights.values() if 'y' in tl['initial_state'].lower())
        green_count = sum(1 for tl in traffic_lights.values() if 'g' in tl['initial_state'].lower())
        total = len(traffic_lights)
        print(f" Initial state distribution:")
        print(f" Red: {red_count}/{total} ({red_count/total*100:.1f}%)")
        print(f" Yellow: {yellow_count}/{total} ({yellow_count/total*100:.1f}%)")
        print(f" Green: {green_count}/{total} ({green_count/total*100:.1f}%)")
    print("\n" + "=" * 50)
    print("Summary:")
    print("- Added random offsets (0-60s) to break synchronization")
    print("- Randomized initial states by shifting phases")
    print("- Varied phase durations slightly (±2s)")
    print("- Added all-red safety phases")
    print("- Created traffic_lights_randomized.add.xml files")
    print("- Use these files to replace the synchronized ones")
if __name__ == "__main__":
    main()
# Source: XGraph-Team/XFlow / SUMOxPyPSA/regenerate_networks.py
#!/usr/bin/env python3
"""
Script to regenerate SUMO networks with updated traffic light configuration
"""
import os
import subprocess
import sys
# Get SUMO binary path from config
try:
from config import SUMO_PATH
NETCONVERT_BINARY = os.path.join(SUMO_PATH, "bin/netconvert")
except ImportError:
print("Error: Could not import SUMO_PATH from config.py")
sys.exit(1)
def regenerate_network(city_dir):
    """Rebuild the SUMO network for one city by invoking netconvert.

    Changes into *city_dir*, runs netconvert against its ``osm.netccfg``
    configuration, and reports success as a boolean. The caller's working
    directory is always restored before returning.
    """
    print(f"Regenerating network for {city_dir}...")
    # Remember where we were so the finally-block can restore it.
    prev_cwd = os.getcwd()
    os.chdir(city_dir)
    try:
        cfg_name = "osm.netccfg"
        # Nothing to do when the city has no netconvert configuration.
        if not os.path.exists(cfg_name):
            print(f"Warning: {cfg_name} not found in {city_dir}")
            return False
        command = [NETCONVERT_BINARY, "-c", cfg_name]
        print(f"Running: {' '.join(command)}")
        proc = subprocess.run(command, capture_output=True, text=True)
        if proc.returncode != 0:
            print(f"Error regenerating network for {city_dir}:")
            print(proc.stderr)
            return False
        print(f"Successfully regenerated network for {city_dir}")
        return True
    except Exception as e:
        print(f"Exception while regenerating network for {city_dir}: {e}")
        return False
    finally:
        # Restore the original working directory no matter what happened.
        os.chdir(prev_cwd)
def main():
    """Regenerate every configured city's network and summarise the results."""
    cities = ["los_angeles", "miami"]
    print("Regenerating SUMO networks with updated traffic light configuration...")
    print(f"Using netconvert: {NETCONVERT_BINARY}")
    # Run netconvert for each city and count the successful runs.
    outcomes = [regenerate_network(city) for city in cities]
    success_count = sum(outcomes)
    print(f"\nRegeneration complete: {success_count}/{len(cities)} cities successful")
    if success_count == len(cities):
        print("All networks regenerated successfully!")
        print("The traffic lights should now follow proper green → yellow → red → green cycling.")
    else:
        print("Some networks failed to regenerate. Check the error messages above.")
if __name__ == "__main__":
main() | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/config.py | SUMOxPyPSA/config.py | import os
# SUMO Configuration
# Modify these paths according to your system
SUMO_PATH = "/usr/share/sumo" # Default for Linux
# Alternative paths for different systems:
# Windows: "C:\\Program Files (x86)\\Eclipse\\Sumo"
# macOS: "/opt/homebrew/Cellar/sumo/1.20.0/share/sumo"
# Web Server Configuration
HOST = "0.0.0.0" # Allow external connections
PORT = 8080 # Web server port
# Simulation Configuration
SIMULATION_SPEED = 0.025 # Reduced for smoother movement
UPDATE_FREQUENCY = 2 # Update every 2 frames for smoother movement
# City paths are relative to the config file location
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
NYC_PATH = os.path.join(BASE_DIR, "new_york")
MIAMI_PATH = os.path.join(BASE_DIR, "miami")
LA_PATH = os.path.join(BASE_DIR, "los_angeles")
# City configurations
CITY_CONFIGS = {
"newyork": {
"cfg_file": os.path.join(NYC_PATH, "osm.sumocfg"),
"name": "New York, USA",
"working_dir": NYC_PATH
},
"miami": {
"cfg_file": os.path.join(MIAMI_PATH, "osm.sumocfg"),
"name": "Miami, USA",
"working_dir": MIAMI_PATH
},
"losangeles": {
"cfg_file": os.path.join(LA_PATH, "osm.sumocfg"),
"name": "Los Angeles, USA",
"working_dir": LA_PATH
}
}
# Default city
DEFAULT_CITY = "losangeles" | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/fix_traffic_lights.py | SUMOxPyPSA/fix_traffic_lights.py | #!/usr/bin/env python3
"""
Script to fix traffic light logic by adding all-red phases and ensuring proper cycling
"""
import gzip
import xml.etree.ElementTree as ET
import re
import os
def fix_traffic_light_logic(input_file, output_file):
    """Insert all-red safety phases into every traffic-light program.

    Reads the gzipped SUMO network *input_file* and, for every ``tlLogic``
    element, inserts a 2-second all-red phase between a yellow phase and a
    directly following green phase (yellow -> green without an all-red
    buffer is unsafe). The rewritten network is written gzipped to
    *output_file* with an XML declaration prepended.

    Parameters
    ----------
    input_file : str
        Path to a gzipped SUMO ``.net.xml.gz`` network file.
    output_file : str
        Destination path for the gzipped, fixed network file.
    """
    print(f"Processing {input_file}...")
    # Read the network file
    with gzip.open(input_file, 'rt', encoding='utf-8') as f:
        content = f.read()
    # Parse XML
    root = ET.fromstring(content)
    # findall matches direct children only, which is where SUMO net files
    # place their tlLogic elements.
    tllogics = root.findall('tlLogic')
    print(f"Found {len(tllogics)} traffic light logics")
    fixed_count = 0
    for tl in tllogics:
        tl_id = tl.get('id')
        print(f"\nProcessing traffic light: {tl_id}")
        phases = tl.findall('phase')
        if len(phases) < 2:
            print(f" Skipping {tl_id}: insufficient phases")
            continue
        # All phases of one program share the same state-string length.
        first_state = phases[0].get('state')
        state_length = len(first_state)
        print(f" State length: {state_length}")
        # Build the new phase list, inserting all-red buffers where needed.
        new_phases = []
        for i, phase in enumerate(phases):
            duration = int(phase.get('duration'))
            state = phase.get('state')
            # Keep the original phase.
            new_phases.append({
                'duration': duration,
                'state': state
            })
            # A yellow phase followed directly by a green phase needs an
            # all-red buffer between them.
            if 'y' in state.lower() and i < len(phases) - 1:
                next_state = phases[i + 1].get('state')
                if 'g' in next_state.lower():
                    all_red_state = 'r' * state_length
                    new_phases.append({
                        'duration': 2,  # 2 seconds all-red
                        'state': all_red_state
                    })
                    print(f" Added all-red phase after phase {i+1}")
        # BUG FIX: Element.clear() removes the attributes as well as the
        # children, so the attributes must be snapshotted *before* clearing.
        # The previous code iterated tl.attrib after clear(), which was
        # already empty, silently dropping id/type/programID/offset.
        saved_attrs = dict(tl.attrib)
        tl.clear()
        for attr, value in saved_attrs.items():
            tl.set(attr, value)
        for phase_data in new_phases:
            phase_elem = ET.SubElement(tl, 'phase')
            phase_elem.set('duration', str(phase_data['duration']))
            phase_elem.set('state', phase_data['state'])
        fixed_count += 1
        print(f" Fixed {tl_id}: {len(phases)} phases -> {len(new_phases)} phases")
    # Write the fixed network file (gzipped, declaration first).
    print(f"\nWriting fixed network to {output_file}...")
    with gzip.open(output_file, 'wt', encoding='utf-8') as f:
        f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        f.write(ET.tostring(root, encoding='unicode'))
    print(f"Fixed {fixed_count} traffic light logics")
    print(f"Output written to: {output_file}")
def create_fixed_add_file(city_dir):
    """Write a reference ``traffic_lights_fixed.add.xml`` into *city_dir*.

    The file contains two template ``tlLogic`` programs (a simple 4-way
    intersection and a larger complex intersection) that demonstrate the
    proper green -> yellow -> all-red -> green cycle, intended as a model
    for hand-written traffic-light logic.

    Parameters
    ----------
    city_dir : str
        Directory in which the template file is created.
    """
    # NOTE: the original also computed the path of traffic_lights.add.xml
    # but never used it; that dead local has been removed.
    fixed_add_file = os.path.join(city_dir, 'traffic_lights_fixed.add.xml')
    print(f"Creating fixed traffic lights file: {fixed_add_file}")
    # Template for intersections with proper cycling.
    template = '''<?xml version="1.0" encoding="UTF-8"?>
<additional>
    <!-- Template for 4-way intersection with proper green-yellow-red-green cycling -->
    <tlLogic id="intersection_template" type="static" programID="0" offset="0">
        <!-- Phase 1: North-South green, East-West red -->
        <phase duration="15" state="GGGGrrrr"/>
        <!-- Phase 2: North-South yellow, East-West red -->
        <phase duration="3" state="yyyyrrrr"/>
        <!-- Phase 3: All red (safety buffer) -->
        <phase duration="2" state="rrrrrrrr"/>
        <!-- Phase 4: East-West green, North-South red -->
        <phase duration="15" state="rrrrGGGG"/>
        <!-- Phase 5: East-West yellow, North-South red -->
        <phase duration="3" state="rrrryyyy"/>
        <!-- Phase 6: All red (safety buffer) -->
        <phase duration="2" state="rrrrrrrr"/>
    </tlLogic>
    <!-- Template for complex intersection with more signal groups -->
    <tlLogic id="complex_intersection_template" type="static" programID="0" offset="0">
        <!-- Phase 1: Main directions green -->
        <phase duration="15" state="GGGGGGGrrrrrrrrrrrrrrr"/>
        <!-- Phase 2: Main directions yellow -->
        <phase duration="3" state="yyyyyyyrrrrrrrrrrrrrrr"/>
        <!-- Phase 3: All red -->
        <phase duration="2" state="rrrrrrrrrrrrrrrrrrrrrrr"/>
        <!-- Phase 4: Cross directions green -->
        <phase duration="15" state="rrrrrrrGGGGGGGrrrrrrrr"/>
        <!-- Phase 5: Cross directions yellow -->
        <phase duration="3" state="rrrrrrryyyyyyyrrrrrrrr"/>
        <!-- Phase 6: All red -->
        <phase duration="2" state="rrrrrrrrrrrrrrrrrrrrrrr"/>
    </tlLogic>
</additional>'''
    # Write with an explicit encoding to be platform-independent.
    with open(fixed_add_file, 'w', encoding='utf-8') as f:
        f.write(template)
    print(f"Created template file: {fixed_add_file}")
    print("You can use this as a reference for creating proper traffic light logic")
def main():
    """Entry point: back up, fix, and template every city's network.

    For each known city directory this creates a one-time backup of the
    gzipped network, writes a fixed copy alongside it (never overwriting
    the original), and emits a reference template add-file.
    """
    # Get the current working directory
    current_dir = os.getcwd()
    print(f"Current working directory: {current_dir}")
    cities = ['miami', 'los_angeles', 'new_york']
    print("Traffic Light Logic Fixer")
    print("=" * 50)
    for city in cities:
        city_dir = os.path.join(current_dir, city)
        print(f"Checking directory: {city_dir}")
        # Skip cities whose directory or network file is missing.
        if not os.path.exists(city_dir):
            print(f"Skipping {city}: directory not found at {city_dir}")
            continue
        net_file = os.path.join(city_dir, 'osm.net.xml.gz')
        if not os.path.exists(net_file):
            print(f"Skipping {city}: network file not found at {net_file}")
            continue
        print(f"\nProcessing {city}...")
        # Create backup (only once; an existing backup is never overwritten)
        backup_file = os.path.join(city_dir, 'osm.net.xml.gz.backup')
        if not os.path.exists(backup_file):
            import shutil
            shutil.copy2(net_file, backup_file)
            print(f"Created backup: {backup_file}")
        # Fix the network file (written to a sibling .fixed path)
        fixed_net_file = os.path.join(city_dir, 'osm.net.xml.gz.fixed')
        fix_traffic_light_logic(net_file, fixed_net_file)
        # Create template add file
        create_fixed_add_file(city_dir)
        print(f"\nFor {city}:")
        print(f" - Original: {net_file}")
        print(f" - Fixed: {fixed_net_file}")
        print(f" - Backup: {backup_file}")
        print(f" - Template: {city_dir}/traffic_lights_fixed.add.xml")
    print("\n" + "=" * 50)
    print("Next steps:")
    print("1. Review the fixed network files")
    print("2. If satisfied, replace the original files:")
    print(" mv osm.net.xml.gz.fixed osm.net.xml.gz")
    print("3. Use the template files as reference for custom traffic light logic")
    print("4. Test the simulation to ensure proper traffic light cycling")
if __name__ == "__main__":
main() | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/fix_miami_traffic_lights.py | SUMOxPyPSA/fix_miami_traffic_lights.py | #!/usr/bin/env python3
"""
Script to fix Miami traffic lights with proper logic for different signal group counts
"""
import gzip
import xml.etree.ElementTree as ET
import os
def _cycle_segments(n):
    """Return (start, length) signal groups that take turns being green.

    Mirrors the original per-count logic: single links for 2 or 3 signals,
    opposing pairs for 4, and four quarters (last one absorbing the
    remainder) for 5 or more. A degenerate program with fewer than 2 links
    cycles the whole state as one group (the previous quarter arithmetic
    produced states of inconsistent length for that case).
    """
    if n < 2:
        return [(0, n)]
    if n == 2:
        return [(0, 1), (1, 1)]
    if n == 3:
        return [(0, 1), (1, 1), (2, 1)]
    if n == 4:
        # Pair opposing movements: first two together, last two together.
        return [(0, 2), (2, 2)]
    quarter = max(1, n // 4)
    return [(0, quarter), (quarter, quarter), (2 * quarter, quarter),
            (3 * quarter, n - 3 * quarter)]


def _build_phases(n):
    """Build the green/yellow/all-red phase dicts for a program of n links.

    Each segment gets 30 s green and 3 s yellow, followed by a 2 s all-red
    safety buffer — identical durations and state strings to the original
    hand-expanded tables.
    """
    phases = []
    all_red = 'r' * n
    for start, length in _cycle_segments(n):
        tail = n - start - length
        phases.append({'duration': 30, 'state': 'r' * start + 'G' * length + 'r' * tail})
        phases.append({'duration': 3, 'state': 'r' * start + 'y' * length + 'r' * tail})
        phases.append({'duration': 2, 'state': all_red})
    return phases


def create_fixed_traffic_lights(netfile):
    """Create fixed traffic light logic that works for all signal group counts.

    Reads the gzipped SUMO network *netfile* and returns a dict mapping
    traffic-light id -> program description (type, programID '1', offset
    '0', and a phase list) in which at least one signal is green in every
    non-buffer phase.

    Parameters
    ----------
    netfile : str
        Path to a gzipped SUMO ``.net.xml.gz`` network file.

    Returns
    -------
    dict
        ``{tl_id: {'type', 'programID', 'offset', 'phases'}}``
    """
    traffic_lights = {}
    with gzip.open(netfile, 'rt', encoding='utf-8') as f:
        tree = ET.parse(f)
    root = tree.getroot()
    for tl in root.findall('tlLogic'):
        tl_id = tl.get('id')
        if tl_id is None:
            continue
        # The state length of the first phase gives the signal-group count.
        phases = tl.findall('phase')
        if not phases:
            continue
        original_state_length = len(phases[0].get('state'))
        traffic_lights[tl_id] = {
            'type': tl.get('type', 'static'),
            'programID': '1',
            'offset': '0',
            'phases': _build_phases(original_state_length),
        }
    return traffic_lights
def generate_fixed_traffic_lights_xml(traffic_lights, output_file):
    """Serialize traffic-light programs into an additional-file XML.

    Emits one <tlLogic> element (with its <phase> children) per entry of
    *traffic_lights* under a root <additional> element, prefixed with an
    XML declaration, and returns the number of programs written.
    """
    root = ET.Element('additional')
    for tl_id, tl_info in traffic_lights.items():
        # Attribute order matches the dict insertion order below.
        logic = ET.SubElement(root, 'tlLogic', {
            'id': tl_id,
            'type': tl_info['type'],
            'programID': tl_info['programID'],
            'offset': tl_info['offset'],
        })
        for phase in tl_info['phases']:
            ET.SubElement(logic, 'phase', {
                'duration': str(phase['duration']),
                'state': phase['state'],
            })
    document = '<?xml version="1.0" encoding="UTF-8"?>\n' + ET.tostring(root, encoding='unicode')
    with open(output_file, 'w', encoding='utf-8') as f:
        f.write(document)
    return len(traffic_lights)
def main():
    """Entry point: build and write the fixed Miami traffic-light programs.

    Reads ``../miami/osm.net.xml.gz``, generates corrected phase programs,
    writes them to ``traffic_lights_fixed_miami.add.xml``, and prints one
    example program per distinct signal-group count.
    """
    print("Fixing Miami Traffic Lights")
    print("=" * 50)
    print("Fixing issues with traffic lights that are always red")
    print("=" * 50)
    # Path is relative to this script's expected run location — TODO confirm.
    city_dir = '../miami'
    net_file = os.path.join(city_dir, 'osm.net.xml.gz')
    if not os.path.exists(net_file):
        print(f"Network file not found: {net_file}")
        return
    print(f"Processing Miami...")
    # Create fixed traffic lights
    traffic_lights = create_fixed_traffic_lights(net_file)
    print(f" Created {len(traffic_lights)} traffic lights")
    # Generate the add file
    output_file = os.path.join(city_dir, 'traffic_lights_fixed_miami.add.xml')
    count = generate_fixed_traffic_lights_xml(traffic_lights, output_file)
    print(f" Generated {output_file} with {count} traffic lights")
    # Show examples for different signal group counts
    # (keep the first program seen for each distinct state length)
    examples = {}
    for tl_id, tl_info in traffic_lights.items():
        signal_count = len(tl_info['phases'][0]['state'])
        if signal_count not in examples:
            examples[signal_count] = tl_info
    for signal_count, tl_info in sorted(examples.items()):
        print(f" Example {signal_count} signals: {len(tl_info['phases'])} phases")
        print(f" Phase durations: {[p['duration'] for p in tl_info['phases']]}")
        print(f" Phase 1: {tl_info['phases'][0]['state']}")
        if len(tl_info['phases']) > 3:
            print(f" Phase 4: {tl_info['phases'][3]['state']}")
    print("\n" + "=" * 50)
    print("Summary:")
    print("- Fixed traffic lights that were always red")
    print("- Implemented proper logic for different signal group counts")
    print("- Ensured at least one signal is green in each phase")
    print("- Generated traffic_lights_fixed_miami.add.xml")
if __name__ == "__main__":
main() | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/map_to_power.py | SUMOxPyPSA/map_to_power.py | import os
import subprocess
import pandas as pd
import sys
def find_python_executable():
    """Locate a working Python interpreter, falling back to sys.executable.

    Probes a fixed list of candidate paths/names with ``--version`` and
    returns the first one that runs successfully.
    """
    candidates = (
        '/bin/python',
        '/usr/bin/python',
        '/usr/bin/python3',
        'python3',
        'python',
    )
    for candidate in candidates:
        try:
            # A quick --version probe confirms the executable exists and runs.
            probe = subprocess.run([candidate, '--version'],
                                   capture_output=True, text=True, timeout=5)
        except (FileNotFoundError, subprocess.TimeoutExpired):
            continue
        if probe.returncode == 0:
            print(f"Using Python executable: {candidate}")
            return candidate
    # None of the candidates worked; fall back to the running interpreter.
    print(f"Using fallback Python executable: {sys.executable}")
    return sys.executable
def convert_osm_to_pypsa(osm_file, output_dir, gridkit_script='tools/gridkit.py'):
    """
    Converts an OpenStreetMap file to a PyPSA network format using GridKit.

    Runs the GridKit script as a subprocess (from its own directory, with a
    PostgreSQL environment prepared), then converts its high-voltage
    vertices/edges CSV output into PyPSA-style ``buses.csv`` and
    ``lines.csv`` in *output_dir*. Errors are reported via print and an
    early return; nothing is raised to the caller.

    Args:
        osm_file (str): Path to the input OSM file (e.g., 'new_york/osm_nyc_bbox.osm.xml.gz').
        output_dir (str): Directory to save the PyPSA-compatible CSV files.
        gridkit_script (str): Path to the GridKit main python script.
    """
    # Validate inputs before doing any work.
    if not os.path.exists(osm_file):
        print(f"Error: OSM file not found at {osm_file}")
        return
    if not os.path.exists(gridkit_script):
        print(f"Error: GridKit script not found at {gridkit_script}")
        return
    os.makedirs(output_dir, exist_ok=True)
    # Find the appropriate Python executable
    python_exe = find_python_executable()
    # --- 1. Run GridKit to extract the power grid ---
    print("Running GridKit to extract power grid from OSM data...")
    # GridKit produces output files in its own directory
    gridkit_dir = os.path.dirname(gridkit_script)  # NOTE(review): unused local
    cmd = [python_exe, gridkit_script, osm_file]  # NOTE(review): unused; the actual command is rebuilt below
    # We must run gridkit from within its directory
    run_dir = os.path.dirname(gridkit_script)
    try:
        # Note: We pass the absolute path to the osm_file now
        abs_osm_file = os.path.abspath(osm_file)
        # Add the 'util' directory to the python path for the subprocess
        env = os.environ.copy()
        util_path = os.path.join(run_dir, 'util')
        env['PYTHONPATH'] = f"{util_path}:{env.get('PYTHONPATH', '')}"
        # Set PostgreSQL environment variables to avoid interactive prompts
        env['PGUSER'] = 'gridkit' # Use the gridkit user we created
        env['PGHOST'] = 'localhost'
        env['PGPORT'] = '5432'
        env['PGDATABASE'] = 'gridkit'
        env['PGPASSWORD'] = 'gridkit123' # Password for gridkit user
        print(f"Running command: {python_exe} {os.path.basename(gridkit_script)} {abs_osm_file} --no-interactive")
        print(f"Working directory: {run_dir}")
        print(f"Environment variables: PGUSER={env['PGUSER']}, PGHOST={env['PGHOST']}, PGDATABASE={env['PGDATABASE']}")
        result = subprocess.run([python_exe, os.path.basename(gridkit_script), abs_osm_file, '--no-interactive'],
                                cwd=run_dir, env=env, capture_output=True, text=True)
        print(f"GridKit exit code: {result.returncode}")
        if result.stdout:
            print(f"GridKit stdout: {result.stdout}")
        if result.stderr:
            print(f"GridKit stderr: {result.stderr}")
        if result.returncode != 0:
            print(f"Error running GridKit: Command returned non-zero exit status {result.returncode}")
            return
        print("GridKit processing complete.")
    except subprocess.CalledProcessError as e:
        # NOTE(review): run() without check=True never raises this; kept for safety.
        print(f"Error running GridKit: {e}")
        print(f"GridKit stdout: {e.stdout}")
        print(f"GridKit stderr: {e.stderr}")
        return
    except FileNotFoundError as e:
        print(f"Error: Python executable not found: {e}")
        return
    except Exception as e:
        print(f"Unexpected error: {e}")
        return
    # --- 2. Process GridKit's output ---
    print("Converting GridKit output to PyPSA format...")
    vertices_file = os.path.join(run_dir, 'gridkit-highvoltage-vertices.csv')
    edges_file = os.path.join(run_dir, 'gridkit-highvoltage-edges.csv')
    if not os.path.exists(vertices_file) or not os.path.exists(edges_file):
        print("Error: GridKit did not produce the expected output files.")
        return
    # Read GridKit output
    vertices_df = pd.read_csv(vertices_file)
    edges_df = pd.read_csv(edges_file)
    # --- 3. Create PyPSA buses.csv ---
    # Map GridKit vertices to PyPSA buses
    buses = pd.DataFrame()
    buses['id'] = 'Bus ' + vertices_df['id'].astype(str)
    buses['x'] = vertices_df['lon']
    buses['y'] = vertices_df['lat']
    buses['v_nom'] = vertices_df['voltage'].fillna(220) # Default voltage if not specified
    buses['control'] = 'PV' # Assume all are PV buses for simplicity
    # Save buses.csv
    buses_path = os.path.join(output_dir, 'buses.csv')
    buses.to_csv(buses_path, index=False)
    print(f"Created buses.csv at {buses_path}")
    # --- 4. Create PyPSA lines.csv ---
    # Map GridKit edges to PyPSA lines
    lines = pd.DataFrame()
    lines['id'] = 'Line ' + edges_df.index.astype(str)
    lines['bus0'] = 'Bus ' + edges_df['v0'].astype(str)
    lines['bus1'] = 'Bus ' + edges_df['v1'].astype(str)
    lines['x'] = 0.0001 # Placeholder for reactance
    lines['r'] = 0.0001 # Placeholder for resistance
    lines['s_nom'] = 1000 # Placeholder for nominal power (MVA)
    # Save lines.csv
    lines_path = os.path.join(output_dir, 'lines.csv')
    lines.to_csv(lines_path, index=False)
    print("\nPyPSA network files created successfully!")
if __name__ == '__main__':
    # --- Configuration ---
    # Note: This assumes you have the uncompressed OSM file.
    osm_input_gz = 'new_york/osm_nyc_bbox.osm.xml.gz'
    osm_input_uncompressed = 'new_york/osm_nyc_bbox.osm.xml'
    pypsa_output_dir = 'pypsa_network/new_york'
    # --- Run the conversion ---
    if os.path.exists(osm_input_uncompressed):
        convert_osm_to_pypsa(osm_input_uncompressed, pypsa_output_dir, gridkit_script='tools/gridkit.py')
    else:
        # Try to uncompress first
        if os.path.exists(osm_input_gz):
            import gzip
            import shutil
            print(f"Uncompressing {osm_input_gz}...")
            # Stream-decompress the .gz archive to the plain .xml path.
            with gzip.open(osm_input_gz, 'rb') as f_in:
                with open(osm_input_uncompressed, 'wb') as f_out:
                    shutil.copyfileobj(f_in, f_out)
            # Now run the conversion
            convert_osm_to_pypsa(osm_input_uncompressed, pypsa_output_dir, gridkit_script='tools/gridkit.py')
        else:
            # Neither the uncompressed nor the compressed input exists.
            print(f"Error: Input OSM file not found: {osm_input_uncompressed} or {osm_input_gz}")
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/app.py | SUMOxPyPSA/app.py | from flask import Flask, render_template, send_from_directory
from flask_socketio import SocketIO, emit
import traci
import time
import threading
import os
import sys
import tempfile
from config import *
from sumo_config import SUMO_COMMON_CONFIG, CITY_CONFIGS as SUMO_CITY_CONFIGS
app = Flask(__name__, static_url_path='/static', static_folder='static')
app.config['SECRET_KEY'] = 'A34F6g7JK0c5N'
socketio = SocketIO(app, async_mode='threading')
# Get SUMO binary path from config
SUMO_BINARY = os.path.join(SUMO_PATH, "bin/sumo") # or "sumo-gui" for the GUI version
# Global variables
simulation_running = False
simulation_thread = None
stop_event = threading.Event()
CURRENT_CITY = DEFAULT_CITY
# Traffic light state tracking
traffic_light_states = {} # Store current state for each traffic light
traffic_light_phases = {} # Store phase information for each traffic light
def create_temp_sumocfg(city):
    """Write a throwaway SUMO .sumocfg for *city* and return its path.

    The configuration is assembled from the city's SUMO file names plus
    the shared processing/time options and stored as ``temp.sumocfg``
    inside the city's working directory.
    """
    city_dir = CITY_CONFIGS[city]["working_dir"]
    city_sumo_config = SUMO_CITY_CONFIGS[city.upper()]
    temp_path = os.path.join(city_dir, "temp.sumocfg")
    # Build the whole document in memory, then write it out in one go.
    parts = ['<?xml version="1.0" encoding="UTF-8"?>\n']
    parts.append('<configuration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://sumo.dlr.de/xsd/sumoConfiguration.xsd">\n')
    # Input files
    parts.append(' <input>\n')
    parts.append(f' <net-file value="{os.path.basename(city_sumo_config["net-file"])}"/>\n')
    parts.append(f' <route-files value="{os.path.basename(city_sumo_config["route-files"])}"/>\n')
    parts.append(f' <additional-files value="{os.path.basename(city_sumo_config["additional-files"])}"/>\n')
    parts.append(' </input>\n')
    # Shared processing options
    parts.append(' <processing>\n')
    for key, value in SUMO_COMMON_CONFIG["processing"].items():
        parts.append(f' <{key} value="{value}"/>\n')
    parts.append(' </processing>\n')
    # Shared time options
    parts.append(' <time>\n')
    for key, value in SUMO_COMMON_CONFIG["time"].items():
        parts.append(f' <{key} value="{value}"/>\n')
    parts.append(' </time>\n')
    parts.append('</configuration>\n')
    with open(temp_path, 'w') as f:
        f.write(''.join(parts))
    return temp_path
def sumo_simulation(city=DEFAULT_CITY):
    """Run a SUMO simulation for *city* and stream updates over Socket.IO.

    Starts SUMO via TraCI from the city's working directory using a
    temporary generated .sumocfg, then steps the simulation until it
    empties or ``stop_event`` is set, periodically emitting vehicle and
    traffic-light positions/states to connected clients. Cleans up the
    temp config, restores the CWD, and clears ``simulation_running``.
    """
    global simulation_running
    if city not in CITY_CONFIGS:
        print(f"City {city} not found in configurations.")
        return
    city_config = CITY_CONFIGS[city]
    working_dir = city_config["working_dir"]
    print(f"Starting simulation for {city_config['name']}")
    print(f"Working directory: {working_dir}")
    # Store current directory to restore later
    original_dir = os.getcwd()
    temp_cfg = None
    try:
        # Change to the city's working directory
        os.chdir(working_dir)
        print(f"Changed to directory: {os.getcwd()}")
        # Create temporary SUMO configuration
        temp_cfg = create_temp_sumocfg(city)
        print(f"Created temporary config at: {temp_cfg}")
        simulation_running = True
        stop_event.clear()
        # Start SUMO with the temporary config
        sumo_cmd = [SUMO_BINARY, "-c", os.path.basename(temp_cfg)]
        print(f"Running command: {' '.join(sumo_cmd)}")
        try:
            traci.start(sumo_cmd)
            print("Successfully connected to SUMO")
            # Switch to our randomized traffic light program (programID '1')
            for tl_id in traci.trafficlight.getIDList():
                try:
                    traci.trafficlight.setProgram(tl_id, "1")
                except:
                    pass # Ignore errors if program doesn't exist
        except Exception as e:
            print(f"Failed to connect to SUMO: {str(e)}")
            raise
        # Counter for controlling update frequency
        step_counter = 0
        while traci.simulation.getMinExpectedNumber() > 0 and not stop_event.is_set():
            traci.simulationStep()
            step_counter += 1
            # Fix traffic light logic to ensure proper cycling
            fix_traffic_light_logic()
            # Send updates based on configured frequency
            if step_counter % UPDATE_FREQUENCY == 0:
                # Collect a GPS position and state for each traffic light.
                traffic_lights = []
                for idx, tl_id in enumerate(traci.trafficlight.getIDList()):
                    try:
                        controlled_links = traci.trafficlight.getControlledLinks(tl_id)
                        gps_position = None
                        # Try fromEdge of controlled links
                        if controlled_links and controlled_links[0]:
                            first_link = controlled_links[0][0]
                            from_edge = first_link[0]
                            try:
                                edge_shape = traci.edge.getShape(from_edge)
                                if edge_shape:
                                    gps_position = traci.simulation.convertGeo(*edge_shape[0])
                            except:
                                pass
                        # If not found, try junction position
                        if gps_position is None:
                            try:
                                # First try to get the junction ID that this traffic light controls
                                junction_id = None
                                if controlled_links and controlled_links[0]:
                                    # Get junction from the first controlled link
                                    # NOTE(review): TraCI controlled links are lane ids;
                                    # feeding them to edge.getFromJunction may fail,
                                    # which is why each lookup is wrapped in try.
                                    first_link = controlled_links[0][0]
                                    from_edge = first_link[0]
                                    to_edge = first_link[1]
                                    # Try to find the junction that connects these edges
                                    try:
                                        junction_id = traci.edge.getFromJunction(from_edge)
                                    except:
                                        try:
                                            junction_id = traci.edge.getToJunction(to_edge)
                                        except:
                                            pass
                                # If we found a junction ID, get its position
                                if junction_id:
                                    junction_pos = traci.junction.getPosition(junction_id)
                                    gps_position = traci.simulation.convertGeo(*junction_pos)
                                else:
                                    # Fallback: try using tl_id as junction ID (original approach)
                                    junction_pos = traci.junction.getPosition(tl_id)
                                    gps_position = traci.simulation.convertGeo(*junction_pos)
                            except:
                                # If all else fails, skip this traffic light
                                continue
                        if gps_position is not None:
                            state = traci.trafficlight.getRedYellowGreenState(tl_id)
                            traffic_lights.append({
                                'id': tl_id,
                                'x': gps_position[0],
                                'y': gps_position[1],
                                'state': state
                            })
                    except:
                        # Skip this traffic light if any error occurs
                        continue
                # Collect every vehicle's GPS position and heading.
                vehicles = []
                for vehicle_id in traci.vehicle.getIDList():
                    position = traci.vehicle.getPosition(vehicle_id)
                    gps_position = traci.simulation.convertGeo(*position)
                    angle = traci.vehicle.getAngle(vehicle_id)
                    vehicles.append({'id': vehicle_id, 'x': gps_position[0], 'y': gps_position[1], 'angle': angle})
                # Send both vehicles and traffic lights
                socketio.emit('update', {
                    'vehicles': vehicles,
                    'traffic_lights': traffic_lights
                })
            # Use configured simulation speed
            time.sleep(SIMULATION_SPEED)
        traci.close()
    except Exception as e:
        print(f"Error in simulation: {e}")
        import traceback
        print("Full traceback:")
        print(traceback.format_exc())
    finally:
        # Clean up temporary file
        if temp_cfg and os.path.exists(temp_cfg):
            os.unlink(temp_cfg)
        # Restore original directory
        os.chdir(original_dir)
        simulation_running = False
@socketio.on('connect')
def handle_connect():
    """Handle client connection - don't start simulation automatically"""
    # Clients must explicitly request a city via 'change_city' or 'restart'.
    pass
@socketio.on('change_city')
def handle_change_city(data):
    """Socket.IO handler: switch the active city and restart the simulation.

    Ignores unknown city keys. Stops any running simulation (via
    ``stop_event``) before starting a new background simulation thread.
    """
    global simulation_thread, CURRENT_CITY
    city = data.get('city', DEFAULT_CITY)
    if city not in CITY_CONFIGS:
        return
    CURRENT_CITY = city
    print(f"Changing city to {CITY_CONFIGS[CURRENT_CITY]['name']}")
    # Stop current simulation if running
    if simulation_running:
        stop_event.set()
        # Bounded join so a stuck simulation cannot hang the handler.
        if simulation_thread:
            simulation_thread.join(timeout=2)
    # Start a new simulation with the selected city
    simulation_thread = threading.Thread(target=sumo_simulation, args=(CURRENT_CITY,))
    simulation_thread.start()
@socketio.on('restart')
def handle_restart(data):
    """Socket.IO handler: restart the simulation for the requested city.

    Defaults to the currently selected city when no 'city' key is given;
    unknown city keys are ignored.
    """
    global simulation_thread, CURRENT_CITY
    city = data.get('city', CURRENT_CITY)
    if city not in CITY_CONFIGS:
        return
    CURRENT_CITY = city
    print(f"Restarting simulation for {CITY_CONFIGS[CURRENT_CITY]['name']}")
    # Stop current simulation if running
    if simulation_running:
        stop_event.set()
        # Bounded join so a stuck simulation cannot hang the handler.
        if simulation_thread:
            simulation_thread.join(timeout=2)
    # Start a new simulation
    simulation_thread = threading.Thread(target=sumo_simulation, args=(CURRENT_CITY,))
    simulation_thread.start()
@app.route('/')
def index():
    """Serve the main map page."""
    return render_template('index.html')
def fix_traffic_light_logic():
    """Fix traffic light logic to ensure proper green-yellow-red-green cycling

    Tracks each traffic light's current/previous state across calls and,
    when a yellow state transitions directly to green, forces an all-red
    state briefly via TraCI. Intended to be called once per simulation step.
    """
    global traffic_light_states, traffic_light_phases
    for tl_id in traci.trafficlight.getIDList():
        if tl_id not in traffic_light_states:
            # First sighting of this light: initialise its tracking record.
            traffic_light_states[tl_id] = {
                'current_state': None,
                'last_state': None,
                'state_duration': 0,
                'phase_index': 0
            }
            traffic_light_phases[tl_id] = []
        current_state = traci.trafficlight.getRedYellowGreenState(tl_id)
        tl_state = traffic_light_states[tl_id]
        # If state changed, update tracking
        if current_state != tl_state['current_state']:
            tl_state['last_state'] = tl_state['current_state']
            tl_state['current_state'] = current_state
            tl_state['state_duration'] = 0
        else:
            tl_state['state_duration'] += 1
        # Check for improper transitions and fix them
        # NOTE(review): the final clause requires the previous (yellow) state
        # to contain no 'r' at all; mixed states such as "yyrr" are never
        # caught — confirm this is intended.
        if (tl_state['last_state'] and
            'y' in tl_state['last_state'].lower() and
            'g' in current_state.lower() and
            'r' not in tl_state['last_state'].lower()):
            # This is a yellow-to-green transition without red - fix it
            print(f"Fixing improper transition for {tl_id}: {tl_state['last_state']} -> {current_state}")
            # Force a red state for 2 seconds before allowing green
            # NOTE(review): 20 steps is 2 s only at a 0.1 s step length, but
            # sumo_config.py sets step-length 0.5 — verify this constant.
            if tl_state['state_duration'] < 20: # 2 seconds at 0.1s step length
                # Create a red state for all lanes
                red_state = 'r' * len(current_state)
                try:
                    traci.trafficlight.setRedYellowGreenState(tl_id, red_state)
                except:
                    pass # Ignore errors if we can't set the state
if __name__ == "__main__":
    # Entry point: start the Flask-SocketIO web server (HOST/PORT from config.py).
    print(f"NYC path: {NYC_PATH}")
    socketio.run(app, debug=True, host=HOST, port=PORT)
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/sumo_config.py | SUMOxPyPSA/sumo_config.py | """
Unified SUMO configuration settings for all cities
"""
import os

# Base directory is where this file is located
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Common configuration settings for all cities.  Values are strings because
# they are written verbatim into the SUMO configuration.
# Fix: stripped trailing extraction garbage ("| python | MIT | ...") that made
# the closing brace line a syntax error.
SUMO_COMMON_CONFIG = {
    # Collision/teleport and routing behaviour of the simulation core.
    'processing': {
        'time-to-teleport': '300',
        'collision.action': 'none',
        'collision.stoptime': '0',
        'collision.check-junctions': 'false',
        'ignore-accidents': 'true',
        'lateral-resolution': '0.8',
        'route-steps': '200',
        'no-step-log': 'true',
        'no-internal-links': 'false',
        'ignore-route-errors': 'true'
    },
    # Dynamic rerouting device settings.
    'routing': {
        'device.rerouting.adaptation-interval': '1',
        'device.rerouting.adaptation-weight': '0.0',
        'device.rerouting.adaptation-steps': '180',
        'device.rerouting.with-taz': 'false',
        'device.rerouting.init-with-loaded-weights': 'true',
        'device.rerouting.threads': '0',
        'device.rerouting.synchronize': 'true',
        'device.rerouting.output': 'rerouting.xml'
    },
    # Simulation clock: 0.5 s steps, one hour of simulated time.
    'time': {
        'step-length': '0.5',
        'start': '0',
        'end': '3600'
    },
    'report': {
        'verbose': 'true',
        'no-step-log': 'true'
    },
    # Applied only when running with the SUMO GUI.
    'gui_only': {
        'start': 'true',
        'quit-on-end': 'true'
    }
}

# City-specific input files (all cities currently share the same layout).
CITY_CONFIGS = {
    "MANCHESTER": {
        "net-file": "osm.net.xml.gz",
        "route-files": "osm.passenger.trips.xml",
        "additional-files": "osm.poly.xml.gz,traffic_lights_safe.add.xml"
    },
    "NEWYORK": {
        "net-file": "osm.net.xml.gz",
        "route-files": "osm.passenger.trips.xml",
        "additional-files": "osm.poly.xml.gz,traffic_lights_safe.add.xml"
    },
    "MIAMI": {
        "net-file": "osm.net.xml.gz",
        "route-files": "osm.passenger.trips.xml",
        "additional-files": "osm.poly.xml.gz,traffic_lights_safe.add.xml"
    },
    "LOSANGELES": {
        "net-file": "osm.net.xml.gz",
        "route-files": "osm.passenger.trips.xml",
        "additional-files": "osm.poly.xml.gz,traffic_lights_safe.add.xml"
    }
}
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/tools/gridkit.py | SUMOxPyPSA/tools/gridkit.py | #!/usr/bin/env python
"""GridKit is a power grid extraction toolkit.
Usage:
python gridkit.py path/to/data-file.osm --filter \\
--poly path/to/area.poly \\
--pg user=gridkit database=gridkit
GridKit will create a database, import the power data, run the
extraction procedures, and write CSV's with the high-voltage network
extract.
"""
from __future__ import print_function, unicode_literals, division
import os, sys, io, re, csv, argparse, logging, subprocess, functools, getpass, operator
from util.postgres import PgWrapper as PgClient, PSQL
from util.which import which
__author__ = 'Bart Wiegmans'
# Python 3 renamed raw_input to input; alias it so the interactive
# prompts below work on both Python 2 and 3.
if sys.version_info >= (3,0):
    raw_input = input
def ask(question, default=None, type=str):
    """Prompt the user and coerce the answer with *type*.

    When *default* is given it is shown in brackets and returned on empty
    input.  An answer that *type* cannot parse yields None.  Ctrl+C exits
    the program with status 1.
    """
    prompt = question if default is None else "{0} [{1}]".format(question, default)
    try:
        answer = raw_input(prompt + ' ')
    except KeyboardInterrupt:
        print('')
        quit(1)
    if not answer:
        return default
    try:
        return type(answer)
    except ValueError:
        return None
def ask_db_params(pg_client, database_params):
    """Interactively prompt for PostgreSQL connection parameters until a
    connection succeeds.

    Mutates *database_params* in place with the working parameters and leaves
    *pg_client* configured with them.

    NOTE(review): if the very first check_connection() succeeds, the loop body
    never runs and ``new_params`` below is unbound (NameError).  Callers only
    invoke this after a failed connection, so it is latent -- confirm.
    """
    while not pg_client.check_connection():
        print("Please provide the PostgreSQL connection parameters (press Ctrl+C to exit)")
        user = ask("PostgreSQL user name:", default=(database_params.get('user') or getpass.getuser()))
        host = ask("PostgreSQL hostname:", default=(database_params.get('host') or 'localhost'))
        port = ask("PostgreSQL port number:", type=int, default=(database_params.get('port') or 5432))
        dbnm = ask("PostgreSQL database:", type=str, default=(database_params.get('database') or user))
        new_params = database_params.copy()
        new_params.update(user=user, host=host, port=port, database=dbnm)
        pg_client.update_params(new_params)
    print("Connection succesful")
    database_params.update(**new_params)
def setup_database(pg_client, database_name, interactive):
    """Create (or reuse) *database_name* and enable the hstore/PostGIS extensions.

    When *interactive*, asks before reusing an existing database and lets the
    user pick another name.  Returns the database name actually used and
    leaves *pg_client* connected to it.
    """
    io_handle = io.StringIO()
    pg_client.do_getcsv('SELECT datname FROM pg_database', io_handle)
    io_handle.seek(0,0)
    # First CSV column of each row is a database name (header row included,
    # which is harmless for the membership test below).
    databases = list(map(operator.itemgetter(0), csv.reader(io_handle)))
    while interactive and database_name in databases:
        overwrite = ask("Database {0} exists. Overwrite [y/N]?".format(database_name),
                        type=lambda s: s.lower().startswith('y'))
        if overwrite:
            break
        database_name = ask("Database name:", default='gridkit')
    if not database_name in databases:
        pg_client.do_createdb(database_name)
    # Reconnect against the (possibly new) target database.
    pg_client.update_params({'database': database_name})
    pg_client.check_connection()
    pg_client.do_query('CREATE EXTENSION IF NOT EXISTS hstore;')
    pg_client.do_query('CREATE EXTENSION IF NOT EXISTS postgis;')
    print("Database", database_name, "set up")
    return database_name
def do_import(osm_data_file, database_name, database_params):
    """Load *osm_data_file* into PostgreSQL database *database_name* via osm2pgsql.

    Connection settings from *database_params* are translated into osm2pgsql
    command-line flags; the password (if any) is handed over through the
    environment.

    Raises:
        subprocess.CalledProcessError: if osm2pgsql exits non-zero.
    """
    if 'password' in database_params:
        # BUG FIX: libpq reads the password from PGPASSWORD ('PGPASSFILE' names
        # the password *file*); 'PGPASS' is not a recognized variable, so the
        # supplied password was silently ignored.
        os.environ['PGPASSWORD'] = database_params['password']
    # -c create (replace existing), -k keep tags in hstore, -s slim mode,
    # -S tag-transform style file.
    command_line = [OSM2PGSQL, '-d', database_name,
                    '-c', '-k', '-s', '-S', POWERSTYLE]
    if 'port' in database_params:
        command_line.extend(['-P', str(database_params['port'])])
    if 'user' in database_params:
        command_line.extend(['-U', database_params['user']])
    if 'host' in database_params:
        command_line.extend(['-H', database_params['host']])
    command_line.append(osm_data_file)
    logging.info("Calling %s", ' '.join(command_line))
    subprocess.check_call(command_line)
def do_conversion(pg_client, voltage_cutoff=220000):
    """Run the GridKit SQL extraction pipeline in dependency order.

    The scripts under ``src/`` must run in exactly this sequence; the
    high-voltage selection script is rewritten on the fly to use
    *voltage_cutoff* (in volts) instead of its hard-coded 220000.
    """
    # Shorthand: f('x.sql') -> <BASE_DIR>/src/x.sql
    f = functools.partial(os.path.join, BASE_DIR, 'src')
    # preparing tables
    logging.info("Preparing tables")
    pg_client.do_queryfile(f('prepare-functions.sql'))
    pg_client.do_queryfile(f('prepare-tables.sql'))
    # shared node algorithms
    logging.info("Shared-node algorithms started")
    pg_client.do_queryfile(f('node-1-find-shared.sql'))
    pg_client.do_queryfile(f('node-2-merge-lines.sql'))
    pg_client.do_queryfile(f('node-3-line-joints.sql'))
    logging.info("Shared-node algorithms finished")
    # spatial algorithms
    logging.info("Spatial algorithms started")
    pg_client.do_queryfile(f('spatial-1-merge-stations.sql'))
    pg_client.do_queryfile(f('spatial-2-eliminate-line-overlap.sql'))
    pg_client.do_queryfile(f('spatial-3-attachment-joints.sql'))
    pg_client.do_queryfile(f('spatial-4-terminal-intersections.sql'))
    pg_client.do_queryfile(f('spatial-5-terminal-joints.sql'))
    pg_client.do_queryfile(f('spatial-6-merge-lines.sql'))
    logging.info("Spatial algorithms finished")
    # topological algoritms
    logging.info("Topological algorithms started")
    pg_client.do_queryfile(f('topology-1-connections.sql'))
    pg_client.do_queryfile(f('topology-2-dangling-joints.sql'))
    pg_client.do_queryfile(f('topology-3-redundant-splits.sql'))
    pg_client.do_queryfile(f('topology-4-redundant-joints.sql'))
    logging.info("Topological algorithms finished")
    logging.info("Electric algorithms started")
    pg_client.do_queryfile(f('electric-1-tags.sql'))
    pg_client.do_queryfile(f('electric-2-patch.sql'))
    pg_client.do_queryfile(f('electric-3-compute.sql'))
    pg_client.do_queryfile(f('electric-4-reference.sql'))
    logging.info("Electric algorithms finished")
    pg_client.do_queryfile(f('topology-3a-assign-tags.sql'))
    pg_client.do_queryfile(f('topology-3b-electrical-properties.sql'))
    # Textual substitution of the default cutoff with the requested one.
    with io.open(f('topology-4-high-voltage-network.sql'), 'r') as handle:
        query_text = handle.read().replace('220000', str(voltage_cutoff))
    pg_client.do_query(query_text)
    pg_client.do_queryfile(f('topology-5-abstraction.sql'))
    logging.info("Topological algorithms done")
def export_network_csv(pg_client, full_export=False, base_name='gridkit'):
    """Write the extracted network to CSV files named after *base_name*.

    Always exports the high-voltage vertex/link tables; with *full_export*
    the unfiltered tables are written first as well.
    """
    logging.info("Running export")
    exports = [('heuristic_vertices_highvoltage', '-highvoltage-vertices.csv'),
               ('heuristic_links_highvoltage', '-highvoltage-links.csv')]
    if full_export:
        exports = [('heuristic_vertices', '-all-vertices.csv'),
                   ('heuristic_links', '-all-links.csv')] + exports
    for table, suffix in exports:
        with io.open(base_name + suffix, 'w') as handle:
            pg_client.do_getcsv(table, handle)
    logging.info("Export done")
def file_age_cmp(a, b):
    """Compare two paths by modification time.

    Returns a negative number when *a* is younger (more recently modified)
    than *b*, a positive number when *a* is older, and zero when equal.
    """
    mtime_a = os.path.getmtime(a)
    mtime_b = os.path.getmtime(b)
    return mtime_b - mtime_a
if __name__ == '__main__':
    logging.basicConfig(format='%(levelname)s [%(asctime)s] / %(message)s', level=logging.INFO)
    # Locate the external OSM tools on PATH (each may be None when absent).
    # NOTE(review): OSMOSIS is looked up but never used below -- confirm.
    OSM2PGSQL = which('osm2pgsql')
    OSMCONVERT = which('osmconvert')
    OSMFILTER = which('osmfilter')
    OSMOSIS = which('osmosis')
    BASE_DIR = os.path.realpath(os.path.dirname(__file__))
    POWERSTYLE = os.path.join(BASE_DIR, 'power.style')
    # Parses "key=value" into a (key, value) tuple for --pg.
    parse_pair = lambda s: tuple(s.split('=', 1))
    ap = argparse.ArgumentParser()
    # polygon filter files
    ap.add_argument('--filter', action='store_true', help='Filter input file for power data (requires osmfilter)')
    ap.add_argument('--poly',type=str,nargs='+', help='Polygon file(s) to limit the areas of the input file (requires osmconvert)')
    ap.add_argument('--no-interactive', action='store_false', dest='interactive', help='Proceed automatically without asking questions')
    ap.add_argument('--no-import', action='store_false', dest='_import', help='Skip import step')
    ap.add_argument('--no-conversion', action='store_false', dest='convert', help='Skip conversion step')
    ap.add_argument('--no-export', action='store_false', dest='export', help='Skip export step')
    ap.add_argument('--pg', type=parse_pair, default=[], nargs='+', help='Connection arguments to PostgreSQL, eg. --pg user=gridkit database=europe')
    ap.add_argument('--psql', type=str, help='Location of psql binary', default=PSQL)
    ap.add_argument('--osm2pgsql', type=str, help='Location of osm2pgsql binary', default=OSM2PGSQL)
    ap.add_argument('--voltage', type=int, help='High-voltage cutoff level', default=220000)
    ap.add_argument('--full-export', action='store_true', dest='full_export')
    ap.add_argument('osmfile', nargs='?')
    args = ap.parse_args()
    # i've added this for the scigrid folks
    PSQL = args.psql
    OSM2PGSQL = args.osm2pgsql
    osmfile = args.osmfile
    # Only prompt when explicitly allowed AND stdin is a terminal.
    interactive = args.interactive and os.isatty(sys.stdin.fileno())
    if args._import and args.osmfile is None:
        ap.error("OSM source file required")
    if args.filter:
        if not OSMFILTER:
            logging.error("Cannot find osmfilter executable, necessary for --filter")
            quit(1)
        name, ext = os.path.splitext(osmfile)
        new_name = name + '-power.o5m'
        logging.info("Filtering %s to make %s", osmfile, new_name)
        subprocess.check_call([OSMFILTER, osmfile, '--keep="power=*"', '-o=' + new_name])
        osmfile = new_name
    # get effective database parameters
    # (PGUSER/PGHOST/... env vars, lowercased without the PG prefix, then
    # overridden by explicit --pg key=value pairs)
    db_params = dict((k[2:].lower(), v) for k, v in os.environ.items() if k.startswith('PG'))
    db_params.update(**dict(args.pg))
    # need 'root' database for polyfile based extraction
    if args.poly:
        db_params.update(database=db_params.get('user') or 'postgres')
    pg_client = PgClient()
    pg_client.update_params(db_params)
    if pg_client.check_connection():
        logging.info("Connection OK")
    elif interactive and not args.poly:
        logging.warn("Cannot connect to database")
        ask_db_params(pg_client, db_params)
    else:
        logging.error("Cannot connect to database")
        quit(1)
    if OSM2PGSQL is None or not (os.path.isfile(OSM2PGSQL) and os.access(OSM2PGSQL, os.X_OK)):
        logging.error("Cannot find osm2pgsql executable")
        quit(1)
    if args.poly:
        # One extraction + database per polygon area.
        osmfiles = dict()
        for polyfile in args.poly:
            if not os.path.isfile(polyfile):
                logging.warn("%s is not a file", polyfile)
                continue
            polygon_name, ext = os.path.splitext(os.path.basename(polyfile))
            osmfile_name, ext = os.path.splitext(osmfile)
            osmfile_for_area = '{0}-{1}.o5m'.format(osmfile_name, polygon_name)
            # Skip re-extraction when the area extract is newer than its source.
            if os.path.isfile(osmfile_for_area) and file_age_cmp(osmfile_for_area, osmfile) < 0:
                logging.info("File %s already exists and is newer than %s", osmfile_for_area, osmfile)
            else:
                logging.info("Extracting area %s from %s to make %s", polygon_name, osmfile, osmfile_for_area)
                subprocess.check_call([OSMCONVERT, osmfile, '--complete-ways', '-B='+polyfile, '-o='+osmfile_for_area])
            osmfiles[polygon_name] = osmfile_for_area
        for area_name, area_osmfile in osmfiles.items():
            # cleanup the name for use as a database name
            database_name = 'gridkit_' + re.sub('[^A-Z0-9]+', '_', area_name, 0, re.I)
            # select 'postgres' database for creating other databases
            pg_client.update_params({'database':'postgres'})
            pg_client.check_connection()
            setup_database(pg_client, database_name, False)
            # setup-database automatically uses the right connection
            do_import(area_osmfile, database_name, db_params)
            do_conversion(pg_client, args.voltage)
            export_network_csv(pg_client, args.full_export, database_name)
    else:
        # NOTE(review): db_params.get('postgres') looks up a key that is never
        # set; presumably the literal fallback 'postgres' was intended -- confirm.
        database_name = db_params.get('database') or db_params.get('postgres')
        if database_name is None:
            # last case fallback
            osmfile_name, ext = os.path.splitext(os.path.basename(osmfile))
            database_name = re.sub(r'[^A-Z0-9_]+', '_', osmfile_name.lower(), 0, re.I)
        if args._import:
            database_name = setup_database(pg_client, database_name, interactive)
            do_import(osmfile, database_name, db_params)
        if args.convert:
            try:
                do_conversion(pg_client, args.voltage)
            except KeyboardInterrupt:
                logging.warn("Execution interrupted - process is not finished")
                quit(1)
        if args.export:
            export_network_csv(pg_client, args.full_export, database_name or 'gridkit')
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/tools/util/geojson-to-postgis.py | SUMOxPyPSA/tools/util/geojson-to-postgis.py | #!/usr/bin/env python
from __future__ import print_function, unicode_literals
import operator
import psycopg2
import psycopg2.extras
import io
import json
import sys
import logging
CREATE_TABLES = '''
CREATE EXTENSION IF NOT EXISTS hstore;
CREATE EXTENSION IF NOT EXISTS postgis;
DROP TABLE IF EXISTS feature_points;
DROP TABLE IF EXISTS feature_lines;
DROP TABLE IF EXISTS feature_multilines;
CREATE TABLE feature_points (
import_id serial primary key,
point geometry(point, 4326),
properties hstore
);
CREATE TABLE feature_lines (
import_id serial primary key,
line geometry(linestring, 4326),
properties hstore
);
CREATE TABLE feature_multilines (
import_id serial primary key,
multiline geometry(multilinestring, 4326),
properties hstore
)
'''
INSERT_STATEMENT = {
'Point': 'INSERT INTO feature_points (point, properties) VALUES (ST_SetSRID(ST_GeomFromText(%s), 4326), %s);',
'LineString': 'INSERT INTO feature_lines (line, properties) VALUES (ST_SetSRID(ST_GeomFromText(%s), 4326), %s);',
'MultiLineString': 'INSERT INTO feature_multilines (multiline, properties) VALUES (ST_SetSRID(ST_GeomFromText(%s), 4326), %s);',
}
REMOVE_DUPLICATES = '''
DELETE FROM feature_lines WHERE import_id IN (
SELECT b.import_id
FROM feature_lines a, feature_lines b
WHERE a.import_id < b.import_id
AND a.properties = b.properties
AND a.line = b.line
);
'''
SPLIT_MULTILINES = '''
INSERT INTO feature_lines (line, properties)
SELECT (ST_Dump(multiline)).geom, properties
FROM feature_multilines;
'''
def hstore(d):
    """Return *d* with every key and value coerced to text.

    psycopg2's hstore adapter requires text keys and values.  The original
    used the ``unicode`` builtin, which only exists on Python 2 and raises
    NameError on Python 3; fall back to ``str`` there (same behaviour on 2).
    """
    try:
        text = unicode  # Python 2
    except NameError:
        text = str      # Python 3
    return dict((text(k), text(v)) for k, v in d.items())
def wkt(g):
    """Serialize a GeoJSON geometry dict to a WKT string.

    Handles arbitrarily nested coordinate arrays: the innermost pair is
    rendered as "lon lat" floats, one nesting level joins with commas, and
    deeper levels additionally wrap each part in parentheses.
    """
    def render(node):
        if not isinstance(node[0], list):
            # Innermost coordinate pair.
            return '{0:f} {1:f}'.format(*node)
        template = '({0})' if isinstance(node[0][0], list) else '{0}'
        return ', '.join(template.format(part) for part in map(render, node))
    return '{0:s} ({1:s})'.format(g['type'].upper(), render(g['coordinates']))
def import_feature(cur, feature_data):
    """Insert a GeoJSON Feature (or, recursively, a FeatureCollection) using
    database cursor *cur*."""
    kind = feature_data.get('type')
    if kind == 'FeatureCollection':
        for member in feature_data['features']:
            import_feature(cur, member)
    elif kind == 'Feature':
        geometry = feature_data['geometry']
        statement = INSERT_STATEMENT[geometry['type']]
        cur.execute(statement, (wkt(geometry), hstore(feature_data['properties'])))
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    # Empty DSN: connection settings come from libpq defaults / PG* env vars.
    con = psycopg2.connect('')
    # create table
    with con:
        with con.cursor() as cur:
            cur.execute(CREATE_TABLES)
    # use hstore to store attributes
    psycopg2.extras.register_hstore(con)
    # Read GeoJSON from stdin when no file arguments are given.
    if len(sys.argv) == 1:
        handles = [sys.stdin]
    else:
        handles = [io.open(a,'r') for a in sys.argv[1:]]
    for handle in handles:
        with handle:
            feature_data = json.load(handle)
        # Split multilines and deduplicate after each input file.
        with con:
            with con.cursor() as cur:
                import_feature(cur, feature_data)
                cur.execute(SPLIT_MULTILINES)
                cur.execute(REMOVE_DUPLICATES)
    con.commit()
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/tools/util/network.py | SUMOxPyPSA/tools/util/network.py | from __future__ import unicode_literals, division, print_function
import io
import csv
import random
import itertools
import heapq
import math
import warnings
try:
    from recordclass import recordclass
except ImportError:
    # Fall back to an immutable namedtuple; Network.patch() mutates fields
    # in place, hence the warning that it will not work without recordclass.
    from collections import namedtuple as recordclass
    warnings.warn("recordclass is necessary for Network.patch() to work")
try:
    from numpy import array
    from matplotlib import pyplot
except ImportError as e:
    # Plotting and powercase() support are optional; degrade with a warning.
    warnings.warn(str(e))
class Station(recordclass('Station', str('station_id lat lon name operator voltages frequencies lines'))):
    """A grid node with a geographic position and its incident lines."""

    def __hash__(self):
        # Identity is the station id alone; mutable fields stay out of the hash.
        return hash(self.station_id)

    @property
    def coordinates(self):
        """The (lon, lat) pair, in x/y plotting order."""
        return self.lon, self.lat

    def distance(self, other):
        """Great-circle distance to *other* in km (haversine formula).

        See https://www.math.ksu.edu/~dbski/writings/haversine.pdf
        """
        earth_radius = 6372.8  # km
        dlat = math.radians(other.lat - self.lat)
        dlon = math.radians(other.lon - self.lon)
        h = (math.sin(dlat / 2) ** 2
             + math.cos(math.radians(self.lat))
             * math.cos(math.radians(other.lat))
             * math.sin(dlon / 2) ** 2)
        return earth_radius * 2 * math.asin(math.sqrt(h))

    def to_ewkt(self):
        """EWKT point in WGS84 (SRID 4326)."""
        return 'SRID=4326;POINT({0} {1})'.format(self.lon, self.lat)
class Line(recordclass('Line', str('line_id operator left right length frequencies voltages resistance reactance capacitance max_current'))):
    """A transmission line between two Station objects (left/right ends)."""
    def __hash__(self):
        # Identity is the line id alone.
        return hash(self.line_id)
    def __repr__(self):
        # NOTE(review): returns bytes (Python 2 style); on Python 3 __repr__
        # must return str -- confirm before porting.
        return "{0}: {1} -> {2}".format(self.line_id, self.left.name, self.right.name).encode('utf-8')
    @property
    def susceptance(self):
        """Shunt susceptance derived from capacitance and the highest frequency.

        NOTE(review): computes f*C; the usual shunt susceptance is B = 2*pi*f*C.
        Confirm whether the 2*pi factor is accounted for elsewhere.
        """
        if self.capacitance is None or not self.frequencies:
            return None
        return self.capacitance * max(self.frequencies)
class Path(object):
    """An ordered walk over stations together with the lines connecting them."""
    def __init__(self, stations):
        self.stations = stations
        # make list of lines
        self.lines = list()
        for i in range(1, len(stations)):
            f = stations[i-1]
            t = stations[i]
            # Find the line joining f and t among f's incident lines.
            # NOTE(review): if no connecting line exists the loop falls
            # through and the *last* inspected line is appended anyway --
            # confirm callers only construct paths over connected stations.
            for l in f.lines:
                if f is l.left:
                    if t is l.right:
                        break
                elif t is l.left:
                    break
            self.lines.append(l)
    def plot(self, figure=None, color='yellow'):
        """Draw the path onto *figure* (a new one when None); returns the figure."""
        if figure is None:
            figure = pyplot.figure()
        axs = figure.add_subplot(1,1,1)
        lat = [s.lat for s in self.stations]
        lon = [s.lon for s in self.stations]
        axs.plot(lon,lat, color=color)
        return figure
    @property
    def length(self):
        # Total length is the sum over the connecting lines.
        return sum(l.length for l in self.lines)
    def __iter__(self):
        return iter(self.stations)
    def __repr__(self):
        # NOTE(review): returns bytes (Python 2 style repr).
        return 'Path of length {0} over [{1}]'.format(
            self.length, ', '.join(s.name for s in self.stations)
        ).encode('utf-8')
    def to_ewkt(self):
        """EWKT linestring of the station coordinates in WGS84 (SRID 4326)."""
        return 'SRID=4326;LINESTRING({0})'.format(
            ','.join('{0} {1}'.format(s.lon, s.lat) for s in self.stations)
        )
class Network(object):
    """An electric network: stations (nodes) and lines (edges), keyed by id.

    NOTE: this class uses dict.itervalues and bytes-returning __repr__, so it
    targets Python 2.
    """
    def __init__(self):
        self.stations = dict()
        self.lines = dict()
        # operator/area name -> small integer id, assigned on first use
        self._areas = dict()
    def connected_sets(self):
        """Return the connected components as lists of Station objects.

        NOTE(review): each component's starting station (``root``) is popped
        from ``unseen`` before the traversal, so it is never appended to
        ``current`` -- every returned component is missing one node. Confirm.
        """
        # bfs algorithm to find connected sets in the network
        unseen = set(self.stations.values())
        connected = []
        while unseen:
            current = []
            root = unseen.pop()
            queue = [root]
            while queue:
                node = queue.pop()
                if node in unseen:
                    unseen.remove(node)
                    current.append(node)
                for line in node.lines:
                    if line.left in unseen:
                        queue.append(line.left)
                    if line.right in unseen:
                        queue.append(line.right)
            connected.append(current)
        return connected
    def patch(self):
        """Propagate voltages/frequencies between stations and lines until a
        fixed point is reached; returns the list of change counts per sweep."""
        # flood algorithm to patch all lines and stations with values from neighbours
        totals = list()
        while True:
            changes = 0
            for station in self.stations.itervalues():
                line_voltages = set(v for line in station.lines for v in line.voltages)
                line_frequencies = set(f for line in station.lines for f in line.frequencies)
                if line_voltages - station.voltages:
                    station.voltages |= line_voltages
                    changes += 1
                if line_frequencies - station.frequencies:
                    station.frequencies |= line_frequencies
                    changes += 1
            for line in self.lines.itervalues():
                # Prefer values both endpoints agree on; otherwise copy from
                # whichever endpoint has data.
                shared_frequencies = line.left.frequencies & line.right.frequencies
                if shared_frequencies and not line.frequencies & shared_frequencies:
                    line.frequencies |= shared_frequencies
                    changes += 1
                elif not line.frequencies:
                    if line.left.frequencies:
                        line.frequencies = set(line.left.frequencies)
                        changes += 1
                    elif line.right.frequencies:
                        line.frequencies = set(line.right.frequencies)
                        changes += 1
                shared_voltages = line.left.voltages & line.right.voltages
                if shared_voltages and not line.voltages & shared_voltages:
                    line.voltages |= shared_voltages
                    changes += 1
                elif not line.voltages:
                    if line.left.voltages:
                        line.voltages = set(line.left.voltages)
                        changes += 1
                    elif line.right.voltages:
                        line.voltages = set(line.right.voltages)
                        changes += 1
            if changes == 0:
                break
            totals.append(changes)
            if len(totals) > 1000:
                raise Exception('dont think ill be stopping soon')
        return totals
    def report(self):
        """Count stations/lines with missing electrical data and line/station
        mismatches; returns (broken_stations, broken_lines, mismatches)."""
        # calculate missing values statically
        broken_stations = 0
        broken_lines = 0
        mismatches = 0
        for station in self.stations.itervalues():
            if not station.voltages or not station.frequencies:
                broken_stations += 1
            for line in station.lines:
                if station.frequencies:
                    if line.frequencies - station.frequencies:
                        mismatches += 1
                        continue
                elif line.frequencies:
                    mismatches += 1
                    continue
                if station.voltages:
                    if line.voltages - station.voltages:
                        mismatches += 1
                        continue
                elif line.voltages:
                    mismatches += 1
                    continue
        for line in self.lines.itervalues():
            if not line.voltages or not line.frequencies:
                broken_lines += 1
        return broken_stations, broken_lines, mismatches
    def find(self, from_id, to_id):
        """Shortest path between two station ids; returns a Path or None.

        NOTE(review): the popped ``score`` already includes the heuristic term
        (tuples pushed are (g + h, node)), yet it is reused as the g-score
        base, and the ``scores`` dict is read but never written -- both look
        like bugs in this A* implementation; confirm against known paths.
        """
        # A* algorithm to find shortest path
        scores = dict()
        come_from = dict()
        seen = set()
        path = list()
        try:
            start = self.stations[from_id]
            goal = self.stations[to_id]
        except KeyError:
            return None
        queue = [(0,start)]
        while queue:
            score, station = heapq.heappop(queue)
            if station is goal:
                break
            seen.add(station)
            for line in station.lines:
                neighbor = line.left if line.right is station else line.right
                if neighbor in seen:
                    continue
                g_score = score + line.length
                if scores.get(neighbor, g_score+1) < g_score:
                    continue
                h_score = goal.distance(neighbor)
                heapq.heappush(queue, (g_score + h_score, neighbor))
                come_from[neighbor] = station
        if station is not goal:
            return None
        # Walk the predecessor chain back to the start and reverse it.
        while station is not start:
            path.append(station)
            station = come_from[station]
        path.append(start)
        path.reverse()
        return Path(path)
    def plot(self, figure=None, node_color='blue', edge_color='red'):
        """Draw all lines and stations onto *figure* (new one when None)."""
        if figure is None:
            figure = pyplot.figure()
        axis = figure.add_subplot(1,1,1)
        for line in self.lines.values():
            axis.plot([line.left.lon, line.right.lon],
                      [line.left.lat, line.right.lat], color=edge_color)
        coordinates = [s.coordinates for s in self.stations.values()]
        # NOTE(review): matplotlib's keyword is 'linestyle'; 'lineStyle' may be
        # rejected by newer matplotlib versions -- confirm.
        axis.plot(*zip(*coordinates), marker='o', color=node_color, lineStyle='None')
        return figure
    def _area_number(self, area_name):
        """Return a stable small integer for *area_name*, assigning on first use."""
        if area_name not in self._areas:
            # assign next area number
            self._areas[area_name] = len(self._areas) + 1
        return self._areas[area_name]
    def powercase(self, loads=None):
        """Build a PYPOWER-style case dict (bus/gen/branch arrays) for a DC
        power flow over this network."""
        # loads is a map of station id -> load, either positive or
        # negative; a negative load is represented by a generator.
        # if no loads map is passed, generate an 'electrified pair' of
        # two random nodes, one of which delivers power, the other
        # consumes it
        if loads is None:
            loads = self._electrified_pair()
        ppc = {
            "version": 2,
            "baseMVA": 100.0
        }
        nodes = list()
        edges = list()
        generators = list()
        # (station_id, voltage level) -> bus id
        station_to_bus = dict()
        bus_id_gen = itertools.count()
        for station in self.stations.itervalues():
            # because we do a DC PF, we ignore frequencies completely
            minv, maxv = min(station.voltages), max(station.voltages)
            for voltage in station.voltages:
                # Loads attach at the station's lowest voltage level only.
                if station.station_id in loads and voltage == minv:
                    bus_load = loads[station.station_id]
                else:
                    bus_load = 0
                bus_id = next(bus_id_gen)
                station_to_bus[station.station_id, voltage] = bus_id
                if bus_load < 0:
                    # it is a generator instead of a load, insert it
                    generators.append(self._make_generator(bus_id, -bus_load))
                    bus_load = 0
                nodes.append(self._make_bus(station, voltage, bus_load, bus_id))
            for voltage in station.voltages:
                if voltage != maxv:
                    # create a transformer branch from max voltage to this voltage
                    from_bus = station_to_bus[station.station_id, maxv]
                    to_bus = station_to_bus[station.station_id, voltage]
                    edges.append(self._make_transformer(from_bus, to_bus))
        for line in self.lines.itervalues():
            # create branches between stations
            for voltage in line.voltages:
                from_bus = station_to_bus[line.left.station_id, voltage]
                to_bus = station_to_bus[line.right.station_id, voltage]
                edges.append(self._make_line(line, from_bus, to_bus))
        ppc['bus'] = array(nodes)
        ppc['gen'] = array(generators)
        ppc['branch'] = array(edges)
        return ppc
    def _electrified_pair(self):
        """Pick two random station ids: one 100 MW source, one 50 MW sink.

        NOTE(review): random.sample over a dict is fragile (fails on Python 3
        and indexes by position on Python 2) -- confirm it yields station ids
        as intended.
        """
        src, dst = random.sample(self.stations, 2)
        return {
            src: -100, # MW
            dst: 50,   # MW
        }
    def _make_bus(self, station, voltage, load, bus_id):
        # see pypower.caseformat for documentation on how this works
        area_nr = self._area_number(station.operator)
        base_kv = voltage // 1000
        return [
            bus_id,
            3,    # slack bus
            load, # real load in MW
            0,    # reactive load MVAr, zero because DC
            0,    # shunt conductance
            0,    # shunt susceptance
            area_nr, # area number
            1.0,  # voltage magnitude per unit
            0,    # voltage angle
            base_kv, # base voltage (per unit base)
            area_nr, # loss zone nr
            1.1,  # max voltage per unit
            0.9,  # min voltage per unit
        ]
    def _make_transformer(self, from_bus, to_bus):
        # Branch row with nominal placeholder impedances for an in-station
        # transformer between two voltage levels.
        return [
            from_bus,
            to_bus,
            0.01, # resistance
            0.01, # reactance
            0.01, # line charging susceptance
            200,  # long term rating (MW)
            200,  # short term rating (MW)
            200,  # emergency rating (MW)
            1,    # off-nominal (correction) taps ratio, 1 for no correction
            0,    # transformer phase shift angle,
            1,    # status (1 = on)
            -360, # minimum angle
            360,  # maximum angle
        ]
    def _make_line(self, line, from_bus, to_bus):
        # Branch row for a real transmission line; missing electrical values
        # fall back to nominal placeholders.
        return [
            from_bus,
            to_bus,
            line.resistance or 0.01, # default value if None
            line.reactance or 0.01,
            line.susceptance or 0.01,
            200,
            200,
            200,
            0, # not a transformer
            0, # not a transformer
            1, # status
            -360,
            360
        ]
    def _make_generator(self, bus_id, power_output):
        # Generator row delivering *power_output* MW at *bus_id*.
        return [
            bus_id,
            power_output,
            0,   # reactive power output
            0,   # maximum reactive power output
            0,   # minimum reactive power output
            1.0, # per-unit voltage magnitude setpoint
            100, # base MVA
            1,   # status (on)
            power_output, # maximum real power output
            0,   # minimum real power output
            0,   # Pc1, irrelevant
            0,   # Pc2
            0,   # Qc1min
            0,   # Qc1max
            0,   # Qc2min
            0,   # Qc2max
            5,   # ramp rate load-following (MW/min)
            5,   # ramp rate 10-min reserve (MW/min)
            5,   # ramp rate 30-min reserve (MW/min)
            0,   # ramp rate reactive power
            0,   # area participation factor
        ]
        pass  # (leftover no-op)
    def dot(self):
        """Render the network as a Graphviz 'graph' document string."""
        buf = io.StringIO()
        buf.write("graph {\n")
        buf.write("rankdir LR\n")
        for station in self.stations.itervalues():
            buf.write('s_{0} [label="{1}"]\n'.format(station.station_id, station.name.replace('"', "'")))
        for line in self.lines.itervalues():
            buf.write('s_{0} -- s_{1}\n'.format(line.left.station_id, line.right.station_id))
        buf.write("}\n")
        return buf.getvalue()
    def __repr__(self):
        # NOTE(review): returns bytes (Python 2 style repr).
        return "Network of {0} stations, {1} lines".format(len(self.stations), len(self.lines)).encode('utf-8')
class ScigridNetwork(Network):
    """Network populated from SciGRID-style vertices/links CSV files.

    NOTE: files are opened in binary mode and fields decoded by hand, and the
    dialect uses a bytes quotechar -- this works with the Python 2 csv module
    only.
    """
    class _csv_dialect(csv.excel):
        # SciGRID CSVs quote fields with single quotes.
        quotechar = b"'"
    def read(self, vertices_csv, links_csv):
        """Load stations from *vertices_csv*, then lines from *links_csv*,
        wiring each line into its endpoints' ``lines`` lists."""
        with io.open(vertices_csv, 'rb') as handle:
            for row in csv.DictReader(handle, dialect=self._csv_dialect):
                station_id = int(row['v_id'])
                lat = float(row['lat'])
                lon = float(row['lon'])
                name = row['name'].decode('utf-8')
                operator = row['operator'].decode('utf-8')
                # Multi-valued fields are ';'-separated; empty -> empty set.
                voltages = set(map(int, row['voltage'].split(';')) if row['voltage'] else [])
                frequencies = set(map(float, row['frequency'].split(';')) if row['frequency'] else [])
                self.stations[station_id] = Station(station_id=station_id, lat=lat, lon=lon, name=name, operator=operator,
                                                    voltages=voltages, frequencies=frequencies, lines=list())
        with io.open(links_csv, 'rb') as handle:
            for i, row in enumerate(csv.DictReader(handle, dialect=self._csv_dialect)):
                line_id = int(row['l_id'])
                operator = row['operator'].decode('utf-8')
                left = self.stations[int(row['v_id_1'])]
                right = self.stations[int(row['v_id_2'])]
                length = float(row['length_m'])
                # Per-km electrical values scaled by the line length in metres.
                resistance = float(row['r_ohmkm']) * int(row['length_m']) / 1000 if row['r_ohmkm'] else None
                reactance = float(row['x_ohmkm']) * int(row['length_m']) / 1000 if row['x_ohmkm'] else None
                capacitance = float(row['c_nfkm']) * int(row['length_m']) / 1000 if row['c_nfkm'] else None
                max_current = float(row['i_th_max_a']) if row['i_th_max_a'] else None
                # use complex voltages for lines
                frequencies = set(map(float, row['frequency'].split(';')) if row['frequency'] else [])
                voltages = set(map(int, row['voltage'].split(';')) if row['voltage'] else [])
                line = Line(line_id=line_id, operator=operator, left=left, right=right, length=length,
                            voltages=voltages, frequencies=frequencies,
                            resistance=resistance, reactance=reactance, capacitance=capacitance,
                            max_current=max_current)
                self.lines[line_id] = line
                left.lines.append(line)
                right.lines.append(line)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/tools/util/load_polyfile.py | SUMOxPyPSA/tools/util/load_polyfile.py | #!/usr/bin/env python
from __future__ import print_function, unicode_literals, division
import argparse
import sys
import os
import io
from polyfile import PolyfileParser
from geometry import Polygon
ap = argparse.ArgumentParser()
ap.add_argument('file', nargs='+', type=str)
ap.add_argument('--table', type=str, default='polygons')
args = ap.parse_args()
# polygon name (from file name) -> WKT text
polygons = dict()
parser = PolyfileParser()
for file_name in args.file:
    if not os.path.isfile(file_name):
        # NOTE(review): prints usage but neither skips nor exits, so the open()
        # below still runs and fails into the except branch -- confirm intent.
        print("Usage: {0} <files>".format(sys.argv[0]))
    name, ext = os.path.splitext(os.path.basename(file_name))
    try:
        pr = parser.parse(io.open(file_name, 'r').read())
        # Section '1' of the polyfile holds the polygon's coordinate ring.
        pl = Polygon(pr[1]['1'])
        polygons[name] = pl.to_wkt()
    except Exception as e:
        print("Could not process {0} because {1}".format(file_name, e), file=sys.stderr)
        quit(1)
# Emit a SQL script (to stdout) that recreates the table and inserts all
# polygons.  NOTE: table name and WKT are interpolated, not parameterized;
# only use with trusted input (see the closing remark in this file).
values = ','.join("('{0}', ST_SetSRID(ST_GeomFromText('{1}'), 4326))".format(n, p)
                  for (n, p) in polygons.items())
print('''
BEGIN;
DROP TABLE IF EXISTS {0};
CREATE TABLE {0} (
    name varchar(64) primary key,
    polygon geometry(polygon, 4326)
);
INSERT INTO {0} (name, polygon) VALUES {1};
COMMIT;
'''.format(args.table, values))
# of course you can abuse this. don't do that, then
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
import io
import os
import subprocess

try:
    import psycopg2
    import psycopg2.extensions
except ImportError:
    psycopg2 = False

from .which import which
PSQL = which('psql')
class QueryError(Exception):
    """Raised when executing a query or query file fails.

    The offending query (or file name) is kept on the ``query`` attribute so
    callers can include it in diagnostics.
    """

    def __init__(self, error, query):
        super(QueryError, self).__init__(error)
        self.query = query
def make_copy_query(subquery_or_table):
    """Build a ``COPY ... TO STDOUT WITH CSV HEADER`` statement.

    A SELECT statement is parenthesized (as COPY requires for subqueries);
    anything else is treated as a table name and used verbatim.
    """
    target = subquery_or_table
    if target.lower().startswith('select'):
        return 'COPY ({0}) TO STDOUT WITH CSV HEADER'.format(target)
    return 'COPY {0} TO STDOUT WITH CSV HEADER'.format(target)
class PsqlWrapper(object):
    "Wrap psql client executable under subprocess"
    def check_connection(self):
        """Return True when a trivial query succeeds with the current params."""
        try:
            self.do_query('SELECT 1')
        except QueryError as e:
            return False
        else:
            return True
    def update_params(self, params):
        """Expose connection parameters to psql via PG* environment variables."""
        for n,v in params.items():
            k = 'PG' + n.upper()
            os.environ[k] = str(v)
    def do_createdb(self, database_name):
        self.do_query('CREATE DATABASE {0}'.format(database_name))
    def do_query(self, query):
        """Run *query* via psql; raises QueryError on failure."""
        try:
            # ON_ERROR_STOP makes psql exit non-zero on the first SQL error.
            subprocess.check_call([PSQL, '-v', 'ON_ERROR_STOP=1', '-c', query])
        except subprocess.CalledProcessError as e:
            raise QueryError(e, query)
        except OSError as e:
            raise Exception(e)
    def do_queryfile(self, queryfile):
        """Run the SQL file at *queryfile* via psql; raises QueryError on failure."""
        try:
            subprocess.check_call([PSQL, '-v', 'ON_ERROR_STOP=1', '-f', queryfile])
        except subprocess.CalledProcessError as e:
            raise QueryError(e, queryfile)
        except OSError as e:
            raise Exception(e)
    def do_getcsv(self, subquery_or_table, io_handle):
        """COPY the table/subquery as CSV (with header) into *io_handle*."""
        query = make_copy_query(subquery_or_table)
        try:
            command = [PSQL, '-v', 'ON_ERROR_STOP=1', '-c', query]
            try:
                subprocess.check_call(command, stdout=io_handle)
            except io.UnsupportedOperation as e:
                # Handle has no real file descriptor (e.g. StringIO): capture
                # the output and write it ourselves.
                io_handle.write(subprocess.check_output(command).decode('utf-8'))
        except subprocess.CalledProcessError as e:
            raise QueryError(e, subquery_or_table)
        except OSError as e:
            raise Exception(e)
class Psycopg2Wrapper(object):
    """Run SQL against PostgreSQL via psycopg2, mirroring PsqlWrapper's API.

    The connection is created lazily by check_connection(); update_params
    invalidates any existing connection so new parameters take effect.
    """
    def __init__(self):
        # Lazily-created psycopg2 connection and the kwargs used to open it.
        self._connection = None
        self._params = dict()
    def update_params(self, params):
        if self._connection is not None:
            # close existing connection
            if not self._connection.closed:
                self._connection.close()
            self._connection = None
        self._params.update(**params)
    def check_connection(self):
        """Open the connection if needed; True when a connection is available."""
        try:
            if self._connection is None:
                self._connection = psycopg2.connect(**self._params)
        except (TypeError, psycopg2.Error) as e:
            # TypeError covers bad keyword parameters; psycopg2.Error covers
            # actual connection failures.
            return False
        else:
            return True
    def do_createdb(self, database_name):
        # CREATE DATABASE must run outside a transaction, hence autocommit.
        # NOTE(review): assumes check_connection() has already been called;
        # otherwise self._connection is still None and this raises.
        self._connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        self.do_query('CREATE DATABASE {0};'.format(database_name))
    def do_query(self, query):
        """Execute *query* inside a transaction; raise QueryError on failure."""
        try:
            with self._connection as con:
                with con.cursor() as cursor:
                    cursor.execute(query)
        except psycopg2.Error as e:
            raise QueryError(e, query)
    def do_queryfile(self, queryfile):
        """Execute the whole contents of *queryfile* as one statement batch."""
        with io.open(queryfile, 'r', encoding='utf-8') as handle:
            query = handle.read()
        self.do_query(query)
    def do_getcsv(self, subquery_or_table, io_handle):
        """Stream *subquery_or_table* as CSV (with header) into *io_handle*."""
        query = make_copy_query(subquery_or_table)
        try:
            with self._connection.cursor() as cursor:
                cursor.copy_expert(query, io_handle)
        except psycopg2.Error as e:
            raise QueryError(e, query)
class PgWrapper(Psycopg2Wrapper if psycopg2 else PsqlWrapper):
    '''
    Wrap interfaces of either psycopg2 or psql-under-subprocess
    Which of these is actually implemented depends on the runtime
    environment; psycopg2 is given preference, but psql is a fallback.
    '''
    # The base class is chosen at import time: the import guard above sets
    # ``psycopg2`` to False when the library is unavailable, in which case
    # the subprocess-based PsqlWrapper is used instead.
    pass
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/tools/util/polyfile.py | SUMOxPyPSA/tools/util/polyfile.py | import re
# syntax of poly files;
# name
# number
# indented list of longitude, latitude
# end
# possibly another number
# another end
class PolyfileParser(object):
    """Recursive-descent parser for ``.poly`` boundary files.

    A poly file consists of a name line, one or more sections (an
    identifier line followed by whitespace-indented "longitude latitude"
    rows in scientific notation, closed by END), and a final END closing
    the whole file.
    """
    # Token patterns, matched anchored at the current cursor position.
    newline = re.compile(r'\s*\n')
    whitespace = re.compile(r'\s+')
    end = re.compile(r'END')
    word = re.compile(r'\w+')
    number = re.compile(r'-?\d\.\d+E[+-]\d+')
    identifier = re.compile(r'!?\d+')

    class Error(Exception):
        """Raised when the input does not match the expected token."""
        pass

    def parse(self, buf):
        """Parse *buf*; return ``(name, sections)`` where sections maps each
        section identifier to its list of (longitude, latitude) tuples."""
        self.buf = buf
        self.position = 0
        name = self.read(self.word)
        self.read(self.newline)
        sections = {}
        # Sections repeat until the file-level END.
        while not self.peek(self.end):
            section_id = self.read(self.identifier)
            self.read(self.newline)
            ring = []
            # Coordinate rows repeat until the section-level END.
            while not self.peek(self.end):
                self.read(self.whitespace)
                lon = float(self.read(self.number))
                self.read(self.whitespace)
                lat = float(self.read(self.number))
                ring.append((lon, lat))
                self.read(self.newline)
            self.read(self.end)
            self.read(self.newline)
            sections[section_id] = ring
        self.read(self.end)
        # A trailing newline after the final END is optional.
        if self.peek(self.newline):
            self.read(self.newline)
        return name, sections

    def peek(self, expect):
        """Return True if *expect* matches at the cursor, without consuming."""
        return expect.match(self.buf, self.position) is not None

    def read(self, expect):
        """Consume and return the text matched by *expect*, or raise Error."""
        match = expect.match(self.buf, self.position)
        if match is None:
            raise self.Error("%s was not matched (got %s...)" % (expect.pattern, self.buf[self.position:self.position+10]))
        self.position = match.end()
        return match.group()
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/tools/util/__init__.py | SUMOxPyPSA/tools/util/__init__.py | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false | |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/tools/util/geometry.py | SUMOxPyPSA/tools/util/geometry.py | from __future__ import print_function, division
import collections
def cross_vertical(line_d, line_v):
    """Intersect a (non-vertical) segment *line_d* with a vertical segment *line_v*.

    Returns the (x, y) intersection point, or None when the segments do not
    meet — including the degenerate case where *line_d* is itself vertical.
    """
    (xd1, yd1), (xd2, yd2) = line_d
    # As in the original, the vertical line's x is taken from its second endpoint.
    (_, yv1), (xv, yv2) = line_v
    run = xd2 - xd1
    if run == 0:
        # Both segments vertical: treated as never intersecting.
        return None
    slope = (yd2 - yd1) / run
    intercept = yd1 - slope * xd1
    # The vertical line's x must fall within the diagonal's x-extent.
    if not (min(xd1, xd2) <= xv <= max(xd1, xd2)):
        return None
    y_hit = slope * xv + intercept
    if min(yv1, yv2) <= y_hit <= max(yv1, yv2):
        return xv, y_hit
    return None
def cross_line(line_a, line_b):
    """Return the intersection point of two segments, or None if they miss.

    Vertical segments are delegated to cross_vertical; segments with equal
    slope are treated as never meeting (even when collinear).
    """
    (xa1, ya1), (xa2, ya2) = line_a
    (xb1, yb1), (xb2, yb2) = line_b
    run_a = xa2 - xa1
    run_b = xb2 - xb1
    if run_a == 0:
        return cross_vertical(line_b, line_a)
    if run_b == 0:
        return cross_vertical(line_a, line_b)
    slope_a = (ya2 - ya1) / run_a
    slope_b = (yb2 - yb1) / run_b
    if slope_a == slope_b:
        # Parallel: no (unique) intersection.
        return None
    icept_a = ya1 - slope_a * xa1
    icept_b = yb1 - slope_b * xb1
    x_hit = (icept_b - icept_a) / (slope_a - slope_b)
    y_hit = slope_a * x_hit + icept_a
    # The crossing counts only inside both segments' x-extents.
    lo = max(min(xa1, xa2), min(xb1, xb2))
    hi = min(max(xa1, xa2), max(xb1, xb2))
    if lo <= x_hit <= hi:
        return x_hit, y_hit
    return None
def edges(polygon):
    """Yield consecutive vertex pairs of *polygon*, closing the ring.

    The closing edge (last vertex back to the first) is emitted only when
    the polygon is not already explicitly closed.
    """
    for a, b in zip(polygon, polygon[1:]):
        yield a, b
    if polygon[0] != polygon[-1]:
        yield polygon[-1], polygon[0]
def polygon_includes(polygon, point):
    """Even-odd ray test: True when *point* lies inside *polygon*.

    Casts a horizontal ray to the left and to the right of the point and
    counts edge crossings; an interior point sees an odd count on both
    sides. Crossing points are deduplicated in a set because a ray passing
    exactly through a shared vertex intersects two edges at one point.
    """
    px, py = point
    xs = [vx for vx, _ in polygon]
    ray_left = ((min(xs), py), (px, py))
    ray_right = ((px, py), (max(xs), py))
    hits_left = {cross_line(ray_left, edge) for edge in edges(polygon)}
    hits_left.discard(None)
    hits_right = {cross_line(ray_right, edge) for edge in edges(polygon)}
    hits_right.discard(None)
    return len(hits_left) % 2 == 1 and len(hits_right) % 2 == 1
class Edges(object):
    """Sequence-like view of a polygon's edges, including the closing edge.

    Edge *i* joins vertex *i* to vertex *i + 1*; the final edge wraps back
    to vertex 0. Negative indices behave like list indexing.
    """

    def __init__(self, points):
        self.points = points

    def __getitem__(self, idx):
        n = len(self.points)
        if -n < idx < 0:
            # Normalize negative indices, then resolve normally.
            return self[idx + n]
        if idx == n - 1:
            # Closing edge back to the first vertex.
            return self.points[idx], self.points[0]
        if 0 <= idx < n - 1:
            return self.points[idx], self.points[idx + 1]
        raise IndexError("%s not in %s" % (idx, n))

    def __iter__(self):
        for i in range(len(self.points)):
            yield self[i]
class IntervalTree(object):
    """Static interval tree supporting point-stabbing queries via ``tree[x]``.

    Built once from a sequence of (a, b) intervals; ``tree[x]`` returns the
    original indices of every interval containing x.
    """

    Node = collections.namedtuple('Node', ['left','right','interval','value'])

    def __init__(self, intervals):
        # Normalize each (a, b) to (lo, hi), remember its original index,
        # and sort so the recursive build can halve the list.
        ordered = sorted(
            (min(a, b), max(a, b), idx)
            for idx, (a, b) in enumerate(intervals)
        )
        self.root = self._build_tree(ordered, 0, len(ordered))

    def _build_tree(self, ordered, lo, hi):
        if hi - lo <= 1:
            # Leaf: holds one interval and its original index.
            lo_x, hi_x, idx = ordered[lo]
            return self.Node(None, None, (lo_x, hi_x), idx)
        mid = (lo + hi) // 2
        lchild = self._build_tree(ordered, lo, mid)
        rchild = self._build_tree(ordered, mid, hi)
        # Interior node: spans from the left child's low to the children's max high.
        span = (lchild.interval[0], max(lchild.interval[1], rchild.interval[1]))
        return self.Node(lchild, rchild, span, None)

    def _query_tree(self, node, x):
        lo_x, hi_x = node.interval
        hits = []
        if x < lo_x or x > hi_x:
            # x outside this subtree's span: prune.
            return hits
        if node.left is not None:
            hits.extend(self._query_tree(node.left, x))
        if node.right is not None:
            hits.extend(self._query_tree(node.right, x))
        if node.value is not None:
            hits.append(node.value)
        return hits

    def __getitem__(self, x):
        return self._query_tree(self.root, x)
class Polygon(object):
    """Closed polygon supporting ``point in polygon`` tests and WKT export.

    An interval tree over the edges' vertical (y) extents is built up front
    so __contains__ only intersects the few edges whose y-range contains
    the query point's y.
    """
    def __init__(self, points):
        # points: sequence of (x, y) vertex tuples.
        self.points = points
        self.edges = Edges(points)
        # build vertical extent tree for efficient point-in-polygon query
        verticals = ((y1, y2) for ((x1,y1), (x2, y2)) in self.edges)
        self.vertical_intervals = IntervalTree(verticals)
    def __len__(self):
        return len(self.points)
    def __iter__(self):
        return iter(self.points)
    def __contains__(self, point):
        """Even-odd ray test: True when *point* is inside the polygon."""
        x, y = point
        left, right = 0, 0  # NOTE(review): appears unused
        # don't have min/max handy, so this will have to do
        # (the ±180 bounds suggest lon/lat degrees — presumably geographic
        # coordinates; verify against callers)
        horizontal = ((-180, y), (180, y))
        # a horizontal line at y crosses the edges given by the idxs
        cross_pts = set()
        cross_left, cross_right = 0, 0
        # check if they cross an odd number of times on right and left sides
        for i in self.vertical_intervals[y]:
            # NOTE(review): cross_line can return None (e.g. an edge lying
            # exactly along the horizontal at y); the tuple unpack below
            # would then raise TypeError — confirm inputs exclude that case.
            cross_x, cross_y = cross_line(horizontal, self.edges[i])
            if cross_x in cross_pts:
                # duplicate x: the ray passed exactly through a shared vertex
                continue
            cross_pts.add(cross_x)
            if cross_x < x:
                cross_left += 1
            else:
                cross_right += 1
        if cross_left & 1 == 1 and cross_right & 1 == 1:
            return True
        return False
    def to_wkt(self):
        """Render the vertex ring as a WKT POLYGON string."""
        return 'POLYGON(({0}))'.format(','.join('{0} {1}'.format(x, y) for (x,y) in self.points))
if __name__ == '__main__':
    # Self-test: segment-intersection and point-in-polygon sanity checks.
    line_a = ((1,1), (3,4))
    line_b = ((1,3), (3,2))
    line_c = ((1,6), (3,5))
    line_v = ((2, 0), (2, 4))
    assert cross_line(line_a, line_b) == (2.0, 2.5)
    # reversed() feeds the endpoints in the other order; same intersection.
    assert cross_line(reversed(line_a), line_b) == (2.0, 2.5)
    assert cross_line(line_v, line_b) == (2.0, 2.5)
    # line_b and line_c have equal slopes (parallel) and never meet.
    assert cross_line(line_b, line_c) is None
    square = ((0,0), (0,5),
              (5,5), (5,0))
    point = (2,2)
    assert polygon_includes(square, point)
    assert not polygon_includes(square, (7, 2))
    pentagon = ((1,0), (0, 2), (2, 3), (4,2), (3,0))
    assert polygon_includes(pentagon, (2, 2))
    assert not polygon_includes(pentagon, (1,3))
    print("done")
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/tools/util/which.py | SUMOxPyPSA/tools/util/which.py | import os
def which(program):
    '''Find executable for a given name by PATH, or None if no executable could be found.

    Dispatches to the Windows (PATHEXT-aware) or POSIX (X_OK-checking)
    lookup based on os.name; raises NotImplementedError elsewhere.
    '''
    if os.name == 'nt':
        return _nt_which(program)
    elif os.name == 'posix':
        return _posix_which(program)
    # BUG FIX: `os.platform` does not exist, so this line previously raised
    # AttributeError instead of the intended NotImplementedError. Report the
    # unsupported platform name via os.name.
    raise NotImplementedError(os.name)
def _nt_which(program):
    """Windows PATH lookup honouring PATHEXT.

    If *program* already carries a recognised extension (e.g. ``foo.exe``)
    only that exact name is searched; otherwise every PATHEXT extension is
    tried in each PATH directory. Returns the full path or None.
    """
    PATH = os.environ['PATH'].split(os.pathsep)
    EXT = os.environ['PATHEXT'].split(os.pathsep)
    name, ext = os.path.splitext(program)
    if ext in EXT:
        # program is specified as foo.exe, for example, in which case
        # we don't go looking for foo.exe.exe or foo.exe.bat
        for p in PATH:
            n = os.path.join(p, program)
            if os.path.isfile(n):
                return n
    else:
        for p in PATH:
            for e in EXT:
                n = os.path.join(p, program + e)
                if os.path.isfile(n):
                    return n
    return None
def _posix_which(program):
PATH = os.environ['PATH'].split(os.pathsep)
for p in PATH:
n = os.path.join(p, program)
if os.path.isfile(n) and os.access(n, os.X_OK):
return n
return None
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/tools/util/hstore.py | SUMOxPyPSA/tools/util/hstore.py | import re
class hstore(dict):
    """Dict subclass that parses and renders PostgreSQL hstore literals.

    ``hstore('"a"=>"1", "b"=>"2"')`` yields ``{'a': '1', 'b': '2'}``;
    ``str()`` renders the mapping back in hstore syntax.
    """
    class parser(object):
        """Cursor-based tokenizer yielding (key, value) pairs from hstore text."""
        word = re.compile(r'"([^"]+)"')
        arrow = re.compile(r'\s*=>\s*')
        comma = re.compile(r',\s*')
        def __init__(self, text):
            self.position = 0
            self.text = text
        def __iter__(self):
            # Pairs repeat, separated by commas, until input is exhausted.
            while self.peek(self.word):
                key = self.read(self.word, 1)
                self.read(self.arrow)
                value = self.read(self.word, 1)
                yield key, value
                if self.peek(self.comma):
                    self.read(self.comma)
                else:
                    break
        def read(self, expect, group=0):
            """Consume *expect* at the cursor and return the given match group."""
            match = expect.match(self.text, self.position)
            if match is None:
                # BUG FIX: previously referenced the undefined name `position`
                # (and concatenated str + int), raising NameError instead of
                # the intended parse error.
                raise Exception('parse error at ' + str(self.position))
            self.position = match.end()
            return match.group(group)
        def peek(self, expect):
            """True if *expect* matches at the cursor, without consuming."""
            return expect.match(self.text, self.position) is not None
    def __init__(self, text):
        super(hstore,self).__init__(self.parser(text))
    def __str__(self):
        return ', '.join('"{0}"=>"{1}"'.format(k,v) for k,v in self.items())
def _main():
    """Smoke-test round-tripping a literal through hstore."""
    parsed = hstore('"foo" => "bar"')
    assert parsed['foo'] == 'bar'
    assert str(parsed) == '"foo"=>"bar"'
    assert repr(parsed) == "{'foo': 'bar'}"
if __name__ == '__main__':
_main()
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/miami/check_tls_id_mismatches.py | SUMOxPyPSA/miami/check_tls_id_mismatches.py | import gzip
import xml.etree.ElementTree as ET
def get_tllogic_ids_from_net(netfile):
    """Collect the ids of all top-level <tlLogic> elements in a gzipped SUMO net.

    Elements without an ``id`` attribute are ignored. Returns a set of ids.
    """
    with gzip.open(netfile, 'rt', encoding='utf-8') as handle:
        root = ET.parse(handle).getroot()
    found = set()
    for logic in root.findall('tlLogic'):
        logic_id = logic.get('id')
        if logic_id is not None:
            found.add(logic_id)
    return found
def get_tllogic_ids_from_add(addfile):
    """Collect the ids of all top-level <tlLogic> elements in an additional file.

    Elements without an ``id`` attribute are ignored. Returns a set of ids.
    """
    root = ET.parse(addfile).getroot()
    return {tl.get('id') for tl in root.findall('tlLogic')
            if tl.get('id') is not None}
if __name__ == "__main__":
netfile = 'osm.net.xml.gz'
addfile = 'traffic_lights.add.xml'
print(f"Checking network file: {netfile}")
print(f"Checking add file: {addfile}")
net_ids = get_tllogic_ids_from_net(netfile)
add_ids = get_tllogic_ids_from_add(addfile)
print(f"\nTraffic light IDs in network file ({len(net_ids)}):")
print(sorted(net_ids))
print(f"\nTraffic light IDs in add file ({len(add_ids)}):")
print(sorted(add_ids))
missing = add_ids - net_ids
if missing:
print(f"\nIDs in add file but NOT in network file ({len(missing)}):")
print(sorted(missing))
else:
print("\nAll add file IDs are present in the network file.")
extra = net_ids - add_ids
if extra:
print(f"\nIDs in network file but NOT in add file ({len(extra)}):")
print(sorted(extra))
else:
print("\nAll network file IDs are present in the add file.") | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/SUMOxPyPSA/miami/extract_tllogic.py | SUMOxPyPSA/miami/extract_tllogic.py | import gzip
import xml.etree.ElementTree as ET
def extract_tllogics(filename):
    """Print every top-level <tlLogic> element of a gzipped SUMO net as XML.

    Each logic is emitted with its id/type/programID/offset attributes and
    its <phase> children, ready to paste into an additional file.
    """
    with gzip.open(filename, 'rt', encoding='utf-8') as handle:
        root = ET.parse(handle).getroot()
    for tl in root.findall('tlLogic'):
        print(f"\n<tlLogic id=\"{tl.get('id')}\" type=\"{tl.get('type')}\" programID=\"{tl.get('programID')}\" offset=\"{tl.get('offset')}\">")
        for phase in tl.findall('phase'):
            print(f"  <phase duration=\"{phase.get('duration')}\" state=\"{phase.get('state')}\"/>")
        print("</tlLogic>")
if __name__ == "__main__":
extract_tllogics('osm.net.xml.gz') | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/examples/xflow_loader.py | examples/xflow_loader.py | import sys
import os
# Make the sibling ``xflow`` package importable when the examples are run
# directly from a source checkout: resolve this script's directory, then
# point two levels up at the xflow package root.
current_script_directory = os.path.dirname(os.path.abspath(__file__))
xflow_path = os.path.join(current_script_directory, '..', '..', 'xflow')
# Insert at position 1 (after the script's own directory) so the local
# checkout takes precedence over any installed copy.
sys.path.insert(1, xflow_path)
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/examples/main.py | examples/main.py | import xflow_loader
from xflow.dataset.nx import BA, connSW
from xflow.dataset.pyg import Cora
from xflow.diffusion import SI, IC, LT
from xflow.seed import random as seed_random, degree as seed_degree, eigen as seed_eigen
from xflow.util import run

# graphs to test: a connected small-world generator (wrapped in a lambda so
# it carries a readable __name__ for reports) and Barabasi-Albert
fn = lambda: connSW(n=1000, beta=0.1)
fn.__name__ = 'connSW'
gs = [fn, BA]

# diffusion models to test
# TODO actually, no need to import in this main.py, because the diffusion models are embeded in the methods
df = [SI, IC, LT]

# configurations of IM (influence maximization) experiments
from xflow.method.im import pi as im_pi, degree as im_degree, sigma as im_sigma, eigen as im_eigen, celf as im_celf,celfpp as im_celfpp, greedy as im_greedy
me = [im_pi, im_eigen]
rt = run (
    graph = gs, diffusion = df,
    method = me, eval = 'im', epoch = 10,
    budget = 10,
    output = [ 'animation', 'csv', 'fig'],
    seeds = seed_random)

# configurations of IBM experiments
# BUG FIX: ibm's eigen was previously imported as `im_eigen`, silently
# shadowing the influence-maximization eigen imported above; alias it as
# `ibm_eigen` to match the other ibm imports.
from xflow.method.ibm import pi as ibm_pi, degree as ibm_degree, sigma as ibm_sigma, eigen as ibm_eigen, greedy as ibm_greedy
me = [ibm_pi, ibm_sigma, ibm_degree]
rt = run (
    graph = gs, diffusion = df,
    method = me, eval = 'ibm', epoch = 10,
    budget = 10,
    output = [ 'animation', 'csv', 'fig'],
    seeds = seed_random)

# configurations of SL (source localization) experiments
from xflow.method.cosasi.source_inference.multiple_source.netsleuth import netsleuth, fast_multisource_netsleuth
from xflow.method.cosasi.source_inference.multiple_source.lisn import fast_multisource_lisn
from xflow.method.cosasi.source_inference.multiple_source.jordan import fast_multisource_jordan_centrality
me = [netsleuth]
rt = run (
    graph = gs, diffusion = df,
    method = me, eval = 'sl', epoch = 10,
    budget = 10,
    output = [ 'animation', 'csv', 'fig'],
    seeds = seed_random)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/examples/__init__.py | examples/__init__.py | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false | |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/examples/FlowTaskEx1.py | examples/FlowTaskEx1.py | from FlowTasks import forward, backward, graph_eval
# ### Testing / Examples
# The three dataset demos below previously repeated the same 20-line
# reporting loop verbatim; it is factored into _report(), which prints
# exactly the same output.

def _report(output):
    """Print a human-readable summary of one forward/backward result set.

    For each result: the SIR parameters, the observation times, and the
    graph_eval metrics comparing every later observation against the first
    one. The observation container type is printed once up front.
    """
    print('Observations are of type:', type(output[0]['observations'][0]['observation']), end='\n\n')
    for result in output:
        observations = result['observations']
        sir = result['SIR_model']
        # print sir values for this result
        print('SIR model has values: ', sep='', end='')
        print(f'beta = {round(sir["beta"],3)}, gamma = {round(sir["gamma"],3)}')
        # print observation time intervals for this result
        print('Observations at time intervals: ', sep='', end='')
        for ss in observations:
            print(f'{ss["time"]}, ', end='')
        print()
        start = observations[0]
        # compare each later observation against the first one
        for i in range(1, len(observations)):
            print(f'Predicting {observations[i]["time"]} is the same as {start["time"]} - ', end='')
            eval_dict = graph_eval(observations[i]["observation"], start["observation"])
            for key, value in eval_dict.items():
                print(f'{key}: {round(value,3)}, ', end='')
            print()
        print()


# a FW1 dataset, with the observations stored as attributes to a networkx graph
_report(forward(1, obs_type='networkx', num_results=5))

# a FW1_2 dataset, with the observations stored in numpy arrays
_report(forward([1, 2], obs_type='numpy', num_results=5))

# a BW1_4 dataset, with the observations stored as pytorch geometric data objects
_report(backward([1, 4], obs_type='torch', num_results=5))
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
androidtrackers/certified-android-devices | https://github.com/androidtrackers/certified-android-devices/blob/47d470ee2a5633a17c31c22c53641c62fcb18519/sync.py | sync.py | #!/usr/bin/env -S uv run --script
# /// script
# dependencies = [
# "requests<3",
# ]
# ///
"""Google certified android devices tracker"""
import difflib
import json
import sys
from datetime import date
from os import environ, system
from pathlib import Path
from time import sleep
from requests import get, post
# GitHub push token and Telegram bot token, both taken from the environment.
GIT_OAUTH_TOKEN = environ.get("GIT_OAUTH_TOKEN_XFU", "")
BOT_TOKEN = environ.get("BOTTOKEN", "")
# Local mode (explicit --local flag, or either credential missing) skips the
# Telegram posting and git push steps in main().
LOCAL_MODE = "--local" in sys.argv or not GIT_OAUTH_TOKEN or not BOT_TOKEN
if LOCAL_MODE:
    print("Running in local mode - no GitHub or Telegram updates will be performed")
# Sync date stamp plus the four indexes populated by save_data().
TODAY = str(date.today())
BY_DEVICE = {}
BY_MODEL = {}
BY_BRAND = {}
BY_NAME = {}
def add_device(brand, name, device, model):
    """Register one record in BY_DEVICE, keyed by device codename.

    Replaces the previous try/except-KeyError list-concatenation with
    setdefault: same resulting mapping, one lookup, no list copies.
    """
    BY_DEVICE.setdefault(device, []).append({"brand": brand, "name": name, "model": model})
def add_model(brand, name, device, model):
    """Register one record in BY_MODEL, keyed by model number.

    Replaces the previous try/except-KeyError list-concatenation with
    setdefault: same resulting mapping, one lookup, no list copies.
    """
    BY_MODEL.setdefault(model, []).append({"brand": brand, "name": name, "device": device})
def add_brand(brand, name, device, model):
    """Register one record in BY_BRAND, keyed by retail branding.

    Replaces the previous try/except-KeyError list-concatenation with
    setdefault: same resulting mapping, one lookup, no list copies.
    """
    BY_BRAND.setdefault(brand, []).append({"device": device, "name": name, "model": model})
def add_name(brand, name, device, model):
    """Register one record in BY_NAME, keyed by marketing name.

    Replaces the previous try/except-KeyError list-concatenation with
    setdefault: same resulting mapping, one lookup, no list copies.
    """
    BY_NAME.setdefault(name, []).append({"brand": brand, "device": device, "model": model})
def save_data(data_list):
    """Save Data to various files

    Rebuilds README.md as a markdown table of all certified devices and,
    as a side effect via the add_* helpers, populates the four JSON
    indexes (by device codename, model, brand, marketing name).

    NOTE(review): rows are parsed by stripping every double quote and then
    splitting on commas, so a quoted field containing a comma would split
    incorrectly — acceptable only if the upstream CSV never quotes commas;
    verify against the feed.
    """
    with Path("README.md").open("w", encoding="utf-8") as markdown:
        markdown.write("# Google Play Certified Android devices\n")
        markdown.write(
            f"Last sync is {TODAY}\n\nhttps://support.google.com/googleplay/"
            "answer/1727131?hl=en\n\n"
        )
        markdown.write("|Retail Branding|Marketing Name|Device|Model|\n")
        markdown.write("|---|---|---|---|\n")
        # data_list[0] is the CSV header row; skip it.
        for line in data_list[1:]:
            i = line.strip().replace('"', "").split(",")
            try:
                brand = i[0].strip()
                name = i[1].strip()
                device = i[2].strip()
                model = i[3].strip()
                markdown.write(f"|{brand}|{name}|{device}|{model}|\n")
                add_device(brand, name, device, model)
                add_model(brand, name, device, model)
                add_brand(brand, name, device, model)
                add_name(brand, name, device, model)
            except IndexError:
                # short/blank row: silently skipped
                pass
    Path("by_device.json").write_bytes(
        json.dumps(BY_DEVICE, indent=1, ensure_ascii=False).encode("utf-8")
    )
    Path("by_model.json").write_bytes(
        json.dumps(BY_MODEL, indent=1, ensure_ascii=False).encode("utf-8")
    )
    Path("by_brand.json").write_bytes(
        json.dumps(BY_BRAND, indent=1, ensure_ascii=False).encode("utf-8")
    )
    Path("by_name.json").write_bytes(
        json.dumps(BY_NAME, indent=1, ensure_ascii=False).encode("utf-8")
    )
def fetch():
    """
    Download the latest supported-devices CSV and return it as a list of lines.

    The feed is UTF-16 encoded; it is decoded before splitting on newlines.
    """
    url = "http://storage.googleapis.com/play_public/supported_devices.csv"
    raw = get(url).content.decode("utf-16")
    return raw.split("\n")
def diff_files():
    """
    Write the lines added in README.md (relative to old.md) to ``changes``.

    Computes a unified diff of the two files and keeps only the added lines
    (prefix "+", excluding the "+++" header), with the prefix stripped.
    Does nothing when either input file is missing.
    """
    old_path = Path("old.md")
    new_path = Path("README.md")
    if not (old_path.exists() and new_path.exists()):
        return
    with old_path.open("r", encoding="utf-8") as old_f, new_path.open("r", encoding="utf-8") as new_f:
        delta = difflib.unified_diff(
            old_f.readlines(), new_f.readlines(), fromfile="old", tofile="new"
        )
        added = [
            ln[1:]
            for ln in delta
            if ln.startswith("+") and not ln.startswith("+++")
        ]
        Path("changes").write_text("".join(added), encoding="utf-8")
def post_to_tg():
    """
    post new devices to telegram channel

    Reads the ``changes`` file produced by diff_files() and sends one
    Markdown-formatted message per newly added table row, sleeping 3s
    between messages (presumably to stay under Telegram rate limits).
    """
    telegram_chat = "@CertifiedAndroidDevices"
    changes_path = Path("changes")
    if not changes_path.exists():
        return
    changes_content = changes_path.read_text(encoding="utf-8")
    for line in changes_content.strip().splitlines():
        if not line.startswith("|"):  # Skip non-table lines if any
            continue
        # Row format: |brand|name|codename|model|
        parts = line.strip("|").split("|")
        if len(parts) < 4:
            print(f"Skipping malformed line: {line}")
            continue
        brand = parts[0].strip()
        name = parts[1].strip()
        codename = parts[2].strip()
        model = parts[3].strip()
        telegram_message = (
            f"New certified device added:\n"
            f"Brand: *{brand}*\n"
            f"Name: *{name}*\n"
            f"*Codename:* `{codename}`\n"
            f"Model: *{model}*"
        )
        params = (
            ("chat_id", telegram_chat),
            ("text", telegram_message),
            ("parse_mode", "Markdown"),
            ("disable_web_page_preview", "yes"),
        )
        telegram_url = f"https://api.telegram.org/bot{BOT_TOKEN}/sendMessage"
        telegram_req = post(telegram_url, params=params)
        telegram_status = telegram_req.status_code
        if telegram_status == 200:
            print("{0}: Telegram Message sent".format(name))
        else:
            print("Telegram Error")
        sleep(3)
def git_commit_push():
    """
    git add - git commit - git push

    The commit message carries the "[skip ci]" marker so CI does not
    re-trigger on the bot's own sync commit.
    """
    # BUG FIX: the marker was written "[skip ci sync: ..." with no closing
    # bracket, so CI systems would not recognize it; close the bracket.
    commit_message = f"[skip ci] sync: {TODAY}"
    push_url = f"https://{GIT_OAUTH_TOKEN}@github.com/androidtrackers/certified-android-devices.git HEAD:master"
    # NOTE(review): built via shell-string interpolation under os.system;
    # the inputs are repo-controlled, but subprocess.run with an argument
    # list would be safer — consider migrating.
    system(
        f'git add README.md *.json && git -c "user.name=XiaomiFirmwareUpdater" '
        f'-c "user.email=xiaomifirmwareupdater@gmail.com" '
        f'commit -m "{commit_message}" && git push -q {push_url}'
    )
def main():
    """
    certified-android-devices tracker

    Pipeline: rotate the previous README to old.md, fetch and rebuild the
    data files, diff old vs new, then (unless in local mode) announce the
    additions on Telegram and push the refreshed files to GitHub.
    """
    readme_path = Path("README.md")
    old_readme_path = Path("old.md")
    if readme_path.exists():
        # Keep the previous run's README as the diff baseline.
        if old_readme_path.exists():
            old_readme_path.unlink()
        readme_path.rename(old_readme_path)
    data_list = fetch()
    save_data(data_list)
    diff_files()
    if not LOCAL_MODE:
        post_to_tg()
        git_commit_push()
if __name__ == "__main__":
main()
| python | MIT | 47d470ee2a5633a17c31c22c53641c62fcb18519 | 2026-01-05T07:03:50.578463Z | false |
inigodelportillo/ITU-Rpy | https://github.com/inigodelportillo/ITU-Rpy/blob/e69587f75bdb7f8b1049259f36eb31a36ca5c570/setup.py | setup.py | """A setuptools based setup module for ITUR-py."""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open as open_codecs
from os import path
import itur
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open_codecs(path.join(here, "README.rst")) as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name="itur",
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=itur.__version__,
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description="A python implementation of the ITU-R P. Recommendations",
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url="https://github.com/inigodelportillo/ITU-Rpy",
# This should be your name or the name of the organization which owns the
# project.
author="Inigo del Portillo",
# This should be a valid email address corresponding to the author listed
# above.
author_email="inigo.del.portillo@gmail.com",
license="MIT",
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 3 - Alpha",
# Indicate who your project is intended for
"Intended Audience :: Telecommunications Industry",
"Topic :: Scientific/Engineering :: Physics",
# Pick your license as you wish
"License :: OSI Approved :: MIT License",
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords="atmopheric-propagation attenuation communications",
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(exclude=["contrib", "docs", "tests"]),
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=["astropy", "scipy", "numpy", "pyproj"], # Optional
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
package_data={ # Optional
"itur": [
"LICENSE.txt",
"README.md",
"data/453/*.npz",
"data/530/*.npz",
"data/676/*.txt",
"data/836/*.npz",
"data/837/*.npz",
"data/839/*.npz",
"data/840/*.npz",
"data/1510/*.npz",
"data/1511/*.npz",
]
},
project_urls={ # Optional
"Bug Reports": "https://github.com/inigodelportillo/ITU-Rpy/issues",
"Source": "https://github.com/inigodelportillo/ITU-Rpy/",
},
)
| python | MIT | e69587f75bdb7f8b1049259f36eb31a36ca5c570 | 2026-01-05T07:12:38.084174Z | false |
inigodelportillo/ITU-Rpy | https://github.com/inigodelportillo/ITU-Rpy/blob/e69587f75bdb7f8b1049259f36eb31a36ca5c570/itur/plotting.py | itur/plotting.py | # -*- coding: utf-8 -*-
"""``itur.plotting`` provides convenient function to plot maps in ITU-Rpy.
This submodule uses ``matplotlib`` and ``cartopy`` as the default library to
plot maps. Alternatively, the user can use ``basemap`` (if installed).
The example below shows the use of ``plot_in_map`` to display the mean surface
temperature on the Earth.
.. code-block:: python
import itur
# Generate a regular grid of latitude and longitudes with 0.1 degree
# resolution.
lat, lon = itur.utils.regular_lat_lon_grid(resolution_lat=0.1,
resolution_lon=0.1)
# Compute the surface mean temperature
T = itur.models.itu1510.surface_mean_temperature(lat, lon)
# Display the results in a map (using cartopy)
ax = itur.plotting.plot_in_map(
T, lat, lon, cmap='jet', vmin=230, vmax=310,
cbar_text='Annual mean surface temperature [K]')
# Display the results in a map (using basemap)
ax = itur.plotting.plot_in_map_basemap(
T, lat, lon, cmap='jet', vmin=230, vmax=310,
cbar_text='Annual mean surface temperature [K]')
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
try:
import cartopy.crs as ccrs
import cartopy.feature as cpf
plotting_installed = True
except BaseException:
plotting_installed = False
def plot_in_map(data, lat=None, lon=None, lat_min=None, lat_max=None,
lon_min=None, lon_max=None, cbar_text='', ax=None,
figsize=(6, 4), **kwargs):
"""Plot the values in `data` in a map using ``cartopy``.
The map uses an PlateCarree projection. Either
{``lat``, ``lon``} or {``lat_min``, ``lat_max``, ``lon_min``, ``lon_max``}
need to be provided as inputs. This function requires that ``cartopy``
and ``matplotlib`` are installed.
Parameters
----------
data : np.ndarray
Data values to be plotted.
lat : np.ndarray
Matrix with the latitudes for each point in data (deg N)
lon : np.ndarray
Matrix with the longitudes for each point in data (deg E)
lat_min : float
Minimum latitude of the data (deg N)
lat_max : float
Maximum latitude of the data (deg N)
lon_min : float
Minimum longitude of the data (deg E)
lat_max : float
Maximum longitude of the data (deg E)
cbar_text : string
Colorbar text caption.
ax : Axes
matplotlib axes where the data will be plotted.
figsize : tuple
Dimensions of the Figure
**kwargs: dict
Key-value arguments that will be passed to the contourf function.
Returns
-------
ax : Axes
The matploltib axes object
"""
import matplotlib.pyplot as plt
if not plotting_installed:
raise RuntimeError('Neither cartopy nor matplotlib are installed. '
'Therefore plot_in_map cannot be used. '
'To use this function you need to install '
'the cartopy and matplotlib libraries')
if all([el is None for el in [lat, lon, lat_min, lon_min,
lat_max, lon_max]]):
raise ValueError('Either {{lat, lon}} or {{lat_min, lon_min, lat_max,'
'lon_max}} need to be provided')
elif lat is not None and lon is not None:
if not(np.shape(lat) == np.shape(lon) and
np.shape(lat) == np.shape(data)):
raise RuntimeError('Shape of latitude grid is not equal to shape'
'of longitude grid')
lat_max = np.max(lat)
lat_min = np.min(lat)
lon_max = np.max(lon)
lon_min = np.min(lon)
if ax is None:
fig = plt.figure(figsize=figsize)
proj = ccrs.PlateCarree(central_longitude=0.0)
ax = fig.add_subplot(111, projection=proj)
ax.set_extent([lon_min, lon_max, lat_min, lat_max],
crs=ccrs.PlateCarree())
ax.coastlines(color='grey', linewidth=0.8)
ax.add_feature(cpf.BORDERS, edgecolor='grey')
parallels = np.arange(-80, 81, 20)
meridians = np.arange(-180., 181., 30.)
ax.gridlines(xlocs=meridians, ylocs=parallels, draw_labels=True,
color='white', linestyle=':', linewidth=0.2)
im = ax.contourf(lon, lat, data, 100, transform=ccrs.PlateCarree(),
**kwargs)
cbar = fig.colorbar(im, orientation='horizontal', fraction=0.046, pad=0.04)
cbar.set_label(cbar_text)
fig.show()
return ax
def plot_in_map_basemap(data, lat=None, lon=None, lat_min=None,
lat_max=None, lon_min=None, lon_max=None,
cbar_text='', ax=None, figsize=(6, 4), **kwargs):
"""Plot the values in `data` in a map using ``basemap``.
The map uses an equidistant cylindrical projection. Either
{``lat``, ``lon``} or {``lat_min``, ``lat_max``, ``lon_min``, ``lon_max``}
to be provided as inputs. This function requires that ``basemap`` and
``matplotlib`` are installed.
Parameters
----------
data : np.ndarray
Data values to be plotted.
lat : np.ndarray
Matrix with the latitudes for each point in data (deg N)
lon : np.ndarray
Matrix with the longitudes for each point in data (deg E)
lat_min : float
Minimum latitude of the data (deg N)
lat_max : float
Maximum latitude of the data (deg N)
lon_min : float
Minimum longitude of the data (deg E)
lat_max : float
Maximum longitude of the data (deg E)
cbar_text : string
Colorbar text caption.
ax : Axes
matplotlib axes where the data will be plotted.
figsize : tuple
Dimensions of the Figure
**kwargs: dict
Key-value arguments that will be passed to the imshow function.
Returns
-------
m : Basemap
The map object generated by Basemap
"""
try:
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
except BaseException:
raise RuntimeError('Basemap is not installed and therefore '
'plot_in_map_basemap cannot be used. To use this '
'function you need to install the basemap library')
if all([el is None for el in [lat, lon, lat_min, lon_min,
lat_max, lon_max]]):
raise ValueError('Either {{lat, lon}} or {{lat_min, lon_min, lat_max,'
'lon_max}} need to be provided')
elif lat is not None and lon is not None:
if not(np.shape(lat) == np.shape(lon) and
np.shape(lat) == np.shape(data)):
raise RuntimeError('Shape of latitude grid is not equal to shape'
'of longitude grid')
lat_max = np.max(lat)
lat_min = np.min(lat)
lon_max = np.max(lon)
lon_min = np.min(lon)
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
m = Basemap(ax=ax, projection='cyl', llcrnrlat=lat_min,
urcrnrlat=lat_max, llcrnrlon=lon_min, urcrnrlon=lon_max,
resolution='l')
m.drawcoastlines(color='grey', linewidth=0.8)
m.drawcountries(color='grey', linewidth=0.8)
parallels = np.arange(-80, 81, 20)
m.drawparallels(parallels, labels=[1, 0, 0, 1], dashes=[2, 1],
linewidth=0.2, color='white')
meridians = np.arange(0., 360., 30.)
m.drawmeridians(meridians, labels=[1, 0, 0, 1], dashes=[2, 1],
linewidth=0.2, color='white')
im = m.imshow(np.flipud(data), **kwargs)
cbar = m.colorbar(im, location='bottom', pad="8%")
cbar.set_label(cbar_text)
return m
| python | MIT | e69587f75bdb7f8b1049259f36eb31a36ca5c570 | 2026-01-05T07:12:38.084174Z | false |
inigodelportillo/ITU-Rpy | https://github.com/inigodelportillo/ITU-Rpy/blob/e69587f75bdb7f8b1049259f36eb31a36ca5c570/itur/utils.py | itur/utils.py | # -*- coding: utf-8 -*-
"""
``itur.utils`` is a utilities library for ITU-Rpy.
This utility library for ITU-Rpy contains methods to:
* Load data and build an interpolator object.
* Prepare the input and output arrays, and handle unit transformations.
* Compute distances and elevation angles between two points on Earth and or space.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numbers
import numpy as np
from pyproj import Geod
from astropy import units as u
# Set the basepath for the module and the basepath for the data
dir_path = os.path.dirname(os.path.realpath(__file__))
dataset_dir = os.path.join(dir_path, 'data/')
# Define numeric types including numpy types
__NUMERIC_TYPES__ = [numbers.Number, int, float, complex,
np.float16, np.float32, np.float64,
np.int8, np.int16, np.int32, np.int64]
# Define the geodetic system using the WSG-84 ellipsoid
__wgs84_geod__ = Geod(ellps='WGS84')
# A very small quantity used to avoid log(0) errors.
EPSILON = 1e-9
def load_data_interpolator(path_lat, path_lon, path_data, interp_fcn,
flip_ud=True):
"""Load a lat-lon tabulated dataset and build an interpolator.
Parameters
----------
path_lat : string
Path for the file containing the latitude values
path_lon : string
Path for the file containing the longitude values
path_data : string
Path for the file containing the data values
interp_fcn : string
The interpolation function to be used
flip_ud : boolean
Whether to flip the latitude and data arrays along the first axis. This
is an artifact of the format that the ITU uses to encode its data,
which is inconsistent across recommendations (in some recommendations,
latitude are sorted in ascending order, in others they are sorted in
descending order).
Returns
-------
interp: interp_fcn
An interpolator that given a latitude-longitude pair, returns the
data value
"""
vals = load_data(os.path.join(dataset_dir, path_data))
lats = load_data(os.path.join(dataset_dir, path_lat))
lons = load_data(os.path.join(dataset_dir, path_lon))
if flip_ud:
return interp_fcn(np.flipud(lats), lons, np.flipud(vals))
else:
return interp_fcn(lats, lons, vals)
def load_data(path, is_text=False, **kwargs):
"""Load data files from `./itur/data/`.
Loads data from a comma-separated values file. The contents of the file
can be numeric or text-based.
Parameters
----------
path : string
Path of the data to load
is_text : bool
Indicates whether the data is text (`True`) or numerical (`False`).
Default value is `False`.
Returns
-------
data: numpy.ndarray
Numpy-array with the data. Numerical data is returned as a float
"""
# TODO: Change method to allow for h5df data too
if not os.path.isfile(path):
raise RuntimeError(f"The path provided is not a file - {path}")
_, file_extension = os.path.splitext(path)
if file_extension == '.npz':
data = np.load(path)['arr_0']
elif file_extension == '.npy':
data = np.load(path)
elif file_extension == '.txt':
if is_text:
data = np.loadtxt(path, dtype=np.string_, delimiter=',', **kwargs)
else:
data = np.genfromtxt(path, dtype=float, delimiter=',', **kwargs)
return data
def get_input_type(inpt):
"""Return the type of the input.
If the input is an object of type Quantity, it returns the type of the
associated value
Parameters
----------
inpt : object
The input object.
Returns
-------
type: type
The type of the input.
"""
if isinstance(inpt, u.Quantity):
return type(inpt.value)
else:
return type(inpt)
def prepare_input_array(input_array):
"""Format an array to be a 2-D numpy-array.
If the contents of `input_array` are 0-D or 1-D, it converts is to an
array with at least two dimensions.
Parameters
----------
input_array : numpy.ndarray, sequence, or number
The input value. It can be a scalar, 1-D array, or 2-D array.
Returns
-------
output_array : numpy.ndarray
An 2-D numpy array with the input values
"""
if input_array is None:
return None
return np.atleast_2d(input_array)
def prepare_output_array(output_array, type_input=None):
"""Format the output to have the same shape and type as the input.
This function is a generic wrapper to format the output of a function
to have the same type as the input. ITU-Rpy makes extensive use of numpy
arrays, but uses this function to return outputs having the same type
that was provided in the input of the function.
"""
# First, differentiate between the units and the value of the output_array
# since the rest of the funcion is mainly focused on casting the value
# of the output_array to the type in type_input
if isinstance(output_array, u.Quantity):
value = output_array.value
unit = output_array.unit
else:
value = output_array
unit = None
# Squeeze output array to remove singleton dimensions
if isinstance(value, np.ndarray) or isinstance(value, list):
value = np.array(value).squeeze()
type_output = get_input_type(output_array)
# First, cast the output_array to the same type of the input
# Check if the output array is a 0-D number and cast it to a float
if (type_input in __NUMERIC_TYPES__ and
(type_output in __NUMERIC_TYPES__) or
((isinstance(output_array, np.ndarray) and output_array.size == 1) or
(not type_output not in __NUMERIC_TYPES__ and
len(output_array) == 1))):
value = float(value)
# Check if the input array was a list and conver appropriately
elif type_input is list:
if isinstance(value, np.ndarray):
value = value.tolist()
else:
value = list(value)
# Otherwise, we assume that the value already has the required type
else:
value = value
# Add the units of the
if unit is not None:
return value * unit
else:
return value
def prepare_quantity(value, units=None, name_val=None):
"""Convert the input to the required units.
The function verifies that the input has the right units and converts
it to the desired units. For example, if a value is introduced in km
but posterior frequencies require this value to be in meters, this
function would be called with `units=u.m`
Parameters
----------
value : astropy.units.Quantity, number, sequence, or np.ndarry
The input value
units : astropy.units
Desired units of the output
name_val : string
Name of the variable (for debugging purposes)
Returns
-------
q : numpy.ndarray
An numpy array with the values converted to the desired units.
"""
if value is None:
return None
# If the units of the value are a temperature
if isinstance(value, u.Quantity):
if value.unit == units:
return value.value
elif units in [u.K, u.deg_C, u.Kelvin, u.Celsius, u.imperial.deg_F]:
return value.to(units, equivalencies=u.temperature()).value
else:
return value.to(units).value
# Process numbers
elif isinstance(value, numbers.Number) and units is not None:
return value
# Process arrays and tuples
elif isinstance(value, np.ndarray) and units is not None:
return value
elif isinstance(value, list) and units is not None:
return np.array([prepare_quantity(v, units, name_val) for v in value])
elif isinstance(value, tuple) and units is not None:
return np.array([prepare_quantity(v, units, name_val) for v in value])
else:
raise ValueError('%s has not the correct format. It must be a value,'
'sequence, array, or a Quantity with %s units' %
(name_val, str(units)))
def compute_distance_earth_to_earth(lat_p, lon_p, lat_grid, lon_grid,
method=None):
"""
Compute the distance between a point and a matrix of (lat, lons).
If the number of elements in `lat_grid` is smaller than 100,000, uses the
WGS84 method, otherwise, uses the Haversine formula.
Parameters
----------
lat_p : number
Latitude projection of the point P (degrees)
lon_p : number
Longitude projection of the point P (degrees)
lat_grid : number, sequence of np.ndarray
Grid of latitude points to which compute the distance (degrees)
lon_grid : number, sequence of np.ndarray
Grid of longitude points to which compute the distance (degrees)
Returns
-------
d : numpy.ndarray
Distance between the point P and each point in (lat_grid, lon_grid)
(km)
"""
if ((method == 'WGS84' and not(method is not None)) or
(type(lat_p) in __NUMERIC_TYPES__) or
(type(lat_grid) in __NUMERIC_TYPES__) or
(len(lat_grid) < 10000) or
(isinstance(lat_grid, np.ndarray) and lat_grid.size < 1e5)):
return compute_distance_earth_to_earth_wgs84(
lat_p, lon_p, lat_grid, lon_grid)
else:
return compute_distance_earth_to_earth_haversine(
lat_p, lon_p, lat_grid, lon_grid)
def compute_distance_earth_to_earth_wgs84(lat_p, lon_p, lat_grid, lon_grid):
"""Compute the distance between points using the WGS84 inverse method.
Compute the distance between a point (P) in (`lat_p`, `lon_p`) and a matrix
of latitude and longitudes (`lat_grid`, `lon_grid`) using the WGS84 inverse
method.
Parameters
----------
lat_p : number
Latitude projection of the point P (degrees)
lon_p : number
Longitude projection of the point P (degrees)
lat_grid : number, sequence of np.ndarray
Grid of latitude points to which compute the distance (degrees)
lon_grid : number, sequence of np.ndarray
Grid of longitude points to which compute the distance (degrees)
Returns
-------
d : numpy.ndarray
Distance between the point P and each point in (lat_grid, lon_grid)
(km)
"""
lat_p = lat_p * np.ones_like(lat_grid)
lon_p = lon_p * np.ones_like(lon_grid)
_a, _b, d = __wgs84_geod__.inv(lon_p, lat_p, lon_grid, lat_grid)
return d / 1e3
def compute_distance_earth_to_earth_haversine(lat_p, lon_p,
lat_grid, lon_grid):
"""Compute the distance between points using the Haversine formula.
Compute the distance between a point (P) in (`lat_s`, `lon_s`) and a matrix
of latitude and longitudes (`lat_grid`, `lon_grid`) using the Haversine
formula.
Parameters
----------
lat_p : number
Latitude projection of the point P (degrees)
lon_p : number
Longitude projection of the point P (degrees)
lat_grid : number, sequence of np.ndarray
Grid of latitude points to which compute the distance (degrees)
lon_grid : number, sequence of np.ndarray
Grid of longitude points to which compute the distance (degrees)
Returns
-------
d : numpy.ndarray
Distance between the point P and each point in (lat_grid, lon_grid)
(km)
References
----------
This is based on the Haversine formula
"""
RE = 6371.0 # Radius of the Earth, km
lat1 = np.deg2rad(lat_grid)
lat2 = np.deg2rad(lat_p)
lon1 = np.deg2rad(lon_grid)
lon2 = np.deg2rad(lon_p)
dlat = lat2 - lat1
dlon = lon2 - lon1
# Compute the distance
a = np.clip((np.sin(dlat / 2.0))**2 + np.cos(lat1) * np.cos(lat2) *
(np.sin(dlon / 2))**2, -1, 1)
c = 2 * np.arcsin(np.sqrt(a))
d = RE * c
return d
def regular_lat_lon_grid(resolution_lat=1, resolution_lon=1, lon_start_0=False,
lat_min=-90, lat_max=90, lon_min=-180, lon_max=180):
"""
Build regular latitude and longitude matrices.
Builds a latitude and longitude coordinate matrix with resolution
`resolution_lat`, `resolution_lon`.
Parameters
----------
resolution_lat: number
Resolution for the latitude axis (deg)
resolution_lon: number
Resolution for the longitude axis (deg)
lon_start_0: boolean
Indicates whether the longitude is indexed using a 0 - 360 scale (True)
or using -180 - 180 scale (False). Default value is False
Returns
-------
lat: numpy.ndarray
Grid of coordinates of the latitude point
lon: numpy.ndarray
Grid of coordinates of the longitude point
"""
if lon_start_0:
lon, lat = np.meshgrid(np.arange(lon_min + 180.0, lon_max + 180.0,
resolution_lon),
np.arange(lat_max, lat_min, - resolution_lat))
else:
lon, lat = np.meshgrid(np.arange(lon_min, lon_max, resolution_lon),
np.arange(lat_max, lat_min, - resolution_lat))
return lat, lon
def elevation_angle(h, lat_s, lon_s, lat_grid, lon_grid):
"""
Compute the elevation angle between a satellite and a point on Earth.
Compute the elevation angle between a satellite located in an orbit
at height h and located above coordinates (`lat_s`, `lon_s`) and a matrix
of latitude and longitudes (`lat_grid`, `lon_grid`).
Parameters
----------
h : float
Orbital altitude of the satellite (km)
lat_s : float
Latitude of the projection of the satellite (degrees)
lon_s : float
Longitude of the projection of the satellite (degrees)
lat_grid : number, sequence of np.ndarray
Grid of latitude points to which compute the elevation angle (degrees)
lon_grid : number, sequence of np.ndarray
Grid of longitude points to which compute the elevation angle (degrees)
Returns
-------
elevation : numpy.ndarray
Elevation angle between the satellite and each point in
(lat_grid, lon_grid) (degrees)
References
----------
[1] http://www.propagation.gatech.edu/ECE6390/notes/ASD5.pdf - Slides 3, 4
"""
h = prepare_quantity(h, u.km, name_val='Orbital altitude of the satellite')
RE = 6371.0 # Radius of the Earth (km)
rs = RE + h
# Transform latitude_longitude values to radians
lat1 = np.deg2rad(lat_grid)
lat2 = np.deg2rad(lat_s)
lon1 = np.deg2rad(lon_grid)
lon2 = np.deg2rad(lon_s)
# Compute the elevation angle as described in
gamma = np.arccos(
np.clip(np.sin(lat2) * np.sin(lat1) +
np.cos(lat1) * np.cos(lat2) * np.cos(lon2 - lon1), -1, 1))
elevation = np.arccos(np.sin(gamma) /
np.sqrt(1 + (RE / rs)**2 -
2 * (RE / rs) * np.cos(gamma))) # In radians
return np.rad2deg(elevation)
| python | MIT | e69587f75bdb7f8b1049259f36eb31a36ca5c570 | 2026-01-05T07:12:38.084174Z | false |
inigodelportillo/ITU-Rpy | https://github.com/inigodelportillo/ITU-Rpy/blob/e69587f75bdb7f8b1049259f36eb31a36ca5c570/itur/__init__.py | itur/__init__.py | # -*- coding: utf-8 -*-
"""
ITU-RPy is a python implementation of the ITU-P R Recommendations.
ITU-Rpy can be used to compute atmospheric attenuation for Earth-to-space
and horizontal paths, for frequencies in the GHz range.
The propagation loss on an Earth-space path and a horizontal-path, relative to
the free-space loss, is the sum of different contributions, namely:
* attenuation by atmospheric gases;
* attenuation by rain, other precipitation and clouds;
* scintillation and multipath effects;
* attenuation by sand and dust storms.
Each of these contributions has its own characteristics as a function of
frequency, geographic location and elevation angle. ITU-Rpy allows for fast,
vectorial computation of the different contributions to the atmospheric
attenuation.
"""
from __future__ import absolute_import, division, print_function
__all__ = ["utils", "plotting"]
import warnings
import astropy.units as u
import numpy as np
import itur.plotting
import itur.utils
from .__version__ import __version__
from .models.itu618 import rain_attenuation, scintillation_attenuation
from .models.itu676 import (
gaseous_attenuation_inclined_path,
gaseous_attenuation_slant_path,
gaseous_attenuation_terrestrial_path,
)
from .models.itu835 import standard_pressure
from .models.itu836 import surface_water_vapour_density, total_water_vapour_content
from .models.itu840 import cloud_attenuation
from .models.itu1510 import surface_mean_temperature
from .models.itu1511 import topographic_altitude
# Ignore divide by zero errors
np.seterr(divide="ignore")
AUTHORS = "Inigo del Portillo"
__all__ = ["atmospheric_attenuation_slant_path"]
def atmospheric_attenuation_slant_path(
lat,
lon,
f,
el,
p,
D,
hs=None,
rho=None,
R001=None,
eta=0.5,
T=None,
H=None,
P=None,
hL=1e3,
Ls=None,
tau=45,
V_t=None,
mode="approx",
return_contributions=False,
include_rain=True,
include_gas=True,
include_scintillation=True,
include_clouds=True,
):
"""
Calculate long-term atmospheric attenuation statistics for slant paths.
This function provides estimates of the long-term statistics of
the slant-path atmospheric attenuation at a given location, for
frequencies up to 55 GHz and percentages of time 0.001% < `p` < 50%.
The model used is based on the guidelines provided in Section 2 of
ITU-R P.618. If optional values are not provided they will be
automatically computed using the procedures described in other ITU-R P.
recommendations.
Parameters
----------
lat : number, sequence, or numpy.ndarray
Latitudes of the receiver points
lon : number, sequence, or numpy.ndarray
Longitudes of the receiver points
f : number or Quantity
Frequency (GHz)
el : sequence, number or Quantity
Elevation angle (degrees)
p : number
Percentage of the time the rain attenuation value is exceeded.
D: number or Quantity
Physical diameter of the earth-station antenna (m)
hs : number, sequence, or numpy.ndarray, optional
Height above mean sea level of the earth station (km). If local data for
the earth station height above mean sea level is not available, an
estimate is obtained from the maps of topographic altitude
given in Recommendation ITU-R P.1511.
rho : number or Quantity, optional
Water vapor density (g/m3). If not provided, an estimate is obtained
from Recommendation Recommendation ITU-R P.836.
R001: number or Quantity, optional
Point rainfall rate for the location for 0.01% of an average year \
(mm/h). If not provided, an estimate is obtained from Recommendation
ITU-R P.837. Some useful values:
* 0.25 mm/h : Drizzle
* 2.5 mm/h : Light rain
* 12.5 mm/h : Medium rain
* 25.0 mm/h : Heavy rain
* 50.0 mm/h : Downpour
* 100 mm/h : Tropical
* 150 mm/h : Monsoon
eta: number, optional
Antenna efficiency. Default value 0.5 (conservative estimate)
T: number, sequence, or numpy.ndarray, optional
Average surface ambient temperature (°C) at the site. If None, uses the
ITU-R P.1510 to estimate the surface ambient temperature.
H: number, sequence, or numpy.ndarray, optional
Average surface relative humidity (%) at the site. If None, uses the
ITU-R P.836 to estimate the wet term of the surface relative humidity.
P: number, sequence, or numpy.ndarray, optional
Average surface pressure (hPa) at the site. If None, uses the
ITU-R P.835 to estimate the average surface pressure.
hL : number, optional
Height of the turbulent layer (m). Default value 1000 m
Ls :number, optional
Slant path length (km). If not provided, it will be computed using the
rain height and the elevation angle. The ITU model does not require
this parameter as an input.
tau : number, optional
Polarization tilt angle relative to the horizontal (degrees)
(tau = 45 deg for circular polarization). Default value is 45
V_t : number or Quantity, optional
Integrated water vapour content along the path (kg/m2 or mm).
If not provided this value is estimated using Recommendation
ITU-R P.836. Default value None
mode : string, optional
Mode for the calculation of gaseous attenuation. Valid values are
'approx', 'exact'. If 'approx' Uses the method in Annex 2 of
Recommendation ITU-R P.676, else uses the method described in
Section 1. Default, 'approx'
return_contributions: bool, optional
Determines whether individual contributions from gases, rain, clouds
and scintillation are returned in addition to the total attenuation
(True), or just the total atmospheric attenuation (False).
Default is False
include_rain: bool, optional
Determines whether to include the rain contribution in the total
atmospheric attenuation calculation or not. Default is True
include_gas: bool, optional
Determines whether to include the gaseous contribution in the total
atmospheric attenuation calculation or not. Default is True
include_scintillation: bool, optional
Determines whether to include the scintillation contribution in the
total atmospheric attenuation calculation or not. Default is True
include_clouds: bool, optional
Determines whether to include the clouds contribution in the total
atmospheric attenuation calculation or not. Default is True
Returns
-------
A : Quantity
Total atmospheric attenuation (dB)
Ag, Ac, Ar, As, A : tuple
Gaseous, Cloud, Rain, Scintillation contributions to total attenuation,
and total attenuation (dB)
References
----------
[1] Propagation data and prediction methods required for the design of
Earth-space telecommunication systems:
https://www.itu.int/dms_pubrec/itu-r/rec/p/R-REC-P.618-12-201507-I!!PDF-E.pdf
"""
if np.logical_or(p < 0.001, p > 50).any():
warnings.warn(
RuntimeWarning(
"The method to compute the total "
"atmospheric attenuation in recommendation ITU-P 618-13 "
"is only recommended for unavailabilities (p) between "
"0.001% and 50 %"
)
)
# This takes account of the fact that a large part of the cloud attenuation
# and gaseous attenuation is already included in the rain attenuation
# prediction for time percentages below 1%. Eq. 64 and Eq. 65 in
# Recommendation ITU 618-12
p_c_g = np.maximum(1, p)
# Estimate the ground station altitude
if hs is None:
hs = topographic_altitude(lat, lon)
# Surface mean temperature
if T is None:
T = surface_mean_temperature(lat, lon)
# Estimate the surface Pressure
if P is None:
P = standard_pressure(hs)
# Estimate the surface Pressure
if V_t is None:
V_t = total_water_vapour_content(lat, lon, p_c_g, hs)
# Estimate the surface water vapour density
if rho is None:
rho = surface_water_vapour_density(lat, lon, p_c_g, hs)
# Compute the attenuation components
if include_rain:
Ar = rain_attenuation(lat, lon, f, el, hs, p, R001, tau, Ls)
else:
Ar = 0 * u.dB
if include_gas:
Ag = gaseous_attenuation_slant_path(f, el, rho, P, T, V_t, hs, mode)
else:
Ag = 0 * u.dB
if include_clouds:
Ac = cloud_attenuation(lat, lon, el, f, p_c_g)
else:
Ac = 0 * u.dB
if include_scintillation:
As = scintillation_attenuation(lat, lon, f, el, p, D, eta, T, H, P, hL)
else:
As = 0 * u.dB
# Compute the total attenuation according to
A = Ag + np.sqrt((Ar + Ac) ** 2 + As ** 2)
if return_contributions:
return Ag, Ac, Ar, As, A
else:
return A
| python | MIT | e69587f75bdb7f8b1049259f36eb31a36ca5c570 | 2026-01-05T07:12:38.084174Z | false |
inigodelportillo/ITU-Rpy | https://github.com/inigodelportillo/ITU-Rpy/blob/e69587f75bdb7f8b1049259f36eb31a36ca5c570/itur/__version__.py | itur/__version__.py | # -*- coding: utf-8 -*-
"""Version information."""
# The following line *must* be the last in the module, exactly as formatted:
__version__ = "0.4.0"
| python | MIT | e69587f75bdb7f8b1049259f36eb31a36ca5c570 | 2026-01-05T07:12:38.084174Z | false |
inigodelportillo/ITU-Rpy | https://github.com/inigodelportillo/ITU-Rpy/blob/e69587f75bdb7f8b1049259f36eb31a36ca5c570/itur/models/itu838.py | itur/models/itu838.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from astropy import units as u
from itur.utils import prepare_quantity
class __ITU838__():
"""Specific attenuation model for rain for use in prediction methods
Available versions include:
* P.838-0 (03/92) (Superseded)
* P.838-1 (10/99) (Superseded)
* P.838-2 (04/03) (Superseded)
* P.838-3 (03/05) (Current version)
"""
# This is an abstract class that contains an instance to a version of the
# ITU-R P.838 recommendation.
def __init__(self, version=3):
if version == 3:
self.instance = _ITU838_3_()
elif version == 2:
self.instance = _ITU838_2_()
elif version == 1:
self.instance = _ITU838_1_()
elif version == 0:
self.instance = _ITU838_0_()
else:
raise ValueError(
'Version ' +
str(version) +
' is not implemented' +
' for the ITU-R P.838 model.')
@property
def __version__(self):
return self.instance.__version__
def rain_specific_attenuation_coefficients(self, f, el, tau):
# Abstract method to compute the rain height
fcn = np.vectorize(self.instance.rain_specific_attenuation_coefficients,
excluded=[1], otypes=[np.ndarray])
return np.array(fcn(f, el, tau).tolist())
def rain_specific_attenuation(self, R, f, el, tau):
# Abstract method to compute the zero isoterm height
k, alpha = self.rain_specific_attenuation_coefficients(f, el, tau)
return k * R**alpha
class _ITU838_3_():
def __init__(self):
self.__version__ = 3
self.year = 2005
self.month = 3
self.link = 'https://www.itu.int/rec/R-REC-P.838-3-200503-I/en'
@staticmethod
def rain_specific_attenuation_coefficients(f, el, tau):
"""
"""
kh = {'aj': [-5.33980, -0.35351, -0.23789, -0.94158],
'bj': [-0.10008, 1.2697, 0.86036, 0.64552],
'cj': [1.13098, 0.454, 0.15354, 0.16817],
'mk': -0.18961,
'ck': 0.71147}
kv = {'aj': [-3.80595, -3.44965, -0.39902, 0.50167],
'bj': [0.56934, -0.22911, 0.73042, 1.07319],
'cj': [0.81061, 0.51059, 0.11899, 0.27195],
'mk': -0.16398,
'ck': 0.63297}
alphah = {'aj': [-0.14318, 0.29591, 0.32177, -5.37610, 16.1721],
'bj': [1.82442, 0.77564, 0.63773, -0.96230, -3.29980],
'cj': [-0.55187, 0.19822, 0.13164, 1.47828, 3.4399],
'ma': 0.67849,
'ca': -1.95537}
alphav = {'aj': [-0.07771, 0.56727, -0.20238, -48.2991, 48.5833],
'bj': [2.3384, 0.95545, 1.1452, 0.791669, 0.791459],
'cj': [-0.76284, 0.54039, 0.26809, 0.116226, 0.116479],
'ma': -0.053739,
'ca': 0.83433}
def curve_fcn(f, a, b, c):
return (a * np.exp(-((np.log10(f) - b) / c)**2))
KH = np.power(10, sum([curve_fcn(f, kh['aj'][j], kh['bj'][j], kh['cj'][j])
for j in range(4)]) + kh['mk'] * np.log10(f) + kh['ck'])
KV = np.power(10, sum([curve_fcn(f, kv['aj'][j], kv['bj'][j], kv['cj'][j])
for j in range(4)]) + kv['mk'] * np.log10(f) + kv['ck'])
alphaH = sum([curve_fcn(f, alphah['aj'][j], alphah['bj'][j], alphah['cj'][j])
for j in range(5)]) + alphah['ma'] * np.log10(f) + alphah['ca']
alphaV = sum([curve_fcn(f, alphav['aj'][j], alphav['bj'][j], alphav['cj'][j])
for j in range(5)]) + alphav['ma'] * np.log10(f) + alphav['ca']
k = (KH + KV + (KH - KV) * np.cos(np.deg2rad(el))**2
* np.cos(np.deg2rad(2 * tau))) / 2.0
alpha = (KH * alphaH + KV * alphaV + (KH * alphaH - KV * alphaV) *
np.cos(np.deg2rad(el))**2 * np.cos(np.deg2rad(2 * tau))) / (2.0 * k)
return k, alpha
class _ITU838_2_():
    """Implementation of recommendation ITU-R P.838-2 (04/03, superseded)."""

    def __init__(self):
        # Recommendation metadata.
        self.__version__ = 2
        self.year = 2003
        self.month = 4
        self.link = 'https://www.itu.int/rec/R-REC-P.838-2-200304-S/en'

    @staticmethod
    def rain_specific_attenuation_coefficients(f, el, tau):
        """Return the power-law coefficients (k, alpha) used to compute the
        rain specific attenuation gamma_R = k * R**alpha.

        Parameters
        ----------
        f : number or numpy.ndarray
            Frequency (GHz)
        el : number or numpy.ndarray
            Elevation angle of the path (degrees)
        tau : number or numpy.ndarray
            Polarization tilt angle relative to the horizontal (degrees)

        Returns
        -------
        k, alpha : tuple
            Coefficients of the power-law relationship (non-dimensional)
        """
        # Regression coefficients of this revision: k is fitted with 3
        # Gaussian-in-log-frequency terms, alpha with 4, for horizontal (h)
        # and vertical (v) polarization.
        kh = {'aj': [0.3364, 0.7520, -0.9466],
              'bj': [1.1274, 1.6644, 2.8496],
              'cj': [0.2916, 0.5175, 0.4315],
              'mk': 1.9925,
              'ck': -4.4123}
        kv = {'aj': [0.3023, 0.7790, -1.0022],
              'bj': [1.1402, 1.6723, 2.9400],
              'cj': [0.2826, 0.5694, 0.4823],
              'mk': 1.9710,
              'ck': -4.4535}
        alphah = {'aj': [0.5564, 0.2237, -0.1961, -0.02219],
                  'bj': [0.7741, 1.4023, 0.5769, 2.2959],
                  'cj': [0.4011, 0.3475, 0.2372, 0.2801],
                  'ma': -0.08016,
                  'ca': 0.8993}
        alphav = {'aj': [0.5463, 0.2158, -0.1693, -0.01895],
                  'bj': [0.8017, 1.4080, 0.6353, 2.3105],
                  'cj': [0.3657, 0.3636, 0.2155, 0.2938],
                  'ma': -0.07059,
                  'ca': 0.8756}

        def curve_fcn(f, a, b, c):
            # Single Gaussian-in-log10(frequency) regression term.
            return a * np.exp(-((np.log10(f) - b) / c)**2)

        # k for each linear polarization; the regression fits log10(k).
        KH = np.power(10, sum([curve_fcn(f, kh['aj'][j], kh['bj'][j], kh['cj'][j])
                               for j in range(3)]) + kh['mk'] * np.log10(f) + kh['ck'])
        KV = np.power(10, sum([curve_fcn(f, kv['aj'][j], kv['bj'][j], kv['cj'][j])
                               for j in range(3)]) + kv['mk'] * np.log10(f) + kv['ck'])
        # alpha for each linear polarization; fitted directly.
        alphaH = sum([curve_fcn(f, alphah['aj'][j], alphah['bj'][j], alphah['cj'][j])
                      for j in range(4)]) + alphah['ma'] * np.log10(f) + alphah['ca']
        alphaV = sum([curve_fcn(f, alphav['aj'][j], alphav['bj'][j], alphav['cj'][j])
                      for j in range(4)]) + alphav['ma'] * np.log10(f) + alphav['ca']
        # Combine H and V values for the actual path elevation and
        # polarization tilt.
        k = (KH + KV + (KH - KV) * np.cos(np.deg2rad(el))**2
             * np.cos(np.deg2rad(2 * tau))) / 2.0
        alpha = (KH * alphaH + KV * alphaV + (KH * alphaH - KV * alphaV) *
                 np.cos(np.deg2rad(el))**2 *
                 np.cos(np.deg2rad(2 * tau))) / (2.0 * k)
        return k, alpha
class _ITU838_1_():
    """Implementation of recommendation ITU-R P.838-1 (10/99, superseded)."""

    def __init__(self):
        self.__version__ = 1
        self.year = 1999
        self.month = 10
        self.link = 'https://www.itu.int/rec/R-REC-P.838-1-199910-S/en'

    @staticmethod
    def rain_specific_attenuation_coefficients(f, el, tau):
        """Return the power-law coefficients (k, alpha) for rain specific
        attenuation.

        The frequency-dependent coefficients k and alpha are tabulated
        (Table 1 of the recommendation) for linear polarizations
        (horizontal: H, vertical: V) and horizontal paths. Values at other
        frequencies are obtained by interpolation, using a logarithmic
        scale for frequency, a logarithmic scale for k and a linear scale
        for alpha. The table has been found sufficiently accurate for
        attenuation predictions up to 55 GHz.
        """
        # Table 1: tabulated coefficients versus frequency (GHz).
        freqs = np.array([1, 2, 4, 6, 7, 8, 10, 12, 15, 20, 25, 30, 35, 40,
                          45, 50, 60, 70, 80, 90, 100, 120, 150, 200, 300,
                          400])
        kh_tab = np.array([0.0000387, 0.000154, 0.00065, 0.00175, 0.00301,
                           0.00454, 0.0101, 0.0188, 0.0367, 0.0751, 0.124,
                           0.187, 0.263, 0.35, 0.442, 0.536, 0.707, 0.851,
                           0.975, 1.06, 1.12, 1.18, 1.31, 1.45, 1.36, 1.32])
        kv_tab = np.array([0.0000352, 0.000138, 0.000591, 0.00155, 0.00265,
                           0.00395, 0.00887, 0.0168, 0.0335, 0.0691, 0.113,
                           0.167, 0.233, 0.31, 0.393, 0.479, 0.642, 0.784,
                           0.906, 0.999, 1.06, 1.13, 1.27, 1.42, 1.35, 1.31])
        ah_tab = np.array([0.912, 0.963, 1.121, 1.308, 1.332, 1.327, 1.276,
                           1.217, 1.154, 1.099, 1.061, 1.021, 0.979, 0.939,
                           0.903, 0.873, 0.826, 0.793, 0.769, 0.753, 0.743,
                           0.731, 0.71, 0.689, 0.688, 0.683])
        av_tab = np.array([0.88, 0.923, 1.075, 1.265, 1.312, 1.31, 1.264,
                           1.2, 1.128, 1.065, 1.03, 1, 0.963, 0.929, 0.897,
                           0.868, 0.824, 0.793, 0.769, 0.754, 0.744, 0.732,
                           0.711, 0.69, 0.689, 0.684])

        log_f = np.log(f)
        log_freqs = np.log(freqs)

        def log_log_interp(table):
            # k interpolates linearly in log(k) vs log(f).
            return np.exp(np.interp(log_f, log_freqs, np.log(table)))

        KH = log_log_interp(kh_tab)
        KV = log_log_interp(kv_tab)
        # alpha interpolates linearly in alpha vs log(f).
        alphaH = np.interp(log_f, log_freqs, ah_tab)
        alphaV = np.interp(log_f, log_freqs, av_tab)

        # Combine H and V values for the path geometry and polarization.
        geom = np.cos(np.deg2rad(el))**2 * np.cos(np.deg2rad(2 * tau))
        k = (KH + KV + (KH - KV) * geom) / 2.0
        alpha = (KH * alphaH + KV * alphaV +
                 (KH * alphaH - KV * alphaV) * geom) / (2.0 * k)
        return k, alpha
class _ITU838_0_():
    """Implementation of recommendation ITU-R P.838-0 (superseded).

    The coefficient computation is forwarded to the P.838-1 implementation.
    """

    def __init__(self):
        self.__version__ = 0
        self.year = 1992
        # NOTE(review): month is set to 8 but the link refers to the
        # "199203" (March 1992) edition -- confirm which is intended.
        self.month = 8
        self.link = 'https://www.itu.int/rec/R-REC-P.838-0-199203-S/en'

    @staticmethod
    def rain_specific_attenuation_coefficients(*args, **kwargs):
        # Delegate to the P.838-1 implementation.
        return _ITU838_1_.rain_specific_attenuation_coefficients(*args,
                                                                 **kwargs)
# Module-level singleton holding the active P.838 model instance; used by
# the module-level functions below and rebound by change_version().
__model = __ITU838__()
def change_version(new_version):
    """
    Change the version of the ITU-R P.838 recommendation currently being used.

    This function changes the model used for the ITU-R P.838 recommendation
    to a different version.

    Parameters
    ----------
    new_version : int
        Number of the version to use.
        Valid values are:
        * 3: Activates recommendation ITU-R P.838-3 (03/05) (Current version)
        * 2: Activates recommendation ITU-R P.838-2 (04/03) (Superseded)
        * 1: Activates recommendation ITU-R P.838-1 (10/99) (Superseded)
        * 0: Activates recommendation ITU-R P.838-0 (03/92) (Superseded)
    """
    global __model
    # Rebind the module-level singleton so subsequent calls use the
    # requested version.
    __model = __ITU838__(new_version)
def get_version():
    """
    Obtain the version of the ITU-R P.838 recommendation currently being used.

    Returns
    -------
    version: int
        Version currently being used.
    """
    return __model.__version__
def rain_specific_attenuation_coefficients(f, el, tau):
    """Compute the values for the coefficients k and α.

    A method to compute the values for the coefficients k and α to compute
    the rain specific attenuation :math:`\\gamma_R` (dB/km)

    Parameters
    ----------
    f : number or Quantity
        Frequency (GHz)
    el : number, sequence, or numpy.ndarray
        Elevation angle of the receiver points
    tau : number, sequence, or numpy.ndarray
        Polarization tilt angle relative to the horizontal (degrees). Tau = 45
        deg for circular polarization)

    Returns
    -------
    k: number
        Coefficient k (non-dimensional)
    α: number
        Coefficient α (non-dimensional)

    References
    ----------
    [1] Rain height model for prediction methods:
    https://www.itu.int/rec/R-REC-P.838/en
    """
    # Strip/validate units before delegating to the active model version.
    f = prepare_quantity(f, u.GHz, 'Frequency')
    return __model.rain_specific_attenuation_coefficients(f, el, tau)
def rain_specific_attenuation(R, f, el, tau):
    """Compute the specific attenuation γ_R (dB/km) given the rainfall rate.

    A method to compute the specific attenuation γ_R (dB/km) from rain. The
    value is obtained from the rainfall rate R (mm/h) using a power law
    relationship.

    .. math::
        \\gamma_R = k R^\\alpha

    Parameters
    ----------
    R : number, sequence, numpy.ndarray or Quantity
        Rain rate (mm/h)
    f : number or Quantity
        Frequency (GHz)
    el : number, sequence, or numpy.ndarray
        Elevation angle of the receiver points
    tau : number, sequence, or numpy.ndarray
        Polarization tilt angle relative to the horizontal (degrees). Tau = 45
        deg for circular polarization)

    Returns
    -------
    γ_R: numpy.ndarray
        Specific attenuation from rain (dB/km)

    References
    ----------
    [1] Rain height model for prediction methods:
    https://www.itu.int/rec/R-REC-P.838/en
    """
    # Strip/validate units, delegate to the active model version and
    # re-attach the dB/km unit to the result.
    R = prepare_quantity(R, u.mm / u.hr, 'Rain rate')
    f = prepare_quantity(f, u.GHz, 'Frequency')
    return __model.rain_specific_attenuation(R, f, el, tau) * u.dB / u.km
| python | MIT | e69587f75bdb7f8b1049259f36eb31a36ca5c570 | 2026-01-05T07:12:38.084174Z | false |
inigodelportillo/ITU-Rpy | https://github.com/inigodelportillo/ITU-Rpy/blob/e69587f75bdb7f8b1049259f36eb31a36ca5c570/itur/models/itu1144.py | itur/models/itu1144.py | # -*- coding: utf-8 -*-
"""
Interpolation methods for the geophysical properties used to compute
propagation effects. These methods are based on those in Recommendation
ITU-R P.1144-7.
References
--------
[1] Guide to the application of the propagation methods of Radiocommunication
Study Group 3: https://www.itu.int/rec/R-REC-P.1144/en
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy.interpolate import griddata, RegularGridInterpolator
def is_regular_grid(lats_o, lons_o):
    """Determine whether ``lats_o`` and ``lons_o`` form a regular grid.

    A grid is regular if the difference (column-wise or row-wise) between
    consecutive values is constant and non-zero across the grid.

    Parameters
    -----------
    lats_o : numpy.ndarray
        Grid of latitude coordinates
    lons_o : numpy.ndarray
        Grid of longitude coordinates

    Returns
    --------
    is_regular: boolean
    """
    d_lon = np.unique(np.diff(lons_o, axis=1))
    d_lat = np.unique(np.diff(lats_o, axis=0))
    # Spacing must be constant along each axis...
    constant_spacing = (np.allclose(d_lon, d_lon[0], rtol=1e-5) and
                        np.allclose(d_lat, d_lat[0], rtol=1e-5))
    # ...and strictly non-zero (degenerate axes are not regular).
    nonzero_spacing = (d_lon != 0).all() and (d_lat != 0).all()
    return constant_spacing and nonzero_spacing
###############################################################################
# Nearest Neighbour Interpolation #
###############################################################################
def nearest_2D_interpolator(lats_o, lons_o, values):
    """Build a 2D nearest-neighbour interpolator function.

    If the grids are regular grids, uses
    scipy.interpolate.RegularGridInterpolator, otherwise,
    scipy.interpolate.griddata.

    Values can be interpolated from the returned function as follows:

    .. code-block:: python

        f = nearest_2D_interpolator(lat_origin, lon_origin, values_origin)
        interp_values = f(lat_interp, lon_interp)

    Parameters
    -----------
    lats_o: numpy.ndarray
        Latitude coordinates of the values used by the interpolator
    lons_o: numpy.ndarray
        Longitude coordinates of the values used by the interpolator
    values: numpy.ndarray
        Values used by the interpolator

    Returns
    --------
    interpolator: function
        Nearest neighbour interpolator function
    """
    # Only the grid interior is inspected for regularity (the borders are
    # excluded from the test).
    interior_regular = is_regular_grid(lats_o[2:-2, 2:-2],
                                       lons_o[2:-2, 2:-2])
    if interior_regular:
        return _nearest_2D_interpolator_reg(lats_o, lons_o, values)
    return _nearest_2D_interpolator_arb(lats_o, lons_o, values)
def _nearest_2D_interpolator_reg(lats_o, lons_o, values):
    # Regular-grid backend. The latitude column is stored descending, so
    # both the latitude axis and the value matrix are flipped to give
    # RegularGridInterpolator the ascending axis it requires.
    lat_axis = np.ascontiguousarray(np.flipud(lats_o[:, 0]))
    lon_axis = np.ascontiguousarray(lons_o[0, :])
    grid_values = np.ascontiguousarray(np.flipud(values))
    return RegularGridInterpolator((lat_axis, lon_axis), grid_values,
                                   method="nearest", bounds_error=False)
def _nearest_2D_interpolator_arb(lats_o, lons_o, values):
    # Arbitrary-grid backend: defer to scattered-data griddata with
    # nearest-neighbour interpolation at call time.
    def interpolator(points):
        return griddata(
            (np.ascontiguousarray(lats_o.ravel()),
             np.ascontiguousarray(lons_o.ravel())),
            np.ascontiguousarray(values.ravel()),
            (points[:, 0], points[:, 1]),
            "nearest",
        )
    return interpolator
###############################################################################
# Bilinear Interpolation #
###############################################################################
def bilinear_2D_interpolator(lats_o, lons_o, values):
    """Build a 2D bilinear interpolator function.

    If the grids are regular grids, uses
    scipy.interpolate.RegularGridInterpolator, otherwise,
    scipy.interpolate.griddata.

    Values can be interpolated from the returned function as follows:

    .. code-block:: python

        f = nearest_2D_interpolator(lat_origin, lon_origin, values_origin)
        interp_values = f(lat_interp, lon_interp)

    Parameters
    -----------
    lats_o: numpy.ndarray
        Latitude coordinates of the values used by the interpolator
    lons_o: numpy.ndarray
        Longitude coordinates of the values used by the interpolator
    values: numpy.ndarray
        Values used by the interpolator

    Returns
    --------
    interpolator: function
        Bilinear interpolator function
    """
    # Only the grid interior is inspected for regularity (the borders are
    # excluded from the test).
    interior_regular = is_regular_grid(lats_o[2:-2, 2:-2],
                                       lons_o[2:-2, 2:-2])
    if interior_regular:
        return _bilinear_2D_interpolator_reg(lats_o, lons_o, values)
    return _bilinear_2D_interpolator_arb(lats_o, lons_o, values)
def _bilinear_2D_interpolator_reg(lats_o, lons_o, values):
    # Regular-grid backend. The latitude column is stored descending, so
    # both the latitude axis and the value matrix are flipped to give
    # RegularGridInterpolator the ascending axis it requires.
    lat_axis = np.ascontiguousarray(np.flipud(lats_o[:, 0]))
    lon_axis = np.ascontiguousarray(lons_o[0, :])
    grid_values = np.ascontiguousarray(np.flipud(values))
    return RegularGridInterpolator((lat_axis, lon_axis), grid_values,
                                   method="linear", bounds_error=False)
def _bilinear_2D_interpolator_arb(lats_o, lons_o, values):
    # Arbitrary-grid backend: defer to scattered-data griddata with linear
    # interpolation at call time.
    def interpolator(points):
        return griddata(
            (np.ascontiguousarray(lats_o.ravel()),
             np.ascontiguousarray(lons_o.ravel())),
            np.ascontiguousarray(values.ravel()),
            (points[:, 0], points[:, 1]),
            "linear",
        )
    return interpolator
###############################################################################
# Bicubic Interpolation #
###############################################################################
def bicubic_2D_interpolator(lats_o, lons_o, values):
    """Build a 2D bicubic interpolator function.

    For regular grids a hand-rolled bicubic scheme over the grid axes is
    used; otherwise the scattered-data scipy.interpolate.griddata backend
    with cubic interpolation is used.

    Values can be interpolated from the returned function as follows:

    .. code-block:: python

        f = nearest_2D_interpolator(lat_origin, lon_origin, values_origin)
        interp_values = f(lat_interp, lon_interp)

    Parameters
    -----------
    lats_o: numpy.ndarray
        Latitude coordinates of the values used by the interpolator
    lons_o: numpy.ndarray
        Longitude coordinates of the values used by the interpolator
    values: numpy.ndarray
        Values used by the interpolator

    Returns
    --------
    interpolator: function
        Bicubic interpolator function
    """
    # Only the grid interior is inspected for regularity (the borders are
    # excluded from the test).
    interior_regular = is_regular_grid(lats_o[2:-2, 2:-2],
                                       lons_o[2:-2, 2:-2])
    if interior_regular:
        return _bicubic_2D_interpolator_reg(lats_o, lons_o, values)
    return _bicubic_2D_interpolator_arb(lats_o, lons_o, values)
def _bicubic_2D_interpolator_arb(lats_o, lons_o, values):
    # Arbitrary-grid backend: defer to scattered-data griddata with cubic
    # interpolation at call time.
    def interpolator(points):
        return griddata(
            (np.ascontiguousarray(lats_o.ravel()),
             np.ascontiguousarray(lons_o.ravel())),
            np.ascontiguousarray(values.ravel()),
            (points[:, 0], points[:, 1]),
            "cubic",
        )
    return interpolator
def _bicubic_2D_interpolator_reg(lats_o, lons_o, values):
    # Hand-rolled bicubic interpolation on a regular grid: each query point
    # is interpolated from its 4x4 neighbourhood of grid values weighted by
    # the cubic kernel K.
    lat_row = lats_o[1:-1, 1]   # interior latitude axis (one-cell margin)
    lon_row = lons_o[1, 1:-1]   # interior longitude axis (one-cell margin)
    I = values                  # full value matrix, indexed by [row, col]

    def K(d):
        # Cubic interpolation kernel: K(0) = 1, K(1) = K(2) = 0 and the
        # support is |d| <= 2.
        d = np.abs(d)
        return np.where(
            np.logical_and(d >= 0, d <= 1),
            1.5 * d**3 - 2.5 * d**2 + 1,
            np.where(
                np.logical_and(d >= 1, d <= 2),
                -0.5 * d**3 + 2.5 * d**2 - 4 * d + 2,
                0,
            ),
        )

    def interpolator(vect):
        # vect is an (N, 2) array of (lat, lon) query points.
        lat = vect[:, 0]
        lon = vect[:, 1]
        # Make sure that we do not hit the limit cases
        # NOTE(review): R averages a "right" and a "left" search, while C
        # uses "right" twice; the two only differ for queries that fall
        # exactly on a grid line -- confirm which form is intended.
        R = (
            (np.searchsorted(lat_row, lat, "right") - 1)
            + (np.searchsorted(lat_row, lat, "left") - 1)
        ) // 2
        C = (
            (np.searchsorted(lon_row, lon, "right") - 1)
            + (np.searchsorted(lon_row, lon, "right") - 1)
        ) // 2
        # Fractional grid coordinates of the query points (offset by one
        # because of the interior margin on lat_row/lon_row).
        diff_lats = np.diff(lat_row)[0]
        diff_lons = np.diff(lon_row)[0]
        r = (lat - lat_row[0]) / diff_lats + 1
        c = (lon - lon_row[0]) / diff_lons + 1
        # Interpolate along the longitude axis for each of the four rows of
        # the 4x4 neighbourhood...
        RI_Rc = (
            I[R, C] * K(c - C)
            + I[R, C + 1] * K(c - (C + 1))
            + I[R, C + 2] * K(c - (C + 2))
            + I[R, C + 3] * K(c - (C + 3))
        )
        RI_R1c = (
            I[R + 1, C] * K(c - C)
            + I[R + 1, C + 1] * K(c - (C + 1))
            + I[R + 1, C + 2] * K(c - (C + 2))
            + I[R + 1, C + 3] * K(c - (C + 3))
        )
        RI_R2_c = (
            I[R + 2, C] * K(c - C)
            + I[R + 2, C + 1] * K(c - (C + 1))
            + I[R + 2, C + 2] * K(c - (C + 2))
            + I[R + 2, C + 3] * K(c - (C + 3))
        )
        RI_R3_c = (
            I[R + 3, C] * K(c - C)
            + I[R + 3, C + 1] * K(c - (C + 1))
            + I[R + 3, C + 2] * K(c - (C + 2))
            + I[R + 3, C + 3] * K(c - (C + 3))
        )
        # ...then combine the four row results along the latitude axis.
        return (
            RI_Rc * K(r - R)
            + RI_R1c * K(r - (R + 1))
            + RI_R2_c * K(r - (R + 2))
            + RI_R3_c * K(r - (R + 3))
        )

    return interpolator
| python | MIT | e69587f75bdb7f8b1049259f36eb31a36ca5c570 | 2026-01-05T07:12:38.084174Z | false |
inigodelportillo/ITU-Rpy | https://github.com/inigodelportillo/ITU-Rpy/blob/e69587f75bdb7f8b1049259f36eb31a36ca5c570/itur/models/itu837.py | itur/models/itu837.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from astropy import units as u
from scipy.optimize import bisect
import scipy.stats as stats
from itur.models.itu1510 import surface_month_mean_temperature
from itur.models.itu1144 import bilinear_2D_interpolator
from itur.utils import (prepare_input_array, prepare_output_array,
load_data_interpolator, prepare_quantity,
get_input_type)
class __ITU837():
    """Characteristics of precipitation for propagation modelling

    Available versions include:
    * P.837-6 (02/12) (Superseded)
    * P.837-7 (12/17) (Current version)

    Not-available versions:
    * P.837-1 (08/94) (Superseded)
    * P.837-2 (10/99) (Superseded)
    * P.837-3 (02/01) (Superseded)
    * P.837-4 (04/03) (Superseded)
    * P.837-5 (08/07) (Superseded)
    """
    # This is an abstract class that contains an instance to a version of the
    # ITU-R P.837 recommendation.

    def __init__(self, version=7):
        # Select the concrete implementation for the requested version.
        if version == 7:
            self.instance = _ITU837_7()
        elif version == 6:
            self.instance = _ITU837_6()
        # elif version == 5:
        #     self.instance = _ITU837_5()
        # elif version == 4:
        #     self.instance = _ITU837_4()
        # elif version == 3:
        #     self.instance = _ITU837_3()
        # elif version == 2:
        #     self.instance = _ITU837_2()
        # elif version == 1:
        #     self.instance = _ITU837_1()
        else:
            raise ValueError(
                f"Version {version} is not implemented for the ITU-R P.837 model."
            )
        # Caches for the lazily-loaded digital-map interpolators.
        self._Pr6 = {}
        self._Mt = {}
        self._Beta = {}
        self._R001 = {}

    @property
    def __version__(self):
        return self.instance.__version__

    def rainfall_probability(self, lat, lon):
        # Delegate to the active version's rainfall_probability.
        return self.instance.rainfall_probability(lat, lon)

    def rainfall_rate(self, lat, lon, p):
        # Delegate to the active version's rainfall_rate, vectorizing over
        # the exceedance probability p (lat/lon are excluded from the
        # vectorization).
        fcn = np.vectorize(self.instance.rainfall_rate, excluded=[0, 1],
                           otypes=[np.ndarray])
        return np.array(fcn(lat, lon, p).tolist())
class _ITU837_7():
    """Implementation of recommendation ITU-R P.837-7 (06/17)."""

    def __init__(self):
        self.__version__ = 7
        self.year = 2017
        self.month = 6
        self.link = 'https://www.itu.int/rec/R-REC-P.837-7-201706-I/en'
        # Month numbers used to index the monthly digital maps.
        self.months = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
        # Caches for the lazily-loaded digital-map interpolators.
        self._Mt = {}
        self._R001 = {}

    def Mt(self, lat, lon, m):
        """Monthly mean total rainfall for month ``m`` at (lat, lon),
        interpolated from the v7 digital maps."""
        if not self._Mt:
            # Load one bilinear interpolator per month on first use.
            for _m in self.months:
                self._Mt[_m] = load_data_interpolator(
                    "837/v7_lat_mt.npz",
                    "837/v7_lon_mt.npz",
                    f"837/v7_mt_month{_m:02d}.npz",
                    bilinear_2D_interpolator,
                )
        # In this recommendation the longitude is encoded with format -180 to
        # 180 whereas we always use 0 - 360 encoding
        lon = np.array(lon)
        lon[lon > 180] = lon[lon > 180] - 360
        return self._Mt[m](
            np.array([lat.ravel(), lon.ravel()]).T).reshape(lat.shape)

    def R001(self, lat, lon):
        """Rainfall rate exceeded for 0.01% of the average year at
        (lat, lon), interpolated from the v7 digital map."""
        if not self._R001:
            self._R001 = load_data_interpolator(
                '837/v7_lat_r001.npz', '837/v7_lon_r001.npz',
                '837/v7_r001.npz', bilinear_2D_interpolator)
        # In this recommendation the longitude is encoded with format -180 to
        # 180 whereas we always use 0 - 360 encoding
        lon = np.array(lon)
        lon[lon > 180] = lon[lon > 180] - 360
        return self._R001(
            np.array([lat.ravel(), lon.ravel()]).T).reshape(lat.shape)

    def rainfall_probability(self, lat_d, lon_d):
        """Annual probability of rain (%) at the given coordinates,
        derived from the monthly temperature and rainfall maps.
        """
        lat_f = lat_d.flatten()
        lon_f = lon_d.flatten()
        # Mean number of days per month (28.25 accounts for leap years).
        Nii = np.array([[31, 28.25, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]])
        # Step 2: For each month, determine the monthly mean surface
        # temperature
        Tii = surface_month_mean_temperature(lat_f, lon_f, self.months).value.T
        # Step 3: For each month, determine the monthly mean total rainfall
        MTii = np.array([self.Mt(lat_f, lon_f, m) for m in self.months]).T
        # Step 4: Convert the monthly mean temperature from Kelvin to Celsius
        tii = Tii - 273.15
        # Step 5: For each month number, calculate rii
        rii = np.where(tii >= 0, 0.5874 * np.exp(0.0883 * tii), 0.5874) # Eq.1
        # Step 6a For each month number, calculate the probability of rain:
        P0ii = 100 * MTii / (24 * Nii * rii)  # Eq. 2
        # Step 7b: cap the monthly probability of rain at 70 %
        rii = np.where(P0ii > 70, 100 / 70. * MTii / (24 * Nii), rii)
        P0ii = np.where(P0ii > 70, 70, P0ii)
        # Step 7: Calculate the annual probability of rain, P0anual
        P0anual = np.sum(Nii * P0ii, axis=-1) / 365.25  # Eq. 3
        return P0anual.reshape(lat_d.shape)

    def rainfall_rate(self, lat_d, lon_d, p):
        """Rainfall rate (mm/h) exceeded for ``p`` % of the average year.

        For p == 0.01 the value is read directly from the R001 digital map;
        otherwise it is found by bisection on the monthly rain model.
        """
        if p == 0.01:
            return self.R001(lat_d, lon_d)

        lat_f = lat_d.flatten()
        lon_f = lon_d.flatten()
        # Mean number of days per month (28.25 accounts for leap years).
        Nii = np.array([[31, 28.25, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]])
        # Step 2: For each month, determine the monthly mean surface
        # temperature
        Tii = surface_month_mean_temperature(lat_f, lon_f, self.months).value.T
        # Step 3: For each month, determine the monthly mean total rainfall
        MTii = np.array([self.Mt(lat_f, lon_f, m) for m in self.months]).T
        # Step 4: Convert the monthly mean temperature from Kelvin to Celsius
        tii = Tii - 273.15
        # Step 5: For each month number, calculate rii
        rii = np.where(tii >= 0, 0.5874 * np.exp(0.0883 * tii), 0.5874)
        # Step 6a For each month number, calculate the probability of rain:
        P0ii = 100 * MTii / (24 * Nii * rii)
        # Step 7b: cap the monthly probability of rain at 70 %
        rii = np.where(P0ii > 70, 100 / 70. * MTii / (24 * Nii), rii)
        P0ii = np.where(P0ii > 70, 70, P0ii)
        # Step 7: Calculate the annual probability of rain, P0anual
        P0anual = np.sum(Nii * P0ii, axis=-1) / 365.25
        # Step 8: Compute the rainfall rate exceeded for p
        def _ret_fcn(P0):
            if p > P0:
                # Requested exceedance exceeds the rain probability itself.
                return 0
            else:
                # Use a bisection method to determine the rate whose
                # annual exceedance probability equals p.
                # NOTE(review): f_Rp sums P_r_ge_Rii over every grid point,
                # not just the point whose P0 is being processed; correct
                # for scalar queries -- verify for vector inputs.
                def f_Rp(Rref):
                    P_r_ge_Rii = P0ii * stats.norm.sf(
                        (np.log(Rref) + 0.7938 - np.log(rii)) / 1.26)
                    P_r_ge_R = np.sum(Nii * P_r_ge_Rii) / 365.25
                    return 100 * (P_r_ge_R / p - 1)

                return bisect(f_Rp, 1e-10, 1000, xtol=1e-5)

        fcn = np.vectorize(_ret_fcn)
        return fcn(P0anual).reshape(lat_d.shape)
class _ITU837_6():
    """Implementation of recommendation ITU-R P.837-6 (02/12)."""

    def __init__(self):
        self.__version__ = 6
        self.year = 2012
        self.month = 2
        self.link = 'https://www.itu.int/rec/R-REC-P.837-6-201202-I/en'
        # Caches for the lazily-loaded ESARAIN digital-map interpolators.
        self._Pr6 = {}
        self._Mt = {}
        self._Beta = {}

    def Pr6(self, lat, lon):
        # ESARAIN Pr6 map value at (lat, lon) -- presumably the percentage
        # probability of rainy 6-hour periods; confirm against P.837-6.
        if not self._Pr6:
            self._Pr6 = load_data_interpolator(
                '837/esarain_lat_v5.npz', '837/esarain_lon_v5.npz',
                '837/esarain_pr6_v5.npz', bilinear_2D_interpolator,
                flip_ud=False)
        return self._Pr6(
            np.array([lat.ravel(), lon.ravel()]).T).reshape(lat.shape)

    def Mt(self, lat, lon):
        # ESARAIN mean total rainfall map value at (lat, lon).
        if not self._Mt:
            self._Mt = load_data_interpolator(
                '837/esarain_lat_v5.npz', '837/esarain_lon_v5.npz',
                '837/esarain_mt_v5.npz', bilinear_2D_interpolator,
                flip_ud=False)
        return self._Mt(
            np.array([lat.ravel(), lon.ravel()]).T).reshape(lat.shape)

    def Beta(self, lat, lon):
        # ESARAIN beta map value at (lat, lon); used below as the ratio
        # splitting Mt into Mc and Ms.
        if not self._Beta:
            self._Beta = load_data_interpolator(
                '837/esarain_lat_v5.npz', '837/esarain_lon_v5.npz',
                '837/esarain_beta_v5.npz', bilinear_2D_interpolator,
                flip_ud=False)
        return self._Beta(
            np.array([lat.ravel(), lon.ravel()]).T).reshape(lat.shape)

    def rainfall_probability(self, lat_d, lon_d):
        """Annual probability of rain (%) at the given coordinates."""
        Pr6 = self.Pr6(lat_d, lon_d)
        Mt = self.Mt(lat_d, lon_d)
        Beta = self.Beta(lat_d, lon_d)
        # Step 3: Convert MT and β to Mc and Ms as follows:
        Ms = (1 - Beta) * Mt
        # Step 4: Derive the percentage probability of rain in an average
        # year, P0:
        P0 = Pr6 * (1 - np.exp(-0.0079 * (Ms / Pr6)))  # Eq. 1
        return P0

    def rainfall_rate(self, lat_d, lon_d, p):
        """Rainfall rate (mm/h) exceeded for ``p`` % of the average year."""
        Pr6 = self.Pr6(lat_d, lon_d)
        Mt = self.Mt(lat_d, lon_d)
        Beta = self.Beta(lat_d, lon_d)
        # Step 3: Convert MT and β to Mc and Ms as follows:
        Mc = Beta * Mt
        Ms = (1 - Beta) * Mt
        # Step 4: Derive the percentage probability of rain in an average
        # year, P0 (forced to 0 where Pr6 is 0):
        P0 = np.where(Pr6 > 0,
                      Pr6 * (1 - np.exp(-0.0079 * (Ms / Pr6))),
                      0)  # Eq. 1

        # Step 5: Derive the rainfall rate, Rp, exceeded for p% of the
        # average year, where p <= P0, from:
        def computeRp(P0, Mc, Ms):
            # Solve the quadratic A*Rp**2 + B*Rp + C = 0 of Eq. 2.
            a = 1.09  # Eq. 2d
            b = (Mc + Ms) / (21797 * P0)  # Eq. 2e
            c = 26.02 * b  # Eq. 2f
            A = a * b  # Eq. 2a
            B = a + c * np.log(p / P0)  # Eq. 2b
            C = np.log(p / P0)  # Eq. 2c
            Rp = (-B + np.sqrt(B**2 - 4 * A * C)) / (2 * A)  # Eq. 2
            return Rp

        # Rp is only defined where p <= P0; elsewhere (and where P0 is NaN)
        # the result is 0.
        Rp = np.where(np.isnan(P0) | (p > P0), 0, computeRp(P0, Mc, Ms))
        return Rp
# Module-level singleton holding the active P.837 model instance; used by
# the module-level functions below and rebound by change_version().
__model = __ITU837()
def change_version(new_version):
    """
    Change the version of the ITU-R P.837 recommendation currently being used.

    This function changes the model used for the ITU-R P.837 recommendation
    to a different version.

    Parameters
    ----------
    new_version : int
        Number of the version to use.
        Valid values are:
        * 7: Activates recommendation ITU-R P.837-7 (12/17) (Current version)
        * 6: Activates recommendation ITU-R P.837-6 (02/12) (Superseded)
    """
    global __model
    # Rebind the module-level singleton so subsequent calls use the
    # requested version.
    __model = __ITU837(new_version)
def get_version():
    """
    Obtain the version of the ITU-R P.837 recommendation currently being used.

    Returns
    -------
    version: int
        Version currently being used.
    """
    return __model.__version__
def rainfall_probability(lat, lon):
    """
    Compute the percentage probability of rain in an average year, P0, at a
    given location.

    Parameters
    ----------
    lat : number, sequence, or numpy.ndarray
        Latitudes of the receiver points
    lon : number, sequence, or numpy.ndarray
        Longitudes of the receiver points

    Returns
    -------
    P0: numpy.ndarray
        Percentage probability of rain in an average year (%)

    References
    ----------
    [1] Characteristics of precipitation for propagation modelling
    https://www.itu.int/rec/R-REC-P.837/en
    """
    # Remember the input container type so the output matches it.
    type_output = get_input_type(lat)
    lat = prepare_input_array(lat)
    lon = prepare_input_array(lon)
    lon = np.mod(lon, 360)  # normalize longitudes to the [0, 360) range
    val = __model.rainfall_probability(lat, lon)
    return prepare_output_array(val, type_output) * u.pct
def rainfall_rate(lat, lon, p):
    """
    Compute the rainfall rate exceeded for p% of the average year at a
    given location.

    Parameters
    ----------
    lat : number, sequence, or numpy.ndarray
        Latitudes of the receiver points
    lon : number, sequence, or numpy.ndarray
        Longitudes of the receiver points
    p : number
        Percentage of time exceeded for p% of the average year

    Returns
    -------
    R001: numpy.ndarray
        Rainfall rate exceeded for p% of the average year (mm/h)

    References
    ----------
    [1] Characteristics of precipitation for propagation modelling
    https://www.itu.int/rec/R-REC-P.837/en
    """
    # Remember the input container type so the output matches it.
    type_output = get_input_type(lat)
    lat = prepare_input_array(lat)
    lon = prepare_input_array(lon)
    lon = np.mod(lon, 360)  # normalize longitudes to the [0, 360) range
    val = __model.rainfall_rate(lat, lon, p)
    return prepare_output_array(val, type_output) * u.mm / u.hr
def unavailability_from_rainfall_rate(lat, lon, R):
    """Compute the percentage of time of the average year that a given
    rainfall rate (R) is exceeded at a given location.

    This method calls `rainfall_rate` successively (using bisection) with
    different values of p.

    Note: This method cannot operate in a vectorized manner.

    Parameters
    ----------
    lat : number
        Latitude of the receiver point
    lon : number
        Longitude of the receiver point
    R : number, sequence, or numpy.ndarray
        Rainfall rate (mm/h)

    Returns
    -------
    p: numpy.ndarray
        Percentage of the average year for which the rainfall rate R is
        exceeded (%)

    References
    ----------
    [1] Characteristics of precipitation for propagation modelling
    https://www.itu.int/rec/R-REC-P.837/en
    """
    lat = prepare_input_array(lat)
    lon = prepare_input_array(lon)
    lon = np.mod(lon, 360)  # normalize longitudes to the [0, 360) range
    R = prepare_quantity(R, u.mm / u.hr, 'Rain rate')

    # TODO: Check for bound on R (between 0 and 200 mm/hr?)
    def fcn(x):
        # Root of this function is the exceedance percentage x at which
        # the rainfall rate equals R.
        return (rainfall_rate(lat, lon, x).value - R - 1e-6)

    return bisect(fcn, 1e-5, 100, maxiter=50)
| python | MIT | e69587f75bdb7f8b1049259f36eb31a36ca5c570 | 2026-01-05T07:12:38.084174Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.