repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/agents/configs/llm_pysc2/config_harass.py | llm_pysc2/agents/configs/llm_pysc2/config_harass.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.config import ProtossAgentConfig
class ConfigPysc2_Harass(ProtossAgentConfig):
  # Config for the harass tasks: only CombatGroup8 remains active (all agents
  # listed below are permanently disabled), and automatic init/worker logic
  # is turned off so the LLM controls everything directly.
  def __init__(self):
    super(ConfigPysc2_Harass, self).__init__()
    # Agents that never activate for this task family.
    self.AGENTS_ALWAYS_DISABLE = [
      'Airborne', 'Builder', 'Commander', 'Developer', 'Defender', 'CombatGroup4',
    ]
    self.ENABLE_INIT_STEPS = False            # no scripted warm-up steps
    self.ENABLE_AUTO_WORKER_MANAGE = False    # no automatic worker assignment
    self.ENABLE_AUTO_WORKER_TRAINING = False  # no automatic probe training
    # Tunables inherited from ProtossAgentConfig, kept at their defaults:
    # self.LLM_SIMULATION_TIME = 0
    # self.MAX_LLM_QUERY_TIMES = 5
    # self.MAX_LLM_WAITING_TIME = 10
    # self.MAX_LLM_RUNTIME_ERROR_TIME = 30
    # self.MAX_LLM_DECISION_FREQUENCY = 1
# self.MAX_NUM_ACTIONS = 3 | python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/agents/configs/llm_pysc2/config_defend.py | llm_pysc2/agents/configs/llm_pysc2/config_defend.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.config import ProtossAgentConfig
from llm_pysc2.lib.llm_action import *
class ConfigPysc2_Defend(ProtossAgentConfig):
  """Config for the defend task.

  A single Stalker group ('CombatGroup1') intercepts enemy airdrops near the
  nexus; every other agent stays disabled.  Stalkers get the basic Protoss
  action set plus two Blink actions.
  """

  def __init__(self):
    super(ConfigPysc2_Defend, self).__init__()
    # 'CombatGroup1' is deliberately absent from the disable list: it is the
    # only active agent.  (The original list contained a stray ' ' placeholder
    # where 'CombatGroup1' had been removed; no agent has that name, so the
    # placeholder is dropped.)
    self.AGENTS_ALWAYS_DISABLE = [
      'Airborne', 'Builder', 'Commander', 'Developer', 'Defender',
      'CombatGroup0', 'CombatGroup2', 'CombatGroup3', 'CombatGroup4',
      'CombatGroup5', 'CombatGroup6', 'CombatGroup7', 'CombatGroup8', 'CombatGroup9'
    ]
    self.ENABLE_INIT_STEPS = False            # no scripted warm-up steps
    self.ENABLE_AUTO_WORKER_MANAGE = False    # no automatic worker assignment
    self.ENABLE_AUTO_WORKER_TRAINING = False  # no automatic probe training
    # Tunables inherited from ProtossAgentConfig, kept at their defaults:
    # self.LLM_SIMULATION_TIME = 0
    # self.MAX_LLM_QUERY_TIMES = 5
    # self.MAX_LLM_WAITING_TIME = 10
    # self.MAX_LLM_RUNTIME_ERROR_TIME = 30
    # self.MAX_LLM_DECISION_FREQUENCY = 1
    # self.MAX_NUM_ACTIONS = 3
    self.AGENTS = {
      'CombatGroup1': {
        'describe': "Protoss garrison troops commander, controls several Stalkers. "
                    "Responsible for intercepting enemy infiltrating forces.",
        # LLM wiring: prompt/translators and credentials come from the base config.
        'llm': {
          'basic_prompt': self.basic_prompt,
          'translator_o': self.translator_o,
          'translator_a': self.translator_a,
          'img_fea': self.ENABLE_IMAGE_FEATURE,
          'img_rgb': self.ENABLE_IMAGE_RGB,
          'model_name': self.model_name,
          'api_base': self.api_base,
          'api_key': self.api_key,
        },
        'team': [
          {'name': 'Stalker-1', 'unit_type': [units.Protoss.Stalker],
           'game_group': 4, 'select_type': 'group'},
        ],
        'action': {
          # Basic actions plus Blink, with and without a preceding unit select.
          units.Protoss.Stalker: PROTOSS_BASIC_ACTION_2 + [
            {'name': 'Ability_Blink_Screen', 'arg': ['screen'],
             'func': [(180, F.Effect_Blink_screen, ('queued', 'screen'))]},
            {'name': 'Select_Unit_Blink_Screen', 'arg': ['tag', 'screen'],
             'func': [(3, F.select_rect, ('select', 'screen1_tag', 'screen2_tag')),
                      (180, F.Effect_Blink_screen, ('now', 'screen'))]},
          ]
        },
      },
    }
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/experiment_llm_pysc2.py | llm_pysc2/bin/experiment_llm_pysc2.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_pysc2 import ConfigPysc2_Harass, ConfigPysc2_Defend, ConfigPysc2_Combat
from llm_pysc2.agents import MainAgent, LLMAgent
import os
def get_config(task):
  """Return the agent config for experiment *task* (1-8), with per-team
  goal lists (``team['task']``) injected.

  Routing: tasks 1-2 -> harass, 3 -> defend, 4-6 -> combat,
  7-8 -> combat with communication and extra agents re-enabled.

  Raises:
    AssertionError: if *task* is not in 1..8.
  """
  if task in [1, 2]:
    config = ConfigPysc2_Harass()
    # Same two-stage goal (infiltrate, then kill workers) for every team
    # of every agent present in the harass config.
    for agent_name in list(config.AGENTS.keys()):
      for team in config.AGENTS[agent_name]['team']:
        team['task'] = [
          {'time': None, 'pos': [52, 32], 'info': "Go to minimap coordinate [52, 32], and try to avoid been detected or attacked before arrival."},
          {'time': None, 'pos': None, 'info': "Kill as much as enemy workers as possible."},
        ]
  elif task in [3]:
    config = ConfigPysc2_Defend()
    # Timed intel updates about incoming airdrops for the garrison group.
    for team in config.AGENTS['CombatGroup1']['team']:
      team['task'] = [
        {'time': '0:00', 'pos': None, 'info': "Protect our nexus and probes from enemy airdrops. At Game time 0:00, "
                                              "2 airdrops detected from minimap [24, 32] and [12, 24] to [16, 32]"},
        {'time': '0:10', 'pos': None, 'info': "Protect our nexus and probes from enemy airdrops. At Game time 0:10, "
                                              "2 airdrops detected from minimap [20, 24] and [20, 40] to [16, 32]"},
        {'time': '0:20', 'pos': None, 'info': "Protect our nexus and probes from enemy airdrops. At Game time 0:20, "
                                              "2 airdrops detected from minimap [24, 32] and [12, 40] to [16, 32]"},
        {'time': '0:30', 'pos': None, 'info': "Protect our nexus and probes from enemy airdrops. At Game time 0:30, "
                                              "2 airdrops detected from minimap [24, 32] and [10, 32] to [16, 32]"},
      ]
  elif task in [4, 5, 6]:
    config = ConfigPysc2_Combat()
    # Rally at map center, then engage after game time 0:10.
    for agent_name in list(config.AGENTS.keys()):
      for team in config.AGENTS[agent_name]['team']:
        team['task'] = [
          {'time': None, 'pos': [32, 32], 'info': "Go to minimap coordinate [32, 32]."},
          {'time': '0:10', 'pos': None,
           'info': "Kill as much as enemy units as possible. If no enemy found, hold the position."},
        ]
  elif task in [7]:
    config = ConfigPysc2_Combat()
    # Communication experiment: re-enable the Commander agent only.
    config.ENABLE_COMMUNICATION = True
    config.MAX_LLM_RUNTIME_ERROR_TIME = 60
    config.AGENTS_ALWAYS_DISABLE.remove('Commander')
    config.AGENTS['Commander']['team'][0]['task'] = [
      {'time': None, 'pos': None, 'info': "Organize frontline commanders to collaborate in defeating enemy troops, "
                                          "you should reach the goal and finish the battle before game time 1:30."},
    ]
  elif task in [8]:
    config = ConfigPysc2_Combat()
    # Multiline-combat experiment: re-enable Airborne, Commander and Developer.
    config.ENABLE_COMMUNICATION = True
    config.AGENTS_ALWAYS_DISABLE.remove('Airborne')
    config.AGENTS_ALWAYS_DISABLE.remove('Commander')
    config.AGENTS_ALWAYS_DISABLE.remove('Developer')
    config.AGENTS['Commander']['team'][0]['task'] = [
      {'time': None, 'pos': None, 'info': "Organize a multiline combat to defeat enemy troops and kill their workers, "
                                          "you should reach all the goals and finish the battle before game time 1:30."},
    ]
  else:
    raise AssertionError("wrong task index")
  return config
task = 1  # experiment task index, 1..8 (routed by get_config)
level = 1  # map difficulty level
map_name = f"pvz_task{task}_level{level}"
enable_image_rgb = False  # adds RGB obs flags to the launch command below
enable_image_feature = False  # adds feature-map obs flags to the launch command below
class MainAgentLLMPysc2(MainAgent):
  """Experiment entry point: builds the task config, wires in LLM
  credentials, and delegates control to MainAgent/LLMAgent."""

  def __init__(self):
    experiment_config = get_config(task)
    # Placeholder credentials -- replace before running a real experiment.
    experiment_config.reset_llm('YOUR-MODEL-NAME', 'YOUR-API-BASE', 'YOUR-API-KEY',
                                enable_image_rgb, enable_image_feature)
    super().__init__(experiment_config, LLMAgent)

  def step(self, obs):
    return super().step(obs)
if __name__ == "__main__":
  # Check the invalid flag combination first.  In the original if/elif chain
  # the final `else` (this error message) was unreachable: when both flags
  # were set, the rgb branch ran instead of reporting the conflict.
  if enable_image_rgb and enable_image_feature:
    print("Can not enable_image_rgb and enable_image_feature at the same time")
  elif enable_image_rgb:
    os.system(f"python -m pysc2.bin.agent --map {map_name} --agent_race protoss --parallel 1 "
              "--agent llm_pysc2.bin.experiment_llm_pysc2.MainAgentLLMPysc2 "
              "--feature_screen_size 256 --feature_minimap_size 64 "
              "--rgb_screen_size 256 --rgb_minimap_size 64 "
              "--action_space RGB")
  elif enable_image_feature:  # parallel experiments with feature map obs do not available currently, set --parallel 1
    os.system(f"python -m pysc2.bin.agent --map {map_name} --agent_race protoss --parallel 1 "
              "--agent llm_pysc2.bin.experiment_llm_pysc2.MainAgentLLMPysc2 "
              "--feature_screen_size 256 --feature_minimap_size 64 "
              "--rgb_screen_size 0 --rgb_minimap_size 0 "
              "--render")
  else:
    # No image observations: plain launch.
    os.system(f"python -m pysc2.bin.agent --map {map_name} --agent_race protoss --parallel 1 "
              "--agent llm_pysc2.bin.experiment_llm_pysc2.MainAgentLLMPysc2")
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/experiment_llm_smac.py | llm_pysc2/bin/experiment_llm_smac.py |
# Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_smac import *
from llm_pysc2.agents import MainAgent, LLMAgent
import os
def get_config(map_name):
  """Return a fresh LLM-SMAC config for *map_name*.

  The original eagerly instantiated all nine config objects just to return
  one; this maps scenario names to config *classes* and instantiates only
  the requested one.

  Raises:
    AssertionError: if *map_name* is not a supported SMAC scenario.
  """
  llm_smac_config_classes = {
    '1c3s5z': ConfigSmac_1c3s5z,
    '2c_vs_64zg': ConfigSmac_2c,
    '2s3z': ConfigSmac_2s3z,
    '2s_vs_1sc': ConfigSmac_2s,
    '3s5z': ConfigSmac_3s5z,
    # NOTE(review): 3s5z_vs_3s6z reuses the 3s5z config — presumably
    # intentional (same friendly composition); confirm with the authors.
    '3s5z_vs_3s6z': ConfigSmac_3s5z,
    '3s_vs_3z': ConfigSmac_3s,
    '3s_vs_4z': ConfigSmac_3s,
    '3s_vs_5z': ConfigSmac_3s,
  }
  try:
    return llm_smac_config_classes[map_name]()
  except KeyError:
    raise AssertionError(f"wrong map_name: {map_name}") from None
map_name = '1c3s5z'  # SMAC scenario to run (must be a key known to get_config)
enable_image_rgb = False  # adds RGB obs flags to the launch command below
enable_image_feature = False  # adds feature-map obs flags to the launch command below
class MainAgentLLMSmac(MainAgent):
  """SMAC experiment entry point: builds the scenario config, wires in LLM
  credentials, and delegates control to MainAgent/LLMAgent."""

  def __init__(self):
    smac_config = get_config(map_name)
    # Placeholder credentials -- replace before running a real experiment.
    smac_config.reset_llm('YOUR-MODEL-NAME', 'YOUR-API-BASE', 'YOUR-API-KEY',
                          enable_image_rgb, enable_image_feature)
    super().__init__(smac_config, LLMAgent)

  def step(self, obs):
    return super().step(obs)
if __name__ == "__main__":
  # Check the invalid flag combination first.  In the original if/elif chain
  # the final `else` (this error message) was unreachable: when both flags
  # were set, the rgb branch ran instead of reporting the conflict.
  if enable_image_rgb and enable_image_feature:
    print("Can not enable_image_rgb and enable_image_feature at the same time")
  elif enable_image_rgb:
    os.system(f"python -m pysc2.bin.agent --map {map_name} --agent_race protoss --parallel 1 "
              "--agent llm_pysc2.bin.experiment_llm_smac.MainAgentLLMSmac "
              "--feature_screen_size 256 --feature_minimap_size 64 "
              "--rgb_screen_size 256 --rgb_minimap_size 64 "
              "--action_space RGB")
  elif enable_image_feature:  # parallel experiments with feature map obs do not available currently, set --parallel 1
    os.system(f"python -m pysc2.bin.agent --map {map_name} --agent_race protoss --parallel 1 "
              "--agent llm_pysc2.bin.experiment_llm_smac.MainAgentLLMSmac "
              "--feature_screen_size 256 --feature_minimap_size 64 "
              "--rgb_screen_size 0 --rgb_minimap_size 0 "
              "--render")
  else:
    # No image observations: plain launch.
    os.system(f"python -m pysc2.bin.agent --map {map_name} --agent_race protoss --parallel 1 "
              "--agent llm_pysc2.bin.experiment_llm_smac.MainAgentLLMSmac")
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_smac/2c_vs_64zg.py | llm_pysc2/bin/llm_smac/2c_vs_64zg.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_smac import *
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMSmac(MainAgent):
  """Main agent wrapper for the SMAC '2c_vs_64zg' scenario."""

  def __init__(self):
    # Build the scenario config inline and hand it to the shared runner.
    super().__init__(ConfigSmac_2c(), LLMAgent)

  def step(self, obs):
    return super().step(obs)
if __name__ == "__main__":
  # Launch through the standard pysc2 agent runner.
  launch_cmd = ("python -m pysc2.bin.agent --map 2c_vs_64zg --agent_race protoss --parallel 1 "
                "--agent llm_pysc2.bin.llm_smac.2c_vs_64zg.MainAgentLLMSmac")
  os.system(launch_cmd)
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_smac/3s_vs_5z.py | llm_pysc2/bin/llm_smac/3s_vs_5z.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_smac import *
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMSmac(MainAgent):
  """Main agent wrapper for the SMAC '3s_vs_5z' scenario."""

  def __init__(self):
    # Build the scenario config inline and hand it to the shared runner.
    super().__init__(ConfigSmac_3s(), LLMAgent)

  def step(self, obs):
    return super().step(obs)
if __name__ == "__main__":
  # Bug fix: the --agent module path pointed at llm_smac.3s_vs_3z, so this
  # script launched the wrong agent module; it must reference 3s_vs_5z.
  os.system("python -m pysc2.bin.agent --map 3s_vs_5z --agent_race protoss --parallel 1 "
            "--agent llm_pysc2.bin.llm_smac.3s_vs_5z.MainAgentLLMSmac")
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_smac/3s5z_vs_3s6z.py | llm_pysc2/bin/llm_smac/3s5z_vs_3s6z.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_smac import *
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMSmac(MainAgent):
  """Main agent wrapper for the SMAC '3s5z_vs_3s6z' scenario."""

  def __init__(self):
    # Build the scenario config inline and hand it to the shared runner.
    super().__init__(ConfigSmac_3s5z(), LLMAgent)

  def step(self, obs):
    return super().step(obs)
if __name__ == "__main__":
  # Launch through the standard pysc2 agent runner.
  launch_cmd = ("python -m pysc2.bin.agent --map 3s5z_vs_3s6z --agent_race protoss --parallel 1 "
                "--agent llm_pysc2.bin.llm_smac.3s5z_vs_3s6z.MainAgentLLMSmac")
  os.system(launch_cmd)
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_smac/2s_vs_1sc.py | llm_pysc2/bin/llm_smac/2s_vs_1sc.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_smac import *
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMSmac(MainAgent):
  """Main agent wrapper for the SMAC '2s_vs_1sc' scenario."""

  def __init__(self):
    # Build the scenario config inline and hand it to the shared runner.
    super().__init__(ConfigSmac_2s(), LLMAgent)

  def step(self, obs):
    return super().step(obs)
if __name__ == "__main__":
  # Launch through the standard pysc2 agent runner.
  launch_cmd = ("python -m pysc2.bin.agent --map 2s_vs_1sc --agent_race protoss --parallel 1 "
                "--agent llm_pysc2.bin.llm_smac.2s_vs_1sc.MainAgentLLMSmac")
  os.system(launch_cmd)
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_smac/3s_vs_3z.py | llm_pysc2/bin/llm_smac/3s_vs_3z.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_smac import *
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMSmac(MainAgent):
  """Main agent wrapper for the SMAC '3s_vs_3z' scenario."""

  def __init__(self):
    # Build the scenario config inline and hand it to the shared runner.
    super().__init__(ConfigSmac_3s(), LLMAgent)

  def step(self, obs):
    return super().step(obs)
if __name__ == "__main__":
  # Launch through the standard pysc2 agent runner.
  launch_cmd = ("python -m pysc2.bin.agent --map 3s_vs_3z --agent_race protoss --parallel 1 "
                "--agent llm_pysc2.bin.llm_smac.3s_vs_3z.MainAgentLLMSmac")
  os.system(launch_cmd)
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_smac/2s3z.py | llm_pysc2/bin/llm_smac/2s3z.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_smac import *
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMSmac(MainAgent):
  """Main agent wrapper for the SMAC '2s3z' scenario."""

  def __init__(self):
    # Build the scenario config inline and hand it to the shared runner.
    super().__init__(ConfigSmac_2s3z(), LLMAgent)

  def step(self, obs):
    return super().step(obs)
if __name__ == "__main__":
  # Launch through the standard pysc2 agent runner.
  launch_cmd = ("python -m pysc2.bin.agent --map 2s3z --agent_race protoss --parallel 1 "
                "--agent llm_pysc2.bin.llm_smac.2s3z.MainAgentLLMSmac")
  os.system(launch_cmd)
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_smac/1c3s5z.py | llm_pysc2/bin/llm_smac/1c3s5z.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_smac import *
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMSmac(MainAgent):
  """Main agent wrapper for the SMAC '1c3s5z' scenario."""

  def __init__(self):
    # Build the scenario config inline and hand it to the shared runner.
    super().__init__(ConfigSmac_1c3s5z(), LLMAgent)

  def step(self, obs):
    return super().step(obs)
if __name__ == "__main__":
  # Launch through the standard pysc2 agent runner.
  launch_cmd = ("python -m pysc2.bin.agent --map 1c3s5z --agent_race protoss --parallel 1 "
                "--agent llm_pysc2.bin.llm_smac.1c3s5z.MainAgentLLMSmac")
  os.system(launch_cmd)
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_smac/3s_vs_4z.py | llm_pysc2/bin/llm_smac/3s_vs_4z.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_smac import *
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMSmac(MainAgent):
  """Main agent wrapper for the SMAC '3s_vs_4z' scenario."""

  def __init__(self):
    # Build the scenario config inline and hand it to the shared runner.
    super().__init__(ConfigSmac_3s(), LLMAgent)

  def step(self, obs):
    return super().step(obs)
if __name__ == "__main__":
  # Bug fix: the --agent module path pointed at llm_smac.3s_vs_3z, so this
  # script launched the wrong agent module; it must reference 3s_vs_4z.
  os.system("python -m pysc2.bin.agent --map 3s_vs_4z --agent_race protoss --parallel 1 "
            "--agent llm_pysc2.bin.llm_smac.3s_vs_4z.MainAgentLLMSmac")
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_smac/3s5z.py | llm_pysc2/bin/llm_smac/3s5z.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_smac import *
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMSmac(MainAgent):
  """Main agent wrapper for the SMAC '3s5z' scenario."""

  def __init__(self):
    # Build the scenario config inline and hand it to the shared runner.
    super().__init__(ConfigSmac_3s5z(), LLMAgent)

  def step(self, obs):
    return super().step(obs)
if __name__ == "__main__":
  # Launch through the standard pysc2 agent runner.
  launch_cmd = ("python -m pysc2.bin.agent --map 3s5z --agent_race protoss --parallel 1 "
                "--agent llm_pysc2.bin.llm_smac.3s5z.MainAgentLLMSmac")
  os.system(launch_cmd)
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_pysc2/pvz_task2.py | llm_pysc2/bin/llm_pysc2/pvz_task2.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_pysc2 import ConfigPysc2_Harass
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMPysc2(MainAgent):
  """Task-2 entry point: harass config whose CombatGroup8 teams get a
  two-stage goal (infiltrate, then kill enemy workers)."""

  def __init__(self):
    harass_config = ConfigPysc2_Harass()
    # Each team receives its own goal list (fresh dicts per team).
    for team in harass_config.AGENTS['CombatGroup8']['team']:
      team['task'] = [
        {'time': None, 'pos': [52, 32],
         'info': "Go to minimap coordinate [52, 32], and try to avoid been detected or attacked before arrival."},
        {'time': None, 'pos': None,
         'info': "Kill as much as enemy workers as possible."},
      ]
    super().__init__(harass_config, LLMAgent)

  def step(self, obs):
    return super().step(obs)
level = 1  # map difficulty level
map_name = f"pvz_task2_level{level}"

if __name__ == "__main__":
  # Launch through the standard pysc2 agent runner.
  launch_cmd = (f"python -m pysc2.bin.agent --map {map_name} --agent_race protoss --parallel 1 "
                "--agent llm_pysc2.bin.llm_pysc2.pvz_task2.MainAgentLLMPysc2")
  os.system(launch_cmd)
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_pysc2/pvz_task7.py | llm_pysc2/bin/llm_pysc2/pvz_task7.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_pysc2 import ConfigPysc2_Combat
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMPysc2(MainAgent):
  """Task-7 entry point: combat config with communication enabled and a
  single collaborative goal injected for the Commander agent."""

  def __init__(self):
    combat_config = ConfigPysc2_Combat()
    combat_config.ENABLE_COMMUNICATION = True
    # Re-enable the Commander agent for this communication experiment.
    combat_config.AGENTS_ALWAYS_DISABLE.remove('Commander')
    commander_goal = {
      'time': None,
      'pos': None,
      'info': "Organize frontline commanders to collaborate in defeating enemy troops, "
              "you should reach the goal and finish the battle before game time 1:30.",
    }
    combat_config.AGENTS['Commander']['team'][0]['task'] = [commander_goal]
    super().__init__(combat_config, LLMAgent)

  def step(self, obs):
    return super().step(obs)
level = 1  # map difficulty level
map_name = f"pvz_task7_level{level}"

if __name__ == "__main__":
  # Bug fix: the original re-assigned map_name to the hard-coded
  # "pvz_task7_level1" here, silently ignoring `level`; use the computed name.
  os.system(f"python -m pysc2.bin.agent --map {map_name} --agent_race protoss --parallel 1 "
            "--agent llm_pysc2.bin.llm_pysc2.pvz_task7.MainAgentLLMPysc2")
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_pysc2/pvz_task4.py | llm_pysc2/bin/llm_pysc2/pvz_task4.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_pysc2 import ConfigPysc2_Combat
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMPysc2(MainAgent):
  """Task-4 entry point: combat config where every team of every agent
  gets the same two-stage goal (rally at map center, then engage)."""

  def __init__(self):
    combat_config = ConfigPysc2_Combat()
    # Fresh goal dicts per team, assigned across all agents.
    for agent_cfg in combat_config.AGENTS.values():
      for team in agent_cfg['team']:
        team['task'] = [
          {'time': None, 'pos': [32, 32],
           'info': "Go to minimap coordinate [32, 32]."},
          {'time': None, 'pos': None,
           'info': "Kill as much as enemy units as possible. If no enemy found, hold the position."},
        ]
    super().__init__(combat_config, LLMAgent)

  def step(self, obs):
    return super().step(obs)
level = 1  # map difficulty level
map_name = f"pvz_task4_level{level}"

if __name__ == "__main__":
  # Bug fix: the original re-assigned map_name to the hard-coded
  # "pvz_task4_level1" here, silently ignoring `level`; use the computed name.
  os.system(f"python -m pysc2.bin.agent --map {map_name} --agent_race protoss --parallel 1 "
            "--agent llm_pysc2.bin.llm_pysc2.pvz_task4.MainAgentLLMPysc2")
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_pysc2/pvz_task5.py | llm_pysc2/bin/llm_pysc2/pvz_task5.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_pysc2 import ConfigPysc2_Combat
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMPysc2(MainAgent):
  """Task-5 entry point: combat config where every team of every agent
  gets the same two-stage goal (rally at map center, then engage)."""

  def __init__(self):
    combat_config = ConfigPysc2_Combat()
    # Fresh goal dicts per team, assigned across all agents.
    for agent_cfg in combat_config.AGENTS.values():
      for team in agent_cfg['team']:
        team['task'] = [
          {'time': None, 'pos': [32, 32],
           'info': "Go to minimap coordinate [32, 32]."},
          {'time': None, 'pos': None,
           'info': "Kill as much as enemy units as possible. If no enemy found, hold the position."},
        ]
    super().__init__(combat_config, LLMAgent)

  def step(self, obs):
    return super().step(obs)
# Task difficulty (1-3); selects the pvz_task5_level{level} map.
level = 1
map_name = f"pvz_task5_level{level}"
if __name__ == "__main__":
  # Launch the pysc2 runner with this module's agent class. A hard-coded
  # reassignment of map_name used to sit here and silently ignored `level`;
  # it was removed so the difficulty chosen above is actually used.
  os.system(f"python -m pysc2.bin.agent --map {map_name} --agent_race protoss --parallel 1 "
            "--agent llm_pysc2.bin.llm_pysc2.pvz_task5.MainAgentLLMPysc2")
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_pysc2/pvz_task8.py | llm_pysc2/bin/llm_pysc2/pvz_task8.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_pysc2 import ConfigPysc2_Combat
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMPysc2(MainAgent):
  """LLM agent entry for pvz_task8: commander-coordinated multiline combat with communication on."""

  def __init__(self):
    config = ConfigPysc2_Combat()
    config.ENABLE_COMMUNICATION = True
    # Re-enable the roles this task depends on; they are disabled by default.
    for agent_name in ('Airborne', 'Commander', 'Developer'):
      config.AGENTS_ALWAYS_DISABLE.remove(agent_name)
    # Only the Commander gets an explicit mission; it coordinates the rest.
    config.AGENTS['Commander']['team'][0]['task'] = [
      {'time': None, 'pos': None,
       'info': "Organize a multiline combat to defeat enemy troops and kill their workers, "
               "must finish the battle before game time 1:30."},
    ]
    super().__init__(config, LLMAgent)

  def step(self, obs):
    """Delegate one environment step to the MainAgent base implementation."""
    return super().step(obs)
# Task difficulty (1-3); selects the pvz_task8_level{level} map.
level = 1
map_name = f"pvz_task8_level{level}"
if __name__ == "__main__":
  # Launch the pysc2 runner with this module's agent class. A hard-coded
  # reassignment of map_name used to sit here and silently ignored `level`;
  # it was removed so the difficulty chosen above is actually used.
  os.system(f"python -m pysc2.bin.agent --map {map_name} --agent_race protoss --parallel 1 "
            "--agent llm_pysc2.bin.llm_pysc2.pvz_task8.MainAgentLLMPysc2")
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_pysc2/pvz_task1.py | llm_pysc2/bin/llm_pysc2/pvz_task1.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_pysc2 import ConfigPysc2_Harass
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMPysc2(MainAgent):
  """LLM agent entry for pvz_task1: sneak CombatGroup7 into the enemy base and kill workers."""

  def __init__(self):
    config = ConfigPysc2_Harass()
    harass_teams = config.AGENTS['CombatGroup7']['team']
    for team in harass_teams:
      # Two-stage mission: infiltrate first, then hunt workers.
      team['task'] = [
        {'time': None, 'pos': [52, 32], 'info': "Go to minimap coordinate [52, 32], and try to avoid been detected or attacked before arrival."},
        {'time': None, 'pos': None, 'info': "Kill as much as enemy workers as possible."},
      ]
    super().__init__(config, LLMAgent)

  def step(self, obs):
    """Delegate one environment step to the MainAgent base implementation."""
    return super().step(obs)
# Task difficulty (1-3); selects the pvz_task1_level{level} map.
level = 1
map_name = f"pvz_task1_level{level}"
if __name__ == "__main__":
  # Launch the pysc2 runner with this module's agent class.
  os.system(f"python -m pysc2.bin.agent --map {map_name} --agent_race protoss --parallel 1 "
            "--agent llm_pysc2.bin.llm_pysc2.pvz_task1.MainAgentLLMPysc2")
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_pysc2/pvz_task3.py | llm_pysc2/bin/llm_pysc2/pvz_task3.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_pysc2 import ConfigPysc2_Defend
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMPysc2(MainAgent):
  """LLM agent entry for pvz_task3: defend the nexus against scripted enemy airdrops."""

  # (game time, minimap drop origins) of each scripted airdrop wave; all
  # waves converge on minimap [16, 32].
  _WAVES = (
    ('0:00', '[24, 32] and [12, 24]'),
    ('0:10', '[20, 24] and [20, 40]'),
    ('0:20', '[24, 32] and [12, 40]'),
    ('0:30', '[24, 32] and [10, 32]'),
  )

  def __init__(self):
    config = ConfigPysc2_Defend()
    for team in config.AGENTS['CombatGroup1']['team']:
      # Build one timed task entry per airdrop wave.
      team['task'] = [
        {'time': wave_time, 'pos': None,
         'info': f"Protect our nexus and probes from enemy airdrops. At Game time {wave_time}, "
                 f"2 airdrops detected from minimap {origins} to [16, 32]"}
        for wave_time, origins in self._WAVES
      ]
    super().__init__(config, LLMAgent)

  def step(self, obs):
    """Delegate one environment step to the MainAgent base implementation."""
    return super().step(obs)
# Task difficulty (1-3); selects the pvz_task3_level{level} map.
level = 1
map_name = f"pvz_task3_level{level}"
if __name__ == "__main__":
  # Launch the pysc2 runner with this module's agent class. A hard-coded
  # reassignment of map_name used to sit here and silently ignored `level`;
  # it was removed so the difficulty chosen above is actually used.
  os.system(f"python -m pysc2.bin.agent --map {map_name} --agent_race protoss --parallel 1 "
            "--agent llm_pysc2.bin.llm_pysc2.pvz_task3.MainAgentLLMPysc2")
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/bin/llm_pysc2/pvz_task6.py | llm_pysc2/bin/llm_pysc2/pvz_task6.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.agents.configs.llm_pysc2 import ConfigPysc2_Combat
from llm_pysc2.agents import MainAgent, LLMAgent
import os
class MainAgentLLMPysc2(MainAgent):
  """LLM agent entry for pvz_task6: gather every team at the map centre and engage."""

  def __init__(self):
    config = ConfigPysc2_Combat()
    for _name, agent_cfg in config.AGENTS.items():
      for team in agent_cfg['team']:
        # Fresh mission objects per team (never shared between teams).
        mission = [
          {'time': None, 'pos': [32, 32], 'info': "Go to minimap coordinate [32, 32]."},
          {'time': None, 'pos': None, 'info': "Kill as much as enemy units as possible. If no enemy found, hold the position."},
        ]
        team['task'] = mission
    super().__init__(config, LLMAgent)

  def step(self, obs):
    """Delegate one environment step to the MainAgent base implementation."""
    return super().step(obs)
# Task difficulty (1-3); selects the pvz_task6_level{level} map.
level = 1
map_name = f"pvz_task6_level{level}"
if __name__ == "__main__":
  # Launch the pysc2 runner with this module's agent class. A hard-coded
  # reassignment of map_name used to sit here and silently ignored `level`;
  # it was removed so the difficulty chosen above is actually used.
  os.system(f"python -m pysc2.bin.agent --map {map_name} --agent_race protoss --parallel 1 "
            "--agent llm_pysc2.bin.llm_pysc2.pvz_task6.MainAgentLLMPysc2")
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/maps/__init__.py | llm_pysc2/maps/__init__.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Register/import the maps, and offer a way to create one by name.
Users of maps should import this module:
from pysc2 import maps
and create the maps by name:
maps.get("MapName")
If you want to create your own map, then import the map lib and subclass Map.
Your subclass will be implicitly registered as a map that can be constructed by
name, as long as it is imported somewhere.
"""
from pysc2.maps import ladder
from pysc2.maps import lib
from pysc2.maps import melee
from pysc2.maps import mini_games
from pysc2.maps import llm_pysc2
from pysc2.maps import llm_smac
# Use `get` to create a map by name.
# Re-exported aliases so callers can write maps.get("Name") / maps.get_maps()
# without importing pysc2.maps.lib directly.
get = lib.get
get_maps = lib.get_maps
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/maps/llm_smac.py | llm_pysc2/maps/llm_smac.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pysc2.maps import lib
class llm_smac(lib.Map):
  """Base pysc2 map definition for the llm_smac (SMAC-style) map set.

  Concrete maps are generated below by subclassing with a `filename`.
  """
  directory = "llm_smac"  # subdirectory of the SC2 Maps folder
  download = ""  # no download URL; the maps ship with the repo
  players = 2
  game_steps_per_episode = 22 * 60 * 30  # 30 minute limit.
# Names of the bundled SMAC scenario maps; each becomes a Map subclass below.
llm_smac_maps = {
  "1c3s5z",
  "2c_vs_64zg",
  "2s3z",
  "2s_vs_1sc",
  "3s5z",
  "3s5z_vs_3s6z",
  "3s_vs_3z",
  "3s_vs_4z",
  "3s_vs_5z",
}
def get_smac_map_registry():
  """Return the set of registered SMAC map names (the shared set, not a copy)."""
  return llm_smac_maps
# Dynamically create one llm_smac subclass per scenario and publish it as a
# module attribute so pysc2.maps.get(name) can resolve it by class name.
for name in llm_smac_maps:
  globals()[name] = type(name, (llm_smac,), dict(filename=name))
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/maps/llm_pysc2.py | llm_pysc2/maps/llm_pysc2.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pysc2.maps import lib
class llm_pysc2(lib.Map):
  """Base pysc2 map definition for the llm_pysc2 task maps (registered below)."""
  directory = "llm_pysc2"  # subdirectory of the SC2 Maps folder
  download = ""  # no download URL; the maps ship with the repo
  players = 2
  # game_steps_per_episode = 16 * 60 * 30 # 30 minute limit.
  game_steps_per_episode = 22 * 60 * 30 # 30 minute limit.
# Map-name registry for the llm_pysc2 task suite: the debug map plus
# pvz_task{1..8} at difficulty level{1..3}. Generated instead of hand-written
# (the old 25-entry literal was easy to typo); ordering is level-major to
# match the original list exactly.
llm_pysc2_maps = ["debug_map"] + [
  f"pvz_task{task}_level{level}"
  for level in (1, 2, 3)
  for task in range(1, 9)
]
# Dynamically create one llm_pysc2 subclass per map name and publish it as a
# module attribute so pysc2.maps.get(name) can resolve it by class name.
for name in llm_pysc2_maps:
  globals()[name] = type(name, (llm_pysc2,), dict(filename=name))
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/lib/llm_client.py | llm_pysc2/lib/llm_client.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import google.generativeai as genai
from llamaapi import LlamaAPI
from zhipuai import ZhipuAI
import openai
from loguru import logger
import threading
import random
import time
# import json
def gpt_query_runtime(self, ):
  """Blocking OpenAI chat-completion call; runs as a worker-thread target.

  Records prompt/completion token counts on `self`, then publishes the reply
  text as `self.llm_response` — the completion signal that GptClient.query()
  polls for. The reply is assigned last on purpose.
  """
  response = openai.ChatCompletion.create(
    model=self.model_name,
    messages=self.messages,
    temperature=self.temperature,
  )
  usage = response["usage"]
  self.query_token_in = usage["prompt_tokens"]
  self.query_token_out = usage["completion_tokens"]
  self.llm_response = response["choices"][0]["message"]["content"]
def claude_query_runtime(self, ):
  """Blocking chat call for Claude models; runs as a worker-thread target."""
  # NOTE(review): identical call path to gpt_query_runtime — Claude is
  # presumably served through an OpenAI-compatible endpoint (confirm); only
  # the response access style differs (attributes instead of dict keys).
  llm_response = openai.ChatCompletion.create(
    model=self.model_name,
    messages=self.messages,
    temperature=self.temperature
  )
  self.query_token_in = llm_response.usage.prompt_tokens
  self.query_token_out = llm_response.usage.completion_tokens
  # Assigned last: this is the completion signal polled by query().
  self.llm_response = llm_response.choices[0].message.content
def llama_query_runtime(self, ):
  """Blocking LlamaAPI chat call; runs as a worker-thread target.

  Records token counts on `self` (0 when the provider omits them), then
  publishes the reply text as `self.llm_response`, the completion signal
  polled by GptClient.query().
  """
  llm_response = self.client.run({
    'model': self.model_name,
    'messages': self.messages,
    'temperature': self.temperature,
  }).json()
  # Some LlamaAPI responses omit "usage" (or individual counters). Use .get()
  # fallbacks instead of the old `'usage' in llm_response.keys()` double
  # lookup, which still raised KeyError when "usage" existed but a counter
  # was missing.
  usage = llm_response.get('usage') or {}
  self.query_token_in = usage.get('prompt_tokens', 0)
  self.query_token_out = usage.get('completion_tokens', 0)
  self.llm_response = llm_response['choices'][0]['message']['content']
def glm_query_runtime(self, ):
  """Blocking ZhipuAI GLM chat call (text models); runs as a worker-thread target."""
  llm_response = self.client.chat.completions.create(
    model=self.model_name,  # name of the model to call
    messages=self.messages,
    temperature=self.temperature
  )
  # Token usage is not read from the GLM response here; counters stay 0.
  self.query_token_in = 0
  self.query_token_out = 0
  # Assigned last: this is the completion signal polled by query().
  self.llm_response = llm_response.choices[0].message.content
def glm4v_query_runtime(self, ):
  """Blocking ZhipuAI GLM-4V (vision) chat call; also records token usage."""
  llm_response = self.client.chat.completions.create(
    model=self.model_name,  # name of the model to call
    messages=self.messages,
    temperature=self.temperature
  )
  self.query_token_in = llm_response.usage.prompt_tokens
  self.query_token_out = llm_response.usage.completion_tokens
  # Assigned last: this is the completion signal polled by query().
  self.llm_response = llm_response.choices[0].message.content
# def gemini_query_runtime(self, ):
# self.llm_response = self.model.generate_content(
# messages=self.messages, generation_config=genai.types.GenerationConfig(temperature=self.temperature)).text
# def qwen2_query_runtime(self, ):
# llm_response = openai.ChatCompletion.create(
# model=self.model_name, # 填写需要调用的模型名称
# messages=self.messages,
# temperature=self.temperature
# )
# self.query_token_in = llm_response.usage.prompt_tokens
# self.query_token_out = llm_response.usage.completion_tokens
# self.llm_response = llm_response.choices[0].message.content
class GptClient:
  """Threaded chat-completion client used by one LLMAgent.

  Wraps one model endpoint for one agent: builds the message list from the
  stored system/example prompts, runs `self.query_runtime` in a worker
  thread with retry + timeout handling, and keeps per-query and cumulative
  token/latency statistics.
  """
  def __init__(self, name, log_id, config):
    # name: key into config.AGENTS; log_id: identifier used in log lines.
    self.model_name = config.AGENTS[name]['llm']['model_name']
    self.api_base = config.AGENTS[name]['llm']['api_base']
    self.api_key = config.AGENTS[name]['llm']['api_key']
    self.temperature = config.temperature
    # NOTE(review): openai credentials are module-global, so the last client
    # constructed wins if agents are configured with different
    # api_base/api_key values — confirm all agents share one endpoint.
    openai.api_base = self.api_base
    openai.api_key = self.api_key
    self.agent_name = name
    self.log_id = log_id
    self.config = config
    # Prompt fragments; filled in by the owning agent before query() runs.
    self.system_prompt = ''
    self.example_i_prompt = ''
    self.example_o_prompt = ''
    self.messages = []
    # The worker thread publishes the reply text here; query() polls it.
    self.llm_response = None
    self.query_runtime = gpt_query_runtime
    if 'gpt' in self.model_name or self.model_name == 'default':
      logger.info(f"[ID {self.log_id}] {self.agent_name} {self.model_name} GptClient initialized")
    # Per-query and cumulative latency / token statistics.
    self.num_query = 0
    self.query_time = 0
    self.query_token_in = 0
    self.query_token_out = 0
    self.total_query_time = 0
    self.total_query_token_in = 0
    self.total_query_token_out = 0
    self.ave_query_time = 0
    self.ave_query_token_in = 0
    self.ave_query_token_out = 0
  def wrap_message(self, obs_prompt, base64_image):
    """Build self.messages from the stored prompts plus this observation.

    base64_image is attached only when self.model_name is listed in
    vision_model_names; otherwise it is discarded with a warning.
    """
    if (base64_image is not None) and (self.model_name not in vision_model_names):
      logger.warning(f"[ID {self.log_id}] {self.agent_name} {self.model_name}: Model do not accept img, img discarded")
    if (base64_image is None) and (self.model_name in vision_model_names):
      logger.warning(f"[ID {self.log_id}] {self.agent_name} {self.model_name}: Vision available but img disabled")
    if (base64_image is None) or (self.model_name not in vision_model_names):
      # Text-only message list.
      self.messages = [
        {"role": "system", "content": self.system_prompt},
        {"role": "user", "content": self.example_i_prompt},
        {"role": "assistant", "content": self.example_o_prompt},
        {"role": "user", "content": obs_prompt}
      ]
    else:
      # Message list with the screenshot attached, in the required format.
      self.messages = [
        {"role": "system", "content": self.system_prompt},
        {"role": "user", "content": self.example_i_prompt},
        {"role": "assistant", "content": self.example_o_prompt},
        # TODO: Incorrect img usage, to be update in recent commit
        {"role": "user", "content": [
          {"type": "text", "text": obs_prompt},  # obs_prompt
          {"type": "image_url", "image_url": {
            "url": f"data:image/png;base64,{base64_image}"
          }}
        ]},
      ]
  def query(self, obs_prompt, base64_image=None):
    """Send one observation to the LLM and return the reply text.

    Retries up to config.MAX_LLM_QUERY_TIMES with exponential backoff. Each
    attempt runs self.query_runtime in a worker thread and polls
    self.llm_response, raising RuntimeError after
    config.MAX_LLM_RUNTIME_ERROR_TIME seconds without an answer.
    NOTE(review): exceptions raised inside the worker thread are not
    propagated; such attempts only fail through this timeout.
    """
    # Rebuild the messages list for this observation.
    self.wrap_message(obs_prompt, base64_image)
    # Try to send the request and obtain a reply.
    max_retries = self.config.MAX_LLM_QUERY_TIMES
    for retries in range(max_retries):
      try:
        # tracemalloc.start()
        logger.success(f"[ID {self.log_id}] {self.agent_name} Start calling llm api!")
        logger.debug(f"[ID {self.log_id}] {self.agent_name} input prompt: \n{obs_prompt}")
        self.thread = threading.Thread(target=self.query_runtime, args=(self,))  # trailing comma keeps args a tuple
        self.thread.start()
        # Timeout guard: poll until the worker publishes a str reply.
        query_start_time = float(time.time())
        while not isinstance(self.llm_response, str):
          time.sleep(0.1)
          if float(time.time()) - query_start_time > self.config.MAX_LLM_RUNTIME_ERROR_TIME:
            logger.error(f"[ID {self.log_id}] {self.agent_name} LLM query runtime error")
            raise RuntimeError(f"{self.agent_name} LLM query runtime error")
        if isinstance(self.llm_response, str):
          # Update per-query and cumulative statistics.
          self.num_query += 1
          self.query_time = float(time.time()) - query_start_time
          self.total_query_time += self.query_time
          self.total_query_token_in += self.query_token_in
          self.total_query_token_out += self.query_token_out
          self.ave_query_time = self.total_query_time / self.num_query
          self.ave_query_token_in = self.total_query_token_in / self.num_query
          self.ave_query_token_out = self.total_query_token_out / self.num_query
          answer = self.llm_response
          logger.success(f"[ID {self.log_id}] {self.agent_name} Get llm response!")
          logger.debug(f"[ID {self.log_id}] {self.agent_name} llm response: \n{answer}")
          # Reset the completion signal for the next query.
          self.llm_response = None
          return answer
      except Exception as e:
        # Log the error (including the timeout RuntimeError raised above).
        logger.error(f"[ID {self.log_id}] {self.agent_name} Error when calling the OpenAI API: {e}")
        # print(f"Error when calling the OpenAI API: {e}")
        # After the final attempt, return a canned apology instead of raising.
        if retries >= max_retries - 1:
          logger.error \
            (f"[ID {self.log_id}] {self.agent_name} Maximum number of retries reached. The OpenAI API is not responding.")
          return "I'm sorry, but I am unable to provide a response at this time due to technical difficulties."
        # Wait before retrying, using an exponential backoff strategy.
        sleep_time = min((2 ** retries) + random.random(), 8 + random.random())
        logger.info(f"[ID {self.log_id}] {self.agent_name} Waiting for {sleep_time} seconds before retrying...")
        time.sleep(sleep_time)
    logger.error(f"[ID {self.log_id}] {self.agent_name} Can not get llm response after try {max_retries} times!")
    return f'[ID {self.log_id}] {self.agent_name} Can not get llm response after try {max_retries} times!'
class O1Client(GptClient):
  """GptClient variant for o1-series models.

  As built here, no system role is used and only the default temperature is
  sent: the system prompt is delivered as an ordinary user turn instead.
  """
  def __init__(self, name, log_id, config):
    super(O1Client, self).__init__(name, log_id, config)
    self.query_runtime = gpt_query_runtime
    self.temperature = 1 # Only the default (1) value is supported.
    self.client = openai
    logger.info(f"[ID {self.log_id}] {self.agent_name} {self.model_name} O1Client initialized")
  def wrap_message(self, obs_prompt, base64_image):
    # NOTE(review): the base-class result is overwritten immediately below;
    # the super() call only contributes its image-related warnings — confirm
    # that is intended.
    super().wrap_message(obs_prompt, base64_image)
    # Text-only message list: the system prompt becomes a user turn followed
    # by a stub assistant acknowledgement (no "system" role is used).
    self.messages = [
      {"role": "user", "content": self.system_prompt},
      {"role": "assistant", "content": "Understand."},
      {"role": "user", "content": self.example_i_prompt},
      {"role": "assistant", "content": self.example_o_prompt},
      {"role": "user", "content": obs_prompt}
    ]
class ClaudeClient(GptClient):
  """GptClient variant that routes queries through claude_query_runtime."""

  def __init__(self, name, log_id, config):
    super().__init__(name, log_id, config)
    self.client = openai
    self.query_runtime = claude_query_runtime
    logger.info(f"[ID {self.log_id}] {self.agent_name} {self.model_name} ClaudeClient initialized")
class LlamaClient(GptClient):
  """GptClient variant backed by the LlamaAPI SDK."""

  def __init__(self, name, log_id, config):
    super().__init__(name, log_id, config)
    self.query_runtime = llama_query_runtime
    # LlamaAPI needs its own SDK client instead of the openai module.
    self.client = LlamaAPI(self.api_key, hostname=self.api_base)
    logger.info(f"[ID {self.log_id}] {self.agent_name} {self.model_name} LlamaClient initialized")
class GlmClient(GptClient):
  """GptClient variant that talks to ZhipuAI's GLM endpoint."""

  def __init__(self, name, log_id, config):
    super().__init__(name, log_id, config)
    # ZhipuAI ships its own SDK client instead of the openai module.
    self.client = ZhipuAI(api_key=self.api_key)
    self.query_runtime = glm_query_runtime
    logger.info(f"[ID {self.log_id}] {self.agent_name} {self.model_name} GlmClient initialized")
# class GeminiClient(GptClient):
# def __init__(self, name, log_id, config):
# super(GeminiClient).__init__(self, name, log_id, config)
# self.query_runtime = gemini_query_runtime
# self.model = genai.GenerativeModel(config.model_name)
# class QWen2Client(GptClient):
# def __init__(self, name, log_id, config):
# super(QWen2Client, self).__init__(name, log_id, config)
# self.query_runtime = qwen2_query_runtime
# self.client = openai
# for config's auto check: models listed in vision_model_names accept image
# input (see GptClient.wrap_message); video_model_names is a placeholder.
vision_model_names = [
  'gpt-4o', 'gpt-4-1106-vision-preview', 'gpt-4v-1106', 'gpt-4v-0409',
  'glm-4v', 'glm-4v-plus'
]
video_model_names = []
# Maps each supported model name to the client class that can drive it.
# Commented-out entries are models that are known but not wired up yet.
FACTORY = {
  'default': GptClient,
  'gpt-3.5-turbo': GptClient,
  'gpt-3.5-turbo-1106': GptClient,
  'gpt-4o': GptClient,
  'gpt-4o-mini': GptClient,
  'gpt-4-turbo': GptClient,
  # 'gpt-4-1106-vision-preview': GptClient,
  # 'gpt-4v-1106': GptClient,
  # 'gpt-4v-0409': GptClient,
  'o1-mini': O1Client,
  'o1-preview': O1Client,
  'claude-3-opus': ClaudeClient,
  'claude-3-haiku': ClaudeClient,
  'claude-3-sonnet': ClaudeClient,
  'llama3-8b': LlamaClient,
  'llama3-70b': LlamaClient,
  'llama3.1-8b': LlamaClient,
  'llama3.1-70b': LlamaClient,
  'llama3.1-405b': LlamaClient,
  'glm-4': GlmClient,
  'glm-4-plus': GlmClient,
  'glm-4-air': GlmClient,
  'glm-4-airx': GlmClient,
  'glm-4-flash': GlmClient,
  'glm-4-flashx': GlmClient,
  # 'glm-4v': GlmClient,
  # 'glm-4v-plus': GlmClient,
  # 'qwen2.5-7b-instruct': QWen2Client,
  # 'qwen2:72b': QWen2Client, # debug for LAN LLM
  # 'gemini': GeminiClient,
}
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/lib/llm_communicate.py | llm_pysc2/lib/llm_communicate.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 通讯格式
# Communication:
# <MessageTo(AgentName, '''xxxxxxxxxx''')>
# <MessageTo(ChannelName, '''xxxxxxxxxx''')>
# <ListenTo(ChannelName)>
from loguru import logger
def communication_info_transmission(self: "llm_pysc2 MainAgent"):
  """Deliver queued inter-agent messages and refresh communication prompts.

  For every receiver agent: merge each sender's outbox
  (communication_message_o) into the receiver's inbox
  (communication_message_i) via the receiver's communicator, then rebuild
  two prompt fragments — `last_text_c_inp` (messages received this step)
  and `last_text_c_tar` (available targets, functions, and argument rules).
  No-op apart from a log line when config.ENABLE_COMMUNICATION is False.
  """
  if self.config.ENABLE_COMMUNICATION:
    for receiver_name in self.AGENT_NAMES:
      receiver_agent = self.agents[receiver_name]
      receiver_agent.last_text_c_inp = ''
      receiver_agent.last_text_c_tar = ''
      # communication data transmission
      for sender_name in self.AGENT_NAMES:
        sender_agent = self.agents[sender_name]
        cmo = sender_agent.communication_message_o
        cmi = receiver_agent.communication_message_i
        receiver_agent.communication_message_i = receiver_agent.communicator.receive(cmi, cmo, receiver_name, sender_name)
      # Generate text communication info
      for sender_name in receiver_agent.communication_message_i.keys():
        info_sender_name = f"\n\tIn {sender_name}" if 'Channel' in sender_name else f"\n\tFrom {sender_name}"
        received_message = f"{receiver_agent.communication_message_i[sender_name]}"
        receiver_agent.last_text_c_inp += f"{info_sender_name}: {received_message}"
      if len(receiver_agent.last_text_c_inp) != 0:
        receiver_agent.last_text_c_inp = f"\n\nCommunication information:" + receiver_agent.last_text_c_inp
      # Generate Communication Target
      receiver_agent.last_text_c_tar = "\n\nAvailable Communication Targets:"
      for agent_name in self.AGENT_NAMES:
        agent = self.agents[agent_name]
        if agent_name != receiver_name and agent.enable:
          description = agent.config.AGENTS[agent_name]['describe'] if agent_name in agent.config.AGENTS.keys() else ''
          receiver_agent.last_text_c_tar += f"\n\t{agent_name}: {description}"
      # Generate Communication Rules
      receiver_agent.last_text_c_tar += "\nAvailable Communication Functions:"
      receiver_agent.last_text_c_tar += "\n\t<MessageTo(AgentName, message)>"
      receiver_agent.last_text_c_tar += "\n\t<MessageTo(ChannelName, message)>"
      receiver_agent.last_text_c_tar += "\n\t<ListenTo(ChannelName)>"
      receiver_agent.last_text_c_tar += "\nArgs explanation:"
      receiver_agent.last_text_c_tar += "\n\t(1)AgentName: refers to a name mentioned in Available Communication Targets."
      receiver_agent.last_text_c_tar += "\n\t(2)ChannelName: shape as Channel-i, i refers to an integer."
      # BUGFIX: this item was mislabelled "(2)", duplicating the previous one.
      receiver_agent.last_text_c_tar += "\n\t(3)message: any text wrapped between ''' and '''."
      logger.debug(f"[ID {self.log_id}] 7.0 LLMAgent {receiver_name} get communication message: ")
      logger.debug(f"[ID {self.log_id}] LLMAgent communication_message_i: {receiver_agent.communication_message_i}")
      logger.debug(f"[ID {self.log_id}] LLMAgent received message: {receiver_agent.last_text_c_tar}")
    logger.success(f"[ID {self.log_id}] 7.0 All LLMAgent get communication message")
  else:
    logger.success(f"[ID {self.log_id}] 7.0 All LLMAgent skip communication stage")
class DefaultCommunicator():
  """Parses and routes inter-agent communication commands.

  `send` extracts <MessageTo(...)> / <ListenTo(...)> commands from an LLM's
  raw answer text; `receive` merges one sender's outbox into a receiver's
  inbox. Inboxes (cmi) map sender/channel name -> received text; outboxes
  (cmo) map receiver/channel name -> outgoing text.
  """

  def __init__(self, name, log_id, config):
    self.agent_name = name
    self.log_id = log_id
    self.config = config
    logger.info(f"[ID {self.log_id}] {self.agent_name} DefaultCommunicator initialized")

  def receive(self, cmi: dict, cmo: dict, agent_name_receiver: str, agent_name_sender: str):
    """Merge `cmo` (the sender's outbox) into `cmi` (this receiver's inbox) and return it."""
    for key in cmo.keys():
      if key == agent_name_receiver:  # message addressed directly to this receiver
        cmi[agent_name_sender] = f"\n\t\t" + cmo[agent_name_receiver]
      if key in cmi.keys() and 'Channel' in key:  # message to a channel this receiver listens to
        cmi[key] += f"\n\t\tFrom {agent_name_sender}: {cmo[key]}"
    return cmi

  def send(self, raw_text_a: str) -> "tuple[dict, dict, str]":
    """Extract communication commands from a raw LLM answer.

    Only commands between a "Communication(s):" header and the next
    "Action(s):" header are recognized. Returns (cmi, cmo,
    processed_text_c): channels newly listened to, outgoing messages keyed
    by receiver, and the recognized command text.
    """
    cmi, cmo, processed_text_c = {}, {}, ''
    lines = raw_text_a.splitlines()
    start_recognize = False
    for line in lines:
      # BUGFIX: the original checked "Communication:" twice, so a
      # lower-case singular "communication:" header was never recognized.
      if ("Communications:" in line) or ("Communication:" in line) or \
          ("communications:" in line) or ("communication:" in line):
        processed_text_c += line
        start_recognize = True
      if ("Actions:" in line) or ("Action:" in line) or \
          ("actions:" in line) or ("action:" in line):
        start_recognize = False
      if start_recognize:
        if "<" in line and ">" in line and "(" in line and ")" in line:
          action_text = line.split("<")[1].split(">")[0]
          action_name = action_text.split("(")[0]
          action_args = action_text.split("(")[1].split(")")[0]
          if action_name == 'ListenTo':
            message_receiver = action_args
            if "Channel" in message_receiver:
              cmi[action_args] = ''
              processed_text_c += "\n" + line
          elif action_name == 'MessageTo':
            # Message body must be wrapped in ''' ... ''' on a single line.
            if "'''" in line and len(action_args.split("'''")) == 3:
              message_text = action_args.split("'''")[1]
              message_receiver = action_args.split("'''")[0].split(",")[0]
              cmo[message_receiver] = message_text
              processed_text_c += "\n" + line
          else:
            pass
    return cmi, cmo, processed_text_c
# Communicator registries keyed by race, looked up as FACTORY[race][name].
# Only protoss currently has an implementation.
PROTOSS_FACTORY = {'default': DefaultCommunicator}
TERRAN_FACTORY = {}
ZERG_FACTORY = {}
FACTORY = {
  'protoss': PROTOSS_FACTORY,
  'terran': TERRAN_FACTORY,
  'zerg': ZERG_FACTORY,
}
if __name__ == "__main__":
  # Stand-alone demo of DefaultCommunicator: parse two agents' outgoing
  # messages, then deliver them to a combat group and to the Developer.
  from llm_pysc2.agents.configs.config import ProtossAgentConfig
  config = ProtossAgentConfig()
  communicator = DefaultCommunicator('AgentName', log_id=0, config=config)
  # send message from CombatGroup0
  text0 = \
    """
    Communications:
        <MessageTo(Commander, '''We are xxxxxxx, we need xxxxx, we plan to do xxxxx''')>
        <MessageTo(Channel-1, '''We are yyyyyyy, we need yyyyy, we plan to do yyyyy''')>
        <ListenTo(Channel-1)>
    """
  cmi0, cmo0, processed_text_c0 = communicator.send(text0)
  print("CombatGroup0 send info: \n", processed_text_c0)
  print("CombatGroup0 send info recognized: ", cmo0)
  print("--" * 25)
  # send message from CombatGroup1
  text1 = \
    """
    Communications:
        <MessageTo(Developer, '''We are aaaaaaa, we need aaaaa, we plan to do aaaaa''')>
        <MessageTo(Channel-1, '''We are bbbbbbb, we need bbbbb, we plan to do bbbbb''')>
        <ListenTo(Channel-1)>
    """
  cmi1, cmo1, processed_text_c1 = communicator.send(text1)
  print("CombatGroup1 send info: \n", processed_text_c1)
  print("CombatGroup1 send info recognized: ", cmo1)
  print("--" * 25)
  # CombatGroup3 receive message
  # CombatGroup3 listens to Channel-1, so it gets both channel posts.
  cmi = {"Channel-1": ''}
  print("CombatGroup3 listen to: ", cmi)
  cmi = communicator.receive(cmi, cmo0, 'CombatGroup3', 'CombatGroup0')
  cmi = communicator.receive(cmi, cmo1, 'CombatGroup3', 'CombatGroup1')
  print("CombatGroup3 received: ", cmi)
  last_text_c_inp = f"Communication:"
  for message_sender in cmi.keys():
    if 'Channel' not in message_sender:
      last_text_c_inp += f"\n\tFrom {message_sender}: {cmi[message_sender]}"
    else:
      last_text_c_inp += f"\n\tIn {message_sender}: {cmi[message_sender]}"
  print(last_text_c_inp)
  print("--" * 25)
  # Developer receive message
  # Developer listens to no channel; it only gets the direct message.
  cmi = {}
  print("Developer listen to: ", cmi)
  cmi = communicator.receive(cmi, cmo0, 'Developer', 'CombatGroup0')
  cmi = communicator.receive(cmi, cmo1, 'Developer', 'CombatGroup1')
  print("Developer received: ", cmi)
  last_text_c_inp = f"Communication:"
  for message_sender in cmi.keys():
    if 'Channel' not in message_sender:
      last_text_c_inp += f"\n\tFrom {message_sender}: {cmi[message_sender]}"
    else:
      last_text_c_inp += f"\n\tIn {message_sender}: {cmi[message_sender]}"
  print(last_text_c_inp)
  print("--" * 25)
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/lib/llm_observation.py | llm_pysc2/lib/llm_observation.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llm_pysc2.lib.knowledge import protoss, zerg, terran
from llm_pysc2.lib.utils import *
from pysc2.lib import features, units, buffs
from pysc2.lib import renderer_human, colors
from PIL import ImageDraw, ImageFont, Image
from loguru import logger
import numpy as np
import pygame
import base64
import math
import glob
import io
import os
# unit_type id -> stats/description entry, merged from all three race knowledge bases
knowledge_dict = {}
knowledge_dict.update(protoss.DATA_PROTOSS)
knowledge_dict.update(terran.DATA_TERRAN)
knowledge_dict.update(zerg.DATA_ZERG)
# unit_type id -> enum member name (e.g. 84 -> "Probe"), built from the pysc2 unit enums
unit_dict = {v: k for k, v in units.Neutral.__dict__.items() if
             isinstance(v, int)}
unit_dict.update({v: k for k, v in units.Protoss.__dict__.items()
                  if isinstance(v, int)})
unit_dict.update({v: k for k, v in units.Terran.__dict__.items()
                  if isinstance(v, int)})
unit_dict.update({v: k for k, v in units.Zerg.__dict__.items() if
                  isinstance(v, int)})
# def get_rgb_screen(obs) -> np.ndarray or None:
# if hasattr(obs.observation, 'render_data') and hasattr(obs.observation.render_data, 'map'):
# return obs.observation.render_data.map # 返回RGB屏幕图像
# else:
# return None
#
# def get_rgb_minimap(obs) -> np.ndarray or None:
# if hasattr(obs.observation, 'render_data') and hasattr(obs.observation.render_data, 'minimap'):
# return obs.observation.render_data.minimap # 返回RGB小地图图像
# else:
# return None
# def get_feature_map_screen(obs, feature_name: str) -> np.ndarray or None:
# feature_layer = obs.observation.feature_screen
# if hasattr(feature_layer, feature_name):
# return getattr(feature_layer, feature_name)
# else:
# return None
#
# def get_feature_map_minimap(obs, feature_name: str) -> np.ndarray or None:
# feature_layer = obs.observation.feature_minimap
# if hasattr(feature_layer, feature_name):
# return getattr(feature_layer, feature_name)
# else:
# return None
def get_img_obs_fea(self, obs):
  """Render the human-renderer feature screen with a coordinate grid overlay
  and return it as a Base64-encoded PNG string.

  Bound as a method on an LLM agent (``self`` supplies config, log fields and
  step counters). Returns None when the renderer surface is unavailable
  (the game must be started with --render).
  """
  def draw_coordinate_axes(surf, screen_size):
    """Draw coordinate axes and grid lines on the surface; the coordinate
    range is fixed to 0..screen_size."""
    # fix the coordinate range to 0..screen_size
    coord_range_x = screen_size
    coord_range_y = screen_size
    # number of ticks / grid lines
    num_ticks = 9  # adjustable; 9 ticks gives one tick every screen_size/8 units
    # fixed coordinate tick values, e.g. [0, 16, 32, ..., 128]
    fixed_ticks_x = np.linspace(0, coord_range_x, num_ticks)
    fixed_ticks_y = np.linspace(0, coord_range_y, num_ticks)
    # surface size in pixels
    img_width, img_height = surf.surf.get_size()
    # map the fixed coordinates to pixel positions
    x_positions = (fixed_ticks_x / coord_range_x) * img_width
    y_positions = (fixed_ticks_y / coord_range_y) * img_height
    # draw vertical grid lines
    for x in x_positions:
      pygame.draw.line(surf.surf, colors.white, (x, 0), (x, img_height), 1)
    # draw horizontal grid lines
    for y in y_positions:
      pygame.draw.line(surf.surf, colors.white, (0, y), (img_width, y), 1)
    # try to load a font (fall back to pygame's default on failure)
    try:
      font = pygame.font.SysFont('arial', 12)
    except IOError:
      font = pygame.font.SysFont(None, 12)
    # draw X-axis tick labels
    for x, label in zip(x_positions, fixed_ticks_x.astype(int)):
      text_surface = font.render(str(label), True, colors.white)
      text_rect = text_surface.get_rect()
      # nudge the label so it does not cross the image border
      text_rect.topleft = (x + 2, 2)
      surf.surf.blit(text_surface, text_rect)
    # draw Y-axis tick labels
    for y, label in zip(y_positions, fixed_ticks_y.astype(int)):
      text_surface = font.render(str(label), True, colors.white)
      text_rect = text_surface.get_rect()
      # nudge the label so it does not cross the image border
      text_rect.topleft = (2, y + 2)
      surf.surf.blit(text_surface, text_rect)
    return surf
  # NOTE: misplaced literal below (it sits after the nested def, so it is not
  # a real docstring); it documents the remainder of this function.
  """
  Read the latest composite feature map and encode it in Base64 format.
  """
  if isinstance(obs, list):
    observation = obs[0].observation
  else:
    observation = obs.observation
    logger.debug(f"[ID {self.log_id}] LLMAgent {self.name}: Accessed observation via obs.observation")
  # Log the keys of the observation (for debugging)
  logger.debug(f"[ID {self.log_id}] LLMAgent {self.name}: Observation keys: {list(observation.keys())}")
  # get surf from pysc2.lib.renderer_human.draw_screen.surf
  global_surf_screen = renderer_human.global_surf_screen
  if global_surf_screen is None:
    logger.error(f"[ID {self.log_id}] {self.name} enabled img feature map but can't get the img, check if --render in your command")
    return None
  # draw lines (mutates the shared renderer surface in place)
  surf = draw_coordinate_axes(global_surf_screen, self.size_screen)
  # surf to img
  raw_str = pygame.image.tostring(surf.surf, 'RGB')
  img = Image.frombytes('RGB', surf.surf.get_size(), raw_str)
  # Save the image to a byte stream in memory
  buffered = io.BytesIO()
  img.save(buffered, format="PNG")
  buffered.seek(0)
  # Convert image byte stream to Base64 encoded string
  base64_image = base64.b64encode(buffered.getvalue()).decode('utf-8')
  if self.config.ENABLE_SAVE_IMAGES:
    # Construct the save path, including the log directory, agent name, and "fea_images" subdirectory
    image_save_dir = os.path.join(self.log_dir_path, f"{self.name}", "fea_images")
    os.makedirs(image_save_dir, exist_ok=True)
    # Construct the file name, including the step
    image_filename = f"fea_screen_loop{self.main_loop_step}_step{self.num_step}.png"
    image_path = os.path.join(image_save_dir, image_filename)
    # Save the image
    try:
      img.save(image_path)
      logger.info(
        f"[ID {self.log_id}] LLMAgent {self.name}: Saved feature map at step {self.num_step}, filename: {image_filename}")
    except Exception as e:
      logger.error(f"[ID {self.log_id}] LLMAgent {self.name}: Failed to save RGB image: {e}")
  return base64_image
# use in SubAgent
def get_img_obs_rgb(self, obs):
  """
  Extracts the RGB image from the observation, adds coordinate axes ranging from 0 to 128,
  and returns the Base64 encoded string of the processed image.
  If saving images is enabled in the configuration, the processed image is saved to a local file.

  Returns None when 'rgb_screen' is missing from the observation.
  """
  # Check the structure of the obs object
  if isinstance(obs, list):
    observation = obs[0].observation
  else:
    observation = obs.observation
    logger.debug(f"[ID {self.log_id}] LLMAgent {self.name}: Accessed observation via obs.observation")
  # Log the keys of the observation (for debugging)
  logger.debug(f"[ID {self.log_id}] LLMAgent {self.name}: Observation keys: {list(observation.keys())}")
  # Check if 'rgb_screen' is in the observation
  if 'rgb_screen' in observation:
    rgb_screen = observation['rgb_screen']
    logger.debug(f"[ID {self.log_id}] LLMAgent {self.name}: 'rgb_screen' is found in the observation.")
  else:
    logger.error(f"[ID {self.log_id}] LLMAgent {self.name}: 'rgb_screen' not found in the observation.")
    return None
  # Convert data type to uint8
  rgb_screen = rgb_screen.astype('uint8')
  # Convert NumPy array to PIL Image object
  # NOTE(review): channel order is reversed here ("BGR to RGB") — confirm the
  # render pipeline actually delivers BGR; pysc2 render data is nominally RGB.
  rgb_screen = np.array(rgb_screen)[:, :, ::-1]  # BGR to RGB
  img = Image.fromarray(rgb_screen, 'RGB')
  # img = img.convert('RGB')
  # Get image dimensions
  img_width, img_height = img.size
  # Create a drawing object
  draw = ImageDraw.Draw(img)
  # Fixed coordinate range
  coord_range = self.size_screen  # Coordinate axes range from 0 to 128
  # Set the number of ticks and grid lines
  num_ticks = 9  # Adjust as needed
  # Compute fixed coordinate ticks, e.g., [0, 16, 32, ..., 128]
  fixed_ticks = np.linspace(0, coord_range, num_ticks)  # Fixed coordinate ticks
  # Map fixed coordinates to image pixel positions
  x_positions = (fixed_ticks / coord_range) * img_width  # Map to image x-axis positions
  y_positions = (fixed_ticks / coord_range) * img_height  # Map to image y-axis positions
  # Draw vertical grid lines
  for x in x_positions:
    draw.line([(x, 0), (x, img_height)], fill='white', width=1)
  # Draw horizontal grid lines
  for y in y_positions:
    draw.line([(0, y), (img_width, y)], fill='white', width=1)
  # Try to load a font
  try:
    font = ImageFont.truetype("arial.ttf", size=12)
    logger.debug(f"[ID {self.log_id}] LLMAgent {self.name}: Loaded 'arial.ttf' font for drawing text.")
  except IOError:
    # Use default font if specified font is not available
    font = ImageFont.load_default()
    logger.warning(f"[ID {self.log_id}] LLMAgent {self.name}: Could not load 'arial.ttf'. Using default font.")
  # Draw X-axis tick labels
  for x, label in zip(x_positions, fixed_ticks.astype(int)):
    # Adjust label position slightly to prevent clipping
    draw.text((x + 2, 2), str(label), fill='white', font=font)
  # Draw Y-axis tick labels
  for y, label in zip(y_positions, fixed_ticks.astype(int)):
    # Adjust label position slightly to prevent clipping
    draw.text((2, y + 2), str(label), fill='white', font=font)
  # Save the image to a byte stream in memory
  buffered = io.BytesIO()
  img.save(buffered, format="PNG")
  buffered.seek(0)
  # Convert image byte stream to Base64 encoded string
  base64_image = base64.b64encode(buffered.getvalue()).decode('utf-8')
  # Save the image to a local file if saving is enabled in the configuration
  if self.config.ENABLE_SAVE_IMAGES:
    # Get the game loop step from the observation as the step information
    step = observation['game_loop'][0]
    # Construct the save path, including the log directory, agent name, and "rgb_images" subdirectory
    image_save_dir = os.path.join(self.log_dir_path, f"{self.name}", "rgb_images")
    os.makedirs(image_save_dir, exist_ok=True)
    # Construct the file name, including the step
    image_filename = f"rgb_screen_loop{self.main_loop_step}_step{step}.png"
    image_path = os.path.join(image_save_dir, image_filename)
    # Save the image
    try:
      img.save(image_path)
      logger.info(
        f"[ID {self.log_id}] LLMAgent {self.name}: Saved RGB image at step {step}, filename: {image_filename}")
    except Exception as e:
      logger.error(f"[ID {self.log_id}] LLMAgent {self.name}: Failed to save RGB image: {e}")
  return base64_image
def get_game_info(obs, agent) -> str:
  """Summarize global game state as text: elapsed time for every agent, plus
  resource/supply counts for all agents whose name lacks 'CombatGroup'."""
  total_seconds = obs.observation.game_loop / 22.4  # SC2 advances 22.4 game loops per second
  minutes = int(total_seconds // 60)
  seconds = int(total_seconds % 60)
  parts = ["Game Info:", f"\n\tTime: {minutes}:{seconds:02d}"]
  # combat-group agents do not get the economy section
  if 'CombatGroup' not in agent.name:
    player = obs.observation.player
    parts.append(f"\n\tMinerals: {player.minerals}")
    parts.append(f"\n\tVespene: {player.vespene}")
    parts.append(f"\n\tSupply Total: {player.food_cap}")
    parts.append(f"\n\tSupply Left: {player.food_cap - player.food_used}")
    parts.append(f"\n\tSupply Used: {player.food_used}")
  return ''.join(parts)
def get_single_unit_info(unit, team_unit_screen_coord=None, size_screen=None) -> str:
  """Format a single feature-layer unit as one line of observation text.

  Args:
    unit: one entry of obs.observation.feature_units.
    team_unit_screen_coord: optional [x, y] mean screen position of the
      controlled team; together with size_screen it enables the Distance field.
    size_screen: screen resolution in pixels, used to convert pixel distance
      into world-grid distance.

  Returns:
    A text fragment starting with "\\n\\t\\tUnit: ..." describing tag, screen
    position, health/shield, energy, build progress, weapon state and buffs.
  """
  unit_type_id = unit.unit_type
  unit_name = unit_dict.get(unit_type_id, "Unknown")
  # tag and screen position (tags are omitted for unit types that never need one)
  unit_info = f"\n\t\tUnit: {unit_name}"
  if unit.unit_type not in UNIT_DONOT_NEED_TAG:
    unit_info += f" Tag: {hex(unit.tag)}"
  unit_info += f" ScreenPos: [{unit.x}, {unit.y}]"
  total_health = unit.health + unit.shield
  # distance to the current team's head unit, reported in world-grid units
  if unit.unit_type not in UNIT_DONOT_NEED_DIS:
    if team_unit_screen_coord is not None and size_screen is not None:
      ratio = int(size_screen / SCREEN_WORLD_GRID)  # pixels per world-grid cell
      dist = math.sqrt((team_unit_screen_coord[0] - unit.x) ** 2 + (team_unit_screen_coord[1] - unit.y) ** 2) / ratio
      unit_info += f" Distance: {int(dist)}"
  # health (+shield), energy, build progress, weapon cooldown
  unit_info += f" Health: {total_health}"
  if unit.unit_type in knowledge_dict.keys():
    total_health_max = knowledge_dict[unit.unit_type]['health'] + knowledge_dict[unit.unit_type]['shield']
    if total_health_max > 0:
      unit_info += f"({int(100 * total_health / total_health_max)} %)"
  if unit.energy > 0:
    unit_info += f" Energy: {unit.energy}"
  if unit.build_progress < 100:
    # Bug fix: report the actual build progress (previously printed unit.energy here).
    unit_info += f" Build_progress: {unit.build_progress}%"
  if unit.build_progress == 100 and unit.alliance == features.PlayerRelative.SELF and unit.is_selected and \
      unit.unit_type in knowledge_dict.keys() and 'weapon1_attack' in knowledge_dict[unit.unit_type].keys() \
      and knowledge_dict[unit.unit_type]['weapon1_attack'] not in [0, -1]:
    # order id 32 on a Phoenix marks the GravitonBeam channel, which locks its weapon
    if unit.unit_type == units.Protoss.Phoenix and unit.order_id_0 == 32:
      unit_info += f" Weapon Locked by GravitonBeam Ability"
    elif unit.weapon_cooldown == 0:
      unit_info += f" Weapon Ready"
    elif unit.weapon_cooldown > 0:
      # NOTE(review): divides by 22 while other time conversions use 22.4 loops/s — confirm intended
      unit_info += f" Weapon Cooldown Time: {unit.weapon_cooldown / 22:.2f}s"
    else:
      pass
  if unit.build_progress == 100 and unit.buff_id_0 != 0:
    unit_info += f" Buff: {str(buffs.Buffs(unit.buff_id_0))}"
  if unit.build_progress == 100 and unit.buff_id_1 != 0:
    unit_info += f" {str(buffs.Buffs(unit.buff_id_1))}"
  return unit_info
def get_single_unit_type_knowledge(unit_type, log_id) -> str:
  """Build the knowledge-base text block (name, description, properties,
  weapon stats, abilities) for one unit type; '' when the type is unknown."""
  if unit_type not in knowledge_dict.keys():
    logger.warning(f"[ID {log_id}] do not find unit_type {str(unit_type)} in knowledge_dict")
    return ''
  pieces = []
  # resolve the enum name through the race enum that matches the unit type
  type_name = str(units.get_unit_type(unit_type))
  if 'Protoss' in type_name:
    pieces.append(f"\n\t{str(units.Protoss(unit_type))}")
  if 'Terran' in type_name:
    pieces.append(f"\n\t{str(units.Terran(unit_type))}")
  if 'Zerg' in type_name:
    pieces.append(f"\n\t{str(units.Zerg(unit_type))}")
  entry = knowledge_dict[unit_type]
  if 'description' in entry.keys():
    pieces.append(f"\n\t\t{entry['description']}")
  else:
    logger.error(
      f"[ID {log_id}] do not find description of {str(unit_type)} in knowledge_dict")
  pieces.append(f"\n\t\tUnit properties: {entry['target_self'] + entry['type_self']}")
  if 'weapon1_attack_range' in entry.keys() and entry['weapon1_attack_range'] not in [0, -1]:
    pieces.append(f"\n\t\tWeapon info: Attack Range {entry['weapon1_attack_range']}")
  if 'target' in entry.keys() and len(entry['target']) != 0:
    pieces.append(f", target: {entry['target']}")
  if 'type_anti' in entry.keys() and len(entry['type_anti']) != 0:
    pieces.append(f", anti: {entry['type_anti']}")
  if 'weapon1_attack' in entry.keys() and entry['weapon1_attack'] not in [0, -1]:
    dps = int(entry['weapon1_attack'] * entry['weapon1_attack_times'] / entry['weapon1_cooldown'])
    pieces.append(f", DPS(damage per second) {dps}")
  if 'weapon1_attack_bonus' in entry.keys() and entry['weapon1_attack_bonus'] not in [0, -1]:
    dps_anti = int((entry['weapon1_attack'] + entry['weapon1_attack_bonus']) * entry['weapon1_attack_times'] / entry['weapon1_cooldown'])
    pieces.append(f", DPS-anti {dps_anti}")
  if 'ability' in entry.keys():
    pieces.append(f"\n\t\tunit abilities:")
    for ability_name, ability_text in entry['ability'].items():
      pieces.append(f"\n\t\t\t{ability_name}: {ability_text}")
  return ''.join(pieces)
# Gather per-team unit information (controlled / ally / enemy) plus relevant knowledge text.
def get_teams_info_with_knowledge(agent) -> str:
  """Build the observation text for all of the agent's teams.

  For each team (single-select teams count one unit per sub-team), lists the
  controlled units, nearby ally units and nearby enemy units seen on that
  team's screen, then appends one knowledge entry per unit type encountered.
  """
  teams_info = ''
  ctrl_unit_type_total = []
  ally_unit_type_total = []
  enemy_unit_type_total = []
  unit_types_total = []
  # Collect team unit info; for single-select teams each unit counts as its own sub-team.
  for team in agent.teams:
    team_obs_list = team['obs'] if (len(team['obs']) != 0 and len(team['unit_tags']) != 0) else None
    if team['select_type'] == 'select' and len(team['obs']) != len(team['unit_tags']):
      continue
    if team_obs_list is None:
      continue
    for i in range(len(team_obs_list)):
      ctrl_unit_type = []
      ally_unit_type = []
      enemy_unit_type = []
      ctrl_unit_tags = []
      ally_unit_tags = []
      enemy_unit_tags = []
      obs = team_obs_list[i]
      curr_team_head_unit = None  # NOTE(review): assigned below but currently unused
      ctrl_unit_screen_coord = [0, 0]  # running sum, averaged later
      for unit in obs.observation.feature_units:
        # controlled: on screen, selected, and belonging to this team
        if unit.is_on_screen and unit.is_selected and unit.tag in team['unit_tags']:
          ctrl_unit_type.append(unit.unit_type)
          ctrl_unit_tags.append(unit.tag)
          ctrl_unit_screen_coord[0] += unit.x
          ctrl_unit_screen_coord[1] += unit.y
          if team['select_type'] != 'select' and unit.tag == team['unit_tags'][0]:
            curr_team_head_unit = unit
          if team['select_type'] == 'select' and unit.tag == team['unit_tags'][i]:
            curr_team_head_unit = unit
        # ally: alliance SELF(1) or ALLY(2) but not part of the selection
        if unit.is_on_screen and unit.alliance in [1, 2] and not unit.is_selected:
          ally_unit_type.append(unit.unit_type)
          ally_unit_tags.append(unit.tag)
        if unit.is_on_screen and unit.alliance == features.PlayerRelative.ENEMY:
          # Larva is noise for combat reporting — skip it
          if unit.unit_type in [units.Zerg.Larva]:
            continue
          enemy_unit_type.append(unit.unit_type)
          enemy_unit_tags.append(unit.tag)
      # average the summed positions into the team's mean screen coordinate
      if len(ctrl_unit_tags) > 0:
        ctrl_unit_screen_coord[0] = ctrl_unit_screen_coord[0] / len(ctrl_unit_tags)
        ctrl_unit_screen_coord[1] = ctrl_unit_screen_coord[1] / len(ctrl_unit_tags)
      else:
        ctrl_unit_screen_coord = None
      # de-duplicate unit types
      ctrl_unit_type = list(set(ctrl_unit_type))
      ally_unit_type = list(set(ally_unit_type))
      enemy_unit_type = list(set(enemy_unit_type))
      ctrl_unit_type_total += ctrl_unit_type
      ally_unit_type_total += ally_unit_type
      enemy_unit_type_total += enemy_unit_type
      # initialize the output text fragments
      ctrl_units_info = ''
      ally_units_info = ''
      enemy_units_info = ''
      if team['select_type'] == 'select':
        teams_info += f"\n\nTeam {team['name']}-{i + 1} Info:"
      else:
        teams_info += f"\n\nTeam {team['name']} Info:"
      arr = obs.observation['feature_minimap']['camera']
      idx = np.nonzero(arr)  # (rows, cols) of non-zero cells on the camera layer
      # idx[:] is a no-op copy of the tuple; [1] -> column (x) indices, [0] -> row (y) indices
      minimap_x = int(idx[:][1].mean())
      minimap_y = int(idx[:][0].mean())
      teams_info += f"\n\tTeam minimap position: [{minimap_x}, {minimap_y}]"
      size_screen = obs.observation.feature_screen.height_map.shape[0]
      # controlled units
      for unit_type in ctrl_unit_type:
        for unit in obs.observation.feature_units:
          if unit.unit_type == unit_type and unit.is_on_screen and unit.alliance == features.PlayerRelative.SELF \
              and unit.is_selected and (0 < unit.x < size_screen and 0 < unit.y < size_screen):
            ctrl_units_info += get_single_unit_info(unit)
      if ctrl_units_info != '':
        teams_info += "\n\tControlled Team Units:"
        teams_info += ctrl_units_info
      # ally units
      for unit_type in ally_unit_type:
        for unit in obs.observation.feature_units:
          if unit.unit_type == unit_type and unit.is_on_screen and unit.alliance in [1, 2] and \
              not unit.is_selected and (0 < unit.x < size_screen and 0 < unit.y < size_screen):
            ally_units_info += get_single_unit_info(unit)
      if ally_units_info != '':
        teams_info += "\n\tNearby Ally Units:"
        teams_info += ally_units_info
      # enemy units (with distance to the team's mean position)
      for unit_type in enemy_unit_type:
        for unit in obs.observation.feature_units:
          if unit.unit_type == unit_type and unit.is_on_screen and unit.alliance == features.PlayerRelative.ENEMY and \
              (0 < unit.x < size_screen and 0 < unit.y < size_screen):
            enemy_units_info += get_single_unit_info(unit, ctrl_unit_screen_coord, size_screen)
      if enemy_units_info != '':
        teams_info += "\n\tNearby Enemy Units:"
        teams_info += enemy_units_info
  ctrl_unit_type_total = list(set(ctrl_unit_type_total))
  ally_unit_type_total = list(set(ally_unit_type_total))
  enemy_unit_type_total = list(set(enemy_unit_type_total))
  showed_unit = []
  # unit type descriptions and abilities (abilities only for controlled types, see below)
  unit_types_total = ctrl_unit_type_total + ally_unit_type_total + enemy_unit_type_total
  teams_info += f"\n\nRelevant Knowledge:"
  for unit_type in unit_types_total:
    if unit_type not in knowledge_dict.keys():
      logger.warning(f"[ID {agent.log_id}] do not find unit_type {str(unit_type)} in knowledge_dict")
      continue
    if unit_type in showed_unit:
      continue
    if 'Protoss' in str(units.get_unit_type(unit_type)):
      teams_info += f"\n\t{str(units.Protoss(unit_type))}"
    if 'Terran' in str(units.get_unit_type(unit_type)):
      teams_info += f"\n\t{str(units.Terran(unit_type))}"
    if 'Zerg' in str(units.get_unit_type(unit_type)):
      teams_info += f"\n\t{str(units.Zerg(unit_type))}"
    if 'description' in knowledge_dict[unit_type].keys():
      teams_info += f"\n\t\t{knowledge_dict[unit_type]['description']}"
    else:
      logger.error(
        f"[ID {agent.log_id}] do not find description of {str(unit_type)} in knowledge_dict")
    unit_knowledge = knowledge_dict[unit_type]
    teams_info += f"\n\t\tUnit properties: {unit_knowledge['target_self'] + unit_knowledge['type_self']}"
    if 'weapon1_attack_range' in unit_knowledge.keys() and unit_knowledge['weapon1_attack_range'] not in [0, -1]:
      teams_info += f"\n\t\tWeapon info: Attack Range {unit_knowledge['weapon1_attack_range']}"
    if 'target' in unit_knowledge.keys() and len(unit_knowledge['target']) != 0:
      teams_info += f", target: {unit_knowledge['target']}"
    if 'type_anti' in unit_knowledge.keys() and len(unit_knowledge['type_anti']) != 0:
      teams_info += f", anti: {unit_knowledge['type_anti']}"
    if 'weapon1_attack' in unit_knowledge.keys() and unit_knowledge['weapon1_attack'] not in [0, -1]:
      teams_info += f", DPS(damage per second) {int(unit_knowledge['weapon1_attack'] * unit_knowledge['weapon1_attack_times'] / unit_knowledge['weapon1_cooldown'])}"
    if 'weapon1_attack_bonus' in unit_knowledge.keys() and unit_knowledge['weapon1_attack_bonus'] not in [0, -1]:
      teams_info += f", DPS-anti {int((unit_knowledge['weapon1_attack'] + unit_knowledge['weapon1_attack_bonus']) * unit_knowledge['weapon1_attack_times'] / unit_knowledge['weapon1_cooldown'])}"
    # ability text is shown only for unit types the agent actually controls
    if 'ability' in unit_knowledge.keys() and unit_type in ctrl_unit_type_total:
      teams_info += f"\n\t\tunit abilities:"
      for ability in unit_knowledge['ability'].keys():
        teams_info += f"\n\t\t\t{ability}: {unit_knowledge['ability'][ability]}"
    showed_unit.append(unit_type)
  return teams_info
# Derive the currently valid actions from obs and render them as text; this is
# a standalone section of the input prompt.
def get_valid_actions_from_obs(obs, agent) -> str:
  """List every action each of the agent's teams may take right now,
  followed by an explanation of the action argument formats."""
  text_valid_actions = "\n\nValid Actions:"
  for team in agent.teams:
    team_obs_list = team['obs'] if (len(team['obs']) != 0 and len(team['unit_tags']) != 0) else None
    if team['select_type'] == 'select' and len(team['obs']) != len(team['unit_tags']) or team_obs_list is None:
      continue
    for i in range(len(team_obs_list)):
      # determine current controlled unit types
      ctrl_unit_type = []
      obs = team_obs_list[i]  # NOTE(review): rebinds the obs parameter; the arg section below uses the last value
      for unit in obs.observation.feature_units:
        if unit.is_on_screen and unit.is_selected and unit.tag in team['unit_tags']:
          ctrl_unit_type.append(unit.unit_type)
      ctrl_unit_type = list(set(ctrl_unit_type))
      # determine current team name
      team_name = f"Team {team['name']}-{i+1}" if team['select_type'] == 'select' else f"Team {team['name']}"
      text_valid_actions += f"\n\t{team_name} Valid Actions:"
      # reduce to team action space
      team_action_space = []
      for unit_type in ctrl_unit_type:
        if unit_type in agent.config.AGENTS[agent.name]['action'].keys():
          team_action_space += agent.config.AGENTS[agent.name]['action'][unit_type]
        else:
          logger.error(f"[ID {agent.log_id}] cannot get valid actions of unit_type {unit_type}")
      # reduce to obs.observation.available_actions
      valid_actions = []
      for action in team_action_space:
        valid = True
        for func_triple in action['func']:
          if func_triple[0] not in obs.observation.available_actions:
            valid = False
        if valid:
          valid_actions.append(action)
      # TODO: special actions
      # These actions select a building on the fly at execution time, so their
      # validity cannot be read from obs; it must be judged separately from
      # resources / tech prerequisites / idle buildings.
      for action in team_action_space:
        if 'Build_' in action['name'] and '_Easy' in action['name']:  # enough minerals and gas, easy mode, do not select worker
          pass
        if 'Train_' in action['name']:  # enough minerals and gas, exist relevant building active == 0, and in power
          pass
        if 'WarpTrain_' in action['name']:  # enough minerals and gas, exist relevant building active == 0, and in power
          pass
        if 'Research_' in action['name']:  # enough minerals and gas, exist relevant building active == 0, and in power
          pass
      # record valid actions
      for action in valid_actions:
        arg = action['arg']
        if len(arg) == 0:
          text_valid_actions += f"\n\t\t<{action['name']}()>"
        if len(arg) == 1:
          text_valid_actions += f"\n\t\t<{action['name']}({arg[0]})>"
        if len(arg) == 2:
          text_valid_actions += f"\n\t\t<{action['name']}({arg[0]}, {arg[1]})>"
        if len(arg) == 3:
          text_valid_actions += f"\n\t\t<{action['name']}({arg[0]}, {arg[1]}, {arg[2]})>"
  # record action arg explanation
  size_screen = obs.observation.feature_screen.height_map.shape[0]
  size_minimap = obs.observation.feature_minimap.height_map.shape[0]
  text_valid_actions += f"\n\nAction Args: "
  text_valid_actions += f"\n\t(1) tag: tag refers to a hexadecimal number, shape as 0x000000000."
  text_valid_actions += f"\n\t(2) screen: screen refers to a screen coordinate, shape as [x, y], where x and y range from 0 to {size_screen}."
  text_valid_actions += f"\n\t(3) minimap: minimap refers to a minimap coordinate, shape as [x, y], where x and y range from 0 to {size_minimap}."
  text_valid_actions += f"\nFor example, when you want to use an action like <Action_Name(tag, screen)>, you should output like <Action_Name(0x100580001, [37, 55])>; when you want to use an action like <Action_Name(screen)>, you should output like <Action_Name([66, 78])>. "
  text_valid_actions += f"What's more, You need to see clearly whether an action is using screen coordinates or minimap coordinates, If an action name as XXXX_Screen, it uses screen coordinate; if an action name as XXXX_Minimap, it uses minimap coordinate."
  return text_valid_actions
def get_last_action_info(agent) -> str:
  """Return a reminder about the previous step's action text, or '' when the
  agent has no non-empty string recorded in last_text_a_pro."""
  prev = agent.last_text_a_pro
  if not (isinstance(prev, str) and len(prev) > 0):
    return ""
  return (
    f"\n\nLast Step {prev}"
    "\nYou need to confirm whether the previous action finished executing, "
    "and based on this, determine whether to continue the old strategy or immediately take other actions."
  )
def get_task_info(agent) -> str:
  """Report each team's current task and advance the task queue.

  Mutates team['task'] in place: the head task is popped once its position is
  reached (camera within distance 4 on the minimap) or once the next task's
  scheduled time has passed.
  """
  task_info = ''
  for team in agent.config.AGENTS[agent.name]['team']:
    if 'task' in team.keys() and len(team['task']) > 0 and len(team['obs']) > 0:
      change_task = False
      task = None
      if team['select_type'] != 'select' or team['name'] == 'Empty':
        task = team['task'][0]
        obs = team['obs'][0]
        # camera position on the minimap = mean of the non-zero camera-layer cells
        idx = np.nonzero(obs.observation['feature_minimap']['camera'])
        x, y = int(idx[:][1].mean()), int(idx[:][0].mean())
        if task['pos'] is not None:
          dist = math.sqrt((x - task['pos'][0]) ** 2 + (y - task['pos'][1]) ** 2)
          if dist < 4:
            change_task = True
        if len(team['task']) > 1:
          task1 = team['task'][1]
          # a "m:ss" start time on the NEXT task forces a switch once that time has passed
          if task1['time'] is not None and isinstance(task1['time'], str) and ':' in task1['time']:
            game_loop = obs.observation.game_loop
            # NOTE(review): divides by 22 although SC2 runs at 22.4 game loops
            # per second (get_game_info uses 22.4) — confirm intended
            game_s = int(game_loop / 22 % 60)  # SC2 runs at 22.4 game loops per second
            game_m = int(game_loop / 22 // 60)  # SC2 runs at 22.4 game loops per second
            if int(task1['time'].split(":")[0]) < game_m or \
                (int(task1['time'].split(":")[0]) == game_m and int(task1['time'].split(":")[1]) <= game_s):
              change_task = True
      if team['select_type'] == 'select':
        pass  # single-select teams: per-unit task switching not implemented yet
      if change_task:
        team['task'].pop(0)
      if len(team['task']) > 0 and task is not None:
        if team['name'] != 'Empty':
          task_info += f"\n\tTeam {team['name']}' task: {task['info']}"
        if team['name'] == 'Empty':
          task_info += f"\n\tAgent task: {task['info']}"
  if len(task_info) > 0:
    task_info = f"\n\nTasks:" + task_info
  return task_info
def get_communication_info(agent) -> str:
  """Concatenate the agent's two cached communication text fields
  (last_text_c_inp followed by last_text_c_tar)."""
  return agent.last_text_c_inp + agent.last_text_c_tar
def get_other_agents_info(agent) -> str:  # for Commander only
  """Summarize every other agent's teams (unit type, count, queued minimap
  position) and append knowledge entries for unit types not yet described.

  Note: consumes entries of team['minimap_pos'] via pop(0), so each queued
  minimap position is reported exactly once.
  """
  other_agents_info = ''
  other_agents = agent.other_agents
  other_agents_unit_knowledge = ''
  showed_unit_type = []  # unit types whose knowledge text was already emitted
  for agent_name in other_agents.keys():
    agent_ = other_agents[agent_name]
    other_agent_info = ''
    for team in agent_.teams:
      if team['name'] == 'Empty':
        continue
      # multi-select team: one summary line for the whole team
      if agent_.enable and len(team['unit_tags']) != 0 and team['select_type'] != 'select':
        other_agent_info += f"\n\t\tTeam {team['name']}: {str(units.get_unit_type(team['unit_type'][0]))} x{len(team['unit_tags'])}"
        if len(team['minimap_pos']) == 1:
          other_agent_info += f", minimap position {team['minimap_pos'].pop(0)}"
        if team['unit_type'][0] not in showed_unit_type:
          other_agents_unit_knowledge += get_single_unit_type_knowledge(team['unit_type'][0], agent.log_id)
          showed_unit_type.append(team['unit_type'][0])
      # single-select team: one line per sub-team (one unit each)
      if agent_.enable and len(team['unit_tags']) != 0 and team['select_type'] == 'select':
        for i in range(len(team['unit_tags'])):
          # Consistency fix: sub-team labels are 1-based ("Name-1", "Name-2", ...)
          # everywhere else (get_teams_info_with_knowledge, get_valid_actions_from_obs);
          # this line previously used the 0-based index.
          other_agent_info += f"\n\t\tTeam {team['name']}-{i + 1}: {str(units.get_unit_type(team['unit_type'][0]))} x1"
          # pop one queued minimap position per sub-team, in order
          if len(team['minimap_pos']) + i == len(team['unit_tags']):
            other_agent_info += f", minimap position {team['minimap_pos'].pop(0)}"
          if team['unit_type'][0] not in showed_unit_type:
            other_agents_unit_knowledge += get_single_unit_type_knowledge(team['unit_type'][0], agent.log_id)
            showed_unit_type.append(team['unit_type'][0])
    if len(other_agent_info) != 0:
      other_agent_info = f'\n\tAgent {agent_name}:' + other_agent_info
      other_agents_info += other_agent_info
  if len(other_agents_info) != 0:
    other_agents_info = "\n\nGlobal agent info:" + other_agents_info
  if len(other_agents_unit_knowledge) != 0:
    other_agents_unit_knowledge = f"\n\nRelevant Knowledge:" + other_agents_unit_knowledge
  return other_agents_info + other_agents_unit_knowledge
def get_alert_info(obs) -> str:  # for Commander only
  """Report every minimap coordinate whose 'alerts' feature-layer cell is
  non-zero; '' when there are no alerts."""
  alert_layer = obs.observation['feature_minimap']['alerts']
  rows, cols = np.nonzero(alert_layer)  # y (row) and x (column) indices of flagged cells
  entries = ''.join(
    f"\n\tEngage with enemies in minimap [{x}, {y}]" for y, x in zip(rows, cols))
  return "\n\nAlert Info:" + entries if entries else ''
def get_warp_info(obs) -> str: # for Developer only
warp_source_info = ''
pylon_info = ''
prism_info = ''
for unit in obs.observation.raw_units:
if unit.alliance == features.PlayerRelative.SELF and unit.unit_type in [units.Protoss.WarpGate]:
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | true |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/lib/llm_prompt.py | llm_pysc2/lib/llm_prompt.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class BasePrompt:
  """Base container for the prompt fragments shared by agent prompt classes.

  All attributes start as the empty string and are filled in by subclasses:
  sp (system prompt), eip (example input prompt), eop (example output prompt),
  and the four prompts attached to screen/minimap RGB and feature-layer images.
  """

  def __init__(self):
    # text prompts
    self.sp = self.eip = self.eop = ''
    # image-observation prompts
    self.screen_img_rgb_prompt = self.screen_img_fea_prompt = ''
    self.minimap_img_rgb_prompt = self.minimap_img_fea_prompt = ''
class CombatGroupPrompt(BasePrompt):
  """Prompt pack for a Protoss combat-group agent.

  Fills in the three prompt texts declared on BasePrompt:
    sp  -- system prompt (identity, combat rules, required output format);
    eip -- example input (the observation text the agent will receive);
    eop -- example output (analysis + actions in the expected format).
  When config.ENABLE_COMMUNICATION is set, communication instructions and
  examples are appended to all three.
  """

  def __init__(self, name, log_id, config):
    super(CombatGroupPrompt, self).__init__()
    self.name = name      # agent name; indexes config.AGENTS for its description
    self.config = config
    self.log_id = log_id

    # Part 1: identity, combat rules, action output format, and examples.
    self.sp = \
      f"""
1.Identity
You are a {self.config.AGENTS[self.name]['describe']}.
Your should command your troops, complete the tactical tasks assigned by the superior. You will have several teams of units, you can command these teams to fight together or perform different tasks.
2.Rules
2.1 Try to kill more and loss less. Usually, concentrating all firepower on the same target(especially the closest enemy) can improve the strike effectiveness.
2.2 Try to kill enemy as quick as possible, retreat promptly when/before enemy reinforcements arrive.
2.3 When sacrificing your unit can earn much more profits, you can choose to sacrifice your unit.
2.4 Use your skills well to achieve optimal tactical results. Especially when controlling support units.
2.5 Always remember the tactical tasks given by superior. Sometimes you have to sacrifice whole team to ensure the achievement of tactical objectives.
3.Action Output
You should make decisions according to observed information, tactic task and rules, give analysis and decisions for each team. For example, if you have 2 teams name as 'Stalker-1' and 'Stalker-2', you should output as:
Analysis:
xxxxx
Actions:
Team Stalker-1:
xxxxx
Team Stalker-2:
xxxxx
"""
    self.eip = \
      """
Game Info
Time: 0:32
Team Oracle-1 Info:
Team minimap position: [50, 32]
Controlled Team Units:
Unit: Oracle Tag: 0x100200001 Pos: (67, 59) Health: 100 Energy: 108 Weapon_cooldown: 0
Nearby Ally units:
Unit: Observer Tag: 0x100140001 Pos: (10, 70) Health: 70 Weapon_cooldown: 0
Nearby Enemy units:
Unit: Drone Tag: 0x101340001 Pos: (54, 40) Health: 40
Unit: Drone Tag: 0x101280001 Pos: (61, 58) Health: 40
Unit: Drone Tag: 0x1012c0001 Pos: (52, 70) Health: 40
Unit: Drone Tag: 0x1014c0001 Pos: (50, 62) Health: 40
Unit: Drone Tag: 0x101400001 Pos: (61, 63) Health: 40
Unit: Drone Tag: 0x101380001 Pos: (58, 89) Health: 40
Unit: Drone Tag: 0x101480001 Pos: (61, 71) Health: 18
Unit: Drone Tag: 0x101300001 Pos: (54, 94) Health: 40
Unit: Drone Tag: 0x101440001 Pos: (50, 72) Health: 40
Unit: Drone Tag: 0x101240001 Pos: (61, 63) Health: 40
Unit: Overlord Tag: 0x101500001 Pos: (18, 67) Health: 200
Unit: Hatchery Tag: 0x101100001 Pos: (34, 67) Health: 1500
Unit: SpawningPool Tag: 0x1011c0002 Pos: (50, 110) Health: 197 Build_progress: 10%
Unit: Queen Tag: 0x1000c0001 Pos: (50, 40) Health: 175 Energy: 25
Unit: Queen Tag: 0x100580001 Pos: (57, 54) Health: 175 Energy: 25
Here are some description of screen units:
Protoss.Oracle
A light, psionic, support and harassment ship. Can grant vision and harass light units and workers with its pulsar beam.(Cannot attack ground units before activating Pulsar Beam)
unit abilities:
Revelation: Always available. Active skill. Cost: 25 energy. Reveals enemy units and structures in an area, granting vision for 20 seconds. Also reveals cloaked or burrowed units or structures.
Pulsar Beam: Always available. Active skill. Cost: 25 energy (+1.96 energy per second). Enables the Oracle to attack ground units with high damage, particularly effective against light units.
Stasis Ward: Always available. Active skill. Cost: 50 energy. Places a cloaked stasis ward on the ground that traps enemy units in stasis for 21 seconds upon activation.
Protoss.Observer
A cloaking air unit that functions as a detector.
Protoss.StasisTrap
Cloaked structure created by the Oracle. Used to freeze incoming units.Permanent Cloaking:This unit is permanently cloaked. They cannot be seen or directly attacked by enemy forces, unless they have detector support.
Zerg.Drone
Harvests resources and spawns structures. Is sacrificed when creating new structures.The drone morphs into structures and harvests minerals and vespene gas.
Zerg.Overlord
Produces control and is no longer a detector like the StarCraft I version.
Zerg.Hatchery
Spawns larvae to be morphed into other zerg strains, generates creep and digests minerals and gas into a usable form. The queen is spawned directly from the hatchery.
Zerg.SpawningPool
Required for production of zerglings and queens and researches zergling upgrades.
Zerg.Queen
The queen a powerful attacking ground dwelling support unit ideal for zerg defense.
Valid Actions:
<Stop()>
<No_Operation()>
<Attack_Unit(tag)>
<Move_Screen(screen)>
<Move_Minimap(minimap)>
<Ability_OracleRevelation_Screen(screen)>
<Ability_StasisTrap_Screen(screen)>
Arg:
tag: refers to a hexadecimal number, shape as 0x000000000.
screen: refers to a screen coordinate, shape as [x, y], x and y range from 0 to 128.
minimap: refers to a minimap coordinate, shape as [x, y], x and y range from 0 to 64.
"""
    self.eop = \
      """
Analysis:
We are controlling a team called Oracle-1, we have met several enemy Queens, Drones and Overlord.
Our goal is killing as much Drone, consider that we still have enough health and energy, we should choose drone to attack, and leave the area quickly.
Actions:
Team Oracle-1:
<Attack_Unit(0x101480001)>
<Move_Screen([67, 96])>
"""
    # Part 2: communication instructions/examples, only when enabled in config.
    if self.config.ENABLE_COMMUNICATION:
      self.sp += \
        """
4.Communication Output
If there is Available Communicate Target, you should keep communicating with them by Communication functions. For example, if 'Commander' and 'CombatGroup4' in Available Communicate Target, you can output as:
Communications:
<MessageTo(Commander, '''xxxxxxxxxx''')>
<MessageTo(CombatGroup4, '''xxxxxxxxxx''')>
"""
      self.eip += \
        """
Communication:
From Commander:
Your task is to attack the enemy workers of an enemy base near minimap [48,32]. Intelligence shows that two enemy Queens are located on the minimap [44,32]. Try to avoid being detected by enemy Queens before arriving.
Available Communication Tragets:
Commander: Protoss military supreme commander. Responsible for making macro decision through communication, and controls nexus for massrecall for tactical objectives.
Available Communication Functions:
<MessageTo(AgentName, message)>
<MessageTo(ChannelName, message)>
<ListenTo(ChannelName)>
Args explanation:
(1)AgentName: refers to a name mentioned in Available Communication Tragets.
(2)ChannelName: shape as Channel-i, i refers to an integer.
(2)message: any text wrapped between ''' and '''.
"""
      self.eop += \
        """
Communications:
<MessageTo(Commander, '''Copy that, we have arrived enemy base, and started attack enemy workers''')>
"""
    # Part 3: per-query tail reminding the model of the per-team action budget.
    self.eip += \
      f"""
Give each team no more than {self.config.MAX_NUM_ACTIONS} actions.
Now, start generating your analysis and actions:
"""
class CommanderPrompt(BasePrompt):  # TODO: Design a prompt specifically for the supreme military commander
  """Prompt pack for the supreme military commander agent.

  Currently a placeholder: it only stores identity fields, so the prompt
  texts (sp/eip/eop) remain the empty strings set by BasePrompt.
  """

  def __init__(self, name, log_id, config):
    # Bug fix: the original called super(CombatGroupPrompt, self).__init__(),
    # which raises TypeError because CommanderPrompt is not a subclass of
    # CombatGroupPrompt; super() must name this class.
    super(CommanderPrompt, self).__init__()
    self.name = name      # agent name, key into config.AGENTS
    self.config = config
    self.log_id = log_id
    # self.sp = ''
    # self.eip = ''
    # self.eop = ''
class DeveloperPrompt(BasePrompt):  # TODO: Design a prompt specifically for the supreme logistics commander
  """Prompt pack for the supreme logistics (development) commander agent.

  Currently a placeholder: it only stores identity fields, so the prompt
  texts (sp/eip/eop) remain the empty strings set by BasePrompt.
  """

  def __init__(self, name, log_id, config):
    # Bug fix: the original called super(CombatGroupPrompt, self).__init__(),
    # which raises TypeError because DeveloperPrompt is not a subclass of
    # CombatGroupPrompt; super() must name this class.
    super(DeveloperPrompt, self).__init__()
    self.name = name      # agent name, key into config.AGENTS
    self.config = config
    self.log_id = log_id
    # self.sp = ''
    # self.eip = ''
    # self.eop = ''
# Maps an agent role to the prompt class used for that role; 'default' covers
# ordinary combat groups.
PROTOSS_FACTORY = {
  'default': CombatGroupPrompt,
  'commander': CommanderPrompt,
  'developer': DeveloperPrompt,
}
# Terran and Zerg prompt factories are not implemented yet.
TERRAN_FACTORY = {}
ZERG_FACTORY = {}
# Top-level lookup: race name -> role-to-prompt-class factory.
FACTORY = {
  'protoss': PROTOSS_FACTORY,
  'terran': TERRAN_FACTORY,
  'zerg': ZERG_FACTORY,
}
if __name__ == "__main__":
  from llm_pysc2.agents.configs.config import ProtossAgentConfig

  # Smoke test: build a default combat-group prompt and dump its three sections.
  cfg = ProtossAgentConfig()
  demo = CombatGroupPrompt('CombatGroup1', log_id=0, config=cfg)
  for title, text in (("System Prompt", demo.sp),
                      ("Example Input Prompt", demo.eip),
                      ("Example Output Prompt", demo.eop)):
    print("--" * 25 + title + "--" * 25)
    print(text)
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/lib/utils.py | llm_pysc2/lib/utils.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pysc2.lib import units, upgrades, buffs, actions
import numpy as np
import math
# Do not modify this variable
# NOTE(review): presumably the number of world-grid cells spanned by one screen
# view, used in screen<->world coordinate conversions — confirm before changing.
SCREEN_WORLD_GRID = 24
def get_tag_list(unit_list: list) -> list:
  """Collect the tag of every unit in *unit_list*, preserving order."""
  return [unit.tag for unit in unit_list]
def get_raw_unit_list_of_tags(obs, tags: "int or list") -> list:
  """Return the units from obs.observation.raw_units whose tag matches *tags*.

  *tags* may be a single tag or a list of tags; raw-unit order is preserved.
  """
  wanted = tags if isinstance(tags, list) else [tags]
  return [unit for unit in obs.observation.raw_units if unit.tag in wanted]
def get_feature_unit_list_of_tags(obs, tags: "int or list") -> list:
  """Return the units from obs.observation.feature_units whose tag matches *tags*.

  *tags* may be a single tag or a list of tags; feature-unit order is preserved.
  """
  wanted = tags if isinstance(tags, list) else [tags]
  return [unit for unit in obs.observation.feature_units if unit.tag in wanted]
def get_nearby_tag_list(center_unit, from_unit_list: list, dist: int = 15) -> list:
  """Tags of every unit in *from_unit_list* within *dist* of *center_unit*.

  Distance is Euclidean over the units' x/y fields; the boundary is inclusive.
  """
  cx, cy = center_unit.x, center_unit.y
  return [
    unit.tag for unit in from_unit_list
    if math.sqrt((unit.x - cx) ** 2 + (unit.y - cy) ** 2) <= dist
  ]
def get_nearby_unit_list(center_unit, from_unit_list: list, dist: int = 15) -> list:
  """Units from *from_unit_list* within *dist* of *center_unit*.

  Same filter as get_nearby_tag_list but returns the unit objects themselves.
  """
  cx, cy = center_unit.x, center_unit.y
  return [
    unit for unit in from_unit_list
    if math.sqrt((unit.x - cx) ** 2 + (unit.y - cy) ** 2) <= dist
  ]
def get_dist(unit, unit_):
  """Euclidean distance between two units, computed from their x/y fields."""
  dx, dy = unit.x - unit_.x, unit.y - unit_.y
  return math.sqrt(dx ** 2 + dy ** 2)
def get_cos(unit1, unit2, unit3):
  """Cosine of the angle between vectors unit1->unit2 and unit2->unit3.

  Uses numpy: dot(v12, v23) / (|v12| * |v23|).  A zero-length vector makes
  the denominator 0, so numpy will emit a warning and return nan/inf.
  """
  v12 = np.array([unit2.x - unit1.x, unit2.y - unit1.y])
  v23 = np.array([unit3.x - unit2.x, unit3.y - unit2.y])
  return np.dot(v12, v23) / (np.linalg.norm(v12) * np.linalg.norm(v23))
def get_relevant_team_dist(relevant_team_list, obs, curr_unit):
  """Distance from *curr_unit* to the lead unit of each team in the list.

  A team's lead unit is the raw unit whose tag equals the team's first
  'unit_tags' entry.  Teams that are empty, or whose lead unit is not found
  in obs.observation.raw_units, get the sentinel distance 99999.
  """
  NOT_FOUND = 99999  # sentinel: team empty or its lead unit no longer visible
  distances = []
  for team in relevant_team_list:
    tags = team['unit_tags']
    if not tags:
      distances.append(NOT_FOUND)
      continue
    lead_unit = None
    for unit in obs.observation.raw_units:
      if unit.tag == tags[0]:
        lead_unit = unit
    distances.append(NOT_FOUND if lead_unit is None else get_dist(lead_unit, curr_unit))
  return distances
# TODO: Add Zerg and Terran buildings
# Building-name groups keyed by gameplay role, for classifying structures by
# their type name (as opposed to the *_TYPE lists below, which use pysc2 ids).
BASE_BUILDING_NAMES = ['Nexus', 'Hatchery', 'Hive', 'Lair', 'CommandCenter', 'OrbitalCommand', 'PlanetaryFortress']
GAS_BUILDING_NAMES = ['Assimilator', 'AssimilatorRich', 'Extractor', 'ExtractorRich', 'Refinery', 'RefineryRich']
CREEP_BUILDING_NAMES = ['BanelingNest', 'CreepTumor', 'EvolutionChamber', 'Extractor', 'GreaterSpire', 'HydraliskDen',
                        'InfestationPit', 'LurkerDen', 'NydusNetwork', 'NydusCanal', 'RoachWarren', 'SpawningPool',
                        'SpineCrawler', 'Spire', 'SporeCrawler', 'UltraliskCavern']
POWER_BUILDING_NAMES = ['Gateway', 'Stargate', 'RoboticsFacility', 'CyberneticsCore', 'Forge', 'TwilightCouncil',
                        'FleetBeacon', 'RoboticsBay', 'TemplarArchive', 'DarkShrine', 'PhotonCannon', 'ShieldBattery']
# Building footprint-size groups; the empty lists concatenated below are
# placeholders for the missing Zerg/Terran entries (see TODO above).
SIZE5_BUILDING_NAMES = ['Nexus', 'Hatchery', 'Hive', 'Lair', 'CommandCenter', 'OrbitalCommand', 'PlanetaryFortress']
SIZE3_BUILDING_NAMES = ['Gateway', 'Stargate', 'RoboticsFacility', 'CyberneticsCore', 'Forge', 'TwilightCouncil',
                        'FleetBeacon', 'RoboticsBay', 'TemplarArchive', 'Assimilator', 'AssimilatorRich'] + \
                       [] + \
                       []
SIZE2_BUILDING_NAMES = ['Pylon', 'DarkShrine', 'PhotonCannon', 'ShieldBattery'] + \
                       [] + \
                       []
SIZE1_BUILDING_NAMES = ['StasisTrap'] + \
                       ['CreepTumor'] + \
                       ['SensorTower']
# pysc2 unit-type ids of all structures, grouped per race.
PROTOSS_BUILDING_TYPE = [
  units.Protoss.Nexus, units.Protoss.Assimilator, units.Protoss.AssimilatorRich, units.Protoss.Pylon,
  units.Protoss.CyberneticsCore, units.Protoss.Forge, units.Protoss.Gateway, units.Protoss.WarpGate,
  units.Protoss.ShieldBattery, units.Protoss.PhotonCannon,
  units.Protoss.TwilightCouncil, units.Protoss.Stargate, units.Protoss.RoboticsBay,
  units.Protoss.TemplarArchive, units.Protoss.FleetBeacon, units.Protoss.RoboticsFacility,
  units.Protoss.DarkShrine, units.Protoss.StasisTrap
]
ZERG_BUILDING_TYPE = [
  units.Zerg.Hatchery, units.Zerg.Lair, units.Zerg.Hive, units.Zerg.Extractor, units.Zerg.ExtractorRich,
  units.Zerg.SpawningPool, units.Zerg.EvolutionChamber, units.Zerg.HydraliskDen,
  units.Zerg.Spire, units.Zerg.GreaterSpire, units.Zerg.BanelingNest,
  units.Zerg.InfestationPit, units.Zerg.NydusNetwork, units.Zerg.NydusCanal,
  units.Zerg.UltraliskCavern, units.Zerg.RoachWarren, units.Zerg.LurkerDen,
  units.Zerg.SpineCrawler, units.Zerg.SpineCrawlerUprooted,
  units.Zerg.SporeCrawler, units.Zerg.SporeCrawlerUprooted
]
TERRAN_BUILDING_TYPE = [
  units.Terran.CommandCenter, units.Terran.OrbitalCommand, units.Terran.PlanetaryFortress,
  units.Terran.Barracks, units.Terran.Bunker, units.Terran.Factory, units.Terran.Starport,
  units.Terran.EngineeringBay, units.Terran.MissileTurret, units.Terran.SensorTower,
  units.Terran.SupplyDepot, units.Terran.Refinery, units.Terran.GhostAcademy,
  units.Terran.Armory, units.Terran.FusionCore, units.Terran.Reactor, units.Terran.TechLab,
  units.Terran.BarracksTechLab, units.Terran.FactoryTechLab, units.Terran.StarportTechLab,
]
# All structures of all races, in one list.
BUILDING_TYPE = PROTOSS_BUILDING_TYPE + ZERG_BUILDING_TYPE + TERRAN_BUILDING_TYPE
# Main-base (town-hall) structures of the three races.
BASE_BUILDING_TYPE = [
  units.Protoss.Nexus,
  units.Terran.CommandCenter, units.Terran.OrbitalCommand, units.Terran.PlanetaryFortress,
  units.Zerg.Hatchery, units.Zerg.Lair, units.Zerg.Hive
]
# Worker units of the three races.
WORKER_TYPE = [
  units.Protoss.Probe,
  units.Terran.SCV, units.Terran.MULE,  # note: the MULE cannot harvest vespene gas
  units.Zerg.Drone
]
# Every mineral-patch variant (normal/rich/purifier/battlestation/lab, full and depleted-750/450).
MINERAL_TYPE = [
  units.Neutral.MineralField, units.Neutral.MineralField750, units.Neutral.MineralField450,
  units.Neutral.RichMineralField, units.Neutral.RichMineralField750,
  units.Neutral.PurifierRichMineralField, units.Neutral.PurifierRichMineralField750,
  units.Neutral.BattleStationMineralField, units.Neutral.BattleStationMineralField750,
  units.Neutral.PurifierMineralField, units.Neutral.PurifierMineralField750,
  units.Neutral.PurifierRichMineralField, units.Neutral.PurifierRichMineralField750,
  units.Neutral.LabMineralField, units.Neutral.LabMineralField750
]
# Raw vespene geysers (before a gas structure is built on them).
GAS_TYPE = [
  units.Neutral.VespeneGeyser, units.Neutral.RichVespeneGeyser, units.Neutral.ProtossVespeneGeyser,
  units.Neutral.PurifierVespeneGeyser, units.Neutral.ShakurasVespeneGeyser]
# Gas-harvesting structures of the three races.
GAS_BUILDING_TYPE = [
  units.Protoss.Assimilator, units.Protoss.AssimilatorRich,
  units.Terran.Refinery, units.Terran.RefineryRich,
  units.Zerg.Extractor, units.Zerg.ExtractorRich
]
# Units/structures that can carry other units.
TRANSPORTER_TYPE = [
  units.Protoss.WarpPrism, units.Protoss.WarpPrismPhasing,
  units.Zerg.OverlordTransport, units.Zerg.NydusCanal, units.Zerg.NydusNetwork,
  units.Terran.Medivac,
]
OTHER_ACCESSBLE_UNIT_TYPE = [  # excluding gas buildings
  units.Zerg.Hatchery, units.Zerg.Lair, units.Zerg.Hive,
  units.Terran.Bunker  # + Terran bases too?
]
# Everything a unit can enter/board: transports, gas buildings, and the above.
ACCESSBLE_UNIT_TYPE = TRANSPORTER_TYPE + GAS_BUILDING_TYPE + OTHER_ACCESSBLE_UNIT_TYPE
# Protoss structures whose production/research can be accelerated (Chrono Boost targets).
BOOSTABLE_TYPE = [
  units.Protoss.Nexus, units.Protoss.Gateway, units.Protoss.CyberneticsCore, units.Protoss.Forge,
  units.Protoss.TwilightCouncil, units.Protoss.TemplarArchive, units.Protoss.DarkShrine,
  units.Protoss.Stargate, units.Protoss.FleetBeacon, units.Protoss.RoboticsBay, units.Protoss.RoboticsFacility,
]
# used in llm_observation
# Unit types excluded from per-unit reporting in the LLM observation text:
# presumably short-lived/spawned swarm units — confirm against llm_observation.
UNIT_DONOT_NEED_TAG = \
  [units.Protoss.Interceptor,
   units.Zerg.Broodling, units.Zerg.Locust, units.Zerg.LocustFlying, units.Zerg.Larva]
# Same idea, but for omitting the distance field (also covers Zergling/Baneling).
UNIT_DONOT_NEED_DIS = \
  [units.Protoss.Interceptor,
   units.Zerg.Broodling, units.Zerg.Locust, units.Zerg.LocustFlying, units.Zerg.Larva,
   units.Zerg.Zergling, units.Zerg.Baneling]
# used in locked_func1
# Unit types that should not be gathered/grouped (includes temporary phase units).
UNIT_DONOT_NEED_GATHER = \
  [units.Protoss.Interceptor, units.Protoss.AdeptPhaseShift, units.Protoss.DisruptorPhased,
   units.Zerg.Broodling, units.Zerg.Locust, units.Zerg.LocustFlying, units.Zerg.Larva]
# zerg_tech_upgrades = {
# 'Melee Attacks': [
# upgrades.Upgrades.ZergMeleeWeaponsLevel1,
# upgrades.Upgrades.ZergMeleeWeaponsLevel2,
# upgrades.Upgrades.ZergMeleeWeaponsLevel3
# ],
# 'Missile Attacks': [
# upgrades.Upgrades.ZergMissileWeaponsLevel1,
# upgrades.Upgrades.ZergMissileWeaponsLevel2,
# upgrades.Upgrades.ZergMissileWeaponsLevel3
# ],
# 'Flyer Attacks': [
# upgrades.Upgrades.ZergFlyerWeaponsLevel1,
# upgrades.Upgrades.ZergFlyerWeaponsLevel2,
# upgrades.Upgrades.ZergFlyerWeaponsLevel3
# ],
# 'Ground Carapace': [
# upgrades.Upgrades.ZergGroundArmorsLevel1,
# upgrades.Upgrades.ZergGroundArmorsLevel2,
# upgrades.Upgrades.ZergGroundArmorsLevel3
# ],
# 'Flyer Carapace': [
# upgrades.Upgrades.ZergFlyerArmorsLevel1,
# upgrades.Upgrades.ZergFlyerArmorsLevel2, upgrades.Upgrades.ZergFlyerArmorsLevel3
# ],
# 'Burrow': [upgrades.Upgrades.Burrow],
# 'Centrifugal Hooks': [upgrades.Upgrades.CentrificalHooks],
# 'Adrenal Glands': [upgrades.Upgrades.AdrenalGlands],
# 'Adaptive Talons': [upgrades.Upgrades.AdaptiveTalons],
# 'Anabolic Synthesis': [upgrades.Upgrades.AnabolicSynthesis],
# 'Chitinous Plating': [upgrades.Upgrades.ChitinousPlating],
# 'Glial Reconstitution': [upgrades.Upgrades.GlialReconstitution],
# 'Grooved Spines': [upgrades.Upgrades.GroovedSpines],
# 'Metabolic Boost': [upgrades.Upgrades.MetabolicBoost],
# 'Muscular Augments': [upgrades.Upgrades.MuscularAugments],
# 'Neural Parasite': [upgrades.Upgrades.NeuralParasite],
# 'Pathogen Glands': [upgrades.Upgrades.PathogenGlands],
# 'Pneumatized Carapace': [upgrades.Upgrades.PneumatizedCarapace],
# 'Tunneling Claws': [upgrades.Upgrades.TunnelingClaws]
# }
#
# # Protoss重要升级和建筑ID
# protoss_tech_upgrades = {
# 'Ground Weapons': [upgrades.Upgrades.ProtossGroundWeaponsLevel1,
# upgrades.Upgrades.ProtossGroundWeaponsLevel2,
# upgrades.Upgrades.ProtossGroundWeaponsLevel3],
# 'Ground Armors': [upgrades.Upgrades.ProtossGroundArmorsLevel1,
# upgrades.Upgrades.ProtossGroundArmorsLevel2,
# upgrades.Upgrades.ProtossGroundArmorsLevel3],
# 'Air Weapons': [upgrades.Upgrades.ProtossAirWeaponsLevel1,
# upgrades.Upgrades.ProtossAirWeaponsLevel2,
# upgrades.Upgrades.ProtossAirWeaponsLevel3],
# 'Air Armors': [upgrades.Upgrades.ProtossAirArmorsLevel1,
# upgrades.Upgrades.ProtossAirArmorsLevel2,
# upgrades.Upgrades.ProtossAirArmorsLevel3],
# 'Shields': [upgrades.Upgrades.ProtossShieldsLevel1,
# upgrades.Upgrades.ProtossShieldsLevel2,
# upgrades.Upgrades.ProtossShieldsLevel3],
# 'Blink': [upgrades.Upgrades.Blink],
# 'Charge': [upgrades.Upgrades.Charge],
# 'Extended Thermal Lance': [upgrades.Upgrades.ExtendedThermalLance],
# 'Gravitic Booster': [upgrades.Upgrades.GraviticBooster],
# 'Gravitic Drive': [upgrades.Upgrades.GraviticDrive],
# 'Graviton Catapult': [upgrades.Upgrades.GravitonCatapult],
# 'Psi Storm': [upgrades.Upgrades.PsiStorm],
# 'Resonating Glaives': [upgrades.Upgrades.ResonatingGlaives],
# 'Shadow Strike': [upgrades.Upgrades.ShadowStrike],
# 'Warp Gate Research': [upgrades.Upgrades.WarpGateResearch]
# }
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/lib/llm_action.py | llm_pysc2/lib/llm_action.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pysc2.lib.actions import FUNCTIONS as F
from pysc2.lib import features
from llm_pysc2.lib.utils import *
from loguru import logger
import random
import re
# standard action object
# Template for one LLM-callable action: 'name' is the text form the LLM emits,
# 'arg' lists the argument kinds (e.g. 'tag'/'screen'/'minimap'), and 'func' is
# the ordered list of (pysc2 function id, pysc2 function, argument spec) steps
# executed when the action fires.
AN_ACTION = {'name': '', 'arg': [], 'func': []}
# actions for smac tasks, ACTION_SMAC for tasks that attack is enough
# Minimal action set for SMAC-style micro tasks where attacking alone suffices.
PROTOSS_BASIC_ACTION_SMAC = [
  {'name': 'Attack_Unit', 'arg': ['tag'],
   'func': [(12, F.Attack_screen, ('queued', 'screen_tag'))]},
  # {'name': 'Select_Unit_Attack_Unit', 'arg': ['tag', 'tag'],  # single unit control
  #  'func': [(3, F.select_rect, ('select', 'screen1_tag', 'screen2_tag')),
  #           (12, F.Attack_screen, ('queued', 'screen_tag2'))]},
]
# actions for smac tasks, ACTION_SMAC2 for those moving is indispensable
# SMAC action set for micro tasks that also require repositioning on screen.
PROTOSS_BASIC_ACTION_SMAC2 = [
  {'name': 'Attack_Unit', 'arg': ['tag'],
   'func': [(12, F.Attack_screen, ('queued', 'screen_tag'))]},
  {'name': 'Move_Screen', 'arg': ['screen'],
   'func': [(331, F.Move_screen, ('queued', 'screen'))]},
  # {'name': 'Select_Unit_Attack_Unit', 'arg': ['tag', 'tag'],  # single unit control
  #  'func': [(3, F.select_rect, ('select', 'screen1_tag', 'screen2_tag')),
  #           (12, F.Attack_screen, ('queued', 'screen_tag2'))]},
  # {'name': 'Select_Unit_Move_Screen', 'arg': ['tag', 'screen'],  # single unit control
  #  'func': [(3, F.select_rect, ('select', 'screen1_tag', 'screen2_tag')),
  #           (331, F.Move_screen, ('queued', 'screen'))]},
]
# actions for sc2 unit, 1 for buildings
# NOTE(review): single-element arg specs such as ('now') and ('queued') are
# plain strings, not 1-tuples; the executor presumably accepts both forms —
# confirm against the action-execution code.
PROTOSS_BASIC_ACTION_1 = [
  {'name': 'Stop', 'arg': [], 'func': [(453, F.Stop_quick, ('now'))]},
  {'name': 'No_Operation', 'arg': [], 'func': [(0, F.no_op, ())]},
  {'name': 'Stop_Building', 'arg': [], 'func': [(454, F.Stop_Building_quick, ('queued'))]},
  # {'name': 'Stop_Building_Unit', 'arg': ['tag'],
  #  'func': [(573, F.llm_pysc2_move_camera, ('world_tag')),
  #           (3, F.select_rect, ('select', 'screen1_tag', 'screen2_tag')),
  #           (454, F.Stop_Building_quick, ('queued'))]},
]
# actions for sc2 unit, 2 for units capable of launching attacks
# Basic movement + attack action set for combat-capable units.
PROTOSS_BASIC_ACTION_2 = [
  {'name': 'Stop', 'arg': [], 'func': [(453, F.Stop_quick, ('now'))]},
  {'name': 'No_Operation', 'arg': [], 'func': [(0, F.no_op, ())]},
  {'name': 'Hold_Position', 'arg': [], 'func': [(274, F.HoldPosition_quick, ('queued'))]},
  {'name': 'Move_Minimap', 'arg': ['minimap'], 'func': [(332, F.Move_minimap, ('queued', 'minimap'))]},
  {'name': 'Move_Screen', 'arg': ['screen'], 'func': [(331, F.Move_screen, ('queued', 'screen'))]},
  {'name': 'Attack_Unit', 'arg': ['tag'], 'func': [(12, F.Attack_screen, ('queued', 'screen_tag'))]},
  # {'name': 'Attack_Screen', 'arg': ['screen'], 'func': [(12, F.Attack_screen, ('queued', 'screen'))]},
  # {'name': 'Board_WarpPrism', 'arg': ['screen'], 'func': [(331, F.Move_screen, ('queued', 'screen_tag'))]},
]
# actions for sc2 unit, 3 for those unable to attack
# Same as PROTOSS_BASIC_ACTION_2 minus Attack_Unit, for non-attacking units.
PROTOSS_BASIC_ACTION_3 = [
  {'name': 'Stop', 'arg': [], 'func': [(453, F.Stop_quick, ('now'))]},
  {'name': 'No_Operation', 'arg': [], 'func': [(0, F.no_op, ())]},
  {'name': 'Hold_Position', 'arg': [], 'func': [(274, F.HoldPosition_quick, ('queued'))]},
  {'name': 'Move_Minimap', 'arg': ['minimap'], 'func': [(332, F.Move_minimap, ('queued', 'minimap'))]},
  {'name': 'Move_Screen', 'arg': ['screen'], 'func': [(331, F.Move_screen, ('queued', 'screen'))]},
  # {'name': 'Board_WarpPrism', 'arg': ['screen'], 'func': [(331, F.Move_screen, ('queued', 'screen_tag'))]},
]
# WarpTrain of protoss WarpGates
PROTOSS_ACTION_WARPTRAIN = [
{'name': 'Warp_Adept_Near', 'arg': ['tag'],
'func': [(8, F.select_warp_gates, ('select')), (573, F.llm_pysc2_move_camera, ('world_tag')),
(505, F.TrainWarp_Adept_screen, ('queued', 'screen_tag'))]}, # tag for WarpprismPhasing/Pylon
{'name': 'Warp_DarkTemplar_Near', 'arg': ['tag'],
'func': [(8, F.select_warp_gates, ('select')), (573, F.llm_pysc2_move_camera, ('world_tag')),
(506, F.TrainWarp_DarkTemplar_screen, ('queued', 'screen_tag'))]}, # tag for WarpprismPhasing/Pylon
{'name': 'Warp_HighTemplar_Near', 'arg': ['tag'],
'func': [(8, F.select_warp_gates, ('select')), (573, F.llm_pysc2_move_camera, ('world_tag')),
(507, F.TrainWarp_HighTemplar_screen, ('queued', 'screen_tag'))]}, # tag for WarpprismPhasing/Pylon
{'name': 'Warp_Sentry_Near', 'arg': ['tag'],
'func': [(8, F.select_warp_gates, ('select')), (573, F.llm_pysc2_move_camera, ('world_tag')),
(508, F.TrainWarp_Sentry_screen, ('queued', 'screen_tag'))]}, # tag for WarpprismPhasing/Pylon
{'name': 'Warp_Stalker_Near', 'arg': ['tag'],
'func': [(8, F.select_warp_gates, ('select')), (573, F.llm_pysc2_move_camera, ('world_tag')),
(509, F.TrainWarp_Stalker_screen, ('queued', 'screen_tag'))]}, # tag for WarpprismPhasing/Pylon
{'name': 'Warp_Zealot_Near', 'arg': ['tag'],
'func': [(8, F.select_warp_gates, ('select')), (573, F.llm_pysc2_move_camera, ('world_tag')),
(510, F.TrainWarp_Zealot_screen, ('queued', 'screen_tag')),
(510, F.TrainWarp_Zealot_screen, ('queued', 'screen_tag')),
(510, F.TrainWarp_Zealot_screen, ('queued', 'screen_tag')),
(510, F.TrainWarp_Zealot_screen, ('queued', 'screen_tag')),]}, # tag for WarpprismPhasing/Pylon
# {'name': 'Warp_One_Adept_Screen', 'arg': ['screen'],
# 'func': [(8, F.select_warp_gates, ('select')), (505, F.TrainWarp_Adept_screen, ('queued', 'screen'))]},
# {'name': 'Warp_One_DarkTemplar_Screen', 'arg': ['screen'],
# 'func': [(8, F.select_warp_gates, ('select')), (506, F.TrainWarp_DarkTemplar_screen, ('queued', 'screen'))]},
# {'name': 'Warp_One_HighTemplar_Screen', 'arg': ['screen'],
# 'func': [(8, F.select_warp_gates, ('select')), (507, F.TrainWarp_HighTemplar_screen, ('queued', 'screen'))]},
# {'name': 'Warp_One_Sentry_Screen', 'arg': ['screen'],
# 'func': [(8, F.select_warp_gates, ('select')), (508, F.TrainWarp_Sentry_screen, ('queued', 'screen'))]},
# {'name': 'Warp_One_Stalker_Screen', 'arg': ['screen'],
# 'func': [(8, F.select_warp_gates, ('select')), (509, F.TrainWarp_Stalker_screen, ('queued', 'screen'))]},
# {'name': 'Warp_One_Zealot_Screen', 'arg': ['screen'],
# 'func': [(8, F.select_warp_gates, ('select')), (510, F.TrainWarp_Zealot_screen, ('queued', 'screen'))]},
]
# Idle production buildings will be automatically selected by LLMAgent._add_func_for_train_and_research()
# Unit-training actions, grouped by the structure that produces them.
PROTOSS_ACTION_TRAIN = [
  # Nexus, BN
  {'name': 'Train_Mothership', 'arg': [], 'func': [(541, F.Train_Mothership_quick, ('queued'))]},
  # GateWay BG
  {'name': 'Train_Adept', 'arg': [], 'func': [(457, F.Train_Adept_quick, ('queued'))]},
  {'name': 'Train_DarkTemplar', 'arg': [], 'func': [(465, F.Train_DarkTemplar_quick, ('queued'))]},
  {'name': 'Train_HighTemplar', 'arg': [], 'func': [(471, F.Train_HighTemplar_quick, ('queued'))]},
  {'name': 'Train_Sentry', 'arg': [], 'func': [(491, F.Train_Sentry_quick, ('queued'))]},
  {'name': 'Train_Stalker', 'arg': [], 'func': [(493, F.Train_Stalker_quick, ('queued'))]},
  {'name': 'Train_Zealot', 'arg': [], 'func': [(503, F.Train_Zealot_quick, ('queued'))]},
  # StarGate VS
  {'name': 'Train_Oracle', 'arg': [], 'func': [(482, F.Train_Oracle_quick, ('queued'))]},
  {'name': 'Train_Phoenix', 'arg': [], 'func': [(484, F.Train_Phoenix_quick, ('queued'))]},
  {'name': 'Train_VoidRay', 'arg': [], 'func': [(500, F.Train_VoidRay_quick, ('queued'))]},
  {'name': 'Train_Tempest', 'arg': [], 'func': [(495, F.Train_Tempest_quick, ('queued'))]},
  {'name': 'Train_Carrier', 'arg': [], 'func': [(461, F.Train_Carrier_quick, ('queued'))]},
  # RoboticFacility VR
  {'name': 'Train_Observer', 'arg': [], 'func': [(481, F.Train_Observer_quick, ('queued'))]},
  {'name': 'Train_WarpPrism', 'arg': [], 'func': [(501, F.Train_WarpPrism_quick, ('queued'))]},
  {'name': 'Train_Immortal', 'arg': [], 'func': [(473, F.Train_Immortal_quick, ('queued'))]},
  {'name': 'Train_Colossus', 'arg': [], 'func': [(462, F.Train_Colossus_quick, ('queued'))]},
  {'name': 'Train_Disruptor', 'arg': [], 'func': [(466, F.Train_Disruptor_quick, ('queued'))]},
]
# Idle technology buildings will be automatically selected by LLMAgent._add_func_for_train_and_research()
# Research/upgrade actions, grouped by the structure that provides them.
PROTOSS_ACTION_RESEARCH = [
  # CyberneticsCore BY
  {'name': 'Research_ProtossAirArmor', 'arg': [],
   'func': [(381, F.Research_ProtossAirArmor_quick, ('queued'))]},
  {'name': 'Research_ProtossAirWeapons', 'arg': [],
   'func': [(385, F.Research_ProtossAirWeapons_quick, ('queued'))]},
  {'name': 'Research_WarpGate', 'arg': [],
   'func': [(428, F.Research_WarpGate_quick, ('queued'))]},
  # Forge BF
  {'name': 'Research_ProtossGroundArmor', 'arg': [],
   'func': [(389, F.Research_ProtossGroundArmor_quick, ('queued'))]},
  {'name': 'Research_ProtossGroundWeapons', 'arg': [],
   'func': [(393, F.Research_ProtossGroundWeapons_quick, ('queued'))]},
  {'name': 'Research_ProtossShields', 'arg': [],
   'func': [(397, F.Research_ProtossShields_quick, ('queued'))]},
  # TwilightCouncil VC
  {'name': 'Research_Charge', 'arg': [],
   'func': [(359, F.Research_Charge_quick, ('queued'))]},
  {'name': 'Research_Blink', 'arg': [],
   'func': [(356, F.Research_Blink_quick, ('queued'))]},
  {'name': 'Research_AdeptResonatingGlaives', 'arg': [],
   'func': [(351, F.Research_AdeptResonatingGlaives_quick, ('queued'))]},
  # FleetBeacon VF (Void ray upgrade and Tempest upgrade are not realised in pysc2)
  {'name': 'Research_PhoenixAnionPulseCrystals', 'arg': [],
   'func': [(379, F.Research_PhoenixAnionPulseCrystals_quick, ('queued'))]},
  # RoboticsBay VB
  {'name': 'Research_ExtendedThermalLance', 'arg': [],
   'func': [(364, F.Research_ExtendedThermalLance_quick, ('queued'))]},
  {'name': 'Research_GraviticBooster', 'arg': [],
   'func': [(366, F.Research_GraviticBooster_quick, ('queued'))]},
  {'name': 'Research_GraviticDrive', 'arg': [],
   'func': [(367, F.Research_GraviticDrive_quick, ('queued'))]},
  # TemplarArchive VT
  {'name': 'Research_PsiStorm', 'arg': [],
   'func': [(401, F.Research_PsiStorm_quick, ('queued'))]},
  # DarkShrine VD
  {'name': 'Research_ShadowStrike', 'arg': [],
   'func': [(404, F.Research_ShadowStrike_quick, ('queued'))]},
]
# Standard build actions
# '_Near(tag)' variants anchor placement on a tagged unit (Build_Nexus_Near also
# moves the camera to the tag's world position first); '_Screen(screen)'
# variants place at an explicit screen coordinate.
PROTOSS_ACTION_BUILD = [
  # tag for Vespene Geyser
  {'name': 'Build_Nexus_Near', 'arg': ['tag'],
   'func': [(573, F.llm_pysc2_move_camera, ('world_tag')), (65, F.Build_Nexus_screen, ('queued', 'screen_tag'))]},
  # tag for Vespene Geyser
  {'name': 'Build_Assimilator_Near', 'arg': ['tag'],
   'func': [(40, F.Build_Assimilator_screen, ('queued', 'screen_tag'))]},
  {'name': 'Build_Nexus_Screen', 'arg': ['screen'],
   'func': [(65, F.Build_Nexus_screen, ('queued', 'screen'))]},
  {'name': 'Build_Assimilator_Screen', 'arg': ['screen'],
   'func': [(40, F.Build_Assimilator_screen, ('queued', 'screen'))]},
  {'name': 'Build_Pylon_Screen', 'arg': ['screen'],
   'func': [(70, F.Build_Pylon_screen, ('queued', 'screen'))]},
  {'name': 'Build_Gateway_Screen', 'arg': ['screen'],
   'func': [(57, F.Build_Gateway_screen, ('queued', 'screen'))]},
  {'name': 'Build_CyberneticsCore_Screen', 'arg': ['screen'],
   'func': [(48, F.Build_CyberneticsCore_screen, ('queued', 'screen'))]},
  {'name': 'Build_Forge_Screen', 'arg': ['screen'],
   'func': [(55, F.Build_Forge_screen, ('queued', 'screen'))]},
  {'name': 'Build_PhotonCannon_Screen', 'arg': ['screen'],
   'func': [(69, F.Build_PhotonCannon_screen, ('queued', 'screen'))]},
  {'name': 'Build_ShieldBattery_Screen', 'arg': ['screen'],
   'func': [(525, F.Build_ShieldBattery_screen, ('queued', 'screen'))]},
  {'name': 'Build_TwilightCouncil_Screen', 'arg': ['screen'],
   'func': [(101, F.Build_TwilightCouncil_screen, ('queued', 'screen'))]},
  {'name': 'Build_TemplarArchive_Screen', 'arg': ['screen'],
   'func': [(100, F.Build_TemplarArchive_screen, ('queued', 'screen'))]},
  {'name': 'Build_DarkShrine_Screen', 'arg': ['screen'],
   'func': [(49, F.Build_DarkShrine_screen, ('queued', 'screen'))]},
  {'name': 'Build_Stargate_Screen', 'arg': ['screen'],
   'func': [(88, F.Build_Stargate_screen, ('queued', 'screen'))]},
  {'name': 'Build_FleetBeacon_Screen', 'arg': ['screen'],
   'func': [(54, F.Build_FleetBeacon_screen, ('queued', 'screen'))]},
  {'name': 'Build_RoboticsBay_Screen', 'arg': ['screen'],
   'func': [(81, F.Build_RoboticsBay_screen, ('queued', 'screen'))]},
  {'name': 'Build_RoboticsFacility_Screen', 'arg': ['screen'],
   'func': [(82, F.Build_RoboticsFacility_screen, ('queued', 'screen'))]},
]
# Simplified build actions
PROTOSS_ACTION_EASY_BUILD = [
  # Simplified variants of PROTOSS_ACTION_BUILD: every action takes a unit tag
  # and builds near that unit ('screen_tag' resolves the tag to screen coords),
  # so the LLM never has to pick raw screen coordinates itself.
  # tag for Vespene Geyser
  {'name': 'Build_Nexus_Near', 'arg': ['tag'],
   'func': [(65, F.Build_Nexus_screen, ('queued', 'screen_tag'))]},
  # tag for Vespene Geyser
  {'name': 'Build_Assimilator_Near', 'arg': ['tag'],
   'func': [(40, F.Build_Assimilator_screen, ('queued', 'screen_tag'))]},
  # tag for WarpprismPhasing/Pylon
  {'name': 'Build_Pylon_Near', 'arg': ['tag'],
   'func': [(70, F.Build_Pylon_screen, ('queued', 'screen_tag'))]},
  {'name': 'Build_Gateway_Near', 'arg': ['tag'],
   'func': [(57, F.Build_Gateway_screen, ('queued', 'screen_tag'))]},
  {'name': 'Build_CyberneticsCore_Near', 'arg': ['tag'],
   'func': [(48, F.Build_CyberneticsCore_screen, ('queued', 'screen_tag'))]},
  {'name': 'Build_Forge_Near', 'arg': ['tag'],
   'func': [(55, F.Build_Forge_screen, ('queued', 'screen_tag'))]},
  {'name': 'Build_PhotonCannon_Near', 'arg': ['tag'],
   'func': [(69, F.Build_PhotonCannon_screen, ('queued', 'screen_tag'))]},
  {'name': 'Build_ShieldBattery_Near', 'arg': ['tag'],
   'func': [(525, F.Build_ShieldBattery_screen, ('queued', 'screen_tag'))]},
  {'name': 'Build_TwilightCouncil_Near', 'arg': ['tag'],
   'func': [(101, F.Build_TwilightCouncil_screen, ('queued', 'screen_tag'))]},
  {'name': 'Build_TemplarArchive_Near', 'arg': ['tag'],
   'func': [(100, F.Build_TemplarArchive_screen, ('queued', 'screen_tag'))]},
  {'name': 'Build_DarkShrine_Near', 'arg': ['tag'],
   'func': [(49, F.Build_DarkShrine_screen, ('queued', 'screen_tag'))]},
  {'name': 'Build_Stargate_Near', 'arg': ['tag'],
   'func': [(88, F.Build_Stargate_screen, ('queued', 'screen_tag'))]},
  {'name': 'Build_FleetBeacon_Near', 'arg': ['tag'],
   'func': [(54, F.Build_FleetBeacon_screen, ('queued', 'screen_tag'))]},
  {'name': 'Build_RoboticsBay_Near', 'arg': ['tag'],
   'func': [(81, F.Build_RoboticsBay_screen, ('queued', 'screen_tag'))]},
  {'name': 'Build_RoboticsFacility_Near', 'arg': ['tag'],
   'func': [(82, F.Build_RoboticsFacility_screen, ('queued', 'screen_tag'))]},
]
# Unit Abilities
PROTOSS_ACTION_ABILITY = [
  # Each entry maps an LLM-facing ability name to a sequence of
  # (pysc2 function id, pysc2 function, function-arg names) calls that the
  # executor performs in order.
  # NOTE(review): values like ('queued') are plain strings, not 1-tuples —
  # presumably the executor accepts both forms; confirm in the dispatcher.
  {'name': 'Morph_WarpPrismPhasingMode', 'arg': [],
   'func': [(329, F.Morph_WarpPrismPhasingMode_quick, ('queued'))]},
  {'name': 'Morph_WarpPrismTransportMode', 'arg': [],
   'func': [(330, F.Morph_WarpPrismTransportMode_quick, ('queued'))]},
  {'name': 'Morph_SurveillanceMode', 'arg': [],
   'func': [(538, F.Morph_SurveillanceMode_quick, ('queued'))]},
  {'name': 'Morph_ObserverMode', 'arg': [],
   'func': [(535, F.Morph_ObserverMode_quick, ('queued'))]},
  {'name': 'Morph_Archon', 'arg': [],
   'func': [(296, F.Morph_Archon_quick, ('queued'))]},
  {'name': 'Ability_PrismaticAlignment', 'arg': [],
   'func': [(244, F.Effect_VoidRayPrismaticAlignment_quick, ('queued'))]},
  # Cancelling phase shift is implemented as a plain Stop order.
  {'name': 'Ability_CancelPhaseShift', 'arg': [],
   'func': [(453, F.Stop_quick, ('queued'))]},
  {'name': 'Ability_GuardianShield', 'arg': [],
   'func': [(197, F.Effect_GuardianShield_quick, ('queued'))]},
  {'name': 'Ability_PulsarBeamOn', 'arg': [],
   'func': [(38, F.Behavior_PulsarBeamOn_quick, ('queued'))]},
  # Tag-targeted abilities first move the camera to the target's world position.
  {'name': 'Ability_ChronoBoost_Unit', 'arg': ['tag'],
   'func': [(573, F.llm_pysc2_move_camera, ('world_tag')), (527, F.Effect_ChronoBoostEnergyCost_screen, ('queued', 'screen_tag'))]},
  {'name': 'Ability_NexusMassRecall_Near', 'arg': ['tag'],
   'func': [(573, F.llm_pysc2_move_camera, ('world_tag')), (529, F.Effect_MassRecall_Nexus_screen, ('queued', 'screen_tag'))]},
  {'name': 'Ability_MothershipMassRecall_Near', 'arg': ['tag'],
   'func': [(573, F.llm_pysc2_move_camera, ('world_tag')), (208, F.Effect_MassRecall_screen, ('queued', 'screen_tag'))]},
  {'name': 'Ability_ShadowStride_Unit', 'arg': ['tag'],
   'func': [(182, F.Effect_ShadowStride_screen, ('queued', 'screen_tag'))]},
  {'name': 'Ability_GravitonBeam_Unit', 'arg': ['tag'],
   'func': [(196, F.Effect_GravitonBeam_screen, ('queued', 'screen_tag'))]},
  {'name': 'Ability_Blink_Screen', 'arg': ['screen'],
   'func': [(180, F.Effect_Blink_screen, ('queued', 'screen'))]},
  {'name': 'Ability_AdeptPhaseShift_Screen', 'arg': ['screen'],
   'func': [(177, F.Effect_AdeptPhaseShift_screen, ('queued', 'screen'))]},
  {'name': 'Ability_PsiStorm_Screen', 'arg': ['screen'],
   'func': [(218, F.Effect_PsiStorm_screen, ('queued', 'screen'))]},
  {'name': 'Ability_PurificationNova_Screen', 'arg': ['screen'],
   'func': [(219, F.Effect_PurificationNova_screen, ('queued', 'screen'))]},
  {'name': 'Ability_ForceField_Screen', 'arg': ['screen'],
   'func': [(193, F.Effect_ForceField_screen, ('queued', 'screen'))]},
  {'name': 'Ability_TimeWarp_Screen', 'arg': ['screen'],
   'func': [(241, F.Effect_TimeWarp_screen, ('queued', 'screen'))]},
  {'name': 'Ability_OracleRevelation_Screen', 'arg': ['screen'],
   'func': [(214, F.Effect_OracleRevelation_screen, ('queued', 'screen'))]},
  {'name': 'Ability_StasisTrap_Screen', 'arg': ['screen'],
   'func': [(90, F.Build_StasisTrap_screen, ('queued', 'screen'))]},
  {'name': 'Load_Unit', 'arg': ['tag'],
   'func': [(287, F.Load_screen, ('queued', 'screen_tag'))]},
  {'name': 'Unload_Screen', 'arg': ['screen'],
   'func': [(516, F.UnloadAllAt_screen, ('queued', 'screen'))]},
  # NOTE(review): the Lock_* actions reuse Build_Pylon_screen /
  # Build_Assimilator_screen (func ids 70 / 40) — looks like a deliberate
  # "reserve the spot" trick, but confirm the intent.
  {'name': 'Lock_Nexus_Near', 'arg': ['tag'],
   'func': [(70, F.Build_Pylon_screen, ('queued', 'screen_tag'))]},
  {'name': 'Lock_Assimilator_Near', 'arg': ['tag'],
   'func': [(40, F.Build_Assimilator_screen, ('queued', 'screen_tag'))]},
  # Composite action: drag-select around the tagged unit, then blink.
  {'name': 'Select_Unit_Blink_Screen', 'arg': ['tag', 'screen'],
   'func': [(3, F.select_rect, ('select', 'screen1_tag', 'screen2_tag')),
            (180, F.Effect_Blink_screen, ('queued', 'screen'))]},
]
# Owner-type lookup: maps a Research/Train function id to the unit type that issues it
def find_unit_type_the_func_belongs_to(func_id, race):
  """Look up which building type owns a given Research/Train function id.

  Args:
    func_id: pysc2 raw function id of a Research_*/Train_* action.
    race: 'protoss', 'zerg' or 'terran' (only protoss is populated so far).

  Returns:
    The producing unit type (units.Protoss.*), or None when unknown.
  """
  if race == 'protoss':
    owner_table = (
      ((541, 485), units.Protoss.Nexus),
      ((457, 465, 471, 491, 493, 503), units.Protoss.Gateway),
      ((482, 484, 500, 495, 461), units.Protoss.Stargate),
      ((481, 501, 473, 462, 466), units.Protoss.RoboticsFacility),
      ((381, 385, 428), units.Protoss.CyberneticsCore),
      ((389, 393, 397), units.Protoss.Forge),
      ((359, 356, 351), units.Protoss.TwilightCouncil),
      ((379,), units.Protoss.FleetBeacon),  # two upgrade options still missing
      ((364, 366, 367), units.Protoss.RoboticsBay),
      ((401,), units.Protoss.TemplarArchive),
      ((404,), units.Protoss.DarkShrine),
    )
    for func_ids, owner_type in owner_table:
      if func_id in func_ids:
        return owner_type
  if race == 'zerg':
    pass  # Support for Zerg is undergoing
  if race == 'terran':
    pass  # Support for Terran is undergoing
  return None
def find_building_size(build_name: str) -> int:
  """Return the footprint edge length (in world grid cells) for a building.

  Checks the size-5/3/2/1 name tables in order (Terran/Zerg support is
  undergoing) and returns 0 for unrecognized names.
  """
  size_tables = (
    (SIZE5_BUILDING_NAMES, 5),
    (SIZE3_BUILDING_NAMES, 3),
    (SIZE2_BUILDING_NAMES, 2),
    (SIZE1_BUILDING_NAMES, 1),
  )
  for known_names, edge_len in size_tables:
    if build_name in known_names:
      return edge_len
  return 0
# Find idle unit tag, conditions: ours + reigh-type + already-built + not-active
def find_idle_unit_tag(obs, unit_type, queued_source_unit_tag_list):
  """Return the tag of one idle unit of `unit_type` that we own.

  Candidates must be ours, fully built (build_progress == 100), inactive,
  and not already queued in `queued_source_unit_tag_list`. Returns None
  when no such unit exists (or unit_type is None).
  """
  if unit_type is None:
    return None
  for candidate in obs.observation.raw_units:
    if candidate.alliance != features.PlayerRelative.SELF:
      continue
    if candidate.build_progress != 100 or candidate.active != 0:
      continue
    if candidate.unit_type != unit_type or candidate.tag in queued_source_unit_tag_list:
      continue
    return candidate.tag
  return None
# Parameter verification
def get_arg_minimap(obs, minimap: list, size_minimap, action_name) -> (tuple, bool):
  """Validate a minimap coordinate argument.

  Clamps the coordinate into [0, size_minimap] and casts it to int so that it
  can index feature_minimap layers (previously float inputs were passed
  through unconverted, unlike get_arg_screen, and would crash the
  player_relative lookups below). Attack may not target allied squares;
  Load/Follow must target allied squares.

  Returns:
    ((x, y), True) on success, or (error message, False).
  """
  if isinstance(minimap, list) and len(minimap) == 2 and isinstance(minimap[0], (int, float)) and isinstance(minimap[1], (int, float)):
    # int cast: feature layers are indexed by integer cell, consistent with get_arg_screen
    x = int(min(max(0, minimap[0]), size_minimap))
    y = int(min(max(0, minimap[1]), size_minimap))
    if 'Attack' in action_name and obs.observation.feature_minimap.player_relative[x][y] in [1, 2]:
      return f'({x}, {y}) is alliance', False
    if 'Load' in action_name and obs.observation.feature_minimap.player_relative[x][y] not in [1, 2]:
      return f'({x}, {y}) is not alliance', False
    if 'Follow' in action_name and obs.observation.feature_minimap.player_relative[x][y] not in [1, 2]:
      return f'({x}, {y}) is not alliance', False
    return (x, y), True
  return f'minimap={minimap}, unknown error', False
# Parameter verification
def get_arg_screen(obs, screen: list, size_screen, action_name) -> (tuple, bool):
  """Validate an on-screen coordinate argument.

  Clamps the coordinate into [0, size_screen] and rejects targets whose
  player_relative value conflicts with the action semantics (Attack must
  not target allies; Load/Follow must target allies).

  Returns:
    ((x, y), True) on success, or (error message, False).
  """
  well_formed = (
    isinstance(screen, list) and len(screen) == 2
    and isinstance(screen[0], (int, float)) and isinstance(screen[1], (int, float))
  )
  if not well_formed:
    return f'input arg {screen} error', False
  x = int(min(max(0, screen[0]), size_screen))
  y = int(min(max(0, screen[1]), size_screen))
  if 'Attack' in action_name and obs.observation.feature_screen.player_relative[x][y] in [1, 2]:
    return f'({x}, {y}) is alliance', False
  if ('Load' in action_name or 'Follow' in action_name) \
      and obs.observation.feature_screen.player_relative[x][y] not in [1, 2]:
    return f'({x}, {y}) is not alliance', False
  return (x, y), True
# Parameter verification, for build
def get_arg_screen_build(obs, screen: list, size_screen, action_name) -> (tuple, bool):
  """Validate a build location for a standard Build_<Name>_Screen action.

  For the building's whole footprint (building_size x building_size cells,
  centered on the requested coordinate) this checks:
    * Protoss power field for power-dependent buildings,
    * Zerg creep for creep-dependent buildings,
    * buildable/pathable terrain,
    * absence of blocking non-allied units.

  Returns:
    ((x, y), True) with the clamped center on success, else (error, False).
  """
  building_name = action_name.split('Build_')[1].split('_Screen')[0]
  building_size = find_building_size(building_name)
  if isinstance(screen, list) and len(screen) == 2 and isinstance(screen[0], (int, float)) and isinstance(screen[1], (
      int, float)) and building_size != 0:
    ratio = int(size_screen / SCREEN_WORLD_GRID)  # screen pixels per world grid cell
    x0 = int(min(max(0, screen[0]), size_screen))
    y0 = int(min(max(0, screen[1]), size_screen))
    # (x1, y1) is the upper-left cell of the footprint centered on (x0, y0).
    x1 = int(min(max(0, screen[0]), size_screen) - ratio * (building_size - 1) / 2)
    y1 = int(min(max(0, screen[1]), size_screen) - ratio * (building_size - 1) / 2)
    if building_name in POWER_BUILDING_NAMES and obs.observation.feature_screen.power[x0][y0] == 0:
      return f'({x0}, {y0}) need power', False
    if building_name in CREEP_BUILDING_NAMES and obs.observation.feature_screen.creep[x0][y0] == 0:
      return f'({x0}, {y0}) need creep', False
    for i in range(building_size):
      for j in range(building_size):
        x = int(x1 + i * ratio)
        y = int(y1 + j * ratio)
        if not (0 < x < size_screen and 0 < y < size_screen):
          return f'({x0}, {y0}) too close to screen edge', False
        if obs.observation.feature_screen.buildable[x][y] != 1:
          return f'area near ({x0}, {y0}) not buildable', False
        if obs.observation.feature_screen.pathable[x][y] != 1:
          return f'area near ({x0}, {y0}) not pathable', False
        if obs.observation.feature_screen.player_relative[x][y] not in [0, 1]:
          # Bug fix: this branch fires when the area IS blocked by a
          # non-allied unit; the old message said 'not blocked'.
          return f'area near ({x0}, {y0}) blocked', False
    return (x0, y0), True
  return f'input arg {screen} error', False
# Parameter verification, tag to world coordinate
def get_arg_world_tag(obs, tag: int, x_offset, y_offset, world_range) -> (tuple, bool):
  """Resolve a unit tag to offset world coordinates.

  The y axis is flipped against `world_range` (screen/world coordinate
  conventions differ) and clamped at 0.

  Returns:
    ((x, y), True) when the tag exists in raw_units, else (error, False).
  """
  matched = next((u for u in obs.observation.raw_units if u.tag == tag), None)
  if matched is not None:
    world_x = matched.x + x_offset
    world_y = max(0, world_range - matched.y + y_offset)
    return (world_x, world_y), True
  shown_tag = hex(tag) if isinstance(tag, int) else tag
  return f'cannot find unit {shown_tag}', False
# Parameter verification, tag to screen coordinate
def get_arg_screen_tag(obs, tag: int, size_screen, action_name) -> (tuple, bool):
  """Resolve a unit tag to its on-screen (x, y) coordinate.

  Enforces per-action target constraints:
    Attack       -> must not target allied units (alliance 1/2)
    Load/Follow  -> must target allied units
    MassRecall   -> must target our own unit
    ChronoBoost  -> must target our own, boostable building
    Board_       -> must target one of our transporters

  Returns:
    ((x, y), True) when the unit is visible on screen, else (error, False).
  """
  for unit in obs.observation.feature_units:
    if unit.tag == tag:
      x, y = unit.x, unit.y
      if 'Attack' in action_name and unit.alliance in [1, 2]:
        return f'({x}, {y}) is alliance', False
      if 'Load' in action_name and unit.alliance not in [1]:
        return f'({x}, {y}) is not alliance', False
      if 'Follow' in action_name and unit.alliance not in [1, 2]:
        return f'({x}, {y}) is not alliance', False
      if 'MassRecall' in action_name and unit.alliance not in [1]:
        return f'({x}, {y}) is not alliance', False
      # Bug fix: the action is named 'Ability_ChronoBoost_Unit', so the old
      # substring 'Chrono_Boost' never matched and this check was dead.
      if 'ChronoBoost' in action_name and (unit.alliance not in [1] or unit.unit_type not in BOOSTABLE_TYPE):
        return f'({x}, {y}) is not boostable', False
      if 'Board_' in action_name and (unit.alliance not in [1] or unit.unit_type not in TRANSPORTER_TYPE):
        return f'({x}, {y}) is not a transporter', False
      if unit.is_on_screen and (0 < unit.x < size_screen and 0 < unit.y < size_screen):
        return (unit.x, unit.y), True
  tag = hex(tag) if isinstance(tag, int) else tag
  return f'cannot find unit {tag} on screen', False
# Parameter verification, tag to rect screen coordinates
def get_arg_screen_tag_sclect_rect(obs, tag: int, size_screen, func_arg_name) -> (tuple, bool):
  """Resolve a tag to one corner of a small select-rect around the unit.

  `func_arg_name` 'screen' yields the upper-left corner, 'screen2' the
  lower-right corner; each is offset by size_screen / 64 pixels from the
  unit center and clamped into the screen. Only our own units (alliance 1)
  may be selected.

  Returns:
    ((x, y), True) on success, else (error message, False).
  """
  for unit in obs.observation.feature_units:
    if unit.tag == tag:
      if unit.alliance not in [1]:
        # Bug fix: the old message printed unit.y twice instead of (x, y).
        return f'({unit.x}, {unit.y}) is not alliance', False
      if not (0 < unit.x < size_screen and 0 < unit.y < size_screen):
        # Bug fix: old message read 'not no screen'.
        return f'unit {tag} ({unit.x}, {unit.y}) not on screen', False
      if func_arg_name == 'screen' and unit.is_on_screen:
        x = max(0, min(int(unit.x - size_screen / 64), size_screen))
        y = max(0, min(int(unit.y - size_screen / 64), size_screen))
        return (x, y), True
      if func_arg_name == 'screen2' and unit.is_on_screen:
        x = max(0, min(int(unit.x + size_screen / 64), size_screen))
        y = max(0, min(int(unit.y + size_screen / 64), size_screen))
        return (x, y), True
  tag = hex(tag) if isinstance(tag, int) else tag
  return f'cannot find unit {tag} on screen', False
# Parameter verification, tag to screen coordinate, for recall
def get_arg_screen_tag_recall(obs, tag: int, size_screen, action_name) -> (tuple, bool):
  """Resolve a tag to on-screen coordinates for a recall-style target.

  Only our own units (alliance 1) are acceptable; the unit must also be
  visible inside the screen bounds. `action_name` is currently unused.

  Returns:
    ((x, y), True) on success, else (error message, False).
  """
  for candidate in obs.observation.feature_units:
    if candidate.tag != tag:
      continue
    px, py = candidate.x, candidate.y
    if candidate.alliance not in [1]:
      return f'({px}, {py}) is not alliance', False
    if candidate.is_on_screen and 0 < px < size_screen and 0 < py < size_screen:
      return (px, py), True
  shown_tag = hex(tag) if isinstance(tag, int) else tag
  return f'cannot find unit {shown_tag} on screen', False
# Parameter verification, tag to screen coordinate, for warp
def get_arg_screen_tag_warp(obs, tag: int, size_screen, action_name) -> (tuple, bool):  # find a free warp-in position near the tagged power source
  """Randomly probe for a valid warp-in position near a Pylon/WarpPrismPhasing.

  Samples random (radius, angle) offsets around the tagged unit and returns
  the first screen cell that is powered, pathable and unoccupied.

  Returns:
    ((x, y), True) on success, else (error message, False).
  """
  n = 0
  # NOTE(review): max_try is 72 but the `n == 36` early-return below always
  # fires first, so the while bound is never reached — confirm which limit
  # is intended.
  for unit in obs.observation.feature_units:
    max_try = 72
    if unit.tag == tag:
      if unit.unit_type not in [units.Protoss.Pylon, units.Protoss.WarpPrismPhasing]:
        return f'tag {unit.tag}({unit.unit_type}) is not Pylon(60) or WarpPrismPhasing(136)', False
      else:
        # Pylons power a wider ring than a phasing Warp Prism, so probe farther out.
        radius = [2, 3, 4, 5, 6] if unit.unit_type == units.Protoss.Pylon else [1, 2, 3]
        angles = [0, 45, 90, 135, 180, 225, 270, 315]
        while n < max_try:
          r = radius[random.randint(0, len(radius) - 1)]
          a = angles[random.randint(0, len(angles) - 1)]
          # Convert the world-grid offset (r cells at angle a) to screen pixels.
          x = int(unit.x + r * (size_screen / SCREEN_WORLD_GRID) * math.cos(math.radians(a)))
          y = int(unit.y + r * (size_screen / SCREEN_WORLD_GRID) * math.sin(math.radians(a)))
          # Accept only powered, pathable, unoccupied cells inside the screen.
          if (0 < x < size_screen and 0 < y < size_screen) and \
              obs.observation.feature_screen.power[x][y] == 1 and \
              obs.observation.feature_screen.pathable[x][y] == 1 and \
              obs.observation.feature_screen.unit_type[x][y] == 0 and \
              obs.observation.feature_screen.build_progress[x][y] == 0 and \
              obs.observation.feature_screen.unit_shields[x][y] == 0:
            return (x, y), True
          else:
            n = n + 1
          if n == 36:
            return f'cannot find valid position to warp unit', False
  tag = hex(tag) if isinstance(tag, int) else tag
  return f'cannot find unit {tag} on screen', False
# Parameter verification, tag to screen coordinate, for gas building
def get_arg_screen_tag_gas_building(obs, tag: int, size_screen, action_name) -> (tuple, bool):
  """Resolve a Vespene Geyser tag to screen coordinates for gas-building placement.

  Rejects tags that are not geysers, and geysers farther than 10 world units
  from any of our base buildings.

  Returns:
    ((x, y), True) when the geyser is visible on screen, else (error, False).
  """
  # find the vespene geyser among raw units (needed for world-distance checks)
  unit_r = None
  for unit in obs.observation.raw_units:
    if unit.tag == tag:
      unit_r = unit
  if unit_r is None:
    tag = hex(tag) if isinstance(tag, int) else tag
    return f'cannot find unit {tag}', False
  # confirm whether construction is possible at this geyser
  for unit in obs.observation.feature_units:
    if unit.tag == tag:
      base_nearby = False
      # require one of our base buildings within 10 world units of the geyser
      for unit_ in obs.observation.raw_units:
        if unit_.alliance == features.PlayerRelative.SELF and unit_.unit_type in BASE_BUILDING_TYPE and \
            math.sqrt((unit_.x - unit_r.x) ** 2 + (unit_.y - unit_r.y) ** 2) < 10:
          base_nearby = True
      if not base_nearby:
        return f'tag {unit.tag}({unit.unit_type}) is far away from our base building', False
      if unit.unit_type not in GAS_TYPE:
        return f'tag {unit.tag}({unit.unit_type}) is not VespeneGeyser(342 344 608 880 881)', False
      if unit.is_on_screen and (0 < unit.x < size_screen and 0 < unit.y < size_screen):
        return (unit.x, unit.y), True
  tag = hex(tag) if isinstance(tag, int) else tag
  return f'cannot find unit {tag} on screen', False
# Parameter verification, tag to screen coordinate, for base building
def get_arg_world_tag_base_building(obs, tag: int, x_offset, y_offset, world_range) -> (tuple, bool):
def find_nearby_raw_mg(unit_g):
nearby_resource_unit_dict = {}
for unit in obs.observation.raw_units:
if unit.unit_type in MINERAL_TYPE:
dist = math.sqrt((unit.x - unit_g.x) ** 2 + (unit.y - unit_g.y) ** 2)
if dist < 16:
nearby_resource_unit_dict[dist] = unit
if unit.unit_type in GAS_TYPE:
dist = math.sqrt((unit.x - unit_g.x) ** 2 + (unit.y - unit_g.y) ** 2)
if dist < 16:
nearby_resource_unit_dict[dist] = unit
return nearby_resource_unit_dict.values()
def artificial_force_field_iteration_world(unit_list, x, y):
k, r, m = 0.5, 7, 1
vespene_r, vespene_m = 8, 1
mineral_r, mineral_m = 7, 1
n, bad_n, fx, fy = 0, 0, 0, 0
for unit in unit_list:
bad = False
if unit.unit_type in GAS_TYPE:
r, m = vespene_r, vespene_m
if unit.unit_type in MINERAL_TYPE:
r, m = mineral_r, mineral_m
d = math.sqrt((unit.x - x) ** 2 + (unit.y - y) ** 2)
f = k * (r - d) * m
fx += f * (x - unit.x) / d
fy += f * (y - unit.y) / d
n += 1
if unit.unit_type in GAS_TYPE and not (7 < d < 10):
bad = True
if unit.unit_type in MINERAL_TYPE and not (6 < d < 9):
bad = True
if bad:
bad_n += 1
return (x + fx / n), (y + fy / n), bad_n
for unit in obs.observation.raw_units:
if unit.tag == tag:
if unit.unit_type not in GAS_TYPE + MINERAL_TYPE:
return f'tag {unit.tag}({unit.unit_type}) is not VespeneGaser', False
mineral_list = find_nearby_raw_mg(unit)
n, x0, y0 = 0, 0, 0
for mineral in mineral_list:
n += 1
x0 += mineral.x
y0 += mineral.y
x0 = x0 / n
y0 = y0 / n
for i in range(16):
x0, y0, bad_n = artificial_force_field_iteration_world(mineral_list, x0, y0)
x = int(x0 + x_offset)
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | true |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/lib/__init__.py | llm_pysc2/lib/__init__.py | python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false | |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/lib/log_analyse.py | llm_pysc2/lib/log_analyse.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import shutil
from pysc2.env import environment
# analyse one experiment folder
def analyse(experiment_folder, delete_unfinished):
win, tie, lose = 0, 0, 0
score_cumulative = None
score_by_category = None
score_by_vital = None
obs_lists = [] # 1 experiment with several episodes
obs_list = []
# find saved obs
obs_folder_paths = []
obs_list_pkl_paths = []
for file_or_dir_name in os.listdir(experiment_folder):
file_or_dir_path = os.path.join(experiment_folder, f"{file_or_dir_name}")
if 'obs' in file_or_dir_name and os.path.isdir(file_or_dir_path):
obs_folder_paths.append(file_or_dir_path)
if 'obs-list-episode' in file_or_dir_name and '.pkl' in file_or_dir_name:
obs_list_pkl_paths.append(file_or_dir_path)
# load saved obs
if len(obs_list_pkl_paths) > 0:
print(f"Start reading obs list")
for obs_list_pkl_path in obs_list_pkl_paths:
f = open(obs_list_pkl_path, 'rb')
obs_list = pickle.load(f)
obs_lists.append(obs_list)
print(f"Successfully read obs list from [{obs_list_pkl_path}]", '\n' + "--" * 25)
elif len(obs_list_pkl_paths) == 0 and len(obs_folder_paths) > 0:
print(f"Start reading obs list from obs folder")
for obs_folder_path in obs_folder_paths:
for obs_pkl_name in os.listdir(obs_folder_path):
obs_pkl_path = os.path.join(obs_folder_path, obs_pkl_name)
with open(obs_pkl_path, 'rb') as f:
obs = pickle.load(f)
obs_list.append(obs)
print(f"Successfully read obs list from obs folder [{obs_folder_path}]", '\n' + "--" * 25)
obs_lists.append(obs_list)
obs_list = []
else:
print(f"\033[1;31m No saved obs found! \033[0m", '\n' + "--" * 25)
# analyse
for obs_list in obs_lists:
raw_units_list = []
if len(obs_list) > 0 and obs_list[-1].step_type != environment.StepType.LAST: # unfinished experiment
print("Possible unfinished experiment")
if delete_unfinished:
shutil.rmtree(experiment_folder)
continue
for obs in obs_list:
raw_units = obs.observation.raw_units
raw_units_list.append(raw_units)
if obs.step_type == environment.StepType.LAST:
if obs.reward == 1 and obs.discount == 0:
win += 1
if obs.reward == 0 and obs.discount == 0:
tie += 1
if obs.reward == -1 and obs.discount == 0:
lose += 1
score_c = obs.observation.score_cumulative
score_bc = obs.observation.score_by_category
score_bv = obs.observation.score_by_vital
score_cumulative = score_c if score_cumulative is None else score_cumulative + score_c
score_by_category = score_bc if score_by_category is None else score_by_category + score_bc
score_by_vital = score_bv if score_by_vital is None else score_by_vital + score_bv
return score_cumulative, score_by_category, score_by_vital, win, tie, lose
# analyse all experiment folder in llm_log
def analyse_all(start_time: int, end_time=0, delete_unfinished=False):
num_experiments = 0
total_score_cumulative, total_score_by_category, total_score_by_vital = None, None, None
total_damage_dealt, total_damage_taken, total_healed = 0, 0, 0
total_killed_minerals, total_killed_vespene = 0, 0
total_lost_minerals, total_lost_vespene = 0, 0
total_win, total_tie, total_lose = 0, 0, 0
log_folder_names = os.listdir(os.path.dirname(os.path.abspath(__file__)))
for folder_name in log_folder_names:
if '-' in folder_name and start_time <= int(folder_name.split('-')[0]) <= max(end_time, start_time + 1):
experiment_folder = f'./{folder_name}'
score_cumulative, score_by_category, score_by_vital, win, tie, lose = analyse(experiment_folder, delete_unfinished)
if total_score_cumulative is None:
total_score_cumulative = score_cumulative
total_score_by_category = score_by_category
total_score_by_vital = score_by_vital
elif (score_cumulative is not None) and (score_by_category is not None) and (score_by_vital is not None):
total_score_cumulative += score_cumulative
total_score_by_category += score_by_category
total_score_by_vital += score_by_vital
else:
continue
num_experiments = num_experiments + win + tie + lose
total_win += win
total_tie += tie
total_lose += lose
if num_experiments > 0:
print(f"num_experiments={num_experiments}")
print("--" * 25)
total_damage_dealt = total_score_by_vital[0][0] + total_score_by_vital[0][1]
total_damage_taken = total_score_by_vital[1][0] + total_score_by_vital[1][1]
total_healed = total_score_by_vital[2][0] + total_score_by_vital[2][1]
total_killed_minerals = total_score_by_category.killed_minerals.army + total_score_by_category.killed_minerals.economy
total_killed_vespene = total_score_by_category.killed_vespene.army + total_score_by_category.killed_vespene.economy
total_lost_minerals = total_score_by_category.lost_minerals.army + total_score_by_category.lost_minerals.economy
total_lost_vespene = total_score_by_category.lost_vespene.army + total_score_by_category.lost_vespene.economy
total_killed_resource = total_killed_minerals + 2 * total_killed_vespene
total_lost_resource = total_lost_minerals + 2 * total_lost_vespene
rate_win = 100 * total_win / num_experiments
rate_tie = 100 * total_tie / num_experiments
rate_lose = 100 * total_lose / num_experiments
ave_damage_dealt = total_damage_dealt / num_experiments
ave_damage_taken = total_damage_taken / num_experiments
ave_healed = total_healed / num_experiments
ave_killed_resource = total_killed_resource / num_experiments
ave_lost_resource = total_lost_resource / num_experiments
ave_kill_death_ratio = ave_killed_resource / ave_lost_resource if ave_lost_resource != 0 else None
result = {
'rate_win': rate_win,
'rate_tie': rate_tie,
'rate_lose': rate_lose,
'ave_damage_dealt': ave_damage_dealt,
'ave_damage_taken': ave_damage_taken,
'ave_healed': ave_healed,
'ave_killed_resource': ave_killed_resource,
'ave_lost_resource': ave_lost_resource,
'ave_kill_death_ratio': ave_kill_death_ratio,
}
return result
else:
print(f"\033[1;31m Error num_experiments == 0! \033[0m")
return None
start_time = 20240000000000
end_time = 20250000000000
result = analyse_all(start_time, end_time)
from pprint import pprint
pprint(result) | python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/lib/log_show.py | llm_pysc2/lib/log_show.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
file_name = "o.txt"
# file_name = "a_pro.txt" # processed text action
# file_name = "a_raw.txt" # raw text action, may consist analysis and communication info
# file_name = "c_inp.txt" # input communication info
# file_name = "c_out.txt" # output communication info
with open(file_name, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
print("--" * 25 + f" Loop {i} " + "--" * 25)
dic = json.loads(lines[i])
print(dic[f"{i}"])
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/lib/data_recorder.py | llm_pysc2/lib/data_recorder.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import shutil
from pysc2.env import environment
class DataRecorder():
def __init__(self, save_dir, save_level=0):
"""
Args:
save_dir:
MainAgent.log_dir_path
save_level:
0 for only first and last step,
1 for important steps that unit changes,
2 add obs that action may be important,
3 for all obs
"""
self.obs_list = []
self.save_dir = save_dir
# save_level:
self.save_level = save_level
self.last_step_unit_tags = []
if not os.path.exists(self.save_dir):
os.mkdir(self.save_dir)
def _save_temp(self, obs, num_episode, num_step):
self.obs_list.append(obs)
save_dir_temp = f"{self.save_dir}/obs{num_episode}"
obs_save_path = f"{save_dir_temp}/step{num_step}.pkl"
if not os.path.exists(save_dir_temp):
os.mkdir(save_dir_temp)
with open(obs_save_path, 'wb') as f:
pickle.dump(obs, f)
def _save_all(self, obs, num_episode):
result = ''
if obs.step_type == environment.StepType.LAST:
if obs.reward == 1 and obs.discount == 0:
result = '-win'
if obs.reward == 0 and obs.discount == 0:
result = '-tie'
if obs.reward == -1 and obs.discount == 0:
result = '-lose'
obs_save_path = f"{self.save_dir}/obs-list-episode{num_episode}{result}.pkl"
with open(obs_save_path, 'wb') as f:
pickle.dump(self.obs_list, f)
print(f"Successfully save episode obs in {obs_save_path}")
try: # To avoid errors caused by insufficient permissions
save_dir_temp = f"{self.save_dir}/obs{num_episode}"
shutil.rmtree(save_dir_temp)
print(f"Successfully delete temp obs directory")
except:
pass
def _is_unit_appear_or_disappear(self, obs):
step_unit_tags = []
unit_appear_or_disappear = False
for unit in obs.observation.raw_units:
step_unit_tags.append(unit.tag)
if unit.tag not in self.last_step_unit_tags:
unit_appear_or_disappear = True
if len(step_unit_tags) != len(self.last_step_unit_tags):
unit_appear_or_disappear = True
self.last_step_unit_tags = step_unit_tags
return unit_appear_or_disappear
def step(self, obs, num_episode, num_step):
if obs.step_type == environment.StepType.MID:
if self.save_level >= 0:
pass
if self.save_level >= 1 and self._is_unit_appear_or_disappear(obs):
self._save_temp(obs, num_episode, num_step)
if self.save_level >= 2 and 9 < obs.observation.last_actions[0] < 573:
self._save_temp(obs, num_episode, num_step)
if self.save_level >= 3:
self._save_temp(obs, num_episode, num_step)
if obs.step_type == environment.StepType.FIRST:
self.obs_list = []
self._save_temp(obs, num_episode, num_step)
if obs.step_type == environment.StepType.LAST:
self._save_temp(obs, num_episode, num_step)
self._save_all(obs, num_episode)
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/lib/knowledge/zerg.py | llm_pysc2/lib/knowledge/zerg.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pysc2.lib import units
DATA_ZERG = {
units.Zerg.Drone: {
# wiki英文 Unit
'name': 'Drone',
'name_cn': '工蜂',
# wiki英文 Details
'description': 'Harvests resources and spawns structures. Is sacrificed when creating new structures.'
'The drone morphs into structures and harvests minerals and vespene gas.',
# 可以对什么对象造成伤害,ground/air
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target': ['ground'],
'target_self': ['ground'],
# wiki中文 基本信息/属性
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_self': ['light', 'biological'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': [],
# 生存
'health': 40, # wiki中文 基本信息/生命值
'shield': 0, # wiki中文 基本信息/护盾
'health_armor': 0, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 3.94, # wiki中文 基本信息/移动速度
# 作战
'weapon1_type_anti': [],
'weapon1_target': ['ground'],
'weapon1_attack': 5, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 0.2, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 1, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 1.07, # wiki中文 作战/武器/攻击间隔
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': 1, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 12, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 50, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 0, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'train from Larva.', # wiki中文 生产/生产设施
# 技能
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': { # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade
},
'ability': { # wiki英文 作战/技能,只记录主动技能
},
# 前置条件,这一部分需要游戏知识
'require_building': '',
'require_tech': '',
'require_unit': 'sacrifice 1 Larva',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': '',
},
units.Zerg.Zergling: {
# wiki英文 Unit
'name': 'Zergling',
'name_cn': '跳虫',
# wiki英文 Details
'description': 'Fast but weak melee attacker ideal for swarming attacks in large numbers.',
'target': ['ground'],
'target_self': ['ground'],
# wiki中文 基本信息/属性
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_self': ['light', 'biological'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': [],
# 生存
'health': 35, # wiki中文 基本信息/生命值
'shield': 0, # wiki中文 基本信息/护盾
'health_armor': 0, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 4.13, # wiki中文 基本信息/移动速度
# 作战
'weapon1_type_anti': [],
'weapon1_target': ['ground'],
'weapon1_attack': 5, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 1, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 1, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 1, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 0.5, # wiki中文 作战/武器/攻击间隔
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': 0.5, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 8.5, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 25, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 0, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'morph from Larva.', # wiki中文 生产/生产设施
# 技能
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': { # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade
'Metabolic Boost': 'Increases zergling movement speed. The zerglings "grow wings".'
'Cost 100 mineral, 100 gas and 79 seconds. In Spawning Pool.',
'Adrenal Glands': 'Increases zergling attack rate to 0.35.'
'Cost 200 mineral, 200 gas and 93 seconds. In Spawning Pool.'
},
'ability': { # wiki英文 作战/技能,只记录主动技能
'Baneling Morph': 'Available after Baneling Nest built.'
'Transforms zergling into baneling, a glowing green creature with a suicide attack.'
},
# 前置条件,这一部分需要游戏知识
'require_building': 'have Spawning Pool',
'require_tech': '',
'require_unit': 'sacrifice 1 Larve',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': 'Baneling(after Baneling Nest built)',
},
units.Zerg.Queen: {
# wiki英文 Unit
'name': 'Queen',
'name_cn': '虫后',
# wiki英文 Details
'description': 'The queen a powerful attacking ground dwelling support unit ideal for zerg defense.',
# 可以对什么对象造成伤害,ground/air
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target': ['ground', 'air'],
'target_self': ['ground'],
# wiki中文 基本信息/属性
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_self': ['biological', 'psionic'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
'type_anti': [],
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
# 生存
'health': 175, # wiki中文 基本信息/生命值
'shield': 0, # wiki中文 基本信息/护盾
'health_armor': 1, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 0.9375, # wiki中文 基本信息/移动速度
# 作战
'weapon1_target': ['ground'],
'weapon1_type_anti': [],
'weapon1_attack': 4, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 1, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 5, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 2, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 0.71, # wiki中文 作战/武器/攻击间隔
'weapon2_target': ['air'],
'weapon2_type_anti': [],
'weapon2_attack': 9, # wiki中文 作战/武器/伤害,基础伤害
'weapon2_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon2_attack_upgrade_base_damage_offset': 1, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon2_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon2_attack_range': 7, # wiki中文 作战/武器/射程,近战为1
'weapon2_attack_times': 1, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon2_cooldown': 0.71, # wiki中文 作战/武器/攻击间隔
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': 2, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 36, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 150, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 0, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'produced from Hatchery/Lair/Hive.', # wiki中文 生产/生产设施
# 技能
'energy': 200, # wiki中文 基本信息/能量,默认值为0
'upgrade': { # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade
},
'ability': { # wiki英文 作战/技能,只记录主动技能
'Create Creep Tumor': 'Always available(can only be used once by a creep tumor).'
'The queen forces a creep tumor out of her bowels.Alternatively, each creep tumor '
'may create a single additional creep tumor, using the same build time but no energy '
'cost. This changes the tumor s appearance. Creep tumors can only be created upon '
'the creep, and within a range.'
'Cost 25 energy and 11 seconds,cooldown:13.57 seconds',
'Spawn Larva': 'Always available.'
'Queens can target a hatchery/lair/hive, causing it to eventually spawn four larvae. '
'These join any already-present larvae. The larva-producing structure will not naturally '
'produce any more larvae until the total falls below three. The larva count for any given '
'hatchery/lair/hive cannot exceed nineteen, no matter how many times spawn larva is cast. '
'Cost 25 energy and 29 seconds,cooldown:1.8 seconds.',
'Transfusion': 'Always available.'
'The queen instantly restores 75 hit points to target biological unit or structure, '
'plus an additional 50 health over the next 7.14 seconds. Can only be used while on creep.'
'cost 50 energy ,cooldown:1 seconds. ',
},
# 前置条件,这一部分需要游戏知识
'require_building': 'have Spawning pool',
'require_tech': '',
'require_unit': '',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': '',
},
units.Zerg.Baneling: {
# wiki英文 Unit
'name': 'Baneling',
'name_cn': '爆虫',
# wiki英文 Details
'description': 'This green rolling unit is mutated from the zergling. It has a huge suicidal attack. '
'A huge swarm of banelings will create devastating results to a base of enemy.',
# 可以对什么对象造成伤害,ground/air
'target': ['ground'],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['ground'],
# wiki中文 基本信息/属性
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_self': ['biological'],
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': ['light', 'structure'],
# 生存
'health': 30, # wiki中文 基本信息/生命值
'shield': 0, # wiki中文 基本信息/护盾
'health_armor': 0, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 3.5, # wiki中文 基本信息/移动速度
# 作战
'weapon1_target': ['ground'],
'weapon1_type_anti': ['light'],
'weapon1_attack': 16, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 19, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 2, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 1, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 1, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 0.59, # wiki中文 作战/武器/攻击间隔
'weapon2_target': ['ground'],
'weapon2_type_anti': ['structure'],
'weapon2_attack': 0, # wiki中文 作战/武器/伤害,基础伤害
'weapon2_attack_bonus': 80, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon2_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon2_attack_upgrade_bonus_damage_offset': 5, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon2_attack_range': 1, # wiki中文 作战/武器/射程,近战为1
'weapon2_attack_times': 1, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon2_cooldown': 0, # wiki中文 作战/武器/攻击间隔
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': 0.5, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 14, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 25, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 25, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'morph from Zergling.', # wiki中文 生产/生产设施
# 技能
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': { # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade
'Centrifugal Hooks': 'Increases the speed of banelings to 4.13.'
'Cost 100 mineral, 100 gas and 71 seconds. In Baneling Nest.Required Lair.'
},
'ability': { # wiki英文 作战/技能,只记录主动技能
'Enable Attack Building': 'Always available.'
'Allows banelings to automatically target structures.',
'Disnable Attack Building': 'Always available.'
'do not Allows banelings to automatically target structures.',
'Explode': 'Always available.'
'Banelings can be ordered to detonate, instantly dealing damage around them, '
'even if burrowed. The ability is not smartcast.'
},
# 前置条件,这一部分需要游戏知识
'require_building': 'have Baneling Nest',
'require_tech': '',
'require_unit': 'sacrifice 1 Zergling',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': '',
},
units.Zerg.OverlordTransport: {
'name': 'OverlordTransport',
'name_cn': '运输王虫',
'description': 'Zerg Air Transport Unit, used for airdrops and harass enemy base',
'target': [],
'target_self': ['air'],
# wiki中文 基本信息/属性
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_self': ['armored', 'biological'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': [],
# 生存
'health': 200, # wiki中文 基本信息/生命值
'shield': 0, # wiki中文 基本信息/护盾
'health_armor': 0, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 0.902, # wiki中文 基本信息/移动速度
# 作战
'weapon1_type_anti': [],
'weapon1_target': [],
'weapon1_attack': 0, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 0, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 0, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 0, # wiki中文 作战/武器/攻击间隔
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': -8, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 18, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 100, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 0, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'morph from Larva.', # wiki中文 生产/生产设施
# 技能
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': { # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade
'Pneumatized Carapace': 'ncrease speed of the overlord from 0.902 to 2.63, overlords with Ventral Sacs '
'from 1.099 to 2.83, and the overseer 2.62 to 4.73".'
'Cost 100 mineral, 100 gas and 43 seconds. '
'In Hatchery (with at least 1 other lair or hive)/Lair/Hive.',
},
'ability': { # wiki英文 作战/技能,只记录主动技能
'Mutate Ventral Sacs': 'Available after Lair built.'
'Upgrades an individual overlord, allowing it to transport units. '
'Increases movement speed to 1.28 (3.00 with Pneumatized Carapace).',
'Overseer Morph': 'Available after Lair built'
'The overlord can morph into an overseer, which becomes a detector.',
'Excrete Creep': 'Available after Lair built.'
'The overlord creates a 2x2 patch of creep through an activated ability, '
'the radius steadily expanding.duartion 11 seconds',
},
# 前置条件,这一部分需要游戏知识
'require_building': '',
'require_tech': '',
'require_unit': 'sacrifice 1 Overlord',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': '',
},
units.Zerg.Overlord: {
# wiki英文 Unit
'name': 'Overlord',
'name_cn': '王虫',
# wiki英文 Details
'description': 'Produces control and is no longer a detector like the StarCraft I version.',
'target': [],
'target_self': ['air'],
# wiki中文 基本信息/属性
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_self': ['armored', 'biological'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': [],
# 生存
'health': 200, # wiki中文 基本信息/生命值
'shield': 0, # wiki中文 基本信息/护盾
'health_armor': 0, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 0.902, # wiki中文 基本信息/移动速度
# 作战
'weapon1_type_anti': [],
'weapon1_target': [],
'weapon1_attack': 0, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 0, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 0, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 0, # wiki中文 作战/武器/攻击间隔
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': -8, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 18, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 100, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 0, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'morph from Larva.', # wiki中文 生产/生产设施
# 技能
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': { # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade
'Pneumatized Carapace': 'ncrease speed of the overlord from 0.902 to 2.63, overlords with Ventral Sacs '
'from 1.099 to 2.83, and the overseer 2.62 to 4.73".'
'Cost 100 mineral, 100 gas and 43 seconds. '
'In Hatchery (with at least 1 other lair or hive)/Lair/Hive.',
},
'ability': { # wiki英文 作战/技能,只记录主动技能
'Mutate Ventral Sacs': 'Available after Lair built.'
'Upgrades an individual overlord, allowing it to transport units. '
'Increases movement speed to 1.28 (3.00 with Pneumatized Carapace).',
'Overseer Morph': 'Available after Lair built'
'The overlord can morph into an overseer, which becomes a detector.',
'Excrete Creep': 'Available after Lair built.'
'The overlord creates a 2x2 patch of creep through an activated ability, '
'the radius steadily expanding.duartion 11 seconds',
},
# 前置条件,这一部分需要游戏知识
'require_building': '',
'require_tech': '',
'require_unit': 'sacrifice 1 Larva',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': 'Overseer(After Lair Built), OverlordTransport(After Lair Built)',
},
units.Zerg.Overseer: {
# wiki英文 Unit
'name': 'Overseer',
'name_cn': '眼虫',
# wiki英文 Details
'description': 'Produces control and is no longer a detector like the StarCraft I version.'
'This unit or structure can detect cloaked, burrowed, duplicated and hallucination enemies.',
'target': [],
'target_self': ['air'],
# wiki中文 基本信息/属性
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_self': ['armored', 'biological'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': [],
# 生存
'health': 200, # wiki中文 基本信息/生命值
'shield': 0, # wiki中文 基本信息/护盾
'health_armor': 0, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 2.62, # wiki中文 基本信息/移动速度
# 作战
'weapon1_type_anti': [],
'weapon1_target': [],
'weapon1_attack': 0, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 0, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 0, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 0, # wiki中文 作战/武器/攻击间隔
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': -8, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 18, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 50, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 50, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'morph from Overload.', # wiki中文 生产/生产设施
# 技能
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': { # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade
'Pneumatized Carapace': 'ncrease speed of the overlord from 0.902 to 2.63, overlords with Ventral Sacs '
'from 1.099 to 2.83, and the overseer 2.62 to 4.73".'
'Cost 100 mineral, 100 gas and 43 seconds. '
'In Hatchery (with at least 1 other lair or hive)/Lair/Hive.'
},
'ability': { # wiki英文 作战/技能,只记录主动技能
'Contaminate': 'Always available'
'The overseer disables a building, which becomes covered in slime. '
'It cannot produce units, larva or research upgrades while contaminated.'
'cost 125 energy,duration 30 seconds',
'Spawn Changeling': 'Always available'
'The overseer can create a changeling, which can spy on enemy forces.'
'cost 50 energy,duration 150 seconds',
'Oversight Mode': 'Always available'
'The overseer can switch to Oversight Mode. '
'In Oversight Mode the overseer is immobile and gains 50% increased sight radius. '
'Can switch back to Overseer Mode to move again.'
'need 3 seconds',
},
# 前置条件,这一部分需要游戏知识
'require_building': 'have Lair',
'require_tech': '',
'require_unit': 'sacrifice 1 Overload',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': '',
},
units.Zerg.Roach: {
# wiki英文 Unit
'name': 'Roach',
'name_cn': '蟑螂',
# wiki英文 Details
'description': 'Exceptionally tough short ranged unit able to quickly regenerate and move while burrowed.',
'target': ['ground'],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['ground'],
# wiki中文 基本信息/属性
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_self': ['armored', 'biological'],
'type_anti': [],
# 生存
'health': 145, # wiki中文 基本信息/生命值
'shield': 0, # wiki中文 基本信息/护盾
'health_armor': 1, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 3.15, # wiki中文 基本信息/移动速度
# 作战
'weapon1_target': ['ground'],
'weapon1_type_anti': [],
'weapon1_attack': 16, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 2, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 4, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 1, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 1.43, # wiki中文 作战/武器/攻击间隔
# 'weapon2_target': ['ground'],
# 'weapon2_type_anti': [],
# 'weapon2_attack': 16, # wiki中文 作战/武器/伤害,基础伤害
# 'weapon2_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
# 'weapon2_attack_upgrade_base_damage_offset': 2, # wiki中文 作战/武器/伤害,攻击升级偏移量
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | true |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/lib/knowledge/protoss.py | llm_pysc2/lib/knowledge/protoss.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pysc2.lib import units, actions
# Protoss research (upgrade) requirement table, keyed by pysc2 function id.
# Value legend:
#   'm' = mineral cost
#   'g' = vespene gas cost
#   'b' = building that performs the research (units.Protoss enum member)
#   't' = research duration
# NOTE(review): 't' appears to be in classic "normal speed" game seconds,
# not real-time LotV seconds — confirm against how the caller consumes it.
protoss_research_conditions = {
  # CyberneticsCore BY
  actions.FUNCTIONS.Research_ProtossAirArmorLevel1_quick.id:
    {'m': 100, 'g': 100, 'b': units.Protoss.CyberneticsCore, 't': 180},  # map from Research_ProtossAirArmor_quick
  actions.FUNCTIONS.Research_ProtossAirWeaponsLevel1_quick.id:
    {'m': 100, 'g': 100, 'b': units.Protoss.CyberneticsCore, 't': 180},  # map from Research_ProtossAirWeapons_quick
  actions.FUNCTIONS.Research_ProtossAirArmorLevel2_quick.id:
    {'m': 175, 'g': 175, 'b': units.Protoss.CyberneticsCore, 't': 215},
  actions.FUNCTIONS.Research_ProtossAirWeaponsLevel2_quick.id:
    {'m': 175, 'g': 175, 'b': units.Protoss.CyberneticsCore, 't': 215},
  actions.FUNCTIONS.Research_ProtossAirArmorLevel3_quick.id:
    {'m': 250, 'g': 250, 'b': units.Protoss.CyberneticsCore, 't': 250},
  actions.FUNCTIONS.Research_ProtossAirWeaponsLevel3_quick.id:
    {'m': 250, 'g': 250, 'b': units.Protoss.CyberneticsCore, 't': 250},
  actions.FUNCTIONS.Research_WarpGate_quick.id:
    {'m': 50, 'g': 50, 'b': units.Protoss.CyberneticsCore, 't': 140},
  # Forge BF
  actions.FUNCTIONS.Research_ProtossGroundArmorLevel1_quick.id:
    {'m': 100, 'g': 100, 'b': units.Protoss.Forge, 't': 170},  # map from Research_ProtossGroundArmor_quick
  actions.FUNCTIONS.Research_ProtossGroundWeaponsLevel1_quick.id:
    {'m': 100, 'g': 100, 'b': units.Protoss.Forge, 't': 170},  # map from Research_ProtossGroundWeapon_quick
  actions.FUNCTIONS.Research_ProtossShieldsLevel1_quick.id:
    {'m': 150, 'g': 150, 'b': units.Protoss.Forge, 't': 170},  # map from Research_ProtossShield_quick
  actions.FUNCTIONS.Research_ProtossGroundArmorLevel2_quick.id:
    {'m': 150, 'g': 150, 'b': units.Protoss.Forge, 't': 203},
  actions.FUNCTIONS.Research_ProtossGroundWeaponsLevel2_quick.id:
    {'m': 150, 'g': 150, 'b': units.Protoss.Forge, 't': 203},
  actions.FUNCTIONS.Research_ProtossShieldsLevel2_quick.id:
    {'m': 200, 'g': 200, 'b': units.Protoss.Forge, 't': 203},
  actions.FUNCTIONS.Research_ProtossGroundArmorLevel3_quick.id:
    {'m': 200, 'g': 200, 'b': units.Protoss.Forge, 't': 235},
  actions.FUNCTIONS.Research_ProtossGroundWeaponsLevel3_quick.id:
    {'m': 200, 'g': 200, 'b': units.Protoss.Forge, 't': 235},
  actions.FUNCTIONS.Research_ProtossShieldsLevel3_quick.id:
    {'m': 250, 'g': 250, 'b': units.Protoss.Forge, 't': 235},
  # TwilightCouncil VC
  actions.FUNCTIONS.Research_AdeptResonatingGlaives_quick.id:
    {'m': 100, 'g': 100, 'b': units.Protoss.TwilightCouncil, 't': 140},
  actions.FUNCTIONS.Research_Blink_quick.id:
    {'m': 150, 'g': 150, 'b': units.Protoss.TwilightCouncil, 't': 170},
  actions.FUNCTIONS.Research_Charge_quick.id:
    {'m': 100, 'g': 100, 'b': units.Protoss.TwilightCouncil, 't': 140},
  # FleetBeacon VF
  actions.FUNCTIONS.Research_PhoenixAnionPulseCrystals_quick.id:
    {'m': 150, 'g': 150, 'b': units.Protoss.FleetBeacon, 't': 90},
  # actions.FUNCTIONS.Research_VoidRayFluxVanes_quick.id:
  #   {'m': 100, 'g': 100, 'b': units.Protoss.FleetBeacon, 't': 80},  # Not realised in pysc2
  # actions.FUNCTIONS.Research_TempestTectonicDestabilizers_quick.id:
  #   {'m': 150, 'g': 150, 'b': units.Protoss.FleetBeacon, 't': 140},  # Not realised in pysc2
  # RoboticsBay VB
  actions.FUNCTIONS.Research_ExtendedThermalLance_quick.id:
    {'m': 150, 'g': 150, 'b': units.Protoss.RoboticsBay, 't': 140},
  actions.FUNCTIONS.Research_GraviticBooster_quick.id:
    {'m': 100, 'g': 100, 'b': units.Protoss.RoboticsBay, 't': 80},
  actions.FUNCTIONS.Research_GraviticDrive_quick.id:
    {'m': 100, 'g': 100, 'b': units.Protoss.RoboticsBay, 't': 80},
  # TemplarArchive VT
  actions.FUNCTIONS.Research_PsiStorm_quick.id:
    {'m': 200, 'g': 200, 'b': units.Protoss.TemplarArchive, 't': 110},
  # DarkShrine VD
  actions.FUNCTIONS.Research_ShadowStrike_quick.id:
    {'m': 100, 'g': 100, 'b': units.Protoss.DarkShrine, 't': 140},
}
# Protoss unit training requirement table, keyed by pysc2 function id.
# Value legend:
#   'm' = mineral cost
#   'g' = vespene gas cost
#   'b' = producing building (units.Protoss enum member)
#   't' = training duration (same time convention as the research table)
#   's' = supply (food) consumed by the trained unit
protoss_train_conditions = {
  # Nexus, BN
  actions.FUNCTIONS.Train_Mothership_quick.id:
    {'m': 300, 'g': 300, 'b': units.Protoss.Nexus, 't': 125, 's': 6},
  # Gateway, BG
  actions.FUNCTIONS.Train_Zealot_quick.id:
    {'m': 100, 'g': 0, 'b': units.Protoss.Gateway, 't': 38, 's': 2},
  actions.FUNCTIONS.Train_Stalker_quick.id:
    {'m': 125, 'g': 50, 'b': units.Protoss.Gateway, 't': 42, 's': 2},
  actions.FUNCTIONS.Train_Adept_quick.id:
    {'m': 100, 'g': 25, 'b': units.Protoss.Gateway, 't': 42, 's': 2},
  actions.FUNCTIONS.Train_Sentry_quick.id:
    {'m': 50, 'g': 100, 'b': units.Protoss.Gateway, 't': 32, 's': 2},
  actions.FUNCTIONS.Train_HighTemplar_quick.id:
    {'m': 50, 'g': 150, 'b': units.Protoss.Gateway, 't': 55, 's': 2},
  actions.FUNCTIONS.Train_DarkTemplar_quick.id:
    {'m': 125, 'g': 125, 'b': units.Protoss.Gateway, 't': 55, 's': 2},
  # Stargate, VS
  actions.FUNCTIONS.Train_Oracle_quick.id:
    {'m': 150, 'g': 150, 'b': units.Protoss.Stargate, 't': 52, 's': 3},
  actions.FUNCTIONS.Train_Phoenix_quick.id:
    {'m': 150, 'g': 100, 'b': units.Protoss.Stargate, 't': 35, 's': 2},
  actions.FUNCTIONS.Train_VoidRay_quick.id:
    {'m': 250, 'g': 150, 'b': units.Protoss.Stargate, 't': 60, 's': 4},
  actions.FUNCTIONS.Train_Tempest_quick.id:
    {'m': 250, 'g': 175, 'b': units.Protoss.Stargate, 't': 60, 's': 5},
  actions.FUNCTIONS.Train_Carrier_quick.id:
    {'m': 350, 'g': 250, 'b': units.Protoss.Stargate, 't': 90, 's': 6},
  # RoboticsFacility, VB
  actions.FUNCTIONS.Train_Observer_quick.id:
    {'m': 25, 'g': 75, 'b': units.Protoss.RoboticsFacility, 't': 25, 's': 1},
  actions.FUNCTIONS.Train_WarpPrism_quick.id:
    {'m': 250, 'g': 0, 'b': units.Protoss.RoboticsFacility, 't': 50, 's': 2},
  actions.FUNCTIONS.Train_Immortal_quick.id:
    {'m': 275, 'g': 100, 'b': units.Protoss.RoboticsFacility, 't': 55, 's': 4},
  actions.FUNCTIONS.Train_Colossus_quick.id:
    {'m': 300, 'g': 200, 'b': units.Protoss.RoboticsFacility, 't': 75, 's': 6},
  actions.FUNCTIONS.Train_Disruptor_quick.id:
    {'m': 150, 'g': 150, 'b': units.Protoss.RoboticsFacility, 't': 50, 's': 4},
}
# Protoss structure construction requirement table, keyed by pysc2 function id.
# Value legend:
#   'm' = mineral cost
#   'g' = vespene gas cost
#   'b' = prerequisite building that must already exist (None = no tech
#         prerequisite beyond the basics)
#   't' = construction duration (same time convention as the research table)
#   'l' = structure footprint edge length on the build grid
protoss_build_conditions = {
  actions.FUNCTIONS.Build_Nexus_screen.id:
    {'m': 400, 'g': 0, 'b': None, 't': 100, 'l': 5},
  actions.FUNCTIONS.Build_Assimilator_screen.id:
    {'m': 75, 'g': 0, 'b': None, 't': 30, 'l': 3},
  actions.FUNCTIONS.Build_Pylon_screen.id:
    {'m': 100, 'g': 0, 'b': None, 't': 25, 'l': 2},
  actions.FUNCTIONS.Build_Gateway_screen.id:
    {'m': 150, 'g': 0, 'b': None, 't': 65, 'l': 3},
  actions.FUNCTIONS.Build_CyberneticsCore_screen.id:
    {'m': 150, 'g': 0, 'b': units.Protoss.Gateway, 't': 50, 'l': 3},
  # NOTE(review): in the game a Forge requires only a Nexus, not a Gateway —
  # confirm whether the Gateway prerequisite here is intentional.
  actions.FUNCTIONS.Build_Forge_screen.id:
    {'m': 150, 'g': 0, 'b': units.Protoss.Gateway, 't': 45, 'l': 3},
  actions.FUNCTIONS.Build_PhotonCannon_screen.id:
    {'m': 150, 'g': 0, 'b': units.Protoss.Forge, 't': 40, 'l': 2},
  actions.FUNCTIONS.Build_ShieldBattery_screen.id:
    {'m': 100, 'g': 0, 'b': units.Protoss.CyberneticsCore, 't': 40, 'l': 2},
  actions.FUNCTIONS.Build_TwilightCouncil_screen.id:
    {'m': 150, 'g': 100, 'b': units.Protoss.CyberneticsCore, 't': 50, 'l': 3},
  actions.FUNCTIONS.Build_TemplarArchive_screen.id:
    {'m': 150, 'g': 200, 'b': units.Protoss.TwilightCouncil, 't': 50, 'l': 3},
  actions.FUNCTIONS.Build_DarkShrine_screen.id:
    {'m': 150, 'g': 150, 'b': units.Protoss.TwilightCouncil, 't': 100, 'l': 2},
  actions.FUNCTIONS.Build_Stargate_screen.id:
    {'m': 150, 'g': 150, 'b': units.Protoss.CyberneticsCore, 't': 60, 'l': 3},
  actions.FUNCTIONS.Build_FleetBeacon_screen.id:
    {'m': 300, 'g': 200, 'b': units.Protoss.Stargate, 't': 60, 'l': 3},
  actions.FUNCTIONS.Build_RoboticsFacility_screen.id:
    {'m': 150, 'g': 100, 'b': units.Protoss.CyberneticsCore, 't': 65, 'l': 3},
  actions.FUNCTIONS.Build_RoboticsBay_screen.id:
    {'m': 150, 'g': 150, 'b': units.Protoss.RoboticsFacility, 't': 30, 'l': 3},
}
DATA_PROTOSS = {
units.Protoss.Adept: {
# wiki英文 Unit
'name': 'Adept',
'name_cn': '使徒',
# wiki英文 Details
'description': 'Ground-only ranged attack unit, armed with psionic transfer ability to teleport to '
'nearby locations for harassment.',
# 可以对什么对象造成伤害,ground/air
'target': ['ground'],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['ground'],
# wiki中文 基本信息/属性
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_self': ['light', 'biological'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': ['light'],
# 生存
'health': 70, # wiki中文 基本信息/生命值
'shield': 70, # wiki中文 基本信息/护盾
'health_armor': 1, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 2.5, # wiki中文 基本信息/移动速度
# 作战
# 作战 weapon1
'weapon1_target': ['ground'],
'weapon1_type_anti': ['light'],
'weapon1_attack': 10, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 12, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 1, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 1, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 4, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 1, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 2.25, # wiki中文 作战/武器/攻击间隔
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': 2, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 28, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 42, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 100, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 25, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'train from GateWay or warp from WarpGate.', # wiki中文 生产/生产设施
# 技能
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': { # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade
'Research_ResonatingGlaives': 'Increase the attack speed of the Adept by 45%.'
'Cost 100 mineral, 100 gas and 100 seconds. In DarkShrine.',
},
'ability': { # wiki英文 作战/技能,只记录主动技能
'Effect_PsionicTransfer': 'Active skill.Always avaliable. Cooldowm 11 seconds.'
'Projects an invulnerable psionic image that can move but not attack.'
' After 7 seconds, the adept teleports to the images location. '
'The shade may be canceled at any time, and the adept would not teleport. '
'The shade has a sight radius of 2.',
},
# 前置条件,这一部分需要游戏知识
'require_building': 'have Gateway/WarpGate , have Cybernetics Core',
'require_tech': '',
'require_unit': '',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': '',
},
units.Protoss.Archon: {
# wiki英文 Unit
'name': 'Archon',
'name_cn': '执政官',
# wiki英文 Details
'description': 'Created by merging two templar units, the archon is a powerful melee unit with a very durable '
'force shield and a strong energy-based attack.',
# 可以对什么对象造成伤害,ground/air
'target': ['ground', 'air'],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['ground'],
# wiki中文 基本信息/属性
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_self': ['psionic', 'massive'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': ['biological'],
# 生存
'health': 10, # wiki中文 基本信息/生命值
'shield': 350, # wiki中文 基本信息/护盾
'health_armor': 0, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 2.8125, # wiki中文 基本信息/移动速度
# 作战
# 作战 weapon1
'weapon1_target': ['ground', 'air'],
'weapon1_type_anti': ['biological'],
'weapon1_attack': 25, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 10, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 3, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 1, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 3, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 1, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 1.754, # wiki中文 作战/武器/攻击间隔
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': 4, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 12, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 4, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 0, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'train from GateWay or warp from WarpGate.', # wiki中文 生产/生产设施
# 技能
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': { # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade
},
'ability': { # wiki英文 作战/技能,只记录主动技能
},
# 前置条件,这一部分需要游戏知识
'require_building': 'have Gateway/WarpGate, have CyberneticsCore, have TwilightCouncil, '
'have TemplarArchives/DarkShrine',
'require_tech': '',
'require_unit': 'sacrifice 2 templars',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': '',
},
units.Protoss.Carrier: {
# wiki英文 Unit
'name': 'Carrier',
'name_cn': '航母',
# wiki英文 Details
'description': 'A powerful air unit. Carriers do not have their own attacks but '
'create interceptors to fight for them.',
# 可以对什么对象造成伤害,ground/air
'target': ['ground', 'air'],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['air'],
# wiki中文 基本信息/属性
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_self': ['armored', 'massive', 'mechanical'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': [],
# 生存
'health': 300, # wiki中文 基本信息/生命值
'shield': 150, # wiki中文 基本信息/护盾
'health_armor': 2, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 1.875, # wiki中文 基本信息/移动速度
# 作战
# 作战 weapon1
'weapon1_target': ['ground', 'air'],
'weapon1_type_anti': [],
'weapon1_attack': 5, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 1, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 8, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 16, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 3.14, # wiki中文 作战/武器/攻击间隔
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': 6, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 90, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 350, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 250, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'Stargate.', # wiki中文 生产/生产设施
# 技能
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': { # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade
},
'ability': { # wiki英文 作战/技能,只记录主动技能
'BuildInterceptor': 'Passive ability. Always available. The Carrier is produced with four interceptors. '
'Carriers may produce up to eight interceptors as an auto-cast ability. '
'Cost: 15 minerals per interceptor, 9 seconds each.',
},
# 前置条件,这一部分需要游戏知识
'require_building': 'have Gateway/WarpGate, have Stargate, have Cybernetics Core, have FleetBeacon',
'require_tech': '',
'require_unit': '',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': '',
},
units.Protoss.Colossus: {
# wiki英文 Unit
'name': 'Colossus',
'name_cn': '巨像',
# wiki英文 Details
'description': 'The large quad-legged vehicle fires lasers in a splash pattern well-suited to '
'destroying swarms of weaker units. This unit can also traverse differences in terrain height '
'due to its long legs, and will appear to step over ledges and other obstacles '
'due to the inverse kinematics system.',
# 可以对什么对象造成伤害,ground/air
'target': ['ground'],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['ground', 'air'],
# wiki中文 基本信息/属性
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_self': ['armored', 'massive', 'mechanical'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': ['light'],
# 生存
'health': 200, # wiki中文 基本信息/生命值
'shield': 150, # wiki中文 基本信息/护盾
'health_armor': 1, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 2.25, # wiki中文 基本信息/移动速度
# 作战
# 作战 weapon1
'weapon1_target': ['ground'],
'weapon1_type_anti': ['light'],
'weapon1_attack': 10, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 5, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 1, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 1, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 7, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 2, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 1.5, # wiki中文 作战/武器/攻击间隔
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': 6, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 75, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 300, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 200, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'Robotics Facility.', # wiki中文 生产/生产设施
# 技能
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': { # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade
'ExtendedThermalLance': 'Increases the range of the colossus attack from 7 to 9. '
'Cost: 150 minerals, 150 gas, 100 seconds. Requires Robotics Bay.',
},
'ability': { # wiki英文 作战/技能,只记录主动技能
},
# 前置条件,这一部分需要游戏知识
'require_building': 'have Gateway/WarpGate, have Cybernetics Core, have Robotics Facility, have Robotics Bay',
'require_tech': '',
'require_unit': '',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': '',
},
units.Protoss.DarkTemplar: {
# wiki英文 Unit
'name': 'DarkTemplar',
'name_cn': '黑暗圣堂武士',
# wiki英文 Details
'description': 'A permanently cloaked stealth warrior.',
# 可以对什么对象造成伤害,ground/air
'target': ['ground'],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['ground'],
# wiki中文 基本信息/属性
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_self': ['light', 'biological', 'psionic'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': [],
# 生存
'health': 40, # wiki中文 基本信息/生命值
'shield': 80, # wiki中文 基本信息/护盾
'health_armor': 1, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 2.8125, # wiki中文 基本信息/移动速度
# 作战
'attack': 45, # wiki中文 作战/武器/伤害,基础伤害
'attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'attack_upgrade_base_damage_offset': 5, # wiki中文 作战/武器/伤害,攻击升级偏移量
'attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'attack_range': 1, # wiki中文 作战/武器/射程,近战为1
'attack_times': 1, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'cooldown': 1.7694, # wiki中文 作战/武器/攻击间隔
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': 2, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 45, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 55, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 125, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 123, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'train from GateWay or warp from WarpGate.', # wiki中文 生产/生产设施
# 技能
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': { # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade
'Research_ShadowStrike': 'Enable ShadowStride ability.'
'Cost 100 mineral, 100 gas and 100 seconds. In DarkShrine.',
},
'ability': { # wiki英文 作战/技能,只记录主动技能
'ArchonWarp': 'Always available. Any combination of two high templar/dark templar can '
'sacrifice themselves, transforming into an archon.',
'ShadowStride': 'Available after Research_ShadowStrike. Cooldown 14 seconds.'
'Teleports the Dark Templar to a nearby target location. Has a 0.71 attack delay after use.'
},
# 前置条件,这一部分需要游戏知识
'require_building': 'have Gateway/WarpGate, have Cybernetics Core, have TwilightCouncil, have DarkShrine',
'require_tech': '',
'require_unit': '',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': 'Archon',
},
units.Protoss.Disruptor: {
# wiki英文 Unit
'name': 'Disruptor',
'name_cn': '干扰者',
# wiki英文 Details
'description': 'Light ground mechanized support unit, '
'armed with energy spikes to wreak havoc against swaths of ground forces.',
'target': ['ground'],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['ground'],
# wiki中文 基本信息/属性
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_self': ['armored', 'mechanical'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': [],
# 生存
'health': 100, # wiki中文 基本信息/生命值
'shield': 100, # wiki中文 基本信息/护盾
'health_armor': 1, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 2.25, # wiki中文 基本信息/移动速度
# 作战
# 作战 weapon1
'weapon1_target': ['ground'],
'weapon1_type_anti': ['shield'],
'weapon1_attack': 145, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 55, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 9, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 0, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 21, # wiki中文 作战/武器/攻击间隔
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': 4, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 50, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 150, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 150, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'Robotics Facility', # wiki中文 生产/生产设施
# 技能
'energy': 0, # 默认值为0
'upgrade': {
},
'ability': {
'PurificationNova': 'Always available. Active skill. Shoots a ball of energy at a target point that can be '
'controlled and lasts 2 seconds. After two seconds, the energy ball explodes, '
'dealing 155 (+55 vs shields) damage to nearby enemies in a 1.3575 radius. '
'Has a 0.7 second charge time after being dropped from a transport. '
'Cooldown: 14.3 seconds.',
},
# 前置条件,这一部分需要游戏知识
'require_building': 'have Gateway, have Cybernetics Core, have Robotics Facility, have Robotics Bay',
'require_tech': '',
'require_unit': '',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': '',
},
units.Protoss.HighTemplar: {
# wiki英文 Unit
'name': 'Hightemplar',
'name_cn': '高阶圣堂武士',
# wiki英文 Details
'description': 'A physically fragile unit with strong psychic abilities.',
# 可以对什么对象造成伤害,ground/air
'target': ['ground'],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['ground'],
# wiki中文 基本信息/属性
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_self': ['light', 'biological', 'psionic'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': [],
# 生存
'health': 40, # wiki中文 基本信息/生命值
'shield': 40, # wiki中文 基本信息/护盾
'health_armor': 0, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 2.0156, # wiki中文 基本信息/移动速度
# 作战
# 作战 weapon1
'weapon1_target': ['ground'],
'weapon1_type_anti': [],
'weapon1_attack': 4, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | true |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/lib/knowledge/terran.py | llm_pysc2/lib/knowledge/terran.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pysc2.lib import units
DATA_TERRAN = {
units.Terran.SCV: {
'name': 'SCV',
'name_cn': 'SCV',
'description': "The builder and resource gatherer of the terran race. "
"Its Repair ability can be set to 'autocast'",
'target': ['ground'],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['ground'],
'type_self': ['light', 'biological', 'mechanical'],
'type_anti': [],
'health': 45, # wiki中文 基本信息/生命值
'shield': 0, # wiki中文 基本信息/护盾
'health_armor': 0, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 3.94, # wiki中文 基本信息/移动速度
'weapon1_target': ['ground'],
'weapon1_type_anti': [],
'weapon1_attack': 5, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 1, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 1, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 1.07, # wiki中文 作战/武器/攻击间隔
# 作战 weapon2
'weapon2_target': [],
'weapon2_type_anti': [],
'weapon2_attack': 0, # wiki中文 作战/武器/伤害,基础伤害
'weapon2_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon2_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon2_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon2_attack_range': 0, # wiki中文 作战/武器/射程,近战为1
'weapon2_attack_times': 0, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon2_cooldown': 0,
'supply': 1, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 17, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 50, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 0, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': ['CommandCenter', 'OrbitalCommand', 'PlanetaryFortress'], # wiki中文 生产/生产设施
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': {}, # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade },
"ability": {
# "Repair": "Always available. Restores mechanical unit or building, terran or protoss. "
# "Repairing costs resources. The ability can be autocast.",
# "Worker Abilities": "Always available. The SCV builds structures and harvests minerals and vespene gas. "
},
# 前置条件,这一部分需要游戏知识
'require_building': 'CommandCenter',
'require_tech': '',
'require_unit': '',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': '',
},
units.Terran.Marine: {
'name': 'Marine',
'name_cn': '陆战队员',
'description': 'The basic terran infantry, able to upgrade hit points with a shield.',
'target': ['ground', 'air'],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['ground'],
'type_self': ['light', 'biological'],
'type_anti': [],
'health': 45, # wiki中文 基本信息/生命值
'shield': 0, # wiki中文 基本信息/护盾
'health_armor': 0, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 3.15,
'weapon1_target': ['ground', 'air'],
'weapon1_type_anti': [],
'weapon1_attack': 6, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 1, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 5, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 1, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 0.8608, # wiki中文 作战/武器/攻击间隔
# 作战 weapon2
'weapon2_target': [],
'weapon2_type_anti': [],
'weapon2_attack': 0, # wiki中文 作战/武器/伤害,基础伤害
'weapon2_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon2_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon2_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon2_attack_range': 0, # wiki中文 作战/武器/射程,近战为1
'weapon2_attack_times': 0, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon2_cooldown': 0,
'supply': 1, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 25, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 50, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 0, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'Barracks.', # wiki中文 生产/生产设施
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': {
'Stimpack': 'upgraded in TechLab.Every Marine and Marauder gain the Stimpack ability.'
'Cost 100 mineral, 100 gas and 100 seconds. Required TechLab.',
'Combat Shield': 'upgraded in TechLab.Every Marine gains a Combat Shield that gives 10 hitpoints.'
'Cost 100 mineral, 100 gas and 79 seconds. Required TechLab.'},
# wiki英文 升级,不记录公共升级,如ground_weapon_upgrade },
"ability": {
"Stimpack": "Upgrade in TechLab. Increases the movement speed and firing rate by 50% "
"for 11 seconds at the cost of 10 HP for a Marine."
},
'require_building': 'Barracks',
'require_tech': '',
'require_unit': '',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': '',
},
units.Terran.CommandCenter: {
'name': 'CommandCenter ',
'name_cn': '指挥中心',
'description': 'Produces SCVs and serves as a drop off point for processing of minerals and gas. '
'Has the ability to carry up to five SCVs and can upgrade to the powerfully armed '
'PlanetaryFortress or the ability oriented OrbitalCommand.Provding 15 supply.',
'target': [],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['ground'],
'type_self': ['armored', 'mechanical', 'structure'],
'type_anti': [],
'health': 1500, # wiki中文 基本信息/生命值
'shield': 0, # wiki中文 基本信息/护盾
'health_armor': 1, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 2, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': '', # wiki中文 基本信息/移动速度
'weapon1_target': [],
'weapon1_type_anti': [],
'weapon1_attack': 0, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 0, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 0, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 0, # wiki中文 作战/武器/攻击间隔
# 作战 weapon2
'weapon2_target': [],
'weapon2_type_anti': [],
'weapon2_attack': 0, # wiki中文 作战/武器/伤害,基础伤害
'weapon2_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon2_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon2_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon2_attack_range': 0, # wiki中文 作战/武器/射程,近战为1
'weapon2_attack_times': 0, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon2_cooldown': 0,
'supply': 0, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 71, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 400, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 0, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'SCV', # wiki中文 生产/生产设施
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': {
"Neosteel Armor": "Always available. Upgrades the armor of structures by 2 (excluding auto-turrets). "
"Increases the cargo space of bunkers by 2. Also provides extra cargo space for "
"CommandCenters and PlanetaryFortresses. Cost: 150 mineral, 150 gas, 140 s."
}, # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade },
"ability": {
'OrbitalCommand': 'Always available.Transform into OrbitalCommand',
'PlanetaryFortress': 'Always available.Transform into PlanetaryFortress',
"Load": "Always available. "
"Enables the Terran player to load SCVs into a CommandCenter or PlanetaryFortress.",
"Lift off": "Always available. "
"This structure can lift off, enabling it to fly. It cannot produce units or conduct "
"research while in flight, and must leave add-ons behind.",
"Land": 'Available after Lift off. Select the building while it is in the air and use the Land '
'command to have it settle down at a designated location. When landing, ensure there is enough '
'space and that no units or other structures are obstructing the landing spot.',
"Unload": "Always available. Unload all SCVs."
},
# 前置条件,这一部分需要游戏知识
'require_building': '',
'require_tech': '',
'require_unit': '',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': 'SCV',
},
units.Terran.OrbitalCommand: {
'name': 'OrbitalCommand',
'name_cn': '轨道控制基地',
# wiki英文 Details
'description': 'This upgrade to the CommandCenter provides scanner sweeps to reveal troop movements through '
'the fog of war and detect cloaked or burrowed units, summons MULEs, '
'and can increase the supply generated by SupplyDepots.',
'target': [],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['ground'],
'type_self': ['armored', 'mechanical', 'structure'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': [],
# 生存
'health': 1500, # wiki中文 基本信息/生命值
'shield': 0, # wiki中文 基本信息/护盾
'health_armor': 1, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 1, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': '', # wiki中文 基本信息/移动速度
# 作战
'weapon1_target': [],
'weapon1_type_anti': [],
'weapon1_attack': 0, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 0, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 0, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 0, # wiki中文 作战/武器/攻击间隔
# 作战 weapon2
'weapon2_target': [],
'weapon2_type_anti': [],
'weapon2_attack': 0, # wiki中文 作战/武器/伤害,基础伤害
'weapon2_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon2_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon2_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon2_attack_range': 0, # wiki中文 作战/武器/射程,近战为1
'weapon2_attack_times': 0, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon2_cooldown': 0,
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': 0, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 25, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 150, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 0, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'CommandCenter.', # wiki中文 生产/生产设施
'energy': 200, # wiki中文 基本信息/能量,默认值为0
'upgrade': {''}, # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade },
'ability': { # wiki英文 作战/技能,只记录主动技能
'Calldown: MULE': 'Always available.An OrbitalCommand summons one MULE via drop pod. If the ability is '
'targeted on a mineral patch the MULE will begin mining automatically, extracting 45 '
'minerals per trip. The ability may be used only on areas uncovered by the fog of war.'
'cost 50 energy,Duration: 64 s',
'Calldown: Extra Supplies': 'Always available.The OrbitalCommand can target a SupplyDepot to permanently '
'grant it an extra 8 supply. The modified SupplyDepots look different, '
'but are still capable of raising and lowering themselves.'
'cost 50 energy,Duration: 3 s',
'Scanner Sweep': 'Always available.The OrbitalCommand scans a target location on the map revealing '
'cloaked, burrowed or hallucinated units temporarily.'
'cost 50 energy,Duration: 9 s',
'Lift off': 'Always available. This structure can lift off, enabling it to fly. '
'It cannot produce units or conduct research while in flight, and must leave add-ons behind.',
"Land": 'Available after Lift off.Select the building while it is in the air and use the Land command '
'to have it settle down at a designated location. When landing, ensure there is enough space '
'and that no units or other structures are obstructing the landing spot.',
"Load": "Always available. "
"Enables the Terran player to load SCVs into a CommandCenter or PlanetaryFortress.",
"Unload": "Always available. "
"Unload all SCVs."
},
# 前置条件,这一部分需要游戏知识
'require_building': 'Barracks',
'require_tech': '',
'require_unit': '',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': 'MULE',
},
units.Terran.PlanetaryFortress: {
'name': 'PlanetaryFortress',
'name_cn': '行星要塞',
# wiki英文 Details
'description': 'This immobile upgrade of the CommandCenter grants it weapons to attack enemy '
'ground units and a large boost to armor.Provides 11 supply.',
'target': ['ground'],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['ground'],
'type_self': ['armored', 'mechanical', 'structure'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': [],
# 生存
'health': 1500, # wiki中文 基本信息/生命值
'shield': 0, # wiki中文 基本信息/护盾
'health_armor': 3, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 2, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 0, # wiki中文 基本信息/移动速度
'weapon1_target': ['ground'],
'weapon1_type_anti': [],
'weapon1_attack': 40, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 6, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 0, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 1.43, # wiki中文 作战/武器/攻击间隔
# 作战 weapon2
'weapon2_target': [],
'weapon2_type_anti': [],
'weapon2_attack': 0, # wiki中文 作战/武器/伤害,基础伤害
'weapon2_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon2_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon2_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon2_attack_range': 0, # wiki中文 作战/武器/射程,近战为1
'weapon2_attack_times': 0, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon2_cooldown': 0,
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': 0, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 36, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 150, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 150, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'CommandCenter.', # wiki中文 生产/生产设施
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': {
'Hi-Sec Auto Tracking': "Increases the attack range of automated defenses: missile turrets, auto-turrets, "
"point defense drones, and the PlanetaryFortress by +1. "
"Cost: 100 mineral, 100 gas, Duration: 57 s.",
"Neosteel Armor": "Always available. "
"Upgrades the armor of structures by 2 (excluding auto-turrets). Increases the cargo "
"space of bunkers by 2. Also provides extra cargo space for CommandCenters and "
"PlanetaryFortresses. Cost: 150 mineral, 150 gas, 140 s."
}, # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade },
'ability': { # wiki英文 作战/技能,只记录主动技能
'Load': 'Enables the Terran player to load SCVs into a CommandCenter or PlanetaryFortress.',
'Unload': 'Unload all SCVs'
},
# 前置条件,这一部分需要游戏知识
'require_building': 'Engineering Bay',
'require_tech': '',
'require_unit': '',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': '',
},
units.Terran.Barracks: {
'name': 'Barracks',
'name_cn': '兵营',
# wiki英文 Details
'description': 'Produces terran infantry units.Marine/Reaper/Marauder '
'(with TechLab)/Ghost (with TechLab and GhostAcademy).',
'target': [],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['ground'],
'type_self': ['armored', 'mechanical', 'structure'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': [],
# 生存
'health': 1000, # wiki中文 基本信息/生命值
'shield': 0, # wiki中文 基本信息/护盾
'health_armor': 1, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 2, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 0, # wiki中文 基本信息/移动速度
# 作战
'weapon1_target': [],
'weapon1_type_anti': [],
'weapon1_attack': 0, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 0, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 0, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 0, # wiki中文 作战/武器/攻击间隔
# 作战 weapon2
'weapon2_target': [],
'weapon2_type_anti': [],
'weapon2_attack': 0, # wiki中文 作战/武器/伤害,基础伤害
'weapon2_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon2_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon2_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon2_attack_range': 0, # wiki中文 作战/武器/射程,近战为1
'weapon2_attack_times': 0, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon2_cooldown': 0,
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': 0, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 46, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 150, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 0, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'CommandCenter.', # wiki中文 生产/生产设施
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': {}, # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade },
'ability': { # wiki英文 作战/技能,只记录主动技能
"Lift off": "Always available. This structure can lift off, enabling it to fly. It cannot produce units "
"or conduct research while in flight, and must leave add-ons behind.",
"Land": 'Available after Lift off.Select the building while it is in the air and use the Land command '
'to have it settle down at a designated location. When landing, ensure there is enough space '
'and that no units or other structures are obstructing the landing spot.',
'Create Reactor': 'Always available. This structure can create a Reactor.',
'Create TechLab': 'Always available. This structure can create a TechLab.'
},
# 前置条件,这一部分需要游戏知识
'require_building': '',
'require_tech': '',
'require_unit': '',
# 激活选项,这一部分需要游戏知识
'activate_building': ['Factory', 'GhostAcademy'],
'activate_tech': '',
'activate_unit': 'Marine/Reaper/Marauder (with TechLab)/Ghost (with TechLab and GhostAcademy)',
},
units.Terran.Bunker: {
'name': 'Bunker',
'name_cn': '地堡',
# wiki英文 Details
'description': 'Provides protection for terran infantry.',
'target': ['ground', 'air(with Marine)'],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['ground'],
'type_self': ['armored', 'mechanical', 'structure'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': [],
# 生存
'health': 400, # wiki中文 基本信息/生命值
'shield': 0, # wiki中文 基本信息/护盾
'health_armor': 1, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 2, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 0, # wiki中文 基本信息/移动速度
# 作战
'weapon1_target': [],
'weapon1_type_anti': [],
'weapon1_attack': 0, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon1_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon1_attack_range': 0, # wiki中文 作战/武器/射程,近战为1
'weapon1_attack_times': 0, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon1_cooldown': 0, # wiki中文 作战/武器/攻击间隔
# 作战 weapon2
'weapon2_target': [],
'weapon2_type_anti': [],
'weapon2_attack': 0, # wiki中文 作战/武器/伤害,基础伤害
'weapon2_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon2_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
'weapon2_attack_upgrade_bonus_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量(克制伤害)
'weapon2_attack_range': 0, # wiki中文 作战/武器/射程,近战为1
'weapon2_attack_times': 0, # wiki中文 作战/武器/多重攻击,没有时为1重攻击
'weapon2_cooldown': 0,
# 对于可移动单位是训练成本/ 对于建筑是建造成本
'supply': 0, # wiki中文 基本信息/占用舱载空间,即在游戏中占用的人口
'warp_time': 0, # wiki中文 生产/生产时间2,单位,秒,没有时置零
'produce_time': 29, # wiki中文 生产/生产时间1,单位,秒
'produce_cost_mineral': 100, # wiki中文 生产/成本,水晶(蓝色)
'produce_cost_gas': 0, # wiki中文 生产/成本,瓦斯(绿色)
'produce_from': 'SCV', # wiki中文 生产/生产设施
'energy': 0, # wiki中文 基本信息/能量,默认值为0
'upgrade': {}, # wiki英文 升级,不记录公共升级,如ground_weapon_upgrade },
'ability': { # wiki英文 作战/技能,只记录主动技能
'Salvage': 'Always available. Removes the structure and returns 75% of the structures mineral and vespene '
'gas cost. The process may not be aborted once started. '
'A bunker cannot be salvaged while units are still in it.'
},
# 前置条件,这一部分需要游戏知识
'require_building': 'Barrack',
'require_tech': '',
'require_unit': '',
# 激活选项,这一部分需要游戏知识
'activate_building': '',
'activate_tech': '',
'activate_unit': '',
},
units.Terran.Factory: {
'name': 'Factory',
'name_cn': '工厂',
# wiki英文 Details
'description': 'Produces terran vehicle units.',
'target': [],
# 自己可以受到什么类型伤害,ground/air,只有神族的巨像等少数单位能够同时受到两种伤害
'target_self': ['ground'],
'type_self': ['armored', 'mechanical', 'structure'],
# wiki中文 作战/武器,克制的对象(有伤害加成的)
# biological生物/mechanical机械/psionic灵能/light轻甲/armored重甲/massive巨型/structure建筑
'type_anti': [],
# 生存
'health': 1250, # wiki中文 基本信息/生命值
'shield': 0, # wiki中文 基本信息/护盾
'health_armor': 1, # wiki中文 基本信息/护甲 无升级时
'armor_upgrade_offset': 2, # wiki中文 基本信息/护甲 每次护甲升级时的偏移量
'speed': 0, # wiki中文 基本信息/移动速度
'weapon1_target': [],
'weapon1_type_anti': [],
'weapon1_attack': 0, # wiki中文 作战/武器/伤害,基础伤害
'weapon1_attack_bonus': 0, # wiki中文 作战/武器/伤害,克制附加伤害
'weapon1_attack_upgrade_base_damage_offset': 0, # wiki中文 作战/武器/伤害,攻击升级偏移量
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | true |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/lib/knowledge/__init__.py | llm_pysc2/lib/knowledge/__init__.py |
from llm_pysc2.lib.knowledge.neutral import DATA_NEUTRAL
from llm_pysc2.lib.knowledge.protoss import DATA_PROTOSS
from llm_pysc2.lib.knowledge.terran import DATA_TERRAN
from llm_pysc2.lib.knowledge.zerg import DATA_ZERG
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/llm_pysc2/lib/knowledge/neutral.py | llm_pysc2/lib/knowledge/neutral.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pysc2.lib import units
# Knowledge base for neutral units, keyed by pysc2 unit type.
# Currently empty; presumably to be filled with the same per-unit schema used
# by the sibling DATA_TERRAN / DATA_PROTOSS / DATA_ZERG tables — TODO confirm.
DATA_NEUTRAL = {}
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/setup.py | setup.py | #!/usr/bin/env python
# coding: utf-8
import io
import os
import sys
from shutil import rmtree
from distutils.core import setup
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'xflow-net'
DESCRIPTION = 'a python library for graph flow'
URL = 'https://xflow.network/'
EMAIL = 'zchen@cse.msstate.edu'
AUTHOR = 'XGraphing'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.0.28'
# What packages are required for this module to be executed?
# TODO cosasi could be remove if no longer using our customized cosasi package
REQUIRED = [
'networkx', 'ndlib', 'torch_geometric', 'cosasi'
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
    """Custom ``setup.py upload`` command: clean, build, publish to PyPI, tag."""

    description = 'Build and publish the package.'
    user_options = []

    @staticmethod
    def status(s):
        """Print *s* in bold via ANSI escape codes."""
        print(f'\033[1m{s}\033[0m')

    def initialize_options(self):
        # Required by the Command interface; nothing to initialize.
        pass

    def finalize_options(self):
        # Required by the Command interface; nothing to finalize.
        pass

    def run(self):
        # Start from a clean slate; a missing dist/ directory is fine.
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except OSError:
            pass

        self.status('Building Source and Wheel (universal) distribution…')
        os.system(f'{sys.executable} setup.py sdist bdist_wheel --universal')

        self.status('Uploading the package to PyPI via Twine…')
        os.system('twine upload dist/*')

        self.status('Pushing git tags…')
        os.system(f"git tag v{about['__version__']}")
        os.system('git push --tags')

        sys.exit()
# Where the magic happens:
# TODO package_data could be remove if no longer using our customized cosasi package
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
package_data={'xflow': ["method/cosasi/source_inference/algorithm_details.json"]},
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
"Operating System :: OS Independent"
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/util.py | xflow/util.py | import random
import xflow.method.cosasi as co
import numpy as np
# TODO make seeds changable
# def run (graph, diffusion, seeds, method, eval, epoch, budget, output):
def run(graph, diffusion, method, eval, epoch, budget, output):
    """Run evaluation `eval` over every combination of graph generator and method.

    Args:
        graph: list of graph generators, each returning (networkx graph, ndlib config).
        diffusion: list of diffusion-model callables (used by simulation baselines).
        method: list of method callables (selected by __name__).
        eval: evaluation kind, e.g. 'im' (influence max) or 'ibm' (influence blocking max).
        epoch: number of simulation rounds for simulation-based methods.
        budget: seed-set budget forwarded to every method.
        output: currently unused — kept for interface compatibility.

    Returns None; results are printed. Every per-graph / per-method /
    per-diffusion failure is caught and reported rather than aborting the run.
    """
    print("Running " + eval.upper() + " :")
    for graph_fn in graph:
        try:
            print(graph_fn.__name__)
            g, config = graph_fn()
            print(g)
            # TODO: make the seed-set size configurable (currently fixed at 10).
            seeds = random.sample(list(g.nodes()), 10)
            for method_fn in method:
                try:
                    print(method_fn.__name__)
                    # Proxy/sketch baselines take the budget directly.
                    # Bug fix: these calls previously hard-coded budget=10,
                    # silently ignoring the caller's `budget`; the two
                    # identical branches are also merged into one.
                    simple_baselines = ['eigen', 'degree', 'pi', 'sigma', 'Netshield', 'IMRank', 'RIS']
                    if method_fn.__name__ in simple_baselines:
                        sims = method_fn(g, config, budget=budget)
                    # Simulation-based baselines additionally need a diffusion model.
                    sim_baselines = ['greedy', 'celf', 'celfpp']
                    if method_fn.__name__ in sim_baselines:
                        for diffusion_fn in diffusion:
                            try:
                                print(diffusion_fn.__name__)
                                if eval == 'im':
                                    sims = method_fn(g, config, budget, rounds=epoch, model=diffusion_fn.__name__, beta=0.1)
                                if eval == 'ibm':
                                    sims = method_fn(g, config, budget, seeds, rounds=epoch, model=diffusion_fn.__name__, beta=0.1)
                            except Exception as e:
                                print(f"Error when calling {diffusion_fn.__name__}: {str(e)}")
                    if method_fn.__name__ == 'netsleuth':
                        # TODO: the RNG seed should be configurable.
                        seed = 10
                        random.seed(seed)
                        np.random.seed(seed)
                        contagion = co.StaticNetworkContagion(
                            G=g,
                            model="si",
                            infection_rate=0.1,
                            # recovery_rate=0.005, # for SIS/SIR models
                            number_infected=2,
                            seed=seed
                        )
                        contagion.forward(steps=16)
                        step = 15
                        # Infected subgraph observed at the 15th simulation step.
                        I = contagion.get_infected_subgraph(step=step)
                        print(I)
                        sims = method_fn(I=I, G=g, hypotheses_per_step=1)
                        true_source = contagion.get_source()
                        evals = sims.evaluate(true_source)
                        top_dis = evals["distance"]["top score's distance"]
                        print(top_dis)
                except Exception as e:
                    print(f"Error when calling {method_fn.__name__}: {str(e)}")
        except Exception as e:
            print(f"Error when calling {graph_fn.__name__}: {str(e)}")
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/seed.py | xflow/seed.py | import numpy as np
import torch_geometric.datasets as ds
import random
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
def random(seed):
    """Seed both the stdlib and NumPy random generators.

    Bug fix: this function shadows the module-level ``import random``, so the
    original body's ``random.seed(seed)`` resolved to this function itself and
    raised ``AttributeError`` on every call.  The stdlib module is therefore
    imported under a local alias here.

    Returns the seed unchanged so callers can log or chain it.
    """
    import random as _random
    _random.seed(seed)
    np.random.seed(seed)
    return seed
# TODO: implement degree-based seed selection.
def degree():
    """Placeholder for degree-centrality seed selection (not yet implemented)."""
    return

# TODO: implement eigenvector-based seed selection.
def eigen():
    """Placeholder for eigenvector-centrality seed selection (not yet implemented)."""
    return
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/test_forwards.py | xflow/test_forwards.py | import numpy as np
import random
import networkx as nx
from networkx import Graph
from torch_geometric.data.data import Data
from . import xflow
from xflow.dataset.nx import connSW
from xflow.flow_tasks import forward, backward, graph_eval
graph_size = 1000
graph_beta = 0.1
infection_beta = None
infection_gamma = None
############################################################################################################
# example code using forward function
# generate a dataset of 5 forward flow simulations with observations saved as numpy lists at time intervals 0 and 1
output = forward(distance=[1, 4],
interval_lower=0,
obs_type='numpy',
num_results=5,
graph_size=graph_size,
graph_beta=graph_beta,
inf_beta=infection_beta,
inf_gamma=infection_gamma)
# display general information about these simulation results
print('Observations are of type:', type(output[0]['observations'][0]['observation']), end='\n\n')
for result in output:
observations = result['observations']
graph = result['base_graph']
sir_config = result['SIR_config'] # Use SIR_config here
# Retrieve beta and gamma values from the Configuration object
beta = sir_config.config["model"]["beta"]
gamma = sir_config.config["model"]["gamma"]
# Print SIR values for this result
print('SIR model has values: ', sep='', end='')
print(f'beta = {round(beta, 3)}, gamma = {round(gamma, 3)}')
# Print observation time intervals for this result
print('Observations at time intervals: ', sep='', end='')
for ss in observations:
print(f'{ss["time"]}, ', end='')
print('\n')
# example code to use graph_eval
start_time = observations[0]['time'] # first observation time
start_obs = observations[0]['observation'] # first observation
# for observations other than the starting observation, ask the model to predict the observation at that time interval
for obs in observations[1:]:
interval = obs['time'] # time interval to predict
# make prediction... as an example, we will use the original starting observation as the predicted observation for all intervals
pred_obs = start_obs
true_obs = obs['observation']
# evaluate accuracy of this observation
eval_dict = graph_eval(true_obs, pred_obs)
#print the evaluation metrics
print('Prediction for time interval', interval, '- ', end='')
for key, value in eval_dict.items():
if isinstance(value, dict):
for sub_key, sub_value in value.items():
print(f'{key}_{sub_key}: {round(sub_value, 3)}, ', end='')
else:
print(f'{key}: {round(value, 3)}, ', end='')
print()
print()
############################################################################################################
# example code using forward function
# generate a dataset of 5 forward flow simulations with observations saved as torch data objects at time intervals 0, 1 and 4
output = forward(distance=[1, 4],
interval_lower=0,
obs_type='torch',
num_results=5,
graph_size=graph_size,
graph_beta=graph_beta,
inf_beta=infection_beta,
inf_gamma=infection_gamma)
# Display general information about these simulation results
print('Observations are of type:', type(output[0]['observations'][0]), end='\n\n')
for result in output:
observations = result['observations']
# print("observations", observations)
graph = result['base_graph']
sir_config = result['SIR_config'] # Use SIR_config here
# Retrieve beta and gamma values from the Configuration object
beta = sir_config.config["model"]["beta"]
gamma = sir_config.config["model"]["gamma"]
# Print SIR values for this result
print('SIR model has values: ', sep='', end='')
print(f'beta = {round(beta, 3)}, gamma = {round(gamma, 3)}')
# Print observation time intervals for this result
print('Observations at time intervals: ', sep='', end='')
for ss in observations:
print(f'{ss.time_y}, ', end='') # Access the time attribute directly from the data object
print('\n')
# example code to use graph_eval
start_time = observations[0].time_y # first observation time
start_obs = observations[0].y.numpy() # first observation
# for observations other than the starting observation, ask the model to predict the observation at that time interval
for obs in observations[1:]:
interval = obs.time_y.item() # time interval to predict
# make prediction... as an example, we will use the original starting observation as the predicted observation for all intervals
pred_obs = start_obs
true_obs = obs.y.numpy()
# evaluate accuracy of this observation
eval_dict = graph_eval(true_obs, pred_obs)
# print the evaluation metrics
print('Prediction for time interval', interval, '- ', end='')
for key, value in eval_dict.items():
if isinstance(value, dict):
for sub_key, sub_value in value.items():
print(f'{key}_{sub_key}: {round(sub_value, 3)}, ', end='')
else:
print(f'{key}: {round(value, 3)}, ', end='')
print()
print()
############################################################################################################
# Example code using forward function
# Generate a dataset of 5 forward flow simulations with observations saved as networkx graphs at time intervals t, t-1, t-4
output = forward(distance=[1, 4],
interval_lower=0,
obs_type='networkx',
num_results=5,
graph_size=graph_size,
graph_beta=graph_beta,
inf_beta=infection_beta,
inf_gamma=infection_gamma)
print('Observations are of type:', type(output[0]['observations'][0]), end='\n\n')
for result in output:
observations = result['observations']
graph = result['base_graph']
sir_config = result['SIR_config']
# Retrieve beta and gamma values from the Configuration object
if sir_config:
beta = sir_config.config["model"]["beta"]
gamma = sir_config.config["model"]["gamma"]
# Print SIR values for this result
print('SIR model has values: ', sep='', end='')
print(f'beta = {round(beta, 3)}, gamma = {round(gamma, 3)}')
# Print observation time intervals for this result
print('Observations at time intervals: ', sep='', end='')
for ss in observations:
print(f'{ss.graph["time_y"]}, ', end='') # Access the time attribute directly from the graph metadata
print('\n')
# example code to use graph_eval
start_time = observations[0].graph['time_y'] # first observation time
start_obs = np.array(list(nx.get_node_attributes(observations[0], 'state_y').values())) # first observation
# for observations other than the starting observation, ask the model to predict the observation at that time interval
for obs in observations[1:]:
interval = obs.graph['time_y'] # time interval to predict
# make prediction... as an example, we will use the original starting observation as the predicted observation for all intervals
pred_obs = start_obs
true_obs = np.array(list(nx.get_node_attributes(obs, 'state_y').values()))
# evaluate accuracy of this observation
eval_dict = graph_eval(true_obs, pred_obs)
# print the evaluation metrics
print('Prediction for time interval', interval, '- ', end='')
for key, value in eval_dict.items():
if isinstance(value, dict):
for sub_key, sub_value in value.items():
print(f'{key}_{sub_key}: {round(sub_value, 3)}, ', end='')
else:
print(f'{key}: {round(value, 3)}, ', end='')
print()
print()
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/__init__.py | xflow/__init__.py | import xflow.method
import xflow.dataset
import xflow.diffusion
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/test_backwards.py | xflow/test_backwards.py | import numpy as np
import random
import networkx as nx
from networkx import Graph
from torch_geometric.data.data import Data
from . import xflow
from xflow.dataset.nx import connSW
from xflow.flow_tasks import forward, backward, graph_eval
# def main():
# print("Testing local XFlow package")
# if __name__ == "__main__":
# main()
graph_size = 1000
graph_beta = 0.1
infection_beta = None
infection_gamma = None
############################################################################################################
# example code using backward function
# generate a dataset of 5 backward flow simulations with observations saved as numpy lists at time intervals 1 and 4
output = backward(distance=[1, 4],
interval_lower=0,
obs_type='numpy',
num_results=5,
graph_size=graph_size,
graph_beta=graph_beta,
inf_beta=infection_beta,
inf_gamma=infection_gamma)
# display general information about these simulation results
print('Observations are of type:', type(output[0]['observations'][0]['observation']), end='\n\n')
for result in output:
observations = result['observations']
graph = result['base_graph']
sir_config = result['SIR_config'] # Use SIR_config here
# Retrieve beta and gamma values from the Configuration object
beta = sir_config.config["model"]["beta"]
gamma = sir_config.config["model"]["gamma"]
# Print SIR values for this result
print('SIR model has values: ', sep='', end='')
print(f'beta = {round(beta, 3)}, gamma = {round(gamma, 3)}')
# print observation time intervals for this result
print('Observations at time intervals: ', sep='', end='')
for ss in observations:
print(f'{ss["time"]}, ', end='')
print('\n')
# example code to use graph_eval
start_time = observations[0]['time'] # first observation time
start_obs = observations[0]['observation'] # first observation
# for observations other than the starting observation, ask the model to predict the observation at that time interval
for obs in observations[1:]:
interval = obs['time'] # time interval to predict
# make prediction... as an example, we will use the original starting observation as the predicted observation for all intervals
pred_obs = start_obs
true_obs = obs['observation']
# evaluate accuracy of this observation
eval_dict = graph_eval(true_obs, pred_obs)
#print the evaluation metrics
print('Prediction for time interval', interval, '- ', end='')
for key, value in eval_dict.items():
if isinstance(value, dict):
for sub_key, sub_value in value.items():
print(f'{key}_{sub_key}: {round(sub_value, 3)}, ', end='')
else:
print(f'{key}: {round(value, 3)}, ', end='')
print()
print()
############################################################################################################
# example code using backward function
# generate a dataset of 5 backward flow simulations with observations saved as torch data objects at time intervals 1 and 4
output = backward(distance=[1, 4],
interval_lower=0,
obs_type='torch',
num_results=5,
graph_size=graph_size,
graph_beta=graph_beta,
inf_beta=infection_beta,
inf_gamma=infection_gamma)
# Display general information about these simulation results
print('Observations are of type:', type(output[0]['observations'][0]), end='\n\n')
for result in output:
observations = result['observations']
graph = result['base_graph']
sir_config = result['SIR_config'] # Use SIR_config here
# Retrieve beta and gamma values from the Configuration object
beta = sir_config.config["model"]["beta"]
gamma = sir_config.config["model"]["gamma"]
# Print SIR values for this result
print('SIR model has values: ', sep='', end='')
print(f'beta = {round(beta, 3)}, gamma = {round(gamma, 3)}')
# Print observation time intervals for this result
print('Observations at time intervals: ', sep='', end='')
for ss in observations:
print(f'{ss.time_y}, ', end='') # Access the time attribute directly from the data object
print('\n')
# example code to use graph_eval
start_time = observations[0].time_y # first observation time
start_obs = observations[0].y.numpy() # first observation
# for observations other than the starting observation, ask the model to predict the observation at that time interval
for obs in observations[1:]:
interval = obs.time_y.item() # time interval to predict
# make prediction... as an example, we will use the original starting observation as the predicted observation for all intervals
pred_obs = start_obs
true_obs = obs.y.numpy()
# evaluate accuracy of this observation
eval_dict = graph_eval(true_obs, pred_obs)
# print the evaluation metrics
print('Prediction for time interval', interval, '- ', end='')
for key, value in eval_dict.items():
if isinstance(value, dict):
for sub_key, sub_value in value.items():
print(f'{key}_{sub_key}: {round(sub_value, 3)}, ', end='')
else:
print(f'{key}: {round(value, 3)}, ', end='')
print()
print()
############################################################################################################
# Example code using backward function
# Generate a dataset of 5 backward flow simulations with observations saved as networkx graphs at time intervals 1 and 4
output = backward(distance=[1, 4],
interval_lower=0,
obs_type='networkx',
num_results=5,
graph_size=graph_size,
graph_beta=graph_beta,
inf_beta=infection_beta,
inf_gamma=infection_gamma)
print('Observations are of type:', type(output[0]['observations'][0]), end='\n\n')
for result in output:
observations = result['observations']
graph = result['base_graph']
sir_config = result['SIR_config']
# Retrieve beta and gamma values from the Configuration object
if sir_config:
beta = sir_config.config["model"]["beta"]
gamma = sir_config.config["model"]["gamma"]
# Print SIR values for this result
print('SIR model has values: ', sep='', end='')
print(f'beta = {round(beta, 3)}, gamma = {round(gamma, 3)}')
# Print observation time intervals for this result
print('Observations at time intervals: ', sep='', end='')
for ss in observations:
print(f'{ss.graph["time_y"]}, ', end='') # Access the time attribute directly from the graph metadata
print('\n')
# example code to use graph_eval
start_time = observations[0].graph['time_y'] # first observation time
start_obs = np.array(list(nx.get_node_attributes(observations[0], 'state_y').values())) # first observation
# for observations other than the starting observation, ask the model to predict the observation at that time interval
for obs in observations[1:]:
interval = obs.graph['time_y'] # time interval to predict
# make prediction... as an example, we will use the original starting observation as the predicted observation for all intervals
pred_obs = start_obs
true_obs = np.array(list(nx.get_node_attributes(obs, 'state_y').values()))
# evaluate accuracy of this observation
eval_dict = graph_eval(true_obs, pred_obs)
# print the evaluation metrics
print('Prediction for time interval', interval, '- ', end='')
for key, value in eval_dict.items():
if isinstance(value, dict):
for sub_key, sub_value in value.items():
print(f'{key}_{sub_key}: {round(sub_value, 3)}, ', end='')
else:
print(f'{key}: {round(value, 3)}, ', end='')
print()
print() | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/flow_tasks.py | xflow/flow_tasks.py | # pip dependencies
#!pip install torch
#!pip install torch_geometric==2.2.0
#!pip install xflow-net==0.0.21
#!pip install networkx
#!pip install ndlib
#!pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-2.2.0+cpu.html
#!pip install torch_geometric_temporal
# group 1
import numpy as np
import random
import networkx as nx
from networkx import Graph
from torch_geometric.data.data import Data, torch
import xflow
from xflow.dataset.nx import connSW
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
import warnings
from torch_geometric.utils.convert import from_networkx
from sklearn.metrics import classification_report, ConfusionMatrixDisplay
import matplotlib.pyplot as plt
from torch_geometric.utils import to_networkx
def connSW(n, beta=None):
    """Connected Watts-Strogatz small-world graph plus its ndlib edge config.

    Each edge gets a random weight in [0.40, 0.79], registered with ndlib as
    the edge 'threshold'.  A truthy `beta` overrides every weight with that
    fixed value (the random draw still happens first, so the RNG stream is
    identical either way).  Returns (graph, config).
    """
    graph = nx.connected_watts_strogatz_graph(n, 10, 0.1)  # k=10 neighbors, p=0.1 rewire
    cfg = mc.Configuration()
    for u, v in graph.edges():
        w = round(random.randrange(40, 80) / 100, 2)
        if beta:
            w = beta
        graph[u][v]['weight'] = w
        cfg.add_edge_configuration("threshold", (u, v), w)
    return graph, cfg
def BA(n=1000, beta=None):
    """Barabasi-Albert preferential-attachment graph (m=5) plus ndlib config.

    Edge weights are drawn uniformly from [0.40, 0.79] and registered as the
    ndlib 'threshold'; a truthy `beta` overrides every weight (the draw still
    happens, keeping the RNG stream unchanged).  Returns (graph, config).
    """
    graph = nx.barabasi_albert_graph(n, 5)
    cfg = mc.Configuration()
    for u, v in graph.edges():
        w = round(random.randrange(40, 80) / 100, 2)
        if beta:
            w = beta
        graph[u][v]['weight'] = w
        cfg.add_edge_configuration("threshold", (u, v), w)
    return graph, cfg
def ER(n=5000, beta=None):
    """Connected Erdos-Renyi graph (p=0.1) plus ndlib edge config.

    Resamples until a connected instance is produced.  Edge weights follow the
    same scheme as connSW/BA: random in [0.40, 0.79], overridden by a truthy
    `beta` (the random draw still happens either way).  Returns (graph, config).
    """
    graph = nx.erdos_renyi_graph(n, 0.1)
    while not nx.is_connected(graph):
        graph = nx.erdos_renyi_graph(n, 0.1)
    cfg = mc.Configuration()
    for u, v in graph.edges():
        w = round(random.randrange(40, 80) / 100, 2)
        if beta:
            w = beta
        cfg.add_edge_configuration("threshold", (u, v), w)
        graph[u][v]['weight'] = w
    return graph, cfg
# Registry mapping a graph-kind name to its generator.  Each generator accepts
# n (node count) and beta (optional fixed edge-weight override) and returns a
# tuple of (networkx graph, ndlib Configuration).
graph_gen_dict = {'connSW':connSW, 'BA':BA, 'ER':ER}
def setup(graph_kind, graph_size, graph_beta, inf_beta, inf_gamma, inf_initial_frac):
    """Generate a graph and a configured ndlib SIR model on it.

    Args:
        graph_kind: key into ``graph_gen_dict`` ('connSW', 'BA' or 'ER').
        graph_size: number of nodes to generate.
        graph_beta: fixed edge weight, or None for per-edge random weights.
        inf_beta / inf_gamma / inf_initial_frac: SIR infection rate, recovery
            rate and initial infected fraction; any value left as None is
            drawn uniformly at random from a sensible range.

    Returns:
        (graph, model, config) tuple.

    Raises:
        Exception: if ``graph_kind`` is not a known generator name.
    """
    # Generate a graph.  Bug fix: previously `g` was never initialized before
    # the lookup loop, so an unknown graph_kind hit `if g is None` with an
    # unbound name and raised NameError instead of the intended Exception.
    gen_function = graph_gen_dict.get(graph_kind)
    if gen_function is None:
        raise Exception('Graph generation function not known.')
    g, config = gen_function(n=graph_size, beta=graph_beta)

    # SIR model setup: randomize any parameter the caller left unspecified.
    if inf_beta is None:
        inf_beta = random.uniform(0.01, 0.06)
    if inf_gamma is None:
        inf_gamma = random.uniform(0.005, 0.03)
    if inf_initial_frac is None:
        inf_initial_frac = random.uniform(0.02, 0.05)

    config.add_model_parameter('beta', inf_beta)
    config.add_model_parameter('gamma', inf_gamma)
    config.add_model_parameter("fraction_infected", inf_initial_frac)
    model = ep.SIRModel(g)
    model.set_initial_status(config)
    return g, model, config
def run_sim(distance, interval_lower, g, model):
    """Run the epidemic model just long enough to cover the requested distances.

    Args:
        distance: list of timestep offsets at which observations will be taken.
        interval_lower: starting timestep of the observation window; a negative
            value means "pick one at random" within the graph diameter.
        g: networkx graph the model runs on (used only to bound the run length).
        model: configured ndlib epidemic model.

    Returns:
        A list with one cumulative {node: state} dict per iteration; entry i is
        the full node-state snapshot at step i.
    """
    # Choose the number of warm-up timesteps; don't run longer than needed.
    diameter = nx.diameter(g)
    dist_max = max(distance)
    r = interval_lower
    if r < 0:
        if (diameter-dist_max <0):
            # Requested distance does not fit inside the diameter: warn and
            # clamp so the branch below forces r = 0.
            warnings.warn(f'Distance of {dist_max} exceeds graph diameter of {diameter}.')
            diameter = dist_max
        if (diameter-dist_max ==0):
            r=0
        else:
            r = random.randrange(diameter-dist_max) # random start within the feasible window
    # Run for warm-up + max distance + 1 iterations (inclusive of step 0).
    iterations = model.iteration_bunch(r+dist_max+1, node_status=True)
    node_states_iterations = []
    node_states = {}
    for iteration in iterations:
        if iteration['iteration'] == 0:
            # Step 0 carries the full initial status of every node.
            node_states = {node: status for node, status in iteration['status'].items()}
        else:
            # Fold this iteration's status updates into the running state dict.
            for node, status in iteration['status'].items():
                node_states[node] = status
        node_states_iterations.append(node_states.copy())
    return node_states_iterations
def format_sim_result(intervals, iterations, obs_type, g):
    """Package per-timestep node states into observation snapshots.

    Args:
        intervals: iteration indices to snapshot; intervals[0] is treated as
            the reference ("x") observation in the 'torch' branch.
        iterations: list of cumulative {node: state} dicts from run_sim().
        obs_type: one of 'numpy', 'torch' or 'networkx'.
        g: base graph; mutated by the 'torch' branch (callers pass g.copy()).

    Returns:
        A list of snapshots in the requested format, or None for an
        unrecognized obs_type (see note at the end).
    """
    simulation_result = []
    if obs_type == 'numpy':
        # One dict per interval: {'time': t, 'observation': np state vector}.
        for interval in intervals:
            node_states = iterations[interval]
            # NOTE(review): relies on every interval's dict sharing one node
            # ordering — true here since run_sim() copies one running dict.
            obs = np.array(list(node_states.values()))
            snapshot = {
                'time': interval,
                'observation': obs
            }
            simulation_result.append(snapshot)
        return simulation_result
    if obs_type == 'torch':
        # The first interval provides the input features ("x"); every interval
        # then becomes one PyG Data object carrying its own target state ("y").
        start = intervals[0]
        node_states = iterations[start]
        nx.set_node_attributes(g, node_states, name='state_x')
        nx.set_node_attributes(g, start, name='time_x')
        for interval in intervals:
            node_states = iterations[interval]
            nx.set_node_attributes(g, node_states, name='state_y')
            nx.set_node_attributes(g, interval, name='time_y')
            obs = from_networkx(g.copy(), group_node_attrs=['state_x', 'time_x', 'time_y'], group_edge_attrs=['weight'])
            state = np.array(list(node_states.values()))
            obs.y = torch.tensor(state)
            obs.time_y = torch.tensor(interval) # scalar tensor; consumers call .item()
            simulation_result.append(obs)
        return simulation_result
    if obs_type == 'networkx':
        # One graph copy per interval: states in node attrs, timestep in
        # graph-level metadata.
        for interval in intervals:
            node_states = iterations[interval]
            g_copy = g.copy()
            nx.set_node_attributes(g_copy, node_states, name='state_y')
            g_copy.graph['time_y'] = interval # Store time in graph metadata
            simulation_result.append(g_copy)
        return simulation_result
    # NOTE(review): an unknown obs_type silently falls through and returns
    # None; the original author disabled this check — consider re-enabling:
    # raise Exception("Acceptable values for obs_storage are: 'numpy', 'networkx', 'torch'")
def forward(distance,
            num_results=10,
            obs_type = 'numpy',
            graph_kind = 'connSW',
            graph_size=1000,
            graph_beta = None,
            inf_beta = None,
            inf_gamma = None,
            inf_initial_frac = None,
            interval_lower = -1,
            ):
    """Generate forward-flow SIR simulation datasets.

    Each of the `num_results` runs builds a fresh graph + SIR model, simulates
    it, and snapshots a base observation plus one observation `d` steps AFTER
    the base for every `d` in `distance`.  `distance` may be a single int.

    Returns a list of dicts with keys 'observations', 'base_graph' and
    'SIR_config'.
    """
    if isinstance(distance, int):
        distance = [distance]
    results = []
    for _ in range(num_results):
        g, inf_model, sir_config = setup(graph_kind, graph_size, graph_beta,
                                         inf_beta, inf_gamma, inf_initial_frac)
        iterations = run_sim(distance, interval_lower, g, inf_model)
        # The base observation sits max(distance) steps before the end, so
        # every forward offset stays inside the simulated window.
        base = len(iterations) - max(distance) - 1
        intervals = [base] + [base + d for d in distance]
        observations = format_sim_result(intervals, iterations, obs_type, g.copy())
        results.append({
            'observations': observations,
            'base_graph': g,
            'SIR_config': sir_config,
        })
    return results
def backward(distance,
             num_results=10,
             obs_type = 'numpy',
             graph_kind = 'connSW',
             graph_size=1000,
             graph_beta = None,
             inf_beta = None,
             inf_gamma = None,
             inf_initial_frac = None,
             interval_lower = -1,
             ):
    """Generate backward-flow SIR simulation datasets.

    Each of the `num_results` runs builds a fresh graph + SIR model, simulates
    it, and snapshots the final observation plus one observation `d` steps
    BEFORE it for every `d` in `distance`.  `distance` may be a single int.

    Returns a list of dicts with keys 'observations', 'base_graph' and
    'SIR_config'.
    """
    if isinstance(distance, int):
        distance = [distance]
    results = []
    for _ in range(num_results):
        g, inf_model, sir_config = setup(graph_kind, graph_size, graph_beta,
                                         inf_beta, inf_gamma, inf_initial_frac)
        iterations = run_sim(distance, interval_lower, g, inf_model)
        # Anchor on the last simulated step and look backwards from it.
        last = len(iterations) - 1
        intervals = [last] + [last - d for d in distance]
        observations = format_sim_result(intervals, iterations, obs_type, g.copy())
        results.append({
            'observations': observations,
            'base_graph': g,
            'SIR_config': sir_config,
        })
    return results
# def graph_eval(obs_true, obs_pred, display=False):
# from sklearn.metrics import classification_report, ConfusionMatrixDisplay
# import matplotlib.pyplot as plt
# if not isinstance(obs_true, np.ndarray) or not isinstance(obs_pred, np.ndarray):
# raise Exception('Observation inputs must be numpy arrays')
# cr = classification_report(obs_true, obs_pred, zero_division=0)
# if display:
# ConfusionMatrixDisplay.from_predictions(obs_true, obs_pred)
# plt.show()
# print(cr)
# return cr
def parse_classification_report(cr):
    """Parse sklearn's text classification report into a dictionary.

    The header, blank lines and the trailing macro/weighted-average rows are
    sliced away; each remaining class row becomes
    ``{label: {'precision', 'recall', 'f1-score', 'support'}}`` and the
    accuracy row becomes a top-level ``'accuracy'`` float.
    """
    parsed = {}
    metrics = ('precision', 'recall', 'f1-score')
    # Skip the header + first blank line, and the macro avg / weighted avg /
    # trailing-empty lines at the bottom.
    for raw in cr.split('\n')[2:-3]:
        cols = raw.split()
        if len(cols) < 2:
            continue  # blank separator line
        if cols[0] == 'accuracy':
            parsed['accuracy'] = float(cols[1])
        else:
            entry = dict(zip(metrics, map(float, cols[1:4])))
            entry['support'] = int(cols[4])
            parsed[cols[0]] = entry
    return parsed
def graph_eval(obs_true, obs_pred, display=False):
    """Score a predicted node-state vector against the ground truth.

    Both inputs must be numpy arrays.  Returns the sklearn classification
    report parsed into a dict (see parse_classification_report).  When
    `display` is True, also shows a confusion matrix and prints the raw
    report text.
    """
    from sklearn.metrics import classification_report, ConfusionMatrixDisplay
    import matplotlib.pyplot as plt
    valid = isinstance(obs_true, np.ndarray) and isinstance(obs_pred, np.ndarray)
    if not valid:
        raise Exception('Observation inputs must be numpy arrays')
    report_text = classification_report(obs_true, obs_pred, zero_division=0)
    metrics = parse_classification_report(report_text)
    if display:
        ConfusionMatrixDisplay.from_predictions(obs_true, obs_pred)
        plt.show()
        print(report_text)
    return metrics
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/visualization/multinet_vis.py | xflow/visualization/multinet_vis.py | # imports
import dash
import random
from dash import dcc
from dash import html
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import networkx as nx
import ndlib.models.epidemics as ep
from ndlib.models.ModelConfig import Configuration
import pandas as pd
from dash import dash_table
# - - - - - - - - - - - - - - - - - - - - -
# Set the number of simulation time steps
TIME_STEPS = 10
# - - - - - - - - - - - - - - - - - - - - -
def get_sir_model(graph, num_infected, beta, gamma):
    """Returns a configured SIR model for the given graph."""
    config = Configuration()
    config.add_model_parameter("beta", beta)
    config.add_model_parameter("gamma", gamma)
    # Seed the epidemic with a uniform random set of initially infected nodes.
    seeds = random.sample(list(graph.nodes()), num_infected)
    config.add_model_initial_configuration("Infected", seeds)
    model = ep.SIRModel(graph)
    model.set_initial_status(config)
    return model
def run_sir_model(model, time_steps):
    """Runs the given SIR model for the given number of time steps."""
    iterations = model.iteration_bunch(time_steps)
    return iterations
# Create two random graphs with different numbers of nodes
# (p=1 makes each Erdos-Renyi layer a complete graph on 20 nodes)
network_layers = [nx.erdos_renyi_graph(20, 1), nx.erdos_renyi_graph(20, 1)]
# Assign random positions for the nodes in each network layer
for G in network_layers:
    for node in G.nodes():
        G.nodes[node]["pos"] = (random.uniform(-1, 1), random.uniform(-1, 1))
# Size of the candidate pool taken from each layer (here: all nodes)
nodes_pool_layer0 = int(len(network_layers[0].nodes()) * 1)
nodes_pool_layer1 = int(len(network_layers[1].nodes()) * 1)
# Randomly sample nodes from both layers. random.sample requires a real
# sequence (not a networkx NodeView) as of Python 3.11, hence list(...).
nodes_layer0_to_connect = random.sample(list(network_layers[0].nodes()), nodes_pool_layer0)
nodes_layer1_to_connect = random.sample(list(network_layers[1].nodes()), nodes_pool_layer1)
# Pair and connect selected nodes; each inter-layer edge is mirrored into
# both layer graphs so either layer sees the coupling.
for node0, node1 in zip(nodes_layer0_to_connect, nodes_layer1_to_connect):
    network_layers[0].add_edge(node0, node1)
    network_layers[1].add_edge(node0, node1)
# Initialize the Dash app, pulling in Bootstrap 4 plus a local stylesheet
# for the grid classes (row, col-*) used throughout the layout below.
app = dash.Dash(
    __name__,
    external_stylesheets=[
        "https://cdn.jsdelivr.net/npm/bootstrap@4.3.1/dist/css/bootstrap.min.css",
        'styles.css'
    ]
)
# Top-level page layout: a Bootstrap row with a control/plot column (col-4)
# on the left and the 3D network view (col-8) on the right. The component
# ids declared here (input-infected, beta-slider, gamma-slider, time-slider,
# status-table, energy-plot, 3d-scatter-plot) are wired to the
# update_table_graph callback defined below.
app.layout = html.Div(
    [
        # Left side of the screen (col-6)
        html.Div([
            # Controls and Status Information section
            html.Div([
                # Row for "Initial infected nodes" and DataTable
                html.Div([
                    # Column for Initial infected nodes
                    html.Div([
                        # Label for Initial infected nodes
                        html.Div([
                            html.Label("Initial infected nodes:", style={"font-weight": "bold"}),
                        ], className="col-12"), # This div ensures the label takes up the full width of its parent column
                        # Input for Initial infected nodes
                        html.Div([
                            dcc.Input(id="input-infected", type="number", value=1)
                        ], className="col-12"), # This div ensures the input takes up the full width of its parent column
                    ], className="col-6"), # This column takes up half of the left-hand side
                    # DataTable column with padding to the right
                    html.Div([
                        dash_table.DataTable(id="status-table"),
                    ], className="col-6", style={"padding-right": "100px"}), # This column also takes up half of the left-hand side and has padding to the right
                ], className="row"),
                # Beta (Infection rate) label and slider
                html.Div([
                    html.Label("Beta (Infection rate):", style={"font-weight": "bold"}),
                    dcc.Slider(id="beta-slider", min=0, max=1, step=0.1, value=0.8)
                ], className="col-12"), # This div will ensure its content starts on a new line
                # Gamma (Recovery rate) label and slider
                html.Div([
                    html.Label("Gamma (Recovery rate):", style={"font-weight": "bold"}),
                    dcc.Slider(id="gamma-slider", min=0, max=1, step=0.1, value=0.01)
                ], className="col-12"),
                # Time Step label and slider (one mark per simulated step)
                html.Div([
                    html.Label("Time Step:", style={"font-weight": "bold"}),
                    dcc.Slider(
                        id="time-slider",
                        min=0,
                        max=TIME_STEPS - 1,
                        value=0,
                        marks={str(i): f"{i}" for i in range(TIME_STEPS)},
                        step=None,
                    )
                ], className="col-12"),
            ], style={"height": "400px", "width": "100%", "padding-top": "20px"}),
            # Energy Plot section
            html.Div([
                dcc.Graph(id="energy-plot", style={"height": "550px", "width": "100%"})
            ], className="col-12"),
        ], className="col-4"),
        # Right side of the screen (col-6)
        html.Div([
            # 3D Scatter Plot section
            html.Div([
                dcc.Graph(id="3d-scatter-plot", style={"height": "1200px", "width": "100%"})
            ], className="col-12"),
        ], className="col-8"),
    ],
    className="row"
)
# for both table and graph
@app.callback(
    Output("status-table", "data"),
    Output("status-table", "columns"),
    Output("3d-scatter-plot", "figure"),
    Output("energy-plot", "figure"),
    [
        Input("time-slider", "value"),
        Input("input-infected", "value"),
        Input("beta-slider", "value"),
        Input("gamma-slider", "value"),
    ],
)
def update_table_graph(time_step, num_infected, beta, gamma):
    """
    Main Dash callback: re-run the per-layer SIR simulations and rebuild all
    four outputs (status-table data and columns, the 3D network figure, and
    the energy-over-time figure) whenever any control changes.

    time_step: snapshot index selected on the time slider.
    num_infected: number of initially infected nodes per layer.
    beta, gamma: SIR infection / recovery rates forwarded to the models.
    """
    # Part 1
    for layer in network_layers:
        print("layer", layer)  # debug output
    model_results = run_simulations(network_layers, num_infected, beta, gamma, TIME_STEPS)
    # NOTE(review): this is an alias, not a copy — the loops below mutate the
    # same per-iteration dicts that model_results holds.
    model_results_modified = model_results
    if len(model_results) > 1:
        # Extract the status of each node in layer 0 and layer 1 at the current time step
        layer0_results = model_results[0]
        layer1_results = model_results[1]
        print("Status of each node in Layer 0:")
        for result in layer0_results:
            if 'status' in result:
                for node_id, status in result['status'].items():
                    print(f"Node {node_id}: Status {status}")
        print("\nStatus of each node in Layer 1:")
        for result in layer1_results:
            if 'status' in result:
                for node_id, status in result['status'].items():
                    print(f"Node {node_id}: Status {status}")
    # # Initialize status count dictionaries for each layer
    status_counts_total = {"Susceptible": 0, "Infected": 0, "Recovered": 0}
    status_counts_layer0 = {"Susceptible": 0, "Infected": 0, "Recovered": 0}
    status_counts_layer1 = {"Susceptible": 0, "Infected": 0, "Recovered": 0}
    # Compute the counts of each status at the current time step for each layer
    # (status code 0 is counted as Susceptible, 1 as Infected, 2 as Recovered)
    for layer_index, result in enumerate(model_results):
        for status, count in result[time_step]["node_count"].items():
            if status == 0:
                status_counts_total["Susceptible"] += count
                # print("layer_index", layer_index)
                if layer_index == 0:
                    status_counts_layer0["Susceptible"] += count
                elif layer_index == 1:
                    status_counts_layer1["Susceptible"] += count
            elif status == 1:
                status_counts_total["Infected"] += count
                if layer_index == 0:
                    status_counts_layer0["Infected"] += count
                elif layer_index == 1:
                    status_counts_layer1["Infected"] += count
            elif status == 2:
                status_counts_total["Recovered"] += count
                if layer_index == 0:
                    status_counts_layer0["Recovered"] += count
                elif layer_index == 1:
                    status_counts_layer1["Recovered"] += count
    # Create a DataFrame and format it for use with DataTable
    df = pd.DataFrame([status_counts_total])
    data = df.to_dict("records")
    columns = [{"name": i, "id": i} for i in df.columns]
    # Part 2
    # Each iteration's 'status' holds only the nodes that changed; fold those
    # deltas forward into a cumulative 'updated_status' per iteration.
    for result in model_results_modified:
        # Initialize an empty dictionary for 'updated_status'
        # to hold status values across iterations
        updated_status = {}
        for iteration in result:
            if iteration['iteration'] == 0:
                # For the first iteration, 'updated_status' is same as 'status'
                iteration['updated_status'] = iteration['status'].copy()
            else:
                # For subsequent iterations, update 'updated_status' based on the 'status'
                for key, value in iteration['status'].items():
                    updated_status[key] = value
                iteration['updated_status'] = updated_status.copy()
            # Update our ongoing 'updated_status' with the current 'updated_status'
            updated_status = iteration['updated_status'].copy()
    print('model_results_modified', model_results_modified)
    # "Diff energy": number of node pairs whose statuses disagree, with
    # Recovered (2) treated the same as Susceptible (0).
    model_results_diff = model_results_modified
    for result in model_results_diff:
        for iteration in result:
            updated_status = iteration['updated_status']
            energy_diff_count = 0
            keys = list(updated_status.keys())
            for i in range(len(keys)):
                for j in range(i+1, len(keys)):
                    val_i = updated_status[keys[i]]
                    val_j = updated_status[keys[j]]
                    # If val_i or val_j is 2, consider it as 0
                    if val_i == 2:
                        val_i = 0
                    if val_j == 2:
                        val_j = 0
                    # Check if they're different
                    if val_i != val_j:
                        energy_diff_count += 1
            iteration['energy_diff_count'] = energy_diff_count
    print('model_results_diff', model_results_diff)
    # "Self energy": number of currently infected nodes per iteration.
    model_results_self = model_results_diff
    for result in model_results_self:
        for iteration in result:
            updated_status = iteration['updated_status']
            energy_self_count = sum([1 for value in updated_status.values() if value == 1])
            iteration['energy_self_count'] = energy_self_count
    print('model_results_self', model_results_self)
    # Inter-layer ("layer2") energies over the sampled node pairs that were
    # connected across layers at module setup.
    layer2_results = []
    for iteration in range(len(model_results_self[0])): # Assuming both lists have the same length
        layer2_energy_self_count = 0
        layer2_energy_diff_count = 0
        # Check self energy for nodes in layer 0
        for node in nodes_layer0_to_connect:
            if model_results_self[0][iteration]["updated_status"].get(node) == 1:
                layer2_energy_self_count += 1
        # Check self energy for nodes in layer 1
        for node in nodes_layer1_to_connect:
            if model_results_self[1][iteration]["updated_status"].get(node) == 1:
                layer2_energy_self_count += 1
        # Check diff energy between pairs of nodes
        for node0, node1 in zip(nodes_layer0_to_connect, nodes_layer1_to_connect):
            status_node0 = model_results_self[0][iteration]["updated_status"].get(node0)
            status_node1 = model_results_self[1][iteration]["updated_status"].get(node1)
            # Check the cases
            if (status_node0 == 1 and status_node1 in [0, 2]) or (status_node1 == 1 and status_node0 in [0, 2]):
                layer2_energy_diff_count += 1
        # Store the results
        layer2_results.append({
            'iteration': iteration,
            'layer2_energy_self_count': layer2_energy_self_count,
            'layer2_energy_diff_count': layer2_energy_diff_count
        })
    print('layer2_results', layer2_results)
    # Build the 3D figure: one edge trace + one node trace per layer, with
    # nodes placed at z = layer index and colored by status at `time_step`.
    graph_data = []
    # Create traces for edges and nodes
    for idx, network in enumerate(network_layers):
        edge_color = "pink" if idx == 0 else "lightblue" if idx == 1 else "#888" # Default color is #888
        edge_trace = go.Scatter3d(
            x=[],
            y=[],
            z=[],
            line={"width": 0.5, "color": edge_color},
            hoverinfo="none",
            mode="lines",
            name="Edges" # Assign a name for the edges trace
        )
        node_trace = go.Scatter3d(
            x=[],
            y=[],
            z=[],
            mode="markers",
            hoverinfo="text",
            marker={
                "showscale": False,
                "colorscale": "Viridis",
                "reversescale": True,
                "color": [],
                "size": 6,
                "opacity": 0.8,
                "line": {"width": 0.5, "color": "#888"},
            },
            name="Nodes" # Assign a name for the nodes trace
        )
        # Add edges to trace
        for edge in network.edges():
            x0, y0 = network.nodes[edge[0]]["pos"]
            x1, y1 = network.nodes[edge[1]]["pos"]
            edge_trace["x"] += (x0, x1, None)
            edge_trace["y"] += (y0, y1, None)
            edge_trace["z"] += (idx, idx, None)
        # Add nodes to trace
        for node in network.nodes:
            x, y = network.nodes[node]["pos"]
            node_trace["x"] += (x,)
            node_trace["y"] += (y,)
            node_trace["z"] += (idx,)
            status = 0
            if node in model_results_self[idx][time_step]["updated_status"]:
                status = model_results_self[idx][time_step]["updated_status"][node]
            color = (
                "red" if status == 1 else "green" if status == 2 else "grey"
            ) # Color based on the infection status
            node_trace["marker"]["color"] += (color,)
        graph_data.extend((edge_trace, node_trace))
    # Add inter-layer edges to trace
    inter_edge_trace = go.Scatter3d(
        x=[],
        y=[],
        z=[],
        line={"width": 0.5, "color": "#888"},
        hoverinfo="none",
        mode="lines",
        name="Inter-Layer-Edges"
    )
    # Add inter-layer edges to trace
    for node0, node1 in zip(network_layers[0].nodes(), network_layers[1].nodes()):
        x0, y0 = network_layers[0].nodes[node0]["pos"]
        x1, y1 = network_layers[1].nodes[node1]["pos"]
        inter_edge_trace["x"] += (x0, x1, None)
        inter_edge_trace["y"] += (y0, y1, None)
        inter_edge_trace["z"] += (0, 1, None)
    graph_data.append(inter_edge_trace)
    # Define layout
    layout = go.Layout(
        scene=dict(
            xaxis=dict(title="", showticklabels=False, range=[-1, 1], autorange=False, zeroline=False, showline=False, showbackground=False, showgrid=False),
            yaxis=dict(title="", showticklabels=False, range=[-1, 1], autorange=False, zeroline=False, showline=False, showbackground=False, showgrid=False),
            zaxis=dict(title="", showticklabels=False, range=[-1, 1], autorange=False, zeroline=False, showline=False, showbackground=False, showgrid=False),
            aspectratio=dict(x=1, y=1, z=1),
            camera=dict(eye=dict(x=1.2, y=1.2, z=1.2)),
        ),
        legend=dict(
            x=0.5,
            y=1.1,
            xanchor='center',
            yanchor='middle',
            orientation='h',
            font=dict(
                size=20
            )
        )
    )
    figure = {"data": graph_data, "layout": layout}
    # Extracting the data
    iterations = [entry['iteration'] for entry in model_results_self[0]]
    y_values_0 = [entry['energy_diff_count'] + entry['energy_self_count'] for entry in model_results_self[0]]
    y_values_1 = [entry['energy_diff_count'] + entry['energy_self_count'] for entry in model_results_self[1]]
    y_values_2 = [entry['layer2_energy_self_count'] + entry['layer2_energy_diff_count'] for entry in layer2_results]
    # Plotting the data
    energy = go.Figure()
    # blue
    energy.add_trace(go.Scatter(x=iterations, y=y_values_0, mode='lines', name='Lower Layer', fill='tozeroy', fillcolor='rgba(173, 216, 230, 0.3)'))
    # red
    energy.add_trace(go.Scatter(x=iterations, y=y_values_1, mode='lines', name='Upper Layer', fill='tozeroy', fillcolor='rgba(255, 182, 193, 0.3)'))
    # green
    energy.add_trace(go.Scatter(x=iterations, y=y_values_2, mode='lines', name='Inter-Layer', fill='tozeroy', fillcolor='rgba(144, 238, 144, 0.3)'))
    energy.update_layout(title='Energy vs. Time Step',
                        xaxis_title='Time Step',
                        yaxis_title='Energy',
                        title_x=0.5) # This centers the title
    energy.update_layout(
        legend=dict(
            x=0.5, # this centers the legend horizontally
            y=1.1, # this positions the legend just above the chart
            xanchor='center', # this is to ensure the 'x' value refers to the center of the legend
            orientation='h' # this ensures the legend items are arranged horizontally
        )
    )
    return data, columns, figure, energy
def run_simulations(network_layers, num_infected, beta, gamma, TIME_STEPS):
    """Build and run one SIR model per network layer; returns one iteration list per layer."""
    all_results = []
    for layer in network_layers:
        model = get_sir_model(layer, num_infected, beta, gamma)
        all_results.append(run_sir_model(model, TIME_STEPS))
    return all_results
# Launch the Dash development server when run as a script.
if __name__ == "__main__":
app.run_server(debug=True) | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/visualization/entropy_demo_2.py | xflow/visualization/entropy_demo_2.py | # -*- coding: utf-8 -*-
"""IJCAI Demo.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Rslj_mz0mLHeSsNpEqWY7bneliHBmLhD
"""
# !pip install -U dash==1.19.0
# !pip install --upgrade dash werkzeug
# !pip install ndlib
import dash
from dash import dcc, html
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import networkx as nx
import numpy as np
import random
import scipy.stats
from scipy.stats import entropy
import time
# Builder for the demo network topology
def create_small_world_graph(n=1500, k=5, p=0.3):
    """
    Build a Watts-Strogatz small-world graph.

    n: number of nodes
    k: each node is wired to its k nearest neighbors in a ring
    p: probability that each edge is rewired
    """
    return nx.watts_strogatz_graph(n, k, p)
# Function for the SIR model simulation
def SIR_simulation(graph, beta=0.5, gamma=0.05, initial_infected=1, max_steps=1000, interval=10):
    """
    Simulate a discrete-time SIR epidemic on a graph.

    graph: graph exposing .nodes(), .neighbors() and .edges()
    beta: per-contact infection probability per step
    gamma: per-step recovery probability for an infected node
    initial_infected: number of initially infected nodes (sampled uniformly)
    max_steps: maximum number of steps to simulate
    interval: record a full node-status snapshot every `interval` steps

    Returns (S, I, R, status_record, SI, IR, SR, SS, II, RR): per-step
    compartment sizes, the status snapshots, and per-step edge counts keyed
    by the status pair of each edge's endpoints.
    """
    # Initialize statuses. list(...) is required because random.sample
    # rejects non-sequence populations (e.g. a networkx NodeView) on 3.11+.
    infected = set(random.sample(list(graph.nodes()), initial_infected))
    # Seeds belong to I only; removing them from S keeps the compartments
    # disjoint (previously the initially infected were also counted in S).
    susceptible = set(graph.nodes()) - infected
    recovered = set()
    # Records for plotting
    S, I, R = [], [], []
    SI, IR, SR, SS, II, RR = [], [], [], [], [], []
    status_record = []
    for step in range(max_steps):
        # Update statuses
        new_infected = set()
        new_recovered = set()
        # Check infection spread
        for i in infected:
            for neighbor in graph.neighbors(i):
                if neighbor in susceptible and random.random() < beta:
                    new_infected.add(neighbor)
        # Update recovered
        for i in infected:
            if random.random() < gamma:
                new_recovered.add(i)
        # Update sets
        infected |= new_infected
        infected -= new_recovered
        recovered |= new_recovered
        susceptible -= new_infected
        # Update records
        S.append(len(susceptible))
        I.append(len(infected))
        R.append(len(recovered))
        # Record status at specified interval
        if step % interval == 0:
            status_record.append({'S': set(susceptible), 'I': set(infected), 'R': set(recovered)})
        # Edge counts
        ss, ii, rr, si, ir, sr = 0, 0, 0, 0, 0, 0
        for u, v in graph.edges():
            u_status = 'S' if u in susceptible else 'I' if u in infected else 'R'
            v_status = 'S' if v in susceptible else 'I' if v in infected else 'R'
            if u_status == 'S' and v_status == 'S': ss += 1
            elif u_status == 'I' and v_status == 'I': ii += 1
            elif u_status == 'R' and v_status == 'R': rr += 1
            elif (u_status == 'S' and v_status == 'I') or (u_status == 'I' and v_status == 'S'): si += 1
            elif (u_status == 'I' and v_status == 'R') or (u_status == 'R' and v_status == 'I'): ir += 1
            elif (u_status == 'S' and v_status == 'R') or (u_status == 'R' and v_status == 'S'): sr += 1
        SI.append(si)
        IR.append(ir)
        SR.append(sr)
        SS.append(ss)
        II.append(ii)
        RR.append(rr)
        # End if no infected left
        if not infected:
            break
    return S, I, R, status_record, SI, IR, SR, SS, II, RR
# Function to calculate entropy
def calculate_entropy(counts):
    """Shannon entropy (in bits) of a count distribution; 0 for an all-zero input."""
    total = sum(counts)
    if not total:
        return 0
    return entropy([count / total for count in counts], base=2)
def plot_networkx_graph(graph, status=None):
    """
    Render the graph as a Plotly figure, coloring nodes by SIR status.

    graph: graph to draw (positions come from a fresh spring layout).
    status: optional dict with 'S', 'I', 'R' collections; node membership
        decides the color. When omitted, all nodes are drawn gray.

    Returns a plotly Figure containing edge, node and legend traces.
    """
    pos = nx.spring_layout(graph) # Generate positions for all nodes
    # Initialize node color list
    node_color = []
    if status is None:
        node_color = ['gray' for node in graph.nodes()] # Default color if no status is provided
    else:
        # Assign colors based on SIR status
        for node in graph.nodes():
            if node in status['S']:
                node_color.append('blue') # Color for Susceptible
            elif node in status['I']:
                node_color.append('red') # Color for Infected
            elif node in status['R']:
                node_color.append('green') # Color for Recovered
            else:
                node_color.append('black') # Color for nodes not in S, I, or R
    # Plotting edges (same as before)
    edge_x = []
    edge_y = []
    for edge in graph.edges():
        # x0, y0 = pos[edge[0]]
        # x1, y1 = pos[edge[1]]
        # edge_x.append(x0)
        # edge_x.append(x1)
        # edge_x.append(None)
        # edge_y.append(y0)
        # edge_y.append(y1)
        # edge_y.append(None)
        x0, y0 = pos[edge[0]]
        x1, y1 = pos[edge[1]]
        edge_x += [x0, x1, None]
        edge_y += [y0, y1, None]
    # Plotting nodes
    node_x = [pos[node][0] for node in graph.nodes()]
    node_y = [pos[node][1] for node in graph.nodes()]
    node_trace = go.Scatter(
        x=node_x, y=node_y,
        mode='markers',
        hoverinfo='text',
        marker=dict(
            showscale=False,
            color=node_color, # Use node_color list for coloring nodes
            size=10,
            line_width=2),
        showlegend=False # Set showlegend to False for the nodes trace
    )
    # Define legend labels
    legend_labels = ['Susceptible (S)', 'Infected (I)', 'Recovered (R)', 'Other']
    fig = go.Figure(data=[
        go.Scatter(x=edge_x, y=edge_y, line=dict(width=0.5, color='#888'), hoverinfo='none', mode='lines', showlegend=False),
        node_trace
    ],
    layout=go.Layout(
        showlegend=True,
        hovermode='closest',
        margin=dict(b=0, l=0, r=0, t=0),
        xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
        yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
        legend=dict(x=0.01, y=0.99, borderwidth=1, itemsizing='constant', itemwidth=30,
                    font=dict(family="Courier", size=12, color="black")),
        legend_title_text='SIR Status',
        legend_traceorder='normal',
        legend_bgcolor='LightSteelBlue',
        legend_borderwidth=2,
        legend_bordercolor='Black',
        legend_itemsizing='constant',
        legend_itemwidth=30,
        legend_font=dict(family="Courier", size=12, color="black"),
        legend_title=dict(text="SIR Status", font=dict(family="Courier", size=12, color="black"))
    ))
    # Add legend items with consistent styling (dummy single-point traces)
    for color, label in zip(['blue', 'red', 'green', 'black'], legend_labels):
        fig.add_trace(go.Scatter(x=[None], y=[None], mode='markers',
                                 marker=dict(size=10, color=color),
                                 showlegend=True, name=label))
    return fig
# Generate a small-world graph
graph = create_small_world_graph()
# SIR model simulation (per-step compartment sizes, snapshots, edge counts)
S, I, R, status_record, SI, IR, SR, SS, II, RR = SIR_simulation(graph)
# Combine SIR status information into a dictionary
# NOTE(review): S, I and R are per-step *counts*, so these are sets of
# integers rather than node ids; plot_networkx_graph treats them as node-id
# sets, so the initial coloring is likely unintended — verify.
status = {'S': set(S), 'I': set(I), 'R': set(R)}
# Entropy calculations (bits, via calculate_entropy)
entropy_S = [calculate_entropy([s, sum(I) - i, sum(R) - r]) for s, i, r in zip(S, I, R)]
entropy_I = [calculate_entropy([sum(S) - s, i, sum(R) - r]) for s, i, r in zip(S, I, R)]
entropy_R = [calculate_entropy([sum(S) - s, sum(I) - i, r]) for s, i, r in zip(S, I, R)]
combined_entropy = [calculate_entropy([s, i, r]) for s, i, r in zip(S, I, R)]
combined_SR_entropy = [calculate_entropy([s + r, i]) for s, i, r in zip(S, I, R)]  # S and R merged into one bin
# Eigenvalues of graph Laplacian and adjacency matrix
# laplacian_eigenvalues = nx.laplacian_spectrum(graph)
# adjacency_eigenvalues = nx.adjacency_eigenvalues(graph)
L = nx.normalized_laplacian_matrix(graph)
laplacian_eigenvalues = np.linalg.eigvals(L.toarray())
adjacency_eigenvalues = nx.adjacency_spectrum(graph)
# Initialize Dash app
app = dash.Dash(__name__)
# Dash app layout with two columns: the network drawing on the left, four
# time-series charts on the right. A dcc.Store plus dcc.Interval pair drives
# the snapshot animation handled by update_network_plot_and_index below.
app.layout = html.Div([
    html.H1('Network Analysis with SIR Model 2'),
    dcc.Store(id='interval-index', data=0), # Store for current interval index
    # Row for content
    html.Div([
        # Left column for textual content or additional information
        html.Div([
            # Generate the plot with SIR status information
            dcc.Graph(id='network-graph', figure=plot_networkx_graph(graph, status))
            # dcc.Graph(id='network-graph', figure=plot_networkx_graph(graph))
        # ], className="six columns"),
        ], style={'width': '50%', 'display': 'inline-block'}), # Inline style for left column
        # Right column with embedded two columns
        html.Div([
            # Embedded left column within the right column
            html.Div([
                dcc.Graph(
                    id='sum-of-energy',
                    figure={
                        'data': [
                            go.Scatter(y=np.array(SI) + np.array(IR), mode='lines', name='Neighbor-Energy'),
                            go.Scatter(y=I, mode='lines', name='Self-Energy')
                        ],
                        'layout': go.Layout(
                            title='Sum of Energy Over Time',
                            xaxis={'title': 'Step'},
                            yaxis={'title': 'Number of Edges'},
                            legend=dict(x=0.01, y=0.99, bordercolor="Black", borderwidth=1) # Adjust these values as needed
                        )
                    }
                ),
                dcc.Graph(
                    id='sir-model',
                    figure={
                        'data': [
                            go.Scatter(y=S, mode='lines', name='Susceptible'),
                            go.Scatter(y=I, mode='lines', name='Infected'),
                            go.Scatter(y=R, mode='lines', name='Recovered')
                        ],
                        'layout': go.Layout(
                            title='SIR Model Over Time',
                            xaxis={'title': 'Step'},
                            yaxis={'title': 'Number of Nodes'},
                            legend=dict(x=0.01, y=0.99, bordercolor="Black", borderwidth=1) # Adjust these values as needed
                        )
                    }
                ),
            ], style={'width': '50%', 'display': 'inline-block'}), # Inline style for embedded left column
            # Embedded right column within the right column
            html.Div([
                dcc.Graph(
                    id='entropy-separate-statuses',
                    figure={
                        'data': [
                            go.Scatter(y=entropy_S, mode='lines', name='Entropy of S', line=dict(color='blue')),
                            go.Scatter(y=entropy_I, mode='lines', name='Entropy of I', line=dict(color='red')),
                            go.Scatter(y=entropy_R, mode='lines', name='Entropy of R', line=dict(color='green'))
                        ],
                        'layout': go.Layout(
                            title='Entropy of Separate Statuses Over Time',
                            xaxis={'title': 'Step'},
                            yaxis={'title': 'Entropy'},
                            # legend={'x': 1, 'y': 1}
                            legend=dict(x=0.01, y=0.99, bordercolor="Black", borderwidth=1) # Adjust these values as needed
                        )
                    }
                ),
                dcc.Graph(
                    id='combined-entropy',
                    figure={
                        'data': [
                            go.Scatter(y=combined_entropy, mode='lines', name='Combined Entropy', line=dict(color='purple'))
                        ],
                        'layout': go.Layout(
                            title='Entropy of All Statuses Over Time',
                            xaxis={'title': 'Step'},
                            yaxis={'title': 'Entropy'},
                            # legend={'x': 1, 'y': 1}
                            legend=dict(x=0.01, y=0.99, bordercolor="Black", borderwidth=1) # Adjust these values as needed
                        )
                    }
                )
            ], style={'width': '50%', 'display': 'inline-block'}), # Inline style for embedded right column
        ], style={'display': 'flex', 'width': '50%'}), # Style for the right column to display its children inline
    ], style={'display': 'flex'}), # This ensures that the divs are placed side by side
    # Interval component to trigger updates
    dcc.Interval(
        id='interval-component',
        interval=1000, # in milliseconds
        n_intervals=0
    )
])
# Define callback to update network plot based on interval and interval index
@app.callback(
    [Output('network-graph', 'figure'),
     Output('interval-index', 'data')],
    [Input('interval-component', 'n_intervals')],
    [State('interval-index', 'data')]
)
def update_network_plot_and_index(n_intervals, interval_index):
    """
    Advance the snapshot animation by one frame per interval tick.

    Redraws the network from status_record and bumps the stored index,
    clamped to the last available snapshot; otherwise leaves the figure
    untouched via dash.no_update.
    """
    # Check if it's time to update based on interval index
    if n_intervals == interval_index:
        # Increment interval index
        new_index = min(interval_index + 1, len(status_record) - 1)
        # NOTE(review): at the first tick (n_intervals == 0) this indexes
        # status_record[-1], i.e. the *last* snapshot — confirm intended.
        return plot_networkx_graph(graph, status_record[n_intervals - 1]), new_index
    else:
        # Return previous plot and interval index
        return dash.no_update, interval_index
# Run the app (Dash development server) when executed directly
if __name__ == '__main__':
    app.run_server(debug=True)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/visualization/entropy_demo.py | xflow/visualization/entropy_demo.py | # -*- coding: utf-8 -*-
"""IJCAI Demo.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Rslj_mz0mLHeSsNpEqWY7bneliHBmLhD
"""
# !pip install -U dash==1.19.0
# !pip install --upgrade dash werkzeug
# !pip install ndlib
import dash
from dash import dcc, html
import plotly.graph_objs as go
from dash.dependencies import Input, Output
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import random
import scipy.stats
from scipy.stats import entropy
# Builder for the demo network topology
def create_small_world_graph(n=1500, k=5, p=0.3):
    """
    Build a Watts-Strogatz small-world graph.

    n: number of nodes
    k: each node is wired to its k nearest neighbors in a ring
    p: probability that each edge is rewired
    """
    return nx.watts_strogatz_graph(n, k, p)
# Function for the SIR model simulation
def SIR_simulation(graph, beta=0.5, gamma=0.05, initial_infected=1, max_steps=50):
    """
    Simulate a discrete-time SIR epidemic on a graph.

    graph: graph exposing .nodes(), .neighbors() and .edges()
    beta: per-contact infection probability per step
    gamma: per-step recovery probability for an infected node
    initial_infected: number of initially infected nodes (sampled uniformly)
    max_steps: maximum number of steps to simulate

    Returns (S, I, R, SI, IR, SR, SS, II, RR): per-step compartment sizes
    and per-step edge counts keyed by the status pair of each edge's
    endpoints.
    """
    # Initialize statuses. list(...) is required because random.sample
    # rejects non-sequence populations (e.g. a networkx NodeView) on 3.11+.
    infected = set(random.sample(list(graph.nodes()), initial_infected))
    # Seeds belong to I only; removing them from S keeps the compartments
    # disjoint (previously the initially infected were also counted in S).
    susceptible = set(graph.nodes()) - infected
    recovered = set()
    # Records for plotting
    S, I, R = [], [], []
    SI, IR, SR, SS, II, RR = [], [], [], [], [], []
    for _ in range(max_steps):
        # Update statuses
        new_infected = set()
        new_recovered = set()
        # Check infection spread
        for i in infected:
            for neighbor in graph.neighbors(i):
                if neighbor in susceptible and random.random() < beta:
                    new_infected.add(neighbor)
        # Update recovered
        for i in infected:
            if random.random() < gamma:
                new_recovered.add(i)
        # Update sets
        infected |= new_infected
        infected -= new_recovered
        recovered |= new_recovered
        susceptible -= new_infected
        # Update records
        S.append(len(susceptible))
        I.append(len(infected))
        R.append(len(recovered))
        # Edge counts
        ss, ii, rr, si, ir, sr = 0, 0, 0, 0, 0, 0
        for u, v in graph.edges():
            u_status = 'S' if u in susceptible else 'I' if u in infected else 'R'
            v_status = 'S' if v in susceptible else 'I' if v in infected else 'R'
            if u_status == 'S' and v_status == 'S': ss += 1
            elif u_status == 'I' and v_status == 'I': ii += 1
            elif u_status == 'R' and v_status == 'R': rr += 1
            elif (u_status == 'S' and v_status == 'I') or (u_status == 'I' and v_status == 'S'): si += 1
            elif (u_status == 'I' and v_status == 'R') or (u_status == 'R' and v_status == 'I'): ir += 1
            elif (u_status == 'S' and v_status == 'R') or (u_status == 'R' and v_status == 'S'): sr += 1
        SI.append(si)
        IR.append(ir)
        SR.append(sr)
        SS.append(ss)
        II.append(ii)
        RR.append(rr)
        # End if no infected left
        if not infected:
            break
    return S, I, R, SI, IR, SR, SS, II, RR
# Function to calculate entropy
def calculate_entropy(counts):
    """Shannon entropy (in bits) of a count distribution; 0 for an all-zero input."""
    total = sum(counts)
    if not total:
        return 0
    return entropy([count / total for count in counts], base=2)
def plot_networkx_graph(graph, status=None):
    """
    Render the graph as a Plotly figure, coloring nodes by SIR status.

    graph: graph to draw (positions come from a fresh spring layout).
    status: optional dict with 'S', 'I', 'R' collections; node membership
        decides the color. When omitted, all nodes are drawn gray.

    Returns a plotly Figure containing edge, node and legend traces.
    """
    pos = nx.spring_layout(graph) # Generate positions for all nodes
    # Initialize node color list
    node_color = []
    if status is None:
        node_color = ['gray' for node in graph.nodes()] # Default color if no status is provided
    else:
        # Assign colors based on SIR status
        for node in graph.nodes():
            if node in status['S']:
                node_color.append('blue') # Color for Susceptible
            elif node in status['I']:
                node_color.append('red') # Color for Infected
            elif node in status['R']:
                node_color.append('green') # Color for Recovered
            else:
                node_color.append('black') # Color for nodes not in S, I, or R
    # Plotting edges (same as before)
    edge_x = []
    edge_y = []
    for edge in graph.edges():
        x0, y0 = pos[edge[0]]
        x1, y1 = pos[edge[1]]
        edge_x.append(x0)
        edge_x.append(x1)
        edge_x.append(None)
        edge_y.append(y0)
        edge_y.append(y1)
        edge_y.append(None)
    # Plotting nodes
    node_x = [pos[node][0] for node in graph.nodes()]
    node_y = [pos[node][1] for node in graph.nodes()]
    node_trace = go.Scatter(
        x=node_x, y=node_y,
        mode='markers',
        hoverinfo='text',
        marker=dict(
            showscale=False,
            color=node_color, # Use node_color list for coloring nodes
            size=10,
            line_width=2),
        showlegend=False # Set showlegend to False for the nodes trace
    )
    # Define legend labels
    legend_labels = ['Susceptible (S)', 'Infected (I)', 'Recovered (R)', 'Other']
    fig = go.Figure(data=[
        go.Scatter(x=edge_x, y=edge_y, line=dict(width=0.5, color='#888'), hoverinfo='none', mode='lines', showlegend=False),
        node_trace
    ],
    layout=go.Layout(
        showlegend=True,
        hovermode='closest',
        margin=dict(b=0, l=0, r=0, t=0),
        xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
        yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
        legend=dict(x=0.01, y=0.99, borderwidth=1, itemsizing='constant', itemwidth=30,
                    font=dict(family="Courier", size=12, color="black")),
        legend_title_text='SIR Status',
        legend_traceorder='normal',
        legend_bgcolor='LightSteelBlue',
        legend_borderwidth=2,
        legend_bordercolor='Black',
        legend_itemsizing='constant',
        legend_itemwidth=30,
        legend_font=dict(family="Courier", size=12, color="black"),
        legend_title=dict(text="SIR Status", font=dict(family="Courier", size=12, color="black"))
    ))
    # Add legend items with consistent styling (dummy single-point traces)
    for color, label in zip(['blue', 'red', 'green', 'black'], legend_labels):
        fig.add_trace(go.Scatter(x=[None], y=[None], mode='markers',
                                 marker=dict(size=10, color=color),
                                 showlegend=True, name=label))
    return fig
# Generate a small-world graph
graph = create_small_world_graph()
# SIR model simulation (per-step compartment sizes and edge counts)
S, I, R, SI, IR, SR, SS, II, RR = SIR_simulation(graph)
# Combine SIR status information into a dictionary
# NOTE(review): S, I and R are per-step *counts*, so these are sets of
# integers rather than node ids; plot_networkx_graph treats them as node-id
# sets, so the coloring below is likely unintended — verify.
status = {'S': set(S), 'I': set(I), 'R': set(R)}
# Entropy calculations (bits, via calculate_entropy)
entropy_S = [calculate_entropy([s, sum(I) - i, sum(R) - r]) for s, i, r in zip(S, I, R)]
entropy_I = [calculate_entropy([sum(S) - s, i, sum(R) - r]) for s, i, r in zip(S, I, R)]
entropy_R = [calculate_entropy([sum(S) - s, sum(I) - i, r]) for s, i, r in zip(S, I, R)]
combined_entropy = [calculate_entropy([s, i, r]) for s, i, r in zip(S, I, R)]
combined_SR_entropy = [calculate_entropy([s + r, i]) for s, i, r in zip(S, I, R)]  # S and R merged into one bin
# Eigenvalues of graph Laplacian and adjacency matrix
# laplacian_eigenvalues = nx.laplacian_spectrum(graph)
# adjacency_eigenvalues = nx.adjacency_eigenvalues(graph)
L = nx.normalized_laplacian_matrix(graph)
laplacian_eigenvalues = np.linalg.eigvals(L.toarray())
adjacency_eigenvalues = nx.adjacency_spectrum(graph)
# Initialize Dash app
app = dash.Dash(__name__)
# Dash app layout with two columns: the static network drawing on the left,
# four precomputed time-series charts on the right (no callbacks here).
app.layout = html.Div([
    html.H1('Network Analysis with SIR Model'),
    # Row for content
    html.Div([
        # Left column for textual content or additional information
        html.Div([
            # Generate the plot with SIR status information
            dcc.Graph(id='network-graph', figure=plot_networkx_graph(graph, status))
            # dcc.Graph(id='network-graph', figure=plot_networkx_graph(graph))
        # ], className="six columns"),
        ], style={'width': '50%', 'display': 'inline-block'}), # Inline style for left column
        # Right column with embedded two columns
        html.Div([
            # Embedded left column within the right column
            html.Div([
                dcc.Graph(
                    id='sum-of-energy',
                    figure={
                        'data': [
                            go.Scatter(y=np.array(SI) + np.array(IR), mode='lines', name='Neighbor-Energy'),
                            go.Scatter(y=I, mode='lines', name='Self-Energy')
                        ],
                        'layout': go.Layout(
                            title='Sum of Energy Over Time',
                            xaxis={'title': 'Step'},
                            yaxis={'title': 'Number of Edges'},
                            legend=dict(x=0.01, y=0.99, bordercolor="Black", borderwidth=1) # Adjust these values as needed
                        )
                    }
                ),
                dcc.Graph(
                    id='sir-model',
                    figure={
                        'data': [
                            go.Scatter(y=S, mode='lines', name='Susceptible'),
                            go.Scatter(y=I, mode='lines', name='Infected'),
                            go.Scatter(y=R, mode='lines', name='Recovered')
                        ],
                        'layout': go.Layout(
                            title='SIR Model Over Time',
                            xaxis={'title': 'Step'},
                            yaxis={'title': 'Number of Nodes'},
                            legend=dict(x=0.01, y=0.99, bordercolor="Black", borderwidth=1) # Adjust these values as needed
                        )
                    }
                ),
            ], style={'width': '50%', 'display': 'inline-block'}), # Inline style for embedded left column
            # Embedded right column within the right column
            html.Div([
                dcc.Graph(
                    id='entropy-separate-statuses',
                    figure={
                        'data': [
                            go.Scatter(y=entropy_S, mode='lines', name='Entropy of S', line=dict(color='blue')),
                            go.Scatter(y=entropy_I, mode='lines', name='Entropy of I', line=dict(color='red')),
                            go.Scatter(y=entropy_R, mode='lines', name='Entropy of R', line=dict(color='green'))
                        ],
                        'layout': go.Layout(
                            title='Entropy of Separate Statuses Over Time',
                            xaxis={'title': 'Step'},
                            yaxis={'title': 'Entropy'},
                            # legend={'x': 1, 'y': 1}
                            legend=dict(x=0.01, y=0.99, bordercolor="Black", borderwidth=1) # Adjust these values as needed
                        )
                    }
                ),
                dcc.Graph(
                    id='combined-entropy',
                    figure={
                        'data': [
                            go.Scatter(y=combined_entropy, mode='lines', name='Combined Entropy', line=dict(color='purple'))
                        ],
                        'layout': go.Layout(
                            title='Entropy of All Statuses Over Time',
                            xaxis={'title': 'Step'},
                            yaxis={'title': 'Entropy'},
                            # legend={'x': 1, 'y': 1}
                            legend=dict(x=0.01, y=0.99, bordercolor="Black", borderwidth=1) # Adjust these values as needed
                        )
                    }
                )
            ], style={'width': '50%', 'display': 'inline-block'}), # Inline style for embedded right column
        ], style={'display': 'flex', 'width': '50%'}), # Style for the right column to display its children inline
    ], style={'display': 'flex'}) # This ensures that the divs are placed side by side
])
# Run the app (Dash development server) when executed directly
if __name__ == '__main__':
    app.run_server(debug=True)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/ibm.py | xflow/method/ibm.py | import networkx as nx
import numpy as np
import ndlib
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
import statistics as s
import random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
import random
from xflow.diffusion.SI import SI
from xflow.diffusion.IC import IC
from xflow.diffusion.LT import LT
# random
# baselines: simulation based
# greedy
def greedy(g, config, budget, seeds, rounds=100, model='SI', beta=0.1):
    """Greedy influence blocking: iteratively remove the node whose deletion
    minimizes the simulated spread from `seeds`.

    Parameters
    ----------
    g : networkx graph
    config : ndlib ModelConfig holding per-edge 'threshold' weights
    budget : int, number of blocker nodes to select
    seeds : list, seed nodes of the diffusion process
    rounds : int, Monte-Carlo repetitions per evaluation
    model : str, one of 'IC', 'LT', 'SI'
    beta : float, SI infection rate

    Returns
    -------
    list of the selected blocker nodes
    """
    selected = []
    candidates = list(g.nodes())
    for _ in range(budget):
        # Renamed from `min`/`index`: `min` shadowed the builtin, and the
        # inner removal loop used to shadow the candidate variable `node`.
        best_score = float('inf')
        best_node = -1
        for candidate in candidates:
            # Rebuild a weighted copy per evaluation, because we mutate it
            # below by deleting nodes.
            g_greedy = g.__class__()
            g_greedy.add_nodes_from(g)
            g_greedy.add_edges_from(g.edges)
            for a, b in g_greedy.edges():
                g_greedy[a][b]['weight'] = config.config["edges"]['threshold'][(a, b)]
            # Remove the already-selected blockers plus the current candidate.
            for blocked in selected + [candidate]:
                g_greedy.remove_node(blocked)
            if (model == "IC"):
                result = IC(g_greedy, config, seeds, rounds)
            if (model == "LT"):
                result = LT(g_greedy, config, seeds, rounds)
            if (model == "SI"):
                result = SI(g_greedy, config, seeds, rounds, beta)
            # Keep the candidate with the smallest expected spread.
            if s.mean(result) < best_score:
                best_score = s.mean(result)
                best_node = candidate
        selected.append(best_node)
        candidates.remove(best_node)
    print(selected)
    return selected
# baselines: proxy based
# eigen centrality
def eigen(g, config, budget):
    """Select `budget` blockers by repeatedly removing the node with the
    highest eigenvector centrality (recomputed after each removal)."""
    # Work on a weighted copy so the caller's graph is left untouched.
    work = g.__class__()
    work.add_nodes_from(g)
    work.add_edges_from(g.edges)
    for u, v in work.edges():
        work[u][v]['weight'] = config.config["edges"]['threshold'][(u, v)]
    chosen = []
    for _ in range(budget):
        scores = nx.eigenvector_centrality_numpy(work)
        top = sorted(scores, key=scores.get, reverse=True)[0]
        chosen.append(top)
        work.remove_node(top)
    print(chosen)
    return chosen
# degree
def degree(g, config, budget):
    """Select `budget` blockers by repeatedly removing the node with the
    highest degree centrality (recomputed after each removal)."""
    work = g.__class__()
    work.add_nodes_from(g)
    work.add_edges_from(g.edges)
    for u, v in work.edges():
        work[u][v]['weight'] = config.config["edges"]['threshold'][(u, v)]
    chosen = []
    for _ in range(budget):
        centrality = nx.centrality.degree_centrality(work)
        top = sorted(centrality, key=centrality.get, reverse=True)[0]
        chosen.append(top)
        work.remove_node(top)
    print(chosen)
    return chosen
# pi
def pi(g, config, budget):
    """Select `budget` blockers ranked by the 'pi' influence proxy.

    Score vector: (1 - prod_{t=1..5}(1 - A^t)) @ 1, where A^t here is the
    ELEMENTWISE t-th power (np.power) of the weighted adjacency matrix.
    The top node is removed and scores are recomputed each round.
    """
    work = g.__class__()
    work.add_nodes_from(g)
    work.add_edges_from(g.edges)
    for u, v in work.edges():
        work[u][v]['weight'] = config.config["edges"]['threshold'][(u, v)]
    chosen = []
    for _ in range(budget):
        ordering = list(work.nodes())
        size = len(ordering)
        adj = nx.convert_matrix.to_numpy_array(work, nodelist=ordering)
        col_ones = np.ones((size, 1))
        full_ones = np.ones((size, size))
        survival = np.ones((size, size))
        for step in range(1, 6):
            survival = np.multiply(survival, full_ones - np.power(adj, step))
        reach = np.matmul(full_ones - survival, col_ones)
        scores = {ordering[idx]: reach[idx, 0] for idx in range(size)}
        top = sorted(scores, key=scores.get, reverse=True)[0]
        chosen.append(top)
        work.remove_node(top)
    print(chosen)
    return chosen
# sigma
def sigma(g, config, budget):
    """Select `budget` blockers ranked by the 'sigma' influence proxy.

    Score vector: (I + sum_{t=1..5} A^t) @ 1, where A^t here is the
    ELEMENTWISE t-th power (np.power) of the weighted adjacency matrix.
    The top node is removed and scores are recomputed each round.
    """
    g_greedy = g.__class__()
    g_greedy.add_nodes_from(g)
    g_greedy.add_edges_from(g.edges)
    for a, b in g_greedy.edges():
        weight = config.config["edges"]['threshold'][(a, b)]
        g_greedy[a][b]['weight'] = weight
    result = []
    for k in range(budget):
        nodes = list(g_greedy.nodes())
        n = len(nodes)
        ones = np.ones((n, 1))
        # BUG FIX: read the adjacency matrix from the weighted working copy
        # (g_greedy), not the original graph g, so the edge weights assigned
        # above are actually used (consistent with pi()). Also dropped the
        # unused F matrix.
        A = nx.convert_matrix.to_numpy_array(g_greedy, nodelist=nodes)
        # Copy: the in-place += below would otherwise mutate `ones` too.
        sigma_vec = ones.copy()
        for i in range(5):
            B = np.power(A, i + 1)
            sigma_vec += np.matmul(B, ones)
        value = {nodes[i]: sigma_vec[i, 0] for i in range(n)}
        selected = sorted(value, key=value.get, reverse=True)[0]
        result.append(selected)
        g_greedy.remove_node(selected)
    print(result)
    return result
def Netshield(g, config, budget):
    """Select `budget` blocker nodes with the NetShield heuristic.

    Nodes are scored by their contribution to the leading eigenvalue of the
    adjacency matrix ("shield value") and picked greedily, discounting nodes
    adjacent to already-picked ones.

    Returns a list of selected positions in g's node ordering (argmax
    indices), not necessarily node labels.
    """
    # Weighted working copy of the input graph.
    g_greedy = g.__class__()
    g_greedy.add_nodes_from(g)
    g_greedy.add_edges_from(g.edges)
    for a, b in g_greedy.edges():
        weight = config.config["edges"]['threshold'][(a, b)]
        g_greedy[a][b]['weight'] = weight
    A = nx.adjacency_matrix(g_greedy)  # sparse; uses the 'weight' attribute
    # eigh returns eigenvalues in ascending order, so [-1] is the leading pair.
    lam, u = np.linalg.eigh(A.toarray())
    lam = list(lam)
    lam = lam[-1]
    u = u[:, -1]
    # Eigenvector sign is arbitrary; work with magnitudes.
    u = np.abs(np.real(u).flatten())
    # Base shield value per node: 2 * lambda * u_i^2.
    v = (2 * lam * np.ones(len(u))) * np.power(u, 2)
    nodes = []
    for i in range(budget):
        B = A[:, nodes]  # adjacency columns of already-selected nodes
        b = B * u[nodes]
        # Penalize neighbors of the current selection; mask the selection
        # itself with -1 so argmax cannot re-pick it.
        score = v - 2 * b * u
        score[nodes] = -1
        nodes.append(np.argmax(score))
    print(nodes)
    return nodes
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/im.py | xflow/method/im.py | import networkx as nx
import numpy as np
import ndlib
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
import statistics as s
import random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
from random import uniform, seed
from collections import Counter
import operator
import copy
from xflow.diffusion.SI import SI
from xflow.diffusion.IC import IC
from xflow.diffusion.LT import LT
# random
# baselines: simulation based
# greedy
def greedy(g, config, budget, rounds=100, model='SI', beta=0.1):
    """Greedy influence maximization: repeatedly add the node with the
    largest simulated marginal spread.

    Parameters
    ----------
    g : networkx graph
    config : ndlib ModelConfig holding per-edge 'threshold' weights
    budget : int, number of seed nodes to select
    rounds : int, Monte-Carlo repetitions per evaluation
    model : str, one of 'IC', 'LT', 'SI'
    beta : float, SI infection rate

    Returns
    -------
    list of the selected seed nodes
    """
    selected = []
    candidates = list(g.nodes())
    for i in range(budget):
        # Renamed from `max`/`index`: `max` shadowed the builtin.
        best_spread = 0
        best_node = -1
        for node in candidates:
            seeds = selected + [node]
            if (model == "IC"):
                result = IC(g, config, seeds, rounds)
            elif (model == "LT"):
                result = LT(g, config, seeds, rounds)
            elif (model == "SI"):
                result = SI(g, config, seeds, rounds, beta)
            if s.mean(result) > best_spread:
                best_spread = s.mean(result)
                best_node = node
        selected.append(best_node)
        candidates.remove(best_node)
    print(selected)
    return selected
def celf(g, config, budget, rounds=100, model='SI', beta=0.1):
    """CELF influence maximization (lazy greedy).

    Computes every node's singleton spread once, then lazily re-evaluates
    only the current top of the queue until it survives a re-sort.

    Returns the list of selected seed nodes.
    """
    candidates = list(g.nodes())
    # Initial marginal gain of each node as a singleton seed set.
    # FIX: pass `rounds` through to the diffusion functions (as celfpp does).
    if (model == "IC"):
        marg_gain = [s.mean(IC(g, config, [node], rounds)) for node in candidates]
    elif (model == "LT"):
        marg_gain = [s.mean(LT(g, config, [node], rounds)) for node in candidates]
    elif (model == "SI"):
        marg_gain = [s.mean(SI(g, config, [node], rounds, beta)) for node in candidates]
    # Sorted (node, gain) queue, best first.
    Q = sorted(zip(candidates, marg_gain), key=lambda x: x[1], reverse=True)
    # Select the first node and remove it from the queue.
    selected, spread, Q = [Q[0][0]], Q[0][1], Q[1:]
    # Find the next budget-1 nodes with the CELF lazy re-evaluation loop.
    for _ in range(budget - 1):
        check = False
        while not check:
            current = Q[0][0]
            # BUG FIX: the closing parenthesis was misplaced, so `rounds`
            # (and `beta`) were passed to statistics.mean instead of the
            # diffusion function — a TypeError at runtime.
            if (model == "IC"):
                Q[0] = (current, s.mean(IC(g, config, selected + [current], rounds)) - spread)
            elif (model == "LT"):
                Q[0] = (current, s.mean(LT(g, config, selected + [current], rounds)) - spread)
            elif (model == "SI"):
                Q[0] = (current, s.mean(SI(g, config, selected + [current], rounds, beta)) - spread)
            Q = sorted(Q, key=lambda x: x[1], reverse=True)
            # Lazy-greedy acceptance: the node stayed on top after re-sorting.
            check = Q[0][0] == current
        selected.append(Q[0][0])
        # BUG FIX: spread is cumulative — accumulate the marginal gain
        # (as celfpp does) instead of overwriting spread with it.
        spread += Q[0][1]
        Q = Q[1:]
    print(selected)
    return selected
def celfpp(g, config, budget, rounds=100, model='SI', beta=0.1):
    """CELF++ influence maximization (lazy greedy with last-seed caching).

    Like CELF, but additionally skips re-simulation when the seed set has
    not changed since a node's gain was last computed.
    """
    nodes = list(g.nodes())
    # Marginal gain of each node as a singleton seed set.
    if (model == "IC"):
        gains = [s.mean(IC(g, config, [v], rounds)) for v in nodes]
    elif (model == "LT"):
        gains = [s.mean(LT(g, config, [v], rounds)) for v in nodes]
    elif (model == "SI"):
        gains = [s.mean(SI(g, config, [v], rounds, beta)) for v in nodes]
    # (node, gain) queue sorted best-first.
    queue = sorted(zip(nodes, gains), key=lambda pair: pair[1], reverse=True)
    selected = [queue[0][0]]
    spread = queue[0][1]
    queue = queue[1:]
    last_seed = selected[0]
    for _ in range(budget - 1):
        while True:
            head, cached_gain = queue[0]
            if head == last_seed:
                # Seed set unchanged since this gain was computed: reuse it.
                fresh_gain = cached_gain
            else:
                if (model == "IC"):
                    fresh_gain = s.mean(IC(g, config, selected + [head], rounds)) - spread
                elif (model == "LT"):
                    fresh_gain = s.mean(LT(g, config, selected + [head], rounds)) - spread
                elif (model == "SI"):
                    fresh_gain = s.mean(SI(g, config, selected + [head], rounds, beta)) - spread
            queue[0] = (head, fresh_gain)
            queue = sorted(queue, key=lambda pair: pair[1], reverse=True)
            if queue[0][0] == head:
                break  # head survived the re-sort: it is the true next best
        selected.append(queue[0][0])
        spread += queue[0][1]
        last_seed = queue[0][0]
        queue = queue[1:]
    print(selected)
    return selected
# baselines: proxy based
# eigen centrality
def eigen(g, config, budget):
    """Pick `budget` seeds by repeatedly taking (and removing) the node with
    the top eigenvector centrality."""
    # Weighted scratch copy; the caller's graph is never modified.
    scratch = g.__class__()
    scratch.add_nodes_from(g)
    scratch.add_edges_from(g.edges)
    for edge_a, edge_b in scratch.edges():
        scratch[edge_a][edge_b]['weight'] = config.config["edges"]['threshold'][(edge_a, edge_b)]
    picks = []
    while len(picks) < budget:
        centrality = nx.eigenvector_centrality_numpy(scratch)
        best = sorted(centrality, key=centrality.get, reverse=True)[0]
        picks.append(best)
        scratch.remove_node(best)
    print(picks)
    return picks
# degree
def degree(g, config, budget):
    """Pick `budget` seeds by repeatedly taking (and removing) the node with
    the top degree centrality."""
    scratch = g.__class__()
    scratch.add_nodes_from(g)
    scratch.add_edges_from(g.edges)
    for edge_a, edge_b in scratch.edges():
        scratch[edge_a][edge_b]['weight'] = config.config["edges"]['threshold'][(edge_a, edge_b)]
    picks = []
    while len(picks) < budget:
        centrality = nx.centrality.degree_centrality(scratch)
        best = sorted(centrality, key=centrality.get, reverse=True)[0]
        picks.append(best)
        scratch.remove_node(best)
    print(picks)
    return picks
# pi
def pi(g, config, budget):
    """Pick `budget` seeds ranked by the 'pi' influence proxy.

    Score vector: (1 - prod_{t=1..5}(1 - A^t)) @ 1, where A^t here is the
    ELEMENTWISE t-th power (np.power) of the weighted adjacency matrix.
    The top node is removed and scores are recomputed each round.
    """
    scratch = g.__class__()
    scratch.add_nodes_from(g)
    scratch.add_edges_from(g.edges)
    for edge_a, edge_b in scratch.edges():
        scratch[edge_a][edge_b]['weight'] = config.config["edges"]['threshold'][(edge_a, edge_b)]
    picks = []
    for _ in range(budget):
        ordering = list(scratch.nodes())
        size = len(ordering)
        adj = nx.convert_matrix.to_numpy_array(scratch, nodelist=ordering)
        col_ones = np.ones((size, 1))
        full_ones = np.ones((size, size))
        survival = np.ones((size, size))
        for step in range(1, 6):
            survival = np.multiply(survival, full_ones - np.power(adj, step))
        reach = np.matmul(full_ones - survival, col_ones)
        scores = {ordering[idx]: reach[idx, 0] for idx in range(size)}
        best = sorted(scores, key=scores.get, reverse=True)[0]
        picks.append(best)
        scratch.remove_node(best)
    print(picks)
    return picks
# sigma
def sigma(g, config, budget):
    """Pick `budget` seeds ranked by the 'sigma' influence proxy.

    Score vector: (I + sum_{t=1..5} A^t) @ 1, where A^t here is the
    ELEMENTWISE t-th power (np.power) of the weighted adjacency matrix.
    The top node is removed and scores are recomputed each round.
    """
    g_greedy = g.__class__()
    g_greedy.add_nodes_from(g)
    g_greedy.add_edges_from(g.edges)
    for a, b in g_greedy.edges():
        weight = config.config["edges"]['threshold'][(a, b)]
        g_greedy[a][b]['weight'] = weight
    result = []
    for k in range(budget):
        nodes = list(g_greedy.nodes())
        n = len(nodes)
        ones = np.ones((n, 1))
        # BUG FIX: read the adjacency matrix from the weighted working copy
        # (g_greedy), not the original graph g, so the edge weights assigned
        # above are actually used (consistent with pi()). Also dropped the
        # unused F matrix.
        A = nx.convert_matrix.to_numpy_array(g_greedy, nodelist=nodes)
        # Copy: the in-place += below would otherwise mutate `ones` too.
        sigma_vec = ones.copy()
        for i in range(5):
            B = np.power(A, i + 1)
            sigma_vec += np.matmul(B, ones)
        value = {nodes[i]: sigma_vec[i, 0] for i in range(n)}
        selected = sorted(value, key=value.get, reverse=True)[0]
        result.append(selected)
        g_greedy.remove_node(selected)
    print(result)
    return result
def Netshield(g, config, budget):
    """Pick `budget` seed nodes with the NetShield heuristic.

    Nodes are scored by their contribution to the leading eigenvalue of the
    adjacency matrix ("shield value") and picked greedily, discounting nodes
    adjacent to already-picked ones.

    Returns a list of selected positions in g's node ordering (argmax
    indices), not necessarily node labels.
    """
    # Weighted working copy of the input graph.
    g_greedy = g.__class__()
    g_greedy.add_nodes_from(g)
    g_greedy.add_edges_from(g.edges)
    for a, b in g_greedy.edges():
        weight = config.config["edges"]['threshold'][(a, b)]
        g_greedy[a][b]['weight'] = weight
    A = nx.adjacency_matrix(g_greedy)  # sparse; uses the 'weight' attribute
    # eigh returns eigenvalues in ascending order, so [-1] is the leading pair.
    lam, u = np.linalg.eigh(A.toarray())
    lam = list(lam)
    lam = lam[-1]
    u = u[:, -1]
    # Eigenvector sign is arbitrary; work with magnitudes.
    u = np.abs(np.real(u).flatten())
    # Base shield value per node: 2 * lambda * u_i^2.
    v = (2 * lam * np.ones(len(u))) * np.power(u, 2)
    nodes = []
    for i in range(budget):
        B = A[:, nodes]  # adjacency columns of already-selected nodes
        b = B * u[nodes]
        # Penalize neighbors of the current selection; mask the selection
        # itself with -1 so argmax cannot re-pick it.
        score = v - 2 * b * u
        score[nodes] = -1
        nodes.append(np.argmax(score))
    print(nodes)
    return nodes
# IMRank
# https://github.com/Braylon1002/IMTool
def IMRank(g, config, budget):
    """
    IMRank: iterate Linear Feedback Allocation (LFA) until the node ranking
    reaches a fixed point, then return the top `budget` nodes.

    Returns a list of node positions in g's adjacency ordering.
    """
    # BUG FIX: convert to a plain ndarray. adjacency_matrix().todense()
    # yields np.matrix, for which `matrix[j][i]` in LFA performs row
    # indexing twice (failing for i > 0) instead of selecting entry (j, i).
    adjacency_matrix = np.asarray(nx.adjacency_matrix(g).todense())
    # Row-normalize; keepdims keeps the (n, 1) shape so broadcasting divides
    # each ROW by its own sum. Zero rows (isolated nodes) divide by 1.
    row_sums = adjacency_matrix.sum(axis=1, keepdims=True)
    row_sums[row_sums == 0] = 1
    adjacency_matrix = adjacency_matrix / row_sums
    start = time.perf_counter()  # retained for parity; elapsed time unused
    t = 0
    r0 = [i for i in range(len(adjacency_matrix))]
    r = [0 for i in range(len(adjacency_matrix))]
    # Iterate until two consecutive rankings coincide.
    while True:
        t = t + 1
        r = LFA(adjacency_matrix)
        r = np.argsort(-np.array(r))
        if operator.eq(list(r0), list(r)):
            break
        r0 = copy.copy(r)
    # Top nodes up to the budget.
    selected = r[:budget].tolist()
    print(selected)
    return selected
# baselines: sketch based
#RIS
# https://github.com/Braylon1002/IMTool
def RIS(g, config, budget, rounds=100):
    """Reverse Influence Sampling seed selection.

    Maintains a pool of `rounds` random reverse-reachable (RR) sets; each
    round picks the node covering the most RR sets, drops the sets it
    covers, and refills the pool.
    """
    rr_sets = [get_RRS(g, config) for _ in range(rounds)]
    chosen = []
    for _ in range(budget):
        # All node occurrences across the current pool of RR sets.
        members = [node for rr in rr_sets for node in rr]
        if members:
            # Node covering the most RR sets.
            winner = Counter(members).most_common()[0][0]
            chosen.append(winner)
            # Discard covered RR sets, then top the pool back up.
            rr_sets = [rr for rr in rr_sets if winner not in rr]
            while len(rr_sets) < rounds:
                rr_sets.append(get_RRS(g, config))
    print(chosen)
    return (chosen)
# def IMM(g, config, budget, rounds=100, model='SI', beta=0.1):
# l = 1
# epsilon = 0.1
# l = l * (1 + np.log(2) / np.log(len(g.nodes()))) # Update l
# k = budget
# R = Sampling(g, config, epsilon, l, model, rounds, beta)
# S = NodeSelection(R, k)
# print(S)
# return S
# def Sampling(g, config, epsilon, l, model='SI', rounds=100, beta=0.1):
# R = []
# n = len(g.nodes())
# LB = 1
# eps_prime = np.sqrt(2) * epsilon
# for i in range(1, int(np.log2(n))):
# x = n / (2 ** i)
# theta_i = (l / eps_prime ** 2) * np.log(n) / x
# while len(R) <= theta_i:
# v = random.choice(list(g.nodes()))
# R.append(get_RRS(g, config))
# S_i = NodeSelection(R, int(x)) # Changed budget to int(x) here
# if n * len(S_i) >= (1 + eps_prime) * x:
# LB = n * len(S_i) / (1 + eps_prime)
# break
# theta = (l / (epsilon ** 2)) * np.log(n) / LB
# while len(R) <= theta:
# v = random.choice(list(g.nodes()))
# R.append(get_RRS(g, config))
# return R
# def NodeSelection(R, k):
# S = []
# RR_sets_covered = set()
# for _ in range(k):
# max_spread = 0
# best_node = None
# for v in set().union(*R):
# if v not in S:
# RR_sets_can_cover = sum([v in RR for RR in R if tuple(RR) not in RR_sets_covered])
# if RR_sets_can_cover > max_spread:
# max_spread = RR_sets_can_cover
# best_node = v
# if best_node is not None:
# S.append(best_node)
# RR_sets_covered |= set([tuple(RR) for RR in R if best_node in RR])
# else:
# print("No suitable node found!")
# return S
# updates to the Mr vector occur simultaneously:
def LFA(matrix):
    """
    One pass of Linear Feedback Allocation over a row-normalized matrix.

    Walks the columns from last to first, moving rank mass from column node
    `col` to each node `row` pointing at it, and damping `col` accordingly.
    Returns the resulting rank list.
    """
    n = len(matrix)
    ranks = [1] * n
    updated = list(ranks)
    for col in range(n - 1, 0, -1):
        for row in range(col + 1):
            updated[row] += matrix[row][col] * ranks[col]
            updated[col] *= 1 - matrix[row][col]
        # Snapshot after finishing this column, as the next column reads it.
        ranks = list(updated)
    return ranks
def get_RRS(g, config):
    """
    Build one random reverse-reachable set (RRS).

    Each edge survives with probability equal to its configured threshold;
    the RRS is every node reachable (DFS preorder) from a random node of
    the surviving subgraph.
    """
    thresholds = config.config["edges"]['threshold']
    # One coin flip per edge, using its propagation probability.
    live_edges = [
        (u, v)
        for (u, v, _attrs) in g.edges(data=True)
        if uniform(0, 1) < thresholds[(u, v)]
    ]
    live_subgraph = g.edge_subgraph(live_edges)
    # Random starting point within the surviving subgraph.
    root = random.choice(list(live_subgraph.nodes()))
    return list(nx.dfs_preorder_nodes(live_subgraph, root))
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/__init__.py | xflow/method/__init__.py | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false | |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/sl.py | xflow/method/sl.py | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false | |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/__init__.py | xflow/method/cosasi/__init__.py | from .contagion import *
from .source_inference import *
from .benchmark import *
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/benchmark/benchmark.py | xflow/method/cosasi/benchmark/benchmark.py | import random
import os, sys
import json
sys.path.insert(0, os.getcwd())
import numpy as np
import networkx as nx
import cosasi
# Directory containing this module, guaranteed to end with a path separator.
MODULE_PATH = __file__[: -len("benchmark.py")]
MODULE_PATH = (
    MODULE_PATH
    if len(MODULE_PATH) > 0 and (MODULE_PATH[-1] == "/" or MODULE_PATH[-1] == "\\")
    else MODULE_PATH + "/"
)
# Sibling package holding the source-inference algorithms and their
# algorithm_details.json registry.
ALGORITHMS_PATH = MODULE_PATH[: -len("benchmark/")] + "source_inference/"
def _get_relevant_namespaces(
    source_type=None, information_type="single snapshot", epidemic_model=None
):
    """Retrieves the functional names of all applicable source inference algorithms.

    Parameters
    ----------
    source_type : str or None (optional)
        one of None, "single-source", or "multi-source"
        If None, we consider any source type
    information_type : str
        describes the information the source inference algorithm receives
        e.g. "single snapshot"
    epidemic_model : str or None (optional)
        specifies the epidemic model, e.g. SI, SIS, SIR
        if None, ignores this constraint

    Returns
    -------
    list of function objects for every matching, complete algorithm
    """
    valid_namespaces = []
    # FIX: json.load(open(...)) leaked the file handle; close it
    # deterministically with a context manager.
    with open(ALGORITHMS_PATH + "algorithm_details.json") as registry_file:
        algorithms_dict = json.load(registry_file)
    if source_type is None:
        source_type_iter = list(algorithms_dict.keys())
    else:
        source_type_iter = [source_type]
    for source_type in source_type_iter:
        for alg_name in algorithms_dict[source_type]:
            details = algorithms_dict[source_type][alg_name]
            if epidemic_model is not None:
                # Skip algorithms not declared for the requested model.
                if epidemic_model.lower() not in details["epidemic model"]:
                    continue
            if (
                details["information type"] == information_type
                and details["status"] == "complete"
            ):
                # NOTE: eval() turns the registry's namespace string into the
                # actual function object. The registry ships with the package
                # and is trusted input; never point it at user-supplied data.
                valid_namespaces.append(eval(details["namespace"]))
    return valid_namespaces
def _get_namespace_params(name, return_defaults=True):
"""Retrieves the names of the parameters and their default values.
Parameters
----------
name : function
function namespace
return_defaults : bool
if True, also includes
"""
arg_num = name.__code__.co_argcount
param_names = name.__code__.co_varnames[:arg_num]
if not return_defaults:
return param_names
params = {}
if isinstance(name.__defaults__, type(None)):
defaults = []
else:
defaults = list(name.__defaults__)[::-1]
param_names = param_names[::-1]
for i in range(len(param_names)):
if i < len(defaults):
arg = defaults[i]
else:
arg = ""
params[param_names[i]] = arg
return params
def _execute_algorithm_from_namespace(name, what_we_know):
    """Call algorithm `name`, supplying every known argument it accepts.

    Builds the call from the function's own signature, overriding defaults
    with matching entries from `what_we_know`. Raises ValueError if any
    required parameter remains unfilled.
    """
    call_args = _get_namespace_params(name=name, return_defaults=True)
    for key in what_we_know:
        if key in call_args:
            call_args[key] = what_we_know[key]
    # "" is the placeholder for a parameter with no default that
    # what_we_know did not supply.
    if "" in call_args.values():
        raise ValueError(
            "Insufficient arguments provided.",
            call_args,
            what_we_know,
            name,
            name.__defaults__,
        )
    return name(**call_args)
class BenchmarkFromDetails:
    """Benchmarking tool using provided class args to pass to algorithms when available.
    Parameters
    ----------
    true_source : node or tuple of nodes
        the true source of the diffusion process
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
    information_type : str
        describes the information the source inference algorithm receives
        e.g. "single snapshot"
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    t : int
        the timestep corresponding to I
    observer_dict : dict or None (optional)
        takes a dict of observers and the timestamps at which they become infected.
    epidemic_model : str or None (optional)
        specifies the epidemic model, e.g. SI, SIS, SIR
        if None, ignores this constraint
    number_sources : int or None (optional)
        if int, this is the hypothesized number of infection sources
        if None, estimates the number of sources
    infection_rate : float or None (optional)
        Inter-node infection efficiency. If a float, must be in [0,1]
        if None, ignores this parameter
    """
    def __init__(
        self,
        true_source,
        G,
        information_type,
        I=None,
        t=None,
        observer_dict=None,
        epidemic_model=None,
        number_sources=None,
        infection_rate=None,
    ):
        """Benchmarking tool using provided class args to pass to algorithms when available.
        Parameters
        ----------
        true_source : node or tuple of nodes
            the true source of the diffusion process
        G : NetworkX Graph
            The original graph the infection process was run on.
            I is a subgraph of G induced by infected vertices at observation time.
        information_type : str
            describes the information the source inference algorithm receives
            e.g. "single snapshot"
        I : NetworkX Graph
            The infection subgraph observed at a particular time step
        t : int
            the timestep corresponding to I
        observer_dict : dict or None (optional)
            takes a dict of observers and the timestamps at which they become infected.
        epidemic_model : str or None (optional)
            specifies the epidemic model, e.g. SI, SIS, SIR
            if None, ignores this constraint
        number_sources : int or None (optional)
            if int, this is the hypothesized number of infection sources
            if None, estimates the number of sources
        infection_rate : float or None (optional)
            Inter-node infection efficiency. If a float, must be in [0,1]
            if None, ignores this parameter
        """
        self.epidemic_model = epidemic_model
        self.number_sources = number_sources
        self.information_type = information_type
        # Validate the timestep before storing it.
        if isinstance(t, (int, float, type(None))):
            self.t = t
        else:
            raise ValueError("Time parameter must be an integer or float or None")
        self.observer_dict = observer_dict
        # Per-information-type requirements: snapshots need (I, t); the
        # observers mode needs the observer dict.
        if information_type == "single snapshot" and (
            isinstance(I, type(None)) or isinstance(t, type(None))
        ):
            raise ValueError(
                "If information type is single snapshot, we need the infection subgraph and its corresponding timestep"
            )
        if information_type == "observers" and (isinstance(observer_dict, type(None))):
            raise ValueError(
                "If the information type is observers, we need the observer_dict"
            )
        if isinstance(G, nx.classes.graph.Graph):
            self.G = G
        else:
            raise ValueError("G must be a NetworkX graph.")
        # Accept either a collection of source nodes or a single node.
        # NOTE(review): if true_source is a single NON-iterable node (e.g. an
        # int), `for v in true_source` raises TypeError before the single-node
        # branch is reached — confirm intended input types with callers.
        if all(v in G for v in true_source):
            self.true_source = true_source
        elif true_source in G:
            self.true_source = true_source
        else:
            raise ValueError("All members of true_source must be in G.")
        if isinstance(I, (nx.classes.graph.Graph, type(None))):
            self.I = I
        else:
            raise ValueError("I must be a NetworkX graph.")
        if (
            isinstance(infection_rate, float) and 0.0 <= infection_rate <= 1.0
        ) or isinstance(infection_rate, type(None)):
            self.infection_rate = infection_rate
        else:
            raise ValueError("Infection rate must be a float between 0 and 1.")
        # Resolve the applicable algorithms once, at construction time.
        self.namespaces = self.get_namespaces()
        return None
    def get_namespaces(self):
        """Finds all source localization algorithms applicable to the contagion task
        specified in the class constructor.
        """
        # Map the hypothesized source count onto the registry's categories.
        if isinstance(self.number_sources, type(None)):
            source_type = None
        elif self.number_sources > 1:
            source_type = "multi-source"
        elif self.number_sources == 1:
            source_type = "single-source"
        else:
            raise NotImplementedError
        namespaces = _get_relevant_namespaces(
            source_type=source_type,
            information_type=self.information_type,
            epidemic_model=self.epidemic_model,
        )
        return namespaces
    def go(self):
        """Runs all available algorithms with the information we have on hand."""
        result_dict = {}
        # Arguments offered to every algorithm; each one only receives the
        # subset its signature actually declares.
        what_we_know = {
            "G": self.G,
            "I": self.I,
            "observer_dict": self.observer_dict,
            "t": self.t,
            "number_sources": self.number_sources,
        }
        for alg in self.namespaces:
            result = _execute_algorithm_from_namespace(
                name=alg, what_we_know=what_we_know
            )
            # Key results by "<source type> <method name>" as reported by the
            # algorithm's own result object.
            inference_method = result.data["inference method"]["name"]
            source_type = result.data["inference method"]["source_type"]
            result_dict[source_type + " " + inference_method] = {
                "source result": result,
                "evaluation": result.evaluate(true_source=self.true_source),
            }
        return result_dict
class BenchmarkFromSimulation:
    """Benchmarking tool using provided simulation object to pass to algorithms when available.
    Parameters
    ----------
    contagion : cosasi.contagion.static_network_contagion.StaticNetworkContagion
        an already-run contagion object
    t : int
        the timestep corresponding to I
    information_type : str or None (optional)
        describes the information the source inference algorithm receives
        e.g. "single snapshot"
    observers : int or list
        If int, observers specifies the number of observation nodes
        If list, observers specifies the observation nodes directly
    """
    def __init__(self, contagion, t=None, information_type=None, observers=None):
        """Benchmarking tool using provided simulation object to pass to algorithms when available.
        Parameters
        ----------
        contagion : cosasi.contagion.static_network_contagion.StaticNetworkContagion
            an already-run contagion object
        t : int
            the timestep corresponding to I
        information_type : str or None (optional)
            describes the information the source inference algorithm receives
            e.g. "single snapshot"
        observers : int or list
            If int, observers specifies the number of observation nodes
            If list, observers specifies the observation nodes directly
        """
        # Extract everything BenchmarkFromDetails needs from the finished
        # simulation, then delegate all further work to it.
        true_source = contagion.get_source()
        if information_type == "single snapshot":
            if isinstance(t, type(None)):
                raise ValueError("If information type is snapshot, t is required")
            if not isinstance(t, int):
                raise ValueError("t must be an int")
            self.benchmarker = BenchmarkFromDetails(
                true_source=true_source,
                G=contagion.G,
                I=contagion.get_infected_subgraph(step=t),
                t=t,
                epidemic_model=contagion.model,
                number_sources=len(true_source),
                information_type=information_type,
                infection_rate=contagion.infection_rate,
            )
        elif information_type == "observers":
            if isinstance(observers, type(None)):
                raise ValueError(
                    "If information type is observers, the number of observers is required"
                )
            if not isinstance(observers, (int, list)):
                raise ValueError("observers must be an int or a list")
            self.benchmarker = BenchmarkFromDetails(
                true_source=true_source,
                G=contagion.G,
                observer_dict=contagion.get_observers(observers=observers),
                epidemic_model=contagion.model,
                number_sources=len(true_source),
                information_type=information_type,
                infection_rate=contagion.infection_rate,
            )
        else:
            raise NotImplementedError
        return None
    def go(self):
        """Runs all available algorithms with the information we have on hand."""
        # Delegates to the wrapped BenchmarkFromDetails instance.
        return self.benchmarker.go()
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/benchmark/__init__.py | xflow/method/cosasi/benchmark/__init__.py | from .benchmark import *
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/benchmark/tests/__init__.py | xflow/method/cosasi/benchmark/tests/__init__.py | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false | |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/benchmark/tests/test_benchmark.py | xflow/method/cosasi/benchmark/tests/test_benchmark.py | import os, sys
sys.path.insert(0, os.getcwd())
import pytest
from unittest import TestCase
import networkx as nx
import numpy as np
import cosasi
class Test_BenchmarkFromSimulation(TestCase):
    """Unit tests for cosasi.BenchmarkFromSimulation input validation and output shape."""
    def setUp(self):
        # Run a small SI contagion on a random graph once per test; tests
        # benchmark against this finished simulation.
        self.number_infected_init = 3
        self.sim_steps = 100
        self.G = nx.fast_gnp_random_graph(100, 0.25)
        self.contagion = cosasi.StaticNetworkContagion(
            G=self.G,
            model="si",
            infection_rate=0.01,
            number_infected=self.number_infected_init,
        )
        self.contagion.forward(self.sim_steps)
        self.t = 15
        return None
    def test_inputs_contagion(self):
        """A non-contagion object is rejected; a real contagion runs end-to-end."""
        with pytest.raises((AttributeError, ValueError)):
            cosasi.BenchmarkFromSimulation(
                contagion="BAD INPUT", information_type="single snapshot", t=self.t
            )
        benchmark = cosasi.BenchmarkFromSimulation(
            contagion=self.contagion, information_type="single snapshot", t=self.t
        )
        benchmark.go()
        assert True
    def test_inputs_information_type(self):
        """Unknown information types raise; observers mode validates its `observers` arg."""
        with pytest.raises(NotImplementedError):
            cosasi.BenchmarkFromSimulation(
                contagion=self.contagion, information_type="BAD INPUT", t=self.t
            )
        # observers mode without an `observers` argument
        with pytest.raises(ValueError):
            cosasi.BenchmarkFromSimulation(
                contagion=self.contagion, information_type="observers", t=self.t
            )
        # observers must be an int or a list
        with pytest.raises(ValueError):
            cosasi.BenchmarkFromSimulation(
                contagion=self.contagion,
                information_type="observers",
                t=self.t,
                observers="BAD INPUT",
            )
        benchmark = cosasi.BenchmarkFromSimulation(
            contagion=self.contagion,
            information_type="observers",
            t=self.t,
            observers=2,
        )
        # benchmark.go()
        assert True
        benchmark = cosasi.BenchmarkFromSimulation(
            contagion=self.contagion,
            information_type="observers",
            t=self.t,
            observers=[0, 1],
        )
        # benchmark.go()
        assert True
        benchmark = cosasi.BenchmarkFromSimulation(
            contagion=self.contagion, information_type="single snapshot", t=self.t
        )
        benchmark.go()
        assert True
    def test_inputs_t(self):
        """Non-integer and out-of-range timesteps are rejected."""
        with pytest.raises(ValueError):
            cosasi.BenchmarkFromSimulation(
                contagion=self.contagion,
                information_type="single snapshot",
                t="BAD INPUT",
            )
        with pytest.raises(ValueError):
            # invalid step
            cosasi.BenchmarkFromSimulation(
                contagion=self.contagion,
                information_type="single snapshot",
                t=self.sim_steps + 1,
            )
        benchmark = cosasi.BenchmarkFromSimulation(
            contagion=self.contagion, information_type="single snapshot", t=self.t
        )
        benchmark.go()
        assert True
    def test_go_output(self):
        """go() returns a dict of str keys mapping to per-algorithm result dicts."""
        benchmark = cosasi.BenchmarkFromSimulation(
            contagion=self.contagion, information_type="single snapshot", t=self.t
        )
        results = benchmark.go()
        assert isinstance(results, dict)
        results_keys = results.keys()
        assert all(isinstance(k, str) for k in results_keys)
        assert all(isinstance(results[k], dict) for k in results_keys)
        assert all(
            isinstance(
                results[k]["source result"],
                (
                    cosasi.source_inference.source_results.SingleSourceResult,
                    cosasi.source_inference.source_results.MultiSourceResult,
                ),
            )
            for k in results_keys
        )
class Test_BenchmarkFromDetails(TestCase):
    """Unit tests for `cosasi.BenchmarkFromDetails`: argument validation,
    namespace selection, and the structure of go()'s results."""
    def setUp(self):
        """Runs an SI contagion on a random graph and stores the snapshot,
        true source, and parameters shared by every test."""
        self.number_infected_init = 3
        self.sim_steps = 100
        self.G = nx.fast_gnp_random_graph(100, 0.25)
        self.contagion = cosasi.StaticNetworkContagion(
            G=self.G,
            model="si",
            infection_rate=0.01,
            number_infected=self.number_infected_init,
        )
        self.contagion.forward(self.sim_steps)
        self.t = 15
        self.I = self.contagion.get_infected_subgraph(step=self.t)
        self.true_source = self.contagion.get_source()
        return None
    def test_inputs_true_source(self):
        """An invalid `true_source` raises ValueError; a valid one runs cleanly."""
        with pytest.raises(ValueError):
            benchmark = cosasi.BenchmarkFromDetails(
                true_source="BAD INPUT",
                G=self.G,
                I=self.I,
                t=self.t,
                number_sources=self.number_infected_init,
                information_type="single snapshot",
            )
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=self.true_source,
            G=self.G,
            I=self.I,
            t=self.t,
            number_sources=self.number_infected_init,
            information_type="single snapshot",
        )
        benchmark.go()
        assert True
    def test_inputs_G(self):
        """An invalid graph `G` raises ValueError; a valid one runs cleanly."""
        with pytest.raises(ValueError):
            benchmark = cosasi.BenchmarkFromDetails(
                true_source=self.true_source,
                G="BAD INPUT",
                I=self.I,
                t=self.t,
                number_sources=self.number_infected_init,
                information_type="single snapshot",
            )
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=self.true_source,
            G=self.G,
            I=self.I,
            t=self.t,
            number_sources=self.number_infected_init,
            information_type="single snapshot",
        )
        benchmark.go()
        assert True
    def test_inputs_I(self):
        """An invalid infection subgraph `I` raises ValueError; a valid one runs cleanly."""
        with pytest.raises(ValueError):
            benchmark = cosasi.BenchmarkFromDetails(
                true_source=self.true_source,
                G=self.G,
                I="BAD INPUT",
                t=self.t,
                number_sources=self.number_infected_init,
                information_type="single snapshot",
            )
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=self.true_source,
            G=self.G,
            I=self.I,
            t=self.t,
            number_sources=self.number_infected_init,
            information_type="single snapshot",
        )
        benchmark.go()
        assert True
    def test_inputs_t(self):
        """A non-integer `t` raises ValueError; a valid one runs cleanly."""
        with pytest.raises(ValueError):
            benchmark = cosasi.BenchmarkFromDetails(
                true_source=self.true_source,
                G=self.G,
                I=self.I,
                t="BAD INPUT",
                number_sources=self.number_infected_init,
                information_type="single snapshot",
            )
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=self.true_source,
            G=self.G,
            I=self.I,
            t=self.t,
            number_sources=self.number_infected_init,
            information_type="single snapshot",
        )
        benchmark.go()
        assert True
    def test_inputs_number_sources(self):
        """A non-integer `number_sources` raises TypeError; a valid one runs cleanly."""
        with pytest.raises(TypeError):
            benchmark = cosasi.BenchmarkFromDetails(
                true_source=self.true_source,
                G=self.G,
                I=self.I,
                t=self.t,
                number_sources="BAD INPUT",
                information_type="single snapshot",
            )
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=self.true_source,
            G=self.G,
            I=self.I,
            t=self.t,
            number_sources=self.number_infected_init,
            information_type="single snapshot",
        )
        benchmark.go()
        assert True
    def test_get_namespaces(self):
        """Multi-source algorithm namespaces appear iff number_sources > 1."""
        # single source should not have any multisource algorithms
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=self.true_source,
            G=self.G,
            I=self.I,
            t=self.t,
            number_sources=1,
            information_type="single snapshot",
        )
        namespaces = [n.__name__ for n in benchmark.get_namespaces()]
        for n in namespaces:
            if "fast_multisource" in n:
                assert False
        assert True
        # multi-source should have multisource algorithms
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=self.true_source,
            G=self.G,
            I=self.I,
            t=self.t,
            number_sources=3,
            information_type="single snapshot",
        )
        namespaces = [n.__name__ for n in benchmark.get_namespaces()]
        temp = False
        for n in namespaces:
            if "fast_multisource" in n:
                temp = True
                break
        assert temp
    def test_go_output(self):
        """go() returns a dict of str -> dict, each with a source-result object.
        number_sources=None exercises the internal source-count estimation."""
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=self.true_source,
            G=self.G,
            I=self.I,
            t=self.t,
            number_sources=None,
            information_type="single snapshot",
        )
        results = benchmark.go()
        assert isinstance(results, dict)
        results_keys = results.keys()
        assert all(isinstance(k, str) for k in results_keys)
        assert all(isinstance(results[k], dict) for k in results_keys)
        assert all(
            isinstance(
                results[k]["source result"],
                (
                    cosasi.source_inference.source_results.SingleSourceResult,
                    cosasi.source_inference.source_results.MultiSourceResult,
                ),
            )
            for k in results_keys
        )
    def test_go_with_observers(self):
        """With information_type='observers', observer-based algorithms run and
        snapshot-only algorithms (e.g. LISN) are excluded from the results."""
        G = nx.fast_gnp_random_graph(100, 0.25)
        contagion = cosasi.StaticNetworkContagion(
            G=G, model="si", infection_rate=0.01, number_infected=1
        )
        contagion.forward(100)
        I = contagion.get_infected_subgraph(step=25)
        observers = contagion.get_observers(10)
        true_source = contagion.get_source()
        benchmark = cosasi.BenchmarkFromDetails(
            true_source=true_source,
            G=G,
            I=I,
            t=15,
            number_sources=len(true_source),
            information_type="observers",
            observer_dict=observers,
        )
        results = benchmark.go()
        assert "single-source earliest infection first" in results.keys()
        assert "single-source lisn" not in results.keys()
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/utils/estimators.py | xflow/method/cosasi/utils/estimators.py | import math
import random
import warnings
import scipy
import numpy as np
import networkx as nx
from sklearn.cluster import SpectralClustering
from .helpers import attack_degree, attack_degree_partition
from ..source_inference.multiple_source import netsleuth
def source_subgraphs(I, number_sources=2):
    """Subdivides the provided graph into specified number of subgraphs
    via spectral clustering.
    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    number_sources : int
        The hypothesized number of infection sources
    """
    # Spectral clustering triggers noisy sklearn warnings; silence them here.
    warnings.filterwarnings("ignore", category=UserWarning)
    warnings.filterwarnings("ignore", category=FutureWarning)
    node_list = list(I.nodes)
    adjacency = nx.adjacency_matrix(I)
    clustering = SpectralClustering(number_sources, affinity="precomputed", n_init=100)
    clustering.fit(scipy.sparse.csr_matrix(adjacency))
    labels = clustering.labels_
    # One induced subgraph per cluster label.
    return [
        I.subgraph([node_list[idx] for idx in np.where(labels == label)[0]])
        for label in set(labels)
    ]
def number_sources(
    I,
    number_sources=None,
    return_source_subgraphs=True,
    number_sources_method="eigengap",
    G=None,
):
    """Manages source subgraph estimation, mostly via spectral analysis and clustering.
    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    number_sources : int or None (optional)
        if int, this is the hypothesized number of infection sources
        if None, estimates the number of sources via Eigengap heuristic
    return_source_subgraphs : bool
        if True, returns subgraphs of I corresponding to each hypothesized infection source
        if False, does not return subgraphs of I corresponding to each hypothesized infection source
    number_sources_method : str
        method for estimating the number of sources. one of the following options:
            - "eigengap" : uses the Eigengap of the normalized graph Laplacian to estimate the number of clusters
            - "netsleuth" : runs the multi-source NETSLEUTH algorithm and reports the number of seeds
            - "chatter" : invokes a spectral method based on the Chatter algorithm
        if number_sources != None, this doesn't do anything
    G : NetworkX Graph (optional)
        the original network the contagion process was run on
        generally optional (e.g. not needed for eigengap), occassionally required (e.g. needed for netsleuth)
    Notes
    -----
    If the diffusion process is brief or observation is early, and infection sources
    are sufficiently sparse, then the infected subgraphs corresponding to each infection
    source may be the connected components of the input graph. This is described in
    Section 2.6 of [1]_.
    We estimate the number of infection sources by the minimum of the number of connected
    components and the Eigengap heuristic of the provided graph. The Eigengap heuristic
    is described in [2]_.
    With a hypothesized number of infection sources in hand, we partition the graph via
    spectral clustering to provide a list of subgraphs corresponding to each infection
    source [3]_.
    References
    ----------
    .. [1] L. Ying and K. Zhu,
        "Diffusion Source Localization in Large Networks"
        Synthesis Lectures on Communication Networks, 2018
    .. [2] U. von Luxburg,
        "A Tutorial on Spectral Clustering"
        Statistics and Computing, 2007
        https://link.springer.com/article/10.1007/s11222-007-9033-z
    .. [3] A. Damle and V. Minden and L. Ying
        "Simple, direct and efficient multi-way spectral clustering"
        Information and Inference: A Journal of the IMA, 2019
        https://academic.oup.com/imaiai/article/8/1/181/5045955
    """
    # Caller supplied the count explicitly: trust it.
    if isinstance(number_sources, int):
        if not return_source_subgraphs:
            return number_sources
        return number_sources, source_subgraphs(I, number_sources=number_sources)
    if number_sources is not None:
        raise ValueError("number_sources not recognized: must be an integer or None.")
    # Estimate the count with the requested method.
    method = number_sources_method.lower()
    if method == "eigengap":
        estimate = eigengap(I)
    elif method == "netsleuth":
        if G is None:
            raise ValueError("Need `G` for NETSLEUTH method.")
        netsleuth_result = netsleuth(I=I, G=G, hypotheses_per_step=1)
        estimate = len(netsleuth_result.topn(1)[0])
    elif method == "chatter":
        if G is None:
            raise ValueError("Need `G` for chatter method.")
        estimate = chatter(I, G)
    else:
        raise NotImplementedError
    # If the estimate does not exceed the component count, the components
    # themselves are the source subgraphs; otherwise cluster spectrally.
    if estimate <= nx.number_connected_components(I):
        subgraphs = [I.subgraph(c) for c in nx.connected_components(I)]
        estimate = len(subgraphs)
    else:
        subgraphs = source_subgraphs(I, number_sources=estimate)
    if return_source_subgraphs:
        return estimate, subgraphs
    return estimate
def chatter(I, G):
    """Estimates the number of sources of a graph diffusion process via the Chatter algorithm.
    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The graph the diffusion process was originally run on
    Notes
    -----
    Builds the chatter-frequency matrix of `I`, zeroes self-messages, and
    returns the index of the largest value of 1/(eigenvalue + 1) over the
    non-leading eigenvalues.
    NOTE(review): `G` is currently unused by this implementation (earlier
    frontier-based logic was removed); it is kept for interface compatibility
    with the other number-of-sources estimators.
    """
    freq = chatter_frequency(I)
    np.fill_diagonal(freq, 0)  # ignore messages a node sends to itself
    eigenvalues = np.linalg.eig(freq)[0]
    # Skip index 0 (leading eigenvalue); +1 restores the original indexing.
    return int(np.argmax((1 / (eigenvalues + 1))[1 : len(I)]) + 1)
def eigengap(G):
    """Returns the estimated number of clusters of G, based on the Eigengap
    of the normalized graph Laplacian.
    Parameters
    ----------
    G : NetworkX Graph
        The graph to analyze
    Notes
    -----
    The Eigengap heuristic is described in [1]_.
    References
    ----------
    .. [1] U. von Luxburg,
        "A Tutorial on Spectral Clustering"
        Statistics and Computing, 2007
        https://link.springer.com/article/10.1007/s11222-007-9033-z
    """
    warnings.filterwarnings("ignore", category=FutureWarning)
    laplacian = nx.normalized_laplacian_matrix(G).toarray()
    spectrum = np.linalg.eig(laplacian)[0]
    spectrum.sort()
    # The cluster-count estimate is the position of the largest gap
    # between consecutive sorted eigenvalues.
    return np.argmax(np.diff(spectrum)) + 1
def bits_encode_integer(n):
    """Estimates the number of bits required to encode an integer n>=1.
    Parameters
    ----------
    n : int
        an integer at least 1
    Notes
    -----
    Calculation is from Section 4.1 of [1]_: a universal code length of
    log(2.865064) plus the iterated logarithm terms of n while positive.
    References
    ----------
    .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
        "Efficiently spotting the starting points of an epidemic in a large graph"
        Knowledge and Information Systems, 2013
        https://link.springer.com/article/10.1007/s10115-013-0671-5
    """
    if n < 1:
        raise ValueError("n must be at least 1")
    total = math.log(2.865064)
    term = math.log(n)
    while term > 0:
        total += term
        term = math.log(term)
    return total
def bits_encode_seed(s, G):
    """Number of bits required to identify a seed set (hypothesized
    infection source set).
    Parameters
    ----------
    s : array-like
        seed set
    G : NetworkX Graph
        The original graph the infection process was run on.
    Notes
    -----
    Calculation is from Section 4.1 of [1]_: bits to encode the seed-set size
    plus bits to pick that many nodes out of ``len(G)``.
    References
    ----------
    .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
        "Efficiently spotting the starting points of an epidemic in a large graph"
        Knowledge and Information Systems, 2013
        https://link.springer.com/article/10.1007/s10115-013-0671-5
    """
    seed_size = len(s)
    position_bits = math.log(math.comb(len(G), seed_size))
    return bits_encode_integer(seed_size) + position_bits
def bits_encode_ripple(s, G, beta=0.01):
    """Total description length of a seed set and its corresponding maximum likelihood
    propagation ripple.
    Parameters
    ----------
    s : array-like
        seed set
    G : NetworkX Graph
        The original graph the infection process was run on.
    beta : float
        infection probability
    Notes
    -----
    Calculation is from Section 4.3 of [1]_.
    References
    ----------
    .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
        "Efficiently spotting the starting points of an epidemic in a large graph"
        Knowledge and Information Systems, 2013
        https://link.springer.com/article/10.1007/s10115-013-0671-5
    """
    def probability_infection(m_d, f_d, d):
        """Probability of m_d nodes being infected in a subset of the frontier.
        Parameters
        ----------
        m_d : int
            number of nodes infected
        f_d :
            number of nodes in a frontier subset F_d
        d : int
            degree
        Notes
        -----
        Calculation is from Section 4.2.3 of [1]_.
        References
        ----------
        .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
            "Efficiently spotting the starting points of an epidemic in a large graph"
            Knowledge and Information Systems, 2013
            https://link.springer.com/article/10.1007/s10115-013-0671-5
        """
        p_d = 1 - (1 - beta) ** d  # attack probability in the set
        return math.comb(f_d, m_d) * (p_d**m_d) * (1 - p_d) ** (f_d - m_d)
    def l_frontier(f, infected):
        """Calculates the code length for encoding the infectious in the frontier
        set at a snapshot of time.
        Parameters
        ----------
        f : array-like
            frontier set
        infected : array-like
            infected nodes
        Notes
        -----
        Calculation is Equation 3 from Section 4.2.4 of [1]_.
        References
        ----------
        .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
            "Efficiently spotting the starting points of an epidemic in a large graph"
            Knowledge and Information Systems, 2013
            https://link.springer.com/article/10.1007/s10115-013-0671-5
        """
        l = 0
        partition = attack_degree_partition(f, infected, G)
        for d in partition:
            f_d = len(partition[d])
            # BUG FIX: p_d was previously read from the enclosing scope, where
            # it held a stale value left over from the last ripple step.
            # Recompute the attack probability for this attack degree, matching
            # probability_infection above.
            p_d = 1 - (1 - beta) ** d
            # Binomial mode floor(p_d * (f_d + 1)), capped at f_d.
            m_d = int(min(math.floor(p_d * (f_d + 1)), f_d))
            if m_d == 0 or f_d == 0:
                continue
            l -= (
                math.log(probability_infection(m_d, f_d, d))
                + m_d * math.log(m_d / f_d)
                + (f_d - m_d) * math.log(1 - m_d / f_d)
            )
        return l
    # BUG FIX: previously `infected = s` aliased the caller's list, and the
    # `+=` below mutated the caller's seed set in place. Copy instead.
    infected = list(s)
    frontier = set([j for i in infected for j in G.neighbors(i) if j not in infected])
    bits_ripple = 0
    t = 0  # index starts at 0 per p. 42 / Section 4.2.2
    while len(frontier) > 0 and len(infected) < len(G):
        # ripple step, get new frontier
        partition = attack_degree_partition(frontier, infected, G)
        for d in partition:
            f_d = len(partition[d])
            p_d = 1 - (1 - beta) ** d  # attack probability in the set
            # NOTE(review): for small beta this makes n_d >= f_d, so the whole
            # frontier is infected each step. Possibly a typo for
            # (f_d + 1) * p_d (the binomial mode used in l_frontier), but
            # changing it could stall the ripple when n_d becomes 0 —
            # confirm against the NETSLEUTH paper before altering.
            n_d = math.floor((f_d / beta + 1) * p_d)
            infected += random.sample(partition[d], min(n_d, f_d))
        frontier = set(
            [j for i in infected for j in G.neighbors(i) if j not in infected]
        )
        infected = list(set(infected))
        bits_ripple += l_frontier(frontier, infected)
        t += 1
    return bits_encode_integer(t) + bits_ripple
def description_length(s, G, beta=0.01):
    """Implements a greedy heuristic to estimate the two-part minimal infection
    description length of a proposed set of infection sources.
    Parameters
    ----------
    s : array-like
        seed set
    G : NetworkX Graph
        The original graph the infection process was run on.
    beta : float
        infection probability
    Notes
    -----
    The minimal description length, as applied to source localization, is introduced
    in [1]_.
    References
    ----------
    .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
        "Efficiently spotting the starting points of an epidemic in a large graph"
        Knowledge and Information Systems, 2013
        https://link.springer.com/article/10.1007/s10115-013-0671-5
    """
    # BUG FIX: the ripple term previously hard-coded beta=0.01, silently
    # ignoring the caller-supplied infection probability.
    return bits_encode_seed(s, G) + bits_encode_ripple(s=s, G=G, beta=beta)
def chatter_frequency(G, t=None):
    """Implements the Chatter Algorithm described in Notes.
    Parameters
    ----------
    G : NetworkX Graph
        The graph to analyze
    t : int or None (optional)
        number of rounds to complete
        if None, the algorithm runs until every node's message is received by
        every other node at least 5 times.
    Notes
    -----
    Each node starts with a message bank consisting of its own ID.
    For `t` many rounds, each node broadcasts its message bank to its neighbors,
    and all nodes receiving messages append them to their own message bank.
    message_frequency[i][j] is the number of times i received j's message.
    A "naive"/pure message-passing formulation of this would be along the lines of:
    .. code-block:: python
        def chatter_distance_slow(G, t):
            messages = {i:[i] for i in G}
            for _ in range(t):
                new_messages = copy.deepcopy(messages)
                for i in range(len(G)):
                    for j in G.neighbors(i):
                        new_messages[j] += messages[i]
                messages = new_messages
            return messages
    where messages[i].count(j) is the number of times i received j's message. But
    this is very slow and easily re-written as matrix multiplication, as is done
    here.
    """
    warnings.filterwarnings("ignore", category=FutureWarning)
    # Dense adjacency matrix of G; message_frequency starts as the identity
    # (each node holds only its own message).
    A = nx.adjacency_matrix(G).toarray()
    message_frequency = scipy.sparse.identity(len(G)).toarray()
    if isinstance(t, type(None)):
        if not nx.is_connected(G):
            # Disconnected graphs can never satisfy the min >= 5 stopping rule
            # (no edges cross components), so fall back to a fixed t = len(G).
            return chatter_frequency(G, t=len(G))
        # Each round updates row i independently: freq_i <- freq_i + A @ freq_i,
        # i.e. freq_i <- (I + A) @ freq_i. Iterate until every entry reaches 5.
        while np.min(message_frequency) < 5:
            for i in range(len(G)):
                message_frequency[i] += A.dot(message_frequency[i])
    else:
        # Fixed number of rounds; same per-row update as above.
        for _ in range(t):
            for i in range(len(G)):
                message_frequency[i] += A.dot(message_frequency[i])
    return message_frequency
def chatter_distance(G, t, u=None, v=None, normalized=True):
    """Invokes the Chatter Algorithm/chatter frequency to obtain chatter distance,
    a graph topology metric.
    Parameters
    ----------
    G :NetworkX Graph
        The graph to analyze
    t : int
        number of rounds to complete
    u : node (optional)
        starting node. if not provided, we return an array of distances
    v : node (optional)
        end node. if not provided, we return an array of distances
    normalized : bool
        if True, all distances are scaled to have a max value of 1
    Notes
    -----
    The chatter distance between nodes `u` and `v` reflects the difficulty node `u`
    is expected to have in transmitting a message to node `v`.
    """
    # Distance is the elementwise reciprocal of the chatter frequency.
    distance = 1 / chatter_frequency(G, t)
    if normalized:
        distance = distance / np.max(distance)
    if u is None and v is None:
        return distance
    if v is None:
        return distance[u]
    if u is None:
        # NOTE(review): distance[:] is a copy of the full array, so this is
        # row v rather than column v — equivalent only if the matrix is
        # symmetric; confirm before relying on it for directed variants.
        return distance[:][v]
    return distance[u][v]
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/utils/helpers.py | xflow/method/cosasi/utils/helpers.py | import operator
import functools
import numpy as np
import networkx as nx
def list_product(l):
    """Returns the product the elements of a list.
    Parameters
    ----------
    l : list
        list of elements you want to multiply
    """
    # Multiply left-to-right starting from the identity, matching
    # reduce(mul, l, 1): an empty list yields 1.
    result = 1
    for factor in l:
        result *= factor
    return result
def longest_list(l):
    """Returns the longest list in an array-like of lists.
    Parameters
    ----------
    l : list or array-like
        stores the lists of interest
    """
    # Strict > keeps the first of several equally-long entries, matching
    # max(l, key=len); an empty input raises ValueError like max() does.
    winner = None
    for candidate in l:
        if winner is None or len(candidate) > len(winner):
            winner = candidate
    if winner is None:
        raise ValueError("longest_list() arg is an empty sequence")
    return winner
def longest_list_len(l):
    """Returns the length of the longest list in an array-like
    of lists.
    Parameters
    ----------
    l : list or array-like
        stores the lists of interest
    """
    # Equivalent to max(map(len, l)); raises ValueError on an empty input.
    return len(max(l, key=len))
def soft_eccentricity(G, v):
    """A more flexible calculation of vertex eccentricity.
    Parameters
    ----------
    G : NetworkX graph
        A graph
    v : node
        Return value of specified node
    Notes
    -----
    If `G` is connected and has more than one node, this is regular eccentricity. If `G`
    has only one node, returns 1. If `G` is disconnected, returns infinite eccentricity.
    """
    # A singleton graph gets eccentricity 1 by convention.
    if len(G) == 1:
        return 1
    # Eccentricity is undefined across components; report it as infinite.
    if nx.number_connected_components(G) > 1:
        return np.inf
    return nx.eccentricity(G, v=v)
def attack_degree(infected, G, v):
    """Calculates the attack degree of node v in G.
    Parameters
    ----------
    infected : array-like
        infected nodes in G at a particular time step
    G : NetworkX graph
        A graph
    v : node
        Return value of specified node
    Notes
    -----
    Attack degree is the number of infected neighbor nodes a node has.
    Attack degree is defined in Section 4.2.2 of [1]_.
    References
    ----------
    .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
        "Efficiently spotting the starting points of an epidemic in a large graph"
        Knowledge and Information Systems, 2013
        https://link.springer.com/article/10.1007/s10115-013-0671-5
    """
    # Count neighbors of v that are currently infected.
    return sum(1 for neighbor in G.neighbors(v) if neighbor in infected)
def attack_degree_partition(node_set, infected, G):
    """Divides a node_set into disjoint subsets based on their attack degree.
    Parameters
    ----------
    node_set : array-like
        nodes to partition, e.g. a frontier set
    infected : array-like
        infected nodes in G at a particular time step
    G : NetworkX graph
        A graph
    Notes
    -----
    Attack degree and this partitioning method are outlined in Section 4.2.2 of [1]_.
    References
    ----------
    .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
        "Efficiently spotting the starting points of an epidemic in a large graph"
        Knowledge and Information Systems, 2013
        https://link.springer.com/article/10.1007/s10115-013-0671-5
    """
    # Bucket each node under its attack degree, creating buckets lazily.
    partitions = {}
    for node in node_set:
        degree = attack_degree(infected, G, node)
        partitions.setdefault(degree, []).append(node)
    return partitions
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/utils/__init__.py | xflow/method/cosasi/utils/__init__.py | from .helpers import *
from . import estimators
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/utils/tests/test_estimators.py | xflow/method/cosasi/utils/tests/test_estimators.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import numpy as np
import cosasi
class TestEstimators(TestCase):
    """Unit tests for `cosasi.utils.estimators`."""
    def setUp(self):
        """Runs a short SI contagion and stores the graph and infection snapshot."""
        # BUG FIX: was the duplicated assignment `self.G = self.G = ...`.
        self.G = nx.gnp_random_graph(50, 0.2)
        contagion = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        contagion.forward(30)
        self.t = 25
        self.I = contagion.get_infected_subgraph(self.t)
        return None
    def test_source_subgraphs(self):
        """source_subgraphs returns exactly the requested number of subgraphs."""
        for i in range(1, 10):
            subgraphs = cosasi.utils.estimators.source_subgraphs(
                self.G, number_sources=i
            )
            assert len(subgraphs) == i
    def test_number_sources(self):
        """number_sources honors explicit counts, estimates consistently, and
        validates its inputs for every supported estimation method."""
        for method in ["eigengap", "netsleuth", "chatter"]:
            # check that number of sources matches when provided
            for number_sources in range(1, 5):
                n, subgraphs = cosasi.utils.estimators.number_sources(
                    I=self.I,
                    number_sources=number_sources,
                    return_source_subgraphs=True,
                    number_sources_method=method,
                    G=self.G,
                )
                for g in subgraphs:
                    assert type(g) == nx.Graph
                assert n == number_sources
            # estimating numbers as expected
            n, subgraphs = cosasi.utils.estimators.number_sources(
                I=self.I,
                number_sources=None,
                return_source_subgraphs=True,
                number_sources_method=method,
                G=self.G,
            )
            m = cosasi.utils.estimators.number_sources(
                I=self.I,
                number_sources=None,
                return_source_subgraphs=False,
                number_sources_method=method,
                G=self.G,
            )
            assert n == m and (isinstance(n, np.int64) or isinstance(n, int))
            # just return number_sources back
            n = 2
            m = cosasi.utils.estimators.number_sources(
                I=self.I,
                number_sources=n,
                return_source_subgraphs=False,
                number_sources_method=method,
                G=self.G,
            )
            assert n == m
        # check error-handling
        with pytest.raises(NotImplementedError):
            assert cosasi.utils.estimators.number_sources(
                self.I,
                number_sources=None,
                return_source_subgraphs=False,
                number_sources_method="BAD INPUT",
            )
        with pytest.raises(ValueError):
            # Need `G` for NETSLEUTH method
            assert cosasi.utils.estimators.number_sources(
                G=None,
                I=self.I,
                number_sources=None,
                return_source_subgraphs=False,
                number_sources_method="netsleuth",
            )
        with pytest.raises(ValueError):
            # Need `G` for chatter method
            assert cosasi.utils.estimators.number_sources(
                G=None,
                I=self.I,
                number_sources=None,
                return_source_subgraphs=False,
                number_sources_method="chatter",
            )
        with pytest.raises(ValueError):
            # number_sources must be an integer or None
            assert cosasi.utils.estimators.number_sources(
                G=self.G,
                I=self.I,
                number_sources="BAD INPUT",
                return_source_subgraphs=False,
                number_sources_method="chatter",
            )
    def test_eigengap(self):
        """eigengap returns an integer and detects two disjoint complete graphs."""
        assert isinstance(cosasi.utils.estimators.eigengap(self.G), np.int64)
        # two disjoint complete graphs should have a spectral gap of 2
        K = nx.complete_graph(10)
        H = nx.disjoint_union(K, K)
        assert cosasi.utils.estimators.eigengap(H) == 2
    def test_bits_encode_integer(self):
        """Code length is increasing in n, rejects n < 1, and matches log(2.865064) at n=1."""
        last = 0
        for i in range(-10, 10):
            if i < 1:
                with pytest.raises(ValueError):
                    cosasi.utils.estimators.bits_encode_integer(i)
            else:
                # number of bits should increase w/ integer
                bits = cosasi.utils.estimators.bits_encode_integer(i)
                assert bits > last
                last = bits
        assert cosasi.utils.estimators.bits_encode_integer(1) == pytest.approx(1.05259)
    def test_bits_encode_seed(self):
        """Encoding a seed set costs more than encoding its size alone."""
        seed = [1, 2, 3]
        assert cosasi.utils.estimators.bits_encode_seed(
            seed, self.G
        ) > cosasi.utils.estimators.bits_encode_integer(len(seed))
    def test_bits_encode_ripple(self):
        """Ripple description lengths are positive."""
        bits_ripple_1 = cosasi.utils.estimators.bits_encode_ripple(
            list(range(1)), self.G
        )
        bits_ripple_2 = cosasi.utils.estimators.bits_encode_ripple(
            list(range(5)), self.G
        )
        assert 0 < min(bits_ripple_1, bits_ripple_2)
    def test_description_length(self):
        """Total description length of a seed set is positive."""
        seed = [1, 2, 3]
        assert (
            min(
                cosasi.utils.estimators.description_length(seed, self.G),
                cosasi.utils.estimators.description_length(seed, self.G),
            )
            > 0
        )
    def test_chatter_frequency(self):
        """Frequency matrix is n-by-n with non-negative entries."""
        message_frequency = cosasi.utils.estimators.chatter_frequency(self.G, 5)
        assert message_frequency.size == len(self.G) * len(self.G)
        assert np.min(message_frequency) >= 0
    def test_chatter_distance(self):
        """Distances are non-negative, normalized into [0, 1], symmetric, and
        consistent between array and scalar access forms."""
        for t in [1, 5, None]:
            for G in [self.G, nx.disjoint_union(self.G, self.G)]:
                # distances are non-negative
                assert (
                    np.min(
                        cosasi.utils.estimators.chatter_distance(
                            G=G, t=t, normalized=False
                        )
                    )
                    >= 0
                )
                dist = cosasi.utils.estimators.chatter_distance(
                    G=G, t=t, normalized=True
                )
                if (
                    dist[0][1] != dist[0][1]
                    or np.max(dist) != np.max(dist)
                    or np.min(dist) != np.min(dist)
                ):
                    # NaN entries (x != x) — skip the value checks.
                    pass
                else:
                    assert dist[0][1] == cosasi.utils.estimators.chatter_distance(
                        G=G, t=t, u=0, v=1, normalized=True
                    )
                    # check normalization
                    assert 1 >= np.max(dist) >= np.min(dist) >= 0
                # array ops work right
                assert len(
                    cosasi.utils.estimators.chatter_distance(G=G, t=t, u=0)
                ) == len(G)
                assert len(
                    cosasi.utils.estimators.chatter_distance(self.G, 5, v=0)
                ) == len(G)
                # should be symmetric
                for i in range(len(G)):
                    for j in range(len(G)):
                        assert cosasi.utils.estimators.chatter_distance(
                            G=G, t=t, u=i, v=j, normalized=False
                        ) == cosasi.utils.estimators.chatter_distance(
                            G=G, t=t, u=j, v=i, normalized=False
                        )
    def test_chatter(self):
        """chatter returns a positive int bounded by the infected subgraph size."""
        result = cosasi.utils.estimators.chatter(self.I, self.G)
        assert isinstance(result, int)
        assert result > 0
        assert result <= len(self.I)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/utils/tests/__init__.py | xflow/method/cosasi/utils/tests/__init__.py | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false | |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/utils/tests/test_helpers.py | xflow/method/cosasi/utils/tests/test_helpers.py | import os, sys
sys.path.insert(0, os.getcwd())
import pytest
import networkx as nx
import numpy as np
import random
from cosasi import utils
def test_list_product():
    """Products accumulate correctly as elements are appended."""
    values = [1]
    assert utils.list_product(values) == 1
    values.append(2)
    assert utils.list_product(values) == 2
    values.append(-3)
    assert utils.list_product(values) == -6
    values.append(0)
    assert utils.list_product(values) == 0
    return None
def test_longest_list():
    """The longest of several nested lists is returned intact."""
    count = 10
    nested = [list(range(i)) for i in range(count)]
    assert utils.longest_list(nested) == list(range(count - 1))
def test_longest_list_len():
    """The reported length matches the longest nested list."""
    count = 10
    nested = [list(range(i)) for i in range(count)]
    assert utils.longest_list_len(nested) == count - 1
def test_soft_eccentricity():
    """Finite for connected graphs, infinite when disconnected, 1 for a singleton."""
    complete = nx.complete_graph(4)
    assert utils.soft_eccentricity(complete, 1) < np.inf
    disconnected = nx.disjoint_union(complete, complete)
    assert utils.soft_eccentricity(disconnected, 1) == np.inf
    singleton = nx.complete_graph(1)
    assert utils.soft_eccentricity(singleton, 0) == 1
def test_attack_degree():
    """In K4 with one infected node, every other node has attack degree 1."""
    graph = nx.complete_graph(4)
    for node in range(3):
        assert utils.attack_degree([3], graph, node) == 1
def test_attack_degree_partition():
    """Partition covers exactly the node set and is keyed by attack degree.

    BUG FIX: this function was named without the ``test_`` prefix, so pytest
    never collected or executed it.
    """
    G = nx.gnp_random_graph(50, 0.2)
    node_set = [1, 3, 4]
    infected = [1, 16, 17, 19, 24, 34, 36, 41, 43, 49]
    partition = utils.attack_degree_partition(node_set, infected, G)
    vals = []
    for v in partition.values():
        vals += v
    assert sorted(vals) == sorted(node_set)
    # BUG FIX: max(G.degree()) compared (node, degree) pairs lexicographically
    # and returned the degree of the highest-numbered node; take the maximum
    # degree itself instead.
    assert max(partition) <= max(d for _, d in G.degree())
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/contagion/static_network_contagion.py | xflow/method/cosasi/contagion/static_network_contagion.py | import random
import numpy as np
import operator
import networkx as nx
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
class StaticNetworkContagion:
"""A stochastic epidemic process defined on a static network.
Parameters
----------
G : NetworkX Graph
The network for the diffusion process to run on
model : str
Specifies the epidemic model. Currently handles the following diffusion models:
- SI (susceptible-infected)
- SIS (susceptible-infected-susceptible)
- SIR (susceptible-infected-recovered)
infection_rate : float
Inter-node infection efficiency
must be in [0, 1]
recovery_rate : float or None
The recovery rate
must be in [0, 1] (or None if diffusion model is SI)
fraction_infected : float or None
fraction of nodes to initialize as infected (selected uniformly at random)
if both fraction_infected and number_infected are None, initializes with 1 infected node
number_infected : float or None
number of nodes to initialize as infected (selected uniformly at random)
if both fraction_infected and number_infected are None, initializes with 1 infected node
seed : integer, random_state, or None (default)
random number generation state.
Notes
-----
A wrapper for `ndlib` with convenience utilities added.
"""
    def __init__(
        self,
        G,
        model="si",
        infection_rate=0.01,
        recovery_rate=None,
        fraction_infected=None,
        number_infected=None,
        seed=None,
        model_config=None # added by Zhiqian; optional pre-built ndlib Configuration
    ):
        """A stochastic epidemic process defined on a static network.
        Parameters
        ----------
        G : NetworkX Graph
            The network for the diffusion process to run on
        model : str
            Specifies the epidemic model. Currently handles the following diffusion models:
                SI
                SIS
                SIR
        infection_rate : float
            Inter-node infection efficiency
            must be in [0, 1]
        recovery_rate : float or None
            The recovery rate
            must be in [0, 1] (or None if diffusion model is SI)
        fraction_infected : float or None
            fraction of nodes to initialize as infected (selected uniformly at random)
            if both fraction_infected and number_infected are None, initializes with 1 infected node
        number_infected : float or None
            number of nodes to initialize as infected (selected uniformly at random)
            if both fraction_infected and number_infected are None, initializes with 1 infected node
        seed : integer, random_state, or None (default)
            random number generation state.
        model_config : ndlib ModelConfig.Configuration or None
            optional pre-built configuration; a fresh one is created when None
        Raises
        ------
        ValueError
            If G is not a NetworkX graph, a rate is out of range, or both
            fraction_infected and number_infected are supplied.
        Notes
        -----
        A wrapper for `ndlib` with convenience utilities added.
        """
        self.model = model.lower()  # model names are matched case-insensitively
        self.seed = seed
        self.model_config = model_config if model_config else mc.Configuration() # added by Zhiqian
        # Seed both the stdlib and numpy RNGs so runs are reproducible.
        if not isinstance(self.seed, type(None)):
            random.seed(self.seed)
            np.random.seed(self.seed)
        if isinstance(G, nx.classes.graph.Graph):
            self.G = G
        else:
            raise ValueError("G must be a NetworkX instance.")
        if isinstance(infection_rate, float) and 0.0 <= infection_rate <= 1.0:
            self.infection_rate = infection_rate
        else:
            raise ValueError("Infection rate must be a float between 0 and 1.")
        # A None recovery rate is allowed here; _init_sim rejects it for
        # models (SIR/SIS) that require one.
        if not recovery_rate or (
            isinstance(recovery_rate, float) and 0.0 <= recovery_rate <= 1.0
        ):
            self.recovery_rate = recovery_rate
        else:
            raise ValueError("Recovery rate must be a float between 0 and 1.")
        # fraction_infected and number_infected are mutually exclusive;
        # with neither given, default to a single initially-infected node.
        if fraction_infected and number_infected:
            raise ValueError(
                "User can only provide one of fraction_infected, number_infected."
            )
        elif not fraction_infected and not number_infected:
            self.fraction_infected = fraction_infected
            self.number_infected = 1
        else:
            self.fraction_infected = fraction_infected
            self.number_infected = number_infected
        self._init_sim()
        self.history = []  # iteration records appended by forward()
        return None
def _init_sim(self):
    """Initialize the ndlib diffusion model and its initial infected set.

    Builds the model configuration (infection/recovery parameters), seeds
    the initial infected compartment, and attaches the configured simulator
    to ``self.sim``.

    Raises
    ------
    ValueError
        If a recovery rate is required (SIR/SIS) but not provided.
    NotImplementedError
        If the diffusion model is unrecognized or no initialization
        scheme is available.
    """
    config = self.model_config  # added by Zhiqian: allow a caller-supplied config
    config.add_model_parameter("beta", self.infection_rate)
    if self.model == "sir":
        self.sim = ep.SIRModel(graph=self.G, seed=self.seed)
        if not self.recovery_rate:
            raise ValueError("Recovery rate must be defined for SIR model.")
        config.add_model_parameter("gamma", self.recovery_rate)
    elif self.model == "si":
        self.sim = ep.SIModel(graph=self.G, seed=self.seed)
    elif self.model == "sis":
        self.sim = ep.SISModel(graph=self.G, seed=self.seed)
        if not self.recovery_rate:
            raise ValueError("Recovery rate must be defined for SIS model.")
        config.add_model_parameter("lambda", self.recovery_rate)
    else:
        raise NotImplementedError("Diffusion model not recognized.")
    if self.number_infected:
        if not isinstance(self.seed, type(None)):
            random.seed(self.seed)
        infected = random.sample(range(len(self.G)), self.number_infected)
        config.add_model_initial_configuration("Infected", infected)
    elif self.fraction_infected:
        config.add_model_parameter("fraction_infected", self.fraction_infected)
    elif self.model_config:  # fix: was `self.mc`, an attribute that is never set
        # Fall back to whatever initial configuration the caller provided.
        config = self.model_config
    else:
        raise NotImplementedError
    self.sim.set_initial_status(config)
    return None
def forward(self, steps=100, verbose=False):
    """Advance the diffusion process and record the new iterations.

    Parameters
    ----------
    steps : int
        Number of simulation steps to execute.
    verbose : bool (default False)
        When True, return the accumulated simulation history.

    Notes
    -----
    May be called repeatedly; each call appends to the existing history.
    """
    new_iterations = self.sim.iteration_bunch(steps)
    self.history.extend(new_iterations)
    return self.history if verbose else None
def reset_sim(self):
    """Restore the simulator to its initialized state.

    Discards the recorded history; compartmental histories are not preserved.
    """
    self.sim.reset()
    self.history = []
    return None
def get_infected_indices(self, step=0):
    """Return the vertices in the infected compartment at the given step.

    Parameters
    ----------
    step : int
        Iteration step.

    Returns
    -------
    list

    Raises
    ------
    ValueError
        If the history does not yet extend to `step`.
    """
    if step >= len(self.history):
        raise ValueError(
            "Invalid step. Continue the simulation to reach this step."
        )
    node_list = list(self.G)
    net_infection = np.zeros(len(self.G))
    # Accumulate per-step status changes up to and including `step`:
    # status code 1 marks entry into the infected compartment, code 2
    # marks removal from it. A running total of 1 means currently infected.
    for record in self.history[: step + 1]:
        for node_idx, code in record["status"].items():
            if code == 1:
                net_infection[node_idx] += 1
            elif code == 2:
                net_infection[node_idx] -= 1
    return [node_list[i] for i in np.where(net_infection == 1)[0]]
def get_infected_subgraph(self, step=0):
    """Return the subgraph of the contact network induced by infected vertices.

    Parameters
    ----------
    step : int
        Iteration step.

    Returns
    -------
    NetworkX Graph

    Notes
    -----
    Connectivity of the result is only guaranteed in the SI model.
    """
    infected = set(self.get_infected_indices(step=step))
    healthy = set(self.G.nodes) - infected
    subgraph = self.G.copy()
    subgraph.remove_nodes_from(healthy)
    return subgraph
def get_observers(self, observers=1):
    """Return infection timestamps for a set of observer nodes.

    Observers record the step number at which they become infected.

    Parameters
    ----------
    observers : int or list
        If int, the number of observer nodes, sampled uniformly at random.
        If list, the observer nodes themselves.

    Returns
    -------
    dict
        Maps each observer node to its infection step(s).

    Raises
    ------
    ValueError
        If the simulation has not been run yet.

    Notes
    -----
    If self.model == "sis", nodes may be reinfected, so observers record a list
    of the timestamps at which they are infected. Otherwise, observers record
    one timestamp (step number) only; if an observer is never infected during
    the simulation history, its timestamp is recorded as infinity.
    """
    if not self.history:
        raise ValueError(
            "Simulation must be run before retrieving observer information."
        )
    # SIS observers collect every reinfection step; other models record a
    # single scalar, defaulting to infinity when never infected. (Fix: the
    # old code gave SIR observers a list placeholder, contradicting the
    # documented contract.)
    sis = self.model == "sis"
    if isinstance(observers, int):
        if not isinstance(self.seed, type(None)):
            random.seed(self.seed)
            np.random.seed(self.seed)
        # list(...) because random.sample requires a sequence: NetworkX's
        # NodeView is set-like, and set support was removed in Python 3.11.
        observer_nodes = random.sample(list(self.G.nodes), observers)
    elif isinstance(observers, list):
        observer_nodes = observers
    else:
        raise NotImplementedError
    # Build a fresh placeholder per observer. (Fix: the old code bound one
    # shared list() to every key, so all SIS observers aliased the same
    # timestamp list.)
    observer_dict = {i: ([] if sis else np.inf) for i in observer_nodes}
    for step in range(len(self.history)):
        status = self.history[step]["status"]
        for j in observer_dict:
            if status.get(j) == 1:
                if sis:
                    observer_dict[j].append(step)
                else:
                    observer_dict[j] = step
    return observer_dict
def get_source(self, return_subgraph=False):
    """Return the vertices marked infected at initialization (step 0).

    Parameters
    ----------
    return_subgraph : bool
        If True, return the induced subgraph of seed vertices.
        If False, return a list of their indices.

    Returns
    -------
    list or NetworkX Graph

    Raises
    ------
    ValueError
        If return_subgraph is not a bool.
    """
    if not isinstance(return_subgraph, bool):
        raise ValueError("return_subgraph param must be a bool")
    getter = (
        self.get_infected_subgraph if return_subgraph else self.get_infected_indices
    )
    return getter(step=0)
def get_frontier(self, step=0):
    """Return the frontier set at `step`: infected vertices adjacent to at
    least one uninfected vertex.

    Parameters
    ----------
    step : int
        Iteration step.

    Returns
    -------
    set

    Notes
    -----
    In the SI model, the frontier consists of nodes likely to have been
    infected last by the given timestep.
    """
    infected = self.get_infected_indices(step=step)
    susceptible = [node for node in self.G if node not in infected]
    # Boundary of the susceptible set taken within the infected set:
    # exactly those infected nodes with a susceptible neighbor.
    return nx.node_boundary(G=self.G, nbunch1=susceptible, nbunch2=infected)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/contagion/__init__.py | xflow/method/cosasi/contagion/__init__.py | from .static_network_contagion import *
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/contagion/tests/__init__.py | xflow/method/cosasi/contagion/tests/__init__.py | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false | |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/contagion/tests/test_static_network_contagion.py | xflow/method/cosasi/contagion/tests/test_static_network_contagion.py | import os, sys
import collections
sys.path.insert(0, os.getcwd())
import pytest
from unittest import TestCase
import networkx as nx
import numpy as np
import cosasi
class Test_StaticNetworkContagion(TestCase):
    """Unit tests for cosasi.StaticNetworkContagion on a random G(n, p) graph."""

    def setUp(self):
        # Shared SI contagion fixture, advanced by sim_steps so tests that
        # only read history do not need to re-run the simulation.
        self.number_infected_init = 10
        self.sim_steps = 10
        self.G = nx.fast_gnp_random_graph(200, 0.25)
        self.contagion = cosasi.StaticNetworkContagion(
            G=self.G,
            model="si",
            infection_rate=0.1,
            number_infected=self.number_infected_init,
        )
        self.contagion.forward(self.sim_steps)
        return None

    def test_argument_exceptions(self):
        """Constructor validation for G, model, rates, and seeding arguments."""
        with pytest.raises(ValueError):
            # G must be a NetworkX graph
            cosasi.StaticNetworkContagion(G="BAD INPUT", model="si", infection_rate=0.1)
        with pytest.raises(NotImplementedError):
            # model must be "sir", "si", or "sis"
            cosasi.StaticNetworkContagion(
                G=self.G, model="BAD INPUT", infection_rate=0.1
            )
        with pytest.raises(ValueError):
            # infection_rate must be between 0 and 1
            cosasi.StaticNetworkContagion(G=self.G, model="si", infection_rate=10)
        with pytest.raises(ValueError):
            # recovery_rate must be between 0 and 1
            cosasi.StaticNetworkContagion(
                G=self.G, model="si", infection_rate=0.1, recovery_rate=10
            )
        with pytest.raises(ValueError):
            # can only provide one of fraction_infected, number_infected
            cosasi.StaticNetworkContagion(
                G=self.G,
                model="si",
                infection_rate=0.1,
                fraction_infected=0.1,
                number_infected=self.number_infected_init,
            )
        for m in ["sir", "sis"]:
            with pytest.raises(ValueError):
                # SIR/SIS require a recovery rate
                cosasi.StaticNetworkContagion(G=self.G, model=m, infection_rate=0.1)
            # with a recovery rate, construction succeeds
            cosasi.StaticNetworkContagion(
                G=self.G, model=m, infection_rate=0.1, recovery_rate=0.05
            )

    def test_fraction_infected(self):
        # Initializing by fraction should seed the same number of nodes as
        # the equivalent absolute count.
        contagion = cosasi.StaticNetworkContagion(
            G=self.G,
            model="si",
            infection_rate=0.1,
            fraction_infected=self.number_infected_init / len(self.G),
        )
        contagion.forward(5)
        assert self.number_infected_init == len(contagion.get_source())

    def test_get_infected_indices(self):
        assert len(self.contagion.get_infected_indices()) == self.number_infected_init
        # With neither fraction nor count supplied, exactly one node is seeded.
        temp_contagion = cosasi.StaticNetworkContagion(
            G=self.G,
            model="si",
            infection_rate=0.1,
            fraction_infected=None,
            number_infected=None,
        )
        temp_contagion.forward()
        assert len(temp_contagion.get_infected_indices()) == 1
        return None

    def test_forward(self):
        # One history entry per simulated step.
        assert len(self.contagion.history) == self.sim_steps
        return None

    def test_reset_sim(self):
        self.contagion.reset_sim()
        assert len(self.contagion.history) == 0
        # Re-run so later tests on the shared fixture still have history.
        self.contagion.forward(self.sim_steps)
        return None

    def test_get_infected_subgraph(self):
        sg = self.contagion.get_infected_subgraph(step=self.sim_steps - 1)
        assert isinstance(sg, nx.Graph)
        assert len(sg) == len(
            self.contagion.get_infected_indices(step=self.sim_steps - 1)
        )
        assert set(sg.nodes) == set(
            self.contagion.get_infected_indices(step=self.sim_steps - 1)
        )  # sets are unordered

    def test_get_observers(self):
        num_observers = 5
        self.contagion.forward(steps=100)
        observers = self.contagion.get_observers(observers=num_observers)
        assert len(observers) == num_observers  # check size
        for i in observers.keys():
            assert i in self.G.nodes
            # check types
            assert isinstance(observers[i], (int, float, type(None)))
            if isinstance(observers[i], float):
                # fix: was a bare comparison with no effect; in the SI model
                # a float timestamp can only be the never-infected sentinel
                assert observers[i] == np.inf
        return None

    def test_get_source(self):
        source_verts = self.contagion.get_source()
        assert isinstance(source_verts, list)
        assert len(source_verts) <= len(self.G)
        source_graph = self.contagion.get_source(return_subgraph=True)
        assert isinstance(source_graph, nx.Graph)
        assert set(source_graph.nodes) == set(source_verts)
        return None

    def test_get_frontier(self):
        s = 15
        G = nx.fast_gnp_random_graph(100, 0.25)
        contagion = cosasi.StaticNetworkContagion(
            G=G, model="si", infection_rate=0.01, number_infected=3
        )
        contagion.forward(500)
        I = contagion.get_infected_subgraph(step=s)
        frontier = contagion.get_frontier(step=s)
        assert isinstance(frontier, (list, set))  # basic type checking
        assert all(
            [v in I for v in frontier]
        )  # frontier is a subset of infection subgraph
        for i in I:
            if i not in frontier:
                # fix: every infected node outside the frontier must have all
                # of its neighbors infected (original referenced an undefined
                # loop variable `v` here, raising NameError when reached)
                assert all(nbr in I for nbr in G.neighbors(i))
        # frontier at step 0 should just be the initially-infected indices
        frontier_0 = contagion.get_frontier(step=0)
        assert collections.Counter(frontier_0) == collections.Counter(
            contagion.get_source()
        )
        # when the graph is saturated/maximally-infected, frontier should be empty
        largest_cc_size = len(max(nx.connected_components(G), key=len))
        while len(contagion.get_infected_indices(step=s)) < largest_cc_size:
            s += 1
        assert contagion.get_frontier(step=s) == set()
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/__init__.py | xflow/method/cosasi/source_inference/__init__.py | from . import single_source
from . import multiple_source
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/source_results.py | xflow/method/cosasi/source_inference/source_results.py | """Generic objects for the result of single-source and multi-source localization.
All inference algorithms should return an instance of one of these classes.
"""
import json
from collections import Counter
from collections.abc import Iterable
import itertools
import numpy as np
import networkx as nx
# Directory containing this module, used to locate algorithm_details.json.
# Normalized so it always ends with a path separator.
MODULE_PATH = __file__[: -len("source_results.py")]
if not MODULE_PATH.endswith(("/", "\\")):
    MODULE_PATH = MODULE_PATH + "/"
def node_set_distance(s1, s2, G):
    """Implements a distance measure between vertex sets (of possibly different sizes).

    Elements of ``s1`` are matched positionally against each permutation of
    ``s2`` and scored by shortest-path length; leftover elements of the longer
    set each contribute the distance to the nearest element of the other set.
    The minimum total over all permutations of ``s2`` is returned.

    Parameters
    ----------
    s1 : array-like
        first vertex set
    s2 : array-like
        second vertex set
    G : NetworkX Graph
        graph to search on

    Returns
    -------
    int
        minimum summed shortest-path distance over all matchings
    """
    perm_scores = {}
    # Normalize scalar inputs to one-element lists so both sets are indexable.
    if isinstance(s1, Iterable):
        s1 = list(s1)
    else:
        s1 = [s1]
    if isinstance(s2, Iterable):
        s2 = list(s2)
    else:
        s2 = [s2]
    for s2_perm in itertools.permutations(s2):
        perm_scores[s2_perm] = 0
        # Positionally match the first min(|s1|, |s2|) elements.
        for i in range(min(len(s1), len(s2))):
            perm_scores[s2_perm] += nx.shortest_path_length(
                G, source=s1[i], target=s2_perm[i]
            )
        # NOTE(review): the loops below reuse the matching loop's final `i`
        # (= min(len(s1), len(s2)) - 1), so the last matched element is scored
        # a second time, and if either input set is empty `i` is unbound
        # (NameError). The repo's unit tests encode this exact behavior
        # (e.g. node_set_distance([5, 6, 7], 2, G) == 7) — confirm intent
        # before changing.
        if len(s1) > len(s2):
            # Unmatched s1 vertices: add distance to the nearest s2 vertex.
            for j in range(i, len(s1)):
                min_add = np.inf
                for s in s2_perm:
                    d = nx.shortest_path_length(G, source=s1[j], target=s)
                    if d < min_add:
                        min_add = d
                perm_scores[s2_perm] += min_add
        if len(s2) > len(s1):
            # Unmatched s2 vertices: add distance to the nearest s1 vertex.
            for j in range(i, len(s2_perm)):
                min_add = np.inf
                for s in s1:
                    d = nx.shortest_path_length(G, source=s2_perm[j], target=s)
                    if d < min_add:
                        min_add = d
                perm_scores[s2_perm] += min_add
    return min(perm_scores.values())
class SourceResult:
    """Abstract response object for the result of a source inference algorithm.

    Parameters
    ----------
    source_type : str
        either "single-source" or "multi-source"
    inference_method : str
        name of the source localization algorithm used
    scores : dict
        per-item scores for ranking, retrieval, etc.
    G : NetworkX Graph
        The original graph the infection process was run on.
    algorithm_details : bool
        if True, attaches reference information about the source inference
        algorithm used (loaded from algorithm_details.json)
    reverse : bool (default True)
        if True, ranks items from highest score to lowest;
        if False, ranks items from lowest score to highest
    """

    def __init__(
        self,
        source_type,
        inference_method,
        scores,
        G,
        algorithm_details=True,
        reverse=True,
    ):
        """Initialize the result object; see the class docstring for parameters.

        Raises
        ------
        ValueError
            If G is not a NetworkX graph, or source_type is not
            "single-source"/"multi-source".
        """
        if not isinstance(G, nx.classes.graph.Graph):
            raise ValueError("G must be a NetworkX graph.")
        else:
            self.G = G
        source_type = source_type.lower()
        if source_type not in ["single-source", "multi-source"]:
            raise ValueError("Source type must be single- or multi-source.")
        self.data = {
            "scores": scores,
            "inference method": {"name": inference_method, "source_type": source_type},
            "G": G,
        }
        if algorithm_details:
            # Attach curated per-algorithm metadata shipped alongside this
            # module. Fix: open the file with a context manager so the
            # handle is not leaked.
            with open(MODULE_PATH + "algorithm_details.json") as fh:
                algorithms = json.load(fh)
            for k in algorithms[source_type][inference_method]:
                self.data["inference method"][k] = algorithms[source_type][
                    inference_method
                ][k]
        self.reverse = reverse
        return None

    def rank(self):
        """Rank items by score (ordering controlled by ``self.reverse``).

        Returns
        -------
        list of item indices
        """
        scores = self.data["scores"]
        return sorted(scores, key=scores.get, reverse=self.reverse)

    def topn(self, n=1):
        """Return the top n item indices by rank.

        Rank can be highest-first (reverse==True) or lowest-first (reverse==False).

        Parameters
        ----------
        n : int
            number of item indices to return

        Returns
        -------
        list of item indices

        Raises
        ------
        ValueError
            If n is not an integer.
        """
        if not isinstance(n, int):
            raise ValueError("n must be an integer.")
        rank = self.rank()
        return rank[:n]

    def evaluate_solution_rank(self, true_source):
        """Find the rank of the true source, by the algorithm's scoring protocol.

        Parameters
        ----------
        true_source : graph index - str, int, etc.
            the actual source node
        """
        # Unwrap a singleton list/tuple so single-source results can be
        # looked up by the bare node index. (Fix: removed an unused local
        # that inspected topn(1) to no effect.)
        if isinstance(true_source, (list, tuple)) and len(true_source) == 1:
            true_source = true_source[0]
        # get_rank is provided by the Single-/MultiSourceResult subclasses.
        return self.get_rank(true_source, soft_rank=True)

    def evaluate_distance(self, true_source):
        """Shortest-path set distance from each scored hypothesis to the true source.

        Parameters
        ----------
        true_source : tuple
            the actual source set
        """
        eval_scores = {h: np.inf for h in self.data["scores"]}
        for s in eval_scores.keys():
            eval_scores[s] = node_set_distance(G=self.G, s1=s, s2=true_source)
        return eval_scores

    def evaluate(self, true_source):
        """Run evaluation algorithms and return a dictionary of results.

        Parameters
        ----------
        true_source : graph index - str, int, etc.
            the actual source node
        """
        dist = self.evaluate_distance(true_source=true_source)
        top_sol = self.topn(n=1)[0]
        rank = self.evaluate_solution_rank(true_source=true_source)
        evaluation_results = {
            "true source": true_source,
            "distance": {
                "top score's distance": {top_sol: dist[top_sol]},
                "all distances": dist,
            },
            "rank": rank,
            # normalized rank: fraction of hypotheses ranked at or above truth
            "rank %": rank / len(self.data["scores"]),
        }
        return evaluation_results
class SingleSourceResult(SourceResult):
    """Response object for the result of single-source inference.

    Parameters
    ----------
    inference_method : str
        name of the source localization algorithm used
    scores : dict
        per-node scores for ranking, retrieval, etc.
    algorithm_details : bool
        if True, includes relevant information about the source inference
        algorithm used
    reverse : bool (default True)
        if True, ranks items from highest score to lowest;
        if False, ranks items from lowest score to highest
    """

    def __init__(self, *args, **kwargs):
        """Initialize via the generic SourceResult constructor."""
        super().__init__(*args, **kwargs)
        return None

    def get_rank(self, v, soft_rank=False):
        """Return the rank of vertex `v` (1 = "best").

        Parameters
        ----------
        v : graph index - str, int, etc.
            vertex of interest
        soft_rank : bool
            if True and `v` is not among the hypotheses, return one more
            than the number of hypotheses instead of raising

        Returns
        -------
        int
        """
        ranking = self.rank()
        if soft_rank and v not in ranking:
            return len(ranking) + 1
        return ranking.index(v) + 1
class MultiSourceResult(SourceResult):
    """Response object for the result of multi-source inference.

    Parameters
    ----------
    inference_method : str
        name of the source localization algorithm used
    scores : dict
        per-item scores for ranking, retrieval, etc.
    algorithm_details : bool
        if True, includes relevant information about the source inference
        algorithm used
    reverse : bool (default True)
        if True, ranks items from highest score to lowest;
        if False, ranks items from lowest score to highest
    """

    def __init__(self, *args, **kwargs):
        """Initialize via the generic SourceResult constructor."""
        super().__init__(*args, **kwargs)
        return None

    def get_rank(self, s, soft_rank=False):
        """Return the rank of the provided node set (1 = "best").

        Parameters
        ----------
        s : list
            node set of graph indices
        soft_rank : bool
            if True and `s` is not among the hypotheses, return one more
            than the number of hypotheses instead of raising

        Returns
        -------
        int

        Raises
        ------
        ValueError
            If soft_rank is False and `s` is not among the hypotheses.
        """
        # Compare hypotheses as multisets so element order is irrelevant.
        target = Counter(s)
        position = 1
        for hypothesis in self.rank():
            if Counter(hypothesis) == target:
                break
            position += 1
        if not soft_rank and position > len(self.data["scores"]):
            raise ValueError("Proposed source set not found among top hypotheses.")
        return position
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/tests/test_source_results.py | xflow/method/cosasi/source_inference/tests/test_source_results.py | import os, sys
import pytest
import itertools
import random
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import networkx as nx
import numpy as np
import cosasi
from ..source_results import SourceResult, SingleSourceResult, MultiSourceResult, node_set_distance
def test_node_set_distance():
    """Exercise node_set_distance on the karate-club graph with scalar,
    mixed, iterable, and overlapping inputs."""
    G = nx.karate_club_graph()
    # scalar vs scalar
    assert node_set_distance(5, 2, G) == 2
    # scalar vs iterable, and iterable vs scalar
    assert node_set_distance(5, [2, 3], G) == 6
    assert node_set_distance([5, 6, 7], 2, G) == 7
    # iterable vs iterable
    assert node_set_distance([5, 6], [2, 3], G) == 4
    # identical and partially overlapping sets
    assert node_set_distance([5, 6], [5, 6], G) == 0
    assert node_set_distance([5, 6], [2, 6], G) == node_set_distance(5, 2, G) == 2
class Test_SourceResult(TestCase):
    """Unit tests for the generic SourceResult response object."""

    def setUp(self):
        # Minimal fixture: three scored nodes on a random graph.
        self.inference_method = "rumor centrality"
        self.scores = {1: 1, 2: 10, 3: 0.025}
        self.G = nx.fast_gnp_random_graph(50, 0.25)
        self.result = SourceResult(
            inference_method=self.inference_method,
            scores=self.scores,
            source_type="single-source",
            G=self.G,
        )
        return None

    def test_rank(self):
        # Highest score first (reverse=True default). Fix: removed a
        # redundant, unused `ranked = self.result.rank()` call.
        assert self.result.rank() == [2, 1, 3]

    def test_topn(self):
        with pytest.raises(ValueError):
            # n must be an integer
            assert self.result.topn("BAD INPUT")
        assert self.result.topn(n=1) == [2]

    def test_bad_graph_input(self):
        # constructor rejects a non-graph G
        with pytest.raises(ValueError):
            assert SourceResult(
                inference_method=self.inference_method,
                scores=self.scores,
                source_type="single-source",
                G="BAD INPUT",
            )

    def test_bad_source_type_input(self):
        # constructor rejects an unknown source_type
        with pytest.raises(ValueError):
            assert SourceResult(
                inference_method=self.inference_method,
                scores=self.scores,
                source_type="BAD INPUT",
                G=self.G,
            )
class Test_SingleSourceResult(TestCase):
    """Unit tests for the single-source result object."""

    def setUp(self):
        # Three scored candidate nodes; node 2 carries the best score.
        self.inference_method = "rumor centrality"
        self.scores = {1: 1, 2: 10, 3: 0.025}
        self.G = nx.fast_gnp_random_graph(50, 0.25)
        self.result = SingleSourceResult(
            inference_method=self.inference_method,
            scores=self.scores,
            source_type="single-source",
            G=self.G,
        )
        return None

    def test_get_rank(self):
        # Best-scored node ranks first; worst-scored ranks last.
        assert self.result.get_rank(2) == 1
        assert self.result.get_rank(3) == 3
        with pytest.raises(ValueError):
            # an unknown vertex raises when soft_rank is off
            assert self.result.get_rank("BAD INPUT")

    def test_bad_graph_input(self):
        # constructor rejects a non-graph G
        with pytest.raises(ValueError):
            assert SingleSourceResult(
                inference_method=self.inference_method,
                scores=self.scores,
                source_type="single-source",
                G="BAD INPUT",
            )
class Test_MultiSourceResult(TestCase):
    """Unit tests for the multi-source result object."""

    def setUp(self):
        # Score every 2-subset of {0..4} with a random value.
        self.inference_method = "fast multi-source jordan centrality"
        self.scores = {}
        self.G = nx.fast_gnp_random_graph(50, 0.25)
        for pair in itertools.combinations(range(5), 2):
            self.scores[pair] = random.random()
        self.result = MultiSourceResult(
            inference_method=self.inference_method,
            scores=self.scores,
            source_type="multi-source",
            G=self.G,
        )
        return None

    def test_get_rank(self):
        # The highest-scoring source set must rank first.
        best = max(self.scores, key=self.scores.get)
        assert self.result.get_rank(best) == 1

    def test_bad_graph_input(self):
        # constructor rejects a non-graph G
        with pytest.raises(ValueError):
            assert MultiSourceResult(
                inference_method=self.inference_method,
                scores=self.scores,
                source_type="single-source",
                G="BAD INPUT",
            )
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/tests/__init__.py | xflow/method/cosasi/source_inference/tests/__init__.py | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false | |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/multiple_source/lisn.py | xflow/method/cosasi/source_inference/multiple_source/lisn.py | import itertools
import networkx as nx
import numpy as np
from ..source_results import MultiSourceResult
from ...utils import estimators
from .. import single_source
def fast_multisource_lisn(I, G, t, number_sources=None):
    """Greedily runs the single-source LISN algorithm on each estimated
    infection subgraph attributable to each of the hypothesized sources.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
    t : int
        the observation timestep corresponding to I
    number_sources : int or None (optional)
        if int, this is the hypothesized number of infection sources
        if None, estimates the number of sources

    Notes
    -----
    Fix: the docstring previously described Jordan centrality — it was
    copy-pasted from ``fast_multisource_jordan_centrality``. This function
    scores candidates with the single-source LISN estimator instead.

    Examples
    --------
    >>> result = cosasi.multiple_source.fast_multisource_lisn(I, G, t)

    References
    ----------
    .. [1] L. Ying and K. Zhu,
        "Diffusion Source Localization in Large Networks"
        Synthesis Lectures on Communication Networks, 2018
    """
    # Partition the infection subgraph into per-source subgraphs, estimating
    # the number of sources when it is not supplied.
    if not number_sources:
        number_sources, subgraphs = estimators.number_sources(
            I, return_source_subgraphs=True
        )
    else:
        number_sources, subgraphs = estimators.number_sources(
            I, number_sources=number_sources, return_source_subgraphs=True
        )
    # Score each candidate per subgraph with single-source LISN, dropping
    # -inf (impossible) candidates.
    sources_scores = [
        {
            k: v
            for k, v in single_source.lisn(I=subgraphs[i], G=G, t=t)
            .data["scores"]
            .items()
            if v != -np.inf
        }
        for i in range(number_sources)
    ]
    # Combine per-subgraph candidates: each joint hypothesis takes one
    # candidate from every subgraph and is scored by the sum of their
    # individual scores.
    data = [list(d.keys()) for d in sources_scores]
    product_scores = {}
    for item in itertools.product(*data):
        idx = tuple(item)
        product_scores[idx] = 0
        for i in range(len(idx)):
            product_scores[idx] += sources_scores[i][idx[i]]
    result = MultiSourceResult(
        source_type="multi-source",
        inference_method="fast multi-source lisn",
        scores=product_scores,
        G=G,
    )
    return result
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/multiple_source/jordan.py | xflow/method/cosasi/source_inference/multiple_source/jordan.py | import itertools
import networkx as nx
import numpy as np
from ..source_results import MultiSourceResult
from ...utils import estimators
from .. import single_source
def fast_multisource_jordan_centrality(I, G, number_sources=None):
    """Greedily runs single-source Jordan centrality on each estimated infection
    subgraph attributable to each of the hypothesized number of sources.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
    number_sources : int or None (optional)
        if int, this is the hypothesized number of infection sources
        if None, estimates the number of sources

    Notes
    -----
    The Jordan infection center is the vertex with minimum infection
    eccentricity; see [1]_ and [2]_.

    Examples
    --------
    >>> result = cosasi.multiple_source.fast_multisource_jordan_centrality(I, G)

    References
    ----------
    .. [1] L. Ying and K. Zhu,
        "On the Universality of Jordan Centers for Estimating Infection Sources in Tree Networks"
        IEEE Transactions of Information Theory, 2017
    .. [2] L. Ying and K. Zhu,
        "Diffusion Source Localization in Large Networks"
        Synthesis Lectures on Communication Networks, 2018
    """
    # Split the infection subgraph into one subgraph per hypothesized source,
    # estimating the count when the caller does not provide one.
    if not number_sources:
        number_sources, subgraphs = estimators.number_sources(
            I, return_source_subgraphs=True
        )
    else:
        number_sources, subgraphs = estimators.number_sources(
            I, number_sources=number_sources, return_source_subgraphs=True
        )
    # Per-subgraph candidate scores, excluding -inf (impossible) candidates.
    per_subgraph_scores = []
    for i in range(number_sources):
        raw = single_source.jordan_centrality(subgraphs[i], G).data["scores"]
        per_subgraph_scores.append(
            {node: score for node, score in raw.items() if score != -np.inf}
        )
    # Every joint hypothesis picks one candidate per subgraph; its score is
    # the sum of the individual candidate scores.
    candidate_lists = [list(scores) for scores in per_subgraph_scores]
    joint_scores = {}
    for combo in itertools.product(*candidate_lists):
        joint_scores[combo] = sum(
            per_subgraph_scores[pos][node] for pos, node in enumerate(combo)
        )
    return MultiSourceResult(
        source_type="multi-source",
        inference_method="fast multi-source jordan centrality",
        scores=joint_scores,
        G=G,
    )
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/multiple_source/netsleuth.py | xflow/method/cosasi/source_inference/multiple_source/netsleuth.py | import itertools
import networkx as nx
import numpy as np
from ..source_results import MultiSourceResult
from ...utils import estimators
from .. import single_source
def netsleuth(I, G, hypotheses_per_step=1):
    """Implements the multi-source NETSLEUTH algorithm to score combinations
    of nodes in G.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
    hypotheses_per_step : int (default 1)
        number of candidate sources to be kept per iteration of NETSLEUTH.
        Particular usage is described in greater detail in `Notes` section.

    Notes
    -----
    The number of source hypotheses returned will be hypotheses_per_step*[number
    of seed nodes], the latter of which is automatically determined via minimum
    description length (MDL) calculations.

    NETSLEUTH is described in [1]_ and [2]_. It has linear complexity with the
    number of edges of the infected subgraph, edges of the frontier set, and
    vertices of the infected subgraph.

    The standard n-source version of NETSLEUTH repeatedly deletes the best
    source found so far and re-runs the single-source method. That does not
    lend itself to ranking alternative hypotheses, so this variant keeps the
    top ``hypotheses_per_step`` candidates at every iteration and grows each
    hypothesis set by one node per iteration, stopping once the minimum
    description length stops decreasing.

    Examples
    --------
    >>> result = cosasi.multiple_source.netsleuth(I, G, hypotheses_per_step=3)

    References
    ----------
    .. [1] B. Prakash, J. Vreeken, C. Faloutsos,
        "Spotting Culprits in Epidemics: How Many and Which Ones?"
        IEEE 12th International Conference on Data Mining, 2012
        https://ieeexplore.ieee.org/document/6413787
    .. [2] L. Ying and K. Zhu,
        "Diffusion Source Localization in Large Networks"
        Synthesis Lectures on Communication Networks, 2018
    """
    multisource_scores = {}
    mdl_decreasing = True
    this_mdl = np.inf
    last_mdl = np.inf
    i = 1  # generation counter: generation i holds hypothesis sets of size i
    while mdl_decreasing:
        if i == 1:
            # First generation: single-source candidates, scored by MDL.
            step_result = single_source.netsleuth(I, G)
            for s in step_result.topn(hypotheses_per_step):
                # NOTE: `(s)` is NOT a tuple — first-generation keys are
                # bare nodes; keys only become tuples from generation 2 on.
                multisource_scores[(s)] = estimators.description_length([s], G)
        else:
            # Grow every surviving hypothesis by one node: remove its nodes
            # from a copy of the infection subgraph and re-run single-source
            # NETSLEUTH on the remainder.
            new_multisource_scores = {}
            for j in multisource_scores.keys():
                H = I.copy()
                if i == 2:
                    # generation-1 keys are scalars, so wrap in a list
                    H.remove_nodes_from([j])
                else:
                    H.remove_nodes_from(j)
                step_result = single_source.netsleuth(H, G)
                for s in step_result.topn(hypotheses_per_step):
                    if i == 2:
                        new_s = tuple([j] + [s])
                    else:
                        new_s = tuple(list(j) + [s])
                    new_multisource_scores[new_s] = estimators.description_length(
                        list(new_s), G
                    )
            multisource_scores = new_multisource_scores
        # update mdl tracker: stop once the best description length in this
        # generation no longer improves on the previous generation's.
        last_mdl = this_mdl
        this_mdl = min(multisource_scores.values())
        mdl_decreasing = this_mdl < last_mdl
        i += 1
    # NOTE(review): on exit, multisource_scores holds the first
    # NON-improving generation (the dict is overwritten each iteration
    # before the MDL check) — confirm this is the intended result set.
    result = MultiSourceResult(
        source_type="multi-source",
        inference_method="netsleuth",
        scores=multisource_scores,
        G=G,
        reverse=False,  # MDL: lower description length ranks higher
    )
    return result
def fast_multisource_netsleuth(I, G, number_sources=None):
    """Score candidate source sets by combining single-source NETSLEUTH results
    computed independently on each estimated per-source infection subgraph.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
    number_sources : int or None (optional)
        if int, this is the hypothesized number of infection sources
        if None, estimates the number of sources

    Examples
    --------
    >>> result = cosasi.multiple_source.fast_multisource_netsleuth(I, G)

    Notes
    -----
    Unofficial variant of multisource NETSLEUTH intended for fast computation and
    ranking; the canonical multisource algorithm does not lend itself to scoring
    many possible source sets. NETSLEUTH is described in [1]_ and [2]_; a more
    authoritative implementation is found in `multisource.netsleuth`.

    References
    ----------
    .. [1] B. Prakash, J. Vreeken, C. Faloutsos,
        "Spotting Culprits in Epidemics: How Many and Which Ones?"
        IEEE 12th International Conference on Data Mining, 2012
        https://ieeexplore.ieee.org/document/6413787
    .. [2] L. Ying and K. Zhu,
        "Diffusion Source Localization in Large Networks"
        Synthesis Lectures on Communication Networks, 2018
    """
    # NOTE: a falsy number_sources (None or 0) triggers estimation -- this
    # mirrors the original truthiness-based contract.
    estimator_kwargs = {"return_source_subgraphs": True}
    if number_sources:
        estimator_kwargs["number_sources"] = number_sources
    number_sources, subgraphs = estimators.number_sources(I, **estimator_kwargs)
    # One candidate-score dict per hypothesized source, dropping the -inf
    # entries (nodes outside that source's infection subgraph).
    per_source = []
    for position in range(number_sources):
        step_scores = single_source.netsleuth(subgraphs[position], G).data["scores"]
        per_source.append(
            {node: score for node, score in step_scores.items() if score != -np.inf}
        )
    # Cartesian product: every way of picking one candidate per source.
    # A source set's score is the sum of its members' individual scores.
    candidate_lists = [list(candidates) for candidates in per_source]
    product_scores = {}
    for combo in itertools.product(*candidate_lists):
        product_scores[tuple(combo)] = sum(
            per_source[position][node] for position, node in enumerate(combo)
        )
    return MultiSourceResult(
        source_type="multi-source",
        inference_method="fast multi-source netsleuth",
        scores=product_scores,
        G=G,
    )
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/multiple_source/__init__.py | xflow/method/cosasi/source_inference/multiple_source/__init__.py | from .netsleuth import *
from .jordan import *
from .lisn import *
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/multiple_source/tests/test_netsleuth.py | xflow/method/cosasi/source_inference/multiple_source/tests/test_netsleuth.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import numpy as np
import random
import math
import cosasi
class TestNETSLEUTH(TestCase):
    """Integration tests for the multi-source NETSLEUTH estimators."""

    def setUp(self):
        """Seed an SI contagion on a complete graph; the infected subgraph at
        t=25 is the shared fixture for every test in this class."""
        self.G = nx.complete_graph(n=100)
        contagion = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        contagion.forward(50)
        self.t = 25
        self.I = contagion.get_infected_subgraph(self.t)
        return None

    def test_fast_multisource_netsleuth(self):
        """fast_multisource_netsleuth returns a MultiSourceResult whose
        hypothesis tuples all share a single length."""
        result = cosasi.source_inference.multiple_source.fast_multisource_netsleuth(
            self.I, self.G, number_sources=3
        )
        assert isinstance(
            result, cosasi.source_inference.source_results.MultiSourceResult
        )
        top5 = result.topn(5)
        # NOTE(review): this asserts that the list is non-empty, not that every
        # element has length 3 -- presumably all(len(i) == 3 for i in top5)
        # was intended; confirm before tightening.
        assert [len(i) == 3 for i in top5]
        # With number_sources omitted, the estimator picks the count itself;
        # every scored source set must still have the same cardinality.
        result = cosasi.source_inference.multiple_source.fast_multisource_netsleuth(
            self.I, self.G
        )
        l = None
        for k in result.data["scores"].keys():
            if not l:
                l = len(k)
            else:
                assert len(k) == l

    def test_netsleuth(self):
        """netsleuth returns a MultiSourceResult with uniform-length,
        positively scored hypothesis tuples."""
        result = cosasi.source_inference.multiple_source.netsleuth(self.I, self.G)
        assert isinstance(
            result, cosasi.source_inference.source_results.MultiSourceResult
        )
        result = cosasi.source_inference.multiple_source.netsleuth(self.I, self.G)
        l = None
        for k in result.data["scores"].keys():
            if not l:
                l = len(k)
            else:
                assert len(k) == l
            # description-length scores should be strictly positive
            assert result.data["scores"][k] > 0
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/multiple_source/tests/test_lisn.py | xflow/method/cosasi/source_inference/multiple_source/tests/test_lisn.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import cosasi
class TestLISN(TestCase):
    """Integration tests for the fast multi-source LISN estimator."""

    def setUp(self):
        """SI contagion on a complete graph; the infected subgraph at t=25 is
        the shared fixture."""
        self.G = nx.complete_graph(n=100)
        contagion = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        contagion.forward(50)
        self.t = 25
        self.I = contagion.get_infected_subgraph(self.t)
        return None

    def test_fast_multisource_lisn(self):
        """fast_multisource_lisn returns a MultiSourceResult; all hypothesis
        tuples share one length."""
        result = cosasi.source_inference.multiple_source.fast_multisource_lisn(
            self.I, self.G, self.t, 3
        )
        assert isinstance(
            result, cosasi.source_inference.source_results.MultiSourceResult
        )
        top5 = result.topn(5)
        # NOTE(review): truthy-list assertion; likely meant
        # all(len(i) == 3 for i in top5) -- confirm before tightening.
        assert [len(i) == 3 for i in top5]
        # Without an explicit source count the estimator chooses one; every
        # scored source set must still have the same cardinality.
        result = cosasi.source_inference.multiple_source.fast_multisource_lisn(
            self.I, self.G, self.t
        )
        l = None
        for k in result.data["scores"].keys():
            if not l:
                l = len(k)
            else:
                assert len(k) == l
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/multiple_source/tests/__init__.py | xflow/method/cosasi/source_inference/multiple_source/tests/__init__.py | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false | |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/multiple_source/tests/test_jordan.py | xflow/method/cosasi/source_inference/multiple_source/tests/test_jordan.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import cosasi
class TestJordan(TestCase):
    """Integration tests for the fast multi-source Jordan-centrality estimator."""

    def setUp(self):
        """SI contagion on a complete graph; the infected subgraph at t=25 is
        the shared fixture."""
        self.G = nx.complete_graph(n=100)
        contagion = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        contagion.forward(50)
        self.t = 25
        self.I = contagion.get_infected_subgraph(self.t)
        return None

    def test_fast_multisource_jordan_centrality(self):
        """Returns a MultiSourceResult; hypothesis tuples share one length."""
        result = (
            cosasi.source_inference.multiple_source.fast_multisource_jordan_centrality(
                self.I, self.G, 3
            )
        )
        assert isinstance(
            result, cosasi.source_inference.source_results.MultiSourceResult
        )
        top5 = result.topn(5)
        # NOTE(review): truthy-list assertion; likely meant
        # all(len(i) == 3 for i in top5) -- confirm before tightening.
        assert [len(i) == 3 for i in top5]
        # Estimated source count: every scored set must share one cardinality.
        result = (
            cosasi.source_inference.multiple_source.fast_multisource_jordan_centrality(
                self.I, self.G
            )
        )
        l = None
        for k in result.data["scores"].keys():
            if not l:
                l = len(k)
            else:
                assert len(k) == l
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/single_source/lisn.py | xflow/method/cosasi/source_inference/single_source/lisn.py | import math
import networkx as nx
import numpy as np
import scipy as sp
from ..source_results import SingleSourceResult
def lisn(I, G, t=None, infection_rate=0.1):
    """Score every node of G as a source hypothesis via the algorithm of
    "Localizing the Information Source in a Network" [1]_.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
    t : int (optional)
        the observation timestep corresponding to I
    infection_rate : float (optional)
        Inter-node infection efficiency from the original contagion process
        must be in [0, 1]

    Notes
    -----
    Because the probabilities can be quite small, the log-score is reported
    rather than the raw score. The algorithm has no official name -- it is
    "Algorithm 1" in [1]_ -- so it is dubbed LISN after the publication title.
    Nodes outside the infection subgraph receive a score of negative infinity.

    Examples
    --------
    >>> result = cosasi.single_source.lisn(I, G)

    References
    ----------
    .. [1] G. Nie and C. Quinn,
        "Localizing the Information Source in a Network"
        TrueFact 2019: KDD 2019 Workshop on Truth Discovery and Fact Checking: Theory and Practice, 2019
    """
    # Non-infected candidates keep a -inf score; only nodes of I are scored.
    scores = {node: -np.inf for node in G.nodes}
    for candidate in I.nodes:
        log_likelihood = 0
        for other in G.nodes:
            if other == candidate:
                continue
            hops = nx.shortest_path_length(G, candidate, other)
            reach_prob = distance_prob(t, hops, infection_rate)
            # Infected node: probability the contagion reached it by time t.
            # Uninfected node: probability it did NOT. Accumulate in log space.
            if other in I.nodes:
                log_likelihood += math.log(reach_prob)
            else:
                log_likelihood += math.log(1 - reach_prob)
        scores[candidate] = log_likelihood
    return SingleSourceResult(
        source_type="single-source", inference_method="lisn", scores=scores, G=G
    )
def distance_prob(t, n, infection_rate=0.1):
    """Approximates the probability that a node n edges away receives the
    rumor/contagion from another node within time t.

    Parameters
    ----------
    t : int
        the observation timestep; sets the upper limit of the incomplete
        gamma integral via ``infection_rate * t``
    n : int
        shortest path distance between the two nodes (must be >= 1)
    infection_rate : float (optional)
        Inter-node infection efficiency from the original contagion process
        must be in [0, 1]

    Returns
    -------
    float
        gamma(n, infection_rate * t) / Gamma(n), the regularized lower
        incomplete gamma function P(n, infection_rate * t)

    Notes
    -----
    This function is defined in Section 3 of [1]_. The ratio is evaluated with
    ``scipy.special.gammainc``, which computes the regularized lower incomplete
    gamma function directly; this replaces the original hand-rolled numerical
    quadrature with a faster, more accurate closed form. (The original
    docstring claimed ``t`` was unused -- it is in fact the integral's upper
    bound, scaled by the infection rate.)

    References
    ----------
    .. [1] G. Nie and C. Quinn,
        "Localizing the Information Source in a Network"
        TrueFact 2019: KDD 2019 Workshop on Truth Discovery and Fact Checking: Theory and Practice, 2019
    """
    # gammainc(s, x) == (1 / Gamma(s)) * integral_0^x u**(s-1) * e**(-u) du,
    # exactly the quantity the original quad-based implementation approximated.
    return sp.special.gammainc(n, infection_rate * t)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/single_source/jordan.py | xflow/method/cosasi/source_inference/single_source/jordan.py | import networkx as nx
import numpy as np
from ...utils import soft_eccentricity
from ..source_results import SingleSourceResult
def jordan_centrality(I, G):
    """Score every node by the inverse of its infection eccentricity, so that
    the Jordan infection center receives the highest value.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.

    Notes
    -----
    The Jordan infection center is the vertex with minimum infection
    eccentricity; see [1]_ and [2]_. Nodes outside the infection subgraph
    receive a score of negative infinity.

    Examples
    --------
    >>> result = cosasi.single_source.jordan_centrality(I, G)

    References
    ----------
    .. [1] L. Ying and K. Zhu,
        "On the Universality of Jordan Centers for Estimating Infection Sources in Tree Networks"
        IEEE Transactions of Information Theory, 2017
    .. [2] L. Ying and K. Zhu,
        "Diffusion Source Localization in Large Networks"
        Synthesis Lectures on Communication Networks, 2018
    """
    scores = {}
    for node in G.nodes:
        if node in I.nodes:
            # Inverted so that smaller eccentricity -> larger score.
            scores[node] = 1 / soft_eccentricity(I, v=node)
        else:
            scores[node] = -np.inf
    return SingleSourceResult(
        source_type="single-source",
        inference_method="jordan centrality",
        scores=scores,
        G=G,
        reverse=True,
    )
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/single_source/netsleuth.py | xflow/method/cosasi/source_inference/single_source/netsleuth.py | import networkx as nx
import numpy as np
import warnings
from ..source_results import SingleSourceResult
def netsleuth(I, G):
    """Implements the single-source NETSLEUTH algorithm to score all nodes in G.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.

    Notes
    -----
    NETSLEUTH is described in [1]_. General idea is that, under mean field
    approximation, the probability of observing an infection subgraph given a
    particular source s is proportional to the sth entry of the largest eigenvector
    of the infection subgraph Laplacian. The implementation below is described in
    [2]_.

    Nodes outside the infection subgraph (i.e. the frontier set) receive a score of
    negative infinity.

    Examples
    --------
    >>> result = cosasi.single_source.netsleuth(I, G)

    References
    ----------
    .. [1] B. A. Prakash, J. Vreeken, C. Faloutsos,
        "Efficiently spotting the starting points of an epidemic in a large graph"
        Knowledge and Information Systems, 2013
        https://link.springer.com/article/10.1007/s10115-013-0671-5
    .. [2] L. Ying and K. Zhu,
        "Diffusion Source Localization in Large Networks"
        Synthesis Lectures on Communication Networks, 2018
    """
    # Raw string: "\." is an invalid escape sequence in a plain string literal
    # (SyntaxWarning on modern Python).
    warnings.filterwarnings("ignore", module=r"networkx\..*")
    L = nx.laplacian_matrix(G).toarray()
    # nx.laplacian_matrix orders rows/columns by G.nodes; map node labels to
    # matrix positions explicitly instead of assuming labels are 0..n-1 ints
    # (the original indexed L directly with node labels, which crashes or
    # mis-indexes for relabeled graphs).
    position = {node: idx for idx, node in enumerate(G.nodes)}
    infected = list(I.nodes)
    infected_positions = [position[node] for node in infected]
    L_I = L[np.ix_(infected_positions, infected_positions)]
    eigenvalues, eigenvectors = np.linalg.eig(L_I)
    largest_eigenvalue = max(eigenvalues)
    largest_eigenvector = eigenvectors[:, list(eigenvalues).index(largest_eigenvalue)]
    # Rows of L_I (hence eigenvector entries) follow the order of `infected`;
    # a dict lookup replaces the original O(n) list.index per node.
    row_of = {node: idx for idx, node in enumerate(infected)}
    scores = {
        v: largest_eigenvector[row_of[v]] if v in row_of else -np.inf for v in G.nodes
    }
    result = SingleSourceResult(
        source_type="single-source", inference_method="netsleuth", scores=scores, G=G
    )
    return result
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/single_source/__init__.py | xflow/method/cosasi/source_inference/single_source/__init__.py | from .rumor_centrality import *
from .short_fat_tree import *
from .netsleuth import *
from .jordan import *
from .lisn import *
from .earliest_infection_first import *
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/single_source/short_fat_tree.py | xflow/method/cosasi/source_inference/single_source/short_fat_tree.py | import math
import random
import networkx as nx
import numpy as np
from ...utils import longest_list_len
from ..source_results import SingleSourceResult
def short_fat_tree(I, G, infection_rate=0.1):
    """Implements the Short-Fat-Tree (SFT) algorithm to score all nodes in G.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
    infection_rate : float (optional)
        Inter-node infection efficiency from the original contagion process
        must be in [0, 1]

    Returns
    -------
    SingleSourceResult
        scores are weighted boundary node degrees for the nodes that
        accumulated all |I| node IDs; all other nodes score -inf

    Examples
    --------
    >>> result = cosasi.single_source.short_fat_tree(I, G)

    Notes
    -----
    Algorithm attempts to find infection center by identifying the vertex with
    largest weighted boundary node degree. The algorithm was introduced in [1]_.
    The message-passing phase below floods node IDs synchronously through I;
    a node that eventually holds all |I| IDs can reach every infected vertex.

    Nodes outside the infection subgraph receive a score of negative infinity.

    References
    ----------
    .. [1] K. Zhu and L. Ying,
        "Information source detection in the SIR model: A sample-path-based approach."
        IEEE/ACM Transactions on Networking, 2014
        https://ieeexplore.ieee.org/document/6962907
    """
    N = len(I)
    # Each node starts (time 0) holding only its own node ID; the three
    # buffers below separate messages by how recently they arrived.
    t_messages = {i: list() for i in I.nodes}  # arriving this timestep t
    t_minus_messages = {i: [i] for i in I.nodes}  # arrived at timestep t-1
    earlier_messages = {i: set() for i in I.nodes}  # arrived before t-1
    all_messages = {i: {i} for i in I.nodes}  # full history per node
    t = 1
    # Flood until some node has seen all N distinct IDs.
    while longest_list_len(all_messages.values()) < N:
        for v in I.nodes:
            # IDs v first learned in the t-1 slot (not seen before that).
            new_ids = set(t_minus_messages[v]) - earlier_messages[v]
            if new_ids:  # v received new node IDs in t-1 time slot
                for u in I.neighbors(v):
                    # v broadcasts only the newly learned IDs to its neighbors
                    t_messages[u] += new_ids
        t += 1
        # Age the buffers: t-1 messages become "earlier", everything lands in
        # the full history.
        earlier_messages = {
            i: earlier_messages[i].union(t_minus_messages[i]) for i in I.nodes
        }
        all_messages = {
            i: all_messages[i].union(t_minus_messages[i]).union(t_messages[i])
            for i in I.nodes
        }
        # Push back recent message record: this step's arrivals become t-1.
        t_minus_messages = t_messages
        t_messages = {i: list() for i in I.nodes}
    # Score only the nodes that received all |I| distinct node IDs; everything
    # else (including nodes outside I) gets -inf.
    S = {
        v: weighted_boundary_node_degree(I=I, G=G, v=v, infection_rate=infection_rate)
        if v in I.nodes and len(all_messages[v]) >= N
        else -np.inf
        for v in G.nodes
    }
    result = SingleSourceResult(
        source_type="single-source", inference_method="short-fat-tree", scores=S, G=G
    )
    return result
def weighted_boundary_node_degree(I, G, v, infection_rate=0.01, return_boundary=False):
    """Computes the weighted boundary node degree (WBND) with respect to node v and
    the set of infected nodes I.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
    v : graph index - str, int, etc.
        the candidate source vertex the WBND is computed for; must be in I
    infection_rate : float (optional)
        Inter-node infection efficiency from the original contagion process
        must be in [0, 1]
    return_boundary : bool
        if True, you get both the weighted boundary node degree and the involved boundary nodes
        if False, you only get the weighted boundary node degree

    Returns
    -------
    float or (float, list)
        the WBND, plus the list of boundary nodes when ``return_boundary`` is True

    Notes
    -----
    This implementation is based on the WBND Algorithm, described in Algorithm 2.2
    on p. 10 of [1]_.

    References
    ----------
    .. [1] L. Ying and K. Zhu,
        "Diffusion Source Localization in Large Networks"
        Synthesis Lectures on Communication Networks, 2018
    """
    # Boundary nodes: infected vertices whose distance from v (measured in G)
    # equals v's infection eccentricity.
    v_infection_eccentricity = nx.eccentricity(I, v=v)
    v_boundary = [
        w
        for w in I.nodes
        if nx.shortest_path_length(G, source=v, target=w) == v_infection_eccentricity
    ]
    v_boundary_len = len(v_boundary)
    # (A dead `wbnd = 0` initializer was removed; this assignment is the only
    # one that matters.)
    wbnd = sum(G.degree(u) - v_boundary_len for u in v_boundary) * abs(
        math.log(1 - infection_rate)
    )
    if return_boundary:
        return wbnd, v_boundary
    return wbnd
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/single_source/rumor_centrality.py | xflow/method/cosasi/source_inference/single_source/rumor_centrality.py | import math
import random
import networkx as nx
from ...utils import list_product
from ..source_results import SingleSourceResult
def rumor_centrality_root(I, v, return_all_values=True):
    """Computes rumor centrality for all nodes, assuming a spanning tree rooted at v.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    v : graph index - str, int, etc.
        The vertex rooting
    return_all_values : bool
        Specifies whether you want the full rumor centrality dict.
        If False, returns only the value for node v

    Notes
    -----
    Rumor centrality was introduced in the seminal work [1]_. This is a more "literal"
    interpretation of their algorithm. `rumor_centrality` averages these results over all
    possible BFS rooting schemes.

    Two passes over the BFS tree: an upward (leaves-to-root) pass computes
    subtree sizes ``t`` and subtree products ``p``; a downward pass converts
    those into rumor centrality values ``r``.

    References
    ----------
    .. [1] S. Devavrat and T. Zaman,
        "Rumors in a network: Who's the culprit?."
        IEEE Transactions on Informatidon Theory, 2011
        https://devavrat.mit.edu/wp-content/uploads/2017/10/Rumors-in-a-network-whos-the-culprit.pdf
    """
    N = len(I)
    G = nx.bfs_tree(I, v)
    # sort nodes by depth from leaves to root, so children are always
    # processed before their parent in the upward pass
    depths = nx.shortest_path_length(G, v)
    nodes_by_depth = sorted(depths, key=depths.get, reverse=True)
    # message-passing data objects; indexing is dict[destination][source]
    t = {
        i: {j: 0 for j in nodes_by_depth} for i in nodes_by_depth
    }  # subtree size messages
    p = {
        i: {j: 0 for j in nodes_by_depth} for i in nodes_by_depth
    }  # subtree product messages
    r = {i: 0 for i in nodes_by_depth}  # rumor centrality values
    # Upward pass: each non-root node sends its subtree size/product to its parent.
    for u in nodes_by_depth:
        children_u = [e[1] for e in G.out_edges(u)]
        if u != v:
            parent_u = list(G.in_edges(u))[0][0]
        # NOTE(review): the `"parent_u" in locals()` guards below are fragile --
        # once parent_u has been bound in ANY earlier iteration it stays in
        # locals(), so the check only meaningfully fires before the first
        # non-root node is processed. It appears to behave correctly because
        # the root is processed last and guarded by `u != v`, but confirm
        # before refactoring.
        if G.out_degree(u) == 0:
            # u is a leaf: base case, subtree of size 1 and product 1
            if "parent_u" in locals():
                t[parent_u][u] = 1
                p[parent_u][u] = 1
        else:
            if u != v:
                # u is not root: aggregate the children's messages
                if "parent_u" in locals():
                    t[parent_u][u] = 1 + sum([t[u][j] for j in children_u])
                    p[parent_u][u] = t[parent_u][u] * list_product(
                        [p[u][j] for j in children_u]
                    )
    # Downward pass (root to leaves): root gets N!/prod of subtree products,
    # each child is derived from its parent's value.
    for u in nodes_by_depth[::-1]:
        children_u = [e[1] for e in G.out_edges(u)]
        if u == v:
            # u is root
            r[u] = math.factorial(N) / list_product([p[u][j] for j in children_u])
        else:
            parent_u = list(G.in_edges(u))[0][0]
            r[u] = r[parent_u] * t[parent_u][u] / (N - t[parent_u][u])
    # Normalize by tree size
    for u in nodes_by_depth:
        r[u] /= len(G)
    if not return_all_values:
        return r[v]
    return r
def rumor_centrality(I, G=None, v=None, normalize=True, only_roots=False):
    """Computes rumor centrality for all nodes in G.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph (optional)
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
        Only forwarded into the returned result object.
    v : graph index - str, int, etc. (optional)
        if provided, returns the rumor centrality of v only.
    normalize : bool
        If True, scales all rumor centrality values to between 0 and 1
    only_roots : bool
        Aggregation strategy, as we compute rumor_centrality_root over all possible
        root nodes.
        If True, we only keep the rumor_centrality_root value for the root node
        If False, we keep the rumor_centrality_root values for all nodes

    Notes
    -----
    Rumor centrality was introduced in the seminal work [1]_. `rumor_centrality_root` is a
    more "literal" interpretation of their algorithm. `rumor_centrality` (this function)
    averages these results over all possible BFS rooting schemes.

    Examples
    --------
    >>> result = cosasi.single_source.rumor_centrality(I, G)

    References
    ----------
    .. [1] S., Devavrat and T. Zaman,
        "Rumors in a network: Who's the culprit?."
        IEEE Transactions on Information Theory, 2011
        https://devavrat.mit.edu/wp-content/uploads/2017/10/Rumors-in-a-network-whos-the-culprit.pdf
    """
    # Test `v is not None` rather than truthiness: a falsy node label such as
    # 0 must still be validated (and, below, still get its scalar score
    # returned) -- the original `if v:` checks silently misbehaved for node 0.
    if v is not None and v not in I:
        raise ValueError("Provided node is not in I.")
    # iterate over possible roots, and average over spanning trees
    rumor_centrality_dict = {i: 0 for i in I.nodes}
    for root in rumor_centrality_dict:
        if only_roots:
            rumor_centrality_dict[root] = rumor_centrality_root(
                I, root, return_all_values=False
            )
        else:
            r = rumor_centrality_root(I, root, return_all_values=True)
            for node in I.nodes:
                if node in r:
                    rumor_centrality_dict[node] += r[node]
    # average over the |I| rooting schemes
    for node in rumor_centrality_dict:
        rumor_centrality_dict[node] /= len(I)
    if normalize:
        max_val = max(rumor_centrality_dict.values())
        for node in rumor_centrality_dict:
            rumor_centrality_dict[node] /= max_val
    if v is not None:
        return rumor_centrality_dict[v]
    result = SingleSourceResult(
        source_type="single-source",
        inference_method="rumor centrality",
        scores=rumor_centrality_dict,
        G=G,
    )
    return result
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/single_source/earliest_infection_first.py | xflow/method/cosasi/source_inference/single_source/earliest_infection_first.py | import random
import networkx as nx
import numpy as np
from ...utils import soft_eccentricity
from ..source_results import SingleSourceResult
def earliest_infection_first(I, G, observer_dict):
    """Score all infected nodes with the Earliest Infection First algorithm,
    which exploits partial infection-timestamp information.

    Parameters
    ----------
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
    observer_dict : dict
        observers dictionary, a la contagion.get_observers()

    Notes
    -----
    This is the greedy algorithm outlined in Section 3 of [1]_: each source
    hypothesis is scored by the cost of its greedy EIF spreading tree, i.e. the
    "cost-based ranking" approach of Section 4 of [1]_. Lower cost is better,
    hence ``reverse=False`` on the result.

    References
    ----------
    .. [1] K. Zhu, Z. Chen, L. Ying,
        "Locating Contagion Sources in Networks with Partial Timestamps"
        Data Mining and Knowledge Discovery, 2016
        https://link.springer.com/article/10.1007/s10618-015-0435-9
    """
    if not nx.is_connected(G):
        raise ValueError("G must be connected for EIF algorithm.")
    observers = observer_dict.copy()
    # SI models record a single infection event per observer; collapse any
    # list-valued entries to their first timestamp.
    for key, value in observers.items():
        if isinstance(value, list):
            observers[key] = value[0]
    mu = _estimate_mu(G, observers)
    # Infected observers, ordered earliest-to-latest by timestamp.
    ordered = sorted(observers.items(), key=lambda pair: pair[1])
    alpha = [node for node, _ in ordered if node in I]
    # Non-infected nodes keep an infinite (i.e. worst) cost.
    scores = dict.fromkeys(G, np.inf)
    for candidate in I:
        scores[candidate] = eif_root(
            root=candidate,
            I=I,
            G=G,
            observers=observers,
            mu=mu,
            alpha=alpha,
            only_return_cost=True,
        )
    return SingleSourceResult(
        source_type="single-source",
        inference_method="earliest infection first",
        scores=scores,
        G=G,
        reverse=False,
    )
def eif_root(root, I, G, observers, mu, alpha, only_return_cost=True):
    """Computes the cost of a greedy EIF spreading tree whose "patient zero" is root.

    Parameters
    ----------
    root : graph index - str, int, etc.
        The vertex rooting
    I : NetworkX Graph
        The infection subgraph observed at a particular time step
    G : NetworkX Graph
        The original graph the infection process was run on.
        I is a subgraph of G induced by infected vertices at observation time.
    observers : dict
        observers dictionary, a la contagion.get_observers()
    mu : float
        a constant, estimated by _estimate_mu()
    alpha : list
        list of vertices in observers dictionary, sorted from earliest to latest timestamp-wise
    only_return_cost : bool
        if True, only returns the calculated spreading tree's cost

    Returns
    -------
    float or (NetworkX Graph, float, dict)
        the spreading tree cost alone, or (tree, cost, timestamps) when
        ``only_return_cost`` is False

    Notes
    -----
    This is the greedy algorithm outlined in Section 3 of [1]_: observers are
    attached to the growing spreading tree one at a time (earliest first) via
    the cheapest "modified shortest path", with interior timestamps
    interpolated along each attached path.

    References
    ----------
    .. [1] K. Zhu, Z. Chen, L. Ying,
        "Locating Contagion Sources in Networks with Partial Timestamps"
        Data Mining and Knowledge Discovery, 2016
        https://link.springer.com/article/10.1007/s10618-015-0435-9
    """
    timestamps = observers.copy()
    # If the hypothesized root was never observed, back-date it one mean
    # edge-delay before the earliest observation.
    if root not in timestamps:
        timestamps[root] = min(timestamps.values()) - mu
    spreading_tree = nx.Graph()
    spreading_tree.add_nodes_from([root])
    spreading_tree_cost = 0
    # Attach each observer (earliest first) to the tree via its cheapest path.
    for a in alpha:
        path = None
        path_cost = np.inf
        for m in spreading_tree:
            # find a modified shortest path (msp) from m to a: other
            # observers and other tree nodes are removed so the path cannot
            # route through them
            surrogate = G.copy()
            to_remove = [v for v in alpha if v != a] + [
                v for v in spreading_tree if v != m
            ]
            surrogate.remove_nodes_from(to_remove)
            try:
                msp = nx.shortest_path(surrogate, source=m, target=a)
            except:
                # no msp exists
                # NOTE(review): bare except -- presumably meant to catch
                # nx.NetworkXNoPath / nx.NodeNotFound only; confirm.
                continue
            # calculate msp's cost: quadratic penalty for deviating from the
            # mean per-hop delay mu
            msp_len = len(msp)
            msp_cost = msp_len * (
                (((timestamps[a] - timestamps[m]) / msp_len) - mu) ** 2
            )
            # compare cost to existing minimum path cost
            if msp_cost < path_cost:
                path = msp
                path_cost = msp_cost
        # observer a is unreachable from the current tree -- skip it
        if isinstance(path, type(None)):
            continue
        # add path to spreading tree
        for i in range(len(path) - 1):
            spreading_tree.add_edge(path[i], path[i + 1])
        # update observers/timestamps: linearly interpolate timestamps along
        # the attached path between its endpoints
        path_len_iter = 1
        path_factor = (timestamps[path[-1]] - timestamps[path[0]]) / len(path)
        for g in path:
            timestamps[g] = timestamps[path[0]] + (path_len_iter - 1) * path_factor
            path_len_iter += 1
        # update tree cost
        spreading_tree_cost += path_cost
    # add remaining nodes: BFS-like sweep attaching each leftover node to any
    # tree neighbor, one mean delay after its parent
    not_in_tree = [v for v in G if v not in spreading_tree]
    new_len = len(not_in_tree)
    while new_len > 0:
        for v in not_in_tree:
            breaker = False
            for p in G.neighbors(v):
                if breaker:
                    break
                if p in spreading_tree:
                    spreading_tree.add_edge(v, p)
                    timestamps[v] = (
                        timestamps[p] + mu
                    )  # cost does not change in this step
                    breaker = True
        old_len = new_len
        not_in_tree = [v for v in G if v not in spreading_tree]
        new_len = len(not_in_tree)
        # no progress this sweep (disconnected remainder) -- stop
        if new_len == old_len:
            break
    if only_return_cost:
        return spreading_tree_cost
    return spreading_tree, spreading_tree_cost, timestamps
def _estimate_mu(G, observers):
    """Estimates the constant mu from the quadratic tree cost function.

    Parameters
    ----------
    G : NetworkX Graph
        The network for the diffusion process to run on
    observers : dict
        observers dictionary, a la contagion.get_observers()

    Notes
    -----
    The mu parameter is introduced in Equation 2 of [1]_: the mean per-hop
    delay, estimated as the ratio of summed pairwise timestamp gaps to summed
    pairwise shortest-path distances over all observer pairs.

    Some very minor details are modified for extensibility throughout cosasi.
    For instance, the observers record is a dictionary of time steps, rather than
    a list of time stamps, and non-observers carry an infection time of infinity
    rather than "*" as in the paper [1]_.

    References
    ----------
    .. [1] K. Zhu, Z. Chen, L. Ying,
        "Locating Contagion Sources in Networks with Partial Timestamps"
        Data Mining and Knowledge Discovery, 2016
        https://link.springer.com/article/10.1007/s10618-015-0435-9
    """
    numerator = 0
    denominator = 0
    for v in observers:
        for w in observers:
            # Skip self-pairs and the infinity placeholder entries.
            if v == w or v == np.inf or w == np.inf:
                continue
            numerator += abs(observers[v] - observers[w])
            denominator += nx.shortest_path_length(G, source=v, target=w)
    return numerator / denominator
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/single_source/tests/test_netsleuth.py | xflow/method/cosasi/source_inference/single_source/tests/test_netsleuth.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import numpy as np
import random
import math
import cosasi
class TestNETSLEUTH(TestCase):
    """Integration tests for the single-source NETSLEUTH estimator."""

    def setUp(self):
        """Seeded SI contagion on a random tree; the infected subgraph at t=25
        is the fixture."""
        self.G = nx.random_tree(n=500, seed=0)
        contagion = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        contagion.forward(50)
        self.t = 25
        self.I = contagion.get_infected_subgraph(self.t)
        return None

    def test_netsleuth(self):
        """netsleuth returns a SingleSourceResult with finite scores exactly
        for the infected nodes."""
        result = cosasi.source_inference.single_source.netsleuth(self.I, self.G)
        assert isinstance(
            result, cosasi.source_inference.source_results.SingleSourceResult
        )
        # only nodes of the infection subgraph get a finite score
        noninf_vals = [i for i in result.data["scores"].values() if i != -np.inf]
        assert len(noninf_vals) == len(self.I)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/single_source/tests/test_lisn.py | xflow/method/cosasi/source_inference/single_source/tests/test_lisn.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import numpy as np
import random
import cosasi
class TestLISN(TestCase):
    """Integration tests for the single-source LISN estimator and its
    distance-probability helper."""

    def setUp(self):
        """Seeded SI contagion on a random tree; the infected subgraph at t=25
        is the fixture."""
        self.G = nx.random_tree(n=500, seed=0)
        contagion = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        contagion.forward(50)
        self.t = 25
        self.I = contagion.get_infected_subgraph(self.t)
        return None

    def test_distance_prob(self):
        """distance_prob is monotone: nondecreasing in time, nonincreasing in
        distance."""
        # for constant distance, probability should weakly increase with time
        last_prob = -np.inf
        n = random.randint(1, 10)
        for t in range(1, 10):
            prob = cosasi.source_inference.single_source.distance_prob(t, n, 0.05)
            assert prob >= last_prob
            last_prob = prob
        # for constant time, probability should weakly decrease with distance
        last_prob = np.inf
        t = random.randint(1, 10)
        for n in range(1, 10):
            prob = cosasi.source_inference.single_source.distance_prob(t, n, 0.05)
            assert prob <= last_prob
            last_prob = prob

    def test_lisn(self):
        """lisn returns a SingleSourceResult whose finite scores are
        log-probabilities over exactly the infected nodes."""
        result = cosasi.source_inference.single_source.lisn(self.I, self.G, self.t)
        # type check
        assert isinstance(
            result, cosasi.source_inference.source_results.SingleSourceResult
        )
        # -inf only for nodes outside infection subgraph
        vals = list(result.data["scores"].values())
        noninf_vals = [i for i in result.data["scores"].values() if i != -np.inf]
        assert len(noninf_vals) == len(self.I) and len(vals) == len(self.G)
        # scores are log probabilities
        # NOTE(review): the repeated max(noninf_vals) makes the middle
        # comparison vacuous -- probably min(noninf_vals) was intended.
        assert -np.inf <= max(noninf_vals) <= max(noninf_vals) <= 0
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/single_source/tests/test_earliest_infection_first.py | xflow/method/cosasi/source_inference/single_source/tests/test_earliest_infection_first.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import numpy as np
import random
import cosasi
class TestEarliestInfectionFirst(TestCase):
    """Unit tests for the earliest-infection-first observer-based algorithm."""

    def setUp(self):
        """Simulate SI spread on a dense G(n, p) graph and record 10 observers."""
        self.G = nx.fast_gnp_random_graph(100, 0.25)
        contagion = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.01, number_infected=1
        )
        contagion.forward(50)
        self.t = 20
        self.I = contagion.get_infected_subgraph(self.t)
        self.observers = contagion.get_observers(10)
        return None

    def test_earliest_infection_first_disconnected(self):
        """A disconnected graph G must be rejected with a ValueError."""
        H = nx.disjoint_union(self.G, self.G)
        with pytest.raises(ValueError):
            cosasi.single_source.earliest_infection_first(
                I=self.G, G=H, observer_dict=self.observers
            )

    def test_earliest_infection_first(self):
        """Result is a SingleSourceResult with a positive score per scored node."""
        result = cosasi.source_inference.single_source.earliest_infection_first(
            I=self.I, G=self.G, observer_dict=self.observers
        )
        assert isinstance(
            result, cosasi.source_inference.source_results.SingleSourceResult
        )
        scores = result.data["scores"]
        assert isinstance(scores, dict)
        for node in scores:
            assert node in self.G.nodes()
            assert isinstance(scores[node], (float, int)) and scores[node] > 0
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/single_source/tests/test_short_fat_tree.py | xflow/method/cosasi/source_inference/single_source/tests/test_short_fat_tree.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import numpy as np
import random
import math
import cosasi
class TestShortFatTree(TestCase):
    """Unit tests for the Short-Fat-Tree single-source localization algorithm."""

    def setUp(self):
        """Simulate an SI contagion on a random tree and keep an infected subgraph."""
        self.G = nx.random_tree(n=500, seed=0)
        contagion = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        contagion.forward(50)
        self.t = 25
        self.I = contagion.get_infected_subgraph(self.t)
        return None

    def test_weighted_boundary_node_degree(self):
        """WBND returns a number; verify a small worked example by hand."""
        # basic type check on a randomly chosen infected node
        wbnd = cosasi.source_inference.single_source.weighted_boundary_node_degree(
            self.I, self.G, random.choice(list(self.I.nodes()))
        )
        assert isinstance(wbnd, (int, float))

        # hand-built tree with infected subgraph {1, 2, 3, 4, 5}
        G = nx.Graph()
        G.add_edges_from(
            [
                (1, 2),
                (2, 5),
                (2, 6),
                (2, 7),
                (1, 4),
                (4, 8),
                (4, 9),
                (4, 10),
                (1, 3),
                (3, 11),
            ]
        )
        I = G.subgraph([1, 2, 3, 4, 5])

        # node 1 touches no boundary nodes, so its WBND is zero
        wbnd_1 = cosasi.source_inference.single_source.weighted_boundary_node_degree(
            I, G, 1, abs(math.log(0.5))
        )
        assert wbnd_1 == 0

        # node 2's boundary is {3, 4}; its WBND strictly exceeds node 1's
        (
            wbnd_2,
            v_boundary,
        ) = cosasi.source_inference.single_source.weighted_boundary_node_degree(
            I, G, 2, abs(math.log(0.5)), True
        )
        assert sorted(v_boundary) == [3, 4]
        assert wbnd_2 > wbnd_1

    def test_short_fat_tree(self):
        """SFT returns a SingleSourceResult; finite scores stay within the infection."""
        result = cosasi.source_inference.single_source.short_fat_tree(self.I, self.G)
        assert isinstance(
            result, cosasi.source_inference.source_results.SingleSourceResult
        )
        # score values are WBNDs (checked above); here just bound how many are finite
        finite_scores = [s for s in result.data["scores"].values() if s != -np.inf]
        assert len(finite_scores) <= len(self.I)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/single_source/tests/test_rumor_centrality.py | xflow/method/cosasi/source_inference/single_source/tests/test_rumor_centrality.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import numpy as np
import random
import cosasi
class TestRumorCentrality(TestCase):
    """Unit tests for rumor-centrality source localization."""

    def setUp(self):
        """Simulate an SI contagion on a random tree and keep an infected subgraph."""
        self.G = nx.random_tree(n=500, seed=0)
        contagion = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        contagion.forward(50)
        self.t = 25
        self.I = contagion.get_infected_subgraph(self.t)
        return None

    def test_rumor_centrality_root(self):
        """Rooted rumor centrality is positive in both scalar and dict return modes."""
        for _ in range(5):
            v = random.choice(list(self.I.nodes()))
            scalar = cosasi.single_source.rumor_centrality_root(self.I, v, False)
            assert isinstance(scalar, (int, float)) and scalar > 0
            per_node = cosasi.single_source.rumor_centrality_root(self.I, v, True)
            for u in per_node:
                assert u in self.I.nodes()
                assert per_node[u] > 0
        return None

    def test_rumor_centrality(self):
        """Full rumor centrality rejects bad input and scores all of G positively."""
        with pytest.raises(ValueError):
            cosasi.single_source.rumor_centrality(self.I, self.G, "BAD INPUT")
        result = cosasi.single_source.rumor_centrality(
            self.I, self.G, None, False, False
        )
        assert isinstance(
            result, cosasi.source_inference.source_results.SingleSourceResult
        )
        scores = result.data["scores"]
        assert isinstance(scores, dict)
        for node in scores:
            assert node in self.G.nodes()
            assert isinstance(scores[node], (float, int)) and scores[node] > 0

    def test_rumor_centrality_root_example(self):
        """Verifies worked example from Section III.A of [1]_.

        References
        ----------
        .. [1] S., Devavrat and T. Zaman,
            "Rumors in a network: Who's the culprit?."
            IEEE Transactions on Information Theory, 2011
            https://devavrat.mit.edu/wp-content/uploads/2017/10/Rumors-in-a-network-whos-the-culprit.pdf
        """
        I = nx.Graph()
        I.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5)])
        # both return modes must agree with the paper's value of 8 for root 1
        assert (
            cosasi.single_source.rumor_centrality_root(I, 1, False)
            == cosasi.single_source.rumor_centrality_root(I, 1, True)[1]
            == 8
        )
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/single_source/tests/__init__.py | xflow/method/cosasi/source_inference/single_source/tests/__init__.py | python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false | |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/method/cosasi/source_inference/single_source/tests/test_jordan.py | xflow/method/cosasi/source_inference/single_source/tests/test_jordan.py | import os, sys
sys.path.insert(0, os.getcwd())
from unittest import TestCase
import pytest
import networkx as nx
import numpy as np
import cosasi
class TestJordan(TestCase):
    """Unit tests for Jordan-centrality source localization."""

    def setUp(self):
        """Simulate an SI contagion on a random tree and keep an infected subgraph."""
        self.G = nx.random_tree(n=500, seed=0)
        contagion = cosasi.StaticNetworkContagion(
            G=self.G, model="si", infection_rate=0.1, number_infected=1
        )
        contagion.forward(50)
        self.t = 25
        self.I = contagion.get_infected_subgraph(self.t)
        return None

    def test_jordan_centrality(self):
        """Scores are soft eccentricities in [0, 1]; the argmax set is the center."""
        result = cosasi.source_inference.single_source.jordan_centrality(self.I, self.G)
        # type check
        assert isinstance(
            result, cosasi.source_inference.source_results.SingleSourceResult
        )
        scores = result.data["scores"]
        # finite soft-eccentricity values must fall in [0, 1]
        finite = [s for s in scores.values() if s != -np.inf]
        assert all(0 <= s <= 1 for s in finite)
        # the set of top-scoring nodes coincides with the infection graph's center
        best = max(scores.values())
        top_nodes = [v for v in scores if scores[v] == best]
        assert sorted(nx.center(self.I)) == sorted(top_nodes)
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/diffusion/LT.py | xflow/diffusion/LT.py | import torch_geometric.datasets as ds
import random
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
def LT(g, config, seed, rounds=100):
    """Run repeated Linear Threshold simulations and collect final spread sizes.

    Parameters
    ----------
    g : networkx.Graph
        Contact network; per-edge thresholds are read from `config`.
    config : ndlib.models.ModelConfig.Configuration
        Shared configuration carrying the 'threshold' value for every edge.
    seed : list
        Nodes initially placed in the 'Infected' state.
    rounds : int, optional
        Number of independent simulations to run (default 100).

    Returns
    -------
    list[int]
        Infected-node count after 5 model iterations, one entry per round.
    """
    result = []
    for _ in range(rounds):
        model = ep.ThresholdModel(g)
        run_config = mc.Configuration()
        run_config.add_model_initial_configuration('Infected', seed)
        # copy the shared per-edge thresholds into this run's configuration
        for a, b in g.edges():
            run_config.add_edge_configuration(
                'threshold', (a, b), config.config["edges"]['threshold'][(a, b)]
            )
        # each node activates once enough neighbours are infected; its personal
        # threshold is drawn uniformly from {0.01, ..., 0.19} every round
        for node in g.nodes():
            run_config.add_node_configuration(
                "threshold", node, round(random.randrange(1, 20) / 100, 2)
            )
        model.set_initial_status(run_config)
        iterations = model.iteration_bunch(5)
        result.append(iterations[4]['node_count'][1])
    return result
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/diffusion/IC.py | xflow/diffusion/IC.py | import torch_geometric.datasets as ds
import random
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
# diffusion models
def IC(g, config, seed, rounds=100):
    """Run repeated Independent Cascade simulations and collect spread sizes.

    Parameters
    ----------
    g : networkx.Graph
        Contact network; per-edge thresholds are read from `config`.
    config : ndlib.models.ModelConfig.Configuration
        Shared configuration carrying the 'threshold' value for every edge.
    seed : list
        Nodes initially placed in the 'Infected' state.
    rounds : int, optional
        Number of independent simulations to run (default 100).

    Returns
    -------
    list[int]
        One entry per round: the sum of per-step infected counts over 5
        iterations. In the IC model nodes stay 'Infected' for a single step
        before turning 'Removed', so this tallies nodes ever activated.
    """
    result = []
    for _ in range(rounds):
        model = ep.IndependentCascadesModel(g)
        run_config = mc.Configuration()
        run_config.add_model_initial_configuration('Infected', seed)
        # copy the shared per-edge thresholds into this run's configuration
        for a, b in g.edges():
            run_config.add_edge_configuration(
                'threshold', (a, b), config.config["edges"]['threshold'][(a, b)]
            )
        model.set_initial_status(run_config)
        iterations = model.iteration_bunch(5)
        result.append(sum(iterations[j]['node_count'][1] for j in range(5)))
    return result
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/diffusion/SI.py | xflow/diffusion/SI.py | import torch_geometric.datasets as ds
import random
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
def SI(g, config, seed, rounds=100, beta=0.1):
    """Run repeated SI (susceptible-infected) simulations and collect spread sizes.

    Parameters
    ----------
    g : networkx.Graph
        Contact network; per-edge thresholds are read from `config`.
    config : ndlib.models.ModelConfig.Configuration
        Shared configuration carrying the 'threshold' value for every edge.
    seed : list
        Nodes initially placed in the 'Infected' state.
    rounds : int, optional
        Number of independent simulations to run (default 100).
    beta : float, optional
        Per-contact infection rate (default 0.1).

    Returns
    -------
    list[int]
        Infected-node count after 5 model iterations, one entry per round.
    """
    result = []
    for _ in range(rounds):
        model = ep.SIModel(g)
        run_config = mc.Configuration()
        run_config.add_model_initial_configuration('Infected', seed)
        run_config.add_model_parameter('beta', beta)
        # copy the shared per-edge thresholds into this run's configuration
        for a, b in g.edges():
            run_config.add_edge_configuration(
                'threshold', (a, b), config.config["edges"]['threshold'][(a, b)]
            )
        model.set_initial_status(run_config)
        iterations = model.iteration_bunch(5)
        result.append(iterations[4]['node_count'][1])
    return result
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/diffusion/__init__.py | xflow/diffusion/__init__.py | from .SI import SI
from .IC import IC
from .LT import LT
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/llm/graph_generation.py | xflow/llm/graph_generation.py | import networkx as nx
import torch_geometric.datasets as ds
import random
import ndlib
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
from torch_geometric.datasets import Planetoid
def connSW(n, beta=None):
    """Build a connected Watts-Strogatz small-world graph with edge thresholds.

    Parameters
    ----------
    n : int
        Number of nodes (each joined to 10 neighbours, rewiring prob. 0.1).
    beta : float, optional
        If truthy, every edge uses this threshold; otherwise each edge gets a
        random value in [0.40, 0.79].

    Returns
    -------
    (networkx.Graph, ndlib.models.ModelConfig.Configuration)
        The weighted graph and a configuration carrying the same per-edge
        'threshold' values.
    """
    g = nx.connected_watts_strogatz_graph(n, 10, 0.1)
    config = mc.Configuration()
    for a, b in g.edges():
        # draw unconditionally so the random stream matches regardless of beta
        weight = round(random.randrange(40, 80) / 100, 2)
        if beta:
            weight = beta
        g[a][b]['weight'] = weight
        config.add_edge_configuration("threshold", (a, b), weight)
    return g, config
def BA(n=1000, m=5):
    """Build a Barabasi-Albert graph with random edge thresholds.

    Generalized from the original fixed-size version: callers with no
    arguments get the same (1000, 5) graph as before.

    Parameters
    ----------
    n : int, optional
        Number of nodes (default 1000).
    m : int, optional
        Edges attached from each new node (default 5).

    Returns
    -------
    (networkx.Graph, ndlib.models.ModelConfig.Configuration)
        The graph, with each edge weighted by a random threshold in
        [0.40, 0.79], and a configuration carrying the same values.
    """
    g = nx.barabasi_albert_graph(n, m)
    config = mc.Configuration()
    for a, b in g.edges():
        weight = round(random.randrange(40, 80) / 100, 2)
        g[a][b]['weight'] = weight
        config.add_edge_configuration("threshold", (a, b), weight)
    return g, config
def ER(n=5000, p=0.002):
    """Build a connected Erdos-Renyi graph with random edge thresholds.

    Re-samples G(n, p) until a connected instance is drawn. Generalized from
    the original fixed-size version: no-argument callers get the same
    (5000, 0.002) graph as before.

    Parameters
    ----------
    n : int, optional
        Number of nodes (default 5000).
    p : float, optional
        Edge probability (default 0.002). NOTE(review): for small n*p the
        connectivity rejection loop may take many attempts.

    Returns
    -------
    (networkx.Graph, ndlib.models.ModelConfig.Configuration)
        The graph, with each edge weighted by a random threshold in
        [0.40, 0.79], and a configuration carrying the same values.
    """
    g = nx.erdos_renyi_graph(n, p)
    while not nx.is_connected(g):  # idiomatic form of `== False`
        g = nx.erdos_renyi_graph(n, p)
    config = mc.Configuration()
    for a, b in g.edges():
        weight = round(random.randrange(40, 80) / 100, 2)
        config.add_edge_configuration("threshold", (a, b), weight)
        g[a][b]['weight'] = weight
    return g, config
def _planetoid_graph(name):
    """Load a Planetoid citation graph and prepare it for diffusion runs.

    Keeps only the largest connected component and attaches a random
    threshold in [0.40, 0.79] to every edge (both as an edge 'weight'
    attribute and in the returned ndlib configuration).

    Parameters
    ----------
    name : str
        Planetoid dataset name: 'Cora', 'CiteSeer', or 'PubMed'.

    Returns
    -------
    (networkx.Graph, ndlib.models.ModelConfig.Configuration)
    """
    dataset = Planetoid(root='./Planetoid', name=name)
    data = dataset[0]
    edges = (data.edge_index.numpy()).T.tolist()
    G = nx.from_edgelist(edges)
    # restrict to the largest connected component
    c = max(nx.connected_components(G), key=len)
    g = G.subgraph(c).copy()
    config = mc.Configuration()
    for a, b in g.edges():
        weight = round(random.randrange(40, 80) / 100, 2)
        config.add_edge_configuration("threshold", (a, b), weight)
        g[a][b]['weight'] = weight
    return g, config


def CiteSeer():
    """CiteSeer citation graph (largest component) with random edge thresholds."""
    return _planetoid_graph('CiteSeer')


def PubMed():
    """PubMed citation graph (largest component) with random edge thresholds."""
    return _planetoid_graph('PubMed')


def Cora():
    """Cora citation graph (largest component) with random edge thresholds."""
    return _planetoid_graph('Cora')
def _amazon_graph(name):
    """Load an Amazon co-purchase graph and prepare it for diffusion runs.

    Relabels nodes to consecutive integers starting at 0 and attaches a
    random threshold in [0.05, 0.19] to every edge (both as an edge
    'weight' attribute and in the returned ndlib configuration).

    Parameters
    ----------
    name : str
        Amazon dataset name: 'Photo' or 'Computers'.

    Returns
    -------
    (networkx.Graph, ndlib.models.ModelConfig.Configuration)
    """
    dataset = ds.Amazon(root='./geo', name=name)
    data = dataset[0]
    edges = (data.edge_index.numpy()).T.tolist()
    G = nx.from_edgelist(edges)
    g = nx.convert_node_labels_to_integers(
        G, first_label=0, ordering='default', label_attribute=None
    )
    config = mc.Configuration()
    for a, b in g.edges():
        weight = round(random.randrange(5, 20) / 100, 2)
        config.add_edge_configuration("threshold", (a, b), weight)
        g[a][b]['weight'] = weight
    return g, config


def photo():
    """Amazon 'Photo' co-purchase graph with random edge thresholds."""
    return _amazon_graph('Photo')


def coms():
    """Amazon 'Computers' co-purchase graph with random edge thresholds."""
    return _amazon_graph('Computers')
XGraph-Team/XFlow | https://github.com/XGraph-Team/XFlow/blob/1efc1844d3940fb726324c2a72c5f4325690910a/xflow/llm/test.py | xflow/llm/test.py | import networkx as nx
import ndlib.models.ModelConfig as mc
import ndlib.models.epidemics as ep
from time import time
from graph_generation import Cora, CiteSeer, PubMed, connSW, ER, coms, photo
import matplotlib.pyplot as plt
#
# --- simulation parameters ---
size = 50
beta = 0.1    # infection rate
gamma = 0.01  # recovery rate

G, config = connSW(size, beta)

# model selection
model = ep.SIRModel(G)

# model configuration (the SIR model only needs the rate parameters, so the
# edge-threshold config returned by connSW is deliberately replaced here)
config = mc.Configuration()
config.add_model_parameter('beta', beta)
config.add_model_parameter('gamma', gamma)
config.add_model_parameter("fraction_infected", 0.1)  # initial infected fraction
model.set_initial_status(config)

# run the simulation for 10 steps
iterations = model.iteration_bunch(10)
print(iterations)

# copy each node's final compartment back onto the graph
for node, node_status in model.status.items():
    G.nodes[node]['status'] = node_status

# colour nodes by compartment for plotting
status_colors = {0: 'green',  # Susceptible
                 1: 'red',    # Infected
                 2: 'blue'}   # Recovered
colors = [attrs['status'] and status_colors[attrs['status']] or status_colors[0]
          for _, attrs in G.nodes(data=True)]
colors = [status_colors[attrs['status']] for _, attrs in G.nodes(data=True)]

# draw the graph and write it to disk
pos = nx.spring_layout(G)  # layout for visualization
nx.draw(G, pos, node_color=colors, with_labels=False, node_size=20)
plt.savefig('graph_infected_state.png', format='PNG')
plt.close()  # release figure memory
| python | MIT | 1efc1844d3940fb726324c2a72c5f4325690910a | 2026-01-05T07:14:40.788228Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.