| repo (string, lengths 2–99) | file (string, lengths 13–225) | code (string, lengths 0–18.3M) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
DRT
|
DRT-master/caffe/scripts/copy_notebook.py
|
#!/usr/bin/env python
"""
Takes as arguments:
1. the path to a JSON file (such as an IPython notebook).
2. the path to output file
If 'metadata' dict in the JSON file contains 'include_in_docs': true,
then copies the file to output file, appending the 'metadata' property
as YAML front-matter, adding the field 'category' with value 'notebook'.
"""
import os
import sys
import json
filename = sys.argv[1]
output_filename = sys.argv[2]
content = json.load(open(filename))
if 'include_in_docs' in content['metadata'] and content['metadata']['include_in_docs']:
yaml_frontmatter = ['---']
for key, val in content['metadata'].iteritems():
if key == 'example_name':
key = 'title'
if val == '':
val = os.path.basename(filename)
yaml_frontmatter.append('{}: {}'.format(key, val))
yaml_frontmatter += ['category: notebook']
yaml_frontmatter += ['original_path: ' + filename]
with open(output_filename, 'w') as fo:
fo.write('\n'.join(yaml_frontmatter + ['---']) + '\n')
fo.write(open(filename).read())
| 1,089
| 32.030303
| 87
|
py
|
DRT
|
DRT-master/caffe/data/coco/make_trainval.py
|
#!/usr/bin/env python
# This file is only meant to be run as a script with 0 arguments,
# and depends on steps 1-3 of README.md.
#
# It creates a "trainval" set by combining the COCO 2014 train and val sets.
# The trainval set is intended for use only when training a single final model
# for submission of results on the test set to the COCO evaluation server.
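# For reference, a sketch of what this script produces (paths are relative to the
# script's directory and assume the layout from steps 1-3 of README.md):
#   coco/annotations/captions_trainval2014.json   <- train + val annotations merged
#   coco2014_cocoid.trainval.txt                  <- one COCO image ID per line
#   coco/images/trainval2014/                     <- symlinks to every train/val image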
import os
import json
# get path to directory where this script is
script_dir = os.path.dirname(os.path.realpath(__file__))
anno_dir_path = '%s/coco/annotations' % script_dir
image_root = '%s/coco/images' % script_dir
abs_image_root = os.path.abspath(image_root)
out_coco_id_filename = '%s/coco2014_cocoid.trainval.txt' % script_dir
filename_pattern = 'captions_%s2014.json'
in_sets = ['train', 'val']
out_set = 'trainval'
path_pattern = '%s/%s' % (anno_dir_path, filename_pattern)
out_data = {}
for in_set in in_sets:
filename = path_pattern % in_set
print 'Loading input dataset from: %s' % filename
data = json.load(open(filename, 'r'))
for key, val in data.iteritems():
if type(val) == list:
if key not in out_data:
out_data[key] = []
out_data[key] += val
else:
if key not in out_data:
out_data[key] = val
assert out_data[key] == val
filename = path_pattern % out_set
print 'Dumping output dataset to: %s' % filename
json.dump(out_data, open(filename, 'w'))
out_ids = [str(im['id']) for im in out_data['images']]
print 'Writing COCO IDs to: %s' % out_coco_id_filename
with open(out_coco_id_filename, 'w') as coco_id_file:
coco_id_file.write('\n'.join(out_ids) + '\n')
# make a trainval dir with symlinks to all train+val images
out_dir = '%s/%s2014' % (image_root, out_set)
os.makedirs(out_dir)
print 'Writing image symlinks to: %s' % out_dir
for im in out_data['images']:
filename = im['file_name']
set_name = None
for in_set in in_sets:
if in_set in filename:
set_name = in_set
break
assert set_name is not None
real_path = '%s/%s2014/%s' % (abs_image_root, set_name, filename)
link_path = '%s/%s' % (out_dir, filename)
os.symlink(real_path, link_path)
| 2,204
| 34
| 78
|
py
|
DRT
|
DRT-master/caffe/data/coco/make_test.py
|
#!/usr/bin/env python
# This file is only meant to be run as a script with 0 arguments,
# and depends on steps 1-3 of README.md.
#
# It creates a test set from the image filenames of the test set.
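# For reference (file name is illustrative): an image named
# COCO_test2014_000000000001.jpg would yield an image entry
#   {"file_name": "COCO_test2014_000000000001.jpg", "id": 1}
# plus two placeholder annotations ("dummy caption 0" / "dummy caption 1"),
# so that the output matches the captions JSON structure built below.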
import json
import os
import re
# get path to directory where this script is
script_dir = os.path.dirname(os.path.realpath(__file__))
set_name = 'test2014'
image_root = '%s/coco/images/%s' % (script_dir, set_name)
out_filename = '%s/coco/annotations/captions_%s.json' % (script_dir, set_name)
image_ext = 'jpg'
imname_re = re.compile(r'COCO_%s_(?P<image_id>\d+)\.%s' % (set_name, image_ext))
full_image_ext = '.%s' % image_ext
image_filenames = filter(lambda f: f.endswith(full_image_ext), os.listdir(image_root))
print 'Creating dummy annotation file for %d images at: %s' % \
(len(image_filenames), out_filename)
out_data = {'type': 'captions', 'images': [], 'annotations': [],
'licenses': [], 'info': {}}
for index, filename in enumerate(image_filenames):
match = imname_re.match(filename)
if match is None: raise Exception('Unsupported filename: %s' % filename)
image_id = int(match.group('image_id'))
out_data['images'].append({'file_name': filename, 'id': image_id})
for dummy_index in range(2):
annotation = {'caption': 'dummy caption %d' % dummy_index,
'id': index, 'image_id': image_id}
out_data['annotations'].append(annotation)
with open(out_filename, 'w') as out_file:
json.dump(out_data, out_file)
| 1,490
| 38.236842
| 86
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/evaluation/eval_glossary.py
|
import sys
from eval_acronym import read_manual_acrn_eval
from eval_concept import read_concept_answer
from tqdm import tqdm
sys.path.append(".")
from evaluation import utils
import jsonlines
import os
import matplotlib.pyplot as plt
from matplotlib_venn import *
def read_glossary(file_path):
cpt_set = set()
with jsonlines.open(file_path) as fin:
for item in fin:
cpt = item.get("concepts", item.get("short", None))
if cpt is not None:
cpt_set.add(cpt)
return cpt_set
def find_concpet_in_art(art_cpts, target_cpt):
res = set()
for tcpt in target_cpt:
if tcpt in art_cpts:
res.add(tcpt)
else:
for cpt in art_cpts:
if utils.find_concept_in_text(cpt, tcpt) or utils.find_concept_in_text(
tcpt, cpt
):
res.add(tcpt)
return res
def run(proj_name):
data_dir = os.path.join("./data/projects", proj_name)
s_art, t_art, link_dict, cpts_set = utils.read_project(data_dir)
info = dict()
for d in ["bot_up", "top_down"]:
out_dir = os.path.join("./output", proj_name, d)
eval_dir = os.path.join("./evaluation", proj_name, d)
def_index = utils.read_definition(out_dir, file_name="definition_sel.jsonl")
def_ans = read_concept_answer(eval_dir, "def_manual_eval.txt")
def_index = set([x for x in def_index if x in def_ans])
acrn_index = utils.read_acronym(out_dir, file_name="acronym_sel.jsonl")
acrn_ans = read_manual_acrn_eval(eval_dir)
acrn_index = set([x for x in acrn_index if x in acrn_ans])
ctx_index = utils.read_context(out_dir, file_name="context_sel.jsonl")
ctx_ans = read_concept_answer(eval_dir, "ctx_manual_eval.txt")
ctx_index = set([x for x in ctx_index if x in ctx_ans])
d_cpts = set()
d_cpts.update(acrn_index)
d_cpts.update(ctx_index)
d_cpts.update(def_index)
info[d] = d_cpts
gls_cpts = set()
gls_cpts.update(read_glossary(os.path.join(data_dir, "glossary_acronym.jsonl")))
gls_cpts.update(read_glossary(os.path.join(data_dir, "glossary_definition.jsonl")))
# gen numbers
gls_in_art = find_concpet_in_art(cpts_set, gls_cpts)
top_down = find_concpet_in_art(cpts_set, info["top_down"])
bot_up = find_concpet_in_art(cpts_set, info["bot_up"])
venn_data = {}
venn_data["111"] = gls_in_art & top_down & bot_up
venn_data["110"] = (top_down & bot_up) - venn_data["111"]
venn_data["101"] = (top_down & gls_in_art) - venn_data["111"]
venn_data["011"] = (bot_up & gls_in_art) - venn_data["111"]
venn_data["100"] = top_down - bot_up - gls_in_art
venn_data["010"] = bot_up - top_down - gls_in_art
venn_data["001"] = gls_in_art - top_down - bot_up
res = dict()
for k in venn_data:
res[k] = len(venn_data[k])
draw_venn3(res, proj_name)
def draw_venn3(res, proj_name):
out = venn3(
subsets=res, set_labels=("Top-down", "Bottom-up", "Glossary"), alpha=0.5
)
venn3_circles(subsets=res, linestyle="solid")
for text in out.set_labels:
text.set_fontsize(16)
for i in range(len(out.subset_labels)):
if out.subset_labels[i] is not None:
            out.subset_labels[i].set_fontsize(16)
plt.savefig(f"./figures/venn3_{proj_name}.png")
plt.clf()
if __name__ == "__main__":
for proj_name in tqdm(["CCHIT", "CM1", "PTC"]):
res = run(proj_name)
| 3,495
| 33.613861
| 87
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/evaluation/eval_acronym.py
|
from collections import defaultdict
from typing_extensions import get_args
import jsonlines
import os
from pattmatch import kmp
import sys
from tqdm import tqdm
sys.path.append(".")
sys.path.append("..")
from domain_data_collection.relation_graph import RelationGraph
import utils, argparse
def acronym_as_explain(acrns, content, acrn_index):
res = set()
for short in acrns:
for long in acrn_index[short]:
if utils.find_concept_in_text(content, long):
res.add((short, long))
return res
def read_manual_acrn_eval(dir_path, file_name="acronym_manual_eval.csv"):
file_path = os.path.join(dir_path, file_name)
answer = set()
with open(file_path) as fin:
for line in fin:
items = line.split(",")
acrn, label = items[0], items[1]
if "1" in label:
answer.add(acrn)
return answer
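# The manual-evaluation CSV read above is assumed to hold one acronym per line in the
# form "<acronym>,<label>", where a label containing "1" marks the acronym as correct,
# e.g. (hypothetical rows):
#   EHR,1
#   FOO,0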
# Total acronyms
# How many acronyms appeared in the artifacts
# Acronym ambiguity distribution
# Acronym as explanation: absolute value
def evaluate_acronym(proj_dir, acrn_index, eval_res_dir, answer):
if not os.path.isdir(eval_res_dir):
os.makedirs(eval_res_dir)
s_art, t_art, link_dict, cpts_set = utils.read_project(proj_dir)
art_acrn = dict()
art_acrn_gt = defaultdict(set)
concept_dict = utils.read_concept_dict(proj_dir)
for sid in tqdm(s_art, desc="scan source artifact"):
art_acrn[sid] = utils.find_acronym_in_text(
text=s_art[sid], acrn_index=acrn_index
)
for tid in tqdm(t_art, desc="scan target artifact"):
art_acrn[tid] = utils.find_acronym_in_text(
text=t_art[tid], acrn_index=acrn_index
)
for id in concept_dict:
cpts = concept_dict[id]
for cpt in cpts:
if cpt.isupper():
art_acrn_gt[id].add(cpt)
has_acrn_cnt = 0
acronym_in_art = set()
for id in art_acrn_gt:
if len(art_acrn_gt[id]) > 0:
has_acrn_cnt += 1
acronym_in_art.update(art_acrn_gt[id])
acrn_has_long_names = acronym_in_art & acrn_index.keys()
acrn_long_name_file = os.path.join(eval_res_dir, "acrn_in_art_has_long_name.txt")
true_acrn, false_acrn = 0, 0
with open(acrn_long_name_file, "w") as fout:
for a in acrn_has_long_names:
fout.write(f"{a}:{acrn_index[a]}\n")
if a in answer:
true_acrn += 1
else:
false_acrn += 1
stat = {
"how many acronym are extracted from corpus": len(acrn_index),
"how mnay acronym are detected in artifacts": len(acronym_in_art),
"How many acronym detected in artifact have long names": len(
acronym_in_art.intersection(acrn_index.keys())
),
"how many artifacts": len(s_art) + len(t_art),
"how many acronyms find in artifacts are correct":true_acrn,
"how many acronyms find in artifacts are incorrect": false_acrn,
}
    utils.write_dict(stat, os.path.join(eval_res_dir, "acronym_stat.txt"))
return acronym_in_art
def eval_overlap(td, bu, acronym_in_art, out_file):
td_unique = (set(td.keys()) - set(bu.keys())) & acronym_in_art
bu_unique = (set(bu.keys()) - set(td.keys())) & acronym_in_art
common = set(td.keys()) & set(bu.keys()) & acronym_in_art
with open(out_file, "w") as fout:
fout.write(f"top_down unique:{len(td_unique)}\n")
fout.write(f"bot_up unique:{len(bu_unique)}\n")
fout.write(f"common: {len(common)}\n")
if __name__ == "__main__":
proj_dir, out_dir, eval_dir = utils.get_args()
top_down_acrn_index = utils.read_acronym(
os.path.join(out_dir, "top_down"), file_name="acronym_sel.jsonl"
)
top_down_acrn_answer = read_manual_acrn_eval(os.path.join(eval_dir, "top_down"))
bot_up_acrn_index = utils.read_acronym(
os.path.join(out_dir, "bot_up"), file_name="acronym_sel.jsonl"
)
bot_up_acrn_answer = read_manual_acrn_eval(os.path.join(eval_dir, "bot_up"))
both_acrn_index = dict()
both_acrn_index.update(top_down_acrn_index)
both_acrn_index.update(bot_up_acrn_index)
both_acrn_answer = set()
both_acrn_answer.update(top_down_acrn_answer)
both_acrn_answer.update(bot_up_acrn_answer)
eval_out = [os.path.join(eval_dir, x) for x in ["both", "top_down", "bot_up"]]
arcn_list = [both_acrn_index, top_down_acrn_index, bot_up_acrn_index]
answers = [both_acrn_answer, top_down_acrn_answer, bot_up_acrn_answer]
for e, a, ans in zip(eval_out, arcn_list, answers):
acronym_in_art = evaluate_acronym(proj_dir, a, e, ans)
if e.endswith("both"):
overlap_file = os.path.join(eval_dir, "both", "acrn_overlap.txt")
eval_overlap(
top_down_acrn_index, bot_up_acrn_index, acronym_in_art, overlap_file
)
| 4,850
| 34.933333
| 85
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/evaluation/utils.py
|
import argparse
from jsonlines import jsonlines
import pandas as pd
import os
from collections import defaultdict
import random
from pattmatch import kmp
def read_project(dir_path):
sarts = pd.read_csv(os.path.join(dir_path, "source_artifacts.csv"))
tarts = pd.read_csv(os.path.join(dir_path, "target_artifacts.csv"))
lks = pd.read_csv(os.path.join(dir_path, "links.csv"))
# annotated part
tokens = pd.read_csv(os.path.join(dir_path, "tokens.csv"))
token_dict = {}
concept_set = set()
with open(os.path.join(dir_path, "reduced_concepts_flat.txt")) as fin:
for c in fin:
concept_set.add(c.strip("\n\t\r "))
for _, row in tokens.iterrows():
token_dict[str(row["id"])] = eval(row["tokens"])
s_art, t_art, link_dict = dict(), dict(), defaultdict(set)
for _, row in sarts.iterrows():
id = str(row["id"])
if row["arts"] == row["arts"]:
s_art[id] = " ".join(token_dict[id])
for _, row in tarts.iterrows():
id = str(row["id"])
if row["arts"] == row["arts"]:
t_art[id] = " ".join(token_dict[id])
for _, row in lks.iterrows():
sid, tid = str(row["sid"]), str(row["tid"])
if sid in s_art and tid in t_art:
link_dict[sid].add(tid)
return s_art, t_art, link_dict, concept_set
def read_acronym(dir_path, file_name="acronym.jsonl"):
acr_file = os.path.join(dir_path, file_name)
acr_index = dict()
with jsonlines.open(acr_file) as fin:
for obj in fin:
if obj["short"].islower():
continue
options = set()
for long_arc in obj["long"]:
if long_arc.lower() not in options:
acr_index[obj["short"]] = set()
options.add(long_arc)
acr_index[obj["short"]].add(long_arc)
return acr_index
def read_definition(dir_path, file_name="definition.jsonl"):
defs = dict()
def_file = os.path.join(dir_path, file_name)
with jsonlines.open(def_file) as fin:
for o in fin:
defs[o["concept"]] = o["definition"]
return defs
def read_concept_dict(dir_path):
concept_dict = dict()
cpts = pd.read_csv(os.path.join(dir_path, "concepts.csv"))
for _, row in cpts.iterrows():
concept_dict[row["ids"]] = eval(row["phrase"])
return concept_dict
def read_context(dir_path, file_name="context.jsonl"):
context = dict()
ctx_file = os.path.join(dir_path, file_name)
with jsonlines.open(ctx_file) as fin:
for o in fin:
context[o["concept"]] = o["context"]
return context
def read_relation(dir_path, rel_type="clear"):
rels = defaultdict(defaultdict)
rel_path = os.path.join(dir_path, f"{rel_type}_relation.jsonl")
with jsonlines.open(rel_path) as fin:
for o in fin:
l, v, r = o["left"], o["verb"], o["right"]
rels[l][r] = v
rels[r][l] = v
return rels
def read_corpus(dir_path):
res = dict()
bot_up_file = os.path.join(dir_path, "bot_up_corpus.jsonl")
top_down_file = os.path.join(dir_path, "top_down_corpus.jsonl")
if os.path.isfile(bot_up_file):
with jsonlines.open(bot_up_file) as fin:
for o in fin:
res[o["query"]] = o["sentences"]
if os.path.isfile(top_down_file):
with jsonlines.open(top_down_file) as fin:
for o in fin:
res[o["query"]] = o["sentences"]
return res
def find_concept_in_text(text, concept):
lw_cpt_tokens = concept.lower().split()
lw_text_tokens = text.lower().split()
return len(kmp(lw_text_tokens, lw_cpt_tokens))
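# Usage sketch (assuming pattmatch.kmp returns the list of token positions where the
# pattern occurs): find_concept_in_text("the order entry system shall", "order entry")
# returns a non-zero match count, while a concept absent from the text returns 0.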
def find_acronym_in_text(text, acrn_index):
res = set()
text_tokens = text.split()
for short in acrn_index:
acrn_tokens = short.split()
if len(kmp(text_tokens, acrn_tokens)):
res.add(short)
return res
def write_dict(dict_res, file_path):
with open(file_path, "w") as fout:
for k, v in dict_res.items():
fout.write(f"{k}:{v}\n")
def sample_dict(d, file_path, k=20, col1="key", col2="value"):
    k = min(k, len(d))
    selected = random.sample(list(d.items()), k)
    # build the frame directly from the sampled pairs (DataFrame.append is deprecated)
    df = pd.DataFrame(selected, columns=[col1, col2])
    df.to_csv(file_path)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--proj_name", default="CCHIT")
parser.add_argument("--data_dir", default="./data")
parser.add_argument("--output_dir", default="./output")
parser.add_argument("--eval_dir", default="./evaluation")
args = parser.parse_args()
proj_dir = os.path.join(args.data_dir, "projects", args.proj_name)
out_dir = os.path.join(args.output_dir, args.proj_name)
eval_dir = os.path.join(args.eval_dir, args.proj_name)
return proj_dir, out_dir, eval_dir
| 4,932
| 30.621795
| 74
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/evaluation/eval_.py
|
import jsonlines
import pandas as pd
import os
from collections import defaultdict, Counter
from pattmatch import kmp
from tqdm import tqdm
from domain_data_collection.relation_graph import RelationGraph
import utils
def acronym_as_explain(s_acrns, t_acrns, acrn_index):
res = set()
t_longs = [x.lower() for x in t_acrns["long"]]
for short in s_acrns["short"]:
long = acrn_index[short].lower()
if long in t_longs:
res.add((short, long))
return res
# Total acronyms
# How many acronyms appeared in the artifacts
# Acronym ambiguity distribution
# Acronym as explanation: absolute value
def evaluate_acronym(proj_dir, output_dir, eval_res_dir="./eval_acrn"):
if not os.path.isdir(eval_res_dir):
os.makedirs(eval_res_dir)
acrn_exp_file = os.path.join(eval_res_dir, "acrn_explain_file.jsonl")
s_art, t_art, link_dict, concept_dict = utils.read_project(proj_dir)
acrn_index = utils.read_acronym(output_dir)
art_acrn = dict()
for sid in s_art:
art_acrn[sid] = utils.find_acronym_in_text(
text=s_art[sid], acrn_index=acrn_index
)
for tid in t_art:
art_acrn[tid] = utils.find_acronym_in_text(
text=t_art[tid], acrn_index=acrn_index
)
exp_res = []
for sid in link_dict:
s_acrns = art_acrn[sid]
for tid in link_dict[sid]:
t_acrns = art_acrn[tid]
explain = acronym_as_explain(s_acrns, t_acrns, acrn_index)
explain.update(acronym_as_explain(t_acrns, s_acrns, acrn_index))
if len(explain) > 0:
exp_res.append({"sid": sid, "tid": tid, "explains": list(explain)})
with jsonlines.open(acrn_exp_file, "w") as fout:
for o in exp_res:
fout.write(o)
return {
"acronym_num": len(acrn_index), # Total acronyms
}
def eval_definitions(sarts, tarts, links, kg_dir):
def find_definition_in_arts(arts, defs, concepts):
d_cnt, c_cnt = 0, 0
for id in tqdm(arts, desc="definition eval"):
content = arts[id]
lw_tks = [x.strip("().,") for x in content.lower().split()]
has_def, has_cpt = False, False
for d in defs:
if len(kmp(lw_tks, d.lower().split())) > 0:
has_def = True
break
for c in concepts:
if len(kmp(lw_tks, c.lower().split())) > 0:
has_cpt = True
break
d_cnt += 1 if has_def else 0
c_cnt += 1 if has_cpt else 0
return d_cnt / len(arts), c_cnt / len(arts)
def_eval_res = dict()
defs, concepts = set(), set()
def_file = os.path.join(kg_dir, "definition.jsonl")
cpt_file = os.path.join(kg_dir, "concept.jsonl")
with jsonlines.open(def_file) as fin:
for o in fin:
defs.add(o["concept"].lower())
with jsonlines.open(cpt_file) as fin:
for o in fin:
concepts.add(o["concept"].lower())
(
def_eval_res["sart_contain_def_ratio"],
def_eval_res["sart_contain_concept_ratio"],
) = find_definition_in_arts(sarts, defs, concepts)
(
def_eval_res["tart_contain_def_ratio"],
def_eval_res["tart_contain_concept_ratio"],
) = find_definition_in_arts(tarts, defs, concepts)
def_eval_res["concpet_has_definition"] = len(concepts.intersection(defs)) / len(
concepts
)
return def_eval_res
def eval_clear_relation(sarts, tarts, links, concepts, kg_dir):
general_concepts = {"information", "system", "ability", "results", "data", "time"}
rel_graph = RelationGraph()
rel_graph.load(kg_dir, link_file="clear_relation.jsonl")
clear_rel_eval_res = dict()
cpt_related_explain = Counter()
debug_links = []
for sid in tqdm(sarts, desc="process links"):
for tid in tarts:
if tid in links[sid]:
label = True
else:
label = False
has_related_concept = False
for scpt in concepts[sid]:
if scpt in general_concepts:
continue
for tcpt in concepts[tid]:
if tcpt in general_concepts:
continue
if rel_graph.g.is_reachable(scpt, tcpt) or rel_graph.g.is_reachable(
tcpt, scpt
):
has_related_concept = True
debug_links.append(
{"label": label, "s_concept": scpt, "t_concept": tcpt}
)
break
if has_related_concept:
cpt_related_explain[label] += 1
with jsonlines.open("debug_clear_cpt_relation", "w") as fout:
for o in debug_links:
fout.write(o)
clear_rel_eval_res["true_link_with_related_concepts"] = cpt_related_explain[True]
clear_rel_eval_res["false_link_with_related_concepts"] = cpt_related_explain[False]
return clear_rel_eval_res
def eval_vague_relation(sarts, tarts, links, concepts, kg_dir):
general_concepts = {"information", "system", "ability", "results", "data", "time"}
vague_relation = os.path.join(kg_dir, "vague_relation.jsonl")
rel_set = set()
debug_links = []
with jsonlines.open(vague_relation) as fin:
for o in fin:
left, right = o["left"].lower(), o["right"].lower()
rel_set.add((left, right))
clear_rel_eval_res = dict()
cpt_related_explain = Counter()
for sid in tqdm(sarts, desc="process links"):
for tid in tarts:
if tid in links[sid]:
label = True
else:
label = False
has_related_concept = False
for scpt in concepts[sid]:
if scpt in general_concepts:
continue
for tcpt in concepts[tid]:
if tcpt in general_concepts:
continue
if (scpt, tcpt) in rel_set or (tcpt, scpt) in rel_set:
has_related_concept = True
debug_links.append(
{"label": label, "s_concept": scpt, "t_concept": tcpt}
)
break
if has_related_concept:
cpt_related_explain[label] += 1
with jsonlines.open("debug_vague_cpt_relation", "w") as fout:
for o in debug_links:
fout.write(o)
clear_rel_eval_res["true_link_with_related_concepts"] = cpt_related_explain[True]
clear_rel_eval_res["false_link_with_related_concepts"] = cpt_related_explain[False]
return clear_rel_eval_res
if __name__ == "__main__":
dir_path = "../data/projects/CCHIT"
kg_dir = "../data/backup/www_BU/data/CCHIT/bup_res"
eval_output_dir = "../data/backup/www_BU/eval_res"
summary_file = os.path.join(eval_output_dir, "summary.jsonl")
if not os.path.isdir(eval_output_dir):
os.makedirs(eval_output_dir)
s_art, t_art, links, concepts, tokens = read_project(dir_path)
report = {}
dataset_info = {
"source #": len(s_art),
"target #": len(t_art),
"link #": len([y for x in links for y in links[x]]),
}
report["dataset_info"] = dataset_info
print(dataset_info)
acr_eval_res = evaluate_acronym(s_art, t_art, links, kg_dir)
report["acronym_eval"] = acr_eval_res
def_eval_res = eval_definitions(s_art, t_art, links, kg_dir)
report["definition_eval"] = def_eval_res
clear_rel_res = eval_clear_relation(s_art, t_art, links, concepts, kg_dir)
report["clear_relation_eval"] = clear_rel_res
print(clear_rel_res)
vague_rel_res = eval_vague_relation(s_art, t_art, links, concepts, kg_dir)
report["vague_relation_eval"] = vague_rel_res
print(vague_rel_res)
with jsonlines.open(summary_file, "w") as fout:
fout.write(report)
| 7,987
| 36.327103
| 88
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/evaluation/eval_concept.py
|
# how many concepts are detected in artifacts
# how many concepts have definitions
# how many concepts have context
# [manual/random sample] how good is the quality of the concept definition/context
import utils
import os
import json
def evaluate_concept(proj_dir, def_index, ctx_index, eval_dir, def_ans, ctx_ans):
if not os.path.isdir(eval_dir):
os.makedirs(eval_dir)
s_art, t_art, link_dict, cpts_set = utils.read_project(proj_dir)
def_cnt = 0
cpt_has_def, cpt_has_ctx = set(), set()
for d in def_index:
if d in cpts_set:
def_cnt += 1
cpt_has_def.add(d)
else:
for cpt in cpts_set:
if utils.find_concept_in_text(cpt, d):
def_cnt += 1
cpt_has_def.add(d)
break
ctx_cnt = 0
for c in ctx_index:
if c in cpts_set:
ctx_cnt += 1
cpt_has_ctx.add(c)
else:
for cpt in cpts_set:
if utils.find_concept_in_text(cpt, c):
ctx_cnt += 1
cpt_has_ctx.add(c)
break
file_has_cpt = 0
arts_has_rich_cpt = set()
for id in s_art:
for c in cpts_set:
if c in s_art[id]:
file_has_cpt += 1
if c in def_index or c in ctx_index:
arts_has_rich_cpt.add(id)
for id in t_art:
for c in cpts_set:
if c in t_art[id]:
file_has_cpt += 1
if c in def_index or c in ctx_index:
arts_has_rich_cpt.add(id)
link_total = 0
link_has_rich_cpt = 0
for sid in link_dict:
for tid in link_dict[sid]:
link_total += 1
if sid in arts_has_rich_cpt or tid in arts_has_rich_cpt:
link_has_rich_cpt += 1
utils.sample_dict(def_index, os.path.join(eval_dir, "definition_sample.csv"))
utils.sample_dict(ctx_index, os.path.join(eval_dir, "context_sample.csv"))
correct_def = len(cpt_has_def & def_ans)
incorrect_def = def_cnt - correct_def
correct_ctx = len(cpt_has_ctx & ctx_ans)
incorrect_ctx = ctx_cnt - correct_ctx
stat = {
"how many concepts are detected in artifacts": len(cpts_set),
"how many concepts have definition": def_cnt,
"how many concepts have correct def": correct_def,
"how many concepts have incorrect def": incorrect_def,
"how many concepts have correct ctx": correct_ctx,
"how many concepts have incorrect ctx": incorrect_ctx,
"how many concepts have context": ctx_cnt,
"how many artifacts contains rich-concept (the concept have either definition or context)": f"{len(arts_has_rich_cpt)}/{file_has_cpt}",
"how many links contains rich-concept explaination": f"{link_has_rich_cpt}/{link_total}",
}
utils.write_dict(stat, os.path.join(eval_dir, "concept_stat.txt"))
def read_concept_answer(dir, file_name):
concept_ctx_file = os.path.join(dir, file_name)
answers = set()
with open(concept_ctx_file) as fin:
for line in fin:
items = line.split("\t")
concept = json.loads(items[0])["concept"]
if "1" in items[1]:
answers.add(concept)
return answers
if __name__ == "__main__":
proj_dir, out_dir, eval_dir = utils.get_args()
def_ans_file_name = "def_manual_eval.txt"
ctx_ans_file_name = "ctx_manual_eval.txt"
top_down_res_dir = os.path.join(eval_dir, "top_down")
bot_up_res_dir = os.path.join(eval_dir, "bot_up")
top_down_dir = os.path.join(out_dir, "top_down")
top_down_def_index = utils.read_definition(
top_down_dir, file_name="definition_sel.jsonl"
)
td_def_ans_index = read_concept_answer(top_down_res_dir, def_ans_file_name)
td_ctx_ans_index = read_concept_answer(top_down_res_dir, ctx_ans_file_name)
top_down_ctx_index = utils.read_context(top_down_dir, file_name="context_sel.jsonl")
bot_up_dir = os.path.join(out_dir, "bot_up")
bot_up_def_index = utils.read_definition(
bot_up_dir, file_name="definition_sel.jsonl"
)
bu_def_ans_index = read_concept_answer(bot_up_res_dir, def_ans_file_name)
bu_ctx_ans_index = read_concept_answer(bot_up_res_dir, ctx_ans_file_name)
bot_up_ctx_index = utils.read_context(bot_up_dir, file_name="context_sel.jsonl")
both_def_index = dict()
both_def_index.update(top_down_def_index)
both_def_index.update(bot_up_def_index)
both_ctx_index = dict()
both_ctx_index.update(top_down_ctx_index)
both_ctx_index.update(bot_up_ctx_index)
both_def_ans, both_ctx_ans = set(), set()
both_def_ans.update(td_def_ans_index)
both_def_ans.update(bu_def_ans_index)
both_ctx_ans.update(td_ctx_ans_index)
both_ctx_ans.update(bu_ctx_ans_index)
eval_out = [os.path.join(eval_dir, x) for x in ["both", "top_down", "bot_up"]]
def_list = [both_def_index, top_down_def_index, bot_up_def_index]
def_answer = [both_def_ans, td_def_ans_index, bu_def_ans_index]
ctx_answer = [both_ctx_ans, td_ctx_ans_index, bu_ctx_ans_index]
ctx_list = [both_ctx_index, top_down_ctx_index, bot_up_ctx_index]
for e, d, c, da, ca in zip(eval_out, def_list, ctx_list, def_answer, ctx_answer):
print(e)
r = evaluate_concept(proj_dir, d, c, e, da, ca)
| 5,369
| 36.816901
| 143
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/evaluation/eval_relation.py
|
# how many relations are extracted
# how many links can be explained with the relations? How many are one hop and how many are two hop
# [manual/random sample] how is the quality of the links
import sys
from eval_glossary import find_concpet_in_art
sys.path.append(".")
sys.path.append("..")
from scripts.case_study.case_generator import gen_concpet_relation
from scripts.annotate_link.trace_link_annotation import process_path
from domain_data_collection.relation_graph import RelationGraph
from tqdm import tqdm
import utils
import os
def get_cpts_for_text(text, cpts_set):
res = set()
for cpt in cpts_set:
if "SHALL" in cpt:
continue
if utils.find_concept_in_text(text, cpt):
res.add(cpt)
return res
def get_sent_with_concept(sent_list, cpt):
res = set()
for sent in sent_list:
if utils.find_concept_in_text(sent, cpt):
res.add(sent)
return res
def find_match_for_cpt(tcpt, cpts_set):
res = set()
for cpt in cpts_set:
if utils.find_concept_in_text(tcpt, cpt):
res.add(tcpt)
return res
def evaluate_relation(proj_dir, knowledge_dir, eval_dir):
if not os.path.isdir(eval_dir):
os.makedirs(eval_dir)
s_art, t_art, link_dict, cpts_set = utils.read_project(proj_dir)
simple_rel = 0
clear_rels = utils.read_relation(knowledge_dir)
vague_rels = utils.read_relation(knowledge_dir, rel_type="vague")
acrn_index = utils.read_acronym(knowledge_dir)
sel_acrn = find_concpet_in_art(cpts_set, acrn_index)
for short in acrn_index.keys():
if short in sel_acrn:
for long in acrn_index[short]:
clear_rels[short][long] = "acronym"
rel_graph = RelationGraph()
for l in clear_rels:
for r in clear_rels[l]:
rel_graph.add_relation((l, clear_rels[l][r], r))
    for l in vague_rels:
        for r in vague_rels[l]:
            rel_graph.add_relation((l, vague_rels[l][r], r))
rel_set = dict()
simple_rel = dict()
clink = []
for sid in tqdm(link_dict):
for tid in link_dict[sid]:
scpts = get_cpts_for_text(s_art[sid], cpts_set)
tcpts = get_cpts_for_text(t_art[tid], cpts_set)
clink.extend(gen_concpet_relation(rel_graph, scpts, tcpts))
for p in clink:
ps, pt = p["source"], p["target"]
if p["type"] == "simple":
simple_rel[ps, pt] = p
else:
rel_set[ps, pt] = p
with open(os.path.join(eval_dir, "simple_links.txt"), "w") as fout:
for p in simple_rel:
fout.write(f"{simple_rel[p]}\n")
with open(os.path.join(eval_dir, "relation_explain.txt"), "w") as fout:
for p in rel_set:
fout.write(f"{rel_set[p]}\n")
return {
"simple_rel": len(simple_rel),
"How many unique clear relationship in trace links": len(rel_set),
}
if __name__ == "__main__":
proj_dir, out_dir, eval_dir = utils.get_args()
proj_name = os.path.basename(proj_dir)
for d in ["top_down", "bot_up"]:
knowledge_dir = os.path.join(out_dir, d)
eval_out = os.path.join(eval_dir, d)
r = evaluate_relation(proj_dir, knowledge_dir, eval_out)
utils.write_dict(r, os.path.join(eval_dir, "relation_stat.txt"))
| 3,301
| 31.058252
| 99
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/concept_detection/EntityDetection.py
|
import os
import pathlib
import sys
from collections import defaultdict, OrderedDict
import stanza
import pandas as pd
import logging
from stanza.server import CoreNLPClient, StartServer
from tqdm import tqdm
from nltk.stem import WordNetLemmatizer
from concept_detection.DataReader import CM1Reader
logger = logging.getLogger(__name__)
month_set = {
"january",
"february",
"march",
"april",
"may",
"june",
"july",
"augest",
"september",
"october",
"november",
"december",
"jan",
"feb",
"apr",
"aug",
"sep",
"oct",
"nov",
"dec",
}
num_set = {
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"ten",
"eleven",
"twelve",
"thirteen",
"fourteen",
"fifteen",
"sixteen",
"seventeen",
"eighteen",
"nineteen",
"twenty",
"thirty",
"forty",
"fifty",
"sixty",
"seventy",
"eighty",
"ninety",
}
adj_blk_list = {
"other",
"more",
"few",
"some",
"many",
"else",
"new",
"old",
"good",
"bad",
"nice",
}
class Concept:
def __init__(self, start, end, text):
self.start, self.end = start, end # start and end index of tokens
self.text = text
def __str__(self):
return self.text
class Relation:
def __init__(self, tokens, sent_index):
self.tokens = tokens
self.sent_index = sent_index
self.rel = dict() # key is subject, value is object
class DomainKG:
def __init__(self, stop_words=None, start_server=StartServer.TRY_START):
if stop_words is None:
stop_words = set()
self.client = CoreNLPClient(
annotators=["tokenize", "ssplit", "lemma", "pos", "depparse"],
timeout=30000,
memory="8G",
properties={"depparse.extradependencies": "MAXIMAL"},
start_server=start_server,
preload=True,
be_quiet=True,
)
self.client.annotate("")
self.stop_words = set(stop_words)
self.concepts, self.relations = [], []
def extend_adj(self, start, words, pos):
if start > 0 and pos[start - 1].startswith("JJ"): # first tk must be adj
s = start = start - 1
while start >= 0:
cur_pos = pos[start]
cur_word = words[start]
if (
cur_pos.startswith("JJ") and cur_word not in self.stop_words
): # save the last JJ
s = start
else:
break
start -= 1
return s
return start
@staticmethod
def extract_concepts(words, pos):
"""
Extract the concepts based on the postags. It will extend the concepts with adjectives and conjunctions
:param words:
:param pos:
:return:
"""
def valid_token_for_phrase(cur_w, cur_p, next_w, next_p):
# valid_tags = {"IN", "NN", "HYPH", "CD"}
valid_tags = {"NN", "HYPH", "CD"}
valid_text = {"\\", "/"}
if cur_w in valid_text:
return True
for t in valid_tags:
if cur_p.startswith(t):
return True
if cur_p.startswith("JJ") and next_w and next_p.startswith("NN"):
return True
return False
def valid_leading_token(cur_w, cur_p):
if cur_w in num_set or cur_w in month_set or cur_w in adj_blk_list:
return False
contain_alph = False
for i, c in enumerate(cur_w):
if c.isalpha():
contain_alph = True
break
elif i == 0: # leading token must start with character
return False
if not contain_alph:
return False
return True
concepts = []
i, N = 0, len(words)
while i < N:
if pos[i].startswith("NN") and valid_leading_token(words[i], pos[i]): # start with NN
s, e, j = i, i + 1, i + 1
while j < N:
j_pos = pos[j]
next_w = words[j + 1] if j + 1 < N else None
next_p = pos[j + 1] if j + 1 < N else None
if j_pos.startswith("NN"):
e = j + 1
if not valid_token_for_phrase(words[j], j_pos, next_w, next_p):
break
j += 1
i = j
ns = s # FIXME try not extend the phrases to reduce the error rate
# ns = self.extend_adj(s, words, pos)
# keep only the noun part if the leading token is not valid
# if not valid_leading_token(words[ns], pos[ns]):
# ns = s
c = Concept(ns, e, " ".join([x for x in words[ns:e]]))
concepts.append(c)
i += 1
return concepts
@staticmethod
def expand_verb(sent, idx):
l, r = idx - 1, idx + 1
ltk, rtk = [], []
exp_tag = ["IN", "RP", "TO", "VB", "CC"]
while l >= 0:
if sent.token[l].pos[:2] in exp_tag:
ltk.append(sent.token[l].word)
else:
break
l -= 1
while r < len(sent.token):
if sent.token[r].pos[:2] in exp_tag:
rtk.append(sent.token[r].word)
else:
break
r += 1
exp_verb = ltk[:-1] + [sent.token[idx].word] + rtk[:]
return " ".join(exp_verb)
@staticmethod
def extract_relations(sent, doc_concepts):
def to_rel(c1, verb, c2):
c1_str = str(doc_concepts.get(c1, ""))
c2_str = str(doc_concepts.get(c2, ""))
if c1_str == c2_str or c1_str == "" or c2_str == "":
return None
lmtzer = WordNetLemmatizer()
verb.replace("_", " ")
verb = " ".join([lmtzer.lemmatize(x) for x in verb.split()])
return (c1_str, verb, c2_str)
def add_relation(rel_set, c1, verb, c2):
relation = to_rel(c1, verb, c2)
if relation:
rel_set.add(relation)
doc_rels = set()
in_deps = defaultdict(dict)
out_deps = defaultdict(dict)
for r in sent.enhancedPlusPlusDependencies.edge:
# abandon the case annotation of the dep
t1_idx, rel, t2_idx = r.source - 1, r.dep.split(":"), r.target - 1
t1_pos, t2_pos = sent.token[t1_idx].pos, sent.token[t2_idx].pos
rcase = rel[1] if len(rel) > 1 else ""
rel = rel[0]
# if t1_pos.startswith("NN") and t1_idx in doc_concepts and t2_pos.startswith(
# "NN") and t2_idx in doc_concepts and rcase is not "":
# add_relation(doc_rels, t1_idx, rcase, t2_idx)
out_rels = out_deps[t1_idx]
tmp = out_rels.get(rel, [])
tmp.append(t2_idx)
out_deps[t1_idx][rel] = tmp
in_rels = in_deps[t2_idx]
tmp = in_rels.get(rel, [])
tmp.append(t1_idx)
in_deps[t2_idx][rel] = tmp
for idx, w in enumerate(sent.token):
if w.pos.startswith("VB"):
# sub-obj rule
expand_vb = DomainKG.expand_verb(sent, idx)
subjs = out_deps[idx].get("nsubj", [])
if len(subjs) == 0:
subjs = in_deps[idx].get("acl", [])
if len(subjs) == 0:
xcomp_dep = in_deps[idx].get("xcomp", [])
if len(xcomp_dep) > 0:
comp_vb = out_deps[xcomp_dep[0]]
subjs = comp_vb.get("obj", []) + comp_vb.get("obl", [])
for subj in subjs:
if subj not in doc_concepts:
continue
objs = out_deps[idx].get("obj", [])
if len(objs) == 0:
objs = out_deps[idx].get("obl", [])
for obj in objs:
if obj not in doc_concepts:
continue
add_relation(doc_rels, subj, expand_vb, obj)
break # pick only one subj
elif w.pos.startswith("NN"):
# "[IN]-> case ->[NN] -> obl-> [VB]-> obj-> [NN]"
case_dep = out_deps[idx].get("case", [])
obl_dep = in_deps[idx].get("obl", [])
if len(case_dep) > 0 and len(obl_dep) > 0:
vb = DomainKG.expand_verb(sent, case_dep[0])
objs = out_deps[obl_dep[0]].get("obj", [])
if len(objs) > 0:
obj = objs[0]
if sent.token[obj].pos.startswith("NN"):
add_relation(doc_rels, obj, vb, idx)
return doc_rels
def build(self, docs, disable_tqdm=False):
concepts = [] # concepts in each document as a list of list
rels = [] # concept in each document as a list of list
tokens = []
for d in tqdm(docs, disable=disable_tqdm):
doc_concepts = dict() # list of concepts in a document
doc_rels = [] # list of relations in a document
doc_tokens = []
for i, sent in enumerate(self.client.annotate(d).sentence):
try:
words, pos = [], []
for w in sent.token:
words.append(w.word)
pos.append(w.pos)
for c in self.extract_concepts(words, pos):
for i in range(c.start, c.end):
doc_concepts[i] = c
doc_rels.extend(self.extract_relations(sent, doc_concepts))
doc_tokens.extend(words)
except Exception as e:
raise Exception(e)
concepts.append([str(x) for x in set(doc_concepts.values())])
rels.append(doc_rels)
tokens.append(doc_tokens)
return concepts, rels, tokens
if __name__ == "__main__":
# process_CCHI()
exit()
# add more dataset here
# test cases for concept extraction
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger("stanza").setLevel(logging.WARN)
# concept_extraction_arts = [
# "The system shall use HL-7 communications protocol for transferring messages among packages",
# "The system shall allow event-delay capability for pre-admission, discharge, and transfer orders",
# ]
relation_extraction_arts = [
"The system shall be able to support the standards identified and recommended by the Health Information Technology Standards Panel (HITSP) on its HITSP-TP13 Ver 1.0.1 document"
"The system shall provide a way for user to quickly click on the specifications that he or she wants for each order placed",
# "The system shall improve accessibility of online clinical information and results.",
# "The system shall provide a platform for building interfaces to external lab services enabling automated order entry and results reporting.",
"The system shall provide the ability to capture common content for prescription details including strength, sig, quantity, and refills to be selected by the ordering clinician.",
# "The system shall have the ability to provide filtered displays of encounters based on encounter characteristics, including date of service, encounter provider and associated diagnosis.",
]
dkg = DomainKG()
    concepts, relations, tokens = dkg.build(docs=relation_extraction_arts)
for c in concepts:
logger.debug(f"{c}")
for r in relations:
logger.debug(f"{r}")
| 12,021
| 32.960452
| 197
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/concept_detection/DataReader.py
|
import xml.etree.ElementTree as ET
import pandas as pd
from collections import defaultdict
import json
import os
from pathlib import Path
SART_CSV, TART_CSV, LK_CSV = "source_artifacts.csv", "target_artifacts.csv", "links.csv"
cur_dir = str(Path(__file__).parent.absolute())
class TraceReader:
def __init__(self, out_dir):
self.sarts, self.tarts = dict(), dict()
self.links = defaultdict(set)
self.out_dir = out_dir
def _read_art(self):
raise NotImplementedError
def _read_link(self):
raise NotImplementedError
def flat_links(self):
flink = []
for sid in self.links:
for tid in self.links[sid]:
flink.append((sid, tid))
return flink
def to_csv(self, sart_csv=SART_CSV, tart_csv=TART_CSV, link_csv=LK_CSV):
df = pd.DataFrame()
df["id"] = self.sarts.keys()
df["arts"] = self.sarts.values()
df.to_csv(os.path.join(self.out_dir, sart_csv), index=False)
df = pd.DataFrame()
df["id"] = self.tarts.keys()
df["arts"] = self.tarts.values()
df.to_csv(os.path.join(self.out_dir, tart_csv), index=False)
df = pd.DataFrame()
df["sid"] = [x[0] for x in self.flat_links()]
df["tid"] = [x[1] for x in self.flat_links()]
df.to_csv(os.path.join(self.out_dir, link_csv), index=False)
def run(self):
self._read_art()
self._read_link()
self.to_csv()
return self
class CM1Reader(TraceReader):
def __init__(
self,
data_dir=cur_dir + "/data/CM1",
sart_file="CM1-sourceArtifacts.xml",
tart_file="CM1-targetArtifacts.xml",
link_file="CM1-answerSet.xml",
):
super().__init__(out_dir=data_dir)
self.sart_file, self.tart_file = (
os.path.join(data_dir, sart_file),
os.path.join(data_dir, tart_file),
)
self.link_file = os.path.join(data_dir, link_file)
def _read_art(self):
def read(f):
res = dict()
root = ET.parse(f).getroot()
for art in root.iter("artifact"):
id = art.find("id").text
arts = art.find("content").text.strip("\n\t\r ").replace("\n", " ")
res[id] = arts
return res
self.sarts, self.tarts = read(self.sart_file), read(self.tart_file)
def _read_link(self):
root = ET.parse(self.link_file).getroot()
for lk in root.iter("link"):
sid = lk.find("source_artifact_id").text
tid = lk.find("target_artifact_id").text
self.links[sid].add(tid)
class CCHITReader(TraceReader):
"""
Convert CCHIT in XML into csv files
"""
def __init__(
self,
data_dir=cur_dir + "/data/CCHIT",
sart_file="source.xml",
tart_file="target.xml",
link_file="answer2.xml",
):
super().__init__(out_dir=data_dir)
self.sart_file, self.tart_file = (
os.path.join(data_dir, sart_file),
os.path.join(data_dir, tart_file),
)
self.link_file = os.path.join(data_dir, link_file)
def _read_art(self):
def read(f):
res = dict()
root = ET.parse(f).getroot()
for art in root.iter("artifact"):
id = art.find("art_id").text
arts = art.find("art_title").text
res[id] = arts
return res
self.sarts, self.tarts = read(self.sart_file), read(self.tart_file)
def _read_link(self):
root = ET.parse(self.link_file).getroot()
for lk in root.iter("link"):
sid = lk.find("source_artifact_id").text
tid = lk.find("target_artifact_id").text
self.links[sid].add(tid)
class DronologyReader(TraceReader):
"""
Convert the dronology json file to csv
"""
def __init__(self, data_dir=cur_dir + "/data/Dronology/"):
super().__init__(out_dir=data_dir)
js_file = os.path.join(data_dir, "dronologydataset01.json")
with open(js_file, encoding="utf8") as fin:
self.entries = json.load(fin)["entries"]
def _read_art(self):
for entry in self.entries:
art_id = entry["issueid"]
attributes = entry["attributes"]
art_summary = attributes["summary"].strip("\n\t\r")
art_describ = attributes["description"].strip("\n\t\r")
art_arts = art_summary + art_describ
self.arts[art_id] = art_arts
def _read_link(self):
for entry in self.entries:
sid = entry["issueid"]
children = entry["children"]
for child in children:
for tid in children[child]:
self.links[sid].add(tid)
class PTCReader(TraceReader):
def __init__(
self,
data_dir=cur_dir + "/data/PTC/",
sart_file="SDD2.xml",
tart_file="SRS.xml",
link_file="SDD2SRS.txt",
):
super().__init__(out_dir=data_dir)
self.data_dir = data_dir
self.sart_file, self.tart_file = (
os.path.join(data_dir, sart_file),
os.path.join(data_dir, tart_file),
)
self.link_file = os.path.join(data_dir, link_file)
def _read_art(self):
def read(f):
arts = dict()
root = ET.parse(f).getroot()
for art in root.iter("artifact"):
art_id = art.find("art_id").text
art_title = art.find("art_title").text
if not art_id or not art_title:
continue
                art_title = art_title.strip('"\n\r\t ')
arts[art_id] = art_title
return arts
self.sarts = read(self.sart_file)
self.tarts = read(self.tart_file)
def _read_link(self):
df = pd.read_csv(self.link_file)
sids = df.iloc[:, 0]
tids = df.iloc[:, 1]
for sid, tid in zip(sids, tids):
self.links[sid].add(tid)
class InfusionPumpReader(TraceReader):
def __init__(
self,
data_dir=cur_dir + "/data/PTC/",
):
pass
if __name__ == "__main__":
# readers = [CCHITReader(), DronologyReader(), PTCReader()]
readers = [CM1Reader()]
for x in readers:
x.run()
| 6,353
| 29.257143
| 88
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/scripts/annotate_link/trace_link_annotation.py
|
from typing import Dict, List
from gensim.models import TfidfModel
import argparse
import pandas as pd
import os
import sys
from jsonlines import jsonlines
sys.path.append("../..")
from domain_data_collection.relation_graph import RelationGraph
from gensim.corpora import Dictionary
from concept_detection.EntityDetection import DomainKG
import json
from collections import Counter, defaultdict
from gensim import matutils
from nltk.stem import WordNetLemmatizer
from tqdm import tqdm
import math as m
version = "2.2.1"
def read_project(dir_path):
arts = pd.read_csv(os.path.join(dir_path, "artifacts.csv"))
lks = pd.read_csv(os.path.join(dir_path, "links.csv"))
# annotated part
cpts = pd.read_csv(os.path.join(dir_path, "concepts.csv"))
tokens = pd.read_csv(os.path.join(dir_path, "tokens.csv"))
concept_dict, token_dict = {}, {}
for idx, row in cpts.iterrows():
concept_dict[row["ids"]] = eval(row["phrase"])
for idx, row in tokens.iterrows():
token_dict[row["id"]] = eval(row["tokens"])
art_dict, link_dict = {}, defaultdict(set)
for idx, row in arts.iterrows():
art_dict[row["id"]] = row["arts"]
for idx, row in lks.iterrows():
link_dict[row["sid"]].add(row["tid"])
return art_dict, link_dict, concept_dict, token_dict
def get_imp_score(concept, model, lemmatizer):
tks = concept.split()
score = 0
for tk in tks:
tk = lemmatizer.lemmatize(tk)
score += model.idfs[model.id2word.token2id[tk]]
return score / len(tks)
def get_link_score(stoken, ttoken, model):
doc1_vec = model[model.id2word.doc2bow(stoken)]
doc2_vec = model[model.id2word.doc2bow(ttoken)]
score = matutils.cossim(doc1_vec, doc2_vec)
return score
def get_lemmas(concept, lemmatizer):
tks = concept.split()
return [lemmatizer.lemmatize(x) for x in tks]
def get_relation(lm1, lm2):
interset = lm1 & lm2
if len(interset) == len(lm1) and len(interset) == len(lm2):
return "same_as"
elif len(interset) == len(lm1):
return "parent_of"
elif len(interset) == len(lm2):
return "child_of"
else:
return None
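# Example of the lemma-overlap heuristic above (hypothetical concepts): with
# lm1 = Counter(["order", "entry"]) and lm2 = Counter(["order", "entry", "system"]),
# the intersection covers all of lm1, so get_relation(lm1, lm2) returns "parent_of";
# identical lemma sets return "same_as", a superset returns "child_of",
# and partial overlap returns None.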
def process_path(p):
relation_list = []
for i in range(1, len(p)):
c1, c2 = p[i - 1], p[i]
verb = rel_graph.g.get_edge_attribute_by_id(c1, c2, 0, "verb")
if len(verb) > 0:
verb = list(verb)[0]
relation_list.append(
{
"concept_1": c1.replace(" ", "_"),
"relation_type": verb,
"concept_2": c2.replace(" ", "_"),
}
)
return relation_list
def format_concept_list(words):
return [x.replace(" ", "_") for x in words]
def format_concept_dict(word_dict):
flat_dict = []
for k, v in word_dict.items():
flat_dict.append({k.replace(" ", "_"): v})
return flat_dict
def is_same(q1, q2, lmtzr):
lemm_token = lambda x: [lmtzr.lemmatize(t) for t in x.split()]
return lemm_token(q1) == lemm_token(q2)
def main(
arts: Dict,
links: List,
concepts: Dict,
tokens: Dict,
rel_graph: RelationGraph,
vague_rel_dict,
acr_index,
def_index,
):
lemmas = {}
lemmatizer = WordNetLemmatizer()
for doc in tokens:
lemmas[doc] = [lemmatizer.lemmatize(token) for token in tokens[doc]]
dct = Dictionary(lemmas.values())
corpus = [dct.doc2bow(doc_token) for doc_token in lemmas.values()]
model = TfidfModel(corpus, id2word=dct)
ann_arts = {}
visited = set()
terminilogy_pool = set(rel_graph.g.vertices())
max_imp = 0
min_imp = m.inf
for id in tqdm(arts, desc="prepare artifacts and relation graph"):
text = arts[id]
doc_concepts = set(concepts[id])
        terminilogy = set()  # terminology: the concepts unique to this domain
definitions = {}
importance_scores = {}
for c in concepts[id]:
c_lower = c.lower()
importance_scores[c] = get_imp_score(c, model, lemmatizer)
max_imp = max(max_imp, importance_scores[c])
min_imp = min(min_imp, importance_scores[c])
if c_lower in terminilogy_pool:
terminilogy.add(c)
if c in acr_index:
definitions[c] = acr_index[c]
elif c in def_index:
definitions[c] = def_index[c][
0
] # todo add definition ranking method
elif c_lower not in visited:
visited.add(c_lower)
c_lm = Counter(get_lemmas(c_lower, lemmatizer))
rel_graph.add_vertex(c_lower)
for t in rel_graph.g.vertices():
r = get_relation(c_lm, Counter(get_lemmas(t, lemmatizer)))
if r is not None:
rel_graph.add_relation((c_lower, r, t.lower()))
c_mark = c.replace(" ", "_")
text = text.replace(c, c_mark)
# normalize importance score
for c in importance_scores:
importance_scores[c] = (importance_scores[c] - min_imp) / (
max_imp - min_imp
)
del_c = [x for x in importance_scores if importance_scores[x] < 0.1]
for c in del_c:
if c in doc_concepts:
doc_concepts.remove(c)
if c in terminilogy:
terminilogy.remove(c)
if c in definitions:
del definitions[c]
if c in importance_scores:
del importance_scores[c]
ann_arts[id] = {
"id": id,
"text": text,
"concepts": format_concept_list(doc_concepts),
"terminilogy": format_concept_list(terminilogy),
"definitions": format_concept_dict(definitions),
"importance_scores": format_concept_dict(importance_scores),
}
rel_graph.dump("../")
res = []
max_score = 0
min_score = m.inf
for sid in tqdm(links, desc="process link"):
if sid not in ann_arts:
continue
sart = ann_arts[sid]
sart["query"] = "source"
targets = []
for tid in links[sid]:
if tid not in ann_arts:
continue
tart = ann_arts[tid]
tart["query"] = "target"
# get links
clink = []
for sc in sart["concepts"]:
for tc in tart["concepts"]:
squery = sc.lower().replace("_", " ")
tquery = tc.lower().replace("_", " ")
relation_list = []
if rel_graph.g.is_reachable(squery, tquery):
p = rel_graph.g.shortest_paths(squery, tquery)[0]
relation_list = process_path(p)
elif rel_graph.g.is_reachable(tquery, squery):
p = rel_graph.g.shortest_paths(tquery, squery)[0]
relation_list = process_path(p)
cterm = set()
if len(relation_list) > 0:
clink.append(
{
"source": sc,
"target": tc,
"relationship": relation_list,
}
)
cterm.add(sc)
cterm.add(tc)
tart["score"] = get_link_score(lemmas[sid], lemmas[tid], model)
max_score = max(tart["score"], max_score)
min_score = min(tart["score"], min_score)
tart["links"] = clink
tart["type"] = "Regulatory code"
targets.append(tart.copy())
ann_arts[sid]["type"] = "Requirements" # fixme
res.append({"source": [ann_arts[sid]], "targets": targets})
# normalize the link score
for r in res:
for t in r["targets"]:
t["score"] = round((t["score"] - min_score) / (max_score - min_score), 3)
if t["score"] > 1:
print(r)
with open(f"./annotated_link_{version}.json", "w") as fout:
json.dump(res, fout, indent=4)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="prepare link file for visualization")
parser.add_argument(
"--project_dir", help="project directory contain the artifact and trace link"
)
parser.add_argument("--kg_dir", help="directory store the knowledge graph")
args = parser.parse_args()
arts, links, concepts, tokens = read_project(args.project_dir)
rel_graph = RelationGraph()
rel_graph.load(args.kg_dir)
vague_rel = dict()
vr_file = os.path.join(args.kg_dir, "vague_relations.jsonl")
if os.path.isfile(vr_file):
with jsonlines.open(vr_file) as fin:
for obj in fin:
l, vb, r = obj["left"], obj["verb"], obj["right"]
vague_rel[(l, r)] = vb
df_file = os.path.join(args.kg_dir, "definitions.jsonl")
acr_file = os.path.join(args.kg_dir, "acronym.jsonl")
definitions, acronyms = {}, {}
    if os.path.isfile(df_file):
        with jsonlines.open(df_file) as fin:
            for obj in fin:
                definitions[obj["concept"]] = obj["definition"]
    if os.path.isfile(acr_file):
with jsonlines.open(acr_file) as fin:
for obj in fin:
acronyms[obj["short"]] = obj["long"]
with open(os.path.join(args.project_dir, "basic_relation.json")) as fin:
basic_r = json.load(fin)
for lk in basic_r:
for r in lk["relation"]:
left, vb, right = r[0], r[1], r[2]
rel_graph.add_relation((left, vb, right))
for acr in acronyms:
rel_graph.add_relation((acr, "synonym", acronyms[acr]))
rel_graph.add_relation((acronyms[acr], "synonym", acr))
main(arts, links, concepts, tokens, rel_graph, vague_rel, acronyms, definitions)
| 9,982
| 33.424138
| 87
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/scripts/extract_regular_concept/extract_regular_concepts.py
|
import argparse, sys, os
from collections import Counter
from pathlib import Path
import logging
from tqdm import tqdm
import pandas as pd
sys.path.append("../..")
from concept_detection.EntityDetection import DomainKG
import heapq
logger = logging.getLogger(__name__)
def select_concepts(in_file, out_file, ratio):
    # keep the most frequent concepts (the top `ratio` fraction of rows) using a min-heap
    hp = []
    df = pd.read_csv(in_file)
    hp_size = len(df) * ratio
for idx, row in df.iterrows():
c, f = row["concept"], row["freq"]
heapq.heappush(hp, (f, c))
if len(hp) > hp_size:
heapq.heappop(hp)
df = pd.DataFrame()
df["concept"] = [x[1] for x in hp]
df["freq"] = [x[0] for x in hp]
df.to_csv(out_file, index=True)
def extract_webbase_concept(in_dir, out_file):
concepts = Counter()
for fname in tqdm(os.listdir(in_dir), desc="files"):
if fname.endswith("possf2"):
lines = (
Path(os.path.join(in_dir, fname))
.read_text(encoding="utf8")
.splitlines()
)
for line in lines:
tks = line.split()
if len(tks) == 0:
continue
words, pos = [], []
for tk in tks:
try:
w, p = tk.split("_")
words.append(w)
pos.append(p)
except Exception:
pass
cpts = DomainKG.extract_concepts(words, pos)
for c in cpts:
concepts[c.text] += 1
df = pd.DataFrame()
df["concept"] = concepts.keys()
df["freq"] = concepts.values()
df.to_csv(out_file, index=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Extract regular concept and rank them by frequency from general corpus"
)
parser.add_argument("--data_dir", help="directory of general corpus")
parser.add_argument("--out_file", help="output of concepts")
parser.add_argument("--ratio", help="ratio of concepts to keep as regular concepts")
parser.add_argument("--overwrite", default=True, help="output of concepts")
args = parser.parse_args()
tmp_file = os.path.join(os.path.dirname(args.out_file), "all_regular_concepts.csv")
if not os.path.isfile(tmp_file) or args.overwrite:
extract_webbase_concept(in_dir=args.data_dir, out_file=tmp_file)
if not os.path.isfile(args.out_file) or args.overwrite:
select_concepts(tmp_file, args.out_file, ratio=float(args.ratio))
| 2,627
| 31.04878
| 92
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/scripts/glossay_processing/parse_glossary.py
|
import pandas as pd
import os
import jsonlines
def is_acronym(term):
for c in term:
if c.isalpha() and not c.isupper():
return False
return True
def write_glossary(out_dir, acrn_dict, def_dict):
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
acrn_file = os.path.join(out_dir, "glossary_acronym.jsonl")
def_file = os.path.join(out_dir, "glossary_definition.jsonl")
with jsonlines.open(acrn_file, "w") as fout:
for a in acrn_dict:
fout.write({"short": a, "long": [acrn_dict[a]]})
with jsonlines.open(def_file, "w") as fout:
for d in def_dict:
fout.write({"concept": d, "definition": [def_dict[d]]})
def parse_csv_glossary(path):
df = pd.read_csv(path)
acrn_dict, def_dict = dict(), dict()
df = df.fillna("")
for idx, row in df.iterrows():
acrn, concept, definition = row["Acronym"], row["Concept"], row["Definition"]
if len(acrn) == 0 and len(concept) == 0:
continue
if len(acrn) > 0 and len(concept) > 0:
acrn_dict[acrn] = concept
if len(definition) > 0:
if len(concept) > 0:
def_dict[concept] = definition
elif len(acrn) > 0:
def_dict[acrn] = definition
return acrn_dict, def_dict
def parse_cchit():
# split cchit glossary into acronym, concept, and definition
df = pd.read_csv("cchit_raw.csv")
acrn_dict, def_dict = dict(), dict()
for i, row in df.iterrows():
term, par = row[0], row[1]
if term != term or par != par:
continue
sents = par.split("\n")
s1 = sents[0]
rest = "\n".join(sents[1:])
        if is_acronym(term) and not s1.endswith("."):
            acronym = term
            concept = s1
            definition = rest
            acrn_dict[acronym] = concept
        else:
            concept = term
            definition = par
        if len(definition) > 0:
            def_dict[concept] = definition
return acrn_dict, def_dict
if __name__ == "__main__":
acrn_dict, def_dict = parse_cchit()
write_glossary("./CCHIT", acrn_dict, def_dict)
acrn_dict, def_dict = parse_csv_glossary("./cm1_raw.csv")
write_glossary("./CM1", acrn_dict, def_dict)
acrn_dict, def_dict = parse_csv_glossary("./ptc_raw.csv")
write_glossary("./PTC", acrn_dict, def_dict)
| 2,401
| 28.292683
| 85
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/scripts/preprocess_dataset/preprocess_dataset.py
|
import os
import pathlib
import sys
import argparse
sys.path.append(".")
sys.path.append("..")
from concept_detection.EntityDetection import DomainKG
from scripts.preprocess_dataset.remove_regular_concepts import remove_regular_concpts
from concept_detection.DataReader import CCHITReader, PTCReader, CM1Reader, InfusionPumpReader
import pandas as pd
reader_map = {
"CCHIT": CCHITReader,
"PTC": PTCReader,
"CM1": CM1Reader,
# "InfusionPump": InfusionPumpReader
}
def concept_detect(proj_dir, sart_fname="source_artifacts.csv", tart_fname="target_artifacts.csv"):
sart = pd.read_csv(os.path.join(proj_dir, sart_fname)).dropna()
tart = pd.read_csv(os.path.join(proj_dir, tart_fname)).dropna()
arts = sart["arts"].to_list() + tart["arts"].to_list()
ids = sart["id"].to_list() + tart["id"].to_list()
dkg = DomainKG()
cpt_file = os.path.join(proj_dir, "concepts.csv")
concepts, rels, tokens = dkg.build(arts)
concept_df = pd.DataFrame()
concept_df["phrase"] = concepts
concept_df["ids"] = ids
concept_df.to_csv(cpt_file)
rel_df = pd.DataFrame()
rel_df["id"] = ids
rel_df["rels"] = rels
rel_df.to_csv(os.path.join(proj_dir, "relations.csv"))
tk_df = pd.DataFrame()
tk_df["id"] = ids
tk_df['tokens'] = tokens
tk_df.to_csv(os.path.join(proj_dir, "tokens.csv"))
return cpt_file
def preprocess(proj_dir, reg_cpt_file):
reader = reader_map[os.path.basename(proj_dir)](proj_dir)
reader.run()
raw_concept_file = concept_detect(proj_dir)
reduced_cpt_file = os.path.join(proj_dir, "reduced_concepts.jsonl")
remove_regular_concpts(reg_cpt_file, raw_concept_file, reduced_cpt_file)
if __name__ == "__main__":
"""
    Read raw materials from projects, produce the tokens, and recognize the concepts in the artifacts.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--project_dir", default="../../data/projects/CCHIT", help="the directory of the project dataset"
)
parser.add_argument("--reg_cpt_file", default="../../data/regular_concepts.csv",
help="the csv file of regular concepts")
args = parser.parse_args()
preprocess(args.project_dir, args.reg_cpt_file)
| 2,253
| 32.147059
| 107
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/scripts/preprocess_dataset/remove_regular_concepts.py
|
import argparse
import pandas as pd
from jsonlines import jsonlines
from tqdm import tqdm
import re
def get_concepts(concept_file):
cpt_df = pd.read_csv(concept_file)
cpts = set()
for idx, row in cpt_df.iterrows():
art_cpts = eval(row["phrase"])
cpts.update(art_cpts)
return cpts
def load_regular_concepts(concept_file, max_num=10 ** 7):
cnt = 0
cpts = set()
print("loading regular concepts")
with open(concept_file, encoding="utf8") as fin:
for line in tqdm(fin):
parts = line.split(",")
cpts.add(parts[0].lower())
cnt += 1
if cnt > max_num and max_num > 0:
break
return cpts
def is_valid(cpt):
blk_ch = {"?", "!"}
for c in cpt:
if c in blk_ch:
return False
if re.match("req\d+", cpt.lower()):
return False
for c in cpt:
if c.isalpha():
return True
return False
def remove_regular_concpts(reg_cpt_csv, raw_cpt_csv, out_cpt):
reg_cpts = load_regular_concepts(reg_cpt_csv)
raw_cpt_df = pd.read_csv(raw_cpt_csv)
all_rd_cpts = set()
with jsonlines.open(out_cpt, "w") as fout:
for idx, row in raw_cpt_df.iterrows():
art_id = row["ids"]
art_cpts = eval(row["phrase"])
rd_cpts = set()
blst_cpts = set()
for cpt in art_cpts:
if cpt.lower() not in reg_cpts and is_valid(cpt):
rd_cpts.add(cpt)
all_rd_cpts.add(cpt)
else:
blst_cpts.add(cpt)
fout.write(
{
"art_id": art_id,
"origin_concepts": art_cpts,
"reduced_concepts": list(rd_cpts),
"removed_concepts": list(blst_cpts),
}
)
out_cpt = out_cpt.replace(".jsonl", "_flat.txt")
with open(out_cpt, "w") as fout:
for c in all_rd_cpts:
fout.write(f"{c}\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--regular_cpt_csv", help="csv file contains the regular concepts"
)
parser.add_argument(
"--artifact_cpt_csv", help="csv file contains the concepts in each artifact"
)
parser.add_argument(
"--output_cpt", help="jsonline file with filtered concepts in each artifact"
)
args = parser.parse_args()
remove_regular_concpts(args.regular_cpt_csv, args.artifact_cpt_csv, args.output_cpt)
| 2,571
| 27.263736
| 88
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/sentence_classifier/predict.py
|
from collections import defaultdict
import os
import sys
sys.path.append(".")
sys.path.append("..")
from torch import nn
from tqdm import tqdm
from transformers.models.auto.tokenization_auto import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from torch.utils.data import DataLoader
import jsonlines
import pandas as pd
import torch
import argparse
key_dict = {
"acronym": ("short", "long"),
"definition": ("concept", "definition"),
"context": ("concept", "context"),
}
# FIXME: import error when importing this from train.py
class DMDataset(torch.utils.data.Dataset):
def __init__(self, encodings, labels=None):
self.encodings = encodings
self.labels = labels
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
if self.labels is not None:
item["labels"] = torch.tensor(self.labels[idx])
return item
def __len__(self):
return len(self.encodings["input_ids"])
def write_res_for_manual_evaluation(concepts, sents, predicts, res_file):
df = pd.DataFrame(columns=["concept", "sentences", "predicts"])
df["concept"] = concepts
df["sentences"] = sents
df["predicts"] = predicts
df.to_csv(res_file)
def select_sentence(concepts, sents, predicts, res_file, type, thrd=0.5):
index = defaultdict(set)
for c, s, p in zip(concepts, sents, predicts):
if p > thrd:
index[c].add((s, p))
with jsonlines.open(res_file, "w") as fout:
for c in index:
k1, k2 = key_dict[type]
index[c] = [
x[0] for x in sorted(index[c], key=lambda x: x[1], reverse=True)
]
fout.write({k1: c, k2: index[c]})
def test(output_dir, model, tokenizer, type):
    infile = os.path.join(output_dir, f"{type}.jsonl")
    eval_file = os.path.join(output_dir, f"{type}_eval.csv")
    sel_file = os.path.join(output_dir, f"{type}_sel.jsonl")
k1, k2 = key_dict[type]
concepts, sents = [], []
with jsonlines.open(infile) as fout:
for item in fout:
for s in item[k2]:
concepts.append(item[k1])
sents.append(s)
preds = run_prediction(sents, model, tokenizer)
write_res_for_manual_evaluation(concepts, sents, preds, eval_file)
select_sentence(concepts, sents, preds, sel_file, type)
def run_prediction(sents, model, tokenizer):
encodings = tokenizer(
sents,
truncation=True,
padding=True,
max_length=128,
)
dataset = DMDataset(encodings, None)
eval_dataloader = DataLoader(dataset, batch_size=8)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model.to(device)
model.eval()
m = nn.Softmax(dim=-1)
preds = []
with torch.no_grad():
for batch in tqdm(eval_dataloader):
batch = {k: v.to(device) for k, v in batch.items()}
outputs = model(**batch)
logits = outputs.logits
scores = m(logits)[:, 1].tolist()
preds.extend(scores)
return preds
def run(proj_name, model_path):
tokenizer = AutoTokenizer.from_pretrained("allenai/scibert_scivocab_uncased")
model = AutoModelForSequenceClassification.from_pretrained(model_path)
for direction in ["bot_up", "top_down"]:
output_dir = os.path.join("./output", proj_name, direction)
test(output_dir, model, tokenizer, "acronym")
test(output_dir, model, tokenizer, "definition")
test(output_dir, model, tokenizer, "context")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--proj_name")
parser.add_argument("--model_path")
args = parser.parse_args()
run(args.proj_name, args.model_path)
| 3,821
| 31.389831
| 87
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/sentence_classifier/eval_model.py
|
import sys
sys.path.append(".")
sys.path.append("..")
from sentence_classifier.predict import run_prediction
from transformers.models.auto.tokenization_auto import AutoTokenizer
from transformers import AutoModelForSequenceClassification
import argparse
import os
import jsonlines
key_dict = {
"acronym": ("short", "long"),
"definition": ("concept", "definition"),
"context": ("concept", "context"),
}
def run(proj_name, model_path, direction, type):
kg_dir = os.path.join("./output/", proj_name, direction)
out_dir = os.path.join("./sentence_clasifier/", proj_name, direction)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
tokenizer = AutoTokenizer.from_pretrained("allenai/scibert_scivocab_uncased")
model = AutoModelForSequenceClassification.from_pretrained(model_path)
infile = os.path.join(kg_dir, f"{type}.jsonl")
sel_file = os.path.join(out_dir, f"{type}_selected.jsonl")
origin_file = os.path.join(out_dir, f"{type}.jsonl")
k1, k2 = key_dict[type]
concepts, sents = [], []
with jsonlines.open(infile) as fout:
for item in fout:
for s in item[k2]:
concepts.append(item[k1])
sents.append(s)
preds = run_prediction(sents, model, tokenizer)
sel_index, origin_index = dict(), dict()
for c, s, p in zip(concepts, sents, preds):
if c not in sel_index:
sel_index[c] = set()
origin_index[c] = s
if p > 0.5:
sel_index[c].add((s, p))
with jsonlines.open(sel_file, "w") as fout:
for c in sel_index:
k1, k2 = key_dict[type]
sel_index[c] = [
x[0] for x in sorted(sel_index[c], key=lambda x: x[1], reverse=True)
][:1]
fout.write({k1: c, k2: sel_index[c]})
with jsonlines.open(origin_file, "w") as fout:
for c in origin_index:
fout.write({k1: c, k2: origin_index[c]})
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--proj_name")
parser.add_argument("--model_path")
args = parser.parse_args()
for d in ["top_down", "bot_up"]:
for type in ["acronym", "definition", "context"]:
run(args.proj_name, args.model_path, d, type)
| 2,279
| 33.545455
| 84
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/sentence_classifier/train.py
|
from transformers import TrainingArguments, AutoTokenizer
from transformers import AutoModelForSequenceClassification, Trainer
import torch
from datasets import load_metric
import numpy as np
import sys
sys.path.append("..")
sys.path.append(".")
from evaluation import utils
from nltk.tokenize import sent_tokenize
import os
from sklearn.model_selection import train_test_split
import argparse
metric = load_metric("f1")
lm_name = "allenai/scibert_scivocab_uncased"
def read_training_data(project_name):
def get_sent_for_proj(dir_path):
s_art, t_art, link_dict, concept_set = utils.read_project(dir_path)
sents = set()
for sid in s_art:
sents.update(sent_tokenize(s_art[sid]))
for tid in t_art:
sents.update(sent_tokenize(t_art[tid]))
return sents
proj_root = "./data/projects"
sents, labels = [], []
for pname in ["CCHIT", "CM1", "PTC"]:
dir_path = os.path.join(proj_root, pname)
proj_sents = get_sent_for_proj(dir_path)
sents.extend(proj_sents)
labels.extend([1 if pname == project_name else 0] * len(proj_sents))
train_texts, val_texts, train_labels, val_labels = train_test_split(
sents, labels, test_size=0.2
)
return {
"train": (train_texts, train_labels),
"val": (val_texts, val_labels),
}
def run(proj_name):
raw_datas = read_training_data(proj_name)
tokenizer = AutoTokenizer.from_pretrained(lm_name)
datasets = dict()
for part in raw_datas.keys():
texts, labels = raw_datas[part]
encodings = tokenizer(
texts,
truncation=True,
padding=True,
max_length=128,
)
datasets[part] = DMDataset(encodings=encodings, labels=labels)
train(proj_name, datasets["train"], datasets["val"])
class DMDataset(torch.utils.data.Dataset):
def __init__(self, encodings, labels=None):
self.encodings = encodings
self.labels = labels
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
if self.labels is not None:
item["labels"] = torch.tensor(self.labels[idx])
return item
def __len__(self):
return len(self.encodings["input_ids"])
def compute_metrics(eval_pred):
logits, labels = eval_pred
predictions = np.argmax(logits, axis=-1)
return metric.compute(predictions=predictions, references=labels)
def train(proj_name, train_data, eval_data):
training_args = TrainingArguments(
output_dir=f"./sentence_classifier/{proj_name}",
report_to="tensorboard",
num_train_epochs=10,
per_device_train_batch_size=16,
per_device_eval_batch_size=64,
logging_dir="./logs",
load_best_model_at_end=True,
save_strategy="epoch",
logging_strategy="epoch",
evaluation_strategy="epoch",
save_total_limit=3,
)
model = AutoModelForSequenceClassification.from_pretrained(lm_name, num_labels=2)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_data,
eval_dataset=eval_data,
compute_metrics=compute_metrics,
)
trainer.train()
trainer.evaluate()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--proj_name")
args = parser.parse_args()
run(args.proj_name)
| 3,452
| 28.512821
| 85
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/domain_data_collection/corpus_build_bot_up.py
|
import argparse
import os
import ssl
import sys
sys.path.append(".")
sys.path.append("..")
from domain_data_collection.utils import clean_paragraph
import pathlib
ssl._create_default_https_context = ssl._create_unverified_context
from queue import Queue
from time import sleep, time
import pandas as pd
import requests
from bs4 import BeautifulSoup, Comment
from cleantext import clean
from search_engines import Bing, Google
import jsonlines
from tqdm import tqdm
from multiprocessing.pool import ThreadPool
from pattmatch.kmp import kmp
blacklist = [
"[document]",
"noscript",
"header",
"html",
"meta",
"head",
"input",
"script",
"a",
"style",
]
OUT_FILE = "bot_up_corpus.jsonl"
def get_concepts(concept_file):
cpts = set()
if concept_file.endswith(".jsonl"):
with jsonlines.open(concept_file) as fin:
for obj in fin:
cpts.update(obj["reduced_concepts"])
elif concept_file.endswith(".csv"):
cpt_df = pd.read_csv(concept_file)
for idx, row in cpt_df.iterrows():
art_cpts = eval(row["phrase"])
cpts.update(art_cpts)
elif concept_file.endswith(".txt"):
with open(concept_file) as fin:
for line in fin:
cpts.add(line.strip("\n\t\r "))
extra_cpts = set()
for cpt in cpts:
tokens = cpt.split()
if len(tokens) > 1:
for tk in tokens:
if len(tk) >= 3 and tk.isupper():
extra_cpts.add(tk)
print(extra_cpts)
print(f"loaded {len(cpts)} concepts and added {len(extra_cpts)}")
cpts.update(extra_cpts)
return cpts
def check_sent_quality(sent):
stoken = set(sent.split())
if len(stoken) < 5 or len(stoken) > 75:
return False
if sent.endswith("?") or sent.endswith("!"):
return False
return True
def scrap_worker(url, query, clean_lines):
try:
line_cnt_with_query = 0
if url.endswith(".pdf") or url.endswith(".doc"):
return
header = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64)"}
proj_root = pathlib.Path(__file__).parent.parent.resolve()
ca_path = os.path.join(proj_root, "venv/Lib/site-packages/certifi/cacert.pem")
if not os.path.isfile(ca_path):
ca_path = os.path.join(
proj_root, "venv/lib/python3.7/site-packages/certifi/cacert.pem"
)
html = requests.get(url, headers=header, timeout=6, verify=ca_path).text
soup = BeautifulSoup(html, "html.parser")
text = soup.find_all(text=True)
output = ""
for t in text:
if t.parent.name not in blacklist and not isinstance(t, Comment):
output += "{} ".format(t)
output = [
y for y in [x.strip("\n\t\r ") for x in output.split("\n")] if len(y) > 0
]
for line in output:
par = clean(line).strip("\n\t\r ")
sents = clean_paragraph(par)
for s in sents:
if line_cnt_with_query >= 20 or not check_sent_quality(s):
break
if query.lower() in s.lower():
idxs = kmp(s.lower().split(), query.lower().split())
if len(idxs) > 0:
line_cnt_with_query += 1
clean_lines.put(s)
except Exception as e:
print(e)
def scrap_concept(qcpt, domain, page_num, visited_link, engine="bing"):
    # scrape the web with a search engine and select sentences that contain the given concept
if engine == "google":
engine = Google()
query = f'"{qcpt}" in {domain}'
else:
engine = Bing()
query = f"'{qcpt}' in {domain}"
clean_lines = Queue()
qres = engine.search(query, pages=page_num)
links = [x for x in qres.links() if x not in visited_link and ".pdf" not in x][:50]
visited_link.update(links)
with ThreadPool(30) as p:
p.starmap(scrap_worker, [(x, qcpt, clean_lines) for x in links])
res = set()
while not clean_lines.empty():
res.add(clean_lines.get())
return res
def domain_corpus_builder(concepts, out_dir, page_num, domain, interval):
visited_cpt, visited_link = set(), set()
out_file = os.path.join(out_dir, OUT_FILE)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
if os.path.isfile(out_file):
with jsonlines.open(out_file) as fin:
for obj in fin:
visited_cpt.add(obj["query"])
i, total = 0, len(concepts)
with jsonlines.open(out_file, "a") as fout:
print("start")
for cpt in tqdm(concepts):
start = time()
i += 1
print(f"{i}/{total}: {cpt}")
if cpt in visited_cpt:
continue
visited_cpt.add(cpt)
clean_lines = scrap_concept(
qcpt=cpt, domain=domain, page_num=page_num, visited_link=visited_link
)
fout.write(
{
"query": cpt,
"sent_num": len(clean_lines),
"sentences": list(clean_lines),
}
)
end = time()
if end - start < 10:
sleep(interval)
if __name__ == "__main__":
"Collect related sentences from websites by providing it with concepts"
parser = argparse.ArgumentParser()
parser.add_argument(
"--concept_file", help="path to the file store the concepts for every artifacts"
)
parser.add_argument(
"--out_dir", help="output the collected corpus in the format of json"
)
parser.add_argument(
"--page_num", default=5, type=int, help="the number of pages in search engine"
)
parser.add_argument("--domain", help="the domain of the concepts")
parser.add_argument("--query_interval", default=5, type=float)
args = parser.parse_args()
cpt_set = get_concepts(args.concept_file)
domain_corpus_builder(
concepts=cpt_set,
out_dir=args.out_dir,
page_num=args.page_num,
domain=args.domain,
interval=args.query_interval,
)
| 6,198
| 29.995
| 88
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/domain_data_collection/utils.py
|
import re
import pandas as pd
from nltk import sent_tokenize
def read_regular_concepts(regcpt_file):
regcpt = set()
with open(regcpt_file) as fin:
for line in fin:
cpt, cnt = line.split(',')[-2:]
if int(cnt) > 10000:
regcpt.add(cpt.lower())
else:
break
return regcpt
def clean_paragraph(doc):
# add space around "-"
doc = re.sub("-", " - ", doc)
# merge multiple space
doc = re.sub("\s+", " ", doc)
    # remove brackets with empty or non-alphabetic content
doc = re.sub("[\[<\()][^a-zA-Z]+[\]\)>]", "", doc)
# remove <EMAIL> and <URL>
doc = re.sub("(<EMAIL>|<URL>)", "", doc)
# split it into sentences
sents = sent_tokenize(doc)
# for each sentence strip - and space
res = []
for s in sents:
first_upper = -1
for i, c in enumerate(s):
if c.isalpha() and c.isupper():
first_upper = i
break
cs = s
if first_upper >= 0:
cs = s[first_upper:]
if len(cs.split()) > 3:
res.append(cs)
return [x.rstrip("\n\t\r -") for x in res]
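# A rough usage sketch (hypothetical text; the exact sentence split depends on nltk's punkt model):
#     clean_paragraph("Sign-in requires an EHR account, see <URL> for the setup steps.")
# The call pads "-" with spaces, drops "<EMAIL>"/"<URL>" markers, splits the text into
# sentences, trims each sentence to start at its first uppercase letter, and discards
# sentences of three or fewer tokens.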
def load_arts(file):
df = pd.read_csv(file)
arts = {}
for id, content in zip(df['id'], df['arts']):
arts[id] = content
return arts
def load_links(file):
df = pd.read_csv(file)
links = []
for sid, tid in zip(df['sid'], df['tid']):
links.append((sid, tid))
return links
def load_concpet(file):
df = pd.read_csv(file)
cpts = {}
for id, phrases in zip(df['ids'], df['phrase']):
plist = eval(phrases)
cpts[id] = plist
return cpts
| 1,689
| 22.802817
| 60
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/domain_data_collection/basic_concept_relation.py
|
"""
Find concept relations with basic lexical rules (matching lemmas, stems, and WordNet synonyms)
"""
import argparse
from pathlib import Path
from nltk import WordNetLemmatizer, PorterStemmer
from nltk.corpus import wordnet
import sys
sys.path.append("..")
from domain_data_collection import utils
import json
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
def get_lemmas(concept, lemmatizer):
tks = concept.split()
return [lemmatizer.lemmatize(x) for x in tks]
def get_stemmer(concept, stemmer):
tks = concept.split()
return [stemmer.stem(tk) for tk in tks]
def read_project(proj_dir):
art_path = Path(proj_dir, "artifacts.csv")
lk_path = Path(proj_dir, "links.csv")
cpt_path = Path(proj_dir, "concepts.csv")
arts = utils.load_arts(art_path)
lk = utils.load_links(lk_path)
cpts = utils.load_concpet(cpt_path)
return arts, lk, cpts
def get_rel_type(p1, p2):
tk1, tk2 = p1.split(), p2.split()
stem1, stem2 = get_stemmer(p1, stemmer), get_stemmer(p2, stemmer)
lm1, lm2 = get_lemmas(p1, lemmatizer), get_lemmas(p2, lemmatizer)
stem_str1, stem_str2 = " ".join(stem1), " ".join(stem2)
lemm_str1, lemm_str2 = " ".join(lm1), " ".join(lm2)
if lemm_str1 == lemm_str2:
return "same_as"
if stem_str1 == stem_str2:
return "synonym_of"
if lemm_str1 in lemm_str2:
return "parent_of"
if lemm_str2 in lemm_str1:
return "child_of"
if lm1[-1] == lm2[-1]:
return "sibling"
syn1, syn2 = {}, {}
for lm in lm1:
syn1[lm] = {lm}
for syn in wordnet.synsets(lm):
for l in syn.lemmas():
syn1[lm].add(l.name())
for lm in lm2:
syn2[lm] = {lm}
for syn in wordnet.synsets(lm):
for l in syn.lemmas():
syn2[lm].add(l.name())
reasons = set()
for i, l1 in enumerate(lm1):
s1 = stem1[i]
t1 = tk1[i]
for j, l2 in enumerate(lm2):
s2 = stem2[j]
t2 = tk2[j]
if t1 == t2 or l1 == l2:
reasons.add(f"{t1} same_as {t2}")
elif s1 == s2:
reasons.add(f"{t1} synonym_of {t2}")
else:
inter = syn1[l1].intersection(syn2[l2])
if len(inter) > 0:
reasons.add(f"{t1} synonym_of {t2}")
if len(reasons) > 0:
return ",".join(reasons)
return None
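# Illustrative calls (hypothetical phrases, not taken from any project's concept list):
#     get_rel_type("patient records", "patient record")        # lemmas match       -> "same_as"
#     get_rel_type("patient record", "patient record system")  # lemma substring    -> "parent_of"
#     get_rel_type("lab record", "audit record")               # shared head lemma  -> "sibling"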
def find_concept_relation(plist1, plist2):
relations = []
for p1 in plist1:
for p2 in plist2:
rel_type = get_rel_type(p1, p2)
if rel_type:
relations.append((p1, rel_type, p2))
return relations
def main(proj_dir, out_file):
arts, lk, cpts = read_project(proj_dir)
results = []
for sid, tid in lk:
if sid not in arts or tid not in arts:
continue
sart, tart = arts[sid], arts[tid]
scpt, tcpt = cpts[sid], cpts[tid]
relations = list(set(find_concept_relation(scpt, tcpt)))
results.append({
"sid": sid,
"tid": tid,
"sart": sart,
"tart": tart,
"relation": relations
})
with open(out_file, 'w') as fout:
json.dump(results, fout, indent=4)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="find related concept in source and target artifacts"
)
parser.add_argument("--proj_dir", help="concepts extracted from project artifacts")
parser.add_argument("--out_file", help="output the relation in json format")
args = parser.parse_args()
main(proj_dir=args.proj_dir, out_file=args.out_file)
| 3,633
| 27.170543
| 87
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/domain_data_collection/corpus_build_top_down.py
|
import gzip, os, json, argparse
import sys
sys.path.append(".")
sys.path.append("..")
from multiprocessing import Pool, Process, Queue
from domain_data_collection.corpus_build_bot_up import get_concepts
from cleantext import clean
from domain_data_collection.utils import clean_paragraph
from collections import defaultdict
import jsonlines
from pattmatch.kmp import kmp
CORP_ABSTR = "ABSTRACT"
CORP_FULL = "FULL"
PROG_FILE = "progress_file.txt"
OUT_FILE = "top_down_corpus.jsonl"
MERGED_OUT_FILE = "top_down_corpus_merged.jsonl"
def read_gorc_file(fpath, data_type=CORP_ABSTR):
if fpath.endswith(".gz"):
with gzip.open(fpath, "r") as f:
for line in f:
line = json.loads(line)
if type(line).__name__ != "dict":
print(type(line))
else:
if line["grobid_parse"] != None:
text_body = None
if (
line["grobid_parse"]["abstract"] != None
and data_type == CORP_ABSTR
):
text_body = line["grobid_parse"]["abstract"]
elif (
line["grobid_parse"]["body_text"] != None
and data_type == CORP_FULL
):
text_body = line["grobid_parse"]["body_text"]
if text_body is not None:
for body in text_body:
if body["text"] != None:
text = body["text"]
yield text
def sent_process_worker(input_q, output_q, concepts):
"""
    1. Open a gzipped GORC corpus file
    2. Select the sentences that contain one of the query concepts
    3. Send them to the output queue
"""
while True:
fpath = input_q.get()
if fpath is None:
break
for text in read_gorc_file(fpath):
par = clean(text).strip("\n\t\r ")
sents = clean_paragraph(par)
for s in sents:
stokens = set(s.split())
if len(stokens) < 5:
continue
for query in concepts:
if query.lower() in s.lower():
idxs = kmp(s.lower().split(), query.lower().split())
if len(idxs) > 0:
output_q.put(
{
"query": query,
"sent": s,
}
)
output_q.put({"file": fpath})
def start_sent_selector(concepts, fpath_list, output_q, process_num):
input_q = Queue()
for fpath in fpath_list:
input_q.put(fpath)
workers = []
for _ in range(process_num):
input_q.put(None)
w = Process(
target=sent_process_worker,
args=(
input_q,
output_q,
concepts,
),
)
w.start()
workers.append(w)
return workers
def res_collector_worker(output_q, out_file, ckpt_file, proc_num):
done_proc_cnt = 0
processed_files = set()
query_sent_map = defaultdict(set)
while True:
item = output_q.get()
if item is None:
done_proc_cnt += 1
if done_proc_cnt == proc_num:
break
else:
if "file" in item:
processed_files.add(item["file"])
if len(processed_files) % 20 == 0:
print(f"{len(processed_files)} files have been processed")
write_checkpoint(
out_file, ckpt_file, processed_files, query_sent_map
)
query_sent_map = defaultdict(set)
else:
if len(query_sent_map[item["query"]]) <= 50:
query_sent_map[item["query"]].add(item["sent"])
write_checkpoint(out_file, ckpt_file, processed_files, query_sent_map)
print("finished")
def start_res_collector(output_q, out_file, ckpt_file, proc_num):
w = Process(
target=res_collector_worker,
args=(output_q, out_file, ckpt_file, proc_num),
)
w.start()
return w
def write_checkpoint(out_file, ckpt_file, processed_files, query_sent_map):
with open(ckpt_file, "w") as fout:
for pfile in processed_files:
fout.write(f"{pfile}\n")
with jsonlines.open(out_file, "a") as fout:
for query in query_sent_map:
sents = query_sent_map[query]
fout.write(
{
"query": query,
"sent_num": len(sents),
"sentences": list(sents),
}
)
def merge_corpus(corpus_file, out_file):
query_sent_map = defaultdict(set)
with jsonlines.open(corpus_file) as fin:
for obj in fin:
query, sents = obj["query"], obj["sentences"]
query_sent_map[query].update(sents)
with jsonlines.open(out_file, "w") as fout:
for q in query_sent_map:
fout.write(
{
"query": q,
"sent_num": len(query_sent_map[q]),
"sentences": list(query_sent_map[q]),
}
)
def corpus_scan(concepts, corpus_dir, out_dir, proc_num, is_resume):
output_q = Queue()
ckpt_file = os.path.join(out_dir, PROG_FILE)
out_file = os.path.join(out_dir, OUT_FILE)
merged_out_file = os.path.join(out_dir, MERGED_OUT_FILE)
g = os.walk(corpus_dir)
visited_file, fpath_list = set(), set()
if os.path.isfile(ckpt_file) and is_resume:
with open(ckpt_file) as fin:
visited_file.update(fin.read().splitlines())
for root, dir_list, file_list in g:
for fname in file_list:
fpath = os.path.join(root, fname)
if fpath not in visited_file:
fpath_list.add(fpath)
print(
f"{len(visited_file)} has been processed, {len(fpath_list)} to process in total files."
)
sel_workers = start_sent_selector(concepts, fpath_list, output_q, proc_num)
col_worker = start_res_collector(output_q, out_file, ckpt_file, proc_num)
for p in sel_workers:
p.join()
output_q.put(None)
col_worker.join()
merge_corpus(out_file, merged_out_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--corpus_dir",
default="/afs/crc.nd.edu/group/dmsquare/vol5/Data-CiteExplainer/gorc",
help="path to the coprus",
)
parser.add_argument(
"--concept_file", help="path to the file store the concepts for every artifacts"
)
parser.add_argument(
"--out_dir", help="output the collected corpus in the format of json"
)
parser.add_argument(
"--is_resume",
default=True,
help="resume work by reading existing progress_file.txt",
)
parser.add_argument(
"--proc_num",
default=24,
type=int,
help="process number",
)
args = parser.parse_args()
cpt_set = get_concepts(args.concept_file)
if not os.path.isdir(args.out_dir):
os.makedirs(args.out_dir)
corpus_scan(
concepts=cpt_set,
corpus_dir=args.corpus_dir,
out_dir=args.out_dir,
proc_num=args.proc_num,
is_resume=args.is_resume,
)
| 7,596
| 31.32766
| 95
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/domain_data_collection/extract_from_corpus.py
|
import argparse
import os
from collections import defaultdict
import sys
sys.path.append(".")
sys.path.append("..")
from jsonlines import jsonlines
from nltk.corpus import wordnet
from pattmatch import kmp
from tqdm import tqdm
from abbreviations import schwartz_hearst
from concept_detection.EntityDetection import DomainKG, Concept
from multiprocessing import Queue, Process
from domain_data_collection.utils import read_regular_concepts
ACRON_OUT = "acronym.jsonl"
def read_acronyms(acronym_file):
acronym, inv_acronym = dict(), dict()
with jsonlines.open(acronym_file) as fin:
for obj in fin:
acronym[obj["short"]] = obj["long"][0]
inv_acronym[obj["long"][0]] = obj["short"]
return acronym, inv_acronym
def write_acronyms(acronyms, out):
with jsonlines.open(out, "w") as fout:
for short in acronyms:
fout.write({"short": short, "long": list(acronyms[short])})
# pip install git+git://github.com/tasos-py/Search-Engines-Scraper
# pip install abbreviations
# https://github.com/philgooch/abbreviation-extraction
def extract_acronym(clean_corpus, acronym_out):
"""
    Scan the cleaned corpus and extract acronym/long-form pairs with the Schwartz-Hearst algorithm.
    :param clean_corpus: jsonlines corpus where each record holds a query concept and its sentences
    :param acronym_out: output jsonlines file mapping each short form to its long forms
    :return:
"""
with jsonlines.open(clean_corpus) as fin:
acronyms = defaultdict(set)
for obj in tqdm(fin):
query = obj["query"]
for s in obj["sentences"]:
acronym_pairs = schwartz_hearst.extract_abbreviation_definition_pairs(
doc_text=s
)
for short, long in acronym_pairs.items():
if short.lower() != long.lower():
acronyms[short].add(long)
write_acronyms(acronyms, acronym_out)
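# Sketch of one output record (hypothetical acronym; the shape follows write_acronyms above):
#     {"short": "EHR", "long": ["electronic health record"]}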
def get_valid_verb():
# seed_verb = ["contain", "belong", "have", "include", "utilize", "use", "need", "require", "make", "conduct",
# "determine", "help", "achieve"]
seed_verb = ["contain", "belong", "have", "include", "utilize", "use", "need", "determine"]
verb_set = set()
for sv in seed_verb:
for syn in wordnet.synsets(sv):
if syn.pos() != "v":
continue
verb_set.update([x.name().replace("_", " ") for x in syn.lemmas()])
return verb_set
def is_valid_verb(verb, valid_verbs):
extra_verb = {"such as", "of"}
if verb in valid_verbs or verb in extra_verb:
return True
for tk in verb.split():
if tk in valid_verbs:
return True
return False
def write_results(results, out_dir):
def write_relation(rel_list, fout):
for relation in rel_list:
fout.write({"left": relation[0], "verb": relation[1], "right": relation[2]})
disc_concepts = os.path.join(out_dir, "concept.jsonl")
disc_vague_relation = os.path.join(out_dir, "vague_relation.jsonl")
disc_clear_relation = os.path.join(out_dir, "clear_relation.jsonl")
disc_definition = os.path.join(out_dir, "definition.jsonl")
disc_context = os.path.join(out_dir, "context.jsonl")
with jsonlines.open(disc_concepts, "w") as fout:
for cpt in results["concepts"]:
cpt = cpt.strip("\n\t\r ")
fout.write({"concept": cpt})
with jsonlines.open(disc_clear_relation, "w") as fout:
write_relation(results["clear_relations"], fout)
with jsonlines.open(disc_vague_relation, "w") as fout:
write_relation(results["vague_relations"], fout)
with jsonlines.open(disc_definition, "w") as fout:
defs = results["definitions"]
for cpt in defs:
fout.write({"concept": cpt, "definition": list(defs[cpt])})
with jsonlines.open(disc_context, "w") as fout:
ctxs = results["context"]
for cpt in ctxs:
fout.write({"concept": cpt, "context": list(ctxs[cpt])})
def _worker_map(job_queue, out_queue, acronyms, inv_acronyms, reg_cpts):
dkg = DomainKG()
while True:
is_def, is_ctx = False, False
valid_verbs = get_valid_verb()
clean_relation, vague_relation = set(), set()
concepts = set()
job = job_queue.get()
if job is None:
break
s, query = job["sent"], job["query"]
try:
ann_sent = dkg.client.annotate(s).sentence[0]
        except Exception:
            continue
type = is_definition(ann_sent, query, s)
if type == "def":
is_def = True
elif type == "ctx":
is_ctx = True
words, pos = [], []
for w in ann_sent.token:
words.append(w.word)
pos.append(w.pos)
concept_index = dict()
for c in dkg.extract_concepts(words, pos):
for i in range(c.start, c.end):
concept_index[i] = c
# mark the query in the sentences to override the concept detection
qtks = query.split()
n = 0
qconcept = []
while n < len(words):
match = True
for k, qtk in enumerate(qtks):
if n + k >= len(words) or words[n + k] != qtk:
match = False
break
if match:
for j in range(len(qtks)):
c = Concept(n, n + len(qtks), query)
concept_index[n + j] = c
qconcept.append(c)
n += len(qtks) - 1
n += 1
# remove overlapped concept
tmp_del = set()
for idx in concept_index:
c = concept_index[idx]
for qc in qconcept:
if (
qc.start <= c.start <= qc.end or qc.start <= c.end <= qc.end
) and not (qc.start == c.start and qc.end == c.end):
tmp_del.add(idx)
for idx in tmp_del:
del concept_index[idx]
relations = dkg.extract_relations(ann_sent, concept_index)
concepts.update([str(x[0]) for x in relations if is_valid_cpt(x[0], reg_cpts)])
concepts.update([str(x[2]) for x in relations if is_valid_cpt(x[2], reg_cpts)])
for x in relations:
if not is_valid_cpt(x[0], reg_cpts) or not is_valid_cpt(x[2], reg_cpts):
continue
if is_valid_verb(x[1], valid_verbs):
clean_relation.add(x)
else:
vague_relation.add(x)
out_queue.put(
{
"query": query,
"sent": s,
"concepts": list(concepts),
"is_def": is_def,
"is_ctx": is_ctx,
"clear_relations": clean_relation,
"vague_relations": vague_relation,
}
)
print("finished one process")
out_queue.put(None)
def is_valid_cpt(cpt, reg_cpts):
if cpt.lower() in reg_cpts:
return False
return True
def _worker_reduce(output_queue, map_num, out_dir):
finished = 0
concepts = set()
definitions = defaultdict(set)
context = defaultdict(set)
clear_relation = set()
vague_relation = set()
while True:
output = output_queue.get()
if output is None:
finished += 1
if finished >= map_num:
break
else:
continue
concepts.update(output["concepts"])
clear_relation.update(output["clear_relations"])
vague_relation.update(output["vague_relations"])
if output["is_def"]:
definitions[output["query"]].add(output["sent"])
elif output["is_ctx"]:
context[output["query"]].add(output["sent"])
print("finished collecting results")
bup_res = {
"concepts": list(concepts),
"definitions": definitions,
"context": context,
"clear_relations": list(clear_relation),
"vague_relations": list(vague_relation),
}
write_results(bup_res, out_dir)
def extract_definitions_and_relation(clean_corpus, acronym_file, out_dir, regcpt_file):
reg_cpts = read_regular_concepts(regcpt_file)
print("regular concept loaded...")
acronyms, inv_acronyms = read_acronyms(acronym_file)
map_num = 4
mapworker = []
job_q, out_q = Queue(), Queue()
for _ in range(map_num):
w = Process(
target=_worker_map, args=(job_q, out_q, acronyms, inv_acronyms, reg_cpts)
)
mapworker.append(w)
w.start()
rp = Process(target=_worker_reduce, args=(out_q, map_num, out_dir))
rp.start()
with jsonlines.open(clean_corpus) as fin:
for obj in fin:
sents = obj["sentences"]
query = obj["query"]
for s in sents[:100]:
job_q.put({"sent": s, "query": query})
for w in mapworker:
job_q.put(None)
for w in mapworker:
w.join()
rp.join()
job_q.close()
out_q.close()
def is_definition(asent, query, s):
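    """
    Heuristically label how sentence `s` uses the query concept, based on the
    CoreNLP-annotated sentence `asent` (tokens, POS tags, enhanced++ dependencies).
    Returns "def" when the concept appears within the first two tokens as the subject
    of an "is/are" copular construction, "ctx" when it is the subject of another
    (non-past-tense) verb or of a copular pattern that does not qualify as a
    definition, and False/None otherwise.
    """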
if s.endswith("?") or s.endswith("!") or query.lower() not in s.lower():
return False
query_idxs = kmp([x.word.lower() for x in asent.token], query.lower().split())
if len(query_idxs) == 0:
return None
query_idxs = query_idxs[0]
fidx, lidx = query_idxs[0], query_idxs[-1]
pre_tks, post_tks = asent.token[:fidx], asent.token[lidx:]
if len(post_tks) > 0:
if fidx < 2:
in_deps, out_deps = defaultdict(dict), defaultdict(dict)
for r in asent.enhancedPlusPlusDependencies.edge:
t1_idx, rel, t2_idx = r.source - 1, r.dep.split(":"), r.target - 1
rel = rel[0]
out_rels = out_deps[t1_idx]
tmp = out_rels.get(rel, [])
tmp.append(t2_idx)
out_deps[t1_idx][rel] = tmp
in_rels = in_deps[t2_idx]
tmp = in_rels.get(rel, [])
tmp.append(t1_idx)
in_deps[t2_idx][rel] = tmp
for idx in range(query_idxs[0], query_idxs[-1]):
subj_dep = in_deps[idx].get("nsubj", [])
if len(subj_dep) > 0:
obj = subj_dep[0]
if (
asent.token[obj].pos.startswith("NN")
and len(out_deps[obj].get("cop", [])) > 0
):
cop_idx = out_deps[obj]["cop"][0]
if asent.token[cop_idx].word in ["is", "are"] and (
fidx == 0 or asent.token[0].lemma in ["a", "an", "the"]
):
return "def"
else:
return "ctx"
elif asent.token[obj].pos.startswith("VB") and asent.token[
obj
].pos not in ["VBD"]:
return "ctx"
return None
def extract_info(corpus, out_dir, regcpt_file):
acronym_out = os.path.join(out_dir, ACRON_OUT)
extract_acronym(clean_corpus=corpus, acronym_out=acronym_out)
extract_definitions_and_relation(
clean_corpus=corpus,
acronym_file=acronym_out,
out_dir=out_dir,
regcpt_file=regcpt_file,
)
if __name__ == "__main__":
"""
create clean sentences for each query concept
"""
parser = argparse.ArgumentParser()
parser.add_argument("--corpus_file", help="corpus for extraction")
parser.add_argument("--out_dir", help="dir to output the extracted information")
parser.add_argument(
"--regular_concepts", help="file to the list of regular concepts"
)
args = parser.parse_args()
extract_info(
corpus=args.corpus_file, out_dir=args.out_dir, regcpt_file=args.regular_concepts
)
| 11,747
| 32.855908
| 114
|
py
|
TraceLinkExplanation
|
TraceLinkExplanation-master/domain_data_collection/relation_graph.py
|
import os
from asyncio import start_server
import pandas as pd
from graph_tools import Graph
from pathlib import Path
import sys
from multiprocessing import Pool
from graphviz import Source
from jsonlines import jsonlines
from stanza.server import CoreNLPClient
from nltk.stem import WordNetLemmatizer
from pattmatch import kmp
sys.path.append("..")
EDGE_VERB = "verb"
class RelationGraph:
def __init__(self, bidirection=True):
"""
Data structure for storing the relationships between the concepts
"""
self.lemm = WordNetLemmatizer()
self.g = Graph()
self.bidirection = bidirection
def process_concept_name(self, concept):
tokens = concept.lower().split()
tokens = [self.lemm.lemmatize(x) for x in tokens]
return " ".join(tokens)
def add_vertex(self, concept):
"""
Add vertex if not exist
"""
self.g.add_vertex(self.process_concept_name(concept))
def add_relation(self, relation):
self._add_relation(relation)
if self.bidirection:
self._add_relation(reversed(relation))
def find_vague_match(self, concept):
c_tokens = concept.split()
for v in self.g.vertices():
v_tokens = v.split()
if len(kmp(c_tokens, v_tokens)) or len(kmp(v_tokens, c_tokens)):
return v
return None
def is_reachable(self, c1, c2, vague=False):
c1, c2 = self.process_concept_name(c1), self.process_concept_name(c2)
c1_match = c1 if c1 in self.g.vertices() else self.find_vague_match(c1)
c2_match = c2 if c2 in self.g.vertices() else self.find_vague_match(c2)
if c1_match == None or c2_match == None:
return False
elif c1_match == c2_match:
return True
return self.g.is_reachable(c1_match, c2_match) is not None
def find_path(self, c1, c2):
c1, c2 = self.process_concept_name(c1), self.process_concept_name(c2)
c1_match = c1 if c1 in self.g.vertices() else self.find_vague_match(c1)
c2_match = c2 if c2 in self.g.vertices() else self.find_vague_match(c2)
if c1_match == None or c2_match == None:
return False
all_path = self.g.shortest_paths(c1_match, c2_match)
if len(all_path) == 0:
return []
path = all_path[0]
if len(path) == 1:
return (c1, "synonym", c2)
elif len(path) > 1:
path_tup = []
for cur in range(1, len(path)):
pnode = path[cur - 1]
cnode = path[cur]
verb = list(
self.g.get_edge_attribute_by_id(pnode, cnode, 0, EDGE_VERB)
)[0]
path_tup.append((pnode, verb, cnode))
if c1 not in self.g.vertices():
path_tup.insert(0, (c1, "match", path_tup[0][0]))
if c2 not in self.g.vertices():
path_tup.append((path_tup[-1][-1], "match", c2))
return path_tup
else:
return []
def _add_relation(self, relation):
u, verb, v = relation
u = self.process_concept_name(u)
v = self.process_concept_name(v)
if not self.g.has_edge(u, v):
self.g.add_edge(u, v)
            verbs = set() if verb is None else {verb}
else:
verbs = self.g.get_edge_attribute_by_id(u, v, 0, EDGE_VERB)
if verb is not None:
verbs.add(verb)
self.g.set_edge_attribute_by_id(u, v, 0, EDGE_VERB, verbs)
@property
def concepts(self):
return self.g.vertices()
@property
def links(self):
return self.g.unique_edges()
def merge(self, graph):
for v in graph.g.vertices():
self.g.add_vertex(v)
for (u, v) in graph.g.edges():
self.g.add_edge(u, v)
self.g.add_edge(v, u)
            verbs = self.g.get_edge_attribute_by_id(u, v, 0, EDGE_VERB)
            if not verbs:
                verbs = set()
            verbs.update(graph.g.get_edge_attribute_by_id(u, v, 0, EDGE_VERB) or set())
            self.g.set_edge_attribute_by_id(u, v, 0, EDGE_VERB, verbs)
def clean_graph(self, anchors, cores=4):
"""
        Clean the graph by removing the concepts (and corresponding edges) that do not link to an anchor-point concept.
"""
keep = set()
with Pool(cores) as pool:
res = pool.starmap(
self.clean_worker, [(i, anchors, self.g) for i, a in enumerate(anchors)]
)
for r in res:
keep.update(r)
for v in self.g.vertices():
if v not in keep:
self.g.delete_vertex(v)
@staticmethod
def clean_worker(idx, anchors, graph):
keep = set()
s = anchors[idx]
for i in range(idx, len(anchors)):
t = anchors[i]
for p in graph.shortest_paths(s, t):
for n in p:
keep.add(n)
return keep
def _load(self, concepts, links, verbs):
for c in concepts:
self.g.add_vertex(c)
for l in links:
for r in links[l]:
self.g.add_edge(l, r)
self.g.add_edge(r, l)
self.g.set_edge_attribute_by_id(l, r, 0, EDGE_VERB, verbs[l, r])
self.g.set_edge_attribute_by_id(r, l, 0, EDGE_VERB, verbs[l, r])
return self
def load(self, link_file):
if link_file.endswith("csv"):
rdf = pd.read_csv(link_file)
for idx, row in rdf.iterrows():
lc, rc = row["left"].lower(), row["right"].lower()
self.add_relation((lc, None, rc))
verbs = set(eval(row["verbs"]))
self.g.set_edge_attribute_by_id(lc, rc, 0, EDGE_VERB, verbs)
elif link_file.endswith("jsonl"):
with jsonlines.open(link_file) as fin:
for obj in fin:
lc, rc = obj["left"].lower(), obj["right"].lower()
verb = obj["verb"]
self.add_relation((lc, None, rc))
self.g.set_edge_attribute_by_id(lc, rc, 0, EDGE_VERB, verb)
return self
def dump(self, dir, link_file):
if not os.path.isdir(dir):
os.makedirs(dir)
rfile = os.path.join(dir, link_file)
rdf = pd.DataFrame(columns=["left", "right", "verbs"])
for lc, rc in self.g.edges():
rdf = rdf.append(
{
"left": lc,
"right": rc,
"verbs": self.g.get_edge_attribute_by_id(lc, rc, 0, EDGE_VERB),
},
ignore_index=True,
)
rdf.to_csv(rfile, index=False)
def draw(self, filename):
s = self.g.export_dot()
Source(s).render(filename)
def __contains__(self, concept):
concept = self.process_concept_name(concept)
return self.g.has_vertex(concept)
if __name__ == "__main__":
rel_g = RelationGraph()
rel_g.add_vertex("Apple")
rel_g.add_vertex("apples")
rel_g.add_vertex("orange")
rel_g.add_vertex("fruit")
rel_g.add_relation(("orange", "is", "fruit"))
rel_g.add_relation(("apple", "is", "fruit"))
print(rel_g.g.unique_edges())
print(rel_g.is_reachable("Apple", "orange"))
| 7,405
| 33.287037
| 122
|
py
|
Opportunistic
|
Opportunistic-master/DataGenerator.py
|
import pdb
import numpy as np
from DataPoint import DataPoint
class DataGenerator:
def __init__(self, distribution):
self.__distribution = distribution
def get_data_point(self, label_noise=0.00):
ind, features, label, feature_costs, label_cost = next(self.__distribution)
if np.random.rand() < label_noise:
label = np.random.choice(int(label)+1)
return DataPoint(ind, features, label, feature_costs, label_cost)
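# A minimal usage sketch (assumes `dist` is a generator yielding
# (index, features, label, feature_costs, label_cost) tuples, e.g. NHANES.get_distribution):
#     gen = DataGenerator(dist)
#     dp = gen.get_data_point(label_noise=0.05)  # occasionally resamples the label from {0, ..., label}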
| 470
| 25.166667
| 83
|
py
|
Opportunistic
|
Opportunistic-master/DataPoint.py
|
import numpy as np
class DataPoint():
def __init__(self, ind, features, label, feature_costs, label_cost):
self.__features = features
self.__label = label
self.__feature_costs = feature_costs
self.__label_cost = label_cost
self.__accumulated_cost = 0
self.__known_features = np.ones(len(features))*np.nan
self.__is_label_known = False
self.__index = ind
def get_features(self):
return self.__features * self.__known_features
def get_label(self):
if self.__is_label_known:
return self.__label
else:
return np.nan
def request_label(self):
self.__accumulated_cost += self.__label_cost
self.__is_label_known = True
return self.get_label()
def request_feature(self, index):
self.__accumulated_cost += self.__feature_costs[index]
self.__known_features[index] = 1
return self.get_features()
def get_accumulated_cost(self):
return self.__accumulated_cost
def get_feature_costs(self):
return self.__feature_costs
    def get_label_costs(self):
        return self.__label_cost
def get_index(self):
return self.__index
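# A minimal usage sketch (hypothetical feature values and costs):
#     dp = DataPoint(0, np.array([1.5, 3.0]), 1, np.array([2.0, 5.0]), 1.0)
#     dp.get_features()      # -> array([nan, nan]), nothing has been purchased yet
#     dp.request_feature(1)  # -> array([nan, 3.0]); accumulated cost is now 5.0
#     dp.request_label()     # -> 1; accumulated cost is now 6.0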
| 1,079
| 18.636364
| 69
|
py
|
Opportunistic
|
Opportunistic-master/nhanes.py
|
import pdb
import glob
import copy
import os
import pickle
import joblib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats
import sklearn.feature_selection
class FeatureColumn:
def __init__(self, category, field, preprocessor, args=None, cost=None):
self.category = category
self.field = field
self.preprocessor = preprocessor
self.args = args
self.data = None
self.cost = cost
class NHANES:
def __init__(self, db_path=None, columns=None):
self.db_path = db_path
        self.columns = columns # Deprecated
        self.dataset = None # Deprecated
self.column_data = None
self.column_info = None
self.df_features = None
self.df_targets = None
self.costs = None
def process(self):
df = None
cache = {}
# collect relevant data
df = []
for fe_col in self.columns:
sheet = fe_col.category
field = fe_col.field
data_files = glob.glob(self.db_path+sheet+'/*.XPT')
df_col = []
for dfile in data_files:
print(80*' ', end='\r')
print('\rProcessing: ' + dfile.split('/')[-1], end='')
# read the file
if dfile in cache:
df_tmp = cache[dfile]
else:
df_tmp = pd.read_sas(dfile)
cache[dfile] = df_tmp
                # skip if there is no SEQN
if 'SEQN' not in df_tmp.columns:
continue
#df_tmp.set_index('SEQN')
                # skip if there is nothing interesting there
sel_cols = set(df_tmp.columns).intersection([field])
if not sel_cols:
continue
else:
df_tmp = df_tmp[['SEQN'] + list(sel_cols)]
df_tmp.set_index('SEQN', inplace=True)
df_col.append(df_tmp)
try:
df_col = pd.concat(df_col)
except:
                #raise Error('Failed to process ' + field)
                raise Exception('Failed to process ' + field)
df.append(df_col)
df = pd.concat(df, axis=1)
#df = pd.merge(df, df_sel, how='outer')
# do preprocessing steps
df_proc = []#[df['SEQN']]
for fe_col in self.columns:
field = fe_col.field
fe_col.data = df[field].copy()
# do preprocessing
if fe_col.preprocessor is not None:
prepr_col = fe_col.preprocessor(df[field], fe_col.args)
else:
prepr_col = df[field]
# handle the 1 to many
if (len(prepr_col.shape) > 1):
fe_col.cost = [fe_col.cost] * prepr_col.shape[1]
else:
fe_col.cost = [fe_col.cost]
df_proc.append(prepr_col)
self.dataset = pd.concat(df_proc, axis=1)
return self.dataset
def index(self, renew_cache=False):
# check if we don't have to renew cache
cache_path = self.db_path + 'cache/index_cache.pkl'
if (not renew_cache) and (os.path.exists(cache_path)):
print('Loading from cache:', cache_path)
try:
with open(cache_path, 'rb') as f:
self.column_data, self.column_info = pickle.load(f)
except:
self.column_data, self.column_info = joblib.load(cache_path)
return
# indexed cache file
self.column_data = {}
self.column_info = {}
# get the list of all sheets
sheets = [p.split('/')[-1] for p in glob.glob(self.db_path+'/*')]
# for each sheet read and index each data-file
for sheet in sheets:
dfiles = glob.glob(self.db_path+sheet+'/*.XPT')
for dfile in dfiles:
print('\rProcessing:', dfile, end='')
df = pd.read_sas(dfile)
if 'SEQN' not in df.columns:
continue
# read file columns and index them
df.set_index('SEQN', drop=True, inplace=True)
if not df.index.is_unique:
continue
for col in df.columns:
# if the column is not cached ever
if col not in self.column_data:
self.column_data[col] = df.loc[:,[col]]
# ignore duplicates
self.column_data[col] = self.column_data[col].groupby(level=0).last()
# get column info
self.update_column_info(col, dfile)
# else, we have cached info for this column
else:
# merge them
self.column_data[col] = pd.concat(
[self.column_data[col], df.loc[:,[col]]],
axis=0, verify_integrity=False)
# ignore duplicates
#if not self.column_data[col].index.is_unique:
# pdb.set_trace()
#self.column_data[col] = self.column_data[col].groupby(level=0).last()
self.column_data[col] = self.column_data[col][~self.column_data[col].index.duplicated(keep=False)]
# store/update cache file
os.makedirs(os.path.dirname(cache_path), exist_ok=True)
print('\rStoring to cache:', cache_path)
try:
with open(cache_path, 'wb+') as f:
pickle.dump((self.column_data, self.column_info), f)
except:
joblib.dump((self.column_data, self.column_info), cache_path, compress=9)
return
def process_supervised(self, target_col, exclude_cols, include_cols=None,
preproc_target=None, preproc_target_args=None,
missing_threshold=1.0, muinfo_threshold=0.0):
# remove missing target values
if type(target_col) is list:
self.df_targets = self.column_data[target_col[0]]
for t_col in target_col[1:]:
self.df_targets = pd.concat([self.df_targets, self.column_data[t_col]],
axis=1, join='inner')
else:
self.df_targets = self.column_data[target_col].copy()
self.df_targets.dropna(axis=0, how='any', inplace=True)
# get target dataframe
#if type(preproc_target) is tuple:
if preproc_target != None:
self.df_targets = preproc_target(self.df_targets, preproc_target_args)
self.df_targets.dropna(axis=0, how='any', inplace=True)
# process features
self.df_features = pd.DataFrame()
for col in self.column_data.keys():
print(80*' ', end='\r')
print('Processing:', col, end='\r')
# check if we should skip this column
if col in exclude_cols or col == target_col:
continue
if self.column_data[col].dtypes[0] == np.dtype('O'):
continue
if include_cols != None:
if col not in include_cols:
continue
# find the intersection of the two
df_valid = pd.concat([self.df_targets, self.column_data[col]],
axis=1, join='inner')
df_col_valid = df_valid.loc[:,[col]]
# if too many nans, skip the column
if df_col_valid.isna().mean()[0]>missing_threshold or df_valid.shape[0]==0:
continue
# low r-value, skip the column
xx = df_valid.values
np.nan_to_num(xx, copy=False)
try:
mu_info = sklearn.feature_selection.mutual_info_classif(
xx[:,1].reshape(-1, 1), xx[:,0].ravel().astype(np.int))
#mu_info = np.abs(scipy.stats.pearsonr(xx[:,0], xx[:,1])[0])
#print(mu_info)
except:
continue
if mu_info < muinfo_threshold:
continue
# do outer join
self.df_features = pd.concat([self.df_features, df_col_valid], axis=1, join='outer')
count_thresh = self.df_features.shape[0]*(1-missing_threshold)
self.df_features.dropna(axis=1, thresh=count_thresh, inplace=True)
# preprocess all features
self.costs = []
prep_features = []
for col in self.df_features.columns:
prepp_col = preprocess(self.df_features[col], self.column_info[col], None)
if type(prepp_col) == type(None):
continue
prep_features.append(prepp_col)
prep_len = 1
if len(prepp_col.shape) == 2:
prep_len = prepp_col.shape[1]
self.costs.extend([self.column_info[col]['cost']] * prep_len)
self.df_features = pd.concat(prep_features, axis=1)
self.costs = np.array(self.costs)
# shuffle them
inds_perm = copy.deepcopy(self.df_features.index.values)
np.random.shuffle(inds_perm)
self.df_features = self.df_features.loc[inds_perm]
self.df_targets = self.df_targets.loc[inds_perm]
return (self.df_features, self.df_targets)
def update_column_info(self, col, dfile):
self.column_info[col] = {}
# get column values
vals = self.column_data[col][col]
vals_unique = len(vals.unique())
if vals_unique < 20:
self.column_info[col]['type'] = 'categorical'
else:
self.column_info[col]['type'] = 'real'
# set feature costs
sheet = dfile.split('/')[-2]
if sheet == 'Demographics':
self.column_info[col]['cost'] = 2.0
elif sheet == 'Dietary':
self.column_info[col]['cost'] = 4.0
elif sheet == 'Examination':
self.column_info[col]['cost'] = 5.0
elif sheet == 'Questionnaire':
self.column_info[col]['cost'] = 4.0
elif sheet == 'Laboratory':
self.column_info[col]['cost'] = 9.0
else:
raise NotImplementedError
return
def save_supervised(self, filename):
save_dict = {'df_features':self.df_features,
'df_targets':self.df_targets,
'costs':self.costs}
with open(filename, 'wb+') as f:
pickle.dump(save_dict, f)
def load_supervised(self, filename):
with open(filename, 'rb') as f:
load_dict = pickle.load(f)
self.df_features = load_dict['df_features']
self.df_targets = load_dict['df_targets']
self.costs = load_dict['costs']
def get_distribution(self, phase, balanced=True):
features = self.df_features.values
targets = self.df_targets.values
# check the phase
inds_tst = np.arange(1,features.shape[0]*0.15, dtype=np.int)
inds_val = np.arange(features.shape[0]*0.15,
features.shape[0]*0.30, dtype=np.int)
inds_trn = np.arange(features.shape[0]*0.30,
features.shape[0]*1.00, dtype=np.int)
# if the phase is validation
if phase == 'validation':
phase_features = features[inds_val,:]
phase_targets = targets[inds_val]
# if the phase is test
elif phase == 'test':
phase_features = features[inds_tst,:]
phase_targets = targets[inds_tst]
# if the phase is train
elif phase == 'train':
phase_features = features[inds_trn,:]
phase_targets = targets[inds_trn]
elif phase == 'all':
phase_features = features[:,:]
phase_targets = targets[:]
else:
raise NotImplementedError('phase not found.')
# sampling
i = 0
y = 0
n_cls = np.max(phase_targets) + 1
while True:
i += 1
ind = i % phase_features.shape[0]
# balance dataset
if balanced:
while phase_targets[ind] != y:
i += 1
ind = i % phase_features.shape[0]
y += 1
if y >= n_cls:
y = 0
# yield the sample
yield (ind, phase_features[ind], phase_targets[ind], self.costs, 1.0)
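    # A minimal usage sketch (assumes process_supervised or load_supervised has already
    # populated df_features, df_targets and costs; values are whatever the dataset holds):
    #     gen = nhanes.get_distribution("train", balanced=True)
    #     ind, features, target, feature_costs, label_cost = next(gen)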
def get_batch(self, n_size, phase, balanced=True):
dataset_features = self.df_features.values
dataset_targets = self.df_targets.values
# select indices
n_samples = dataset_features.shape[0]
n_classes = int(dataset_targets.max() + 1)
if phase == 'test':
inds_sel = np.arange(0, int(n_samples*0.15), 1)
elif phase == 'validation':
n_samples = dataset_features.shape[0]
inds_sel = np.arange(int(n_samples*0.15), int(n_samples*0.30), 1)
elif phase == 'train':
n_samples = dataset_features.shape[0]
inds_sel = np.arange(int(n_samples*0.30), n_samples, 1)
else:
raise NotImplementedError
inds_sel = np.random.permutation(inds_sel)
batch_inds = []
# if we should balance the data
if balanced:
for cl in range(n_classes):
inds_cl = inds_sel[dataset_targets[inds_sel] == cl]
batch_inds.extend(inds_cl[:n_size//n_classes])
else:
batch_inds = inds_sel[:n_size]
batch_inds = np.random.permutation(batch_inds)
return dataset_features[batch_inds], dataset_targets[batch_inds]
def preprocess(df_col, info_col, preprocessor=None):
if df_col.dtypes == np.dtype('O'):
return None#pd.DataFrame()
#df_col[pd.isna(df_col)] = df_col.mean()
if info_col['type'] == 'categorical':
df_col = preproc_onehot(df_col)
#df_col[pd.isna(df_col)] = df_col.mean()
elif info_col['type'] == 'real':
df_col = preproc_real(df_col)
else:
raise NotImplementedError
return df_col
def preproc_onehot(df_col, args=None):
return pd.get_dummies(df_col, prefix=df_col.name, prefix_sep='#')
def preproc_real(df_col, args=None):
if args is None:
args={'cutoff':np.inf}
    # treat values above the cutoff as missing (NaN)
df_col[df_col > args['cutoff']] = np.nan
# nan replaced by mean
df_col[pd.isna(df_col)] = df_col.mean()
# statistical normalization
df_col = (df_col-df_col.mean()) / df_col.std()
return df_col
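# A small worked sketch of the normalization above (hypothetical column values):
#     input  [2.0, 4.0, 6.0] -> mean 4.0, sample std 2.0 (pandas uses ddof=1)
#     output [-1.0, 0.0, 1.0]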
def preproc_impute(df_col, args=None):
# nan replaced by mean
df_col[pd.isna(df_col)] = df_col.mean()
return df_col
def preproc_cut(df_col, bins):
# limit values to the bins range
df_col = df_col[df_col >= bins[0]]
df_col = df_col[df_col <= bins[-1]]
return pd.cut(df_col.iloc[:,0], bins, labels=False)
def preproc_dropna(df_col, args=None):
df_col.dropna(axis=0, how='any', inplace=True)
return df_col
class Dataset():
"""
Dataset manager class
"""
def __init__(self, data_path=None):
"""
        Class initializer.
"""
# set database path
if data_path == None:
self.data_path = './run_data/'
else:
self.data_path = data_path
        # feature and target vectors
self.features = None
self.targets = None
self.costs = None
def load_diabetes(self, opts=None):
columns = [
# TARGET: Fasting Glucose
FeatureColumn('Laboratory', 'LBXGLU',
#preproc_dropna, None),
preproc_impute, None),
# Gender
FeatureColumn('Demographics', 'RIAGENDR',
preproc_real, None, cost=2),
# Age at time of screening
FeatureColumn('Demographics', 'RIDAGEYR',
preproc_real, None, cost=2),
FeatureColumn('Demographics', 'RIDRETH3',
preproc_onehot, None, cost=2),
# Race/ethnicity
FeatureColumn('Demographics', 'RIDRETH1',
preproc_onehot, None, cost=2),
# Annual household income
FeatureColumn('Demographics', 'INDHHINC',
preproc_real, {'cutoff':11}, cost=4),
# Education level
FeatureColumn('Demographics', 'DMDEDUC2',
preproc_real, {'cutoff':5}, cost=2),
# Blood pressure
FeatureColumn('Examination', 'BPXSY1',
preproc_real, None, cost=5),
FeatureColumn('Examination', 'BPXDI1',
preproc_real, None, cost=5),
FeatureColumn('Examination', 'BPXSY2',
preproc_real, None, cost=5),
FeatureColumn('Examination', 'BPXDI2',
preproc_real, None, cost=5),
FeatureColumn('Examination', 'BPXSY3',
preproc_real, None, cost=5),
FeatureColumn('Examination', 'BPXDI3',
preproc_real, None, cost=5),
FeatureColumn('Examination', 'BPXSY4',
preproc_real, None, cost=5),
FeatureColumn('Examination', 'BPXDI4',
preproc_real, None, cost=5),
# BMI
FeatureColumn('Examination', 'BMXBMI',
preproc_real, None, cost=5),
# Waist
FeatureColumn('Examination', 'BMXWAIST',
preproc_real, None, cost=5),
# Height
FeatureColumn('Examination', 'BMXHT',
preproc_real, None, cost=5),
# Upper Leg Length
FeatureColumn('Examination', 'BMXLEG',
preproc_real, None, cost=5),
# Weight
FeatureColumn('Examination', 'BMXWT',
preproc_real, None, cost=5),
# Total Cholesterol
FeatureColumn('Laboratory', 'LBXTC',
preproc_real, None, cost=9),
# Triglyceride
FeatureColumn('Laboratory', 'LBXTR',
preproc_real, None, cost=9),
# fibrinogen
FeatureColumn('Laboratory', 'LBXFB',
preproc_real, None, cost=9),
# LDL-cholesterol
FeatureColumn('Laboratory', 'LBDLDL',
preproc_real, None, cost=9),
# Alcohol consumption
FeatureColumn('Questionnaire', 'ALQ101',
preproc_real, {'cutoff':2}, cost=4),
FeatureColumn('Questionnaire', 'ALQ120Q',
preproc_real, {'cutoff':365}, cost=4),
# Vigorous work activity
FeatureColumn('Questionnaire', 'PAQ605',
preproc_real, {'cutoff':2}, cost=4),
FeatureColumn('Questionnaire', 'PAQ620',
preproc_real, {'cutoff':2}, cost=4),
FeatureColumn('Questionnaire', 'PAQ180',
preproc_real, {'cutoff':4}, cost=4),
# Sleep
FeatureColumn('Questionnaire', 'SLD010H',
preproc_real, {'cutoff':12}, cost=4),
# Smoking
FeatureColumn('Questionnaire', 'SMQ020',
preproc_onehot, None, cost=4),
FeatureColumn('Questionnaire', 'SMD030',
preproc_real, {'cutoff':72}, cost=4),
# Blood relatives have diabetes
FeatureColumn('Questionnaire', 'MCQ250A',
preproc_real, {'cutoff':2}, cost=4),
# Blood pressure history
FeatureColumn('Questionnaire', 'BPQ020',
preproc_real, {'cutoff':2}, cost=4),
]
nhanes_dataset = NHANES(self.data_path, columns)
df = nhanes_dataset.process()
# extract feature and target
features = df.loc[:, df.columns != 'LBXGLU'].values
targets_LBXGLU = df['LBXGLU'].values
targets = np.zeros(targets_LBXGLU.shape[0])
targets[targets_LBXGLU <= 100] = 0
targets[np.logical_and(targets_LBXGLU<125,targets_LBXGLU>100)] = 1
targets[targets_LBXGLU >= 125] = 2
# random permutation
perm = np.random.permutation(targets.shape[0])
self.features = features[perm]
self.targets = targets[perm]
self.costs = [c.cost for c in columns[1:]]
self.costs = np.array(
[item for sublist in self.costs for item in sublist])
def load_hypertension(self, opts=None):
columns = [
# TARGET: systolic BP average
FeatureColumn('Examination', 'BPXSAR',
preproc_dropna, None, cost=5),
# Gender
FeatureColumn('Demographics', 'RIAGENDR',
preproc_real, None, cost=2),
# Age at time of screening
FeatureColumn('Demographics', 'RIDAGEYR',
preproc_real, None, cost=2),
# Race/ethnicity
FeatureColumn('Demographics', 'RIDRETH1',
preproc_onehot, None, cost=2),
# Annual household income
FeatureColumn('Demographics', 'INDHHINC',
preproc_real, {'cutoff':11}, cost=4),
# Education level
FeatureColumn('Demographics', 'DMDEDUC2',
preproc_real, {'cutoff':5}, cost=2),
# Sodium eaten day before
FeatureColumn('Dietary', 'DR2TSODI',
preproc_real, {'cutoff':20683}, cost=4),
# BMI
FeatureColumn('Examination', 'BMXBMI',
preproc_real, None, cost=5),
# Waist
FeatureColumn('Examination', 'BMXWAIST',
preproc_real, None, cost=5),
# Height
FeatureColumn('Examination', 'BMXHT',
preproc_real, None, cost=5),
# Upper Leg Length
FeatureColumn('Examination', 'BMXLEG',
preproc_real, None, cost=5),
# Weight
FeatureColumn('Examination', 'BMXWT',
preproc_real, None, cost=5),
# Total Cholesterol
FeatureColumn('Laboratory', 'LBXTC',
preproc_real, None, cost=9),
# Triglyceride
FeatureColumn('Laboratory', 'LBXTR',
preproc_real, None, cost=9),
# fibrinogen
FeatureColumn('Laboratory', 'LBXFB',
preproc_real, None, cost=9),
# LDL-cholesterol
FeatureColumn('Laboratory', 'LBDLDL',
preproc_real, None, cost=9),
# Alcohol consumption
FeatureColumn('Questionnaire', 'ALQ101',
preproc_real, {'cutoff':2}, cost=4),
FeatureColumn('Questionnaire', 'ALQ120Q',
preproc_real, {'cutoff':365}, cost=4),
# Vigorous work activity
FeatureColumn('Questionnaire', 'PAQ605',
preproc_real, {'cutoff':2}, cost=4),
FeatureColumn('Questionnaire', 'PAQ620',
preproc_real, {'cutoff':2}, cost=4),
FeatureColumn('Questionnaire', 'PAQ180',
preproc_real, {'cutoff':4}, cost=4),
# Sleep
FeatureColumn('Questionnaire', 'SLD010H',
preproc_real, {'cutoff':12}, cost=4),
# Smoking
FeatureColumn('Questionnaire', 'SMQ020',
preproc_onehot, None, cost=4),
FeatureColumn('Questionnaire', 'SMD030',
preproc_real, {'cutoff':72}, cost=4),
# Blood relatives have hypertension/stroke
FeatureColumn('Questionnaire', 'MCQ250F',
preproc_real, {'cutoff':2}, cost=4),
]
nhanes_dataset = NHANES(self.data_path, columns)
df = nhanes_dataset.process()
# extract feature and target
# below 90/60 is hypotension, in between is normal, above 120/80 is prehypertension,
# above 140/90 is hypertension
fe_cols = df.drop(['BPXSAR'], axis=1)
features = fe_cols.values
target = df['BPXSAR'].values
# remove nan labeled samples
inds_valid = ~ np.isnan(target)
features = features[inds_valid]
target = target[inds_valid]
# Put each person in the corresponding bin
targets = np.zeros(target.shape[0])
        targets[target < 140] = 0 # rest (hypotension, normal, prehypertension)
targets[target >= 140] = 1 # hypertension
# random permutation
perm = np.random.permutation(targets.shape[0])
self.features = features[perm]
self.targets = targets[perm]
self.costs = [c.cost for c in columns[1:]]
self.costs = np.array(
[item for sublist in self.costs for item in sublist])
def load_arthritis(self, opts=None):
columns = [
            # TARGET: doctor ever said you had arthritis (MCQ160A)
FeatureColumn('Questionnaire', 'MCQ160A',
None, None, cost=4),
# Gender
FeatureColumn('Demographics', 'RIAGENDR',
preproc_real, None, cost=2),
# Age at time of screening
FeatureColumn('Demographics', 'RIDAGEYR',
preproc_real, None, cost=2),
FeatureColumn('Demographics', 'RIDRETH3',
preproc_onehot, None, cost=2),
# Race/ethnicity
FeatureColumn('Demographics', 'RIDRETH1',
preproc_onehot, None, cost=2),
# Annual household income
FeatureColumn('Demographics', 'INDHHINC',
preproc_real, {'cutoff':11}, cost=4),
# Education level
FeatureColumn('Demographics', 'DMDEDUC2',
preproc_real, {'cutoff':5}, cost=2),
# BMI
FeatureColumn('Examination', 'BMXBMI',
preproc_real, None, cost=5),
# Waist
FeatureColumn('Examination', 'BMXWAIST',
preproc_real, None, cost=5),
# Height
FeatureColumn('Examination', 'BMXHT',
preproc_real, None, cost=5),
# Upper Leg Length
FeatureColumn('Examination', 'BMXLEG',
preproc_real, None, cost=5),
# Weight
FeatureColumn('Examination', 'BMXWT',
preproc_real, None, cost=5),
# Total Cholesterol
FeatureColumn('Laboratory', 'LBXTC',
preproc_real, None, cost=9),
# Alcohol consumption
FeatureColumn('Questionnaire', 'ALQ101',
preproc_real, {'cutoff':2}, cost=4),
FeatureColumn('Questionnaire', 'ALQ120Q',
preproc_real, {'cutoff':365}, cost=4),
# Vigorous work activity
FeatureColumn('Questionnaire', 'PAQ605',
preproc_real, {'cutoff':2}, cost=4),
FeatureColumn('Questionnaire', 'PAQ620',
preproc_real, {'cutoff':2}, cost=4),
FeatureColumn('Questionnaire', 'PAQ180',
preproc_real, {'cutoff':4}, cost=4),
FeatureColumn('Questionnaire', 'PAD615',
preproc_real, {'cutoff':780}, cost=4),
# Doctor told overweight (risk factor)
FeatureColumn('Questionnaire', 'MCQ160J',
preproc_onehot, {'cutoff':2}, cost=4),
# Sleep
FeatureColumn('Questionnaire', 'SLD010H',
preproc_real, {'cutoff':12}, cost=4),
# Smoking
FeatureColumn('Questionnaire', 'SMQ020',
preproc_onehot, None, cost=4),
FeatureColumn('Questionnaire', 'SMD030',
preproc_real, {'cutoff':72}, cost=4),
# Blood relatives with arthritis
FeatureColumn('Questionnaire', 'MCQ250D',
preproc_onehot, {'cutoff':2}, cost=4),
# joint pain/aching/stiffness in past year
FeatureColumn('Questionnaire', 'MPQ010',
preproc_onehot, {'cutoff':2}, cost=4),
# symptoms began only because of injury
FeatureColumn('Questionnaire', 'MPQ030',
preproc_onehot, {'cutoff':2}, cost=4),
# how long experiencing pain
FeatureColumn('Questionnaire', 'MPQ110',
preproc_real, {'cutoff':4}, cost=4),
]
nhanes_dataset = NHANES(self.data_path, columns)
df = nhanes_dataset.process()
fe_cols = df.drop(['MCQ160A'], axis=1)
features = fe_cols.values
target = df['MCQ160A'].values
# remove nan labeled samples
inds_valid = ~ np.isnan(target)
features = features[inds_valid]
target = target[inds_valid]
# Put each person in the corresponding bin
targets = np.zeros(target.shape[0])
targets[target == 1] = 0 # yes arthritis
targets[target == 2] = 1 # no arthritis
# random permutation
perm = np.random.permutation(targets.shape[0])
self.features = features[perm]
self.targets = targets[perm]
self.costs = [c.cost for c in columns[1:]]
self.costs = np.array(
[item for sublist in self.costs for item in sublist])
def get_dataset(self, phase):
# check the phase
        inds_tst = np.arange(0, self.features.shape[0]*0.15, dtype=np.int)
inds_val = np.arange(self.features.shape[0]*0.15,
self.features.shape[0]*0.30, dtype=np.int)
inds_trn = np.arange(self.features.shape[0]*0.30,
self.features.shape[0]*1.00, dtype=np.int)
# if the phase is validation
if phase == 'validation':
phase_features = self.features[inds_val,:]
phase_targets = self.targets[inds_val]
# if the phase is test
elif phase == 'test':
phase_features = self.features[inds_tst,:]
phase_targets = self.targets[inds_tst]
# if the phase is train
elif phase == 'train':
phase_features = self.features[inds_trn,:]
phase_targets = self.targets[inds_trn]
elif phase == 'all':
phase_features = self.features[:,:]
phase_targets = self.targets[:]
else:
raise NotImplementedError('phase not found.')
# sampling
i = 0
y = 0
n_cls = np.max(phase_targets) + 1
while True:
i += 1
ind = i % phase_features.shape[0]
# balance dataset
while phase_targets[ind] != y:
i += 1
ind = i % phase_features.shape[0]
y += 1
if y >= n_cls:
y = 0
# yield the sample
yield (ind, phase_features[ind], phase_targets[ind], self.costs, 1.0)
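# Hypothetical usage sketch (not executed here): get_dataset returns a generator that
# yields one sample per call, cycling through the classes in round-robin order so the
# stream is class-balanced. The variable names below are illustrative only.
#
#     ds = Dataset(data_path='./run_data/')
#     ds.load_diabetes()
#     gen = ds.get_dataset('train')
#     ind, features, target, costs, weight = next(gen)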
| 32,417
| 41.937748
| 122
|
py
|
Opportunistic
|
Opportunistic-master/src/utils.py
|
import gc
import numpy as np
import torch
class ExperienceBuffer():
def __init__(self, buffer_size):
self.buffer = []
self.buffer_size = buffer_size
def push(self,experience):
if len(self.buffer) + 1 >= self.buffer_size:
self.buffer[0:(1+len(self.buffer))-self.buffer_size] = []
self.buffer.append(experience)
def sample(self,size):
inds = np.random.choice(len(self.buffer),size)
return [self.buffer[i] for i in inds]
def clear(self, buffer_size_new=None):
self.buffer = []
if buffer_size_new:
self.buffer_size = buffer_size_new
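# Hypothetical usage sketch for ExperienceBuffer (names are illustrative only): the
# buffer keeps at most buffer_size experiences, silently dropping the oldest on overflow,
# and sample() draws uniformly with replacement.
#
#     buf = ExperienceBuffer(buffer_size=1000)
#     buf.push((state, action, reward, next_state))
#     batch = buf.sample(32)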
def mem_report():
'''Report the memory usage of the tensor.storage in pytorch
Both on CPUs and GPUs are reported'''
def _mem_report(tensors, mem_type):
'''Print the selected tensors of type
There are two major storage types in our major concern:
- GPU: tensors transferred to CUDA devices
- CPU: tensors remaining on the system memory (usually unimportant)
Args:
- tensors: the tensors of specified type
- mem_type: 'CPU' or 'GPU' in current implementation '''
print('Storage on %s' %(mem_type))
print('-'*LEN)
total_numel = 0
total_mem = 0
visited_data = []
for tensor in tensors:
if tensor.is_sparse:
continue
# a data_ptr indicates a memory block allocated
data_ptr = tensor.storage().data_ptr()
if data_ptr in visited_data:
continue
visited_data.append(data_ptr)
numel = tensor.storage().size()
total_numel += numel
element_size = tensor.storage().element_size()
mem = numel*element_size /1024/1024 # 32bit=4Byte, MByte
total_mem += mem
element_type = type(tensor).__name__
size = tuple(tensor.size())
print('%s\t\t%s\t\t%.2f' % (
element_type,
size,
mem) )
print('-'*LEN)
print('Total Tensors: %d \tUsed Memory Space: %.2f MBytes' % (total_numel, total_mem) )
print('-'*LEN)
LEN = 65
print('='*LEN)
objects = gc.get_objects()
print('%s\t%s\t\t\t%s' %('Element type', 'Size', 'Used MEM(MBytes)') )
tensors = [obj for obj in objects if torch.is_tensor(obj)]
cuda_tensors = [t for t in tensors if t.is_cuda]
host_tensors = [t for t in tensors if not t.is_cuda]
_mem_report(cuda_tensors, 'GPU')
_mem_report(host_tensors, 'CPU')
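# Hypothetical usage sketch: calling mem_report() after tensors have been allocated prints
# a per-storage breakdown for GPU and CPU memory, e.g.
#
#     x = torch.randn(1024, 1024) # optionally .cuda() if a GPU is available
#     mem_report()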
| 2,645
| 33.363636
| 95
|
py
|
DROO
|
DROO-master/main.py
|
# #################################################################
# Deep Reinforcement Learning for Online Offloading in Wireless Powered Mobile-Edge Computing Networks
#
# This file contains the main code of DROO. It loads the training samples saved in ./data/data_#.mat, splits the samples into two parts (training and testing data constitutes 80% and 20%), trains the DNN with training and validation samples, and finally tests the DNN with test data.
#
# Input: ./data/data_#.mat
# Data samples are generated according to the CD method presented in [2]. There are 30,000 samples saved in each ./data/data_#.mat, where # is the user number. Each data sample includes
# -----------------------------------------------------------------
# | wireless channel gain | input_h |
# -----------------------------------------------------------------
# | computing mode selection | output_mode |
# -----------------------------------------------------------------
# | energy broadcasting parameter | output_a |
# -----------------------------------------------------------------
# | transmit time of wireless device | output_tau |
# -----------------------------------------------------------------
# | weighted sum computation rate | output_obj |
# -----------------------------------------------------------------
#
#
# References:
# [1] Liang Huang, Suzhi Bi, and Ying-Jun Angela Zhang, "Deep Reinforcement Learning for Online Offloading in Wireless Powered Mobile-Edge Computing Networks," in IEEE Transactions on Mobile Computing, early access, 2019, DOI:10.1109/TMC.2019.2928811.
# [2] S. Bi and Y. J. Zhang, “Computation rate maximization for wireless powered mobile-edge computing with binary computation offloading,” IEEE Trans. Wireless Commun., vol. 17, no. 6, pp. 4177-4190, Jun. 2018.
#
# version 1.0 -- July 2018. Written by Liang Huang (lianghuang AT zjut.edu.cn)
# #################################################################
import scipy.io as sio # import scipy.io for .mat file I/O
import numpy as np # import numpy
from memory import MemoryDNN
from optimization import bisection
import time
def plot_rate( rate_his, rolling_intv = 50):
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib as mpl
rate_array = np.asarray(rate_his)
df = pd.DataFrame(rate_his)
mpl.style.use('seaborn')
fig, ax = plt.subplots(figsize=(15,8))
# rolling_intv = 20
plt.plot(np.arange(len(rate_array))+1, np.hstack(df.rolling(rolling_intv, min_periods=1).mean().values), 'b')
plt.fill_between(np.arange(len(rate_array))+1, np.hstack(df.rolling(rolling_intv, min_periods=1).min()[0].values), np.hstack(df.rolling(rolling_intv, min_periods=1).max()[0].values), color = 'b', alpha = 0.2)
plt.ylabel('Normalized Computation Rate')
plt.xlabel('Time Frames')
plt.show()
def save_to_txt(rate_his, file_path):
with open(file_path, 'w') as f:
for rate in rate_his:
f.write("%s \n" % rate)
if __name__ == "__main__":
'''
    This algorithm generates K modes from the DNN and chooses the one with the largest
    reward. The mode with the largest reward is stored in the memory, which is
    further used to train the DNN.
Adaptive K is implemented. K = max(K, K_his[-memory_size])
'''
N = 10 # number of users
n = 30000 # number of time frames
K = N # initialize K = N
decoder_mode = 'OP' # the quantization mode could be 'OP' (Order-preserving) or 'KNN'
Memory = 1024 # capacity of memory structure
Delta = 32 # Update interval for adaptive K
print('#user = %d, #channel=%d, K=%d, decoder = %s, Memory = %d, Delta = %d'%(N,n,K,decoder_mode, Memory, Delta))
# Load data
channel = sio.loadmat('./data/data_%d' %N)['input_h']
rate = sio.loadmat('./data/data_%d' %N)['output_obj'] # this rate is only used to plot figures; never used to train DROO.
# increase h to close to 1 for better training; it is a trick widely adopted in deep learning
channel = channel * 1000000
# generate the train and test data sample index
    # data are split as 80:20
    # training data are randomly sampled with duplication if n > total data size
    split_idx = int(.8* len(channel))
    num_test = min(len(channel) - split_idx, n - int(.8* n)) # test data size
mem = MemoryDNN(net = [N, 120, 80, N],
learning_rate = 0.01,
training_interval=10,
batch_size=128,
memory_size=Memory
)
start_time=time.time()
rate_his = []
rate_his_ratio = []
mode_his = []
k_idx_his = []
K_his = []
for i in range(n):
if i % (n//10) == 0:
print("%0.1f"%(i/n))
if i> 0 and i % Delta == 0:
# index counts from 0
if Delta > 1:
max_k = max(k_idx_his[-Delta:-1]) +1;
else:
max_k = k_idx_his[-1] +1;
K = min(max_k +1, N)
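            # Illustrative example (numbers are made up): with Delta = 32, if the largest index
            # of the best candidate action over the last 32 frames was 5, then max_k = 6 and
            # K becomes min(6 + 1, N) = 7, i.e. slightly more candidates than were recently
            # needed are generated, capped at the number of users N.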
if i < n - num_test:
# training
i_idx = i % split_idx
else:
# test
i_idx = i - n + num_test + split_idx
h = channel[i_idx,:]
# the action selection must be either 'OP' or 'KNN'
m_list = mem.decode(h, K, decoder_mode)
r_list = []
for m in m_list:
r_list.append(bisection(h/1000000, m)[0])
# encode the mode with largest reward
mem.encode(h, m_list[np.argmax(r_list)])
# the main code for DROO training ends here
# the following codes store some interested metrics for illustrations
# memorize the largest reward
rate_his.append(np.max(r_list))
rate_his_ratio.append(rate_his[-1] / rate[i_idx][0])
# record the index of largest reward
k_idx_his.append(np.argmax(r_list))
# record K in case of adaptive K
K_his.append(K)
mode_his.append(m_list[np.argmax(r_list)])
total_time=time.time()-start_time
mem.plot_cost()
plot_rate(rate_his_ratio)
print("Averaged normalized computation rate:", sum(rate_his_ratio[-num_test: -1])/num_test)
print('Total time consumed:%s'%total_time)
print('Average time per channel:%s'%(total_time/n))
# save data into txt
save_to_txt(k_idx_his, "k_idx_his.txt")
save_to_txt(K_his, "K_his.txt")
save_to_txt(mem.cost_his, "cost_his.txt")
save_to_txt(rate_his_ratio, "rate_his_ratio.txt")
save_to_txt(mode_his, "mode_his.txt")
| 6,803
| 39.987952
| 284
|
py
|
DROO
|
DROO-master/optimization.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 9 10:45:26 2018
@author: Administrator
"""
import numpy as np
from scipy import optimize
from scipy.special import lambertw
import scipy.io as sio # import scipy.io for .mat file I/O
import time
def plot_gain( gain_his):
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib as mpl
gain_array = np.asarray(gain_his)
df = pd.DataFrame(gain_his)
mpl.style.use('seaborn')
fig, ax = plt.subplots(figsize=(15,8))
rolling_intv = 20
plt.plot(np.arange(len(gain_array))+1, df.rolling(rolling_intv, min_periods=1).mean(), 'b')
plt.fill_between(np.arange(len(gain_array))+1, df.rolling(rolling_intv, min_periods=1).min()[0], df.rolling(rolling_intv, min_periods=1).max()[0], color = 'b', alpha = 0.2)
plt.ylabel('Gain ratio')
plt.xlabel('learning steps')
plt.show()
def bisection(h, M, weights=[]):
# the bisection algorithm proposed by Suzhi BI
# average time to find the optimal: 0.012535839796066284 s
# parameters and equations
o=100
p=3
u=0.7
eta1=((u*p)**(1.0/3))/o
ki=10**-26
eta2=u*p/10**-10
B=2*10**6
Vu=1.1
epsilon=B/(Vu*np.log(2))
    x = [] # a = x[0], and tau_j = x[1:]
M0=np.where(M==0)[0]
M1=np.where(M==1)[0]
hi=np.array([h[i] for i in M0])
hj=np.array([h[i] for i in M1])
if len(weights) == 0:
# default weights [1, 1.5, 1, 1.5, 1, 1.5, ...]
weights = [1.5 if i%2==1 else 1 for i in range(len(M))]
wi=np.array([weights[M0[i]] for i in range(len(M0))])
wj=np.array([weights[M1[i]] for i in range(len(M1))])
def sum_rate(x):
sum1=sum(wi*eta1*(hi/ki)**(1.0/3)*x[0]**(1.0/3))
sum2=0
for i in range(len(M1)):
sum2+=wj[i]*epsilon*x[i+1]*np.log(1+eta2*hj[i]**2*x[0]/x[i+1])
return sum1+sum2
def phi(v, j):
return 1/(-1-1/(lambertw(-1/(np.exp( 1 + v/wj[j]/epsilon))).real))
def p1(v):
p1 = 0
for j in range(len(M1)):
p1 += hj[j]**2 * phi(v, j)
return 1/(1 + p1 * eta2)
def Q(v):
sum1 = sum(wi*eta1*(hi/ki)**(1.0/3))*p1(v)**(-2/3)/3
sum2 = 0
for j in range(len(M1)):
sum2 += wj[j]*hj[j]**2/(1 + 1/phi(v,j))
return sum1 + sum2*epsilon*eta2 - v
def tau(v, j):
return eta2*hj[j]**2*p1(v)*phi(v,j)
# bisection starts here
delta = 0.005
UB = 999999999
LB = 0
while UB - LB > delta:
v = (float(UB) + LB)/2
if Q(v) > 0:
LB = v
else:
UB = v
x.append(p1(v))
for j in range(len(M1)):
x.append(tau(v, j))
return sum_rate(x), x[0], x[1:]
def cd_method(h):
N = len(h)
M0 = np.random.randint(2,size = N)
gain0,a,Tj= bisection(h,M0)
g_list = []
M_list = []
while True:
for j in range(0,N):
M = np.copy(M0)
M[j] = (M[j]+1)%2
gain,a,Tj= bisection(h,M)
g_list.append(gain)
M_list.append(M)
g_max = max(g_list)
if g_max > gain0:
gain0 = g_max
M0 = M_list[g_list.index(g_max)]
else:
break
return gain0, M0
if __name__ == "__main__":
h=np.array([6.06020304235508*10**-6,1.10331933767028*10**-5,1.00213540309998*10**-7,1.21610610942759*10**-6,1.96138838395145*10**-6,1.71456339592966*10**-6,5.24563569673585*10**-6,5.89530717142197*10**-7,4.07769429231962*10**-6,2.88333185798682*10**-6])
M=np.array([1,0,0,0,1,0,0,0,0,0])
# h=np.array([1.00213540309998*10**-7,1.10331933767028*10**-5,6.06020304235508*10**-6,1.21610610942759*10**-6,1.96138838395145*10**-6,1.71456339592966*10**-6,5.24563569673585*10**-6,5.89530717142197*10**-7,4.07769429231962*10**-6,2.88333185798682*10**-6])
# M=np.array([0,0,1,0,1,0,0,0,0,0])
# h = np.array([4.6368924987170947*10**-7, 1.3479411763648968*10**-7, 7.174945246007612*10**-6, 2.5590719803595445*10**-7, 3.3189928740379023*10**-6, 1.2109071327755575*10**-5, 2.394278475886022*10**-6, 2.179121774067472*10**-6, 5.5213902658478367*10**-8, 2.168778154948169*10**-7, 2.053227965874453*10**-6, 7.002952297466865*10**-8, 7.594077851181444*10**-8, 7.904048961975136*10**-7, 8.867218892023474*10**-7, 5.886007653360979*10**-6, 2.3470565740563855*10**-6, 1.387049627074303*10**-7, 3.359475870531776*10**-7, 2.633733784949562*10**-7, 2.189895264149453*10**-6, 1.129177795302099*10**-5, 1.1760290137191366*10**-6, 1.6588656719735275*10**-7, 1.383637788476638*10**-6, 1.4485928387351664*10**-6, 1.4262265958416598*10**-6, 1.1779725004265418*10**-6, 7.738218993031842*10**-7, 4.763534225174186*10**-6])
# M =np.array( [0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1,])
# time the average speed of bisection algorithm
# repeat = 1
# M =np.random.randint(2, size=(repeat,len(h)))
# start_time=time.time()
# for i in range(repeat):
# gain,a,Tj= bisection(h,M[i,:])
# total_time=time.time()-start_time
# print('time_cost:%s'%(total_time/repeat))
gain,a,Tj= bisection(h,M)
print('y:%s'%gain)
print('a:%s'%a)
print('Tj:%s'%Tj)
# test CD method. Given h, generate the max mode
gain0, M0 = cd_method(h)
print('max y:%s'%gain0)
print(M0)
# test all data
K = [10, 20, 30] # number of users
N = 1000 # number of channel
for k in K:
# Load data
channel = sio.loadmat('./data/data_%d' %int(k))['input_h']
gain = sio.loadmat('./data/data_%d' %int(k))['output_obj']
start_time=time.time()
gain_his = []
gain_his_ratio = []
mode_his = []
for i in range(N):
if i % (N//10) == 0:
print("%0.1f"%(i/N))
i_idx = i
h = channel[i_idx,:]
# the CD method
gain0, M0 = cd_method(h)
# memorize the largest reward
gain_his.append(gain0)
gain_his_ratio.append(gain_his[-1] / gain[i_idx][0])
mode_his.append(M0)
total_time=time.time()-start_time
print('time_cost:%s'%total_time)
print('average time per channel:%s'%(total_time/N))
plot_gain(gain_his_ratio)
print("gain/max ratio: ", sum(gain_his_ratio)/N)
| 6,614
| 26.911392
| 811
|
py
|
DROO
|
DROO-master/demo_alternate_weights.py
|
# #################################################################
# Deep Reinforcement Learning for Online Offloading in Wireless Powered Mobile-Edge Computing Networks
#
# This file contains a demo evaluating the performance of DROO with alternating-weight WDs. It loads the training samples with default WDs' weights from ./data/data_10.mat and with alternated weights from ./data/data_10_WeightsAlternated.mat. The channel gains in both files are the same. However, the optimal offloading mode, resource allocation, and the maximum computation rate in 'data_10_WeightsAlternated.mat' are recalculated since WDs' weights are alternated.
#
# References:
# [1] Liang Huang, Suzhi Bi, and Ying-jun Angela Zhang, “Deep Reinforcement Learning for Online Offloading in Wireless Powered Mobile-Edge Computing Networks”, on arxiv:1808.01977
#
# version 1.0 -- April 2019. Written by Liang Huang (lianghuang AT zjut.edu.cn)
# #################################################################
import scipy.io as sio # import scipy.io for .mat file I/O
import numpy as np # import numpy
from memory import MemoryDNN
from optimization import bisection
from main import plot_rate, save_to_txt
import time
def alternate_weights(case_id=0):
'''
    Alternate the weights of all WDs. Note that the maximum computation rate needs to be recomputed by solving (P2) once any WD's weight is changed.
Input: case_id = 0 for default weights; case_id = 1 for alternated weights.
Output: The alternated weights and the corresponding rate.
'''
# set alternated weights
weights=[[1,1.5,1,1.5,1,1.5,1,1.5,1,1.5],[1.5,1,1.5,1,1.5,1,1.5,1,1.5,1]]
# load the corresponding maximum computation rate
if case_id == 0:
        # by default, case_id = 0
rate = sio.loadmat('./data/data_10')['output_obj']
else:
# alternate weights for all WDs, case_id = 1
rate = sio.loadmat('./data/data_10_WeightsAlternated')['output_obj']
return weights[case_id], rate
if __name__ == "__main__":
'''
    This demo evaluates DROO with alternating-weight WDs. We evaluate an extreme case by alternating the weights of all WDs between 1 and 1.5 at the same time, specifically at time frames 6,000 and 8,000.
'''
N = 10 # number of users
n = 10000 # number of time frames, <= 10,000
K = N # initialize K = N
decoder_mode = 'OP' # the quantization mode could be 'OP' (Order-preserving) or 'KNN'
Memory = 1024 # capacity of memory structure
Delta = 32 # Update interval for adaptive K
print('#user = %d, #channel=%d, K=%d, decoder = %s, Memory = %d, Delta = %d'%(N,n,K,decoder_mode, Memory, Delta))
# Load data
channel = sio.loadmat('./data/data_%d' %N)['input_h']
rate = sio.loadmat('./data/data_%d' %N)['output_obj']
# increase h to close to 1 for better training; it is a trick widely adopted in deep learning
channel = channel * 1000000
# generate the train and test data sample index
    # data are split as 80:20
    # training data are randomly sampled with duplication if n > total data size
    split_idx = int(.8* len(channel))
    num_test = min(len(channel) - split_idx, n - int(.8* n)) # test data size
mem = MemoryDNN(net = [N, 120, 80, N],
learning_rate = 0.01,
training_interval=10,
batch_size=128,
memory_size=Memory
)
start_time=time.time()
rate_his = []
rate_his_ratio = []
mode_his = []
k_idx_his = []
K_his = []
h = channel[0,:]
    # initialize the weights by setting case_id = 0.
weight, rate = alternate_weights(0)
print("WD weights at time frame %d:"%(0), weight)
for i in range(n):
# for dynamic number of WDs
if i ==0.6*n:
weight, rate = alternate_weights(1)
print("WD weights at time frame %d:"%(i), weight)
if i ==0.8*n:
weight, rate = alternate_weights(0)
print("WD weights at time frame %d:"%(i), weight)
if i % (n//10) == 0:
print("%0.1f"%(i/n))
if i> 0 and i % Delta == 0:
# index counts from 0
if Delta > 1:
max_k = max(k_idx_his[-Delta:-1]) +1;
else:
max_k = k_idx_his[-1] +1;
K = min(max_k +1, N)
i_idx = i
h = channel[i_idx,:]
# the action selection must be either 'OP' or 'KNN'
m_list = mem.decode(h, K, decoder_mode)
r_list = []
for m in m_list:
            # only active users are used to compute the rate
r_list.append(bisection(h/1000000, m, weight)[0])
# memorize the largest reward
rate_his.append(np.max(r_list))
rate_his_ratio.append(rate_his[-1] / rate[i_idx][0])
# record the index of largest reward
k_idx_his.append(np.argmax(r_list))
# record K in case of adaptive K
K_his.append(K)
# save the mode with largest reward
mode_his.append(m_list[np.argmax(r_list)])
# if i <0.6*n:
# encode the mode with largest reward
mem.encode(h, m_list[np.argmax(r_list)])
total_time=time.time()-start_time
mem.plot_cost()
plot_rate(rate_his_ratio)
print("Averaged normalized computation rate:", sum(rate_his_ratio[-num_test: -1])/num_test)
print('Total time consumed:%s'%total_time)
print('Average time per channel:%s'%(total_time/n))
# save data into txt
save_to_txt(k_idx_his, "k_idx_his.txt")
save_to_txt(K_his, "K_his.txt")
save_to_txt(mem.cost_his, "cost_his.txt")
save_to_txt(rate_his_ratio, "rate_his_ratio.txt")
save_to_txt(mode_his, "mode_his.txt")
| 5,962
| 37.973856
| 468
|
py
|
DROO
|
DROO-master/memory.py
|
# #################################################################
# This file contains memory operation including encoding and decoding operations.
#
# version 1.0 -- January 2018. Written by Liang Huang (lianghuang AT zjut.edu.cn)
# #################################################################
from __future__ import print_function
import tensorflow as tf
import numpy as np
# DNN network for memory
class MemoryDNN:
def __init__(
self,
net,
learning_rate = 0.01,
training_interval=10,
batch_size=100,
memory_size=1000,
output_graph=False
):
        # net: [n_input, n_hidden_1st, n_hidden_2nd, n_output]
        assert(len(net) == 4) # only 4-layer DNN
self.net = net
self.training_interval = training_interval # learn every #training_interval
self.lr = learning_rate
self.batch_size = batch_size
self.memory_size = memory_size
# store all binary actions
self.enumerate_actions = []
        # number of stored memory entries
self.memory_counter = 1
# store training cost
self.cost_his = []
# reset graph
tf.reset_default_graph()
# initialize zero memory [h, m]
self.memory = np.zeros((self.memory_size, self.net[0]+ self.net[-1]))
# construct memory network
self._build_net()
self.sess = tf.Session()
# for tensorboard
if output_graph:
# $ tensorboard --logdir=logs
            # tf.train.SummaryWriter will soon be deprecated, use the following
tf.summary.FileWriter("logs/", self.sess.graph)
self.sess.run(tf.global_variables_initializer())
def _build_net(self):
def build_layers(h, c_names, net, w_initializer, b_initializer):
with tf.variable_scope('l1'):
w1 = tf.get_variable('w1', [net[0], net[1]], initializer=w_initializer, collections=c_names)
b1 = tf.get_variable('b1', [1, self.net[1]], initializer=b_initializer, collections=c_names)
l1 = tf.nn.relu(tf.matmul(h, w1) + b1)
with tf.variable_scope('l2'):
w2 = tf.get_variable('w2', [net[1], net[2]], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, net[2]], initializer=b_initializer, collections=c_names)
l2 = tf.nn.relu(tf.matmul(l1, w2) + b2)
with tf.variable_scope('M'):
w3 = tf.get_variable('w3', [net[2], net[3]], initializer=w_initializer, collections=c_names)
b3 = tf.get_variable('b3', [1, net[3]], initializer=b_initializer, collections=c_names)
out = tf.matmul(l2, w3) + b3
return out
# ------------------ build memory_net ------------------
self.h = tf.placeholder(tf.float32, [None, self.net[0]], name='h') # input
self.m = tf.placeholder(tf.float32, [None, self.net[-1]], name='mode') # for calculating loss
self.is_train = tf.placeholder("bool") # train or evaluate
with tf.variable_scope('memory_net'):
c_names, w_initializer, b_initializer = \
['memory_net_params', tf.GraphKeys.GLOBAL_VARIABLES], \
tf.random_normal_initializer(0., 1/self.net[0]), tf.constant_initializer(0.1) # config of layers
self.m_pred = build_layers(self.h, c_names, self.net, w_initializer, b_initializer)
with tf.variable_scope('loss'):
self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = self.m, logits = self.m_pred))
with tf.variable_scope('train'):
self._train_op = tf.train.AdamOptimizer(self.lr, 0.09).minimize(self.loss)
def remember(self, h, m):
# replace the old memory with new memory
idx = self.memory_counter % self.memory_size
self.memory[idx, :] = np.hstack((h,m))
self.memory_counter += 1
def encode(self, h, m):
# encoding the entry
self.remember(h, m)
# train the DNN every 10 step
# if self.memory_counter> self.memory_size / 2 and self.memory_counter % self.training_interval == 0:
if self.memory_counter % self.training_interval == 0:
self.learn()
def learn(self):
# sample batch memory from all memory
if self.memory_counter > self.memory_size:
sample_index = np.random.choice(self.memory_size, size=self.batch_size)
else:
sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
batch_memory = self.memory[sample_index, :]
h_train = batch_memory[:, 0: self.net[0]]
m_train = batch_memory[:, self.net[0]:]
# print(h_train)
# print(m_train)
# train the DNN
_, self.cost = self.sess.run([self._train_op, self.loss],
feed_dict={self.h: h_train, self.m: m_train})
assert(self.cost >0)
self.cost_his.append(self.cost)
def decode(self, h, k = 1, mode = 'OP'):
# to have batch dimension when feed into tf placeholder
h = h[np.newaxis, :]
m_pred = self.sess.run(self.m_pred, feed_dict={self.h: h})
        if mode == 'OP':
            return self.knm(m_pred[0], k)
        elif mode == 'KNN':
return self.knn(m_pred[0], k)
else:
print("The action selection must be 'OP' or 'KNN'")
def knm(self, m, k = 1):
        # return the k order-preserving binary actions (k nearest modes)
m_list = []
# generate the first binary offloading decision
        # note that here 'm' is the output of the DNN before the sigmoid activation function, i.e., an arbitrary real number.
        # Therefore, we compare it with '0' instead of 0.5 in equation (8), since sigmoid(0) = 0.5.
m_list.append(1*(m>0))
if k > 1:
# generate the remaining K-1 binary offloading decisions with respect to equation (9)
m_abs = abs(m)
idx_list = np.argsort(m_abs)[:k-1]
for i in range(k-1):
if m[idx_list[i]] >0:
# set a positive user to 0
m_list.append(1*(m - m[idx_list[i]] > 0))
else:
                    # set a negative user to 1
m_list.append(1*(m - m[idx_list[i]] >= 0))
return m_list
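    # Illustrative example (made-up numbers): for pre-sigmoid outputs m = [1.2, -0.3, 0.4]
    # and k = 3, the first action is 1*(m > 0) = [1, 0, 1]. The entries closest to the
    # decision boundary are m[1] = -0.3 and m[2] = 0.4, so the remaining actions flip them
    # in order: 1*(m - (-0.3) >= 0) = [1, 1, 1] and 1*(m - 0.4 > 0) = [1, 0, 0].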
def knn(self, m, k = 1):
# list all 2^N binary offloading actions
        if len(self.enumerate_actions) == 0:
import itertools
self.enumerate_actions = np.array(list(map(list, itertools.product([0, 1], repeat=self.net[0]))))
# the 2-norm
sqd = ((self.enumerate_actions - m)**2).sum(1)
idx = np.argsort(sqd)
return self.enumerate_actions[idx[:k]]
def plot_cost(self):
import matplotlib.pyplot as plt
plt.plot(np.arange(len(self.cost_his))*self.training_interval, self.cost_his)
plt.ylabel('Training Loss')
plt.xlabel('Time Frames')
plt.show()
| 7,094
| 36.539683
| 123
|
py
|
DROO
|
DROO-master/mainPyTorch.py
|
# #################################################################
# Deep Reinforcement Learning for Online Offloading in Wireless Powered Mobile-Edge Computing Networks
#
# This file contains the main code of DROO. It loads the training samples saved in ./data/data_#.mat, splits the samples into two parts (training and testing data constitutes 80% and 20%), trains the DNN with training and validation samples, and finally tests the DNN with test data.
#
# Input: ./data/data_#.mat
# Data samples are generated according to the CD method presented in [2]. There are 30,000 samples saved in each ./data/data_#.mat, where # is the user number. Each data sample includes
# -----------------------------------------------------------------
# | wireless channel gain | input_h |
# -----------------------------------------------------------------
# | computing mode selection | output_mode |
# -----------------------------------------------------------------
# | energy broadcasting parameter | output_a |
# -----------------------------------------------------------------
# | transmit time of wireless device | output_tau |
# -----------------------------------------------------------------
# | weighted sum computation rate | output_obj |
# -----------------------------------------------------------------
#
#
# References:
# [1] Liang Huang, Suzhi Bi, and Ying-Jun Angela Zhang, "Deep Reinforcement Learning for Online Offloading in Wireless Powered Mobile-Edge Computing Networks," in IEEE Transactions on Mobile Computing, early access, 2019, DOI:10.1109/TMC.2019.2928811.
# [2] S. Bi and Y. J. Zhang, “Computation rate maximization for wireless powered mobile-edge computing with binary computation offloading,” IEEE Trans. Wireless Commun., vol. 17, no. 6, pp. 4177-4190, Jun. 2018.
#
# version 1.0 -- July 2018. Written by Liang Huang (lianghuang AT zjut.edu.cn)
# #################################################################
import scipy.io as sio # import scipy.io for .mat file I/O
import numpy as np # import numpy
# Implemented based on PyTorch
from memoryPyTorch import MemoryDNN
from optimization import bisection
import time
def plot_rate(rate_his, rolling_intv=50):
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib as mpl
rate_array = np.asarray(rate_his)
df = pd.DataFrame(rate_his)
mpl.style.use('seaborn')
fig, ax = plt.subplots(figsize=(15, 8))
# rolling_intv = 20
plt.plot(np.arange(len(rate_array))+1, np.hstack(df.rolling(rolling_intv, min_periods=1).mean().values), 'b')
plt.fill_between(np.arange(len(rate_array))+1, np.hstack(df.rolling(rolling_intv, min_periods=1).min()[0].values), np.hstack(df.rolling(rolling_intv, min_periods=1).max()[0].values), color = 'b', alpha = 0.2)
plt.ylabel('Normalized Computation Rate')
plt.xlabel('Time Frames')
plt.show()
def save_to_txt(rate_his, file_path):
with open(file_path, 'w') as f:
for rate in rate_his:
f.write("%s \n" % rate)
if __name__ == "__main__":
'''
This algorithm generates K modes from DNN, and chooses with largest
reward. The mode with largest reward is stored in the memory, which is
further used to train the DNN.
Adaptive K is implemented. K = max(K, K_his[-memory_size])
'''
N = 10 # number of users
n = 30000 # number of time frames
K = N # initialize K = N
decoder_mode = 'OP' # the quantization mode could be 'OP' (Order-preserving) or 'KNN'
Memory = 1024 # capacity of memory structure
Delta = 32 # Update interval for adaptive K
print('#user = %d, #channel=%d, K=%d, decoder = %s, Memory = %d, Delta = %d'%(N,n,K,decoder_mode, Memory, Delta))
# Load data
channel = sio.loadmat('./data/data_%d' %N)['input_h']
rate = sio.loadmat('./data/data_%d' %N)['output_obj'] # this rate is only used to plot figures; never used to train DROO.
# increase h to close to 1 for better training; it is a trick widely adopted in deep learning
channel = channel * 1000000
# generate the train and test data sample index
    # data are split as 80:20
    # training data are randomly sampled with duplication if n > total data size
    split_idx = int(.8 * len(channel))
    num_test = min(len(channel) - split_idx, n - int(.8 * n)) # test data size
mem = MemoryDNN(net = [N, 120, 80, N],
learning_rate = 0.01,
training_interval=10,
batch_size=128,
memory_size=Memory
)
start_time = time.time()
rate_his = []
rate_his_ratio = []
mode_his = []
k_idx_his = []
K_his = []
for i in range(n):
if i % (n//10) == 0:
print("%0.1f"%(i/n))
if i> 0 and i % Delta == 0:
# index counts from 0
if Delta > 1:
max_k = max(k_idx_his[-Delta:-1]) +1;
else:
max_k = k_idx_his[-1] +1;
K = min(max_k +1, N)
if i < n - num_test:
# training
i_idx = i % split_idx
else:
# test
i_idx = i - n + num_test + split_idx
h = channel[i_idx,:]
# the action selection must be either 'OP' or 'KNN'
m_list = mem.decode(h, K, decoder_mode)
r_list = []
for m in m_list:
r_list.append(bisection(h/1000000, m)[0])
# encode the mode with largest reward
mem.encode(h, m_list[np.argmax(r_list)])
# the main code for DROO training ends here
# the following codes store some interested metrics for illustrations
# memorize the largest reward
rate_his.append(np.max(r_list))
rate_his_ratio.append(rate_his[-1] / rate[i_idx][0])
# record the index of largest reward
k_idx_his.append(np.argmax(r_list))
# record K in case of adaptive K
K_his.append(K)
mode_his.append(m_list[np.argmax(r_list)])
total_time=time.time()-start_time
mem.plot_cost()
plot_rate(rate_his_ratio)
print("Averaged normalized computation rate:", sum(rate_his_ratio[-num_test: -1])/num_test)
print('Total time consumed:%s'%total_time)
print('Average time per channel:%s'%(total_time/n))
# save data into txt
save_to_txt(k_idx_his, "k_idx_his.txt")
save_to_txt(K_his, "K_his.txt")
save_to_txt(mem.cost_his, "cost_his.txt")
save_to_txt(rate_his_ratio, "rate_his_ratio.txt")
save_to_txt(mode_his, "mode_his.txt")
| 6,830
| 39.904192
| 284
|
py
|
DROO
|
DROO-master/memoryPyTorch.py
|
# #################################################################
# This file contains the main DROO operations, including building DNN,
# Storing data sample, Training DNN, and generating quantized binary offloading decisions.
# version 1.0 -- February 2020. Written based on PyTorch by Weijian Pan and
# Liang Huang (lianghuang AT zjut.edu.cn)
# ###################################################################
from __future__ import print_function
import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
print(torch.__version__)
# DNN network for memory
class MemoryDNN:
def __init__(
self,
net,
learning_rate = 0.01,
training_interval=10,
batch_size=100,
memory_size=1000,
output_graph=False
):
self.net = net
self.training_interval = training_interval # learn every #training_interval
self.lr = learning_rate
self.batch_size = batch_size
self.memory_size = memory_size
# store all binary actions
self.enumerate_actions = []
        # number of stored memory entries
self.memory_counter = 1
# store training cost
self.cost_his = []
# initialize zero memory [h, m]
self.memory = np.zeros((self.memory_size, self.net[0] + self.net[-1]))
# construct memory network
self._build_net()
def _build_net(self):
self.model = nn.Sequential(
nn.Linear(self.net[0], self.net[1]),
nn.ReLU(),
nn.Linear(self.net[1], self.net[2]),
nn.ReLU(),
nn.Linear(self.net[2], self.net[3]),
nn.Sigmoid()
)
def remember(self, h, m):
# replace the old memory with new memory
idx = self.memory_counter % self.memory_size
self.memory[idx, :] = np.hstack((h, m))
self.memory_counter += 1
def encode(self, h, m):
# encoding the entry
self.remember(h, m)
# train the DNN every 10 step
# if self.memory_counter> self.memory_size / 2 and self.memory_counter % self.training_interval == 0:
if self.memory_counter % self.training_interval == 0:
self.learn()
def learn(self):
# sample batch memory from all memory
if self.memory_counter > self.memory_size:
sample_index = np.random.choice(self.memory_size, size=self.batch_size)
else:
sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
batch_memory = self.memory[sample_index, :]
h_train = torch.Tensor(batch_memory[:, 0: self.net[0]])
m_train = torch.Tensor(batch_memory[:, self.net[0]:])
# train the DNN
optimizer = optim.Adam(self.model.parameters(), lr=self.lr,betas = (0.09,0.999),weight_decay=0.0001)
criterion = nn.BCELoss()
self.model.train()
optimizer.zero_grad()
predict = self.model(h_train)
loss = criterion(predict, m_train)
loss.backward()
optimizer.step()
self.cost = loss.item()
assert(self.cost > 0)
self.cost_his.append(self.cost)
def decode(self, h, k = 1, mode = 'OP'):
# to have batch dimension when feed into Tensor
h = torch.Tensor(h[np.newaxis, :])
self.model.eval()
m_pred = self.model(h)
m_pred = m_pred.detach().numpy()
        if mode == 'OP':
            return self.knm(m_pred[0], k)
        elif mode == 'KNN':
return self.knn(m_pred[0], k)
else:
print("The action selection must be 'OP' or 'KNN'")
def knm(self, m, k = 1):
# return k order-preserving binary actions
m_list = []
# generate the first binary offloading decision with respect to equation (8)
m_list.append(1*(m>0.5))
if k > 1:
# generate the remaining K-1 binary offloading decisions with respect to equation (9)
m_abs = abs(m-0.5)
idx_list = np.argsort(m_abs)[:k-1]
for i in range(k-1):
if m[idx_list[i]] >0.5:
# set the \hat{x}_{t,(k-1)} to 0
m_list.append(1*(m - m[idx_list[i]] > 0))
else:
# set the \hat{x}_{t,(k-1)} to 1
m_list.append(1*(m - m[idx_list[i]] >= 0))
return m_list
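    # Illustrative example (made-up numbers): for sigmoid outputs m = [0.8, 0.4, 0.55] and
    # k = 3, the first action is 1*(m > 0.5) = [1, 0, 1]. The entries closest to 0.5 are
    # m[2] = 0.55 and m[1] = 0.4, so the remaining actions flip them in order:
    # 1*(m - 0.55 > 0) = [1, 0, 0] and 1*(m - 0.4 >= 0) = [1, 1, 1].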
def knn(self, m, k = 1):
# list all 2^N binary offloading actions
        if len(self.enumerate_actions) == 0:
import itertools
self.enumerate_actions = np.array(list(map(list, itertools.product([0, 1], repeat=self.net[0]))))
# the 2-norm
sqd = ((self.enumerate_actions - m)**2).sum(1)
idx = np.argsort(sqd)
return self.enumerate_actions[idx[:k]]
def plot_cost(self):
import matplotlib.pyplot as plt
plt.plot(np.arange(len(self.cost_his))*self.training_interval, self.cost_his)
plt.ylabel('Training Loss')
plt.xlabel('Time Frames')
plt.show()
| 5,082
| 31.583333
| 109
|
py
|
DROO
|
DROO-master/demo_on_off.py
|
# #################################################################
# Deep Reinforcement Learning for Online Offloading in Wireless Powered Mobile-Edge Computing Networks
#
# This file contains a demo evaluating the performance of DROO by randomly turning on/off some WDs. It loads the training samples from ./data/data_#.mat, where # denotes the number of active WDs in the MEC network. Note that the maximum computation rate needs to be recomputed by solving (P2) once a WD is turned off/on.
#
# References:
# [1] Liang Huang, Suzhi Bi, and Ying-jun Angela Zhang, “Deep Reinforcement Learning for Online Offloading in Wireless Powered Mobile-Edge Computing Networks”, submitted to IEEE Journal on Selected Areas in Communications.
#
# version 1.0 -- April 2019. Written by Liang Huang (lianghuang AT zjut.edu.cn)
# #################################################################
import scipy.io as sio # import scipy.io for .mat file I/O
import numpy as np # import numpy
from memory import MemoryDNN
from optimization import bisection
from main import plot_rate, save_to_txt
import time
def WD_off(channel, N_active, N):
# turn off one WD
    if N_active > 5: # currently we support turning off at most half of the WDs
        N_active = N_active - 1
        # scale the channel of the WD being turned off down to (close to) 0
        # since all channels in each time frame are randomly generated, we turn off the WD with the greatest index
        channel[:,N_active] = channel[:, N_active] / 1000000 # a programming trick, such that we can recover its channel gain once the WD is turned on again.
        print(" The %dth WD is turned off."%(N_active +1))
# update the expected maximum computation rate
rate = sio.loadmat('./data/data_%d' %N_active)['output_obj']
return channel, rate, N_active
def WD_on(channel, N_active, N):
# turn on one WD
if N_active < N:
N_active = N_active + 1
# recover (N_active-1)th channel
channel[:,N_active-1] = channel[:, N_active-1] * 1000000
print(" The %dth WD is turned on."%(N_active))
# update the expected maximum computation rate
rate = sio.loadmat('./data/data_%d' %N_active)['output_obj']
return channel, rate, N_active
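# Illustrative note (a sketch, not from the original comments): since the loaded channel
# matrix is pre-scaled by 1e6, dividing one WD's column by 1e6 in WD_off shrinks it back
# to its raw, negligible magnitude, so the DNN input effectively treats that WD as absent;
# multiplying by 1e6 in WD_on restores the exact original scaled value.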
if __name__ == "__main__":
'''
    This demo evaluates DROO for MEC networks where WDs can occasionally be turned off/on. After DROO converges, we randomly turn off one WD at each of the time frames 6,000, 6,500, 7,000, and 7,500, and then turn them back on at time frames 8,000, 8,500, and 9,000. At time frame 9,500, we randomly turn off two WDs, resulting in an MEC network with 8 active WDs.
'''
N = 10 # number of users
N_active = N # number of effective users
N_off = 0 # number of off-users
n = 10000 # number of time frames, <= 10,000
K = N # initialize K = N
decoder_mode = 'OP' # the quantization mode could be 'OP' (Order-preserving) or 'KNN'
Memory = 1024 # capacity of memory structure
Delta = 32 # Update interval for adaptive K
print('#user = %d, #channel=%d, K=%d, decoder = %s, Memory = %d, Delta = %d'%(N,n,K,decoder_mode, Memory, Delta))
# Load data
channel = sio.loadmat('./data/data_%d' %N)['input_h']
rate = sio.loadmat('./data/data_%d' %N)['output_obj']
# increase h to close to 1 for better training; it is a trick widely adopted in deep learning
channel = channel * 1000000
channel_bak = channel.copy()
# generate the train and test data sample index
    # data are split as 80:20
    # training data are randomly sampled with duplication if n > total data size
    split_idx = int(.8* len(channel))
    num_test = min(len(channel) - split_idx, n - int(.8* n)) # test data size
mem = MemoryDNN(net = [N, 120, 80, N],
learning_rate = 0.01,
training_interval=10,
batch_size=128,
memory_size=Memory
)
start_time=time.time()
rate_his = []
rate_his_ratio = []
mode_his = []
k_idx_his = []
K_his = []
h = channel[0,:]
for i in range(n):
# for dynamic number of WDs
if i ==0.6*n:
print("At time frame %d:"%(i))
channel, rate, N_active = WD_off(channel, N_active, N)
if i ==0.65*n:
print("At time frame %d:"%(i))
channel, rate, N_active = WD_off(channel, N_active, N)
if i ==0.7*n:
print("At time frame %d:"%(i))
channel, rate, N_active = WD_off(channel, N_active, N)
if i ==0.75*n:
print("At time frame %d:"%(i))
channel, rate, N_active = WD_off(channel, N_active, N)
if i ==0.8*n:
print("At time frame %d:"%(i))
channel, rate, N_active = WD_on(channel, N_active, N)
if i ==0.85*n:
print("At time frame %d:"%(i))
channel, rate, N_active = WD_on(channel, N_active, N)
if i ==0.9*n:
print("At time frame %d:"%(i))
channel, rate, N_active = WD_on(channel, N_active, N)
channel, rate, N_active = WD_on(channel, N_active, N)
if i == 0.95*n:
print("At time frame %d:"%(i))
channel, rate, N_active = WD_off(channel, N_active, N)
channel, rate, N_active = WD_off(channel, N_active, N)
if i % (n//10) == 0:
print("%0.1f"%(i/n))
if i> 0 and i % Delta == 0:
# index counts from 0
if Delta > 1:
max_k = max(k_idx_his[-Delta:-1]) +1;
else:
max_k = k_idx_his[-1] +1;
K = min(max_k +1, N)
i_idx = i
h = channel[i_idx,:]
# the action selection must be either 'OP' or 'KNN'
m_list = mem.decode(h, K, decoder_mode)
r_list = []
for m in m_list:
            # only active users are used to compute the rate
r_list.append(bisection(h[0:N_active]/1000000, m[0:N_active])[0])
# memorize the largest reward
rate_his.append(np.max(r_list))
rate_his_ratio.append(rate_his[-1] / rate[i_idx][0])
# record the index of largest reward
k_idx_his.append(np.argmax(r_list))
# record K in case of adaptive K
K_his.append(K)
# save the mode with largest reward
mode_his.append(m_list[np.argmax(r_list)])
# if i <0.6*n:
# encode the mode with largest reward
mem.encode(h, m_list[np.argmax(r_list)])
total_time=time.time()-start_time
mem.plot_cost()
plot_rate(rate_his_ratio)
print("Averaged normalized computation rate:", sum(rate_his_ratio[-num_test: -1])/num_test)
print('Total time consumed:%s'%total_time)
print('Average time per channel:%s'%(total_time/n))
# save data into txt
save_to_txt(k_idx_his, "k_idx_his.txt")
save_to_txt(K_his, "K_his.txt")
save_to_txt(mem.cost_his, "cost_his.txt")
save_to_txt(rate_his_ratio, "rate_his_ratio.txt")
save_to_txt(mode_his, "mode_his.txt")
| 7,281
| 39.681564
| 355
|
py
|
DROO
|
DROO-master/memoryTF2.py
|
# #################################################################
# This file contains the main DROO operations, including building DNN,
# Storing data sample, Training DNN, and generating quantized binary offloading decisions.
# version 1.0 -- January 2020. Written based on Tensorflow 2 by Weijian Pan and
# Liang Huang (lianghuang AT zjut.edu.cn)
# #################################################################
from __future__ import print_function
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
print(tf.__version__)
print(tf.keras.__version__)
# DNN network for memory
class MemoryDNN:
def __init__(
self,
net,
learning_rate = 0.01,
training_interval=10,
batch_size=100,
memory_size=1000,
output_graph=False
):
self.net = net # the size of the DNN
self.training_interval = training_interval # learn every #training_interval
self.lr = learning_rate
self.batch_size = batch_size
self.memory_size = memory_size
# store all binary actions
self.enumerate_actions = []
        # number of stored memory entries
self.memory_counter = 1
# store training cost
self.cost_his = []
# initialize zero memory [h, m]
self.memory = np.zeros((self.memory_size, self.net[0] + self.net[-1]))
# construct memory network
self._build_net()
def _build_net(self):
self.model = keras.Sequential([
layers.Dense(self.net[1], activation='relu'), # the first hidden layer
layers.Dense(self.net[2], activation='relu'), # the second hidden layer
layers.Dense(self.net[-1], activation='sigmoid') # the output layer
])
self.model.compile(optimizer=keras.optimizers.Adam(lr=self.lr), loss=tf.losses.binary_crossentropy, metrics=['accuracy'])
def remember(self, h, m):
# replace the old memory with new memory
idx = self.memory_counter % self.memory_size
self.memory[idx, :] = np.hstack((h, m))
self.memory_counter += 1
def encode(self, h, m):
# encoding the entry
self.remember(h, m)
# train the DNN every 10 step
# if self.memory_counter> self.memory_size / 2 and self.memory_counter % self.training_interval == 0:
if self.memory_counter % self.training_interval == 0:
self.learn()
def learn(self):
# sample batch memory from all memory
if self.memory_counter > self.memory_size:
sample_index = np.random.choice(self.memory_size, size=self.batch_size)
else:
sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
batch_memory = self.memory[sample_index, :]
h_train = batch_memory[:, 0: self.net[0]]
m_train = batch_memory[:, self.net[0]:]
# print(h_train) # (128, 10)
# print(m_train) # (128, 10)
# train the DNN
hist = self.model.fit(h_train, m_train, verbose=0)
self.cost = hist.history['loss'][0]
assert(self.cost > 0)
self.cost_his.append(self.cost)
def decode(self, h, k = 1, mode = 'OP'):
# to have batch dimension when feed into tf placeholder
h = h[np.newaxis, :]
m_pred = self.model.predict(h)
        if mode == 'OP':
            return self.knm(m_pred[0], k)
        elif mode == 'KNN':
return self.knn(m_pred[0], k)
else:
print("The action selection must be 'OP' or 'KNN'")
def knm(self, m, k = 1):
# return k order-preserving binary actions
m_list = []
# generate the first binary offloading decision with respect to equation (8)
m_list.append(1*(m>0.5))
if k > 1:
# generate the remaining K-1 binary offloading decisions with respect to equation (9)
m_abs = abs(m-0.5)
idx_list = np.argsort(m_abs)[:k-1]
for i in range(k-1):
if m[idx_list[i]] >0.5:
# set the \hat{x}_{t,(k-1)} to 0
m_list.append(1*(m - m[idx_list[i]] > 0))
else:
# set the \hat{x}_{t,(k-1)} to 1
m_list.append(1*(m - m[idx_list[i]] >= 0))
return m_list
def knn(self, m, k = 1):
# list all 2^N binary offloading actions
        if len(self.enumerate_actions) == 0:
import itertools
self.enumerate_actions = np.array(list(map(list, itertools.product([0, 1], repeat=self.net[0]))))
# the 2-norm
sqd = ((self.enumerate_actions - m)**2).sum(1)
idx = np.argsort(sqd)
return self.enumerate_actions[idx[:k]]
def plot_cost(self):
import matplotlib.pyplot as plt
plt.plot(np.arange(len(self.cost_his))*self.training_interval, self.cost_his)
plt.ylabel('Training Loss')
plt.xlabel('Time Frames')
plt.show()
| 5,117
| 33.816327
| 129
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/exps/der_womask/cifar100/b0/10steps/main.py
|
'''
@Author : Yan Shipeng, Xie Jiangwei
@Contact: yanshp@shanghaitech.edu.cn, xiejw@shanghaitech.edu.cn
'''
import sys
import os
import os.path as osp
import copy
import time
import shutil
import cProfile
import logging
from pathlib import Path
import numpy as np
import random
from easydict import EasyDict as edict
from tensorboardX import SummaryWriter
repo_name = 'DER-ClassIL.pytorch'
base_dir = osp.realpath(".")[:osp.realpath(".").index(repo_name) + len(repo_name)]
sys.path.insert(0, base_dir)
from sacred import Experiment
ex = Experiment(base_dir=base_dir)
# Save which files
# ex.add_source_file(osp.join(base_dir, "inclearn/models/icarl.py"))
# ex.add_source_file(osp.join(base_dir, "inclearn/lib/data.py"))
# ex.add_source_file(osp.join(base_dir, "inclearn/lib/network.py"))
# ex.add_source_file(osp.join(base_dir, "inclearn/convnet/resnet.py"))
# ex.add_source_file(osp.join(os.getcwd(), "icarl.py"))
# ex.add_source_file(osp.join(os.getcwd(), "network.py"))
# ex.add_source_file(osp.join(os.getcwd(), "resnet.py"))
# MongoDB Observer
# ex.observers.append(MongoObserver.create(url='xx.xx.xx.xx:port', db_name='classil'))
import torch
from inclearn.tools import factory, results_utils, utils
from inclearn.learn.pretrain import pretrain
from inclearn.tools.metrics import IncConfusionMeter
def initialization(config, seed, mode, exp_id):
# Add it if your input size is fixed
# ref: https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936
torch.backends.cudnn.benchmark = True # This will result in non-deterministic results.
# ex.captured_out_filter = lambda text: 'Output capturing turned off.'
cfg = edict(config)
utils.set_seed(cfg['seed'])
if exp_id is None:
exp_id = -1
cfg.exp.savedir = "./logs"
logger = utils.make_logger(f"exp{exp_id}_{cfg.exp.name}_{mode}", savedir=cfg.exp.savedir)
# Tensorboard
exp_name = f'{exp_id}_{cfg["exp"]["name"]}' if exp_id is not None else f'../inbox/{cfg["exp"]["name"]}'
tensorboard_dir = cfg["exp"]["tensorboard_dir"] + f"/{exp_name}"
# If not only save latest tensorboard log.
# if Path(tensorboard_dir).exists():
# shutil.move(tensorboard_dir, cfg["exp"]["tensorboard_dir"] + f"/../inbox/{time.time()}_{exp_name}")
tensorboard = SummaryWriter(tensorboard_dir)
return cfg, logger, tensorboard
@ex.command
def train(_run, _rnd, _seed):
cfg, ex.logger, tensorboard = initialization(_run.config, _seed, "train", _run._id)
ex.logger.info(cfg)
cfg.data_folder = osp.join(base_dir, "data")
start_time = time.time()
_train(cfg, _run, ex, tensorboard)
ex.logger.info("Training finished in {}s.".format(int(time.time() - start_time)))
def _train(cfg, _run, ex, tensorboard):
device = factory.set_device(cfg)
trial_i = cfg['trial']
inc_dataset = factory.get_data(cfg, trial_i)
ex.logger.info("classes_order")
ex.logger.info(inc_dataset.class_order)
model = factory.get_model(cfg, trial_i, _run, ex, tensorboard, inc_dataset)
if _run.meta_info["options"]["--file_storage"] is not None:
_save_dir = osp.join(_run.meta_info["options"]["--file_storage"], str(_run._id))
else:
_save_dir = cfg["exp"]["ckptdir"]
results = results_utils.get_template_results(cfg)
for task_i in range(inc_dataset.n_tasks):
task_info, train_loader, val_loader, test_loader = inc_dataset.new_task()
model.set_task_info(
task=task_info["task"],
total_n_classes=task_info["max_class"],
increment=task_info["increment"],
n_train_data=task_info["n_train_data"],
n_test_data=task_info["n_test_data"],
n_tasks=inc_dataset.n_tasks,
)
model.before_task(task_i, inc_dataset)
# TODO: Move to incmodel.py
if 'min_class' in task_info:
ex.logger.info("Train on {}->{}.".format(task_info["min_class"], task_info["max_class"]))
# Pretraining at step0 if needed
if task_i == 0 and cfg["start_class"] > 0:
do_pretrain(cfg, ex, model, device, train_loader, test_loader)
inc_dataset.shared_data_inc = train_loader.dataset.share_memory
elif task_i < cfg['start_task']:
state_dict = torch.load(f'./ckpts/step{task_i}.ckpt')
model._parallel_network.load_state_dict(state_dict)
inc_dataset.shared_data_inc = train_loader.dataset.share_memory
else:
model.train_task(train_loader, val_loader)
model.after_task(task_i, inc_dataset)
ex.logger.info("Eval on {}->{}.".format(0, task_info["max_class"]))
ypred, ytrue = model.eval_task(test_loader)
acc_stats = utils.compute_accuracy(ypred, ytrue, increments=model._increments, n_classes=model._n_classes)
#Logging
model._tensorboard.add_scalar(f"taskaccu/trial{trial_i}", acc_stats["top1"]["total"], task_i)
_run.log_scalar(f"trial{trial_i}_taskaccu", acc_stats["top1"]["total"], task_i)
_run.log_scalar(f"trial{trial_i}_task_top5_accu", acc_stats["top5"]["total"], task_i)
ex.logger.info(f"top1:{acc_stats['top1']}")
ex.logger.info(f"top5:{acc_stats['top5']}")
results["results"].append(acc_stats)
top1_avg_acc, top5_avg_acc = results_utils.compute_avg_inc_acc(results["results"])
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top1"] = top1_avg_acc
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top5"] = top5_avg_acc
ex.logger.info("Average Incremental Accuracy Top 1: {} Top 5: {}.".format(
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top1"],
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top5"],
))
if cfg["exp"]["name"]:
results_utils.save_results(results, cfg["exp"]["name"])
def do_pretrain(cfg, ex, model, device, train_loader, test_loader):
if not os.path.exists(osp.join(ex.base_dir, 'pretrain/')):
os.makedirs(osp.join(ex.base_dir, 'pretrain/'))
model_path = osp.join(
ex.base_dir,
"pretrain/{}_{}_cosine_{}_multi_{}_aux{}_nplus1_{}_{}_trial_{}_{}_seed_{}_start_{}_epoch_{}.pth".format(
cfg["model"],
cfg["convnet"],
cfg["weight_normalization"],
cfg["der"],
cfg["use_aux_cls"],
cfg["aux_n+1"],
cfg["dataset"],
cfg["trial"],
cfg["train_head"],
cfg['seed'],
cfg["start_class"],
cfg["pretrain"]["epochs"],
),
)
if osp.exists(model_path):
print("Load pretrain model")
if hasattr(model._network, "module"):
model._network.module.load_state_dict(torch.load(model_path))
else:
model._network.load_state_dict(torch.load(model_path))
else:
pretrain(cfg, ex, model, device, train_loader, test_loader, model_path)
@ex.command
def test(_run, _rnd, _seed):
cfg, ex.logger, tensorboard = initialization(_run.config, _seed, "test", _run._id)
ex.logger.info(cfg)
trial_i = cfg['trial']
cfg.data_folder = osp.join(base_dir, "data")
inc_dataset = factory.get_data(cfg, trial_i)
# inc_dataset._current_task = taski
# train_loader = inc_dataset._get_loader(inc_dataset.data_cur, inc_dataset.targets_cur)
model = factory.get_model(cfg, trial_i, _run, ex, tensorboard, inc_dataset)
model._network.task_size = cfg.increment
test_results = results_utils.get_template_results(cfg)
for taski in range(inc_dataset.n_tasks):
task_info, train_loader, _, test_loader = inc_dataset.new_task()
model.set_task_info(
task=task_info["task"],
total_n_classes=task_info["max_class"],
increment=task_info["increment"],
n_train_data=task_info["n_train_data"],
n_test_data=task_info["n_test_data"],
n_tasks=task_info["max_task"]
)
model.before_task(taski, inc_dataset)
state_dict = torch.load(f'./ckpts/step{taski}.ckpt')
model._parallel_network.load_state_dict(state_dict)
model.eval()
#Build exemplars
model.after_task(taski, inc_dataset)
ypred, ytrue = model.eval_task(test_loader)
test_acc_stats = utils.compute_accuracy(ypred, ytrue, increments=model._increments, n_classes=model._n_classes)
test_results['results'].append(test_acc_stats)
ex.logger.info(f"task{taski} test top1acc:{test_acc_stats['top1']}")
avg_test_acc = results_utils.compute_avg_inc_acc(test_results['results'])
ex.logger.info(f"Test Average Incremental Accuracy: {avg_test_acc}")
if __name__ == "__main__":
# ex.add_config('./codes/base/configs/default.yaml')
ex.add_config("./configs/default.yaml")
ex.run_commandline()
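
# ----------------------------------------------------------------------------
# Editor-added usage note (illustrative, not part of the original file).
# With sacred's command line, the two commands above would be launched e.g. as
# (the config keys shown are assumptions based on the defaults in this script):
#
#   python main.py train with trial=0 exp.name=my_run
#   python main.py test  with exp.name=my_run
#
# `train`/`test` pick the @ex.command to run; `with` applies config updates on
# top of ./configs/default.yaml registered via ex.add_config above.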
| 8,825
| 37.710526
| 119
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/exps/der_womask/cifar100/b50/10steps/main.py
|
'''
@Author : Yan Shipeng, Xie Jiangwei
@Contact: yanshp@shanghaitech.edu.cn, xiejw@shanghaitech.edu.cn
'''
import sys
import os
import os.path as osp
import copy
import time
import shutil
import cProfile
import logging
from pathlib import Path
import numpy as np
import random
from easydict import EasyDict as edict
from tensorboardX import SummaryWriter
repo_name = 'DER-ClassIL.pytorch'
base_dir = osp.realpath(".")[:osp.realpath(".").index(repo_name) + len(repo_name)]
sys.path.insert(0, base_dir)
from sacred import Experiment
ex = Experiment(base_dir=base_dir)
# Save which files
# ex.add_source_file(osp.join(base_dir, "inclearn/models/icarl.py"))
# ex.add_source_file(osp.join(base_dir, "inclearn/lib/data.py"))
# ex.add_source_file(osp.join(base_dir, "inclearn/lib/network.py"))
# ex.add_source_file(osp.join(base_dir, "inclearn/convnet/resnet.py"))
# ex.add_source_file(osp.join(os.getcwd(), "icarl.py"))
# ex.add_source_file(osp.join(os.getcwd(), "network.py"))
# ex.add_source_file(osp.join(os.getcwd(), "resnet.py"))
# MongoDB Observer
# ex.observers.append(MongoObserver.create(url='xx.xx.xx.xx:port', db_name='classil'))
import torch
from inclearn.tools import factory, results_utils, utils
from inclearn.learn.pretrain import pretrain
from inclearn.tools.metrics import IncConfusionMeter
def initialization(config, seed, mode, exp_id):
# Add it if your input size is fixed
# ref: https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936
torch.backends.cudnn.benchmark = True # This will result in non-deterministic results.
# ex.captured_out_filter = lambda text: 'Output capturing turned off.'
cfg = edict(config)
utils.set_seed(cfg['seed'])
if exp_id is None:
exp_id = -1
cfg.exp.savedir = "./logs"
logger = utils.make_logger(f"exp{exp_id}_{cfg.exp.name}_{mode}", savedir=cfg.exp.savedir)
# Tensorboard
exp_name = f'{exp_id}_{cfg["exp"]["name"]}' if exp_id is not None else f'../inbox/{cfg["exp"]["name"]}'
tensorboard_dir = cfg["exp"]["tensorboard_dir"] + f"/{exp_name}"
    # Uncomment below to archive an existing tensorboard log instead of keeping only the latest one.
# if Path(tensorboard_dir).exists():
# shutil.move(tensorboard_dir, cfg["exp"]["tensorboard_dir"] + f"/../inbox/{time.time()}_{exp_name}")
tensorboard = SummaryWriter(tensorboard_dir)
return cfg, logger, tensorboard
@ex.command
def train(_run, _rnd, _seed):
cfg, ex.logger, tensorboard = initialization(_run.config, _seed, "train", _run._id)
ex.logger.info(cfg)
cfg.data_folder = osp.join(base_dir, "data")
start_time = time.time()
_train(cfg, _run, ex, tensorboard)
ex.logger.info("Training finished in {}s.".format(int(time.time() - start_time)))
def _train(cfg, _run, ex, tensorboard):
device = factory.set_device(cfg)
trial_i = cfg['trial']
inc_dataset = factory.get_data(cfg, trial_i)
ex.logger.info("classes_order")
ex.logger.info(inc_dataset.class_order)
model = factory.get_model(cfg, trial_i, _run, ex, tensorboard, inc_dataset)
if _run.meta_info["options"]["--file_storage"] is not None:
_save_dir = osp.join(_run.meta_info["options"]["--file_storage"], str(_run._id))
else:
_save_dir = cfg["exp"]["ckptdir"]
results = results_utils.get_template_results(cfg)
for task_i in range(inc_dataset.n_tasks):
task_info, train_loader, val_loader, test_loader = inc_dataset.new_task()
model.set_task_info(
task=task_info["task"],
total_n_classes=task_info["max_class"],
increment=task_info["increment"],
n_train_data=task_info["n_train_data"],
n_test_data=task_info["n_test_data"],
n_tasks=inc_dataset.n_tasks,
)
model.before_task(task_i, inc_dataset)
# TODO: Move to incmodel.py
if 'min_class' in task_info:
ex.logger.info("Train on {}->{}.".format(task_info["min_class"], task_info["max_class"]))
# Pretraining at step0 if needed
if task_i == 0 and cfg["start_class"] > 0:
do_pretrain(cfg, ex, model, device, train_loader, test_loader)
inc_dataset.shared_data_inc = train_loader.dataset.share_memory
elif task_i < cfg['start_task']:
state_dict = torch.load(f'./ckpts/step{task_i}.ckpt')
model._parallel_network.load_state_dict(state_dict)
inc_dataset.shared_data_inc = train_loader.dataset.share_memory
else:
model.train_task(train_loader, val_loader)
model.after_task(task_i, inc_dataset)
ex.logger.info("Eval on {}->{}.".format(0, task_info["max_class"]))
ypred, ytrue = model.eval_task(test_loader)
acc_stats = utils.compute_accuracy(ypred, ytrue, increments=model._increments, n_classes=model._n_classes)
#Logging
model._tensorboard.add_scalar(f"taskaccu/trial{trial_i}", acc_stats["top1"]["total"], task_i)
_run.log_scalar(f"trial{trial_i}_taskaccu", acc_stats["top1"]["total"], task_i)
_run.log_scalar(f"trial{trial_i}_task_top5_accu", acc_stats["top5"]["total"], task_i)
ex.logger.info(f"top1:{acc_stats['top1']}")
ex.logger.info(f"top5:{acc_stats['top5']}")
results["results"].append(acc_stats)
top1_avg_acc, top5_avg_acc = results_utils.compute_avg_inc_acc(results["results"])
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top1"] = top1_avg_acc
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top5"] = top5_avg_acc
ex.logger.info("Average Incremental Accuracy Top 1: {} Top 5: {}.".format(
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top1"],
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top5"],
))
if cfg["exp"]["name"]:
results_utils.save_results(results, cfg["exp"]["name"])
def do_pretrain(cfg, ex, model, device, train_loader, test_loader):
if not os.path.exists(osp.join(ex.base_dir, 'pretrain/')):
os.makedirs(osp.join(ex.base_dir, 'pretrain/'))
model_path = osp.join(
ex.base_dir,
"pretrain/{}_{}_cosine_{}_multi_{}_aux{}_nplus1_{}_{}_trial_{}_{}_seed_{}_start_{}_epoch_{}.pth".format(
cfg["model"],
cfg["convnet"],
cfg["weight_normalization"],
cfg["der"],
cfg["use_aux_cls"],
cfg["aux_n+1"],
cfg["dataset"],
cfg["trial"],
cfg["train_head"],
cfg['seed'],
cfg["start_class"],
cfg["pretrain"]["epochs"],
),
)
if osp.exists(model_path):
print("Load pretrain model")
if hasattr(model._network, "module"):
model._network.module.load_state_dict(torch.load(model_path))
else:
model._network.load_state_dict(torch.load(model_path))
else:
pretrain(cfg, ex, model, device, train_loader, test_loader, model_path)
@ex.command
def test(_run, _rnd, _seed):
cfg, ex.logger, tensorboard = initialization(_run.config, _seed, "test", _run._id)
ex.logger.info(cfg)
trial_i = cfg['trial']
cfg.data_folder = osp.join(base_dir, "data")
inc_dataset = factory.get_data(cfg, trial_i)
# inc_dataset._current_task = taski
# train_loader = inc_dataset._get_loader(inc_dataset.data_cur, inc_dataset.targets_cur)
model = factory.get_model(cfg, trial_i, _run, ex, tensorboard, inc_dataset)
model._network.task_size = cfg.increment
test_results = results_utils.get_template_results(cfg)
for taski in range(inc_dataset.n_tasks):
task_info, train_loader, _, test_loader = inc_dataset.new_task()
model.set_task_info(
task=task_info["task"],
total_n_classes=task_info["max_class"],
increment=task_info["increment"],
n_train_data=task_info["n_train_data"],
n_test_data=task_info["n_test_data"],
n_tasks=task_info["max_task"]
)
model.before_task(taski, inc_dataset)
state_dict = torch.load(f'./ckpts/step{taski}.ckpt')
model._parallel_network.load_state_dict(state_dict)
model.eval()
#Build exemplars
model.after_task(taski, inc_dataset)
ypred, ytrue = model.eval_task(test_loader)
test_acc_stats = utils.compute_accuracy(ypred, ytrue, increments=model._increments, n_classes=model._n_classes)
test_results['results'].append(test_acc_stats)
ex.logger.info(f"task{taski} test top1acc:{test_acc_stats['top1']}")
avg_test_acc = results_utils.compute_avg_inc_acc(test_results['results'])
ex.logger.info(f"Test Average Incremental Accuracy: {avg_test_acc}")
if __name__ == "__main__":
# ex.add_config('./codes/base/configs/default.yaml')
ex.add_config("./configs/default.yaml")
ex.run_commandline()
| 8,825
| 37.710526
| 119
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/exps/der_womask/imagenet-100/b0_10s/main.py
|
'''
@Author : Yan Shipeng, Xie Jiangwei
@Contact: yanshp@shanghaitech.edu.cn, xiejw@shanghaitech.edu.cn
'''
import sys
import os
import os.path as osp
import copy
import time
import shutil
import cProfile
import logging
from pathlib import Path
import numpy as np
import random
from easydict import EasyDict as edict
from tensorboardX import SummaryWriter
repo_name = 'DER-ClassIL.pytorch'
base_dir = osp.realpath(".")[:osp.realpath(".").index(repo_name) + len(repo_name)]
sys.path.insert(0, base_dir)
from sacred import Experiment
ex = Experiment(base_dir=base_dir)
# Save which files
# ex.add_source_file(osp.join(base_dir, "inclearn/models/icarl.py"))
# ex.add_source_file(osp.join(base_dir, "inclearn/lib/data.py"))
# ex.add_source_file(osp.join(base_dir, "inclearn/lib/network.py"))
# ex.add_source_file(osp.join(base_dir, "inclearn/convnet/resnet.py"))
# ex.add_source_file(osp.join(os.getcwd(), "icarl.py"))
# ex.add_source_file(osp.join(os.getcwd(), "network.py"))
# ex.add_source_file(osp.join(os.getcwd(), "resnet.py"))
# MongoDB Observer
# ex.observers.append(MongoObserver.create(url='xx.xx.xx.xx:port', db_name='classil'))
import torch
from inclearn.tools import factory, results_utils, utils
from inclearn.learn.pretrain import pretrain
from inclearn.tools.metrics import IncConfusionMeter
def initialization(config, seed, mode, exp_id):
# Add it if your input size is fixed
# ref: https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936
torch.backends.cudnn.benchmark = True # This will result in non-deterministic results.
# ex.captured_out_filter = lambda text: 'Output capturing turned off.'
cfg = edict(config)
utils.set_seed(cfg['seed'])
if exp_id is None:
exp_id = -1
cfg.exp.savedir = "./logs"
logger = utils.make_logger(f"exp{exp_id}_{cfg.exp.name}_{mode}", savedir=cfg.exp.savedir)
# Tensorboard
exp_name = f'{exp_id}_{cfg["exp"]["name"]}' if exp_id is not None else f'../inbox/{cfg["exp"]["name"]}'
tensorboard_dir = cfg["exp"]["tensorboard_dir"] + f"/{exp_name}"
    # Uncomment below to archive an existing tensorboard log instead of keeping only the latest one.
# if Path(tensorboard_dir).exists():
# shutil.move(tensorboard_dir, cfg["exp"]["tensorboard_dir"] + f"/../inbox/{time.time()}_{exp_name}")
tensorboard = SummaryWriter(tensorboard_dir)
return cfg, logger, tensorboard
@ex.command
def train(_run, _rnd, _seed):
cfg, ex.logger, tensorboard = initialization(_run.config, _seed, "train", _run._id)
cfg.data_folder = osp.join(base_dir, "data")
start_time = time.time()
_train(cfg, _run, ex, tensorboard)
ex.logger.info("Training finished in {}s.".format(int(time.time() - start_time)))
def _train(cfg, _run, ex, tensorboard):
device = factory.set_device(cfg)
trial_i = cfg['trial']
inc_dataset = factory.get_data(cfg, trial_i)
ex.logger.info("classes_order")
ex.logger.info(inc_dataset.class_order)
model = factory.get_model(cfg, trial_i, _run, ex, tensorboard, inc_dataset)
if _run.meta_info["options"]["--file_storage"] is not None:
_save_dir = osp.join(_run.meta_info["options"]["--file_storage"], str(_run._id))
else:
_save_dir = cfg["exp"]["ckptdir"]
results = results_utils.get_template_results(cfg)
for task_i in range(inc_dataset.n_tasks):
task_info, train_loader, val_loader, test_loader = inc_dataset.new_task()
model.set_task_info(
task=task_info["task"],
total_n_classes=task_info["max_class"],
increment=task_info["increment"],
n_train_data=task_info["n_train_data"],
n_test_data=task_info["n_test_data"],
n_tasks=inc_dataset.n_tasks,
)
model.before_task(task_i, inc_dataset)
# TODO: Move to incmodel.py
if 'min_class' in task_info:
ex.logger.info("Train on {}->{}.".format(task_info["min_class"], task_info["max_class"]))
# Pretraining at step0 if needed
if task_i == 0 and cfg["start_class"] > 0:
do_pretrain(cfg, ex, model, device, train_loader, test_loader)
inc_dataset.shared_data_inc = train_loader.dataset.share_memory
elif task_i < cfg['start_task']:
state_dict = torch.load(f'./ckpts/step{task_i}.ckpt')
model._parallel_network.load_state_dict(state_dict)
inc_dataset.shared_data_inc = train_loader.dataset.share_memory
else:
model.train_task(train_loader, val_loader)
model.after_task(task_i, inc_dataset)
ex.logger.info("Eval on {}->{}.".format(0, task_info["max_class"]))
ypred, ytrue = model.eval_task(test_loader)
acc_stats = utils.compute_accuracy(ypred, ytrue, increments=model._increments, n_classes=model._n_classes)
#Logging
model._tensorboard.add_scalar(f"taskaccu/trial{trial_i}", acc_stats["top1"]["total"], task_i)
_run.log_scalar(f"trial{trial_i}_taskaccu", acc_stats["top1"]["total"], task_i)
_run.log_scalar(f"trial{trial_i}_task_top5_accu", acc_stats["top5"]["total"], task_i)
ex.logger.info(f"top1:{acc_stats['top1']}")
ex.logger.info(f"top5:{acc_stats['top5']}")
results["results"].append(acc_stats)
top1_avg_acc, top5_avg_acc = results_utils.compute_avg_inc_acc(results["results"])
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top1"] = top1_avg_acc
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top5"] = top5_avg_acc
ex.logger.info("Average Incremental Accuracy Top 1: {} Top 5: {}.".format(
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top1"],
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top5"],
))
if cfg["exp"]["name"]:
results_utils.save_results(results, cfg["exp"]["name"])
def do_pretrain(cfg, ex, model, device, train_loader, test_loader):
if not os.path.exists(osp.join(ex.base_dir, 'pretrain/')):
os.makedirs(osp.join(ex.base_dir, 'pretrain/'))
model_path = osp.join(
ex.base_dir,
"pretrain/{}_{}_cosine_{}_multi_{}_aux{}_nplus1_{}_{}_trial_{}_{}_seed_{}_start_{}_epoch_{}.pth".format(
cfg["model"],
cfg["convnet"],
cfg["weight_normalization"],
cfg["der"],
cfg["use_aux_cls"],
cfg["aux_n+1"],
cfg["dataset"],
cfg["trial"],
cfg["train_head"],
cfg['seed'],
cfg["start_class"],
cfg["pretrain"]["epochs"],
),
)
if osp.exists(model_path):
print("Load pretrain model")
if hasattr(model._network, "module"):
model._network.module.load_state_dict(torch.load(model_path))
else:
model._network.load_state_dict(torch.load(model_path))
else:
pretrain(cfg, ex, model, device, train_loader, test_loader, model_path)
@ex.command
def test(_run, _rnd, _seed):
cfg, ex.logger, tensorboard = initialization(_run.config, _seed, "test", _run._id)
trial_i = cfg['trial']
cfg.data_folder = osp.join(base_dir, "data")
inc_dataset = factory.get_data(cfg, trial_i)
# inc_dataset._current_task = taski
# train_loader = inc_dataset._get_loader(inc_dataset.data_cur, inc_dataset.targets_cur)
model = factory.get_model(cfg, trial_i, _run, ex, tensorboard, inc_dataset)
model._network.task_size = cfg.increment
test_results = results_utils.get_template_results(cfg)
for taski in range(inc_dataset.n_tasks):
task_info, train_loader, _, test_loader = inc_dataset.new_task()
model.set_task_info(
task=task_info["task"],
total_n_classes=task_info["max_class"],
increment=task_info["increment"],
n_train_data=task_info["n_train_data"],
n_test_data=task_info["n_test_data"],
n_tasks=task_info["max_task"]
)
model.before_task(taski, inc_dataset)
state_dict = torch.load(f'./ckpts/step{taski}.ckpt')
model._parallel_network.load_state_dict(state_dict)
model.eval()
#Build exemplars
model.after_task(taski, inc_dataset)
ypred, ytrue = model.eval_task(test_loader)
test_acc_stats = utils.compute_accuracy(ypred, ytrue, increments=model._increments, n_classes=model._n_classes)
test_results['results'].append(test_acc_stats)
ex.logger.info(f"task{taski} test top1acc:{test_acc_stats['top1']}")
avg_test_acc = results_utils.compute_avg_inc_acc(test_results['results'])
ex.logger.info(f"Test Average Incremental Accuracy: {avg_test_acc}")
if __name__ == "__main__":
# ex.add_config('./codes/base/configs/default.yaml')
ex.add_config("./configs/default.yaml")
ex.run_commandline()
| 8,777
| 37.840708
| 119
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/exps/weight_align/cifar100/b0/10steps/main.py
|
'''
@Author : Yan Shipeng, Xie Jiangwei
@Contact: yanshp@shanghaitech.edu.cn, xiejw@shanghaitech.edu.cn
'''
import sys
import os
import os.path as osp
import copy
import time
import shutil
import cProfile
import logging
from pathlib import Path
import numpy as np
import random
from easydict import EasyDict as edict
from tensorboardX import SummaryWriter
repo_name = 'DER-ClassIL.pytorch'
base_dir = osp.realpath(".")[:osp.realpath(".").index(repo_name) + len(repo_name)]
sys.path.insert(0, base_dir)
from sacred import Experiment
ex = Experiment(base_dir=base_dir)
# Save which files
# ex.add_source_file(osp.join(base_dir, "inclearn/models/icarl.py"))
# ex.add_source_file(osp.join(base_dir, "inclearn/lib/data.py"))
# ex.add_source_file(osp.join(base_dir, "inclearn/lib/network.py"))
# ex.add_source_file(osp.join(base_dir, "inclearn/convnet/resnet.py"))
# ex.add_source_file(osp.join(os.getcwd(), "icarl.py"))
# ex.add_source_file(osp.join(os.getcwd(), "network.py"))
# ex.add_source_file(osp.join(os.getcwd(), "resnet.py"))
# MongoDB Observer
# ex.observers.append(MongoObserver.create(url='xx.xx.xx.xx:port', db_name='classil'))
import torch
from inclearn.tools import factory, results_utils, utils
from inclearn.learn.pretrain import pretrain
from inclearn.tools.metrics import IncConfusionMeter
def initialization(config, seed, mode, exp_id):
# Add it if your input size is fixed
# ref: https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936
torch.backends.cudnn.benchmark = True # This will result in non-deterministic results.
# ex.captured_out_filter = lambda text: 'Output capturing turned off.'
cfg = edict(config)
utils.set_seed(cfg['seed'])
if exp_id is None:
exp_id = -1
cfg.exp.savedir = "./logs"
logger = utils.make_logger(f"exp{exp_id}_{cfg.exp.name}_{mode}", savedir=cfg.exp.savedir)
# Tensorboard
exp_name = f'{exp_id}_{cfg["exp"]["name"]}' if exp_id is not None else f'../inbox/{cfg["exp"]["name"]}'
tensorboard_dir = cfg["exp"]["tensorboard_dir"] + f"/{exp_name}"
    # Uncomment below to archive an existing tensorboard log instead of keeping only the latest one.
# if Path(tensorboard_dir).exists():
# shutil.move(tensorboard_dir, cfg["exp"]["tensorboard_dir"] + f"/../inbox/{time.time()}_{exp_name}")
tensorboard = SummaryWriter(tensorboard_dir)
return cfg, logger, tensorboard
@ex.command
def train(_run, _rnd, _seed):
cfg, ex.logger, tensorboard = initialization(_run.config, _seed, "train", _run._id)
ex.logger.info(cfg)
cfg.data_folder = osp.join(base_dir, "data")
start_time = time.time()
_train(cfg, _run, ex, tensorboard)
ex.logger.info("Training finished in {}s.".format(int(time.time() - start_time)))
def _train(cfg, _run, ex, tensorboard):
device = factory.set_device(cfg)
trial_i = cfg['trial']
inc_dataset = factory.get_data(cfg, trial_i)
ex.logger.info("classes_order")
ex.logger.info(inc_dataset.class_order)
model = factory.get_model(cfg, trial_i, _run, ex, tensorboard, inc_dataset)
if _run.meta_info["options"]["--file_storage"] is not None:
_save_dir = osp.join(_run.meta_info["options"]["--file_storage"], str(_run._id))
else:
_save_dir = cfg["exp"]["ckptdir"]
results = results_utils.get_template_results(cfg)
for task_i in range(inc_dataset.n_tasks):
task_info, train_loader, val_loader, test_loader = inc_dataset.new_task()
model.set_task_info(
task=task_info["task"],
total_n_classes=task_info["max_class"],
increment=task_info["increment"],
n_train_data=task_info["n_train_data"],
n_test_data=task_info["n_test_data"],
n_tasks=inc_dataset.n_tasks,
)
model.before_task(task_i, inc_dataset)
# TODO: Move to incmodel.py
if 'min_class' in task_info:
ex.logger.info("Train on {}->{}.".format(task_info["min_class"], task_info["max_class"]))
# Pretraining at step0 if needed
if task_i == 0 and cfg["start_class"] > 0:
do_pretrain(cfg, ex, model, device, train_loader, test_loader)
inc_dataset.shared_data_inc = train_loader.dataset.share_memory
elif task_i < cfg['start_task']:
state_dict = torch.load(f'./ckpts/step{task_i}.ckpt')
model._parallel_network.load_state_dict(state_dict)
inc_dataset.shared_data_inc = train_loader.dataset.share_memory
else:
model.train_task(train_loader, val_loader)
model.after_task(task_i, inc_dataset)
ex.logger.info("Eval on {}->{}.".format(0, task_info["max_class"]))
ypred, ytrue = model.eval_task(test_loader)
acc_stats = utils.compute_accuracy(ypred, ytrue, increments=model._increments, n_classes=model._n_classes)
#Logging
model._tensorboard.add_scalar(f"taskaccu/trial{trial_i}", acc_stats["top1"]["total"], task_i)
_run.log_scalar(f"trial{trial_i}_taskaccu", acc_stats["top1"]["total"], task_i)
_run.log_scalar(f"trial{trial_i}_task_top5_accu", acc_stats["top5"]["total"], task_i)
ex.logger.info(f"top1:{acc_stats['top1']}")
ex.logger.info(f"top5:{acc_stats['top5']}")
results["results"].append(acc_stats)
top1_avg_acc, top5_avg_acc = results_utils.compute_avg_inc_acc(results["results"])
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top1"] = top1_avg_acc
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top5"] = top5_avg_acc
ex.logger.info("Average Incremental Accuracy Top 1: {} Top 5: {}.".format(
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top1"],
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top5"],
))
if cfg["exp"]["name"]:
results_utils.save_results(results, cfg["exp"]["name"])
def do_pretrain(cfg, ex, model, device, train_loader, test_loader):
if not os.path.exists(osp.join(ex.base_dir, 'pretrain/')):
os.makedirs(osp.join(ex.base_dir, 'pretrain/'))
model_path = osp.join(
ex.base_dir,
"pretrain/{}_{}_cosine_{}_multi_{}_aux{}_nplus1_{}_{}_trial_{}_{}_seed_{}_start_{}_epoch_{}.pth".format(
cfg["model"],
cfg["convnet"],
cfg["weight_normalization"],
cfg["der"],
cfg["use_aux_cls"],
cfg["aux_n+1"],
cfg["dataset"],
cfg["trial"],
cfg["train_head"],
cfg['seed'],
cfg["start_class"],
cfg["pretrain"]["epochs"],
),
)
if osp.exists(model_path):
print("Load pretrain model")
if hasattr(model._network, "module"):
model._network.module.load_state_dict(torch.load(model_path))
else:
model._network.load_state_dict(torch.load(model_path))
else:
pretrain(cfg, ex, model, device, train_loader, test_loader, model_path)
@ex.command
def test(_run, _rnd, _seed):
cfg, ex.logger, tensorboard = initialization(_run.config, _seed, "test", _run._id)
ex.logger.info(cfg)
trial_i = cfg['trial']
cfg.data_folder = osp.join(base_dir, "data")
inc_dataset = factory.get_data(cfg, trial_i)
# inc_dataset._current_task = taski
# train_loader = inc_dataset._get_loader(inc_dataset.data_cur, inc_dataset.targets_cur)
model = factory.get_model(cfg, trial_i, _run, ex, tensorboard, inc_dataset)
model._network.task_size = cfg.increment
test_results = results_utils.get_template_results(cfg)
for taski in range(inc_dataset.n_tasks):
task_info, train_loader, _, test_loader = inc_dataset.new_task()
model.set_task_info(
task=task_info["task"],
total_n_classes=task_info["max_class"],
increment=task_info["increment"],
n_train_data=task_info["n_train_data"],
n_test_data=task_info["n_test_data"],
n_tasks=task_info["max_task"]
)
model.before_task(taski, inc_dataset)
state_dict = torch.load(f'./ckpts/step{taski}.ckpt')
model._parallel_network.load_state_dict(state_dict)
model.eval()
#Build exemplars
model.after_task(taski, inc_dataset)
ypred, ytrue = model.eval_task(test_loader)
test_acc_stats = utils.compute_accuracy(ypred, ytrue, increments=model._increments, n_classes=model._n_classes)
test_results['results'].append(test_acc_stats)
ex.logger.info(f"task{taski} test top1acc:{test_acc_stats['top1']}")
avg_test_acc = results_utils.compute_avg_inc_acc(test_results['results'])
ex.logger.info(f"Test Average Incremental Accuracy: {avg_test_acc}")
if __name__ == "__main__":
# ex.add_config('./codes/base/configs/default.yaml')
ex.add_config("./configs/default.yaml")
ex.run_commandline()
| 8,825
| 37.710526
| 119
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/__init__.py
| 0
| 0
| 0
|
py
|
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/learn/pretrain.py
|
import os.path as osp
import torch
import torch.nn.functional as F
from inclearn.tools import factory, utils
from inclearn.tools.metrics import ClassErrorMeter, AverageValueMeter
# import line_profiler
# import atexit
# profile = line_profiler.LineProfiler()
# atexit.register(profile.print_stats)
def _compute_loss(cfg, logits, targets, device):
if cfg["train_head"] == "sigmoid":
n_classes = cfg["start_class"]
onehot_targets = utils.to_onehot(targets, n_classes).to(device)
loss = F.binary_cross_entropy_with_logits(logits, onehot_targets)
elif cfg["train_head"] == "softmax":
loss = F.cross_entropy(logits, targets)
else:
raise ValueError()
return loss
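
# ----------------------------------------------------------------------------
# Editor-added illustrative sketch (not part of the original file): how the two
# heads handled by _compute_loss above behave on dummy data. The batch size,
# class count and config values below are assumptions made for this demo.
def _demo_compute_loss():
    device = torch.device("cpu")
    logits = torch.randn(4, 10)            # batch of 4 samples, 10 classes
    targets = torch.tensor([0, 3, 7, 9])
    # softmax head: plain cross-entropy over all logits
    ce = _compute_loss({"train_head": "softmax", "start_class": 10}, logits, targets, device)
    # sigmoid head: per-class binary cross-entropy against one-hot targets
    bce = _compute_loss({"train_head": "sigmoid", "start_class": 10}, logits, targets, device)
    return ce, bce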
def train(cfg, model, optimizer, device, train_loader):
_loss = 0.0
accu = ClassErrorMeter(accuracy=True)
accu.reset()
model.train()
for i, (inputs, targets) in enumerate(train_loader, start=1):
# assert torch.isnan(inputs).sum().item() == 0
optimizer.zero_grad()
inputs, targets = inputs.to(device), targets.to(device)
logits = model._parallel_network(inputs)['logit']
if accu is not None:
accu.add(logits.detach(), targets)
loss = _compute_loss(cfg, logits, targets, device)
if torch.isnan(loss):
import pdb
pdb.set_trace()
loss.backward()
optimizer.step()
_loss += loss
return (
round(_loss.item() / i, 3),
round(accu.value()[0], 3),
)
def test(cfg, model, device, test_loader):
_loss = 0.0
accu = ClassErrorMeter(accuracy=True)
accu.reset()
model.eval()
with torch.no_grad():
for i, (inputs, targets) in enumerate(test_loader, start=1):
# assert torch.isnan(inputs).sum().item() == 0
inputs, targets = inputs.to(device), targets.to(device)
logits = model._parallel_network(inputs)['logit']
if accu is not None:
accu.add(logits.detach(), targets)
loss = _compute_loss(cfg, logits, targets, device)
if torch.isnan(loss):
import pdb
pdb.set_trace()
_loss = _loss + loss
return round(_loss.item() / i, 3), round(accu.value()[0], 3)
def pretrain(cfg, ex, model, device, train_loader, test_loader, model_path):
ex.logger.info(f"nb Train {len(train_loader.dataset)} Eval {len(test_loader.dataset)}")
optimizer = torch.optim.SGD(model._network.parameters(),
lr=cfg["pretrain"]["lr"],
momentum=0.9,
weight_decay=cfg["pretrain"]["weight_decay"])
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
cfg["pretrain"]["scheduling"],
gamma=cfg["pretrain"]["lr_decay"])
test_loss, test_acc = float("nan"), float("nan")
for e in range(cfg["pretrain"]["epochs"]):
train_loss, train_acc = train(cfg, model, optimizer, device, train_loader)
if e % 5 == 0:
test_loss, test_acc = test(cfg, model, device, test_loader)
ex.logger.info(
"Pretrain Class {}, Epoch {}/{} => Clf Train loss: {}, Accu {} | Eval loss: {}, Accu {}".format(
cfg["start_class"], e + 1, cfg["pretrain"]["epochs"], train_loss, train_acc, test_loss, test_acc))
else:
ex.logger.info("Pretrain Class {}, Epoch {}/{} => Clf Train loss: {}, Accu {} ".format(
cfg["start_class"], e + 1, cfg["pretrain"]["epochs"], train_loss, train_acc))
scheduler.step()
if hasattr(model._network, "module"):
torch.save(model._network.module.state_dict(), model_path)
else:
torch.save(model._network.state_dict(), model_path)
| 3,886
| 36.019048
| 118
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/learn/__init__.py
| 0
| 0
| 0
|
py
|
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/tools/memory.py
|
import numpy as np
from copy import deepcopy
import torch
from torch.nn import functional as F
from inclearn.tools.utils import get_class_loss
from inclearn.convnet.utils import extract_features
class MemorySize:
def __init__(self, mode, inc_dataset, total_memory=None, fixed_memory_per_cls=None):
self.mode = mode
assert mode.lower() in ["uniform_fixed_per_cls", "uniform_fixed_total_mem", "dynamic_fixed_per_cls"]
self.total_memory = total_memory
self.fixed_memory_per_cls = fixed_memory_per_cls
self._n_classes = 0
self.mem_per_cls = []
self._inc_dataset = inc_dataset
def update_n_classes(self, n_classes):
self._n_classes = n_classes
def update_memory_per_cls_uniform(self, n_classes):
if "fixed_per_cls" in self.mode:
self.mem_per_cls = [self.fixed_memory_per_cls for i in range(n_classes)]
elif "fixed_total_mem" in self.mode:
self.mem_per_cls = [self.total_memory // n_classes for i in range(n_classes)]
return self.mem_per_cls
def update_memory_per_cls(self, network, n_classes, task_size):
if "uniform" in self.mode:
self.update_memory_per_cls_uniform(n_classes)
else:
if n_classes == task_size:
self.update_memory_per_cls_uniform(n_classes)
@property
def memsize(self):
if self.mode == "fixed_total_mem":
return self.total_memory
elif self.mode == "fixed_per_cls":
return self.fixed_memory_per_cls * self._n_classes
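
# ----------------------------------------------------------------------------
# Editor-added illustrative sketch (not part of the original file): how the
# memory budget above is split. The budget numbers are assumptions.
def _demo_memory_size():
    # 2000 exemplar slots shared uniformly over however many classes are seen
    mem = MemorySize("uniform_fixed_total_mem", inc_dataset=None, total_memory=2000)
    per_cls_after_10 = mem.update_memory_per_cls_uniform(10)   # 200 slots per class
    per_cls_after_50 = mem.update_memory_per_cls_uniform(50)   # 40 slots per class
    # fixed budget per class instead: always 20 exemplars for every class
    mem_fixed = MemorySize("uniform_fixed_per_cls", inc_dataset=None, fixed_memory_per_cls=20)
    per_cls_fixed = mem_fixed.update_memory_per_cls_uniform(10)  # 20 slots per class
    return per_cls_after_10, per_cls_after_50, per_cls_fixed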
def compute_examplar_mean(feat_norm, feat_flip, herding_mat, nb_max):
EPSILON = 1e-8
D = feat_norm.T
D = D / (np.linalg.norm(D, axis=0) + EPSILON)
D2 = feat_flip.T
D2 = D2 / (np.linalg.norm(D2, axis=0) + EPSILON)
alph = herding_mat
alph = (alph > 0) * (alph < nb_max + 1) * 1.0
alph_mean = alph / np.sum(alph)
mean = (np.dot(D, alph_mean) + np.dot(D2, alph_mean)) / 2
# mean = np.dot(D, alph_mean)
mean /= np.linalg.norm(mean) + EPSILON
return mean, alph
def select_examplars(features, nb_max):
EPSILON = 1e-8
D = features.T
D = D / (np.linalg.norm(D, axis=0) + EPSILON)
mu = np.mean(D, axis=1)
herding_matrix = np.zeros((features.shape[0], ))
idxes = []
w_t = mu
iter_herding, iter_herding_eff = 0, 0
while not (np.sum(herding_matrix != 0) == min(nb_max, features.shape[0])) and iter_herding_eff < 1000:
tmp_t = np.dot(w_t, D)
# tmp_t = -np.linalg.norm(w_t[:,np.newaxis]-D, axis=0)
# tmp_t = np.linalg.norm(w_t[:,np.newaxis]-D, axis=0)
ind_max = np.argmax(tmp_t)
iter_herding_eff += 1
if herding_matrix[ind_max] == 0:
herding_matrix[ind_max] = 1 + iter_herding
idxes.append(ind_max)
iter_herding += 1
w_t = w_t + mu - D[:, ind_max]
return herding_matrix, idxes
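
# ----------------------------------------------------------------------------
# Editor-added illustrative sketch (not part of the original file): herding
# selection on random features. The shapes and the seed are assumptions.
def _demo_select_examplars():
    rng = np.random.RandomState(0)
    features = rng.randn(100, 64)                 # 100 samples, 64-d features
    herding_matrix, idxes = select_examplars(features, nb_max=10)
    # `idxes` lists the samples in the order herding picked them; the selected
    # exemplars greedily keep the running exemplar mean close to the class mean.
    return herding_matrix, idxes[:10]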
def random_selection(n_classes, task_size, network, logger, inc_dataset, memory_per_class: list):
    # TODO: Move data_memory, targets_memory into IncDataset
logger.info("Building & updating memory.(Random Selection)")
tmp_data_memory, tmp_targets_memory = [], []
assert len(memory_per_class) == n_classes
for class_idx in range(n_classes):
if class_idx < n_classes - task_size:
inputs, targets, loader = inc_dataset.get_custom_loader_from_memory([class_idx])
else:
inputs, targets, loader = inc_dataset.get_custom_loader(class_idx, mode="test")
memory_this_cls = min(memory_per_class[class_idx], inputs.shape[0])
idxs = np.random.choice(inputs.shape[0], memory_this_cls, replace=False)
tmp_data_memory.append(inputs[idxs])
tmp_targets_memory.append(targets[idxs])
tmp_data_memory = np.concatenate(tmp_data_memory)
tmp_targets_memory = np.concatenate(tmp_targets_memory)
return tmp_data_memory, tmp_targets_memory
def herding(n_classes, task_size, network, herding_matrix, inc_dataset, shared_data_inc, memory_per_class: list,
logger):
"""Herding matrix: list
"""
logger.info("Building & updating memory.(iCaRL)")
tmp_data_memory, tmp_targets_memory = [], []
for class_idx in range(n_classes):
inputs = inc_dataset.data_train[inc_dataset.targets_train == class_idx]
targets = inc_dataset.targets_train[inc_dataset.targets_train == class_idx]
if class_idx >= n_classes - task_size:
if len(shared_data_inc) > len(inc_dataset.targets_inc):
share_memory = [shared_data_inc[i] for i in np.where(inc_dataset.targets_inc == class_idx)[0].tolist()]
else:
share_memory = []
for i in np.where(inc_dataset.targets_inc == class_idx)[0].tolist():
if i < len(shared_data_inc):
share_memory.append(shared_data_inc[i])
# share_memory = [shared_data_inc[i] for i in np.where(inc_dataset.targets_inc == class_idx)[0].tolist()]
loader = inc_dataset._get_loader(inc_dataset.data_inc[inc_dataset.targets_inc == class_idx],
inc_dataset.targets_inc[inc_dataset.targets_inc == class_idx],
share_memory=share_memory,
batch_size=1024,
shuffle=False,
mode="test")
features, _ = extract_features(network, loader)
# features_flipped, _ = extract_features(network, inc_dataset.get_custom_loader(class_idx, mode="flip")[-1])
herding_matrix.append(select_examplars(features, memory_per_class[class_idx])[0])
alph = herding_matrix[class_idx]
alph = (alph > 0) * (alph < memory_per_class[class_idx] + 1) * 1.0
# examplar_mean, alph = compute_examplar_mean(features, features_flipped, herding_matrix[class_idx],
# memory_per_class[class_idx])
tmp_data_memory.append(inputs[np.where(alph == 1)[0]])
tmp_targets_memory.append(targets[np.where(alph == 1)[0]])
tmp_data_memory = np.concatenate(tmp_data_memory)
tmp_targets_memory = np.concatenate(tmp_targets_memory)
return tmp_data_memory, tmp_targets_memory, herding_matrix
| 6,375
| 41.791946
| 120
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/tools/results_utils.py
|
import glob
import json
import math
import os
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from . import utils
def get_template_results(cfg):
return {"config": cfg, "results": []}
def save_results(results, label):
del results["config"]["device"]
folder_path = os.path.join("results", "{}_{}".format(utils.get_date(), label))
if not os.path.exists(folder_path):
os.makedirs(folder_path)
file_path = "{}_{}_.json".format(utils.get_date(), results["config"]["seed"])
with open(os.path.join(folder_path, file_path), "w+") as f:
json.dump(results, f, indent=2)
def compute_avg_inc_acc(results):
"""Computes the average incremental accuracy as defined in iCaRL.
The average incremental accuracies at task X are the average of accuracies
at task 0, 1, ..., and X.
    :param results: A list of dicts with the per-step accuracies ("top1"/"top5").
    :return: A tuple (average top1 accuracy, average top5 accuracy or None).
"""
top1_tasks_accuracy = [r['top1']["total"] for r in results]
top1acc = sum(top1_tasks_accuracy) / len(top1_tasks_accuracy)
if "top5" in results[0].keys():
top5_tasks_accuracy = [r['top5']["total"] for r in results]
top5acc = sum(top5_tasks_accuracy) / len(top5_tasks_accuracy)
else:
top5acc = None
return top1acc, top5acc
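
# ----------------------------------------------------------------------------
# Editor-added illustrative sketch (not part of the original file): a tiny
# worked example of the averaging above. The accuracy numbers are made up.
def _demo_avg_inc_acc():
    results = [
        {"top1": {"total": 80.0}, "top5": {"total": 95.0}},   # after task 0
        {"top1": {"total": 70.0}, "top5": {"total": 90.0}},   # after task 1
        {"top1": {"total": 60.0}, "top5": {"total": 85.0}},   # after task 2
    ]
    top1, top5 = compute_avg_inc_acc(results)
    # top1 == (80 + 70 + 60) / 3 == 70.0 and top5 == 90.0
    return top1, top5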
| 1,315
| 28.909091
| 82
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/tools/utils.py
|
import random
from copy import deepcopy
import numpy as np
import datetime
import torch
from inclearn.tools.metrics import ClassErrorMeter
def get_date():
return datetime.datetime.now().strftime("%Y%m%d")
def to_onehot(targets, n_classes):
if not hasattr(targets, "device"):
targets = torch.from_numpy(targets)
onehot = torch.zeros(targets.shape[0], n_classes).to(targets.device)
onehot.scatter_(dim=1, index=targets.long().view(-1, 1), value=1.0)
return onehot
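
# Editor-added illustrative example (not part of the original file):
# >>> to_onehot(torch.tensor([2, 0]), n_classes=4)
# tensor([[0., 0., 1., 0.],
#         [1., 0., 0., 0.]])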
def get_class_loss(network, cur_n_cls, loader):
class_loss = torch.zeros(cur_n_cls)
n_cls_data = torch.zeros(cur_n_cls) # the num of imgs for cls i.
EPS = 1e-10
task_size = 10
network.eval()
for x, y in loader:
x, y = x.cuda(), y.cuda()
preds = network(x)['logit'].softmax(dim=1)
# preds[:,-task_size:] = preds[:,-task_size:].softmax(dim=1)
for i, lbl in enumerate(y):
class_loss[lbl] = class_loss[lbl] - (preds[i, lbl] + EPS).detach().log().cpu()
n_cls_data[lbl] += 1
class_loss = class_loss / n_cls_data
return class_loss
def get_featnorm_grouped_by_class(network, cur_n_cls, loader):
"""
    Returns: feat_norms: np.ndarray of shape (cur_n_cls,), where feat_norms[idx]
    is the mean L2 norm of the features of the images for class idx.
"""
feats = [[] for i in range(cur_n_cls)]
feat_norms = np.zeros(cur_n_cls)
network.eval()
with torch.no_grad():
for x, y in loader:
x = x.cuda()
feat = network(x)['feature'].cpu()
for i, lbl in enumerate(y):
feats[lbl].append(feat[y == lbl])
for i in range(len(feats)):
if len(feats[i]) != 0:
feat_cls = torch.cat((feats[i]))
feat_norms[i] = torch.norm(feat_cls, p=2, dim=1).mean().data.numpy()
return feat_norms
def set_seed(seed):
print("Set seed", seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True # This will slow down training.
torch.backends.cudnn.benchmark = False
def display_weight_norm(logger, network, increments, tag):
weight_norms = [[] for _ in range(len(increments))]
increments = np.cumsum(np.array(increments))
for idx in range(network.module.classifier.weight.shape[0]):
norm = torch.norm(network.module.classifier.weight[idx].data, p=2).item()
for i in range(len(weight_norms)):
if idx < increments[i]:
break
weight_norms[i].append(round(norm, 3))
avg_weight_norm = []
for idx in range(len(weight_norms)):
avg_weight_norm.append(round(np.array(weight_norms[idx]).mean(), 3))
logger.info("%s: Weight norm per class %s" % (tag, str(avg_weight_norm)))
def display_feature_norm(logger, network, loader, n_classes, increments, tag, return_norm=False):
avg_feat_norm_per_cls = get_featnorm_grouped_by_class(network, n_classes, loader)
feature_norms = [[] for _ in range(len(increments))]
increments = np.cumsum(np.array(increments))
for idx in range(len(avg_feat_norm_per_cls)):
for i in range(len(feature_norms)):
if idx < increments[i]: #Find the mapping from class idx to step i.
break
feature_norms[i].append(round(avg_feat_norm_per_cls[idx], 3))
avg_feature_norm = []
for idx in range(len(feature_norms)):
avg_feature_norm.append(round(np.array(feature_norms[idx]).mean(), 3))
logger.info("%s: Feature norm per class %s" % (tag, str(avg_feature_norm)))
if return_norm:
return avg_feature_norm
else:
return
def check_loss(loss):
return not bool(torch.isnan(loss).item()) and bool((loss >= 0.0).item())
def compute_accuracy(ypred, ytrue, increments, n_classes):
all_acc = {"top1": {}, "top5": {}}
topk = 5 if n_classes >= 5 else n_classes
ncls = np.unique(ytrue).shape[0]
if topk > ncls:
topk = ncls
all_acc_meter = ClassErrorMeter(topk=[1, topk], accuracy=True)
all_acc_meter.add(ypred, ytrue)
all_acc["top1"]["total"] = round(all_acc_meter.value()[0], 3)
all_acc["top5"]["total"] = round(all_acc_meter.value()[1], 3)
# all_acc["total"] = round((ypred == ytrue).sum() / len(ytrue), 3)
# for class_id in range(0, np.max(ytrue), task_size):
start, end = 0, 0
for i in range(len(increments)):
if increments[i] <= 0:
pass
else:
start = end
end += increments[i]
idxes = np.where(np.logical_and(ytrue >= start, ytrue < end))[0]
topk_ = 5 if increments[i] >= 5 else increments[i]
ncls = np.unique(ytrue[idxes]).shape[0]
if topk_ > ncls:
topk_ = ncls
cur_acc_meter = ClassErrorMeter(topk=[1, topk_], accuracy=True)
cur_acc_meter.add(ypred[idxes], ytrue[idxes])
top1_acc = (ypred[idxes].argmax(1) == ytrue[idxes]).sum() / idxes.shape[0] * 100
if start < end:
label = "{}-{}".format(str(start).rjust(2, "0"), str(end - 1).rjust(2, "0"))
else:
label = "{}-{}".format(str(start).rjust(2, "0"), str(end).rjust(2, "0"))
all_acc["top1"][label] = round(top1_acc, 3)
all_acc["top5"][label] = round(cur_acc_meter.value()[1], 3)
# all_acc[label] = round((ypred[idxes] == ytrue[idxes]).sum() / len(idxes), 3)
return all_acc
def make_logger(log_name, savedir='.logs/'):
"""Set up the logger for saving log file on the disk
Args:
cfg: configuration dict
Return:
logger: a logger for record essential information
"""
import logging
import os
from logging.config import dictConfig
import time
logging_config = dict(
version=1,
formatters={'f_t': {
'format': '\n %(asctime)s | %(levelname)s | %(name)s \t %(message)s'
}},
handlers={
'stream_handler': {
'class': 'logging.StreamHandler',
'formatter': 'f_t',
'level': logging.INFO
},
'file_handler': {
'class': 'logging.FileHandler',
'formatter': 'f_t',
'level': logging.INFO,
'filename': None,
}
},
root={
'handlers': ['stream_handler', 'file_handler'],
'level': logging.DEBUG,
},
)
# set up logger
log_file = '{}.log'.format(log_name)
# if folder not exist,create it
if not os.path.exists(savedir):
os.makedirs(savedir)
log_file_path = os.path.join(savedir, log_file)
logging_config['handlers']['file_handler']['filename'] = log_file_path
open(log_file_path, 'w').close() # Clear the content of logfile
# get logger from dictConfig
dictConfig(logging_config)
logger = logging.getLogger()
return logger
| 6,969
| 33.85
| 97
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/tools/data_utils.py
|
import numpy as np
def construct_balanced_subset(x, y):
xdata, ydata = [], []
minsize = np.inf
for cls_ in np.unique(y):
xdata.append(x[y == cls_])
ydata.append(y[y == cls_])
if ydata[-1].shape[0] < minsize:
minsize = ydata[-1].shape[0]
for i in range(len(xdata)):
if xdata[i].shape[0] < minsize:
import pdb
pdb.set_trace()
idx = np.arange(xdata[i].shape[0])
np.random.shuffle(idx)
xdata[i] = xdata[i][idx][:minsize]
ydata[i] = ydata[i][idx][:minsize]
# !list
return np.concatenate(xdata, 0), np.concatenate(ydata, 0)
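
# ----------------------------------------------------------------------------
# Editor-added illustrative sketch (not part of the original file): balancing a
# toy imbalanced set. The array contents are assumptions made for this demo.
def _demo_balanced_subset():
    x = np.arange(10).reshape(10, 1)                 # 10 one-dimensional samples
    y = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 2])     # classes 0/1/2 with 6/3/1 samples
    xb, yb = construct_balanced_subset(x, y)
    # every class is randomly subsampled to the minority size (1 here), so yb
    # holds exactly one 0, one 1 and one 2, and xb the matching rows of x.
    return xb, yb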
| 643
| 29.666667
| 61
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/tools/scheduler.py
|
import math
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.lr_scheduler import ReduceLROnPlateau
class ConstantTaskLR:
def __init__(self, lr):
self._lr = lr
def get_lr(self, task_i):
return self._lr
class CosineAnnealTaskLR:
def __init__(self, lr_max, lr_min, task_max):
self._lr_max = lr_max
self._lr_min = lr_min
self._task_max = task_max
def get_lr(self, task_i):
return self._lr_min + (self._lr_max - self._lr_min) * (1 + math.cos(math.pi * task_i / self._task_max)) / 2
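
# ----------------------------------------------------------------------------
# Editor-added illustrative sketch (not part of the original file): the per-task
# cosine schedule above over 10 incremental steps. The lr values are assumptions.
def _demo_task_lr():
    sched = CosineAnnealTaskLR(lr_max=0.1, lr_min=0.01, task_max=10)
    # task 0 -> 0.1 (lr_max), task 5 -> 0.055 (midpoint), task 10 -> 0.01 (lr_min)
    return [round(sched.get_lr(t), 4) for t in range(11)]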
class GradualWarmupScheduler(_LRScheduler):
""" Gradually warm-up(increasing) learning rate in optimizer.
https://github.com/ildoonet/pytorch-gradual-warmup-lr
Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
Args:
optimizer (Optimizer): Wrapped optimizer.
        multiplier: target learning rate = base lr * multiplier if multiplier > 1.0;
            if multiplier = 1.0, the lr starts from 0 and ends up at base_lr.
        total_epoch: the target learning rate is reached gradually at total_epoch.
        after_scheduler: scheduler to use after total_epoch (e.g. ReduceLROnPlateau).
"""
def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
self.multiplier = multiplier
if self.multiplier < 1.:
            raise ValueError('multiplier should be greater than or equal to 1.')
self.total_epoch = total_epoch
self.after_scheduler = after_scheduler
self.finished = False
super(GradualWarmupScheduler, self).__init__(optimizer)
def get_lr(self):
if self.last_epoch > self.total_epoch:
if self.after_scheduler:
if not self.finished:
self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
self.finished = True
return self.after_scheduler.get_last_lr()
return [base_lr * self.multiplier for base_lr in self.base_lrs]
if self.multiplier == 1.0:
return [base_lr * (float(self.last_epoch) / self.total_epoch) for base_lr in self.base_lrs]
else:
return [
base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.)
for base_lr in self.base_lrs
]
def step_ReduceLROnPlateau(self, metrics, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch if epoch != 0 else 1 # ReduceLROnPlateau is called at the end of epoch, whereas others are called at beginning
if self.last_epoch <= self.total_epoch:
warmup_lr = [
base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.)
for base_lr in self.base_lrs
]
for param_group, lr in zip(self.optimizer.param_groups, warmup_lr):
param_group['lr'] = lr
else:
if epoch is None:
self.after_scheduler.step(metrics, None)
else:
self.after_scheduler.step(metrics, epoch - self.total_epoch)
def step(self, epoch=None, metrics=None):
if type(self.after_scheduler) != ReduceLROnPlateau:
if self.finished and self.after_scheduler:
if epoch is None:
self.after_scheduler.step(None)
else:
self.after_scheduler.step(epoch - self.total_epoch)
self._last_lr = self.after_scheduler.get_last_lr()
else:
return super(GradualWarmupScheduler, self).step(epoch)
else:
self.step_ReduceLROnPlateau(metrics, epoch)
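
# ----------------------------------------------------------------------------
# Editor-added illustrative sketch (not part of the original file): wrapping a
# MultiStepLR in the warmup scheduler above. The model, optimizer and schedule
# hyper-parameters below are assumptions made for this demo.
def _demo_warmup():
    import torch
    model = torch.nn.Linear(8, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    base = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[30, 60], gamma=0.1)
    # linear warmup from 0 to 0.1 over the first 5 epochs, then hand over to MultiStepLR
    scheduler = GradualWarmupScheduler(optimizer, multiplier=1.0, total_epoch=5, after_scheduler=base)
    lrs = []
    for _ in range(10):
        lrs.append(optimizer.param_groups[0]["lr"])
        optimizer.step()      # a real loop would compute a loss and backprop first
        scheduler.step()
    return lrs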
| 3,749
| 41.134831
| 152
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/tools/factory.py
|
import torch
from torch import nn
from torch import optim
from inclearn import models
from inclearn.convnet import resnet, cifar_resnet, modified_resnet_cifar, preact_resnet
from inclearn.datasets import data
def get_optimizer(params, optimizer, lr, weight_decay=0.0):
if optimizer == "adam":
return optim.Adam(params, lr=lr, weight_decay=weight_decay, betas=(0.9, 0.999))
elif optimizer == "sgd":
return optim.SGD(params, lr=lr, weight_decay=weight_decay, momentum=0.9)
else:
raise NotImplementedError
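
# ----------------------------------------------------------------------------
# Editor-added illustrative sketch (not part of the original file): typical use
# of the optimizer factory above. The hyper-parameter values are assumptions.
def _demo_get_optimizer():
    net = nn.Linear(16, 4)
    return get_optimizer(net.parameters(), "sgd", lr=0.1, weight_decay=5e-4)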
def get_convnet(convnet_type, **kwargs):
if convnet_type == "resnet18":
return resnet.resnet18(**kwargs)
elif convnet_type == "resnet32":
return cifar_resnet.resnet32()
elif convnet_type == "modified_resnet32":
return modified_resnet_cifar.resnet32(**kwargs)
elif convnet_type == "preact_resnet18":
return preact_resnet.PreActResNet18(**kwargs)
else:
raise NotImplementedError("Unknwon convnet type {}.".format(convnet_type))
def get_model(cfg, trial_i, _run, ex, tensorboard, inc_dataset):
if cfg["model"] == "incmodel":
return models.IncModel(cfg, trial_i, _run, ex, tensorboard, inc_dataset)
if cfg["model"] == "weight_align":
return models.Weight_Align(cfg, trial_i, _run, ex, tensorboard, inc_dataset)
if cfg["model"] == "bic":
return models.BiC(cfg, trial_i, _run, ex, tensorboard, inc_dataset)
else:
raise NotImplementedError(cfg["model"])
def get_data(cfg, trial_i):
return data.IncrementalDataset(
trial_i=trial_i,
dataset_name=cfg["dataset"],
random_order=cfg["random_classes"],
shuffle=True,
batch_size=cfg["batch_size"],
workers=cfg["workers"],
validation_split=cfg["validation"],
resampling=cfg["resampling"],
increment=cfg["increment"],
data_folder=cfg["data_folder"],
start_class=cfg["start_class"],
)
def set_device(cfg):
device_type = cfg["device"]
if device_type == -1:
device = torch.device("cpu")
else:
device = torch.device("cuda:{}".format(device_type))
cfg["device"] = device
return device
| 2,205
| 30.971014
| 87
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/tools/metrics.py
|
import numpy as np
import torch
import numbers
import math
class IncConfusionMeter:
"""Maintains a confusion matrix for a given calssification problem.
The ConfusionMeter constructs a confusion matrix for a multi-class
classification problems. It does not support multi-label, multi-class problems:
for such problems, please use MultiLabelConfusionMeter.
Args:
k (int): number of classes in the classification problem
normalized (boolean): Determines whether or not the confusion matrix
is normalized or not
"""
def __init__(self, k, increments, normalized=False):
self.conf = np.ndarray((k, k), dtype=np.int32)
self.normalized = normalized
self.increments = increments
self.cum_increments = [0] + [sum(increments[:i + 1]) for i in range(len(increments))]
self.k = k
self.reset()
def reset(self):
self.conf.fill(0)
def add(self, predicted, target):
"""Computes the confusion matrix of K x K size where K is no of classes
Args:
predicted (tensor): Can be an N x K tensor of predicted scores obtained from
the model for N examples and K classes or an N-tensor of
integer values between 0 and K-1.
target (tensor): Can be a N-tensor of integer values assumed to be integer
values between 0 and K-1 or N x K tensor, where targets are
assumed to be provided as one-hot vectors
"""
if isinstance(predicted, torch.Tensor):
predicted = predicted.cpu().numpy()
if isinstance(target, torch.Tensor):
target = target.cpu().numpy()
assert predicted.shape[0] == target.shape[0], \
'number of targets and predicted outputs do not match'
if np.ndim(predicted) != 1:
assert predicted.shape[1] == self.k, \
'number of predictions does not match size of confusion matrix'
predicted = np.argmax(predicted, 1)
else:
assert (predicted.max() < self.k) and (predicted.min() >= 0), \
                'predicted values are not between 0 and k-1'
onehot_target = np.ndim(target) != 1
if onehot_target:
assert target.shape[1] == self.k, \
'Onehot target does not match size of confusion matrix'
assert (target >= 0).all() and (target <= 1).all(), \
'in one-hot encoding, target values should be 0 or 1'
assert (target.sum(1) == 1).all(), \
'multi-label setting is not supported'
target = np.argmax(target, 1)
else:
assert (predicted.max() < self.k) and (predicted.min() >= 0), \
'predicted values are not between 0 and k-1'
# hack for bincounting 2 arrays together
x = predicted + self.k * target
bincount_2d = np.bincount(x.astype(np.int32), minlength=self.k**2)
assert bincount_2d.size == self.k**2
conf = bincount_2d.reshape((self.k, self.k))
self.conf += conf
def value(self):
"""
Returns:
            Confusion matrix of K rows and K columns, where rows correspond
            to ground-truth targets and columns correspond to predicted
targets.
"""
conf = self.conf.astype(np.float32)
new_conf = np.zeros([len(self.increments), len(self.increments) + 2])
for i in range(len(self.increments)):
idxs = range(self.cum_increments[i], self.cum_increments[i + 1])
new_conf[i, 0] = conf[idxs, idxs].sum()
new_conf[i, 1] = conf[self.cum_increments[i]:self.cum_increments[i + 1],
self.cum_increments[i]:self.cum_increments[i + 1]].sum() - new_conf[i, 0]
for j in range(len(self.increments)):
new_conf[i, j + 2] = conf[self.cum_increments[i]:self.cum_increments[i + 1],
self.cum_increments[j]:self.cum_increments[j + 1]].sum()
conf = new_conf
if self.normalized:
return conf / conf[:, 2:].sum(1).clip(min=1e-12)[:, None]
else:
return conf
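
# ----------------------------------------------------------------------------
# Editor-added illustrative sketch (not part of the original file): a 4-class
# problem split into two increments of 2 classes. The predictions are made up.
def _demo_inc_confusion():
    meter = IncConfusionMeter(k=4, increments=[2, 2])
    predicted = np.array([0, 1, 1, 2, 3, 0])   # predicted class indices
    target = np.array([0, 1, 0, 2, 3, 2])      # ground-truth class indices
    meter.add(predicted, target)
    # Each row i of meter.value() summarises step i: correct count, confusion
    # within the step, then counts against every step's block of classes.
    return meter.value()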
class ClassErrorMeter:
def __init__(self, topk=[1], accuracy=False):
super(ClassErrorMeter, self).__init__()
self.topk = np.sort(topk)
self.accuracy = accuracy
self.reset()
def reset(self):
self.sum = {v: 0 for v in self.topk}
self.n = 0
def add(self, output, target):
if isinstance(output, np.ndarray):
output = torch.Tensor(output)
if isinstance(target, np.ndarray):
target = torch.Tensor(target)
# if torch.is_tensor(output):
# output = output.cpu().squeeze().numpy()
# if torch.is_tensor(target):
# target = target.cpu().squeeze().numpy()
# elif isinstance(target, numbers.Number):
# target = np.asarray([target])
# if np.ndim(output) == 1:
# output = output[np.newaxis]
# else:
# assert np.ndim(output) == 2, \
# 'wrong output size (1D or 2D expected)'
# assert np.ndim(target) == 1, \
# 'target and output do not match'
# assert target.shape[0] == output.shape[0], \
# 'target and output do not match'
topk = self.topk
maxk = int(topk[-1]) # seems like Python3 wants int and not np.int64
no = output.shape[0]
pred = output.topk(maxk, 1, True, True)[1]
correct = pred == target.unsqueeze(1).repeat(1, pred.shape[1])
# pred = torch.from_numpy(output).topk(maxk, 1, True, True)[1].numpy()
# correct = pred == target[:, np.newaxis].repeat(pred.shape[1], 1)
for k in topk:
self.sum[k] += no - correct[:, 0:k].sum()
self.n += no
def value(self, k=-1):
if k != -1:
assert k in self.sum.keys(), \
'invalid k (this k was not provided at construction time)'
if self.n == 0:
return float('nan')
if self.accuracy:
return (1. - float(self.sum[k]) / self.n) * 100.0
else:
return float(self.sum[k]) / self.n * 100.0
else:
return [self.value(k_) for k_ in self.topk]
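
# ----------------------------------------------------------------------------
# Editor-added illustrative sketch (not part of the original file): top-1 and
# top-2 accuracy on three dummy predictions. The scores below are made up.
def _demo_class_error_meter():
    meter = ClassErrorMeter(topk=[1, 2], accuracy=True)
    output = np.array([[0.1, 0.7, 0.2],    # top-1 is class 1 (correct)
                       [0.5, 0.3, 0.2],    # top-1 is class 0, true class is 1
                       [0.2, 0.3, 0.5]])   # top-1 is class 2 (correct)
    target = np.array([1, 1, 2])
    meter.add(output, target)
    # value() -> [top-1 accuracy ~= 66.7, top-2 accuracy == 100.0]
    return meter.value()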
class AverageValueMeter:
def __init__(self):
super(AverageValueMeter, self).__init__()
self.reset()
self.val = 0
def add(self, value, n=1):
self.val = value
self.sum += value
self.var += value * value
self.n += n
if self.n == 0:
self.mean, self.std = np.nan, np.nan
elif self.n == 1:
self.mean, self.std = self.sum, np.inf
self.mean_old = self.mean
self.m_s = 0.0
else:
self.mean = self.mean_old + (value - n * self.mean_old) / float(self.n)
self.m_s += (value - self.mean_old) * (value - self.mean)
self.mean_old = self.mean
self.std = math.sqrt(self.m_s / (self.n - 1.0))
def value(self):
return self.mean, self.std
def reset(self):
self.n = 0
self.sum = 0.0
self.var = 0.0
self.val = 0.0
self.mean = np.nan
self.mean_old = 0.0
self.m_s = 0.0
self.std = np.nan
| 7,415
| 37.625
| 107
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/tools/__init__.py
| 0
| 0
| 0
|
py
|
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/convnet/resnet.py
|
"""Taken & slightly modified from:
* https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
"""
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torch.nn import functional as F
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, remove_last_relu=False):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.remove_last_relu = remove_last_relu
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
if not self.remove_last_relu:
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block,
layers,
nf=64,
zero_init_residual=True,
dataset='cifar',
start_class=0,
remove_last_relu=False):
super(ResNet, self).__init__()
self.remove_last_relu = remove_last_relu
self.inplanes = nf
if 'cifar' in dataset:
self.conv1 = nn.Sequential(nn.Conv2d(3, nf, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(nf), nn.ReLU(inplace=True))
elif 'imagenet' in dataset:
if start_class == 0:
self.conv1 = nn.Sequential(
nn.Conv2d(3, nf, kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(nf),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
else:
# Following the PODNet implementation
self.conv1 = nn.Sequential(
nn.Conv2d(3, nf, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(nf),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
self.layer1 = self._make_layer(block, 1 * nf, layers[0])
self.layer2 = self._make_layer(block, 2 * nf, layers[1], stride=2)
self.layer3 = self._make_layer(block, 4 * nf, layers[2], stride=2)
self.layer4 = self._make_layer(block, 8 * nf, layers[3], stride=2, remove_last_relu=remove_last_relu)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.out_dim = 8 * nf * block.expansion
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, remove_last_relu=False, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
if remove_last_relu:
for i in range(1, blocks - 1):
layers.append(block(self.inplanes, planes))
layers.append(block(self.inplanes, planes, remove_last_relu=True))
else:
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def reset_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.reset_running_stats()
def forward(self, x):
x = self.conv1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
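# A minimal usage sketch (toy input, illustration only): the constructors above
# return headless feature extractors, so a forward pass yields a flat vector of
# size `out_dim` per sample.
if __name__ == "__main__":
    import torch

    net = resnet18(dataset="cifar", nf=64)
    x = torch.randn(2, 3, 32, 32)
    print(net(x).shape, net.out_dim)  # torch.Size([2, 512]) 512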
| 8,130
| 32.460905
| 109
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/convnet/network.py
|
import copy
import pdb
import torch
from torch import nn
import torch.nn.functional as F
from inclearn.tools import factory
from inclearn.convnet.imbalance import BiC, WA
from inclearn.convnet.classifier import CosineClassifier
class BasicNet(nn.Module):
def __init__(
self,
convnet_type,
cfg,
nf=64,
use_bias=False,
init="kaiming",
device=None,
dataset="cifar100",
):
super(BasicNet, self).__init__()
self.nf = nf
self.init = init
self.convnet_type = convnet_type
self.dataset = dataset
self.start_class = cfg['start_class']
self.weight_normalization = cfg['weight_normalization']
self.remove_last_relu = True if self.weight_normalization else False
self.use_bias = use_bias if not self.weight_normalization else False
self.der = cfg['der']
self.aux_nplus1 = cfg['aux_n+1']
self.reuse_oldfc = cfg['reuse_oldfc']
if self.der:
print("Enable dynamical reprensetation expansion!")
self.convnets = nn.ModuleList()
self.convnets.append(
factory.get_convnet(convnet_type,
nf=nf,
dataset=dataset,
start_class=self.start_class,
remove_last_relu=self.remove_last_relu))
self.out_dim = self.convnets[0].out_dim
else:
self.convnet = factory.get_convnet(convnet_type,
nf=nf,
dataset=dataset,
remove_last_relu=self.remove_last_relu)
self.out_dim = self.convnet.out_dim
self.classifier = None
self.aux_classifier = None
self.n_classes = 0
self.ntask = 0
self.device = device
if cfg['postprocessor']['enable']:
if cfg['postprocessor']['type'].lower() == "bic":
self.postprocessor = BiC(cfg['postprocessor']["lr"], cfg['postprocessor']["scheduling"],
cfg['postprocessor']["lr_decay_factor"], cfg['postprocessor']["weight_decay"],
cfg['postprocessor']["batch_size"], cfg['postprocessor']["epochs"])
elif cfg['postprocessor']['type'].lower() == "wa":
self.postprocessor = WA()
else:
self.postprocessor = None
self.to(self.device)
def forward(self, x):
if self.classifier is None:
raise Exception("Add some classes before training.")
if self.der:
features = [convnet(x) for convnet in self.convnets]
features = torch.cat(features, 1)
else:
features = self.convnet(x)
logits = self.classifier(features)
aux_logits = self.aux_classifier(features[:, -self.out_dim:]) if features.shape[1] > self.out_dim else None
return {'feature': features, 'logit': logits, 'aux_logit': aux_logits}
@property
def features_dim(self):
if self.der:
return self.out_dim * len(self.convnets)
else:
return self.out_dim
def freeze(self):
for param in self.parameters():
param.requires_grad = False
self.eval()
return self
def copy(self):
return copy.deepcopy(self)
def add_classes(self, n_classes):
self.ntask += 1
if self.der:
self._add_classes_multi_fc(n_classes)
else:
self._add_classes_single_fc(n_classes)
self.n_classes += n_classes
def _add_classes_multi_fc(self, n_classes):
if self.ntask > 1:
new_clf = factory.get_convnet(self.convnet_type,
nf=self.nf,
dataset=self.dataset,
start_class=self.start_class,
remove_last_relu=self.remove_last_relu).to(self.device)
new_clf.load_state_dict(self.convnets[-1].state_dict())
self.convnets.append(new_clf)
if self.classifier is not None:
weight = copy.deepcopy(self.classifier.weight.data)
fc = self._gen_classifier(self.out_dim * len(self.convnets), self.n_classes + n_classes)
if self.classifier is not None and self.reuse_oldfc:
fc.weight.data[:self.n_classes, :self.out_dim * (len(self.convnets) - 1)] = weight
del self.classifier
self.classifier = fc
if self.aux_nplus1:
aux_fc = self._gen_classifier(self.out_dim, n_classes + 1)
else:
aux_fc = self._gen_classifier(self.out_dim, self.n_classes + n_classes)
del self.aux_classifier
self.aux_classifier = aux_fc
def _add_classes_single_fc(self, n_classes):
if self.classifier is not None:
weight = copy.deepcopy(self.classifier.weight.data)
if self.use_bias:
bias = copy.deepcopy(self.classifier.bias.data)
classifier = self._gen_classifier(self.features_dim, self.n_classes + n_classes)
if self.classifier is not None and self.reuse_oldfc:
classifier.weight.data[:self.n_classes] = weight
if self.use_bias:
classifier.bias.data[:self.n_classes] = bias
del self.classifier
self.classifier = classifier
def _gen_classifier(self, in_features, n_classes):
if self.weight_normalization:
classifier = CosineClassifier(in_features, n_classes).to(self.device)
else:
classifier = nn.Linear(in_features, n_classes, bias=self.use_bias).to(self.device)
if self.init == "kaiming":
nn.init.kaiming_normal_(classifier.weight, nonlinearity="linear")
if self.use_bias:
nn.init.constant_(classifier.bias, 0.0)
return classifier
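# A schematic sketch of the DER forward pass implemented above, using tiny
# stand-in modules instead of real backbones (all sizes are hypothetical):
# features of every branch are concatenated, and only the newest branch's slice
# feeds the auxiliary head.
if __name__ == "__main__":
    import torch
    from torch import nn

    out_dim, n_branches, n_new = 8, 3, 4
    convnets = nn.ModuleList([nn.Linear(16, out_dim) for _ in range(n_branches)])
    classifier = nn.Linear(out_dim * n_branches, 12)   # 12 classes seen so far
    aux_classifier = nn.Linear(out_dim, n_new + 1)     # "aux_n+1": new classes + 1 old bucket

    x = torch.randn(2, 16)
    features = torch.cat([net(x) for net in convnets], 1)
    logits = classifier(features)
    aux_logits = aux_classifier(features[:, -out_dim:])  # newest branch only
    print(features.shape, logits.shape, aux_logits.shape)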
| 6,100
| 35.532934
| 119
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/convnet/imbalance.py
|
import torch
import torch.nn.functional as F
from torch import nn
import numpy as np
from torch.optim.lr_scheduler import CosineAnnealingLR
class BiC(nn.Module):
def __init__(self, lr, scheduling, lr_decay_factor, weight_decay, batch_size, epochs):
super(BiC, self).__init__()
self.beta = torch.nn.Parameter(torch.ones(1)) #.cuda()
self.gamma = torch.nn.Parameter(torch.zeros(1)) #.cuda()
self.lr = lr
self.scheduling = scheduling
self.lr_decay_factor = lr_decay_factor
self.weight_decay = weight_decay
self.class_specific = False
self.batch_size = batch_size
self.epochs = epochs
self.bic_flag = False
def reset(self, lr=None, scheduling=None, lr_decay_factor=None, weight_decay=None, n_classes=-1):
with torch.no_grad():
if lr is None:
lr = self.lr
if scheduling is None:
scheduling = self.scheduling
if lr_decay_factor is None:
lr_decay_factor = self.lr_decay_factor
if weight_decay is None:
weight_decay = self.weight_decay
if self.class_specific:
assert n_classes != -1
self.beta = torch.nn.Parameter(torch.ones(n_classes).cuda())
self.gamma = torch.nn.Parameter(torch.zeros(n_classes).cuda())
else:
self.beta = torch.nn.Parameter(torch.ones(1).cuda())
self.gamma = torch.nn.Parameter(torch.zeros(1).cuda())
self.optimizer = torch.optim.SGD([self.beta, self.gamma], lr=lr, momentum=0.9, weight_decay=weight_decay)
# self.scheduler = CosineAnnealingLR(self.optimizer, 10)
self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, scheduling, gamma=lr_decay_factor)
def extract_preds_and_targets(self, model, loader):
preds, targets = [], []
with torch.no_grad():
for (x, y) in loader:
preds.append(model(x.cuda())['logit'])
targets.append(y.cuda())
return torch.cat((preds)), torch.cat((targets))
def update(self, logger, task_size, model, loader, loss_criterion=None):
if task_size == 0:
logger.info("no new task for BiC!")
return
if loss_criterion is None:
loss_criterion = F.cross_entropy
self.bic_flag = True
logger.info("Begin BiC ...")
model.eval()
for epoch in range(self.epochs):
preds_, targets_ = self.extract_preds_and_targets(model, loader)
order = np.arange(preds_.shape[0])
np.random.shuffle(order)
preds, targets = preds_.clone(), targets_.clone()
preds, targets = preds[order], targets[order]
_loss = 0.0
_correct = 0
_count = 0
for start in range(0, preds.shape[0], self.batch_size):
if start + self.batch_size < preds.shape[0]:
out = preds[start:start + self.batch_size, :].clone()
lbls = targets[start:start + self.batch_size]
else:
out = preds[start:, :].clone()
lbls = targets[start:]
if self.class_specific is False:
out1 = out[:, :-task_size].clone()
out2 = out[:, -task_size:].clone()
outputs = torch.cat((out1, out2 * self.beta + self.gamma), 1)
else:
outputs = out * self.beta + self.gamma
loss = loss_criterion(outputs, lbls)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
_, pred = outputs.max(1)
_correct += (pred == lbls).sum()
_count += lbls.size(0)
_loss += loss.item() * outputs.shape[0]
logger.info("epoch {} loss {:4f} acc {:4f}".format(epoch, _loss / preds.shape[0], _correct / _count))
self.scheduler.step()
logger.info("beta {:.4f} gamma {:.4f}".format(self.beta.cpu().item(), self.gamma.cpu().item()))
@torch.no_grad()
def post_process(self, preds, task_size):
if self.class_specific is False:
if task_size != 0:
preds[:, -task_size:] = preds[:, -task_size:] * self.beta + self.gamma
else:
preds = preds * self.beta + self.gamma
return preds
class WA(object):
def __init__(self):
self.gamma = None
@torch.no_grad()
def update(self, classifier, task_size):
old_weight_norm = torch.norm(classifier.weight[:-task_size], p=2, dim=1)
new_weight_norm = torch.norm(classifier.weight[-task_size:], p=2, dim=1)
self.gamma = old_weight_norm.mean() / new_weight_norm.mean()
print(self.gamma.cpu().item())
@torch.no_grad()
def post_process(self, logits, task_size):
logits[:, -task_size:] = logits[:, -task_size:] * self.gamma
return logits
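# A minimal sketch of the weight-aligning (WA) post-processor above, assuming a
# toy 10-class linear head whose last 5 classes belong to the newest task.
if __name__ == "__main__":
    head = nn.Linear(16, 10, bias=False)
    wa = WA()
    wa.update(head, task_size=5)        # gamma = mean old-row norm / mean new-row norm
    logits = torch.randn(4, 10)
    rescaled = wa.post_process(logits, task_size=5)  # scales the new-class logits in place
    print(rescaled.shape)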
| 5,074
| 40.260163
| 117
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/convnet/utils.py
|
import numpy as np
import torch
from torch import nn
from torch.optim import SGD
import torch.nn.functional as F
from inclearn.tools.metrics import ClassErrorMeter, AverageValueMeter
def finetune_last_layer(
logger,
network,
loader,
n_class,
nepoch=30,
lr=0.1,
scheduling=[15, 35],
lr_decay=0.1,
weight_decay=5e-4,
loss_type="ce",
temperature=5.0,
test_loader=None,
):
network.eval()
#if hasattr(network.module, "convnets"):
# for net in network.module.convnets:
# net.eval()
#else:
# network.module.convnet.eval()
optim = SGD(network.module.classifier.parameters(), lr=lr, momentum=0.9, weight_decay=weight_decay)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optim, scheduling, gamma=lr_decay)
if loss_type == "ce":
criterion = nn.CrossEntropyLoss()
else:
criterion = nn.BCEWithLogitsLoss()
logger.info("Begin finetuning last layer")
for i in range(nepoch):
total_loss = 0.0
total_correct = 0.0
total_count = 0
# print(f"dataset loader length {len(loader.dataset)}")
for inputs, targets in loader:
inputs, targets = inputs.cuda(), targets.cuda()
if loss_type == "bce":
targets = to_onehot(targets, n_class)
outputs = network(inputs)['logit']
_, preds = outputs.max(1)
optim.zero_grad()
loss = criterion(outputs / temperature, targets)
loss.backward()
optim.step()
total_loss += loss * inputs.size(0)
total_correct += (preds == targets).sum()
total_count += inputs.size(0)
if test_loader is not None:
test_correct = 0.0
test_count = 0.0
with torch.no_grad():
for inputs, targets in test_loader:
outputs = network(inputs.cuda())['logit']
_, preds = outputs.max(1)
test_correct += (preds.cpu() == targets).sum().item()
test_count += inputs.size(0)
scheduler.step()
if test_loader is not None:
logger.info(
"Epoch %d finetuning loss %.3f acc %.3f Eval %.3f" %
(i, total_loss.item() / total_count, total_correct.item() / total_count, test_correct / test_count))
else:
logger.info("Epoch %d finetuning loss %.3f acc %.3f" %
(i, total_loss.item() / total_count, total_correct.item() / total_count))
return network
def extract_features(model, loader):
targets, features = [], []
model.eval()
with torch.no_grad():
for _inputs, _targets in loader:
_inputs = _inputs.cuda()
_targets = _targets.numpy()
_features = model(_inputs)['feature'].detach().cpu().numpy()
features.append(_features)
targets.append(_targets)
return np.concatenate(features), np.concatenate(targets)
def calc_class_mean(network, loader, class_idx, metric):
EPSILON = 1e-8
features, targets = extract_features(network, loader)
# norm_feats = features/(np.linalg.norm(features, axis=1)[:,np.newaxis]+EPSILON)
# examplar_mean = norm_feats.mean(axis=0)
examplar_mean = features.mean(axis=0)
if metric == "cosine" or metric == "weight":
examplar_mean /= (np.linalg.norm(examplar_mean) + EPSILON)
return examplar_mean
def update_classes_mean(network, inc_dataset, n_classes, task_size, share_memory=None, metric="cosine", EPSILON=1e-8):
loader = inc_dataset._get_loader(inc_dataset.data_inc,
inc_dataset.targets_inc,
shuffle=False,
share_memory=share_memory,
mode="test")
class_means = np.zeros((n_classes, network.module.features_dim))
count = np.zeros(n_classes)
network.eval()
with torch.no_grad():
for x, y in loader:
feat = network(x.cuda())['feature']
for lbl in torch.unique(y):
class_means[lbl] += feat[y == lbl].sum(0).cpu().numpy()
count[lbl] += feat[y == lbl].shape[0]
for i in range(n_classes):
class_means[i] /= count[i]
if metric == "cosine" or metric == "weight":
class_means[i] /= (np.linalg.norm(class_means[i]) + EPSILON)  # normalize each class prototype to unit length
return class_means
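# A GPU-free sketch of what update_classes_mean() computes: one mean feature
# vector ("prototype") per class, L2-normalised for the cosine metric. Fake
# features are used purely for illustration.
if __name__ == "__main__":
    EPSILON = 1e-8
    labels = np.repeat(np.arange(10), 10)     # 10 classes x 10 samples
    feats = np.random.randn(100, 64)          # fake extracted features
    protos = np.zeros((10, 64))
    for c in range(10):
        protos[c] = feats[labels == c].mean(axis=0)
        protos[c] /= (np.linalg.norm(protos[c]) + EPSILON)
    print(protos.shape)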
| 4,496
| 35.266129
| 118
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/convnet/classifier.py
|
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn import functional as F
from torch.nn import Module
class CosineClassifier(Module):
def __init__(self, in_features, n_classes, sigma=True):
super(CosineClassifier, self).__init__()
self.in_features = in_features
self.out_features = n_classes
self.weight = Parameter(torch.Tensor(n_classes, in_features))
if sigma:
self.sigma = Parameter(torch.Tensor(1))
else:
self.register_parameter('sigma', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.sigma is not None:
self.sigma.data.fill_(1)  # initialization of sigma
def forward(self, input):
out = F.linear(F.normalize(input, p=2, dim=1), F.normalize(self.weight, p=2, dim=1))
if self.sigma is not None:
out = self.sigma * out
return out
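# A small sketch of the cosine classifier above (toy sizes): logits are scaled
# cosine similarities, so each one lies in [-sigma, sigma].
if __name__ == "__main__":
    clf = CosineClassifier(in_features=64, n_classes=10)
    x = torch.randn(4, 64)
    logits = clf(x)
    print(logits.shape, float(logits.abs().max()) <= float(clf.sigma.abs()) + 1e-5)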
| 1,035
| 31.375
| 92
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/convnet/preact_resnet.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class PreActBlock(nn.Module):
'''Pre-activation version of the BasicBlock.'''
expansion = 1
def __init__(self, in_planes, planes, stride=1, remove_last_relu=False):
super(PreActBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.remove_last_relu = remove_last_relu
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False))
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out += shortcut
out = self.bn3(out)
if not self.remove_last_relu:
out = F.relu(out)
return out
class PreActBottleneck(nn.Module):
'''Pre-activation version of the original Bottleneck module.'''
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(PreActBottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False))
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out = self.conv3(F.relu(self.bn3(out)))
out += shortcut
return out
class PreActResNet(nn.Module):
def __init__(self,
block,
num_blocks,
nf=64,
zero_init_residual=True,
dataset="cifar",
start_class=0,
remove_last_relu=False):
super(PreActResNet, self).__init__()
self.in_planes = nf
self.dataset = dataset
self.remove_last_relu = remove_last_relu
if 'cifar' in dataset:
self.conv1 = nn.Conv2d(3, nf, kernel_size=3, stride=1, padding=1, bias=False)
else:
self.conv1 = nn.Sequential(nn.Conv2d(3, nf, kernel_size=7, stride=2, padding=3, bias=False),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
self.layer1 = self._make_layer(block, 1 * nf, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 2 * nf, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 4 * nf, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 8 * nf, num_blocks[3], stride=2, remove_last_relu=remove_last_relu)
self.out_dim = 8 * nf
if 'cifar' in dataset:
self.avgpool = nn.AvgPool2d(4)
elif 'imagenet' in dataset:
self.avgpool = nn.AvgPool2d(7)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# ---------------------------------------------
# if zero_init_residual:
# for m in self.modules():
# if isinstance(m, PreActBlock):
# nn.init.constant_(m.bn2.weight, 0)
# elif isinstance(m, PreActBottleneck):
# nn.init.constant_(m.bn3.weight, 0)
# ---------------------------------------------
def _make_layer(self, block, planes, num_blocks, stride, remove_last_relu=False):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
if remove_last_relu:
for i in range(len(strides) - 1):
layers.append(block(self.in_planes, planes, strides[i]))
self.in_planes = planes * block.expansion
layers.append(block(self.in_planes, planes, strides[-1], remove_last_relu=True))
self.in_planes = planes * block.expansion
else:
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
return out
def PreActResNet18(**kwargs):
return PreActResNet(PreActBlock, [2, 2, 2, 2], **kwargs)
def PreActResNet34(**kwargs):
return PreActResNet(PreActBlock, [3, 4, 6, 3], **kwargs)
def PreActResNet50(**kwargs):
return PreActResNet(PreActBottleneck, [3, 4, 6, 3], **kwargs)
def PreActResNet101(**kwargs):
return PreActResNet(PreActBottleneck, [3, 4, 23, 3], **kwargs)
def PreActResNet152(**kwargs):
return PreActResNet(PreActBottleneck, [3, 8, 36, 3], **kwargs)
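# A minimal usage sketch of the pre-activation backbone above (toy CIFAR-sized
# input, illustration only).
if __name__ == "__main__":
    net = PreActResNet18(dataset="cifar", nf=64)
    x = torch.randn(2, 3, 32, 32)
    print(net(x).shape, net.out_dim)  # torch.Size([2, 512]) 512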
| 5,859
| 37.552632
| 113
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/convnet/cifar_resnet.py
|
''' Incremental-Classifier Learning
Authors : Khurram Javed, Muhammad Talha Paracha
Maintainer : Khurram Javed
Lab : TUKL-SEECS R&D Lab
Email : 14besekjaved@seecs.edu.pk '''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class DownsampleA(nn.Module):
def __init__(self, nIn, nOut, stride):
super(DownsampleA, self).__init__()
assert stride == 2
self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)
def forward(self, x):
x = self.avg(x)
return torch.cat((x, x.mul(0)), 1)
class ResNetBasicblock(nn.Module):
expansion = 1
"""
ResNet basic block (https://github.com/facebook/fb.resnet.torch/blob/master/models/resnet.lua)
"""
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(ResNetBasicblock, self).__init__()
self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn_a = nn.BatchNorm2d(planes)
self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_b = nn.BatchNorm2d(planes)
self.downsample = downsample
self.featureSize = 64
def forward(self, x):
residual = x
basicblock = self.conv_a(x)
basicblock = self.bn_a(basicblock)
basicblock = F.relu(basicblock, inplace=True)
basicblock = self.conv_b(basicblock)
basicblock = self.bn_b(basicblock)
if self.downsample is not None:
residual = self.downsample(x)
return F.relu(residual + basicblock, inplace=True)
class CifarResNet(nn.Module):
"""
ResNet optimized for the Cifar Dataset, as specified in
https://arxiv.org/abs/1512.03385.pdf
"""
def __init__(self, block, depth, num_classes, channels=3):
""" Constructor
Args:
depth: number of layers.
num_classes: number of classes
base_width: base width
"""
super(CifarResNet, self).__init__()
self.featureSize = 64
# Model type specifies number of layers for CIFAR-10 and CIFAR-100 model
assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'
layer_blocks = (depth - 2) // 6
self.num_classes = num_classes
self.conv_1_3x3 = nn.Conv2d(channels, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_1 = nn.BatchNorm2d(16)
self.inplanes = 16
self.stage_1 = self._make_layer(block, 16, layer_blocks, 1)
self.stage_2 = self._make_layer(block, 32, layer_blocks, 2)
self.stage_3 = self._make_layer(block, 64, layer_blocks, 2)
self.avgpool = nn.AvgPool2d(8)
self.out_dim = 64 * block.expansion
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
# m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal(m.weight)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = DownsampleA(self.inplanes, planes * block.expansion, stride)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x, feature=False, T=1, labels=False, scale=None, keep=None):
x = self.conv_1_3x3(x)
x = F.relu(self.bn_1(x), inplace=True)
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
def forwardFeature(self, x):
pass
def resnet20(num_classes=10):
"""Constructs a ResNet-20 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 20, num_classes)
return model
def resnet10mnist(num_classes=10):
"""Constructs a ResNet-20 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 10, num_classes, 1)
return model
def resnet20mnist(num_classes=10):
"""Constructs a ResNet-20 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 20, num_classes, 1)
return model
def resnet32mnist(num_classes=10, channels=1):
model = CifarResNet(ResNetBasicblock, 32, num_classes, channels)
return model
def resnet32(num_classes=10):
"""Constructs a ResNet-32 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 32, num_classes)
return model
def resnet44(num_classes=10):
"""Constructs a ResNet-44 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 44, num_classes)
return model
def resnet56(num_classes=10):
"""Constructs a ResNet-56 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 56, num_classes)
return model
def resnet110(num_classes=10):
"""Constructs a ResNet-110 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 110, num_classes)
return model
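# A minimal usage sketch (toy input, illustration only): despite the num_classes
# argument, CifarResNet has no final fc layer and returns 64-d features.
if __name__ == "__main__":
    net = resnet32()
    x = torch.randn(2, 3, 32, 32)
    print(net(x).shape, net.out_dim)  # torch.Size([2, 64]) 64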
| 5,944
| 29.331633
| 102
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/convnet/modified_resnet_cifar.py
|
"""Taken & slightly modified from:
* https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
"""
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torch.nn import functional as F
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, remove_last_relu=False):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.remove_last_relu = remove_last_relu
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
if not self.remove_last_relu:
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, nf=16, dataset='cifar', start_class=0, remove_last_relu=False):
super(ResNet, self).__init__()
self.inplanes = nf
self.conv1 = nn.Conv2d(3, nf, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(nf)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 1 * nf, layers[0])
self.layer2 = self._make_layer(block, 2 * nf, layers[1], stride=2)
self.layer3 = self._make_layer(block, 4 * nf, layers[2], stride=2)
self.avgpool = nn.AvgPool2d(8, stride=1)
self.out_dim = 4 * nf * block.expansion
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1, remove_last_relu=False):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
if remove_last_relu:
for i in range(1, blocks - 1):
layers.append(block(self.inplanes, planes))
layers.append(block(self.inplanes, planes, remove_last_relu=True))
else:
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def reset_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.reset_running_stats()
def forward(self, x, pool=True):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
def resnet20(pretrained=False, **kwargs):
n = 3
model = ResNet(BasicBlock, [n, n, n], **kwargs)
return model
def resnet32(pretrained=False, **kwargs):
n = 5
model = ResNet(BasicBlock, [n, n, n], **kwargs)
return model
| 4,519
| 32.731343
| 109
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/convnet/__init__.py
| 0
| 0
| 0
|
py
|
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/models/base.py
|
import abc
import logging
import torch
import torch.nn.functional as F
import numpy as np
from inclearn.tools.metrics import ClassErrorMeter
LOGGER = logging.Logger("IncLearn", level="INFO")
class IncrementalLearner(abc.ABC):
"""Base incremental learner.
Methods are called in this order (& repeated for each new task):
1. set_task_info
2. before_task
3. train_task
4. after_task
5. eval_task
"""
def __init__(self, *args, **kwargs):
self._increments = []
self._seen_classes = []
def set_task_info(self, task, total_n_classes, increment, n_train_data, n_test_data, n_tasks):
self._task = task
self._task_size = increment
self._increments.append(self._task_size)
self._total_n_classes = total_n_classes
self._n_train_data = n_train_data
self._n_test_data = n_test_data
self._n_tasks = n_tasks
def before_task(self, taski, inc_dataset):
LOGGER.info("Before task")
self.eval()
self._before_task(taski, inc_dataset)
def train_task(self, train_loader, val_loader):
LOGGER.info("train task")
self.train()
self._train_task(train_loader, val_loader)
def after_task(self, taski, inc_dataset):
LOGGER.info("after task")
self.eval()
self._after_task(taski, inc_dataset)
def eval_task(self, data_loader):
LOGGER.info("eval task")
self.eval()
return self._eval_task(data_loader)
def get_memory(self):
return None
def eval(self):
raise NotImplementedError
def train(self):
raise NotImplementedError
def _before_task(self, data_loader):
pass
def _train_task(self, train_loader, val_loader):
raise NotImplementedError
def _after_task(self, data_loader):
pass
def _eval_task(self, data_loader):
raise NotImplementedError
@property
def _new_task_index(self):
return self._task * self._task_size
@property
def _memory_per_class(self):
"""Returns the number of examplars per class."""
return self._memory_size.mem_per_cls
def _after_epoch(self, epoch, avg_loss, train_new_accu, train_old_accu, accu):
self._run.log_scalar(f"train_loss_trial{self._trial_i}_task{self._task}", avg_loss, epoch + 1)
self._tensorboard.add_scalar(f"trial{self._trial_i}_task{self._task}/train_loss", avg_loss, epoch + 1)
# self._run.log_scalar(f"train_new_accu_trial{self._trial_i}_task{self._task}",
# train_new_accu.value()[0], epoch + 1)
# self._tensorboard.add_scalar(f"trial{self._trial_i}_task{self._task}/train_new_accu",
# train_new_accu.value()[0], epoch + 1)
# if self._task != 0:
# self._run.log_scalar(f"train_old_accu_trial{self._trial_i}_task{self._task}",
# train_old_accu.value()[0], epoch + 1)
# self._tensorboard.add_scalar(f"trial{self._trial_i}_task{self._task}/train_old_accu",
# train_old_accu.value()[0], epoch + 1)
self._run.log_scalar(f"train_accu_trial{self._trial_i}_task{self._task}", accu.value()[0], epoch + 1)
self._tensorboard.add_scalar(f"trial{self._trial_i}_task{self._task}/train_accu", accu.value()[0], epoch + 1)
# self._tensorboard.close()
self._tensorboard.flush()
def _validation(self, val_loader, epoch):
topk = 5 if self._n_classes >= 5 else self._n_classes
if self._val_per_n_epoch != -1 and epoch % self._val_per_n_epoch == 0:
_val_loss = 0
_val_accu = ClassErrorMeter(accuracy=True, topk=[1, topk])
_val_new_accu = ClassErrorMeter(accuracy=True)
_val_old_accu = ClassErrorMeter(accuracy=True)
self._parallel_network.eval()
with torch.no_grad():
for i, (inputs, targets) in enumerate(val_loader, 1):
old_classes = targets < (self._n_classes - self._task_size)
new_classes = targets >= (self._n_classes - self._task_size)
val_loss, _ = self._forward_loss(
inputs,
targets,
old_classes,
new_classes,
accu=_val_accu,
old_accu=_val_old_accu,
new_accu=_val_new_accu,
)
_val_loss += val_loss.item()
self._ex.logger.info(
f"epoch{epoch} val acc:{_val_accu.value()[0]:.2f}, val top5acc:{_val_accu.value()[1]:.2f}")
# Test accu
self._run.log_scalar(f"test_accu_trial{self._trial_i}_task{self._task}", _val_accu.value()[0], epoch + 1)
self._run.log_scalar(f"test_5accu_trial{self._trial_i}_task{self._task}", _val_accu.value()[1], epoch + 1)
self._tensorboard.add_scalar(f"trial{self._trial_i}_task{self._task}/test_accu",
_val_accu.value()[0], epoch + 1)
self._tensorboard.add_scalar(f"trial{self._trial_i}_task{self._task}/test_5accu",
_val_accu.value()[1], epoch + 1)
# Test new accu
self._run.log_scalar(f"test_new_accu_trial{self._trial_i}_task{self._task}",
_val_new_accu.value()[0], epoch + 1)
self._tensorboard.add_scalar(f"trial{self._trial_i}_task{self._task}/test_new_accu",
_val_new_accu.value()[0], epoch + 1)
# Test old accu
if self._task != 0:
self._run.log_scalar(f"test_old_accu_trial{self._trial_i}_task{self._task}",
_val_old_accu.value()[0], epoch + 1)
self._tensorboard.add_scalar(f"trial{self._trial_i}_task{self._task}/test_old_accu",
_val_old_accu.value()[0], epoch + 1)
# Test loss
self._run.log_scalar(f"test_loss_trial{self._trial_i}_task{self._task}", round(_val_loss / i, 3), epoch + 1)
self._tensorboard.add_scalar(f"trial{self._trial_i}_task{self._task}/test_loss", round(_val_loss / i, 3),
epoch + 1)
self._tensorboard.close()
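# A skeletal sketch of the call order documented in the class docstring above,
# using a do-nothing subclass and dummy task parameters (illustration only).
if __name__ == "__main__":
    class _DummyLearner(IncrementalLearner):
        def eval(self):
            pass
        def train(self):
            pass
        def _before_task(self, taski, inc_dataset):
            pass
        def _train_task(self, train_loader, val_loader):
            pass
        def _after_task(self, taski, inc_dataset):
            pass
        def _eval_task(self, data_loader):
            return [], []

    learner = _DummyLearner()
    for task_i in range(2):  # two incremental steps of 10 classes each
        learner.set_task_info(task=task_i, total_n_classes=20, increment=10,
                              n_train_data=1000, n_test_data=200, n_tasks=2)
        learner.before_task(task_i, inc_dataset=None)
        learner.train_task(train_loader=None, val_loader=None)
        learner.after_task(task_i, inc_dataset=None)
        learner.eval_task(data_loader=None)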
| 6,449
| 40.883117
| 120
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/models/align.py
|
import numpy as np
import random
import time
import math
import os
from copy import deepcopy
from scipy.spatial.distance import cdist
import torch
from torch.nn import DataParallel
from torch.nn import functional as F
from inclearn.convnet import network
from inclearn.models.base import IncrementalLearner
from inclearn.tools import factory, utils
from inclearn.tools.metrics import ClassErrorMeter
from inclearn.tools.memory import MemorySize
from inclearn.tools.scheduler import GradualWarmupScheduler
from inclearn.convnet.utils import extract_features, update_classes_mean
EPSILON = 1e-8
class Weight_Align(IncrementalLearner):
def __init__(self, cfg, trial_i, _run, ex, tensorboard, inc_dataset):
super().__init__()
self._cfg = cfg
self._device = cfg['device']
self._ex = ex
self._run = _run # the sacred _run object.
self._inc_dataset = inc_dataset
self._n_classes = 0
self._trial_i = trial_i # the No. of current run.
self._opt_name = cfg["optimizer"]
self._warmup = cfg['warmup']
self._lr = cfg["lr"]
self._weight_decay = cfg["weight_decay"]
self._n_epochs = cfg["epochs"]
self._scheduling = cfg["scheduling"]
self._lr_decay = cfg["lr_decay"]
self._tensorboard = tensorboard
if f"trial{self._trial_i}" not in self._run.info:
self._run.info[f"trial{self._trial_i}"] = {}
self._val_per_n_epoch = cfg["val_per_n_epoch"]
self._network = network.BasicNet(
cfg["convnet"],
cfg=cfg,
nf=cfg["channel"],
device=self._device,
use_bias=cfg["use_bias"],
dataset=cfg["dataset"],
)
self._parallel_network = DataParallel(self._network)
self._train_head = cfg["train_head"]
self._infer_head = cfg["infer_head"]
self._old_model = None
self._temperature = cfg["temperature"]
self._distillation = cfg["distillation"]
# Memory
self._memory_size = MemorySize(cfg["mem_size_mode"], inc_dataset, cfg["memory_size"],
cfg["fixed_memory_per_cls"])
self._herding_matrix = []
self._coreset_strategy = cfg["coreset_strategy"]
if self._cfg["save_ckpt"]:
save_path = os.path.join(os.getcwd(), "ckpts")
if not os.path.exists(save_path):
os.mkdir(save_path)
if self._cfg["save_mem"]:
save_path = os.path.join(os.getcwd(), "ckpts/mem")
if not os.path.exists(save_path):
os.mkdir(save_path)
def eval(self):
self._parallel_network.eval()
def train(self):
self._parallel_network.train()
# ----------
# Public API
# ----------
def _before_task(self, taski, inc_dataset):
self._task = taski
self._n_classes += self._task_size
self._memory_size.update_n_classes(self._n_classes)
self._memory_size.update_memory_per_cls(self._network, self._n_classes, self._task_size)
self._ex.logger.info("Now {} examplars per class.".format(self._memory_per_class))
self._network.add_classes(self._task_size)
self._network.task_size = self._task_size
self.set_optimizer()
def set_optimizer(self, lr=None):
if lr is None:
lr = self._lr
if self._cfg["dynamic_weight_decay"]:
# used in BiC official implementation
weight_decay = self._weight_decay * self._cfg["task_max"] / (self._task + 1)
else:
weight_decay = self._weight_decay
self._ex.logger.info("Step {} weight decay {:.5f}".format(self._task, weight_decay))
self._optimizer = factory.get_optimizer(filter(lambda p: p.requires_grad, self._network.parameters()),
self._opt_name, lr, weight_decay)
if "cos" in self._cfg["scheduler"]:
self._scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self._optimizer, self._n_epochs)
else:
self._scheduler = torch.optim.lr_scheduler.MultiStepLR(self._optimizer,
self._scheduling,
gamma=self._lr_decay)
if self._warmup:
print("warmup")
self._warmup_scheduler = GradualWarmupScheduler(self._optimizer,
multiplier=1,
total_epoch=self._cfg['warmup_epochs'],
after_scheduler=self._scheduler)
def _train_task(self, train_loader, val_loader):
self._ex.logger.info(f"nb {len(train_loader.dataset)}")
topk = 5 if self._n_classes > 5 else self._task_size
accu = ClassErrorMeter(accuracy=True, topk=[1, topk])
train_new_accu = ClassErrorMeter(accuracy=True)
train_old_accu = ClassErrorMeter(accuracy=True)
utils.display_weight_norm(self._ex.logger, self._parallel_network, self._increments, "Initial trainset")
utils.display_feature_norm(self._ex.logger, self._parallel_network, train_loader, self._n_classes,
self._increments, "Initial trainset")
self._optimizer.zero_grad()
self._optimizer.step()
for epoch in range(self._n_epochs):
_loss = 0.0
accu.reset()
train_new_accu.reset()
train_old_accu.reset()
if self._warmup:
self._warmup_scheduler.step()
if epoch == self._cfg['warmup_epochs']:
self._network.classifier.reset_parameters()
for i, (inputs, targets) in enumerate(train_loader, start=1):
self.train()
self._optimizer.zero_grad()
old_classes = targets < (self._n_classes - self._task_size)
new_classes = targets >= (self._n_classes - self._task_size)
loss = self._forward_loss(inputs, targets, old_classes, new_classes, accu=accu)
if not utils.check_loss(loss):
import pdb
pdb.set_trace()
loss.backward()
self._optimizer.step()
if self._cfg["postprocessor"]["enable"]:
if self._cfg["postprocessor"]["type"].lower() == "wa":
for p in self._network.classifier.parameters():
p.data.clamp_(0.0)
_loss += loss
_loss = _loss.item()
if not self._warmup:
self._scheduler.step()
self._ex.logger.info(
"Task {}/{}, Epoch {}/{} => Clf loss: {}, Train Accu: {}, Train@5 Acc: {}, old acc:{}".format(
self._task + 1,
self._n_tasks,
epoch + 1,
self._n_epochs,
round(_loss / i, 3),
round(accu.value()[0], 3),
round(accu.value()[1], 3),
round(train_old_accu.value()[0], 3),
))
if self._val_per_n_epoch > 0 and epoch % self._val_per_n_epoch == 0:
self.validate(val_loader)
self._inc_dataset.shared_data_inc = train_loader.dataset.share_memory
utils.display_weight_norm(self._ex.logger, self._parallel_network, self._increments, "After training")
utils.display_feature_norm(self._ex.logger, self._parallel_network, train_loader, self._n_classes,
self._increments, "Trainset")
self._run.info[f"trial{self._trial_i}"][f"task{self._task}_train_accu"] = round(accu.value()[0], 3)
def _forward_loss(self, inputs, targets, old_classes, new_classes, accu=None):
inputs, targets = inputs.to(self._device, non_blocking=True), targets.to(self._device, non_blocking=True)
logits = self._parallel_network(inputs)['logit']
if accu is not None:
accu.add(logits, targets)
return self._compute_loss(inputs, targets, logits)
def _compute_loss(self, inputs, targets, logits):
loss = F.cross_entropy(logits, targets)
if self._old_model is not None:
log_probs_new = (logits[:, :-self._task_size] / self._temperature).log_softmax(dim=1)
logits_old = self._old_model(inputs)['logit'].detach()
if self._task > 1:
logits_old = self._old_model.module.postprocessor.post_process(logits_old, self._task_size)
probs_old = (logits_old / self._temperature).softmax(dim=1)
loss_kl = F.kl_div(log_probs_new, probs_old, reduction="batchmean")
lamb = (self._n_classes - self._task_size) / self._n_classes
loss = (1 - lamb) * loss + lamb * loss_kl
return loss
def _after_task(self, taski, inc_dataset):
network = deepcopy(self._parallel_network)
network.eval()
self._ex.logger.info("save model")
if self._cfg["save_ckpt"] and taski >= self._cfg["start_task"]:
save_path = os.path.join(os.getcwd(), "ckpts")
torch.save(network.cpu().state_dict(), "{}/step{}.ckpt".format(save_path, self._task))
utils.display_weight_norm(self._ex.logger, network, self._increments, "After training")
if self._memory_size.memsize != 0:
self._ex.logger.info("build memory")
self.build_exemplars(inc_dataset, self._coreset_strategy)
self._parallel_network.eval()
if self._cfg["postprocessor"]["enable"] and self._task > 0:
self._update_postprocessor(inc_dataset)
self._old_model = deepcopy(self._parallel_network)
self._old_model.module.freeze()
del self._inc_dataset.shared_data_inc
self._inc_dataset.shared_data_inc = None
def _eval_task(self, data_loader):
if self._infer_head == "softmax":
ypred, ytrue = self._compute_accuracy_by_netout(data_loader)
else:
raise ValueError()
return ypred, ytrue
# -----------
# Private API
# -----------
def _compute_accuracy_by_netout(self, data_loader):
preds, targets = [], []
self._parallel_network.eval()
with torch.no_grad():
for i, (inputs, lbls) in enumerate(data_loader):
inputs = inputs.to(self._device, non_blocking=True)
_preds = self._network(inputs)['logit']
if self._cfg["postprocessor"]["enable"] and self._task > 0:
_preds = self._network.postprocessor.post_process(_preds, self._task_size)
preds.append(_preds.detach().cpu().numpy())
targets.append(lbls.long().cpu().numpy())
preds = np.concatenate(preds, axis=0)
targets = np.concatenate(targets, axis=0)
return preds, targets
def update_prototype(self):
self._class_means = update_classes_mean(self._parallel_network,
self._inc_dataset,
self._n_classes,
self._task_size,
share_memory=self._inc_dataset.shared_data_inc,
metric="none")
def _update_postprocessor(self, inc_dataset):
if self._cfg["postprocessor"]["type"].lower() == "bic":
bic_loader = inc_dataset._get_loader(inc_dataset.data_inc, inc_dataset.targets_inc, mode="balanced_train")
bic_loss = None
self._network.postprocessor.reset(n_classes=self._n_classes)
self._network.postprocessor.update(self._ex.logger,
self._task_size,
self._parallel_network,
bic_loader,
loss_criterion=bic_loss)
elif self._cfg["postprocessor"]["type"].lower() == "wa":
self._ex.logger.info("Post processor wa update !")
self._network.postprocessor.update(self._network.classifier, self._task_size)
def build_exemplars(self, inc_dataset, coreset_strategy):
save_path = os.path.join(os.getcwd(), f"ckpts/mem/mem_step{self._task}.ckpt")
if self._cfg["load_mem"] and os.path.exists(save_path):
memory_states = torch.load(save_path)
self._inc_dataset.data_memory = memory_states['x']
self._inc_dataset.targets_memory = memory_states['y']
self._herding_matrix = memory_states['herding']
self._ex.logger.info(f"Load saved step{self._task} memory!")
return
if coreset_strategy == "random":
from inclearn.tools.memory import random_selection
self._inc_dataset.data_memory, self._inc_dataset.targets_memory = random_selection(
self._n_classes,
self._task_size,
self._parallel_network,
self._ex.logger,
inc_dataset,
self._memory_per_class,
)
elif coreset_strategy == "iCaRL":
from inclearn.tools.memory import herding
data_inc = self._inc_dataset.shared_data_inc if self._inc_dataset.shared_data_inc is not None else self._inc_dataset.data_inc
self._inc_dataset.data_memory, self._inc_dataset.targets_memory, self._herding_matrix = herding(
self._n_classes,
self._task_size,
self._parallel_network,
self._herding_matrix,
inc_dataset,
data_inc,
self._memory_per_class,
self._ex.logger,
)
else:
raise ValueError()
def validate(self, data_loader):
if self._infer_head == 'NCM':
self.update_prototype()
ypred, ytrue = self._eval_task(data_loader)
test_acc_stats = utils.compute_accuracy(ypred, ytrue, increments=self._increments, n_classes=self._n_classes)
self._ex.logger.info(f"test top1acc:{test_acc_stats['top1']}")
| 14,420
| 42.436747
| 137
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/models/__init__.py
|
from .incmodel import IncModel
from .align import Weight_Align
from .bic import BiC
| 84
| 20.25
| 31
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/models/incmodel.py
|
import numpy as np
import random
import time
import math
import os
from copy import deepcopy
from scipy.spatial.distance import cdist
import torch
from torch.nn import DataParallel
from torch.nn import functional as F
from inclearn.convnet import network
from inclearn.models.base import IncrementalLearner
from inclearn.tools import factory, utils
from inclearn.tools.metrics import ClassErrorMeter
from inclearn.tools.memory import MemorySize
from inclearn.tools.scheduler import GradualWarmupScheduler
from inclearn.convnet.utils import extract_features, update_classes_mean, finetune_last_layer
# Constants
EPSILON = 1e-8
class IncModel(IncrementalLearner):
def __init__(self, cfg, trial_i, _run, ex, tensorboard, inc_dataset):
super().__init__()
self._cfg = cfg
self._device = cfg['device']
self._ex = ex
self._run = _run # the sacred _run object.
# Data
self._inc_dataset = inc_dataset
self._n_classes = 0
self._trial_i = trial_i # which class order is used
# Optimizer paras
self._opt_name = cfg["optimizer"]
self._warmup = cfg['warmup']
self._lr = cfg["lr"]
self._weight_decay = cfg["weight_decay"]
self._n_epochs = cfg["epochs"]
self._scheduling = cfg["scheduling"]
self._lr_decay = cfg["lr_decay"]
# Classifier Learning Stage
self._decouple = cfg["decouple"]
# Logging
self._tensorboard = tensorboard
if f"trial{self._trial_i}" not in self._run.info:
self._run.info[f"trial{self._trial_i}"] = {}
self._val_per_n_epoch = cfg["val_per_n_epoch"]
# Model
self._der = cfg['der'] # Whether to expand the representation
self._network = network.BasicNet(
cfg["convnet"],
cfg=cfg,
nf=cfg["channel"],
device=self._device,
use_bias=cfg["use_bias"],
dataset=cfg["dataset"],
)
self._parallel_network = DataParallel(self._network)
self._train_head = cfg["train_head"]
self._infer_head = cfg["infer_head"]
self._old_model = None
# Learning
self._temperature = cfg["temperature"]
self._distillation = cfg["distillation"]
# Memory
self._memory_size = MemorySize(cfg["mem_size_mode"], inc_dataset, cfg["memory_size"],
cfg["fixed_memory_per_cls"])
self._herding_matrix = []
self._coreset_strategy = cfg["coreset_strategy"]
if self._cfg["save_ckpt"]:
save_path = os.path.join(os.getcwd(), "ckpts")
if not os.path.exists(save_path):
os.mkdir(save_path)
if self._cfg["save_mem"]:
save_path = os.path.join(os.getcwd(), "ckpts/mem")
if not os.path.exists(save_path):
os.mkdir(save_path)
def eval(self):
self._parallel_network.eval()
def train(self):
if self._der:
self._parallel_network.train()
self._parallel_network.module.convnets[-1].train()
if self._task >= 1:
for i in range(self._task):
self._parallel_network.module.convnets[i].eval()
else:
self._parallel_network.train()
def _before_task(self, taski, inc_dataset):
self._ex.logger.info(f"Begin step {taski}")
# Update Task info
self._task = taski
self._n_classes += self._task_size
# Memory
self._memory_size.update_n_classes(self._n_classes)
self._memory_size.update_memory_per_cls(self._network, self._n_classes, self._task_size)
self._ex.logger.info("Now {} examplars per class.".format(self._memory_per_class))
self._network.add_classes(self._task_size)
self._network.task_size = self._task_size
self.set_optimizer()
def set_optimizer(self, lr=None):
if lr is None:
lr = self._lr
if self._cfg["dynamic_weight_decay"]:
# used in BiC official implementation
weight_decay = self._weight_decay * self._cfg["task_max"] / (self._task + 1)
else:
weight_decay = self._weight_decay
self._ex.logger.info("Step {} weight decay {:.5f}".format(self._task, weight_decay))
if self._der and self._task > 0:
for i in range(self._task):
for p in self._parallel_network.module.convnets[i].parameters():
p.requires_grad = False
self._optimizer = factory.get_optimizer(filter(lambda p: p.requires_grad, self._network.parameters()),
self._opt_name, lr, weight_decay)
if "cos" in self._cfg["scheduler"]:
self._scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self._optimizer, self._n_epochs)
else:
self._scheduler = torch.optim.lr_scheduler.MultiStepLR(self._optimizer,
self._scheduling,
gamma=self._lr_decay)
if self._warmup:
print("warmup")
self._warmup_scheduler = GradualWarmupScheduler(self._optimizer,
multiplier=1,
total_epoch=self._cfg['warmup_epochs'],
after_scheduler=self._scheduler)
def _train_task(self, train_loader, val_loader):
self._ex.logger.info(f"nb {len(train_loader.dataset)}")
topk = 5 if self._n_classes > 5 else self._task_size
accu = ClassErrorMeter(accuracy=True, topk=[1, topk])
train_new_accu = ClassErrorMeter(accuracy=True)
train_old_accu = ClassErrorMeter(accuracy=True)
utils.display_weight_norm(self._ex.logger, self._parallel_network, self._increments, "Initial trainset")
utils.display_feature_norm(self._ex.logger, self._parallel_network, train_loader, self._n_classes,
self._increments, "Initial trainset")
self._optimizer.zero_grad()
self._optimizer.step()
for epoch in range(self._n_epochs):
_loss, _loss_aux = 0.0, 0.0
accu.reset()
train_new_accu.reset()
train_old_accu.reset()
if self._warmup:
self._warmup_scheduler.step()
if epoch == self._cfg['warmup_epochs']:
self._network.classifier.reset_parameters()
if self._cfg['use_aux_cls']:
self._network.aux_classifier.reset_parameters()
for i, (inputs, targets) in enumerate(train_loader, start=1):
self.train()
self._optimizer.zero_grad()
old_classes = targets < (self._n_classes - self._task_size)
new_classes = targets >= (self._n_classes - self._task_size)
loss_ce, loss_aux = self._forward_loss(
inputs,
targets,
old_classes,
new_classes,
accu=accu,
new_accu=train_new_accu,
old_accu=train_old_accu,
)
if self._cfg["use_aux_cls"] and self._task > 0:
loss = loss_ce + loss_aux
else:
loss = loss_ce
if not utils.check_loss(loss):
import pdb
pdb.set_trace()
loss.backward()
self._optimizer.step()
if self._cfg["postprocessor"]["enable"]:
if self._cfg["postprocessor"]["type"].lower() == "wa":
for p in self._network.classifier.parameters():
p.data.clamp_(0.0)
_loss += loss_ce
_loss_aux += loss_aux
_loss = _loss.item()
_loss_aux = _loss_aux.item()
if not self._warmup:
self._scheduler.step()
self._ex.logger.info(
"Task {}/{}, Epoch {}/{} => Clf loss: {} Aux loss: {}, Train Accu: {}, Train@5 Acc: {}, old acc:{}".
format(
self._task + 1,
self._n_tasks,
epoch + 1,
self._n_epochs,
round(_loss / i, 3),
round(_loss_aux / i, 3),
round(accu.value()[0], 3),
round(accu.value()[1], 3),
round(train_old_accu.value()[0], 3),
))
if self._val_per_n_epoch > 0 and epoch % self._val_per_n_epoch == 0:
self.validate(val_loader)
# For the large-scale dataset, we manage the data in the shared memory.
self._inc_dataset.shared_data_inc = train_loader.dataset.share_memory
utils.display_weight_norm(self._ex.logger, self._parallel_network, self._increments, "After training")
utils.display_feature_norm(self._ex.logger, self._parallel_network, train_loader, self._n_classes,
self._increments, "Trainset")
self._run.info[f"trial{self._trial_i}"][f"task{self._task}_train_accu"] = round(accu.value()[0], 3)
def _forward_loss(self, inputs, targets, old_classes, new_classes, accu=None, new_accu=None, old_accu=None):
inputs, targets = inputs.to(self._device, non_blocking=True), targets.to(self._device, non_blocking=True)
outputs = self._parallel_network(inputs)
if accu is not None:
accu.add(outputs['logit'], targets)
# accu.add(logits.detach(), targets.cpu().numpy())
# if new_accu is not None:
# new_accu.add(logits[new_classes].detach(), targets[new_classes].cpu().numpy())
# if old_accu is not None:
# old_accu.add(logits[old_classes].detach(), targets[old_classes].cpu().numpy())
return self._compute_loss(inputs, targets, outputs, old_classes, new_classes)
def _compute_loss(self, inputs, targets, outputs, old_classes, new_classes):
loss = F.cross_entropy(outputs['logit'], targets)
if outputs['aux_logit'] is not None:
aux_targets = targets.clone()
if self._cfg["aux_n+1"]:
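                # With aux_n+1 enabled the auxiliary head predicts task_size + 1 classes:
                # label 0 stands for "any old class" and labels 1..task_size index the new
                # classes, so old targets are collapsed to 0 and new targets are shifted
                # into [1, task_size] below.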
aux_targets[old_classes] = 0
aux_targets[new_classes] -= sum(self._inc_dataset.increments[:self._task]) - 1
aux_loss = F.cross_entropy(outputs['aux_logit'], aux_targets)
else:
aux_loss = torch.zeros([1]).cuda()
return loss, aux_loss
def _after_task(self, taski, inc_dataset):
network = deepcopy(self._parallel_network)
network.eval()
self._ex.logger.info("save model")
if self._cfg["save_ckpt"] and taski >= self._cfg["start_task"]:
save_path = os.path.join(os.getcwd(), "ckpts")
torch.save(network.cpu().state_dict(), "{}/step{}.ckpt".format(save_path, self._task))
if (self._cfg["decouple"]['enable'] and taski > 0):
if self._cfg["decouple"]["fullset"]:
train_loader = inc_dataset._get_loader(inc_dataset.data_inc, inc_dataset.targets_inc, mode="train")
else:
train_loader = inc_dataset._get_loader(inc_dataset.data_inc,
inc_dataset.targets_inc,
mode="balanced_train")
# finetuning
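            # Decoupled training stage: the classification head is re-initialized and then
            # re-trained by finetune_last_layer on either the full incremental set or a
            # class-balanced subset (cfg["decouple"]["fullset"]); as the name suggests, only
            # the last layer is tuned here.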
self._parallel_network.module.classifier.reset_parameters()
finetune_last_layer(self._ex.logger,
self._parallel_network,
train_loader,
self._n_classes,
nepoch=self._decouple["epochs"],
lr=self._decouple["lr"],
scheduling=self._decouple["scheduling"],
lr_decay=self._decouple["lr_decay"],
weight_decay=self._decouple["weight_decay"],
loss_type="ce",
temperature=self._decouple["temperature"])
network = deepcopy(self._parallel_network)
if self._cfg["save_ckpt"]:
save_path = os.path.join(os.getcwd(), "ckpts")
torch.save(network.cpu().state_dict(), "{}/decouple_step{}.ckpt".format(save_path, self._task))
if self._cfg["postprocessor"]["enable"]:
self._update_postprocessor(inc_dataset)
if self._cfg["infer_head"] == 'NCM':
self._ex.logger.info("compute prototype")
self.update_prototype()
if self._memory_size.memsize != 0:
self._ex.logger.info("build memory")
self.build_exemplars(inc_dataset, self._coreset_strategy)
if self._cfg["save_mem"]:
save_path = os.path.join(os.getcwd(), "ckpts/mem")
memory = {
'x': inc_dataset.data_memory,
'y': inc_dataset.targets_memory,
'herding': self._herding_matrix
}
if not os.path.exists(save_path):
os.makedirs(save_path)
if not (os.path.exists(f"{save_path}/mem_step{self._task}.ckpt") and self._cfg['load_mem']):
torch.save(memory, "{}/mem_step{}.ckpt".format(save_path, self._task))
self._ex.logger.info(f"Save step{self._task} memory!")
self._parallel_network.eval()
self._old_model = deepcopy(self._parallel_network)
self._old_model.module.freeze()
del self._inc_dataset.shared_data_inc
self._inc_dataset.shared_data_inc = None
def _eval_task(self, data_loader):
if self._infer_head == "softmax":
ypred, ytrue = self._compute_accuracy_by_netout(data_loader)
elif self._infer_head == "NCM":
ypred, ytrue = self._compute_accuracy_by_ncm(data_loader)
else:
raise ValueError()
return ypred, ytrue
def _compute_accuracy_by_netout(self, data_loader):
preds, targets = [], []
self._parallel_network.eval()
with torch.no_grad():
for i, (inputs, lbls) in enumerate(data_loader):
inputs = inputs.to(self._device, non_blocking=True)
_preds = self._parallel_network(inputs)['logit']
if self._cfg["postprocessor"]["enable"] and self._task > 0:
_preds = self._network.postprocessor.post_process(_preds, self._task_size)
preds.append(_preds.detach().cpu().numpy())
targets.append(lbls.long().cpu().numpy())
preds = np.concatenate(preds, axis=0)
targets = np.concatenate(targets, axis=0)
return preds, targets
def _compute_accuracy_by_ncm(self, loader):
features, targets_ = extract_features(self._parallel_network, loader)
targets = np.zeros((targets_.shape[0], self._n_classes), np.float32)
targets[range(len(targets_)), targets_.astype("int32")] = 1.0
class_means = (self._class_means.T / (np.linalg.norm(self._class_means.T, axis=0) + EPSILON)).T
features = (features.T / (np.linalg.norm(features.T, axis=0) + EPSILON)).T
# Compute score for iCaRL
sqd = cdist(class_means, features, "sqeuclidean")
score_icarl = (-sqd).T
return score_icarl[:, :self._n_classes], targets_
def _update_postprocessor(self, inc_dataset):
if self._cfg["postprocessor"]["type"].lower() == "bic":
if self._cfg["postprocessor"]["disalign_resample"] is True:
bic_loader = inc_dataset._get_loader(inc_dataset.data_inc,
inc_dataset.targets_inc,
mode="train",
resample='disalign_resample')
else:
xdata, ydata = inc_dataset._select(inc_dataset.data_train,
inc_dataset.targets_train,
low_range=0,
high_range=self._n_classes)
bic_loader = inc_dataset._get_loader(xdata, ydata, shuffle=True, mode='train')
bic_loss = None
self._network.postprocessor.reset(n_classes=self._n_classes)
self._network.postprocessor.update(self._ex.logger,
self._task_size,
self._parallel_network,
bic_loader,
loss_criterion=bic_loss)
elif self._cfg["postprocessor"]["type"].lower() == "wa":
self._ex.logger.info("Post processor wa update !")
self._network.postprocessor.update(self._network.classifier, self._task_size)
def update_prototype(self):
if hasattr(self._inc_dataset, 'shared_data_inc'):
shared_data_inc = self._inc_dataset.shared_data_inc
else:
shared_data_inc = None
self._class_means = update_classes_mean(self._parallel_network,
self._inc_dataset,
self._n_classes,
self._task_size,
                                                 share_memory=shared_data_inc,
metric='None')
def build_exemplars(self, inc_dataset, coreset_strategy):
save_path = os.path.join(os.getcwd(), f"ckpts/mem/mem_step{self._task}.ckpt")
if self._cfg["load_mem"] and os.path.exists(save_path):
memory_states = torch.load(save_path)
self._inc_dataset.data_memory = memory_states['x']
self._inc_dataset.targets_memory = memory_states['y']
self._herding_matrix = memory_states['herding']
self._ex.logger.info(f"Load saved step{self._task} memory!")
return
if coreset_strategy == "random":
from inclearn.tools.memory import random_selection
self._inc_dataset.data_memory, self._inc_dataset.targets_memory = random_selection(
self._n_classes,
self._task_size,
self._parallel_network,
self._ex.logger,
inc_dataset,
self._memory_per_class,
)
elif coreset_strategy == "iCaRL":
from inclearn.tools.memory import herding
data_inc = self._inc_dataset.shared_data_inc if self._inc_dataset.shared_data_inc is not None else self._inc_dataset.data_inc
self._inc_dataset.data_memory, self._inc_dataset.targets_memory, self._herding_matrix = herding(
self._n_classes,
self._task_size,
self._parallel_network,
self._herding_matrix,
inc_dataset,
data_inc,
self._memory_per_class,
self._ex.logger,
)
else:
raise ValueError()
def validate(self, data_loader):
if self._infer_head == 'NCM':
self.update_prototype()
ypred, ytrue = self._eval_task(data_loader)
test_acc_stats = utils.compute_accuracy(ypred, ytrue, increments=self._increments, n_classes=self._n_classes)
self._ex.logger.info(f"test top1acc:{test_acc_stats['top1']}")
| 19,955
| 43.445434
| 137
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/datasets/dataset.py
|
import os.path as osp
import numpy as np
import glob
import albumentations as A
from albumentations.pytorch import ToTensorV2
from torchvision import datasets, transforms
import torch
def get_datasets(dataset_names):
return [get_dataset(dataset_name) for dataset_name in dataset_names.split("-")]
def get_dataset(dataset_name):
if dataset_name == "cifar10":
return iCIFAR10
elif dataset_name == "cifar100":
return iCIFAR100
elif "imagenet100" in dataset_name:
return iImageNet100
elif dataset_name == "imagenet":
return iImageNet
else:
raise NotImplementedError("Unknown dataset {}.".format(dataset_name))
class DataHandler:
base_dataset = None
train_transforms = []
common_transforms = [ToTensorV2()]
class_order = None
class iCIFAR10(DataHandler):
base_dataset_cls = datasets.cifar.CIFAR10
transform_type = 'torchvision'
train_transforms = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
# transforms.ColorJitter(brightness=63 / 255),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
test_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
def __init__(self, data_folder, train, is_fine_label=False):
self.base_dataset = self.base_dataset_cls(data_folder, train=train, download=True)
self.data = self.base_dataset.data
self.targets = self.base_dataset.targets
self.n_cls = 10
@property
def is_proc_inc_data(self):
return False
@classmethod
def class_order(cls, trial_i):
return [4, 0, 2, 5, 8, 3, 1, 6, 9, 7]
class iCIFAR100(iCIFAR10):
label_list = [
'apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle', 'bicycle', 'bottle', 'bowl', 'boy',
'bridge', 'bus', 'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock',
'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish',
'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion',
'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom', 'oak_tree', 'orange',
'orchid', 'otter', 'palm_tree', 'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine',
'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk',
'skyscraper', 'snail', 'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table', 'tank',
'telephone', 'television', 'tiger', 'tractor', 'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale',
'willow_tree', 'wolf', 'woman', 'worm'
]
base_dataset_cls = datasets.cifar.CIFAR100
transform_type = 'torchvision'
train_transforms = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=63 / 255),
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
])
test_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
])
def __init__(self, data_folder, train, is_fine_label=False):
self.base_dataset = self.base_dataset_cls(data_folder, train=train, download=True)
self.data = self.base_dataset.data
self.targets = self.base_dataset.targets
self.n_cls = 100
self.transform_type = 'torchvision'
@property
def is_proc_inc_data(self):
return False
@classmethod
def class_order(cls, trial_i):
if trial_i == 0:
return [
62, 54, 84, 20, 94, 22, 40, 29, 78, 27, 26, 79, 17, 76, 68, 88, 3, 19, 31, 21, 33, 60, 24, 14, 6, 10,
16, 82, 70, 92, 25, 5, 28, 9, 61, 36, 50, 90, 8, 48, 47, 56, 11, 98, 35, 93, 44, 64, 75, 66, 15, 38, 97,
42, 43, 12, 37, 55, 72, 95, 18, 7, 23, 71, 49, 53, 57, 86, 39, 87, 34, 63, 81, 89, 69, 46, 2, 1, 73, 32,
67, 91, 0, 51, 83, 13, 58, 80, 74, 65, 4, 30, 45, 77, 99, 85, 41, 96, 59, 52
]
elif trial_i == 1:
return [
68, 56, 78, 8, 23, 84, 90, 65, 74, 76, 40, 89, 3, 92, 55, 9, 26, 80, 43, 38, 58, 70, 77, 1, 85, 19, 17,
50, 28, 53, 13, 81, 45, 82, 6, 59, 83, 16, 15, 44, 91, 41, 72, 60, 79, 52, 20, 10, 31, 54, 37, 95, 14,
71, 96, 98, 97, 2, 64, 66, 42, 22, 35, 86, 24, 34, 87, 21, 99, 0, 88, 27, 18, 94, 11, 12, 47, 25, 30,
46, 62, 69, 36, 61, 7, 63, 75, 5, 32, 4, 51, 48, 73, 93, 39, 67, 29, 49, 57, 33
]
elif trial_i == 2: #PODNet
return [
87, 0, 52, 58, 44, 91, 68, 97, 51, 15, 94, 92, 10, 72, 49, 78, 61, 14, 8, 86, 84, 96, 18, 24, 32, 45,
88, 11, 4, 67, 69, 66, 77, 47, 79, 93, 29, 50, 57, 83, 17, 81, 41, 12, 37, 59, 25, 20, 80, 73, 1, 28, 6,
46, 62, 82, 53, 9, 31, 75, 38, 63, 33, 74, 27, 22, 36, 3, 16, 21, 60, 19, 70, 90, 89, 43, 5, 42, 65, 76,
40, 30, 23, 85, 2, 95, 56, 48, 71, 64, 98, 13, 99, 7, 34, 55, 54, 26, 35, 39
]
elif trial_i == 3: #PODNet
return [
58, 30, 93, 69, 21, 77, 3, 78, 12, 71, 65, 40, 16, 49, 89, 46, 24, 66, 19, 41, 5, 29, 15, 73, 11, 70,
90, 63, 67, 25, 59, 72, 80, 94, 54, 33, 18, 96, 2, 10, 43, 9, 57, 81, 76, 50, 32, 6, 37, 7, 68, 91, 88,
95, 85, 4, 60, 36, 22, 27, 39, 42, 34, 51, 55, 28, 53, 48, 38, 17, 83, 86, 56, 35, 45, 79, 99, 84, 97,
82, 98, 26, 47, 44, 62, 13, 31, 0, 75, 14, 52, 74, 8, 20, 1, 92, 87, 23, 64, 61
]
elif trial_i == 4: #PODNet
return [
71, 54, 45, 32, 4, 8, 48, 66, 1, 91, 28, 82, 29, 22, 80, 27, 86, 23, 37, 47, 55, 9, 14, 68, 25, 96, 36,
90, 58, 21, 57, 81, 12, 26, 16, 89, 79, 49, 31, 38, 46, 20, 92, 88, 40, 39, 98, 94, 19, 95, 72, 24, 64,
18, 60, 50, 63, 61, 83, 76, 69, 35, 0, 52, 7, 65, 42, 73, 74, 30, 41, 3, 6, 53, 13, 56, 70, 77, 34, 97,
75, 2, 17, 93, 33, 84, 99, 51, 62, 87, 5, 15, 10, 78, 67, 44, 59, 85, 43, 11
]
class DataHandler:
base_dataset = None
train_transforms = []
common_transforms = [ToTensorV2()]
class_order = None
class iImageNet(DataHandler):
base_dataset_cls = datasets.ImageFolder
transform_type = 'albumentations'
if transform_type == 'albumentations':
train_transforms = A.Compose([
A.RandomResizedCrop(224, 224),
A.HorizontalFlip(),
A.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
),
ToTensorV2()
])
test_transforms = A.Compose([
A.Resize(256, 256),
A.CenterCrop(224, 224),
A.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
),
ToTensorV2()
])
else:
train_transforms = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
# transforms.ColorJitter(brightness=63 / 255),
])
test_transforms = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def __init__(self, data_folder, train, is_fine_label=False):
if train is True:
self.base_dataset = self.base_dataset_cls(osp.join(data_folder, "train"))
else:
self.base_dataset = self.base_dataset_cls(osp.join(data_folder, "val"))
self.data, self.targets = zip(*self.base_dataset.samples)
self.data = np.array(self.data)
self.targets = np.array(self.targets)
self.n_cls = 1000
@property
def is_proc_inc_data(self):
return False
@classmethod
def class_order(cls, trial_i):
return [
54, 7, 894, 512, 126, 337, 988, 11, 284, 493, 133, 783, 192, 979, 622, 215, 240, 548, 238, 419, 274, 108,
928, 856, 494, 836, 473, 650, 85, 262, 508, 590, 390, 174, 637, 288, 658, 219, 912, 142, 852, 160, 704, 289,
123, 323, 600, 542, 999, 634, 391, 761, 490, 842, 127, 850, 665, 990, 597, 722, 748, 14, 77, 437, 394, 859,
279, 539, 75, 466, 886, 312, 303, 62, 966, 413, 959, 782, 509, 400, 471, 632, 275, 730, 105, 523, 224, 186,
478, 507, 470, 906, 699, 989, 324, 812, 260, 911, 446, 44, 765, 759, 67, 36, 5, 30, 184, 797, 159, 741, 954,
465, 533, 585, 150, 101, 897, 363, 818, 620, 824, 154, 956, 176, 588, 986, 172, 223, 461, 94, 141, 621, 659,
360, 136, 578, 163, 427, 70, 226, 925, 596, 336, 412, 731, 755, 381, 810, 69, 898, 310, 120, 752, 93, 39,
326, 537, 905, 448, 347, 51, 615, 601, 229, 947, 348, 220, 949, 972, 73, 913, 522, 193, 753, 921, 257, 957,
691, 155, 820, 584, 948, 92, 582, 89, 379, 392, 64, 904, 169, 216, 694, 103, 410, 374, 515, 484, 624, 409,
156, 455, 846, 344, 371, 468, 844, 276, 740, 562, 503, 831, 516, 663, 630, 763, 456, 179, 996, 936, 248,
333, 941, 63, 738, 802, 372, 828, 74, 540, 299, 750, 335, 177, 822, 643, 593, 800, 459, 580, 933, 306, 378,
76, 227, 426, 403, 322, 321, 808, 393, 27, 200, 764, 651, 244, 479, 3, 415, 23, 964, 671, 195, 569, 917,
611, 644, 707, 355, 855, 8, 534, 657, 571, 811, 681, 543, 313, 129, 978, 592, 573, 128, 243, 520, 887, 892,
696, 26, 551, 168, 71, 398, 778, 529, 526, 792, 868, 266, 443, 24, 57, 15, 871, 678, 745, 845, 208, 188,
674, 175, 406, 421, 833, 106, 994, 815, 581, 676, 49, 619, 217, 631, 934, 932, 568, 353, 863, 827, 425, 420,
99, 823, 113, 974, 438, 874, 343, 118, 340, 472, 552, 937, 0, 10, 675, 316, 879, 561, 387, 726, 255, 407,
56, 927, 655, 809, 839, 640, 297, 34, 497, 210, 606, 971, 589, 138, 263, 587, 993, 973, 382, 572, 735, 535,
139, 524, 314, 463, 895, 376, 939, 157, 858, 457, 935, 183, 114, 903, 767, 666, 22, 525, 902, 233, 250, 825,
79, 843, 221, 214, 205, 166, 431, 860, 292, 976, 739, 899, 475, 242, 961, 531, 110, 769, 55, 701, 532, 586,
729, 253, 486, 787, 774, 165, 627, 32, 291, 962, 922, 222, 705, 454, 356, 445, 746, 776, 404, 950, 241, 452,
245, 487, 706, 2, 137, 6, 98, 647, 50, 91, 202, 556, 38, 68, 649, 258, 345, 361, 464, 514, 958, 504, 826,
668, 880, 28, 920, 918, 339, 315, 320, 768, 201, 733, 575, 781, 864, 617, 171, 795, 132, 145, 368, 147, 327,
713, 688, 848, 690, 975, 354, 853, 148, 648, 300, 436, 780, 693, 682, 246, 449, 492, 162, 97, 59, 357, 198,
519, 90, 236, 375, 359, 230, 476, 784, 117, 940, 396, 849, 102, 122, 282, 181, 130, 467, 88, 271, 793, 151,
847, 914, 42, 834, 521, 121, 29, 806, 607, 510, 837, 301, 669, 78, 256, 474, 840, 52, 505, 547, 641, 987,
801, 629, 491, 605, 112, 429, 401, 742, 528, 87, 442, 910, 638, 785, 264, 711, 369, 428, 805, 744, 380, 725,
480, 318, 997, 153, 384, 252, 985, 538, 654, 388, 100, 432, 832, 565, 908, 367, 591, 294, 272, 231, 213,
196, 743, 817, 433, 328, 970, 969, 4, 613, 182, 685, 724, 915, 311, 931, 865, 86, 119, 203, 268, 718, 317,
926, 269, 161, 209, 807, 645, 513, 261, 518, 305, 758, 872, 58, 65, 146, 395, 481, 747, 41, 283, 204, 564,
185, 777, 33, 500, 609, 286, 567, 80, 228, 683, 757, 942, 134, 673, 616, 960, 450, 350, 544, 830, 736, 170,
679, 838, 819, 485, 430, 190, 566, 511, 482, 232, 527, 411, 560, 281, 342, 614, 662, 47, 771, 861, 692, 686,
277, 373, 16, 946, 265, 35, 9, 884, 909, 610, 358, 18, 737, 977, 677, 803, 595, 135, 458, 12, 46, 418, 599,
187, 107, 992, 770, 298, 104, 351, 893, 698, 929, 502, 273, 20, 96, 791, 636, 708, 267, 867, 772, 604, 618,
346, 330, 554, 816, 664, 716, 189, 31, 721, 712, 397, 43, 943, 804, 296, 109, 576, 869, 955, 17, 506, 963,
786, 720, 628, 779, 982, 633, 891, 734, 980, 386, 365, 794, 325, 841, 878, 370, 695, 293, 951, 66, 594, 717,
116, 488, 796, 983, 646, 499, 53, 1, 603, 45, 424, 875, 254, 237, 199, 414, 307, 362, 557, 866, 341, 19,
965, 143, 555, 687, 235, 790, 125, 173, 364, 882, 727, 728, 563, 495, 21, 558, 709, 719, 877, 352, 83, 998,
991, 469, 967, 760, 498, 814, 612, 715, 290, 72, 131, 259, 441, 924, 773, 48, 625, 501, 440, 82, 684, 862,
574, 309, 408, 680, 623, 439, 180, 652, 968, 889, 334, 61, 766, 399, 598, 798, 653, 930, 149, 249, 890, 308,
881, 40, 835, 577, 422, 703, 813, 857, 995, 602, 583, 167, 670, 212, 751, 496, 608, 84, 639, 579, 178, 489,
37, 197, 789, 530, 111, 876, 570, 700, 444, 287, 366, 883, 385, 536, 460, 851, 81, 144, 60, 251, 13, 953,
270, 944, 319, 885, 710, 952, 517, 278, 656, 919, 377, 550, 207, 660, 984, 447, 553, 338, 234, 383, 749,
916, 626, 462, 788, 434, 714, 799, 821, 477, 549, 661, 206, 667, 541, 642, 689, 194, 152, 981, 938, 854,
483, 332, 280, 546, 389, 405, 545, 239, 896, 672, 923, 402, 423, 907, 888, 140, 870, 559, 756, 25, 211, 158,
723, 635, 302, 702, 453, 218, 164, 829, 247, 775, 191, 732, 115, 331, 901, 416, 873, 754, 900, 435, 762,
124, 304, 329, 349, 295, 95, 451, 285, 225, 945, 697, 417
]
class iImageNet100(DataHandler):
base_dataset_cls = datasets.ImageFolder
transform_type = 'albumentations'
if transform_type == 'albumentations':
train_transforms = A.Compose([
A.RandomResizedCrop(224, 224),
A.HorizontalFlip(),
# A.ColorJitter(brightness=63 / 255),
A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
ToTensorV2()
])
test_transforms = A.Compose([
A.Resize(256, 256),
A.CenterCrop(224, 224),
A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
ToTensorV2()
])
else:
train_transforms = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
# transforms.ColorJitter(brightness=63 / 255),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
test_transforms = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def __init__(self, data_folder, train, is_fine_label=False):
if train is True:
self.base_dataset = self.base_dataset_cls(osp.join(data_folder, "train"))
else:
self.base_dataset = self.base_dataset_cls(osp.join(data_folder, "val"))
self.data, self.targets = zip(*self.base_dataset.samples)
self.data = np.array(self.data)
self.targets = np.array(self.targets)
self.n_cls = 100
@property
def is_proc_inc_data(self):
return False
@classmethod
def class_order(cls, trial_i):
return [
68, 56, 78, 8, 23, 84, 90, 65, 74, 76, 40, 89, 3, 92, 55, 9, 26, 80, 43, 38, 58, 70, 77, 1, 85, 19, 17, 50,
28, 53, 13, 81, 45, 82, 6, 59, 83, 16, 15, 44, 91, 41, 72, 60, 79, 52, 20, 10, 31, 54, 37, 95, 14, 71, 96,
98, 97, 2, 64, 66, 42, 22, 35, 86, 24, 34, 87, 21, 99, 0, 88, 27, 18, 94, 11, 12, 47, 25, 30, 46, 62, 69,
36, 61, 7, 63, 75, 5, 32, 4, 51, 48, 73, 93, 39, 67, 29, 49, 57, 33
]
| 16,245
| 51.918567
| 120
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/datasets/data.py
|
import random
import cv2
import numpy as np
import os.path as osp
from copy import deepcopy
from PIL import Image
import multiprocessing as mp
from multiprocessing import Pool
import albumentations as A
from albumentations.pytorch import ToTensorV2
import warnings
warnings.filterwarnings("ignore", "Corrupt EXIF data", UserWarning)
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler, WeightedRandomSampler
from torchvision import datasets, transforms
from torchvision.datasets.folder import pil_loader
from .dataset import get_dataset
from inclearn.tools.data_utils import construct_balanced_subset
def get_data_folder(data_folder, dataset_name):
return osp.join(data_folder, dataset_name)
class IncrementalDataset:
def __init__(
self,
trial_i,
dataset_name,
random_order=False,
shuffle=True,
workers=10,
batch_size=128,
seed=1,
increment=10,
validation_split=0.0,
resampling=False,
data_folder="./data",
start_class=0,
):
# The info about incremental split
self.trial_i = trial_i
self.start_class = start_class
#the number of classes for each step in incremental stage
self.task_size = increment
self.increments = []
        self.random_order = random_order
        self._seed = seed  # stored here because _setup_data_for_raw_data calls random.seed(self._seed) when random_order is True
self.validation_split = validation_split
#-------------------------------------
#Dataset Info
#-------------------------------------
self.data_folder = get_data_folder(data_folder, dataset_name)
self.dataset_name = dataset_name
self.train_dataset = None
self.test_dataset = None
self.n_tot_cls = -1
datasets = get_dataset(dataset_name)
self._setup_data(datasets)
self._workers = workers
self._shuffle = shuffle
self._batch_size = batch_size
self._resampling = resampling
#Currently, don't support multiple datasets
self.train_transforms = datasets.train_transforms
self.test_transforms = datasets.test_transforms
#torchvision or albumentations
self.transform_type = datasets.transform_type
# memory Mt
self.data_memory = None
self.targets_memory = None
# Incoming data D_t
self.data_cur = None
self.targets_cur = None
# Available data \tilde{D}_t = D_t \cup M_t
self.data_inc = None # Cur task data + memory
self.targets_inc = None
# Available data stored in cpu memory.
self.shared_data_inc = None
self.shared_test_data = None
#Current states for Incremental Learning Stage.
self._current_task = 0
@property
def n_tasks(self):
return len(self.increments)
def new_task(self):
if self._current_task >= len(self.increments):
raise Exception("No more tasks.")
min_class, max_class, x_train, y_train, x_test, y_test = self._get_cur_step_data_for_raw_data()
self.data_cur, self.targets_cur = x_train, y_train
if self.data_memory is not None:
print("Set memory of size: {}.".format(len(self.data_memory)))
if len(self.data_memory) != 0:
x_train = np.concatenate((x_train, self.data_memory))
y_train = np.concatenate((y_train, self.targets_memory))
self.data_inc, self.targets_inc = x_train, y_train
self.data_test_inc, self.targets_test_inc = x_test, y_test
train_loader = self._get_loader(x_train, y_train, mode="train")
val_loader = self._get_loader(x_test, y_test, shuffle=False, mode="test")
test_loader = self._get_loader(x_test, y_test, shuffle=False, mode="test")
task_info = {
"min_class": min_class,
"max_class": max_class,
"increment": self.increments[self._current_task],
"task": self._current_task,
"max_task": len(self.increments),
"n_train_data": len(x_train),
"n_test_data": len(y_train),
}
self._current_task += 1
return task_info, train_loader, val_loader, test_loader
def _get_cur_step_data_for_raw_data(self, ):
min_class = sum(self.increments[:self._current_task])
max_class = sum(self.increments[:self._current_task + 1])
x_train, y_train = self._select(self.data_train, self.targets_train, low_range=min_class, high_range=max_class)
x_test, y_test = self._select(self.data_test, self.targets_test, low_range=0, high_range=max_class)
return min_class, max_class, x_train, y_train, x_test, y_test
#--------------------------------
# Data Setup
#--------------------------------
def _setup_data(self, dataset):
# FIXME: handles online loading of images
self.data_train, self.targets_train = [], []
self.data_test, self.targets_test = [], []
self.data_val, self.targets_val = [], []
self.increments = []
self.class_order = []
current_class_idx = 0 # When using multiple datasets
train_dataset = dataset(self.data_folder, train=True)
test_dataset = dataset(self.data_folder, train=False)
self.train_dataset = train_dataset
        self.test_dataset = test_dataset
self.n_tot_cls = self.train_dataset.n_cls #number of classes in whole dataset
self._setup_data_for_raw_data(dataset, train_dataset, test_dataset, current_class_idx)
# !list
self.data_train = np.concatenate(self.data_train)
self.targets_train = np.concatenate(self.targets_train)
self.data_val = np.concatenate(self.data_val)
self.targets_val = np.concatenate(self.targets_val)
self.data_test = np.concatenate(self.data_test)
self.targets_test = np.concatenate(self.targets_test)
def _setup_data_for_raw_data(self, dataset, train_dataset, test_dataset, current_class_idx=0):
increment = self.task_size
x_train, y_train = train_dataset.data, np.array(train_dataset.targets)
x_val, y_val, x_train, y_train = self._split_per_class(x_train, y_train, self.validation_split)
x_test, y_test = test_dataset.data, np.array(test_dataset.targets)
# Get Class Order
order = [i for i in range(len(np.unique(y_train)))]
if self.random_order:
random.seed(self._seed) # Ensure that following order is determined by seed:
random.shuffle(order)
elif dataset.class_order(self.trial_i) is not None:
order = dataset.class_order(self.trial_i)
self.class_order.append(order)
y_train = self._map_new_class_index(y_train, order)
y_val = self._map_new_class_index(y_val, order)
y_test = self._map_new_class_index(y_test, order)
y_train += current_class_idx
y_val += current_class_idx
y_test += current_class_idx
current_class_idx += len(order)
if self.start_class == 0:
self.increments = [increment for _ in range(len(order) // increment)]
else:
self.increments.append(self.start_class)
for _ in range((len(order) - self.start_class) // increment):
self.increments.append(increment)
self.data_train.append(x_train)
self.targets_train.append(y_train)
self.data_val.append(x_val)
self.targets_val.append(y_val)
self.data_test.append(x_test)
self.targets_test.append(y_test)
@staticmethod
def _split_per_class(x, y, validation_split=0.0):
"""Splits train data for a subset of validation data.
        The split is done per class, so that each class contributes the same proportion of data.
"""
shuffled_indexes = np.random.permutation(x.shape[0])
x = x[shuffled_indexes]
y = y[shuffled_indexes]
x_val, y_val = [], []
x_train, y_train = [], []
for class_id in np.unique(y):
class_indexes = np.where(y == class_id)[0]
nb_val_elts = int(class_indexes.shape[0] * validation_split)
val_indexes = class_indexes[:nb_val_elts]
train_indexes = class_indexes[nb_val_elts:]
x_val.append(x[val_indexes])
y_val.append(y[val_indexes])
x_train.append(x[train_indexes])
y_train.append(y[train_indexes])
# !list
x_val, y_val = np.concatenate(x_val), np.concatenate(y_val)
x_train, y_train = np.concatenate(x_train), np.concatenate(y_train)
return x_val, y_val, x_train, y_train
@staticmethod
def _map_new_class_index(y, order):
"""Transforms targets for new class order."""
return np.array(list(map(lambda x: order.index(x), y)))
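        # Illustrative example: with order = [2, 0, 1], a raw label 2 maps to 0 (class 2 is
        # first in the new order), 0 maps to 1 and 1 maps to 2.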
def _select(self, x, y, low_range=0, high_range=0):
idxes = sorted(np.where(np.logical_and(y >= low_range, y < high_range))[0])
if isinstance(x, list):
selected_x = [x[idx] for idx in idxes]
else:
selected_x = x[idxes]
return selected_x, y[idxes]
#--------------------------------
# Get Loader
#--------------------------------
def get_datainc_loader(self, mode='train'):
print(self.data_inc.shape)
train_loader = self._get_loader(self.data_inc, self.targets_inc, mode=mode)
return train_loader
def get_custom_loader_from_memory(self, class_indexes, mode="test"):
if not isinstance(class_indexes, list):
class_indexes = [class_indexes]
data, targets = [], []
for class_index in class_indexes:
class_data, class_targets = self._select(self.data_memory,
self.targets_memory,
low_range=class_index,
high_range=class_index + 1)
data.append(class_data)
targets.append(class_targets)
data = np.concatenate(data)
targets = np.concatenate(targets)
return data, targets, self._get_loader(data, targets, shuffle=False, mode=mode)
def _get_loader(self, x, y, share_memory=None, shuffle=True, mode="train", batch_size=None, resample=None):
if "balanced" in mode:
x, y = construct_balanced_subset(x, y)
batch_size = batch_size if batch_size is not None else self._batch_size
if "train" in mode:
trsf = self.train_transforms
resample_ = self._resampling if resample is None else True
if resample_ is False:
sampler = None
else:
sampler = get_weighted_random_sampler(y)
shuffle = False if resample_ is True else True
elif "test" in mode:
trsf = self.test_transforms
sampler = None
elif mode == "flip":
if "imagenet" in self.dataset_name:
trsf = A.Compose([A.HorizontalFlip(p=1.0), *self.test_transforms.transforms])
else:
trsf = transforms.Compose([transforms.RandomHorizontalFlip(p=1.0), *self.test_transforms.transforms])
sampler = None
else:
raise NotImplementedError("Unknown mode {}.".format(mode))
return DataLoader(DummyDataset(x,
y,
trsf,
trsf_type=self.transform_type,
share_memory_=share_memory,
dataset_name=self.dataset_name),
batch_size=batch_size,
shuffle=shuffle,
num_workers=self._workers,
sampler=sampler,
pin_memory=True)
def get_custom_loader(self, class_indexes, mode="test", data_source="train", imgs=None, tgts=None):
"""Returns a custom loader.
:param class_indexes: A list of class indexes that we want.
:param mode: Various mode for the transformations applied on it.
:param data_source: Whether to fetch from the train, val, or test set.
:return: The raw data and a loader.
"""
if not isinstance(class_indexes, list): # TODO: deprecated, should always give a list
class_indexes = [class_indexes]
if data_source == "train":
x, y = self.data_inc, self.targets_inc
elif data_source == "val":
x, y = self.data_val, self.targets_val
elif data_source == "test":
x, y = self.data_test, self.targets_test
elif data_source == 'specified' and imgs is not None and tgts is not None:
x, y = imgs, tgts
else:
raise ValueError("Unknown data source <{}>.".format(data_source))
data, targets = [], []
for class_index in class_indexes:
class_data, class_targets, = self._select(x, y, low_range=class_index, high_range=class_index + 1)
data.append(class_data)
targets.append(class_targets)
data = np.concatenate(data)
targets = np.concatenate(targets)
return data, targets, self._get_loader(data, targets, shuffle=False, mode=mode)
class DummyDataset(torch.utils.data.Dataset):
def __init__(self, x, y, trsf, trsf_type, share_memory_=None, dataset_name=None):
self.dataset_name = dataset_name
self.x, self.y = x, y
self.trsf = trsf
self.trsf_type = trsf_type
self.manager = mp.Manager()
self.buffer_size = 4000000
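        # Decoded images are cached in a multiprocessing.Manager list so that DataLoader
        # worker processes can reuse one shared copy instead of re-reading files from disk;
        # the cache is capped at buffer_size entries (see __getitem__).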
if share_memory_ is None:
if self.x.shape[0] > self.buffer_size:
self.share_memory = self.manager.list([None for i in range(self.buffer_size)])
else:
self.share_memory = self.manager.list([None for i in range(len(x))])
else:
self.share_memory = share_memory_
def __len__(self):
if isinstance(self.x, list):
return len(self.x)
else:
return self.x.shape[0]
def __getitem__(self, idx):
x, y, = self.x[idx], self.y[idx]
if isinstance(x, np.ndarray):
# assume cifar
x = Image.fromarray(x)
else:
# Assume the dataset is ImageNet
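            # cv2.imread decodes images as BGR, hence the [:, :, ::-1] slice below to get RGB.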
if idx < len(self.share_memory):
if self.share_memory[idx] is not None:
x = self.share_memory[idx]
else:
x = cv2.imread(x)
x = x[:, :, ::-1]
self.share_memory[idx] = x
else:
x = cv2.imread(x)
x = x[:, :, ::-1]
if 'torch' in self.trsf_type:
x = self.trsf(x)
else:
x = self.trsf(image=x)['image']
return x, y
| 14,954
| 37.44473
| 119
|
py
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/inclearn/datasets/__init__.py
| 0
| 0
| 0
|
py
|
|
DER-ClassIL.pytorch
|
DER-ClassIL.pytorch-main/codes/base/main.py
|
'''
@Author : Yan Shipeng, Xie Jiangwei
@Contact: yanshp@shanghaitech.edu.cn, xiejw@shanghaitech.edu.cn
'''
import sys
import os
import os.path as osp
import copy
import time
import shutil
import cProfile
import logging
from pathlib import Path
import numpy as np
import random
from easydict import EasyDict as edict
from tensorboardX import SummaryWriter
repo_name = 'DER-ClassIL.pytorch'
base_dir = osp.realpath(".")[:osp.realpath(".").index(repo_name) + len(repo_name)]
sys.path.insert(0, base_dir)
from sacred import Experiment
ex = Experiment(base_dir=base_dir)
# Save which files
# ex.add_source_file(osp.join(base_dir, "inclearn/models/icarl.py"))
# ex.add_source_file(osp.join(base_dir, "inclearn/lib/data.py"))
# ex.add_source_file(osp.join(base_dir, "inclearn/lib/network.py"))
# ex.add_source_file(osp.join(base_dir, "inclearn/convnet/resnet.py"))
# ex.add_source_file(osp.join(os.getcwd(), "icarl.py"))
# ex.add_source_file(osp.join(os.getcwd(), "network.py"))
# ex.add_source_file(osp.join(os.getcwd(), "resnet.py"))
# MongoDB Observer
# ex.observers.append(MongoObserver.create(url='xx.xx.xx.xx:port', db_name='classil'))
import torch
from inclearn.tools import factory, results_utils, utils
from inclearn.learn.pretrain import pretrain
from inclearn.tools.metrics import IncConfusionMeter
def initialization(config, seed, mode, exp_id):
# Add it if your input size is fixed
# ref: https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936
torch.backends.cudnn.benchmark = True # This will result in non-deterministic results.
# ex.captured_out_filter = lambda text: 'Output capturing turned off.'
cfg = edict(config)
utils.set_seed(cfg['seed'])
if exp_id is None:
exp_id = -1
cfg.exp.savedir = "./logs"
logger = utils.make_logger(f"exp{exp_id}_{cfg.exp.name}_{mode}", savedir=cfg.exp.savedir)
# Tensorboard
exp_name = f'{exp_id}_{cfg["exp"]["name"]}' if exp_id is not None else f'../inbox/{cfg["exp"]["name"]}'
tensorboard_dir = cfg["exp"]["tensorboard_dir"] + f"/{exp_name}"
# If not only save latest tensorboard log.
# if Path(tensorboard_dir).exists():
# shutil.move(tensorboard_dir, cfg["exp"]["tensorboard_dir"] + f"/../inbox/{time.time()}_{exp_name}")
tensorboard = SummaryWriter(tensorboard_dir)
return cfg, logger, tensorboard
@ex.command
def train(_run, _rnd, _seed):
cfg, ex.logger, tensorboard = initialization(_run.config, _seed, "train", _run._id)
ex.logger.info(cfg)
cfg.data_folder = osp.join(base_dir, "data")
start_time = time.time()
_train(cfg, _run, ex, tensorboard)
ex.logger.info("Training finished in {}s.".format(int(time.time() - start_time)))
def _train(cfg, _run, ex, tensorboard):
device = factory.set_device(cfg)
trial_i = cfg['trial']
inc_dataset = factory.get_data(cfg, trial_i)
ex.logger.info("classes_order")
ex.logger.info(inc_dataset.class_order)
model = factory.get_model(cfg, trial_i, _run, ex, tensorboard, inc_dataset)
if _run.meta_info["options"]["--file_storage"] is not None:
_save_dir = osp.join(_run.meta_info["options"]["--file_storage"], str(_run._id))
else:
_save_dir = cfg["exp"]["ckptdir"]
results = results_utils.get_template_results(cfg)
for task_i in range(inc_dataset.n_tasks):
task_info, train_loader, val_loader, test_loader = inc_dataset.new_task()
model.set_task_info(
task=task_info["task"],
total_n_classes=task_info["max_class"],
increment=task_info["increment"],
n_train_data=task_info["n_train_data"],
n_test_data=task_info["n_test_data"],
n_tasks=inc_dataset.n_tasks,
)
model.before_task(task_i, inc_dataset)
# TODO: Move to incmodel.py
if 'min_class' in task_info:
ex.logger.info("Train on {}->{}.".format(task_info["min_class"], task_info["max_class"]))
# Pretraining at step0 if needed
if task_i == 0 and cfg["start_class"] > 0:
do_pretrain(cfg, ex, model, device, train_loader, test_loader)
inc_dataset.shared_data_inc = train_loader.dataset.share_memory
elif task_i < cfg['start_task']:
state_dict = torch.load(f'./ckpts/step{task_i}.ckpt')
model._parallel_network.load_state_dict(state_dict)
inc_dataset.shared_data_inc = train_loader.dataset.share_memory
else:
model.train_task(train_loader, val_loader)
model.after_task(task_i, inc_dataset)
ex.logger.info("Eval on {}->{}.".format(0, task_info["max_class"]))
ypred, ytrue = model.eval_task(test_loader)
acc_stats = utils.compute_accuracy(ypred, ytrue, increments=model._increments, n_classes=model._n_classes)
#Logging
model._tensorboard.add_scalar(f"taskaccu/trial{trial_i}", acc_stats["top1"]["total"], task_i)
_run.log_scalar(f"trial{trial_i}_taskaccu", acc_stats["top1"]["total"], task_i)
_run.log_scalar(f"trial{trial_i}_task_top5_accu", acc_stats["top5"]["total"], task_i)
ex.logger.info(f"top1:{acc_stats['top1']}")
ex.logger.info(f"top5:{acc_stats['top5']}")
results["results"].append(acc_stats)
top1_avg_acc, top5_avg_acc = results_utils.compute_avg_inc_acc(results["results"])
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top1"] = top1_avg_acc
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top5"] = top5_avg_acc
ex.logger.info("Average Incremental Accuracy Top 1: {} Top 5: {}.".format(
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top1"],
_run.info[f"trial{trial_i}"][f"avg_incremental_accu_top5"],
))
if cfg["exp"]["name"]:
results_utils.save_results(results, cfg["exp"]["name"])
def do_pretrain(cfg, ex, model, device, train_loader, test_loader):
if not os.path.exists(osp.join(ex.base_dir, 'pretrain/')):
os.makedirs(osp.join(ex.base_dir, 'pretrain/'))
model_path = osp.join(
ex.base_dir,
"pretrain/{}_{}_cosine_{}_multi_{}_aux{}_nplus1_{}_{}_trial_{}_{}_seed_{}_start_{}_epoch_{}.pth".format(
cfg["model"],
cfg["convnet"],
cfg["weight_normalization"],
cfg["der"],
cfg["use_aux_cls"],
cfg["aux_n+1"],
cfg["dataset"],
cfg["trial"],
cfg["train_head"],
cfg['seed'],
cfg["start_class"],
cfg["pretrain"]["epochs"],
),
)
if osp.exists(model_path):
print("Load pretrain model")
if hasattr(model._network, "module"):
model._network.module.load_state_dict(torch.load(model_path))
else:
model._network.load_state_dict(torch.load(model_path))
else:
pretrain(cfg, ex, model, device, train_loader, test_loader, model_path)
@ex.command
def test(_run, _rnd, _seed):
cfg, ex.logger, tensorboard = initialization(_run.config, _seed, "test", _run._id)
ex.logger.info(cfg)
trial_i = cfg['trial']
cfg.data_folder = osp.join(base_dir, "data")
inc_dataset = factory.get_data(cfg, trial_i)
# inc_dataset._current_task = taski
# train_loader = inc_dataset._get_loader(inc_dataset.data_cur, inc_dataset.targets_cur)
model = factory.get_model(cfg, trial_i, _run, ex, tensorboard, inc_dataset)
model._network.task_size = cfg.increment
test_results = results_utils.get_template_results(cfg)
for taski in range(inc_dataset.n_tasks):
task_info, train_loader, _, test_loader = inc_dataset.new_task()
model.set_task_info(
task=task_info["task"],
total_n_classes=task_info["max_class"],
increment=task_info["increment"],
n_train_data=task_info["n_train_data"],
n_test_data=task_info["n_test_data"],
n_tasks=task_info["max_task"]
)
model.before_task(taski, inc_dataset)
state_dict = torch.load(f'./ckpts/step{taski}.ckpt')
model._parallel_network.load_state_dict(state_dict)
model.eval()
#Build exemplars
model.after_task(taski, inc_dataset)
ypred, ytrue = model.eval_task(test_loader)
test_acc_stats = utils.compute_accuracy(ypred, ytrue, increments=model._increments, n_classes=model._n_classes)
test_results['results'].append(test_acc_stats)
ex.logger.info(f"task{taski} test top1acc:{test_acc_stats['top1']}")
avg_test_acc = results_utils.compute_avg_inc_acc(test_results['results'])
ex.logger.info(f"Test Average Incremental Accuracy: {avg_test_acc}")
if __name__ == "__main__":
# ex.add_config('./codes/base/configs/default.yaml')
ex.add_config("./configs/default.yaml")
ex.run_commandline()
| 8,825
| 37.710526
| 119
|
py
|
TransportNet
|
TransportNet-master/Stable Dynamic & Beckman/dual_func_calculator.py
|
import numpy as np
class PrimalDualCalculator:
def __init__(self, phi_big_oracle, h_oracle, freeflowtimes, capacities, rho = 10.0, mu = 0.25, base_flows = None):
self.links_number = len(freeflowtimes)
self.rho = rho
self.mu = mu
self.freeflowtimes = freeflowtimes #\bar{t}
self.capacities = capacities #\bar{f}
self.phi_big_oracle = phi_big_oracle
self.h_oracle = h_oracle
self.dual_gap_init = None
if mu == 0:
if base_flows is None:
raise TypeError("Admissible flows should be given")
elif np.any(base_flows < 0) or np.any(base_flows >= capacities):
raise ValueError("Admissible flows should be non-negative and less than capacities")
else:
self.base_flows = base_flows
self.alpha = 1 - np.max(base_flows / capacities)
def __call__(self, flows, times):
gap = self.duality_gap(times, flows)
primal = self.primal_func_value(flows)
dual = self.dual_func_value(times)
if self.dual_gap_init is None:
self.dual_gap_init = gap
state_msg = 'Primal_init = {:g}'.format(primal) + \
'\nDual_init = {:g}'.format(dual) + \
'\nDuality_gap_init = {:g}'.format(self.dual_gap_init)
else:
state_msg = 'Primal_func_value = {:g}'.format(primal) + \
'\nDual_func_value = {:g}'.format(dual) + \
'\nDuality_gap = {:g}'.format(gap) + \
'\nDuality_gap / Duality_gap_init = {:g}'.format(gap / self.dual_gap_init)
return primal, dual, gap, state_msg
def dual_func_value(self, times):
return self.phi_big_oracle.func(times) + self.h_oracle.func(times)
def primal_func_value(self, flows):
return self.h_oracle.conjugate_func(flows)
def duality_gap(self, times, flows):
if self.mu > 0:
return self.dual_func_value(times) + self.primal_func_value(flows)
else:
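            # mu == 0 (capacity-constrained case): the current flows may violate capacities,
            # so the gap is evaluated at an admissible point obtained as a convex combination
            # of `flows` with the strictly feasible `base_flows`.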
beta = max(0, np.max(flows / self.capacities) - 1)
admissible_flows = (beta * self.base_flows + self.alpha * flows) / (self.alpha + beta)
return self.dual_func_value(times) + self.primal_func_value(admissible_flows)
def get_flows(self, times):
return - self.phi_big_oracle.grad(times)
    # Used by the Frank-Wolfe method: recovers link travel times from flows via the
    # BPR-style cost function t(f) = freeflowtimes * (1 + rho * (f / capacities)**(1 / mu)).
def get_times(self, flows):
return self.freeflowtimes * (1.0 + self.rho * np.power(flows / self.capacities, 1.0 / self.mu))
| 2,630
| 42.85
| 118
|
py
|
TransportNet
|
TransportNet-master/Stable Dynamic & Beckman/model.py
|
# model parameters:
import copy
import numpy as np
import transport_graph as tg
import oracles
import dual_func_calculator as dfc
from grad_methods import universal_similar_triangles_method as ustm
from grad_methods import universal_gradient_descent_method as ugd
from grad_methods import subgradient_descent_method as sd
from grad_methods import frank_wolfe_method as fwm
from grad_methods import weighted_dual_averages_method as wda
class Model:
node_type = np.int64
def __init__(self, graph_data, graph_correspondences, total_od_flow, mu = 0.25, rho = 0.15):
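        # Expected inputs (inferred from the code below, not from original documentation):
        #   graph_data -- dict with a pandas table under 'graph_table' and a 'links number' entry;
        #   graph_correspondences -- dict mapping each origin node to
        #       {'targets': [destination nodes], 'corrs': [corresponding OD flows]}.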
self.total_od_flow = total_od_flow
self.mu = mu
self.rho = rho
self.inds_to_nodes, self.graph_correspondences, graph_table = self._index_nodes(graph_data['graph_table'],
graph_correspondences)
self.graph = tg.TransportGraph(graph_table, len(self.inds_to_nodes), graph_data['links number'])
def _index_nodes(self, graph_table, graph_correspondences):
table = graph_table.copy()
inits = np.unique(table['init_node'][table['init_node_thru'] == False])
terms = np.unique(table['term_node'][table['term_node_thru'] == False])
through_nodes = np.unique(np.r_[table['init_node'][table['init_node_thru'] == True].to_numpy(),
table['term_node'][table['term_node_thru'] == True].to_numpy()])
nodes = np.concatenate((inits, through_nodes, terms))
nodes_inds = list(zip(nodes, np.arange(len(nodes), dtype = self.node_type)))
init_to_ind = dict(nodes_inds[ : len(inits) + len(through_nodes)])
term_to_ind = dict(nodes_inds[len(inits) : ])
inds_to_nodes = dict(zip(np.arange(len(nodes), dtype = self.node_type), nodes))
table['init_node'] = table['init_node'].map(init_to_ind)
table['term_node'] = table['term_node'].map(term_to_ind)
correspondences = {}
for origin, dests in graph_correspondences.items():
if dests['targets']:
correspondences[init_to_ind[origin]] = \
{'targets' : np.array([term_to_ind[dest] for dest in dests['targets']], dtype = self.node_type),
'corrs' : np.array(dests['corrs'])}
return inds_to_nodes, correspondences, table
def find_equilibrium(self, solver_name = 'ustm', composite = True, solver_kwargs = {}, base_flows = None):
if solver_name == 'fwm':
solver_func = fwm.frank_wolfe_method
starting_msg = 'Frank-Wolfe method...'
elif solver_name == 'ustm':
solver_func = ustm.universal_similar_triangles_method
starting_msg = 'Universal similar triangles method...'
if not 'L_init' in solver_kwargs:
solver_kwargs['L_init'] = self.graph.max_path_length**0.5 * self.total_od_flow
elif solver_name == 'ugd':
solver_func = ugd.universal_gradient_descent_method
starting_msg = 'Universal gradient descent method...'
if not 'L_init' in solver_kwargs:
solver_kwargs['L_init'] = self.graph.max_path_length**0.5 * self.total_od_flow
elif solver_name == 'wda':
solver_func = wda.weighted_dual_averages_method
starting_msg = 'Weighted dual averages method...'
elif solver_name == 'sd':
solver_func = sd.subgradient_descent_method
starting_msg = 'Subgradient descent method...'
else:
raise NotImplementedError('Unknown solver!')
phi_big_oracle = oracles.PhiBigOracle(self.graph, self.graph_correspondences)
h_oracle = oracles.HOracle(self.graph.freeflow_times, self.graph.capacities,
rho = self.rho, mu = self.mu)
primal_dual_calculator = dfc.PrimalDualCalculator(phi_big_oracle, h_oracle,
self.graph.freeflow_times, self.graph.capacities,
rho = self.rho, mu = self.mu, base_flows = base_flows)
if composite == True or solver_name == 'fwm':
if not solver_name == 'fwm':
print('Composite optimization...')
oracle = phi_big_oracle
prox = h_oracle.prox
else:
print('Non-composite optimization...')
oracle = phi_big_oracle + h_oracle
def prox_func(grad, point, A):
"""
Computes argmin_{t: t \in Q} <g, t> + A / 2 * ||t - p||^2
where Q - the feasible set {t: t >= free_flow_times},
A - constant, g - (sub)gradient vector, p - point at which prox is calculated
"""
return np.maximum(point - grad / A, self.graph.freeflow_times)
prox = prox_func
print('Oracles created...')
print(starting_msg)
if solver_name == 'fwm':
result = solver_func(oracle,
primal_dual_calculator,
t_start = self.graph.freeflow_times,
**solver_kwargs)
else:
result = solver_func(oracle, prox,
primal_dual_calculator,
t_start = self.graph.freeflow_times,
**solver_kwargs)
#getting travel times of every non-zero trips between zones:
result['zone travel times'] = {}
for source in self.graph_correspondences:
targets = self.graph_correspondences[source]['targets']
travel_times, _ = self.graph.shortest_distances(source, targets, result['times'])
#mapping nodes' indices to initial nodes' names:
source_nodes = [self.inds_to_nodes[source]] * len(targets)
target_nodes = list(map(self.inds_to_nodes.get, targets))
result['zone travel times'].update(zip(zip(source_nodes, target_nodes), travel_times))
return result
| 6,187
| 50.566667
| 117
|
py
|
TransportNet
|
TransportNet-master/Stable Dynamic & Beckman/oracles.py
|
#import multiprocessing as mp
from collections import defaultdict
#from scipy.misc import logsumexp
import numpy as np
import time
import numba
from numba import njit
from numba.typed import List, Dict
@njit
def get_tree_order(nodes_number, targets, pred_arr):
#get nodes visiting order for flow calculation
visited = np.zeros(nodes_number, dtype = np.bool_)
    # numba typed lists infer their element type from the first append, so the
    # append-then-clear idiom below fixes the type from targets[0] before use.
    sorted_vertices = List()
    sorted_vertices.append(targets[0])
    sorted_vertices.clear()
    for vertex in targets:
        temp = List()
        temp.append(targets[0])
        temp.clear()
while (not visited[vertex]):
visited[vertex] = True
if pred_arr[vertex] != vertex:
temp.append(vertex)
vertex = pred_arr[vertex]
temp.extend(sorted_vertices)
sorted_vertices = temp
return sorted_vertices
# @njit
# def get_tree_order(nodes_number, targets, pred_arr):
# #get nodes visiting order for flow calculation
# visited = np.zeros(nodes_number, dtype = np.bool_)
# sorted_vertices = [0] * 0
# for vertex in targets:
# temp = []
# while (not visited[vertex]):
# visited[vertex] = True
# if pred_arr[vertex] != vertex:
# temp.append(vertex)
# vertex = pred_arr[vertex]
# sorted_vertices[0:0] = temp
# return sorted_vertices
@njit
def get_flows(nodes_number, edges_number, targets, target_flows,
pred_arr, sorted_vertices, pred_to_edges, t_parameter):
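    # Pushes each destination's demand back along the shortest-path tree: vertices are
    # processed before their predecessors (the order produced by get_tree_order), the demand
    # accumulated at a vertex is routed over the cheapest parallel edge to its predecessor,
    # and then added to the predecessor's accumulated demand.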
flows = np.zeros(edges_number, dtype = target_flows.dtype)
vertex_flows = np.zeros(nodes_number, dtype = target_flows.dtype)
vertex_flows[targets] = target_flows
for vertex in sorted_vertices:
pred = pred_arr[vertex]
edges = pred_to_edges[vertex][pred]
edge = edges[np.argmin(t_parameter[edges])]
flows[edge] = vertex_flows[vertex]
vertex_flows[pred] += vertex_flows[vertex]
return flows
class BaseOracle(object):
"""
Base class for implementation of oracles.
"""
def func(self, x):
"""
Computes the value of function at point x.
"""
raise NotImplementedError('Func oracle is not implemented.')
def grad(self, x):
"""
Computes the gradient at point x.
"""
raise NotImplementedError('Grad oracle is not implemented.')
def prox(self, p, A, x_start = None):
"""
Calculates prox of the function f(x) at point p:
prox_f (p) = argmin_{x in Q} 0.5 ||x - p||^2 + A * f(x)
p - point
Q - feasible set
A - constant
x_start - start point for iterative minimization method
"""
raise NotImplementedError('Prox of the function is not implemented.')
def __add__(self, other):
return AdditiveOracle(self, other)
class AdditiveOracle(BaseOracle):
def __init__(self, *oracles):
self.oracles = oracles
def func(self, x):
func = 0
for oracle in self.oracles:
func += oracle.func(x)
return func
def grad(self, x):
grad = np.zeros(len(x))
for oracle in self.oracles:
grad += oracle.grad(x)
return grad
@property
def time(self): #getter
return np.sum([oracle.time for oracle in self.oracles])
class AutomaticOracle(BaseOracle):
"""
Oracle for automatic calculations of function kGamma * \Psi (t)
kGamma -> +0
"""
def __init__(self, source, graph, source_correspondences, pred_to_edges):
self.graph = graph
self.source = source
self.corr_targets = np.array(source_correspondences['targets'])
self.corr_values = np.array(source_correspondences['corrs'])
self.pred_to_edges = pred_to_edges
self.flows = None
self.distances = None
self.time = 0
def func(self, t_parameter):
self.update_shortest_paths(t_parameter)
return - np.dot(self.distances, self.corr_values)
    # NB: grad() reuses the pred_map produced by func()/update_shortest_paths, so func must
    # be evaluated at the same t_parameter first.
def grad(self, t_parameter):
sorted_vertices = get_tree_order(self.graph.nodes_number, self.corr_targets, self.pred_map)
flows = get_flows(self.graph.nodes_number, self.graph.links_number,
self.corr_targets, self.corr_values,
self.pred_map, sorted_vertices, self.pred_to_edges, t_parameter)
return - flows
def update_shortest_paths(self, t_parameter):
tic = time.time()
self.distances, self.pred_map = self.graph.shortest_distances(self.source, self.corr_targets, t_parameter)
self.time += time.time() - tic
def get_pred_to_edges(graph):
pred_to_edges = List()
dtype = graph.edges.dtype
numba_type = numba.from_dtype(dtype)
for node in range(graph.nodes_number):
temp_dict = {}
for source, _, edge in graph.in_edges(node):
if source in temp_dict:
temp_dict[source].append(edge)
else:
temp_dict[source] = [edge]
pred_to_edges_cur = Dict.empty(key_type = numba_type, value_type = numba_type[:])
for source in temp_dict:
pred_to_edges_cur[source] = np.array(temp_dict[source], dtype = dtype)
pred_to_edges.append(pred_to_edges_cur)
return pred_to_edges
class PhiBigOracle(BaseOracle):
def __init__(self, graph, correspondences, processes_number = None):
self.graph = graph
self.correspondences = correspondences
if processes_number:
self.processes_number = processes_number
else:
self.processes_number = len(correspondences)
self.t_current = self.func_current = self.grad_current = None
pred_to_edges = get_pred_to_edges(graph)
self.auto_oracles = []
for source, source_correspondences in self.correspondences.items():
self.auto_oracles.append(AutomaticOracle(source, self.graph, source_correspondences, pred_to_edges))
self.time = 0.0
def _reset(self, t_parameter):
tic = time.time()
self.t_current = t_parameter
self.func_current = 0.0
self.auto_oracles_time = 0
for auto_oracle in self.auto_oracles:
self.func_current += auto_oracle.func(self.t_current)
self.auto_oracles_time += auto_oracle.time
self.time += time.time() - tic
def func(self, t_parameter):
if self.t_current is None or np.any(self.t_current != t_parameter):
self._reset(t_parameter)
return self.func_current
def grad(self, t_parameter):
if self.t_current is None or np.any(self.t_current != t_parameter):
self._reset(t_parameter)
tic = time.time()
self.t_current = t_parameter
self.grad_current = np.zeros(self.graph.links_number)
self.auto_oracles_time = 0
for auto_oracle in self.auto_oracles:
self.grad_current += auto_oracle.grad(self.t_current)
self.auto_oracles_time += auto_oracle.time
self.time += time.time() - tic
return self.grad_current
#Newton's method for HOracle
@njit
def newton(x_0_arr, a_arr, mu,
tol = 1e-7, max_iter = 1000):
"""
Newton method for equation x - x_0 + a x^mu = 0, x >= 0
"""
res = np.empty(len(x_0_arr), dtype = np.float_)
for i in range(len(x_0_arr)):
x_0 = x_0_arr[i]
a = a_arr[i]
if x_0 <= 0:
res[i] = 0
continue
x = min(x_0, (x_0 / a) ** (1 / mu))
for it in range(max_iter):
x_next = x - f(x, x_0, a, mu) / der_f(x, x_0, a, mu)
if x_next <= 0:
x_next = 0.1 * x
x = x_next
if np.abs(f(x, x_0, a, mu)) < tol:
break
res[i] = x
return res
@njit
def f(x, x_0, a, mu):
return x - x_0 + a * x ** mu
@njit
def der_f(x, x_0, a, mu):
return 1.0 + a * mu * x ** (mu - 1)
class HOracle(BaseOracle):
def __init__(self, freeflowtimes, capacities, rho = 10.0, mu = 0.25):
self.links_number = len(freeflowtimes)
self.rho = rho
self.mu = mu
self.freeflowtimes = np.copy(freeflowtimes)
self.capacities = np.copy(capacities)
with np.testing.suppress_warnings() as sup:
sup.filter(RuntimeWarning)
self.func_coef = self.capacities / (self.rho * self.freeflowtimes) ** self.mu / (1.0 + self.mu)
self.grad_coef = self.capacities / (self.rho * self.freeflowtimes) ** self.mu
# for centroid free_flow_time =0 -> func_coef = 0, grad_coef = 0
self.func_coef = np.where(self.freeflowtimes == 0, 0, self.func_coef)
self.grad_coef = np.where(self.freeflowtimes == 0, 0, self.grad_coef)
self.time = 0
def func(self, t_parameter):
"""
Computes value of the function h(times) = \sum_i sigma^*(times[i])
"""
if self.mu == 0:
h_func = np.dot(self.capacities, np.maximum(t_parameter - self.freeflowtimes, 0))
else:
h_func = np.dot(self.func_coef, np.maximum(t_parameter - self.freeflowtimes, 0) ** (1.0 + self.mu))
# np.sum(self.capacities * (t_parameter - self.freeflowtimes) *
# (np.maximum(t_parameter - self.freeflowtimes, 0.0) /
# (self.rho * self.freeflowtimes)) ** self.mu) / (1.0 + self.mu)
return h_func
def conjugate_func(self, flows):
"""
Computes the conjugate of the function h(t):
h^*(flows) = \sum_i sigma(flows[i]), since h(t) is a separable function
"""
if self.mu == 0:
return np.dot(self.freeflowtimes, flows)
else:
return np.dot(self.freeflowtimes * flows,
self.rho * self.mu / (1.0 + self.mu) *
(flows / self.capacities) ** (1.0 / self.mu) + 1.0)
def grad(self, t_parameter):
if self.mu == 0:
h_grad = self.capacities
else:
h_grad = self.capacities * (np.maximum(t_parameter - self.freeflowtimes, 0.0) /
(self.rho * self.freeflowtimes)) ** self.mu
return h_grad
def prox(self, grad, point, A):
"""
Computes argmin_{t: t \in Q} <g, t> + A / 2 * ||t - p||^2 + h(t)
where Q - the feasible set {t: t >= free_flow_times},
A - constant, g - (sub)gradient vector, p - point at which prox is calculated
"""
#rewrite as A/2 ||t - p_new||^2 + h(t)
point_new = point - grad / A
if self.mu == 0:
return np.maximum(point_new - self.capacities / A, self.freeflowtimes)
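        # For mu in {1, 0.5, 0.25} the scalar equation solved below reduces to a low-degree
        # polynomial root (linear, quadratic, quartic respectively), so closed-form branches
        # could be added here; the placeholders below currently fall through to the generic
        # Newton solver.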
elif self.mu == 1:
pass
elif self.mu == 0.5:
pass
elif self.mu == 0.25:
pass
#rewrite as x - x_0 + a x^mu = 0, x >= 0
#where x = (t - bar{t})/(bar{t} * rho), x_0 = (p_new - bar{t})/(bar{t} * rho),
# a = bar{f} / (A * bar{t} * rho)
x = newton(x_0_arr = (point_new - self.freeflowtimes) / (self.rho * self.freeflowtimes),
a_arr = self.capacities / (A * self.rho * self.freeflowtimes),
mu = self.mu)
argmin = (1 + self.rho * x) * self.freeflowtimes
return argmin
| 11,657
| 34.327273
| 114
|
py
|
TransportNet
|
TransportNet-master/Stable Dynamic & Beckman/transport_graph.py
|
# Attention: in this module
# nodes are indexed from 0 to nodes_number - 1
# edges are indexed from 0 to links_number - 1
import graph_tool.all as gt
import graph_tool.topology as gtt
import numpy as np
import math
class TransportGraph:
def __init__(self, graph_table, nodes_number, links_number, maxpath_const = 3):
self.nodes_number = nodes_number
self.links_number = links_number
self.max_path_length = maxpath_const * int(math.sqrt(self.links_number))
self.graph = gt.Graph(directed=True)
#nodes indexed from 0 to V-1
vlist = self.graph.add_vertex(self.nodes_number)
# let's create some property maps
ep_freeflow_time = self.graph.new_edge_property("double")
ep_capacity = self.graph.new_edge_property("double")
#define data for edge properties
self.capacities = np.array(graph_table[['capacity']], dtype = 'float64').flatten()
self.freeflow_times = np.array(graph_table[['free_flow_time']], dtype = 'float64').flatten()
#adding edges to the graph
self.inits = np.array(graph_table[['init_node']]).flatten()
self.terms = np.array(graph_table[['term_node']]).flatten()
for index in range(self.links_number):
init = self.inits[index]
term = self.terms[index]
edge = self.graph.add_edge(self.graph.vertex(init),
self.graph.vertex(term))
ep_freeflow_time[edge] = self.freeflow_times[index]
ep_capacity[edge] = self.capacities[index]
#save properties to graph
self.graph.edge_properties["freeflow_times"] = ep_freeflow_time
self.graph.edge_properties["capacities"] = ep_capacity
@property
def edges(self):
return self.graph.get_edges([self.graph.edge_index])
def successors(self, node):
return self.graph.get_out_neighbors(node)
def predecessors(self, node):
return self.graph.get_in_neighbors(node)
#source, target and index of an edge
def in_edges(self, node):
return self.graph.get_in_edges(node, [self.graph.edge_index])
#source, target and index of an edge
def out_edges(self, node):
return self.graph.get_out_edges(node, [self.graph.edge_index])
def shortest_distances(self, source, targets, times):
if targets is None:
targets = np.arange(self.nodes_number)
ep_time_map = self.graph.new_edge_property("double", vals = times)
distances, pred_map = gtt.shortest_distance(g = self.graph,
source = source,
target = targets,
weights = ep_time_map,
pred_map = True)
return distances, pred_map.get_array()
# def nodes_number(self):
# return self.nodes_number
# def links_number(self):
# return self.links_number
# def capacities(self):
# return self.capacities
# def freeflow_times(self):
# return self.freeflow_times
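# Minimal usage sketch (assumes a pandas graph_table whose init_node / term_node columns
# are already 0-based node indices, as prepared elsewhere in this repo):
# graph = TransportGraph(graph_table, nodes_number, links_number)
# distances, pred_map = graph.shortest_distances(source = 0, targets = None,
#                                                times = graph.freeflow_times)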
| 3,201
| 38.04878
| 102
|
py
|
TransportNet
|
TransportNet-master/Stable Dynamic & Beckman/data_handler.py
|
from scanf import scanf
import re
import numpy as np
import pandas as pd
#TODO: DOCUMENTATION!!!
class DataHandler:
def GetGraphData(self, file_name, columns):
graph_data = {}
metadata = ''
with open(file_name, 'r') as myfile:
for index, line in enumerate(myfile):
if re.search(r'^~', line) is not None:
skip_lines = index + 1
headlist = re.findall(r'[\w]+', line)
break
else:
metadata += line
graph_data['nodes number'] = scanf('<NUMBER OF NODES> %d', metadata)[0]
graph_data['links number'] = scanf('<NUMBER OF LINKS> %d', metadata)[0]
graph_data['zones number'] = scanf('<NUMBER OF ZONES> %d', metadata)[0]
first_thru_node = scanf('<FIRST THRU NODE> %d', metadata)[0]
dtypes = {'init_node' : np.int32, 'term_node' : np.int32, 'capacity' : np.float64, 'length': np.float64,
'free_flow_time': np.float64, 'b': np.float64, 'power': np.float64, 'speed': np.float64,'toll': np.float64,
'link_type' : np.int32}
df = pd.read_csv(file_name, names = headlist, dtype = dtypes, skiprows = skip_lines, sep = r'[\s;]+', engine='python',
index_col = False)
df = df[columns]
df.insert(loc = list(df).index('init_node') + 1, column = 'init_node_thru', value = (df['init_node'] >= first_thru_node))
df.insert(loc = list(df).index('term_node') + 1, column = 'term_node_thru', value = (df['term_node'] >= first_thru_node))
graph_data['graph_table'] = df
return graph_data
def GetGraphCorrespondences(self, file_name):
with open(file_name, 'r') as myfile:
trips_data = myfile.read()
total_od_flow = scanf('<TOTAL OD FLOW> %f', trips_data)[0]
#zones_number = scanf('<NUMBER OF ZONES> %d', trips_data)[0]
origins_data = re.findall(r'Origin[\s\d.:;]+', trips_data)
graph_correspondences = {}
for data in origins_data:
origin_index = scanf('Origin %d', data)[0]
origin_correspondences = re.findall(r'[\d]+\s+:[\d.\s]+;', data)
targets = []
corrs_vals = []
for line in origin_correspondences:
target, corrs = scanf('%d : %f', line)
targets.append(target)
corrs_vals.append(corrs)
graph_correspondences[origin_index] = {'targets' : targets, 'corrs' : corrs_vals}
return graph_correspondences, total_od_flow
def ReadAnswer(self, filename):
with open(filename) as myfile:
lines = myfile.readlines()
lines = lines[1 :]
flows = []
times = []
for line in lines:
_, _, flow, time = scanf('%d %d %f %f', line)
flows.append(flow)
times.append(time)
return {'flows' : flows, 'times' : times}
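# Minimal usage sketch (the TNTP file names below are placeholders, not files shipped here):
# handler = DataHandler()
# graph_data = handler.GetGraphData('SiouxFalls_net.tntp',
#                                   columns = ['init_node', 'term_node', 'capacity', 'free_flow_time'])
# graph_correspondences, total_od_flow = handler.GetGraphCorrespondences('SiouxFalls_trips.tntp')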
| 3,029
| 40.506849
| 129
|
py
|
TransportNet
|
TransportNet-master/Stable Dynamic & Beckman/history.py
|
class History():
"""
history handler
"""
def __init__(self, *attributes):
self.dict = {}
self.attributes = list(attributes)
for attribute in self.attributes:
self.dict[attribute] = []
def update(self, *values):
for index, value in enumerate(values):
self.dict[self.attributes[index]].append(value)
def get(self, attribute):
return self.dict[attribute]
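# Minimal usage sketch:
# history = History('iter', 'dual_gap')
# history.update(0, 1.0)
# history.update(1, 0.5)
# history.get('dual_gap')  # -> [1.0, 0.5]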
| 461
| 27.875
| 59
|
py
|
TransportNet
|
TransportNet-master/Stable Dynamic & Beckman/grad_methods/subgradient_descent_method.py
|
import numpy as np
from history import History
def subgradient_descent_method(oracle, prox, primal_dual_oracle,
t_start, max_iter = 1000,
eps = 1e-5, eps_abs = None, stop_crit = 'dual_gap_rel',
verbose_step = 100, verbose = False, save_history = False):
if stop_crit == 'dual_gap_rel':
def crit():
return duality_gap <= eps * duality_gap_init
elif stop_crit == 'dual_gap':
def crit():
return duality_gap <= eps_abs
elif stop_crit == 'max_iter':
def crit():
return it_counter == max_iter
elif callable(stop_crit):
crit = stop_crit
else:
raise ValueError("stop_crit should be callable or one of the following names: \
'dual_gap', 'dual_gap_rel', 'max iter'")
A = 0.0
t = np.copy(t_start)
t_weighted = np.zeros(len(t_start))
flows_weighted = - oracle.grad(t_start)
primal, dual, duality_gap_init, state_msg = primal_dual_oracle(flows_weighted, t_start)
if eps_abs is None:
eps_abs = eps * duality_gap_init
if verbose:
print(state_msg)
if save_history:
history = History('iter', 'primal_func', 'dual_func', 'dual_gap')
history.update(0, primal, dual, duality_gap_init)
success = False
for it_counter in range(1, max_iter+1):
grad_t = oracle.grad(t)
flows = primal_dual_oracle.get_flows(t)
alpha = eps_abs / np.linalg.norm(grad_t)**2
t = prox(grad_t, t, 1.0 / alpha)
A += alpha
t_weighted = ((A - alpha) * t_weighted + alpha * t) / A
flows_weighted = ((A - alpha) * flows_weighted + alpha * flows) / A
primal, dual, duality_gap, state_msg = primal_dual_oracle(flows_weighted, t_weighted)
if save_history:
history.update(it_counter, primal, dual, duality_gap)
if verbose and (it_counter % verbose_step == 0):
print('\nIterations number: {:d}'.format(it_counter))
print(state_msg, flush = True)
if crit():
success = True
break
result = {'times': t_weighted, 'flows': flows_weighted,
'iter_num': it_counter,
'res_msg' : 'success' if success else 'iterations number exceeded'}
if save_history:
result['history'] = history.dict
if verbose:
print('\nResult: ' + result['res_msg'])
print('Total iters: ' + str(it_counter))
print(state_msg)
print('Oracle elapsed time: {:.0f} sec'.format(oracle.time))
return result
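# Minimal usage sketch (assumes PhiBigOracle / HOracle from oracles.py and a primal-dual
# calculator with a get_flows method, built over a TransportGraph as elsewhere in this repo):
# result = subgradient_descent_method(phi_big_oracle, h_oracle.prox, primal_dual_calculator,
#                                     t_start = graph.freeflow_times,
#                                     max_iter = 1000, eps = 1e-5, verbose = True)
# result['times'], result['flows'], result['iter_num']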
| 2,664
| 38.191176
| 94
|
py
|
TransportNet
|
TransportNet-master/Stable Dynamic & Beckman/grad_methods/weighted_dual_averages_method.py
|
from math import sqrt
import numpy as np
from history import History
def weighted_dual_averages_method(oracle, prox, primal_dual_oracle,
t_start, max_iter = 1000,
eps = 1e-5, eps_abs = None, stop_crit = 'dual_gap_rel',
verbose_step = 100, verbose = False, save_history = False):
if stop_crit == 'dual_gap_rel':
def crit():
return duality_gap <= eps * duality_gap_init
elif stop_crit == 'dual_gap':
def crit():
return duality_gap <= eps_abs
elif stop_crit == 'max_iter':
def crit():
return it_counter == max_iter
elif callable(stop_crit):
crit = stop_crit
else:
raise ValueError("stop_crit should be callable or one of the following names: \
'dual_gap', 'dual_gap_rel', 'max iter'")
A = 0.0
t = np.copy(t_start)
grad_sum = np.zeros(len(t_start))
beta_seq = 1.0
rho_wda = np.sqrt(2) * np.linalg.norm(t_start)
flows_weighted = primal_dual_oracle.get_flows(t_start)
t_weighted = np.copy(t_start)
primal, dual, duality_gap_init, state_msg = primal_dual_oracle(flows_weighted, t_weighted)
if save_history:
history = History('iter', 'primal_func', 'dual_func', 'dual_gap')
history.update(0, primal, dual, duality_gap_init)
if verbose:
print(state_msg)
success = False
for it_counter in range(1, max_iter+1):
grad_t = oracle.grad(t)
flows = primal_dual_oracle.get_flows(t) #grad() is called here
alpha = 1 / np.linalg.norm(grad_t)
A += alpha
grad_sum += alpha * grad_t
beta_seq = 1 if it_counter == 1 else beta_seq + 1.0 / beta_seq
beta = beta_seq / rho_wda
t = prox(grad_sum / A, t_start, beta / A)
t_weighted = (t_weighted * (A - alpha) + t * alpha) / A
flows_weighted = (flows_weighted * (A - alpha) + flows * alpha) / A
primal, dual, duality_gap, state_msg = primal_dual_oracle(flows_weighted, t_weighted)
if save_history:
history.update(it_counter, primal, dual, duality_gap)
if verbose and (it_counter % verbose_step == 0):
print('\nIterations number: {:d}'.format(it_counter))
print(state_msg, flush = True)
if crit():
success = True
break
result = {'times': t_weighted, 'flows': flows_weighted,
'iter_num': it_counter,
'res_msg': 'success' if success else 'iterations number exceeded'}
if save_history:
result['history'] = history.dict
if verbose:
print('\nResult: ' + result['res_msg'])
print('Total iters: ' + str(it_counter))
print(state_msg)
print('Oracle elapsed time: {:.0f} sec'.format(oracle.time))
return result
| 2,922
| 36.961039
| 94
|
py
|
TransportNet
|
TransportNet-master/Stable Dynamic & Beckman/grad_methods/frank_wolfe_method.py
|
from math import sqrt
import numpy as np
from history import History
def frank_wolfe_method(oracle, primal_dual_oracle,
t_start, max_iter = 1000,
eps = 1e-5, eps_abs = None, stop_crit = 'dual_gap_rel',
verbose_step = 100, verbose = False, save_history = False):
if stop_crit == 'dual_gap_rel':
def crit():
return duality_gap <= eps * duality_gap_init
elif stop_crit == 'dual_gap':
def crit():
return duality_gap <= eps_abs
elif stop_crit == 'max_iter':
def crit():
return it_counter == max_iter
elif callable(stop_crit):
crit = stop_crit
else:
raise ValueError("stop_crit should be callable or one of the following names: \
'dual_gap', 'dual_gap_rel', 'max iter'")
t = None
flows = - oracle.grad(t_start)
t_weighted = np.copy(t_start)
primal, dual, duality_gap_init, state_msg = primal_dual_oracle(flows, t_weighted)
if save_history:
history = History('iter', 'primal_func', 'dual_func', 'dual_gap')
history.update(0, primal, dual, duality_gap_init)
if verbose:
print(state_msg)
if eps_abs is None:
eps_abs = eps * duality_gap_init
success = False
for it_counter in range(1, max_iter+1):
t = primal_dual_oracle.get_times(flows)
y_parameter = primal_dual_oracle.get_flows(t)
gamma = 2.0 / (it_counter + 1)
flows = (1.0 - gamma) * flows + gamma * y_parameter
t_weighted = (1.0 - gamma) * t_weighted + gamma * t
primal, dual, duality_gap, state_msg = primal_dual_oracle(flows, t_weighted)
if save_history:
history.update(it_counter, primal, dual, duality_gap)
if verbose and (it_counter % verbose_step == 0):
print('\nIterations number: {:d}'.format(it_counter))
print(state_msg, flush = True)
if crit():
success = True
break
result = {'times': t_weighted, 'flows': flows,
'iter_num': it_counter,
'res_msg' : 'success' if success else 'iterations number exceeded'}
if save_history:
result['history'] = history.dict
if verbose:
print('\nResult: ' + result['res_msg'])
print('Total iters: ' + str(it_counter))
print(state_msg)
print('Oracle elapsed time: {:.0f} sec'.format(oracle.time))
return result
| 2,500
| 38.078125
| 87
|
py
|
TransportNet
|
TransportNet-master/Stable Dynamic & Beckman/grad_methods/universal_similar_triangles_method.py
|
from math import sqrt
import numpy as np
from history import History
def universal_similar_triangles_method(oracle, prox, primal_dual_oracle,
t_start, L_init = None, max_iter = 1000,
eps = 1e-5, eps_abs = None, stop_crit = 'dual_gap_rel',
verbose_step = 100, verbose = False, save_history = False):
if stop_crit == 'dual_gap_rel':
def crit():
return duality_gap <= eps * duality_gap_init
elif stop_crit == 'dual_gap':
def crit():
return duality_gap <= eps_abs
elif stop_crit == 'max_iter':
def crit():
return it_counter == max_iter
elif callable(stop_crit):
crit = stop_crit
else:
raise ValueError("stop_crit should be callable or one of the following names: \
'dual_gap', 'dual_gap_rel', 'max iter'")
L_value = L_init if L_init is not None else np.linalg.norm(oracle.grad(t_start))
A_prev = 0.0
y_start = u_prev = t_prev = np.copy(t_start)
A = u = t = y = None
grad_sum = None
grad_sum_prev = np.zeros(len(t_start))
flows_weighted = primal_dual_oracle.get_flows(y_start)
primal, dual, duality_gap_init, state_msg = primal_dual_oracle(flows_weighted, y_start)
if save_history:
history = History('iter', 'primal_func', 'dual_func', 'dual_gap', 'inner_iters')
history.update(0, primal, dual, duality_gap_init, 0)
if verbose:
print(state_msg)
if eps_abs is None:
eps_abs = eps * duality_gap_init
success = False
inner_iters_num = 0
for it_counter in range(1, max_iter+1):
while True:
inner_iters_num += 1
alpha = 0.5 / L_value + sqrt(0.25 / L_value**2 + A_prev / L_value)
A = A_prev + alpha
y = (alpha * u_prev + A_prev * t_prev) / A
grad_y = oracle.grad(y)
flows = primal_dual_oracle.get_flows(y) #grad() is called here
grad_sum = grad_sum_prev + alpha * grad_y
u = prox(grad_sum / A, y_start, 1.0 / A)
t = (alpha * u + A_prev * t_prev) / A
left_value = (oracle.func(y) + np.dot(grad_y, t - y) +
0.5 * alpha / A * eps_abs) - oracle.func(t)
right_value = - 0.5 * L_value * np.sum((t - y)**2)
if left_value >= right_value:
break
else:
L_value *= 2
A_prev = A
L_value /= 2
t_prev = t
u_prev = u
grad_sum_prev = grad_sum
flows_weighted = (flows_weighted * (A - alpha) + flows * alpha ) / A
primal, dual, duality_gap, state_msg = primal_dual_oracle(flows_weighted, t)
if save_history:
history.update(it_counter, primal, dual, duality_gap, inner_iters_num)
if verbose and (it_counter % verbose_step == 0):
print('\nIterations number: {:d}'.format(it_counter))
print('Inner iterations number: {:d}'.format(inner_iters_num))
print(state_msg, flush = True)
if crit():
success = True
break
result = {'times': t, 'flows': flows_weighted,
'iter_num': it_counter,
'res_msg': 'success' if success else 'iterations number exceeded'}
if save_history:
result['history'] = history.dict
if verbose:
print('\nResult: ' + result['res_msg'])
print('Total iters: ' + str(it_counter))
print(state_msg)
print('Oracle elapsed time: {:.0f} sec'.format(oracle.time))
return result
#print('Dijkstra elapsed time: {:.0f} sec'.format(oracle.auto_oracles_time))
#criteria: stable dynamic 'dual_threshold' AND 'primal_threshold', 'dual_rel' AND 'primal_rel'.
#beckman : + 'dual_gap_rel', 'dual_gap_threshold', 'primal_threshold', 'primal_rel'
#criteria: 'star_solution_residual',
#practice: 'dual_rel'
# if crit_name == 'dual_gap_rel':
# def crit():
# nonlocal duality_gap, duality_gap_init, eps
# return duality_gap < eps * duality_gap_init
# if crit_name == 'dual_rel':
# def crit():
# nonlocal dual_func_history, eps
# l = len(dual_func_history)
# return dual_func_history[l // 2] - dual_func_history[-1] \
# < eps * (dual_func_history[0] - dual_func_history[-1])
# if crit_name == 'primal_rel':
# def crit():
# nonlocal primal_func_history, eps
# l = len(primal_func_history)
# return primal_func_history[l // 2] - primal_func_history[-1] \
# < eps * (primal_func_history[0] - primal_func_history[-1])
| 4,828
| 37.632
| 98
|
py
|
TransportNet
|
TransportNet-master/Stable Dynamic & Beckman/grad_methods/__init__.py
| 1
| 0
| 0
|
py
|
|
TransportNet
|
TransportNet-master/Stable Dynamic & Beckman/grad_methods/universal_gradient_descent_method.py
|
import numpy as np
from history import History
def universal_gradient_descent_method(oracle, prox, primal_dual_oracle,
t_start, L_init = None, max_iter = 1000,
eps = 1e-5, eps_abs = None, stop_crit = 'dual_gap_rel',
verbose_step = 100, verbose = False, save_history = False):
if stop_crit == 'dual_gap_rel':
def crit():
return duality_gap <= eps * duality_gap_init
elif stop_crit == 'dual_gap':
def crit():
return duality_gap <= eps_abs
elif stop_crit == 'max_iter':
def crit():
return it_counter == max_iter
elif callable(stop_crit):
crit = stop_crit
else:
raise ValueError("stop_crit should be callable or one of the following names: \
'dual_gap', 'dual_gap_rel', 'max iter'")
L_value = L_init if L_init is not None else np.linalg.norm(oracle.grad(t_start))
A = 0.0
t_prev = np.copy(t_start)
t = None
flows_weighted = primal_dual_oracle.get_flows(t_start)
t_weighted = np.copy(t_start)
primal, dual, duality_gap_init, state_msg = primal_dual_oracle(flows_weighted, t_weighted)
if save_history:
history = History('iter', 'primal_func', 'dual_func', 'dual_gap', 'inner_iters')
history.update(0, primal, dual, duality_gap_init, 0)
if verbose:
print(state_msg)
if eps_abs is None:
eps_abs = eps * duality_gap_init
success = False
inner_iters_num = 0
for it_counter in range(1, max_iter+1):
while True:
inner_iters_num += 1
alpha = 1 / L_value
grad_t = oracle.grad(t_prev)
flows = primal_dual_oracle.get_flows(t_prev) #grad() is called here
t = prox(grad_t, t_prev, 1.0 / alpha)
left_value = (oracle.func(t_prev) + np.dot(grad_t, t - t_prev) +
0.5 * eps_abs) - oracle.func(t)
right_value = - 0.5 * L_value * np.sum((t - t_prev)**2)
if left_value >= right_value:
break
else:
L_value *= 2
L_value /= 2
t_prev = t
A += alpha
t_weighted = (t_weighted * (A - alpha) + t * alpha) / A
flows_weighted = (flows_weighted * (A - alpha) + flows * alpha ) / A
primal, dual, duality_gap, state_msg = primal_dual_oracle(flows_weighted, t_weighted)
if save_history:
history.update(it_counter, primal, dual, duality_gap, inner_iters_num)
if verbose and (it_counter % verbose_step == 0):
print('\nIterations number: {:d}'.format(it_counter))
print('Inner iterations number: {:d}'.format(inner_iters_num))
print(state_msg, flush = True)
if crit():
success = True
break
result = {'times': t_weighted, 'flows': flows_weighted,
'iter_num': it_counter,
'res_msg': 'success' if success else 'iterations number exceeded'}
if save_history:
result['history'] = history.dict
if verbose:
print('\nResult: ' + result['res_msg'])
print('Total iters: ' + str(it_counter))
print(state_msg)
print('Oracle elapsed time: {:.0f} sec'.format(oracle.time))
return result
| 3,435
| 38.045455
| 97
|
py
|
TransportNet
|
TransportNet-master/Stochastic Nash-Wardrop equilibrium/dual_func_calculator.py
|
import numpy as np
#from numba import jit
class PrimalDualCalculator:
def __init__(self, phi_big_oracle, h_oracle, freeflowtimes, capacities, rho = 10.0, mu = 0.25):
self.links_number = len(freeflowtimes)
self.rho = rho
self.mu = mu
self.freeflowtimes = freeflowtimes #\bar{t}
self.capacities = capacities #\bar{f}
self.phi_big_oracle = phi_big_oracle
self.h_oracle = h_oracle
self.dual_gap_init = None
self.A = self.entropy_weighted = 0.
def __call__(self, flows, times, alpha = 0.):
primal = self.primal_func_value(flows, times, alpha)
dual = self.dual_func_value(times)
gap = primal + dual
if self.dual_gap_init is None:
self.dual_gap_init = gap
state_msg = 'Primal_init = {:g}'.format(primal) + \
'\nDual_init = {:g}'.format(dual) + \
'\nDuality_gap_init = {:g}'.format(self.dual_gap_init)
else:
state_msg = 'Primal_func_value = {:g}'.format(primal) + \
'\nDual_func_value = {:g}'.format(dual) + \
'\nDuality_gap = {:g}'.format(gap) + \
'\nDuality_gap / Duality_gap_init = {:g}'.format(gap / self.dual_gap_init)
return primal, dual, gap, state_msg
# @jit
def dual_func_value(self, times):
assert(not np.any(np.isnan(times)))
return self.phi_big_oracle.func(times) + self.h_oracle.func(times)
# @jit
def primal_func_value(self, flows, times, alpha):
#upper estimate of the primal function when gamma > 0
self.A += alpha
if self.A == 0:
self.entropy_weighted = self.phi_big_oracle.entropy(times)
else:
self.entropy_weighted = ((self.A - alpha) * self.entropy_weighted +
alpha * self.phi_big_oracle.entropy(times)) / self.A
return self.h_oracle.conjugate_func(flows) - self.entropy_weighted
def get_flows(self, times):
return - self.phi_big_oracle.grad(times)
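# Minimal usage sketch (phi_big_oracle and h_oracle as in oracles.py of this folder):
# calculator = PrimalDualCalculator(phi_big_oracle, h_oracle,
#                                   graph.free_flow_times, graph.capacities, rho = 0.15, mu = 0.25)
# primal, dual, duality_gap, state_msg = calculator(flows, times, alpha)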
| 2,133
| 40.038462
| 99
|
py
|
TransportNet
|
TransportNet-master/Stochastic Nash-Wardrop equilibrium/model.py
|
# model parameters:
import copy
import numpy as np
import transport_graph as tg
import oracles
import dual_func_calculator as dfc
import universal_similar_triangles_method as ustm
import universal_gradient_descent_method as ugd
#from numba import jit
import math
class Model:
def __init__(self, graph_data, graph_correspondences, total_od_flow, mu = 0.25, rho = 0.15, gamma = 1.):
self.total_od_flow = total_od_flow
self.mu = mu
self.rho = rho
self.gamma = gamma
self.inds_to_nodes, self.graph_correspondences, graph_table = self._index_nodes(graph_data['graph_table'],
graph_correspondences)
self.graph = tg.TransportGraph(graph_table, len(self.inds_to_nodes), graph_data['links number'])
def _index_nodes(self, graph_table, graph_correspondences):
table = graph_table.copy()
inits = np.unique(table['init_node'][table['init_node_thru'] == False])
terms = np.unique(table['term_node'][table['term_node_thru'] == False])
through_nodes = np.unique([table['init_node'][table['init_node_thru'] == True],
table['term_node'][table['term_node_thru'] == True]])
nodes = np.concatenate((inits, through_nodes, terms))
nodes_inds = list(zip(nodes, np.arange(len(nodes))))
init_to_ind = dict(nodes_inds[ : len(inits) + len(through_nodes)])
term_to_ind = dict(nodes_inds[len(inits) : ])
table['init_node'] = table['init_node'].map(init_to_ind)
table['term_node'] = table['term_node'].map(term_to_ind)
correspondences = {}
for origin, dests in graph_correspondences.items():
dests = copy.deepcopy(dests)
correspondences[init_to_ind[origin]] = {'targets' : list(map(term_to_ind.get , dests['targets'])),
'corrs' : dests['corrs']}
inds_to_nodes = dict(zip(range(len(nodes)), nodes))
return inds_to_nodes, correspondences, table
def find_equilibrium(self, solver_name = 'ustm', composite = True, solver_kwargs = {}):
if solver_name == 'ustm':
solver_func = ustm.universal_similar_triangles_method
starting_msg = 'Universal similar triangles method...'
if not 'L_init' in solver_kwargs:
solver_kwargs['L_init'] = 0.1 * self.graph.max_path_length * self.total_od_flow / self.gamma
elif solver_name == 'ugd':
solver_func = ugd.universal_gradient_descent_method
starting_msg = 'Universal gradient descent method...'
if not 'L_init' in solver_kwargs:
solver_kwargs['L_init'] = 0.1 * self.graph.max_path_length * self.total_od_flow / self.gamma
else:
raise NotImplementedError('Unknown solver!')
phi_big_oracle = oracles.PhiBigOracle(self.graph, self.graph_correspondences, gamma = self.gamma)
h_oracle = oracles.HOracle(self.graph.free_flow_times, self.graph.capacities,
rho = self.rho, mu = self.mu)
primal_dual_calculator = dfc.PrimalDualCalculator(phi_big_oracle, h_oracle,
self.graph.free_flow_times, self.graph.capacities,
rho = self.rho, mu = self.mu)
if composite == True:
print('Composite optimization...')
oracle = phi_big_oracle
prox = h_oracle.prox
else:
print('Non-composite optimization...')
oracle = phi_big_oracle + h_oracle
def prox_func(grad, point, A):
"""
Computes argmin_{t: t \in Q} <g, t> + A / 2 * ||t - p||^2
where Q - the feasible set {t: t >= free_flow_times},
A - constant, g - (sub)gradient vector, p - point at which prox is calculated
"""
return np.maximum(point - grad / A, self.graph.free_flow_times)
prox = prox_func
print('Oracles created...')
print(starting_msg)
result = solver_func(oracle, prox,
primal_dual_calculator,
t_start = self.graph.free_flow_times,
**solver_kwargs)
#TODO: add equilibrium travel times between zones
return result
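# Minimal usage sketch (assumes graph_data, graph_correspondences and total_od_flow
# were obtained with DataHandler, see data_handler.py):
# model = Model(graph_data, graph_correspondences, total_od_flow, mu = 0.25, rho = 0.15, gamma = 1.0)
# result = model.find_equilibrium(solver_name = 'ustm', composite = True,
#                                 solver_kwargs = {'max_iter': 1000, 'verbose': True})
# equilibrium_times, equilibrium_flows = result['times'], result['flows']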
| 4,564
| 47.56383
| 114
|
py
|
TransportNet
|
TransportNet-master/Stochastic Nash-Wardrop equilibrium/oracles.py
|
# from scipy.special import expit
#import multiprocessing as mp
from collections import defaultdict
#from scipy.misc import logsumexp
from scipy.special import expit
import numpy as np
import time
from transport_graph import JitTransportGraph
from numba.experimental import jitclass
from numba import jit, int32, int64, float64
from numba import njit
#TODO PhiBigOracle should memorize func and grad at least for 2 points.
@jit(["float64(float64[:])"])
def logsumexp(ns):
nmax = np.max(ns)
if np.isinf(nmax):
return - np.inf
ds = ns - nmax
exp_sum = np.exp(ds).sum()
return nmax + np.log(exp_sum)
class BaseOracle(object):
"""
Base class for implementation of oracles.
"""
def func(self, x):
"""
Computes the value of function at point x.
"""
raise NotImplementedError('Func oracle is not implemented.')
def grad(self, x):
"""
Computes the gradient at point x.
"""
raise NotImplementedError('Grad oracle is not implemented.')
def prox(self, p, A, x_start = None):
"""
Calculates prox of the function f(x) at point p:
prox_f (p) = argmin_{x in Q} 0.5 ||x - p||^2 + A * f(x)
p - point
Q - feasible set
A - constant
x_start - start point for iterative minimization method
"""
raise NotImplementedError('Prox of the function is not implemented.')
def __add__(self, other):
return AdditiveOracle(self, other)
class AdditiveOracle(BaseOracle):
def __init__(self, *oracles):
self.oracles = oracles
def func(self, x):
func = 0
for oracle in self.oracles:
func += oracle.func(x)
return func
def grad(self, x):
grad = np.zeros(len(x))
for oracle in self.oracles:
grad += oracle.grad(x)
return grad
@property
def time(self): #getter
return np.sum([oracle.time for oracle in self.oracles])
class AutomaticOracle(BaseOracle):
def __init__(self, source, graph, source_correspondences, gamma = 1.0):
#stock graph
self._graph = graph
self._source = source
corr_targets = np.array(graph.get_nodes_indices(source_correspondences['targets']), dtype = 'int64')
corr_values = np.array(source_correspondences['corrs'], dtype = 'float64')
nonzero_indices = np.nonzero(corr_values)
corr_targets = corr_targets[nonzero_indices]
corr_values = corr_values[nonzero_indices]
self._jit_oracle = JitAutomaticOracle(graph.jit_graph, graph.get_node_index(source),
corr_targets, corr_values, gamma)
def func(self, t_parameter):
return self._jit_oracle.func(t_parameter)
def grad(self, t_parameter):
return self._jit_oracle.grad(t_parameter)
spec = [
('graph', JitTransportGraph.class_type.instance_type),
('nodes_number', int64),
('edges_number', int64),
('path_max_length', int64),
('source', int64),
('t_current', float64[:]),
('corr_targets', int64[:]),
('corr_values', float64[:]),
('targets', int64[:]),
('gamma', float64),
('A_values', float64[:,:]),
('B_values', float64[:,:]),
]
@jitclass(spec)
class JitAutomaticOracle:
"""
Oracle for automatic calculations of function kGamma * \Psi (t)
"""
def __init__(self, graph, source, corr_targets, corr_values, gamma):
#stock graph
self.graph = graph
self.nodes_number = graph.nodes_number
self.edges_number = graph.links_number
self.path_max_length = graph.max_path_length
self.source = source
self.corr_targets = corr_targets
self.corr_values = corr_values
self.targets = np.zeros(self.edges_number, dtype = int64)
for edge in range(self.edges_number):
self.targets[edge] = graph.target_of_edge(edge)
self.gamma = gamma
self.t_current = np.zeros(self.edges_number)
self.A_values = np.empty((self.path_max_length + 1, self.nodes_number))
self.B_values = np.empty((self.path_max_length + 1, self.nodes_number))
def func(self, t_parameter):
#print('automatic func called...')
self.t_current[:] = t_parameter
self._calculate_a_b_values()
return np.dot(self.corr_values,
self.B_values[self.path_max_length][self.corr_targets])
def grad(self, t_parameter):
#assert(np.all(self.t_current == t_parameter))
#print('automatic grad called...')
gradient_vector = np.zeros(self.edges_number)
#psi_d_beta_values initial values path_length = kMaxPathLength
psi_d_beta_values = np.zeros(self.nodes_number)
psi_d_beta_values[self.corr_targets] = self.corr_values
psi_d_alpha_values = np.zeros(self.nodes_number)
alpha_d_time = self._alpha_d_time_function(self.path_max_length)
for path_length in range(self.path_max_length - 1, 0, -1):
beta_d_beta_values = self._beta_d_beta_function(path_length + 1)
psi_d_beta_values[:] = psi_d_beta_values * beta_d_beta_values
#calculating psi_d_alpha_values
beta_d_alpha_values = self._beta_d_alpha_function(path_length)
psi_d_alpha_values = psi_d_beta_values * beta_d_alpha_values - \
np.array([np.dot(psi_d_alpha_values[self.graph.successors(node)],
alpha_d_time[self.graph.out_edges(node)]) for
node in range(self.nodes_number)])
#calculating gradient
alpha_d_time = self._alpha_d_time_function(path_length)
gradient_vector += psi_d_alpha_values[self.targets] * alpha_d_time
#print('my result = ' + str(gradient_vector))
return gradient_vector
def _alpha_d_time_function(self, path_length):
#print('alpha_d_time_func called...')
result = np.zeros(self.edges_number)
if path_length == 1:
result[self.graph.out_edges(self.source)] = - 1.0
else:
for node in range(self.nodes_number):
A_node = self.A_values[path_length][node]
if not np.isinf(A_node):
A_source = self.A_values[path_length - 1][self.graph.predecessors(node)]
in_edges = self.graph.in_edges(node)
result[in_edges] = - np.exp((A_source - self.t_current[in_edges] - A_node) /
self.gamma)
return result
def _beta_d_beta_function(self, path_length):
if path_length == 1:
return np.zeros(self.nodes_number)
beta_new = self.B_values[path_length][:]
beta_old = self.B_values[path_length - 1][:]
indices = np.nonzero(np.logical_not(np.isinf(beta_new)))
result = np.zeros(self.nodes_number)
result[indices] = np.exp((beta_old[indices] - beta_new[indices]) / self.gamma)
return result
def _beta_d_alpha_function(self, path_length):
if path_length == 1:
return np.ones(self.nodes_number)
alpha_values = self.A_values[path_length][:]
beta_values = self.B_values[path_length][:]
indices = np.nonzero(np.logical_not(np.isinf(beta_values)))
result = np.zeros(self.nodes_number)
result[indices] = np.exp((alpha_values[indices] - beta_values[indices]) / self.gamma)
return result
def _calculate_a_b_values(self):
self.A_values = np.full(self.A_values.shape, - np.inf)
self.B_values = np.full(self.B_values.shape, - np.inf)
initial_values = - 1.0 * self.t_current[self.graph.out_edges(self.source)]
self.A_values[1][self.graph.successors(self.source)] = initial_values
self.B_values[1][self.graph.successors(self.source)] = initial_values
for path_length in range(2, self.path_max_length + 1):
for term_vertex in range(self.nodes_number):
if len(self.graph.predecessors(term_vertex)) > 0:
alpha = self.gamma * logsumexp(1.0 / self.gamma *
(self.A_values[path_length - 1][self.graph.predecessors(term_vertex)]
- self.t_current[self.graph.in_edges(term_vertex)]))
beta = self.gamma * logsumexp(np.array([1.0 / self.gamma *
self.B_values[path_length - 1][term_vertex],
1.0 / self.gamma * alpha]))
self.A_values[path_length][term_vertex] = alpha
self.B_values[path_length][term_vertex] = beta
class PhiBigOracle(BaseOracle):
def __init__(self, graph, correspondences, processes_number = None, gamma = 1.0):
self.graph = graph
self.correspondences = correspondences
if processes_number:
self.processes_number = processes_number
else:
self.processes_number = len(correspondences)
self.gamma = gamma
self.t_current = None
self.func_current = None
self.grad_current = None
self.entropy_current = None
self.auto_oracles = []
for source, source_correspondences in self.correspondences.items():
self.auto_oracles.append(AutomaticOracle(source, self.graph, source_correspondences, gamma = self.gamma))
self.time = 0.0
def _reset(self, t_parameter):
#print('Start reset')
tic = time.time()
self.t_current = t_parameter
self.func_current = 0.0
self.grad_current = np.zeros(self.graph.links_number)
for auto_oracle in self.auto_oracles:
self.func_current += auto_oracle.func(self.t_current)
self.grad_current += auto_oracle.grad(self.t_current)
self.entropy_current = self.func_current - np.dot(self.t_current, self.grad_current)
self.time += time.time() - tic
#print('Stop reset')
def func(self, t_parameter):
if self.t_current is None or np.any(self.t_current != t_parameter):
self._reset(t_parameter)
return self.func_current
def grad(self, t_parameter):
if self.t_current is None or np.any(self.t_current != t_parameter):
self._reset(t_parameter)
return self.grad_current
def entropy(self, t_parameter):
if self.t_current is None or np.any(self.t_current != t_parameter):
self._reset(t_parameter)
return self.entropy_current
#Newton's method for HOracle
@njit
def newton(x_0_arr, a_arr, mu,
tol = 1e-7, max_iter = 1000):
"""
Newton method for equation x - x_0 + a x^mu = 0, x >= 0
"""
res = np.empty(len(x_0_arr), dtype = np.float64)
for i in range(len(x_0_arr)):
x_0 = x_0_arr[i]
a = a_arr[i]
if x_0 <= 0:
res[i] = 0
continue
x = min(x_0, (x_0 / a) ** (1 / mu))
for it in range(max_iter):
x_next = x - f(x, x_0, a, mu) / der_f(x, x_0, a, mu)
if x_next <= 0:
x_next = 0.1 * x
x = x_next
if np.abs(f(x, x_0, a, mu)) < tol:
break
res[i] = x
return res
@njit
def f(x, x_0, a, mu):
return x - x_0 + a * x ** mu
@njit
def der_f(x, x_0, a, mu):
return 1.0 + a * mu * x ** (mu - 1)
class HOracle(BaseOracle):
def __init__(self, freeflowtimes, capacities, rho = 10.0, mu = 0.25):
self.links_number = len(freeflowtimes)
self.rho = rho
self.mu = mu
self.freeflowtimes = np.copy(freeflowtimes)
self.capacities = np.copy(capacities)
self.time = 0
def func(self, t_parameter):
"""
Computes value of the function h(times) = \sum_i sigma^*(times[i])
"""
if self.mu == 0:
h_func = np.dot(self.capacities, np.maximum(t_parameter - self.freeflowtimes, 0))
else:
h_func = np.sum(self.capacities * (t_parameter - self.freeflowtimes) *
(np.maximum(t_parameter - self.freeflowtimes, 0.0) /
(self.rho * self.freeflowtimes)) ** self.mu) / (1.0 + self.mu)
return h_func
def conjugate_func(self, flows):
"""
Computes the conjugate of the function h(t):
h^*(flows) = \sum_i sigma(flows[i]), since h(t) is a separable function
"""
if self.mu == 0:
return np.dot(self.freeflowtimes, flows)
else:
return np.dot(self.freeflowtimes * flows,
self.rho * self.mu / (1.0 + self.mu) *
(flows / self.capacities) ** (1.0 / self.mu) + 1.0)
def grad(self, t_parameter):
if self.mu == 0:
h_grad = self.capacities
else:
h_grad = self.capacities * (np.maximum(t_parameter - self.freeflowtimes, 0.0) /
(self.rho * self.freeflowtimes)) ** self.mu
return h_grad
def prox(self, grad, point, A):
"""
Computes argmin_{t: t \in Q} <g, t> + A / 2 * ||t - p||^2 + h(t)
where Q - the feasible set {t: t >= free_flow_times},
A - constant, g - (sub)gradient vector, p - point at which prox is calculated
"""
#rewrite as A/2 ||t - p_new||^2 + h(t)
point_new = point - grad / A
if self.mu == 0:
return np.maximum(point_new - self.capacities / A, self.freeflowtimes)
#closed-form solutions for mu = 1, 0.5, 0.25 could be plugged in here;
#for now the generic Newton solver below is used for any mu > 0
elif self.mu == 1:
pass
elif self.mu == 0.5:
pass
elif self.mu == 0.25:
pass
#rewrite as x - x_0 + a x^mu = 0, x >= 0
#where x = (t - bar{t})/(bar{t} * rho), x_0 = (p_new - bar{t})/(bar{t} * rho),
# a = bar{f} / (A * bar{t} * rho)
x = newton(x_0_arr = (point_new - self.freeflowtimes) / (self.rho * self.freeflowtimes),
a_arr = self.capacities / (A * self.rho * self.freeflowtimes),
mu = self.mu)
argmin = (1 + self.rho * x) * self.freeflowtimes
return argmin
"""
def pickle_func(oracle, args):
return oracle._process_func(*args)
def _process_func(self, source, graph,
source_correspondences, operation, t_parameter):
automatic_oracle = AutomaticOracle(source, graph, source_correspondences)
if operation == 'func':
res = automatic_oracle.func(t_parameter)
if operation == 'grad':
res = automatic_oracle.grad(t_parameter)
return res
def func(self, t_parameter):
if self.t_current is None or np.any(self.t_current != t_parameter):
self.t_current = t_parameter
#pool = mp.Pool(processes = self.processes_number)
results = []
for key, value in self.correspondences.iteritems():
#results.append(pool.apply_async(pickle_func, args=(self, (key, self.graph, value, 'func', t_parameter))))
results.append(pickle_func(self, (key, self.graph, value, 'func', t_parameter)))
#results = np.array([p.get() for p in results])
results = np.array(results)
self.func_current = np.sum(results)
pool.close()
return self.func_current
def grad(self, t_parameter):
if self.t_current is None or np.any(self.t_current != t_parameter):
pool = mp.Pool(processes = self.processes_number)
results = []
for key, value in self.correspondences.iteritems():
#results.append(pool.apply_async(pickle_func, args=(self, (key, self.graph, value, 'grad', t_parameter))))
results.append(pickle_func(self, (key, self.graph, value, 'grad', t_parameter)))
#results = np.array([p.get() for p in results])
results = np.array(results)
self.grad_current = np.sum(results, axis = 0)
pool.close()
return self.grad_current
"""
| 16,548
| 37.575758
| 122
|
py
|
TransportNet
|
TransportNet-master/Stochastic Nash-Wardrop equilibrium/universal_similar_triangles_method.py
|
from math import sqrt
import numpy as np
from history import History
def universal_similar_triangles_method(oracle, prox, primal_dual_oracle,
t_start, L_init = None, max_iter = 1000,
eps = 1e-5, eps_abs = None, stop_crit = 'dual_gap_rel',
verbose_step = 100, verbose = False, save_history = False):
if stop_crit == 'dual_gap_rel':
def crit():
return duality_gap <= eps * duality_gap_init
elif stop_crit == 'dual_gap':
def crit():
return duality_gap <= eps_abs
elif stop_crit == 'max_iter':
def crit():
return it_counter == max_iter
elif callable(stop_crit):
crit = stop_crit
else:
raise ValueError("stop_crit should be callable or one of the following names: \
'dual_gap', 'dual_gap_rel', 'max iter'")
L_value = L_init if L_init is not None else np.linalg.norm(oracle.grad(t_start))
A_prev = 0.0
y_start = u_prev = t_prev = np.copy(t_start)
A = u = t = y = None
grad_sum = None
grad_sum_prev = np.zeros(len(t_start))
flows_weighted = primal_dual_oracle.get_flows(y_start)
primal, dual, duality_gap_init, state_msg = primal_dual_oracle(flows_weighted, y_start)
if save_history:
history = History('iter', 'primal_func', 'dual_func', 'dual_gap', 'inner_iters')
history.update(0, primal, dual, duality_gap_init, 0)
if verbose:
print(state_msg)
if eps_abs is None:
eps_abs = eps * duality_gap_init
success = False
inner_iters_num = 0
for it_counter in range(1, max_iter+1):
while True:
inner_iters_num += 1
alpha = 0.5 / L_value + sqrt(0.25 / L_value**2 + A_prev / L_value)
A = A_prev + alpha
y = (alpha * u_prev + A_prev * t_prev) / A
grad_y = oracle.grad(y)
flows = primal_dual_oracle.get_flows(y) #grad() is called here
grad_sum = grad_sum_prev + alpha * grad_y
u = prox(grad_sum / A, y_start, 1.0 / A)
t = (alpha * u + A_prev * t_prev) / A
left_value = (oracle.func(y) + np.dot(grad_y, t - y) +
0.5 * alpha / A * eps_abs) - oracle.func(t)
right_value = - 0.5 * L_value * np.sum((t - y)**2)
if left_value >= right_value:
break
else:
L_value *= 2
A_prev = A
L_value /= 2
t_prev = t
u_prev = u
grad_sum_prev = grad_sum
flows_weighted = (flows_weighted * (A - alpha) + flows * alpha) / A
primal, dual, duality_gap, state_msg = primal_dual_oracle(flows_weighted, t, alpha)
if save_history:
history.update(it_counter, primal, dual, duality_gap, inner_iters_num)
if verbose and (it_counter % verbose_step == 0):
print('\nIterations number: {:d}'.format(it_counter))
print('Inner iterations number: {:d}'.format(inner_iters_num))
print(state_msg, flush = True)
if crit():
success = True
break
result = {'times': t, 'flows': flows_weighted,
'iter_num': it_counter,
'res_msg': 'success' if success else 'iterations number exceeded'}
if save_history:
result['history'] = history.dict
if verbose:
print('\nResult: ' + result['res_msg'])
print('Total iters: ' + str(it_counter))
print(state_msg)
print('Oracle elapsed time: {:.0f} sec'.format(oracle.time))
return result
#print('Dijkstra elapsed time: {:.0f} sec'.format(oracle.auto_oracles_time))
#criteria: stable dynamic 'dual_threshold' AND 'primal_threshold', 'dual_rel' AND 'primal_rel'.
#beckman : + 'dual_gap_rel', 'dual_gap_threshold', 'primal_threshold', 'primal_rel'
#criteria: 'star_solution_residual',
#practice: 'dual_rel'
# if crit_name == 'dual_gap_rel':
# def crit():
# nonlocal duality_gap, duality_gap_init, eps
# return duality_gap < eps * duality_gap_init
# if crit_name == 'dual_rel':
# def crit():
# nonlocal dual_func_history, eps
# l = len(dual_func_history)
# return dual_func_history[l // 2] - dual_func_history[-1] \
# < eps * (dual_func_history[0] - dual_func_history[-1])
# if crit_name == 'primal_rel':
# def crit():
# nonlocal primal_func_history, eps
# l = len(primal_func_history)
# return primal_func_history[l // 2] - primal_func_history[-1] \
# < eps * (primal_func_history[0] - primal_func_history[-1])
| 4,846
| 37.776
| 98
|
py
|
TransportNet
|
TransportNet-master/Stochastic Nash-Wardrop equilibrium/transport_graph.py
|
# Attention: in the input graph table
# nodes are indexed from 1; TransportGraph re-indexes them from 0 to nodes_number - 1
# edges are indexed from 0 to links_number - 1
#import networkx as nx
import numpy as np
import copy
import scipy.sparse as sp
import math
from numba.experimental import jitclass
from numba import int64, float64
spec = [
('_nodes_number', int64),
('_links_number', int64),
('_max_path_length', int64),
('_capacities', float64[:]),
('_free_flow_times', float64[:]),
('_sources', int64[:]),
('_targets', int64[:]),
('_in_pointers', int64[:]),
('_in_edges_array', int64[:]),
('_pred', int64[:]),
('_out_pointers', int64[:]),
('_out_edges_array', int64[:]),
('_succ', int64[:]),
]
@jitclass(spec)
class JitTransportGraph:
def __init__(self, nodes_number, links_number, max_path_length,
sources, targets, capacities, free_flow_times,
in_pointers, in_edges_array, pred,
out_pointers, out_edges_array, succ):
self._nodes_number = nodes_number
self._links_number = links_number
self._max_path_length = max_path_length
self._capacities = capacities
self._free_flow_times = free_flow_times
self._sources = sources
self._targets = targets
self._in_pointers = in_pointers
self._in_edges_array = in_edges_array
self._pred = pred
self._out_pointers = out_pointers
self._out_edges_array = out_edges_array
self._succ = succ
@property
def nodes_number(self):
return self._nodes_number
@property
def links_number(self):
return self._links_number
@property
def max_path_length(self):
return self._max_path_length
@property
def capacities(self):
#return np.array(self.graph_table[['Capacity']]).flatten()
return self._capacities
@property
def free_flow_times(self):
#return np.array(self.graph_table[['Free Flow Time']]).flatten()
return self._free_flow_times
def successors(self, node_index):
#return list(self.transport_graph.successors(vertex))
return self._succ[self._out_pointers[node_index] : self._out_pointers[node_index + 1]]
def predecessors(self, node_index):
#return list(self.transport_graph.predecessors(vertex))
return self._pred[self._in_pointers[node_index] : self._in_pointers[node_index + 1]]
def in_edges(self, node_index):
#return self._edges_indices(self.transport_graph.in_edges(vertex, data = True))
return self._in_edges_array[self._in_pointers[node_index] : self._in_pointers[node_index + 1]]
def out_edges(self, node_index):
#return self._edges_indices(self.transport_graph.out_edges(vertex, data = True))
return self._out_edges_array[self._out_pointers[node_index] : self._out_pointers[node_index + 1]]
def source_of_edge(self, edge_index):
#return self.graph_table.get_value(edge_index, 0, takeable=True)
return self._sources[edge_index]
def target_of_edge(self, edge_index):
#return self.graph_table.get_value(edge_index, 1, takeable=True)
return self._targets[edge_index]
class TransportGraph:
def __init__(self, graph_table, nodes_number, links_number, maxpath_const = 3):
max_path_length = maxpath_const * int(math.sqrt(links_number))
#define data for edge properties
capacities = np.array(graph_table[['capacity']], dtype = 'float64').flatten()
free_flow_times = np.array(graph_table[['free_flow_time']], dtype = 'float64').flatten()
sources = np.zeros(links_number, dtype = 'int64')
targets = np.zeros(links_number, dtype = 'int64')
in_incident_matrix = sp.lil_matrix((nodes_number, links_number), dtype = 'int64')
out_incident_matrix = sp.lil_matrix((nodes_number, links_number), dtype = 'int64')
self._nodes_indices = {}
index = 0
for edge, row in enumerate(graph_table[['init_node', 'term_node']].itertuples()):
if row[1] not in self._nodes_indices:
self._nodes_indices[row[1]] = index
index += 1
source = self._nodes_indices[row[1]]
sources[edge] = source
out_incident_matrix[source, edge] = 1
if row[2] not in self._nodes_indices:
self._nodes_indices[row[2]] = index
index += 1
target = self._nodes_indices[row[2]]
targets[edge] = target
in_incident_matrix[target, edge] = 1
in_incident_matrix = in_incident_matrix.tocsr()
in_pointers = np.array(in_incident_matrix.indptr, dtype= 'int64')
in_edges_array = np.array(in_incident_matrix.indices, dtype= 'int64')
pred = sources[in_edges_array]
out_incident_matrix = out_incident_matrix.tocsr()
out_pointers = np.array(out_incident_matrix.indptr, dtype= 'int64')
out_edges_array = np.array(out_incident_matrix.indices, dtype= 'int64')
succ = targets[out_edges_array]
self._jit_graph = JitTransportGraph(nodes_number, links_number, max_path_length,
sources, targets, capacities, free_flow_times,
in_pointers, in_edges_array, pred,
out_pointers, out_edges_array, succ)
@property
def jit_graph(self):
return self._jit_graph
@property
def nodes_number(self):
return self._jit_graph.nodes_number
@property
def links_number(self):
return self._jit_graph.links_number
@property
def max_path_length(self):
return self._jit_graph.max_path_length
@property
def capacities(self):
#return np.array(self.graph_table[['Capacity']]).flatten()
return self._jit_graph.capacities
@property
def free_flow_times(self):
#return np.array(self.graph_table[['Free Flow Time']]).flatten()
return self._jit_graph.free_flow_times
def successors(self, node_index):
#return list(self.transport_graph.successors(vertex))
return self._jit_graph.successors(node_index)
def predecessors(self, node_index):
#return list(self.transport_graph.predecessors(vertex))
return self._jit_graph.predecessors(node_index)
#TODO: this func should be removed, nodes are already indexed in model.py:
def get_nodes_indices(self, nodes):
return [self._nodes_indices[node] for node in nodes]
#TODO: this func should be removed, nodes are already indexed in model.py:
def get_node_index(self, node):
return self._nodes_indices[node]
def in_edges(self, node_index):
#return self._edges_indices(self.transport_graph.in_edges(vertex, data = True))
return self._jit_graph.in_edges(node_index)
def out_edges(self, node_index):
#return self._edges_indices(self.transport_graph.out_edges(vertex, data = True))
return self._jit_graph.out_edges(node_index)
def source_of_edge(self, edge_index):
#return self.graph_table.get_value(edge_index, 0, takeable=True)
return self._jit_graph.source_of_edge(edge_index)
def target_of_edge(self, edge_index):
#return self.graph_table.get_value(edge_index, 1, takeable=True)
return self._jit_graph.target_of_edge(edge_index)
| 7,589
| 36.95
| 105
|
py
|
TransportNet
|
TransportNet-master/Stochastic Nash-Wardrop equilibrium/data_handler.py
|
from scanf import scanf
import re
import numpy as np
import pandas as pd
#TODO: DOCUMENTATION!!!
class DataHandler:
def GetGraphData(self, file_name, columns):
graph_data = {}
metadata = ''
with open(file_name, 'r') as myfile:
for index, line in enumerate(myfile):
if re.search(r'^~', line) is not None:
skip_lines = index + 1
headlist = re.findall(r'[\w]+', line)
break
else:
metadata += line
graph_data['nodes number'] = scanf('<NUMBER OF NODES> %d', metadata)[0]
graph_data['links number'] = scanf('<NUMBER OF LINKS> %d', metadata)[0]
graph_data['zones number'] = scanf('<NUMBER OF ZONES> %d', metadata)[0]
first_thru_node = scanf('<FIRST THRU NODE> %d', metadata)[0]
dtypes = {'init_node' : np.int32, 'term_node' : np.int32, 'capacity' : np.float64, 'length': np.float64,
'free_flow_time': np.float64, 'b': np.float64, 'power': np.float64, 'speed': np.float64,'toll': np.float64,
'link_type' : np.int32}
df = pd.read_csv(file_name, names = headlist, dtype = dtypes, skiprows = skip_lines, sep = r'[\s;]+', engine='python',
index_col = False)
df = df[columns]
df.insert(loc = list(df).index('init_node') + 1, column = 'init_node_thru', value = (df['init_node'] >= first_thru_node))
df.insert(loc = list(df).index('term_node') + 1, column = 'term_node_thru', value = (df['term_node'] >= first_thru_node))
graph_data['graph_table'] = df
return graph_data
def GetGraphCorrespondences(self, file_name):
with open(file_name, 'r') as myfile:
trips_data = myfile.read()
total_od_flow = scanf('<TOTAL OD FLOW> %f', trips_data)[0]
#zones_number = scanf('<NUMBER OF ZONES> %d', trips_data)[0]
origins_data = re.findall(r'Origin[\s\d.:;]+', trips_data)
graph_correspondences = {}
for data in origins_data:
origin_index = scanf('Origin %d', data)[0]
origin_correspondences = re.findall(r'[\d]+\s+:[\d.\s]+;', data)
targets = []
corrs_vals = []
for line in origin_correspondences:
target, corrs = scanf('%d : %f', line)
targets.append(target)
corrs_vals.append(corrs)
graph_correspondences[origin_index] = {'targets' : targets, 'corrs' : corrs_vals}
return graph_correspondences, total_od_flow
def ReadAnswer(self, filename):
with open(filename) as myfile:
lines = myfile.readlines()
lines = lines[1 :]
flows = []
times = []
for line in lines:
_, _, flow, time = scanf('%d %d %f %f', line)
flows.append(flow)
times.append(time)
return {'flows' : flows, 'times' : times}
| 3,029
| 40.506849
| 129
|
py
|
TransportNet
|
TransportNet-master/Stochastic Nash-Wardrop equilibrium/history.py
|
class History():
"""
history handler
"""
def __init__(self, *attributes):
self.dict = {}
self.attributes = list(attributes)
for attribute in self.attributes:
self.dict[attribute] = []
def update(self, *values):
for index, value in enumerate(values):
self.dict[self.attributes[index]].append(value)
def get(self, attribute):
return self.dict[attribute]
| 461
| 27.875
| 59
|
py
|
TransportNet
|
TransportNet-master/Stochastic Nash-Wardrop equilibrium/universal_gradient_descent_method.py
|
import numpy as np
from history import History
def universal_gradient_descent_method(oracle, prox, primal_dual_oracle,
t_start, L_init = None, max_iter = 1000,
eps = 1e-5, eps_abs = None, stop_crit = 'dual_gap_rel',
verbose_step = 100, verbose = False, save_history = False):
if stop_crit == 'dual_gap_rel':
def crit():
return duality_gap <= eps * duality_gap_init
elif stop_crit == 'dual_gap':
def crit():
return duality_gap <= eps_abs
elif stop_crit == 'max_iter':
def crit():
return it_counter == max_iter
elif callable(stop_crit):
crit = stop_crit
else:
raise ValueError("stop_crit should be callable or one of the following names: \
'dual_gap', 'dual_gap_rel', 'max iter'")
L_value = L_init if L_init is not None else np.linalg.norm(oracle.grad(t_start))
A = 0.0
t_prev = np.copy(t_start)
t = None
flows_weighted = primal_dual_oracle.get_flows(t_start)
t_weighted = np.copy(t_start)
primal, dual, duality_gap_init, state_msg = primal_dual_oracle(flows_weighted, t_weighted)
if save_history:
history = History('iter', 'primal_func', 'dual_func', 'dual_gap', 'inner_iters')
history.update(0, primal, dual, duality_gap_init, 0)
if verbose:
print(state_msg)
if eps_abs is None:
eps_abs = eps * duality_gap_init
success = False
inner_iters_num = 0
for it_counter in range(1, max_iter+1):
while True:
inner_iters_num += 1
alpha = 1 / L_value
grad_t = oracle.grad(t_prev)
flows = primal_dual_oracle.get_flows(t_prev) #grad() is called here
t = prox(grad_t, t_prev, 1.0 / alpha)
left_value = (oracle.func(t_prev) + np.dot(grad_t, t - t_prev) +
0.5 * eps_abs) - oracle.func(t)
right_value = - 0.5 * L_value * np.sum((t - t_prev)**2)
if left_value >= right_value:
break
else:
L_value *= 2
L_value /= 2
t_prev = t
A += alpha
t_weighted = (t_weighted * (A - alpha) + t * alpha) / A
flows_weighted = (flows_weighted * (A - alpha) + flows * alpha ) / A
primal, dual, duality_gap, state_msg = primal_dual_oracle(flows_weighted, t_weighted, alpha)
if save_history:
history.update(it_counter, primal, dual, duality_gap, inner_iters_num)
if verbose and (it_counter % verbose_step == 0):
print('\nIterations number: {:d}'.format(it_counter))
print('Inner iterations number: {:d}'.format(inner_iters_num))
print(state_msg, flush = True)
if crit():
success = True
break
result = {'times': t_weighted, 'flows': flows_weighted,
'iter_num': it_counter,
'res_msg': 'success' if success else 'iterations number exceeded'}
if save_history:
result['history'] = history.dict
if verbose:
print('\nResult: ' + result['res_msg'])
print('Total iters: ' + str(it_counter))
print(state_msg)
print('Oracle elapsed time: {:.0f} sec'.format(oracle.time))
return result
| 3,442
| 38.125
| 100
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/setup.py
|
from setuptools import setup, find_packages
setup(
name='pareto',
version='0.1',
packages=find_packages(),
zip_safe=False,
install_requires=[
'numpy',
'scipy',
'torch',
'torchvision',
'tqdm',
],
)
| 262
| 15.4375
| 43
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/pareto/utils.py
|
from typing import Iterable
from itertools import product
from termcolor import colored
import numpy as np
class TopTrace(object):
def __init__(
self,
num_objs: int,
*,
indent_size: int = 4,
):
self.tops = [[] for _ in range(num_objs)]
self.msgs = [[] for _ in range(num_objs)]
self.indent_size = indent_size
def print(
self,
new_tops: Iterable[float],
*,
show: bool = True,
):
for new_top, top, msg in zip(new_tops, self.tops, self.msgs):
new_top_msg = f'{new_top * 100.0:.2f}%'
# no previous value on the first call, so the delta columns stay empty
delta = abs_delta = ''
if top:
new_top_msg = colored(new_top_msg, 'green' if new_top >= top[-1] else 'red')
delta = '\u0394=' + colored(f'{(new_top - top[-1]) * 100.0:.2f}%', 'green' if new_top >= top[-1] else 'red')
abs_delta = 'abs\u0394=' + colored(f'{(new_top - top[0]) * 100.0:.2f}%', 'green' if new_top >= top[0] else 'red')
top.append(new_top)
msg.append(new_top_msg)
if show:
print(' ' * self.indent_size + ' '.join(msg + ([delta, abs_delta] if delta else [])))
print(flush=True)
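# Minimal usage sketch: track the best top-1 accuracy of two objectives across evaluations
# trace = TopTrace(num_objs = 2)
# trace.print([0.91, 0.88])
# trace.print([0.93, 0.87])  # prints colored deltas w.r.t. the previous and the first values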
def evenly_dist_weights(num_weights, dim):
return [ret for ret in product(
np.linspace(0.0, 1.0, num_weights), repeat=dim) if round(sum(ret), 3) == 1.0 and all(r not in (0.0, 1.0) for r in ret)]
if __name__ == "__main__":
print(evenly_dist_weights(7, 2))
| 1,500
| 30.270833
| 129
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/pareto/metrics.py
|
from typing import Iterable
from torch import Tensor
__all__ = ['topk_accuracies', 'topk_accuracy']
def topk_accuracies(
output: Tensor,
label: Tensor,
ks: Iterable[int] = (1,),
):
assert output.dim() == 2
assert label.dim() == 1
assert output.size(0) == label.size(0)
maxk = max(ks)
_, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
label = label.unsqueeze(1).expand_as(pred)
correct = pred.eq(label).float()
accu_list = []
for k in ks:
accu = correct[:, :k].sum(1).mean()
accu_list.append(accu.item())
return accu_list
def topk_accuracy(
output: Tensor,
label: Tensor,
k: int = 1,
):
return topk_accuracies(output, label, (k,))[0]
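# Minimal usage sketch (random tensors, shapes only):
# import torch
# logits = torch.randn(8, 10)
# labels = torch.randint(0, 10, (8,))
# top1, top5 = topk_accuracies(logits, labels, ks = (1, 5))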
| 771
| 19.864865
| 65
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/pareto/__init__.py
|
from . import optim
from . import metrics
from . import networks
from . import datasets
from . import utils
| 108
| 17.166667
| 22
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/pareto/networks/multi_lenet.py
|
from typing import Tuple, List
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class MultiLeNet(nn.Module):
def __init__(self) -> None:
super(MultiLeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 10, (5, 5))
self.conv2 = nn.Conv2d(10, 20, (5, 5))
self.fc1 = nn.Linear(20 * 4 * 4, 50)
self.fc3_1 = nn.Linear(50, 10)
self.fc3_2 = nn.Linear(50, 10)
def shared_parameters(self) -> List[Tensor]:
return [p for n, p in self.named_parameters() if not n.startswith('fc3')]
def forward(
self,
x: Tensor,
) -> Tuple[Tensor, Tensor]:
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1)
x = F.relu(self.fc1(x))
x = (self.fc3_1(x), self.fc3_2(x))
return x
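# A minimal forward-pass sketch: the 1x28x28 input size is inferred from fc1 expecting
# a 20*4*4 feature map, and each head returns one (batch, 10) logit tensor per task.
# The batch size is an arbitrary illustration value.
if __name__ == '__main__':
    net = MultiLeNet()
    x = torch.randn(4, 1, 28, 28)
    logits_task1, logits_task2 = net(x)
    print(logits_task1.shape, logits_task2.shape)  # torch.Size([4, 10]) twice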
| 927
| 27.121212
| 81
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/pareto/networks/__init__.py
|
from .multi_lenet import MultiLeNet
| 36
| 17.5
| 35
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/pareto/optim/hvp_solver.py
|
from functools import partial
from typing import Tuple, List, Iterable, Callable
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.utils import parameters_to_vector
__all__ = ['HVPSolver', 'AutogradHVPSolver', 'VisionHVPSolver']
class HVPSolver(object):
"""
    Hessian-vector product calculation.
    network: PyTorch network whose Hessian is computed
    parameters: the parameters with respect to which the Hessian is taken
    dataloader: PyTorch dataloader providing the batches used to compute gradients
    device: gpu/cpu device
"""
def __init__(
self,
network: nn.Module,
parameters: Iterable[Tensor],
device: torch.device,
dataloader: torch.utils.data.DataLoader,
) -> None:
self.parameters = list(parameters)
self.size = int(sum(p.numel() for p in self.parameters))
self.network = network
self.device = device
self.dataloader = dataloader
        # Create an iterator up front; we will cycle over the dataloader repeatedly
self.dataiter = iter(dataloader) if dataloader else None
self.apply = self.apply_batch
self.grad = self.grad_batch
def close(self) -> None:
try:
while True:
_ = next(self.dataiter)
except StopIteration:
pass
self.dataiter = None
self.dataloader = None
def set_hess(
self,
*,
batch: bool = True,
num_batches: int = None,
) -> None:
self.apply = self.apply_batch if batch else partial(self.apply_full, num_batches=num_batches)
def set_grad(
self,
*,
batch: bool = True,
num_batches: int = None,
) -> None:
self.grad = self.grad_batch if batch else partial(self.grad_full, num_batches=num_batches)
@torch.enable_grad()
def apply_batch(
self,
vec: Tensor,
weights: Tensor = None,
*,
grads: Tensor = None,
retain_graph: bool = True,
) -> Tuple[Tensor, Tensor]:
"""
        Returns (H @ vec, grads), where H is the Hessian of the weighted loss
        w.r.t. the vectorized model parameters
"""
raise NotImplementedError
@torch.enable_grad()
def apply_full(
self,
vec: Tensor,
weights: Tensor = None,
*,
grads: Tensor = None,
num_batches: int = None,
retain_graph: bool = False,
) -> Tensor:
apply_batch = self.apply_batch
num_batches = len(self.dataloader) if num_batches is None else num_batches
weighted_hvp = None
for _ in range(num_batches):
weighted_hvp_batch, _ = apply_batch(
vec, weights, grads=grads, retain_graph=retain_graph)
if weighted_hvp is None:
weighted_hvp = weighted_hvp_batch
else:
weighted_hvp.add_(weighted_hvp_batch)
weighted_hvp.div_(num_batches)
return weighted_hvp
def zero_grad(self) -> None:
"""
Zeros out the gradient info for each parameter in the model
"""
for p in self.parameters:
if p.grad is not None:
p.grad.data.zero_()
def set_data(
self,
dataloader: torch.utils.data.DataLoader,
) -> None:
self.dataloader = dataloader
self.dataiter = iter(dataloader)
@torch.enable_grad()
def get_losses(self) -> List[Tensor]:
raise NotImplementedError
@torch.enable_grad()
def grad_batch(
self,
*,
create_graph: bool = True,
) -> Tuple[Tensor, List[Tensor]]:
parameters = self.parameters
losses = self.get_losses()
param_grads = [list(torch.autograd.grad(
loss, parameters,
allow_unused=True, retain_graph=True, create_graph=create_graph)) for loss in losses]
for param_grad in param_grads:
for i, (param_grad_module, param) in enumerate(zip(param_grad, parameters)):
if param_grad_module is None:
param_grad[i] = torch.zeros_like(param)
grads = torch.stack([parameters_to_vector(param_grad) for param_grad in param_grads], dim=0)
return grads, losses
@torch.enable_grad()
def grad_full(
self,
*,
create_graph: bool = False,
num_batches: int = None,
) -> Tensor:
grad_batch = self.grad_batch
num_batches = len(self.dataloader) if num_batches is None else num_batches
grads = None
for _ in range(num_batches):
grads_batch, _ = grad_batch(create_graph=create_graph)
if grads is None:
grads = grads_batch
else:
grads.add_(grads_batch)
grads.div_(num_batches)
grads = grads.clone().detach()
return grads
class AutogradHVPSolver(HVPSolver):
"""
Use PyTorch autograd for Hessian-Vector product calculation
"""
def get_losses(self) -> List[Tensor]:
raise NotImplementedError
@torch.enable_grad()
def apply_batch(
self,
vec: Tensor,
weights: Tensor = None,
*,
grads: Tensor = None,
retain_graph: bool = True,
) -> Tuple[Tensor, Tensor]:
"""
        Returns (H @ vec, grads), where H is the Hessian of the weighted loss
        w.r.t. the vectorized model parameters
"""
if grads is None:
# compute original gradient, tracking computation graph
self.zero_grad()
grads, _ = self.grad_batch(create_graph=True)
self.zero_grad()
if weights is None:
weighted_grad = grads.sum(dim=0)
else:
weighted_grad = torch.matmul(weights, grads)
dot = vec.dot(weighted_grad)
param_weighted_hvp = torch.autograd.grad(dot, self.parameters, retain_graph=retain_graph)
# concatenate the results over the different components of the network
weighted_hvp = parameters_to_vector([p.contiguous() for p in param_weighted_hvp])
return weighted_hvp, grads
class VisionHVPSolver(AutogradHVPSolver):
def __init__(
self,
network: nn.Module,
device: torch.device,
dataloader: torch.utils.data.DataLoader,
closures: List[Callable],
*,
shared: bool = False,
) -> None:
parameters = network.shared_parameters() if shared else network.parameters()
super(VisionHVPSolver, self).__init__(network, parameters, device, dataloader)
self.closures = closures
@torch.enable_grad()
def get_losses(self) -> List[Tensor]:
try:
inputs, targets = next(self.dataiter)
except StopIteration:
self.dataiter = iter(self.dataloader)
inputs, targets = next(self.dataiter)
inputs = inputs.to(self.device)
if isinstance(targets, list):
targets = [target.to(self.device) for target in targets]
else:
targets = targets.to(self.device)
logits = self.network(inputs)
return [c(self.network, logits, targets) for c in self.closures]
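# A minimal usage sketch on synthetic data, assuming the `pareto` package is importable
# and using MultiLeNet as the two-headed network. The closures follow the
# (network, logits, targets) -> loss signature consumed by get_losses(); the dataset
# sizes and cross-entropy losses are illustration-only assumptions.
if __name__ == '__main__':
    import torch.nn.functional as F
    from torch.utils.data import DataLoader, TensorDataset
    from pareto.networks import MultiLeNet
    device = torch.device('cpu')
    inputs = torch.randn(16, 1, 28, 28)
    targets = torch.randint(0, 10, (16, 2))  # one label column per task
    loader = DataLoader(TensorDataset(inputs, targets), batch_size=8)
    closures = [
        lambda net, logits, t: F.cross_entropy(logits[0], t[:, 0]),
        lambda net, logits, t: F.cross_entropy(logits[1], t[:, 1]),
    ]
    network = MultiLeNet().to(device)
    hvp_solver = VisionHVPSolver(network, device, loader, closures, shared=True)
    # One stochastic Jacobian: shape (num_objectives, num_shared_parameters).
    grads, losses = hvp_solver.grad_batch(create_graph=False)
    print(grads.shape, [loss.item() for loss in losses])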
| 7,426
| 26.712687
| 101
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/pareto/optim/linalg_solver.py
|
from contextlib import contextmanager
from functools import partial
from typing import Tuple
import numpy as np
from scipy.sparse.linalg import LinearOperator, minres
import torch
import torch.nn as nn
from torch import Tensor
from .hvp_solver import HVPSolver
__all__ = ['PDError', 'HVPLinearOperator', 'KrylovSolver', 'MINRESSolver', 'CGSolver']
class PDError(RuntimeError):
pass
class HVPLinearOperator(LinearOperator):
def __init__(
self,
network: nn.Module,
hvp_solver: HVPSolver,
device: torch.device,
damping: float,
) -> None:
shape = (hvp_solver.size, hvp_solver.size)
dtype = list(network.parameters())[0].detach().cpu().numpy().dtype
super(HVPLinearOperator, self).__init__(dtype, shape)
self.network = network
self.hvp_solver = hvp_solver
self.device = device
self.damping = damping
self.jacobians = None
self.alphas = None
self.reset_parameters()
self.hvp_counter = 0
self.matvec_counter = 0
self.reset_counters()
def set_parameters(
self,
jacobians: Tensor,
alphas: Tensor,
) -> None:
self.jacobians = jacobians
self.alphas = alphas
def reset_parameters(self) -> None:
self.jacobians = None
self.alphas = None
def reset_counters(self) -> None:
self.hvp_counter = 0
self.matvec_counter = 0
def get_counters(self) -> Tuple[int, int]:
return self.hvp_counter, self.matvec_counter
def _matvec_tensor(
self,
tensor: Tensor,
) -> Tensor:
alphas_hvps, _ = self.hvp_solver.apply(
tensor, self.alphas, grads=self.jacobians, retain_graph=self.jacobians is not None) # (N,)
if self.damping > 0.0:
alphas_hvps.add_(tensor, alpha=self.damping)
self.hvp_counter += 1
self.matvec_counter += 1
return alphas_hvps
def _matvec(
self,
x: np.ndarray,
) -> np.ndarray:
"""HVP matrix-vector multiplication handler.
If self is a linear operator of shape (N, N), then this method will
be called on a shape (N,) or (N, 1) ndarray, and should return a
shape (N,) or (N, 1) ndarray.
        In our case, it computes (the alpha-weighted Hessian) @ x.
"""
tensor = torch.as_tensor(x.astype(self.dtype), device=self.device)
ret = self._matvec_tensor(tensor)
return ret.detach().cpu().numpy()
class KrylovSolver(object):
def solve(
self,
lazy_jacobians: Tensor,
jacobians: Tensor,
alphas: Tensor,
rhs: Tensor,
*,
verbose: bool = False,
) -> Tuple[Tensor, Tuple[int, int]]:
raise NotImplementedError
class MINRESSolver(KrylovSolver):
def __init__(
self,
network: nn.Module,
hvp_solver: HVPSolver,
device: torch.device,
shift: float,
tol: float,
damping: float,
maxiter: int,
) -> None:
self.device = device
self.linear_operator = HVPLinearOperator(network, hvp_solver, device, damping)
self.minres = partial(minres, shift=shift, tol=tol, maxiter=maxiter)
self.shape = self.linear_operator.shape
self.dtype = self.linear_operator.dtype
@contextmanager
def solve(
self,
lazy_jacobians: Tensor,
jacobians: Tensor,
alphas: Tensor,
rhs: Tensor,
*,
verbose: bool = False,
) -> Tuple[Tensor, Tuple[int, int]]:
"""Control counters automatically.
Parameters
----------
        lazy_jacobians : torch.Tensor or None
            If not None, these precomputed gradients are reused instead of being
            recomputed. A matrix with shape (M, N).
        jacobians : torch.Tensor
            A matrix with shape (M, N); `rhs` should be derived from it, and it
            should match `lazy_jacobians` when that is provided.
        alphas : torch.Tensor
            A vector with shape (M,).
        rhs : torch.Tensor
            A vector with shape (N,).
"""
try:
self.linear_operator.set_parameters(lazy_jacobians, alphas)
x0 = jacobians.mean(0).neg().clone().detach().cpu().numpy()
rhs = rhs.cpu().numpy()
results = self.minres(self.linear_operator, rhs, show=verbose, x0=x0)
d = torch.as_tensor(results[0].astype(self.dtype), device=self.device)
yield d, self.linear_operator.get_counters()
finally:
self.linear_operator.reset_parameters()
self.linear_operator.reset_counters()
class CGSolver(KrylovSolver):
def __init__(
self,
hvp_solver: HVPSolver,
device: torch.device,
tol: float,
damping: float,
maxiter: int,
pd_strict: bool = False,
) -> None:
self.hvp_solver = hvp_solver
self.device = device
self.tol = tol
self.damping = damping
self.maxiter = maxiter
self.pd_strict = pd_strict
self.hvp_counter = 0
self.matvec_counter = 0
self.reset_counters()
def reset_counters(self) -> None:
self.hvp_counter = 0
self.matvec_counter = 0
def cg(
self,
lazy_jacobians: Tensor,
alphas: Tensor,
rhs: Tensor,
x0: Tensor = None,
*,
verbose: bool = False,
) -> Tensor:
hvp_solver_apply = self.hvp_solver.apply
tol = self.tol
damping = self.damping
maxiter = self.maxiter
pd_strict = self.pd_strict
if x0 is None:
x0 = torch.ones_like(rhs)
x_next = x0.clone()
r = hvp_solver_apply(x0, alphas, lazy_jacobians)
r.add_(x0, alpha=damping).sub_(rhs)
p = r.neg()
r_k_norm = r.dot(r).item()
if maxiter is None:
n = len(rhs)
maxiter = 2 * n
for i in range(maxiter):
Ap = hvp_solver_apply(p, alphas, lazy_jacobians).add(p, alpha=damping)
pAp = p.dot(Ap).item()
if pAp <= 0:
if verbose:
                    print(i, round(pAp, 5), round(r_k_norm, 5))
if pd_strict:
if x0.dot(hvp_solver_apply(x0, alphas, lazy_jacobians).add(x0, alpha=damping)) <= 0:
raise PDError
x_next.copy_(x0)
break
x0.copy_(x_next)
alpha = r_k_norm / pAp
x_next.add_(p, alpha=alpha)
r.add_(Ap, alpha=alpha)
r_kplus1_norm = r.dot(r).item()
beta = r_kplus1_norm / r_k_norm
r_k_norm = r_kplus1_norm
if verbose:
print(i, round(pAp, 5), round(r_kplus1_norm, 5))
if r_kplus1_norm < tol:
break
p = p.mul(beta).sub(r)
return x_next
def get_counters(self) -> Tuple[int, int]:
return self.hvp_counter, self.matvec_counter
@contextmanager
def solve(
self,
lazy_jacobians: Tensor,
jacobians: Tensor,
alphas: Tensor,
rhs: Tensor,
*,
verbose: bool = False,
) -> Tuple[Tensor, Tuple[int, int]]:
"""Control counters automatically.
Parameters
----------
        lazy_jacobians : torch.Tensor or None
            If not None, these precomputed gradients are reused instead of being
            recomputed. A matrix with shape (M, N).
        jacobians : torch.Tensor
            A matrix with shape (M, N); `rhs` should be derived from it, and it
            should match `lazy_jacobians` when that is provided.
        alphas : torch.Tensor
            A vector with shape (M,).
        rhs : torch.Tensor
            A vector with shape (N,).
"""
try:
x0 = jacobians.mean(0).neg().clone().detach()
d = self.cg(lazy_jacobians, alphas, rhs, x0, verbose=verbose)
yield d, self.get_counters()
finally:
self.reset_counters()
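# A small self-contained illustration of the pattern HVPLinearOperator relies on:
# scipy's minres only ever needs a matvec, never the full matrix. The tiny symmetric
# system below stands in for the implicit (N, N) alpha-weighted Hessian; the values
# are arbitrary illustration choices.
if __name__ == '__main__':
    A = np.array([[4.0, 1.0], [1.0, 3.0]])
    b = np.array([1.0, 2.0])
    operator = LinearOperator(shape=A.shape, dtype=A.dtype, matvec=lambda v: A @ v)
    x, info = minres(operator, b)
    print(x, info)  # info == 0 signals convergence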
| 8,325
| 26.66113
| 104
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/pareto/optim/min_norm_solver.py
|
# This code is from
# Multi-Task Learning as Multi-Objective Optimization
# Ozan Sener, Vladlen Koltun
# Neural Information Processing Systems (NeurIPS) 2018
# https://github.com/intel-isl/MultiObjectiveOptimization
from itertools import combinations
import numpy as np
import torch
__all__ = ['find_min_norm_element']
def _min_norm_element_from2(v1v1, v1v2, v2v2):
r"""
Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2
d is the distance (objective) optimized
v1v1 = <x1,x1>
v1v2 = <x1,x2>
v2v2 = <x2,x2>
"""
if v1v2 >= v1v1:
# Case: Fig 1, third column
gamma = 0.999
cost = v1v1
return gamma, cost
if v1v2 >= v2v2:
# Case: Fig 1, first column
gamma = 0.001
cost = v2v2
return gamma, cost
# Case: Fig 1, second column
gamma = (v2v2 - v1v2) / (v1v1 + v2v2 - 2 * v1v2)
    # v2v2 - gamma * gamma * (v1 - v2)^2
# cost = v2v2 - gamma * gamma * (v1v1 + v2v2 - 2 * v1v2)
# = v2v2 - gamma * (v2v2 - v1v2)
cost = v2v2 + gamma * (v1v2 - v2v2)
return gamma, cost
def _min_norm_2d(vecs):
r"""
Find the minimum norm solution as combination of two points
This is correct only in 2D
    i.e. min_c |\sum c_i x_i|_2^2 st. \sum c_i = 1 , 1 >= c_i >= 0 for all i, c_i + c_j = 1.0 for some i, j
"""
dmin = None
dps = vecs.matmul(vecs.t()).cpu().numpy()
for i, j in combinations(range(len(vecs)), 2):
c, d = _min_norm_element_from2(dps[i, i], dps[i, j], dps[j, j])
if dmin is None:
dmin = d
if d <= dmin:
dmin = d
sol = [(i, j), c, d]
return sol, dps
def _projection2simplex(y):
r"""
Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i
"""
m = len(y)
sorted_y = np.flip(np.sort(y), axis=0)
tmpsum = 0.0
tmax_f = (np.sum(y) - 1.0) / m
for i in range(m - 1):
tmpsum += sorted_y[i]
tmax = (tmpsum - 1) / (i + 1.0)
if tmax > sorted_y[i + 1]:
tmax_f = tmax
break
return np.maximum(y - tmax_f, np.zeros(y.shape))
def _next_point(cur_val, grad, n):
proj_grad = grad - (np.sum(grad) / n)
tm1 = -cur_val[proj_grad < 0] / proj_grad[proj_grad < 0]
tm2 = (1.0 - cur_val[proj_grad > 0]) / (proj_grad[proj_grad > 0])
t = 1
if len(tm1[tm1 > 1e-7]) > 0:
t = np.min(tm1[tm1 > 1e-7])
if len(tm2[tm2 > 1e-7]) > 0:
t = min(t, np.min(tm2[tm2 > 1e-7]))
next_point = proj_grad * t + cur_val
next_point = _projection2simplex(next_point)
return next_point
@torch.no_grad()
def find_min_norm_element(vecs, max_iter=250, stop_crit=1e-5):
r"""
Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j;
the solution lies in (0, d_{i,j})
Hence, we find the best 2-task solution, and then run the projected gradient descent until convergence
"""
# Solution lying at the combination of two points
init_sol, dps = _min_norm_2d(vecs.detach())
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec, init_sol[2]
iter_count = 0
while iter_count < max_iter:
grad_dir = -1.0 * np.dot(dps, sol_vec)
new_point = _next_point(sol_vec, grad_dir, n)
# Re-compute the inner products for line search
v1v1 = 0.0
v1v2 = 0.0
v2v2 = 0.0
for i in range(n):
for j in range(n):
v1v1 += sol_vec[i] * sol_vec[j] * dps[i, j]
v1v2 += sol_vec[i] * new_point[j] * dps[i, j]
v2v2 += new_point[i] * new_point[j] * dps[i, j]
nc, nd = _min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = nc * sol_vec + (1 - nc) * new_point
change = new_sol_vec - sol_vec
if np.sum(np.abs(change)) < stop_crit:
break
sol_vec = new_sol_vec
return sol_vec, nd
@torch.no_grad()
def gradient_normalizers(grads, losses, normalization_type):
gn = {}
if normalization_type == 'l2':
for t in grads:
gn[t] = np.sqrt(np.sum([gr.pow(2).sum().item()
for gr in grads[t]]))
elif normalization_type == 'loss':
for t in grads:
gn[t] = losses[t]
elif normalization_type == 'loss+':
for t in grads:
gn[t] = losses[t] * \
np.sqrt(np.sum([gr.pow(2).sum().item() for gr in grads[t]]))
elif normalization_type == 'none':
for t in grads:
gn[t] = 1.0
else:
print('ERROR: Invalid Normalization Type')
return gn
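# A quick sanity-check sketch: the simplex projection should return non-negative
# weights summing to 1, and for two vectors find_min_norm_element should recover the
# closed-form solution of _min_norm_element_from2. The vectors are arbitrary examples.
if __name__ == '__main__':
    print(_projection2simplex(np.array([0.8, 0.6, -0.1])))  # [0.6, 0.4, 0.0]
    vecs = torch.tensor([[1.0, 0.0], [0.0, 2.0]])
    sol, cost = find_min_norm_element(vecs)
    gamma, cost_2d = _min_norm_element_from2(1.0, 0.0, 4.0)
    print(sol, cost)       # sol ~ [0.8, 0.2], cost ~ 0.8
    print(gamma, cost_2d)  # gamma = 0.8, cost = 0.8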
| 4,953
| 29.9625
| 109
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/pareto/optim/kkt_solver.py
|
from typing import Tuple, Mapping
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from .hvp_solver import HVPSolver
from .min_norm_solver import find_min_norm_element
from .linalg_solver import KrylovSolver, MINRESSolver, CGSolver
__all__ = ['KKTSolver', 'KrylovKKTSolver', 'CGKKTSolver', 'MINRESKKTSolver']
class KKTSolver(object):
def __init__(
self,
network: nn.Module,
hvp_solver: HVPSolver,
device: torch.device,
*,
kkt_momentum: float = 0.0,
create_graph: bool = False,
grad_correction: bool = False,
) -> None:
self.network = network
self.hvp_solver = hvp_solver
self.device = device
self.kkt_momentum = kkt_momentum
self.jacobians_momentum_buffer = None
self.alphas_momentum_buffer = None
self.create_graph = create_graph
self.grad_correction = grad_correction
def zero_grad(self) -> None:
self.hvp_solver.zero_grad()
def _jacobians_alphas_rhs(
self,
weights: Tensor,
*,
verbose: bool = True,
) -> Tuple[Tensor, Tensor, Tensor]:
grad_correction = self.grad_correction
kkt_momentum = self.kkt_momentum
hvp_solver = self.hvp_solver
jacobians = hvp_solver.grad(create_graph=self.create_graph)
alphas, _ = find_min_norm_element(jacobians.detach())
alphas = jacobians.new_tensor(alphas).detach()
if verbose:
print(jacobians.norm(dim=1).detach().cpu().numpy())
if jacobians.size(0) == 2:
cosine = jacobians[0].dot(jacobians[1]).div(jacobians[0].norm(2) * jacobians[1].norm(2)).item()
angle = np.rad2deg(np.arccos(cosine))
print(f'alphas={alphas},angle={angle}')
else:
print(f'alphas={alphas}')
if grad_correction:
alphas_jacobians = alphas.view(1, -1).matmul(jacobians).view(1, -1).detach()
jacobians.sub_(alphas_jacobians)
if kkt_momentum > 0.0:
if self.alphas_momentum_buffer is None:
self.alphas_momentum_buffer = torch.clone(alphas).detach()
alphas_buf = self.alphas_momentum_buffer
alphas_buf.mul_(kkt_momentum).add_(alphas, alpha=1 - kkt_momentum)
alphas = alphas_buf
            if self.jacobians_momentum_buffer is None:
                self.jacobians_momentum_buffer = torch.clone(jacobians).detach()
            jacobians_buf = self.jacobians_momentum_buffer
            jacobians_buf.mul_(kkt_momentum).add_(jacobians.detach(), alpha=1 - kkt_momentum)
            jacobians = jacobians_buf
rhs = weights.view(1, -1).matmul(jacobians).view(-1)
return jacobians, alphas, rhs.clone().detach()
@torch.no_grad()
def _print_alpha_beta_cosine(
self,
jacobians: Tensor,
alphas: Tensor,
direction: Tensor
) -> None:
direction = self.hvp_solver.apply(direction, alphas)
jacobians = jacobians.neg().detach()
v1v1 = jacobians[0].dot(jacobians[0]).item()
v1v2 = jacobians[0].dot(jacobians[1]).item()
v2v2 = jacobians[1].dot(jacobians[1]).item()
xv1 = direction.dot(jacobians[0]).item()
xv2 = direction.dot(jacobians[1]).item()
# (alpha * v1 + beta * v2 - x) * v1 = 0.
# (alpha * v1 + beta * v2 - x) * v2 = 0.
# alpha * v1v1 + beta * v1v2 = xv1
# alpha * v1v2 + beta * v2v2 = xv2
        # J = v1v1 * v2v2 - v1v2 * v1v2
# [v2v2, -v1v2] [xv1]
# [-v1v2, v1v1] [xv2]
# alpha = (v2v2 * xv1 - v1v2 * xv2) / J
# beta = (xv2 * v1v1 - xv1 * v1v2) / J
        # J does not matter since we only care about the cosine angle, not the absolute scale.
alpha = xv1 * v2v2 - xv2 * v1v2
beta = xv2 * v1v1 - xv1 * v1v2
total = abs(alpha) + abs(beta)
alpha /= total
beta /= total
span = alpha * jacobians[0] + beta * jacobians[1]
cosine = np.rad2deg(np.arccos(span.div(span.norm(2)).dot(direction.div(direction.norm(2))).item()))
print(alpha, beta, cosine)
def backward(
self,
weights: Tensor,
*,
verbose: bool = False,
) -> None:
jacobians, alphas, rhs = self._jacobians_alphas_rhs(weights, verbose=verbose)
direction = self._explore(jacobians, alphas, rhs, weights, verbose=verbose)
self.apply_grad(direction, normalize=True)
def _explore(
self,
jacobians: Tensor,
alphas: Tensor,
rhs: Tensor,
weights: Tensor,
*,
verbose: bool,
) -> Tensor:
raise NotImplementedError
@torch.no_grad()
def cosine(self) -> float:
jacobians, _ = self.hvp_solver.grad_batch(create_graph=False)
cosine = jacobians[0].dot(jacobians[1]).div(jacobians[0].norm(2) * jacobians[1].norm(2)).item()
return cosine
@torch.no_grad()
def apply_grad(
self,
direction: Tensor,
*,
normalize: bool = True,
) -> None:
if normalize:
direction.div_(direction.norm())
offset = 0
for p in self.hvp_solver.parameters:
numel = p.numel()
p.grad = direction[offset:offset + numel].view_as(p.data).clone()
offset += numel
assert offset == direction.size(0)
class KrylovKKTSolver(KKTSolver):
def __init__(
self,
network: nn.Module,
hvp_solver: HVPSolver,
device: torch.device,
krylov_solver: KrylovSolver,
*,
stochastic: bool = True,
kkt_momentum: float = 0.0,
create_graph: bool = False,
grad_correction: bool = False,
) -> None:
super(KrylovKKTSolver, self).__init__(
network, hvp_solver, device,
kkt_momentum=kkt_momentum,
create_graph=create_graph,
grad_correction=grad_correction,
)
self.stochastic = stochastic
self.krylov_solver = krylov_solver
def _explore(
self,
jacobians: Tensor,
alphas: Tensor,
rhs: Tensor,
weights: Tensor,
*,
verbose: bool,
) -> Tensor:
lazy_jacobians = None if self.stochastic else self.hvp_solver.grad_batch(create_graph=True)[0]
with self.krylov_solver.solve(lazy_jacobians, jacobians, alphas, rhs, verbose=verbose) as results:
direction, _ = results
return direction
class CGKKTSolver(KrylovKKTSolver):
def __init__(
self,
network: nn.Module,
hvp_solver: HVPSolver,
device: torch.device,
*,
stochastic: bool = True,
kkt_momentum: float = 0.0,
create_graph: bool = False,
grad_correction: bool = False,
tol: float = 1e-5,
damping: float = 0.0,
maxiter: int = 5,
pd_strict: bool = True,
) -> None:
krylov_solver = CGSolver(hvp_solver, device, tol, damping, maxiter, pd_strict)
super(CGKKTSolver, self).__init__(
network, hvp_solver, device, krylov_solver,
stochastic=stochastic,
kkt_momentum=kkt_momentum,
create_graph=create_graph,
grad_correction=grad_correction,
)
class MINRESKKTSolver(KrylovKKTSolver):
def __init__(
self,
network: nn.Module,
hvp_solver: HVPSolver,
device: torch.device,
*,
stochastic: bool = True,
kkt_momentum: float = 0.0,
create_graph: bool = False,
grad_correction: bool = False,
shift: float = 0.0,
tol: float = 1e-5,
damping: float = 0.0,
maxiter: int = 50,
) -> None:
krylov_solver = MINRESSolver(network, hvp_solver, device, shift, tol, damping, maxiter)
super(MINRESKKTSolver, self).__init__(
network, hvp_solver, device, krylov_solver,
stochastic=stochastic,
kkt_momentum=kkt_momentum,
create_graph=create_graph,
grad_correction=grad_correction,
)
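# An end-to-end sketch of one Pareto expansion step on synthetic data, assuming the
# `pareto` package is importable. It wires VisionHVPSolver + MINRESKKTSolver, computes
# an update direction for the chosen objective weights, writes it into .grad via
# apply_grad, and applies it with plain SGD on the shared parameters only. Dataset
# sizes, losses and hyperparameters are illustration-only assumptions.
if __name__ == '__main__':
    import torch.nn.functional as F
    from torch.utils.data import DataLoader, TensorDataset
    from pareto.networks import MultiLeNet
    from pareto.optim.hvp_solver import VisionHVPSolver
    device = torch.device('cpu')
    network = MultiLeNet().to(device)
    inputs = torch.randn(16, 1, 28, 28)
    targets = torch.randint(0, 10, (16, 2))  # one label column per task
    loader = DataLoader(TensorDataset(inputs, targets), batch_size=8)
    closures = [
        lambda net, logits, t: F.cross_entropy(logits[0], t[:, 0]),
        lambda net, logits, t: F.cross_entropy(logits[1], t[:, 1]),
    ]
    hvp_solver = VisionHVPSolver(network, device, loader, closures, shared=True)
    # Full-batch Jacobians: grad() then returns a single (M, N) tensor, which is what
    # the KKT solver consumes; Hessian-vector products stay stochastic (per batch).
    hvp_solver.set_grad(batch=False)
    hvp_solver.set_hess(batch=True)
    kkt_solver = MINRESKKTSolver(network, hvp_solver, device, maxiter=10)
    optimizer = torch.optim.SGD(hvp_solver.parameters, lr=1e-3)
    weights = torch.tensor([0.5, 0.5], device=device)
    optimizer.zero_grad()
    kkt_solver.backward(weights)
    optimizer.step()
    print('applied one expansion step')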
| 8,382
| 29.483636
| 111
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/pareto/optim/__init__.py
|
from .hvp_solver import VisionHVPSolver
from .kkt_solver import CGKKTSolver, MINRESKKTSolver
from .min_norm_solver import find_min_norm_element
| 144
| 35.25
| 52
|
py
|