index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
38,474
|
MayfeelYang/angr
|
refs/heads/master
|
/angr/surveyor.py
|
#!/usr/bin/env python
import multiprocessing
#import concurrent.futures
import logging
import weakref
import functools
l = logging.getLogger("angr.surveyor")

#
# Surveyor debugging
#

# Module-level flags polled by Surveyor.run(): STOP_RUNS aborts the run loop,
# PAUSE_RUNS drops into a debugger after each step (see Surveyor.run below).
STOP_RUNS = False
PAUSE_RUNS = False
def enable_singlestep():
    """Make running surveyors drop into a debugger after every step."""
    globals()['PAUSE_RUNS'] = True
def disable_singlestep():
    """Stop dropping into a debugger after each surveyor step."""
    globals()['PAUSE_RUNS'] = False
def stop_analyses():
    """Ask every running surveyor to break out of its run() loop."""
    globals()['STOP_RUNS'] = True
def resume_analyses():
    """Clear the stop flag so surveyors may be run() again."""
    globals()['STOP_RUNS'] = False
import signal

def handler(signum, frame): # pylint: disable=W0613,
    """Signal handler: SIGUSR1 stops running analyses, SIGUSR2 enables single-stepping."""
    if signum == signal.SIGUSR1:
        stop_analyses()
    elif signum == signal.SIGUSR2:
        enable_singlestep()

# SIGUSR1/SIGUSR2 do not exist on some platforms (e.g. Windows); there,
# accessing signal.SIGUSR1 raises AttributeError and we skip registration.
try:
    signal.signal(signal.SIGUSR1, handler)
    signal.signal(signal.SIGUSR2, handler)
except AttributeError:
    l.warning("Platform doesn't support SIGUSR")
# function that produces unpredictable results that should appease pylint's
# static analysis and stop giving us those awful errors!!!!
def dummy_func(*args, **kwargs):
    """
    Placeholder callable used for surveyor attributes before they are bound.

    :returns: a list of the positional arguments followed by the keyword
              argument names.
    """
    # bugfix: `args` is a tuple, and tuple + list raises TypeError; convert
    # both operands to lists before concatenating.
    return list(args) + list(kwargs)
#
# Surveyor list
#
class Surveyors(object):
    """
    Registry of surveyors started for a project. One attribute is created for
    every surveyor class in ``all_surveyors`` (e.g. ``Explorer``), bound to a
    partial that instantiates that surveyor with this project.
    """

    def __init__(self, project):
        self._project = project
        # weak proxies to every surveyor started through this object; entries
        # remove themselves when the underlying surveyor is garbage-collected
        self.started = [ ]

        # placeholders so static analysis (pylint) knows these attributes
        # exist; they are overwritten by the setattr() loop below
        self.Explorer = dummy_func
        self.Caller = dummy_func
        self.Escaper = dummy_func

        for surveyor_name,surveyor in all_surveyors.items():
            setattr(self, surveyor_name, functools.partial(self._start_surveyor, surveyor))

    def _surveyor_finished(self, proxy):
        # weakref callback: drop the dead proxy from the started list
        self.started.remove(proxy)

    def _start_surveyor(self, surveyor, *args, **kwargs):
        """
        Calls a surveyor and adds result to the .started list. See
        the arguments for the specific surveyor for its documentation.
        """
        s = surveyor(self._project, *args, **kwargs)
        self.started.append(weakref.proxy(s, self._surveyor_finished))
        return s

    def __getstate__(self):
        # only the project is pickled; the registry is rebuilt on unpickle
        return self._project

    def __setstate__(self, s):
        self.__init__(s)
class Surveyor(object):
    """
    The surveyor class eases the implementation of symbolic analyses. This
    provides a base upon which analyses can be implemented.

    Surveyors provide at least the following members:

    :ivar active: The paths that are still active in the analysis.
    :ivar deadended: The paths that have deadended (no successors at all).
    :ivar spilled: The paths that have been spilled out of the active set.
    :ivar errored: The paths that have at least one error-state exit.
    :ivar pruned: The paths that were pruned because their ancestors were unsat.
    :ivar unconstrained: The paths that have a successor with an unconstrained instruction pointer.

    A Surveryor has the following overloadable properties:

    :ivar done: returns True if the analysis is done (by default, this is when self.active is empty).
    :ivar run: runs a loop of tick()ing and spill()ing until self.done is True.
    :ivar tick: ticks all paths forward. The default implementation calls tick_path() on every path.

    A Surveyor has the following overloadable functions :

    :func:`tick_path` moves a provided path forward, returning a set of new paths.
    :func:`spill` spills all paths, in-place. The default implementation first calls :func:`spill_path` on every
    path, then :func:`spill_paths` on the resulting sequence, then keeps the rest.
    :func:`spill_path` returns a spilled sequence of paths from a provided sequence of paths.

    An analysis can overload either the specific sub-portions of surveyor
    (i.e, the tick_path and spill_path functions) or bigger and bigger pieces
    to implement more and more customizeable analyses.
    """

    # TODO: what about errored? It's a problem cause those paths are duplicates, and could cause confusion...
    path_lists = ['active', 'deadended', 'spilled', 'errored', 'unconstrained', 'suspended', 'pruned' ]

    def __init__(self, project, start=None, max_active=None, max_concurrency=None, pickle_paths=None,
                 save_deadends=None, enable_veritesting=False, veritesting_options=None, keep_pruned=None):
        """
        Creates the Surveyor.

        :param project: the angr.Project to analyze.
        :param start: a path (or set of paths) to start the analysis from
        :param max_active: the maximum number of paths to explore at a time
        :param max_concurrency: the maximum number of worker threads
        :param pickle_paths: pickle spilled paths to save memory
        :param save_deadends: save deadended paths
        :param enable_veritesting: use static symbolic execution to speed up exploration
        :param veritesting_options: special options to be passed to Veritesting
        :param keep_pruned: keep pruned unsat states
        """
        self._project = project
        self._max_concurrency = 1 if max_concurrency is None else max_concurrency
        self._max_active = multiprocessing.cpu_count() if max_active is None else max_active
        self._pickle_paths = False if pickle_paths is None else pickle_paths
        self._save_deadends = True if save_deadends is None else save_deadends
        self._keep_pruned = False if keep_pruned is None else keep_pruned

        self._enable_veritesting = enable_veritesting
        self._veritesting_options = { } if veritesting_options is None else veritesting_options

        # the paths
        self.active = []
        self.deadended = []
        self.spilled = []
        self.errored = []
        self.pruned = []
        self.suspended = []
        self.unconstrained = []

        # maps a path ID to the IDs of the successors it split into; used for
        # visualization (see _tick_path)
        self.split_paths = {}
        self._current_step = 0
        self._hierarchy = PathHierarchy()

        # accept a single Path, a collection of Paths, or None (start at the
        # project's default entry state)
        if isinstance(start, Path):
            self.active.append(start)
        elif isinstance(start, (tuple, list, set)):
            self.active.extend(start)
        elif start is None:
            self.active.append(self._project.factory.path())
        else:
            raise AngrError('invalid "start" argument')

    #
    # Quick list access
    #

    @property
    def _a(self):
        # shorthand: first active path
        return self.active[0]

    @property
    def _d(self):
        # shorthand: first deadended path
        return self.deadended[0]

    @property
    def _spl(self):
        # shorthand: first spilled path
        return self.spilled[0]

    @property
    def _e(self):
        # shorthand: first errored path
        return self.errored[0]

    #
    # Overall analysis.
    #

    def pre_tick(self):
        """
        Provided for analyses to use for pre-tick actions.
        """
        pass

    def post_tick(self):
        """
        Provided for analyses to use for post-tick actions.
        """
        pass

    def step(self):
        """
        Takes one step in the analysis (called by run()).
        """
        self.pre_tick()
        self.tick()
        #self.filter()
        self.spill()
        self.post_tick()
        self._current_step += 1
        l.debug("After iteration: %s", self)
        return self

    def run(self, n=None):
        """
        Runs the analysis through completion (until done() returns True) or, if n is provided, n times.

        :param n: the maximum number of ticks
        :returns: itself for chaining
        """
        global STOP_RUNS, PAUSE_RUNS # pylint: disable=W0602,

        # We do a round of filtering first
        self.active = self.filter_paths(self.active)

        while not self.done and (n is None or n > 0):
            self.step()

            # module-level debug flags, toggled by the SIGUSR handlers above
            if STOP_RUNS:
                l.warning("%s stopping due to STOP_RUNS being set.", self)
                l.warning("... please call resume_analyses() and then this.run() if you want to resume execution.")
                break

            if PAUSE_RUNS:
                l.warning("%s pausing due to PAUSE_RUNS being set.", self)
                l.warning("... please call disable_singlestep() before continuing if you don't want to single-step.")
                try:
                    import ipdb as pdb # pylint: disable=F0401,
                except ImportError:
                    import pdb
                pdb.set_trace()

            if n is not None:
                n -= 1
        return self

    @property
    def done(self):
        """
        True if the analysis is done.
        """
        return len(self.active) == 0

    #
    # Utility functions.
    #

    def __repr__(self):
        return "%d active, %d spilled, %d deadended, %d errored, %d unconstrained" % (
            len(self.active), len(self.spilled), len(self.deadended), len(self.errored), len(self.unconstrained))

    #
    # Analysis progression
    #

    def tick(self):
        """
        Takes one step in the analysis. Typically, this moves all active paths forward.

        :return: itself, for chaining
        """
        new_active = []

        #with concurrent.futures.ThreadPoolExecutor(max_workers=self._max_concurrency) as executor:
        #    future_to_path = {executor.submit(self.safe_tick_path, p): p for p in self.active}
        #    for future in concurrent.futures.as_completed(future_to_path):
        #        p = future_to_path[future]
        #        successors = future.result()
        for p in self.active:
            if p.errored:
                if isinstance(p.error, PathUnreachableError):
                    # the path's ancestors were unsat: optionally keep it in
                    # .pruned, otherwise report it to the hierarchy
                    if self._keep_pruned:
                        self.pruned.append(p)
                    else:
                        self._hierarchy.unreachable_path(p)
                        self._hierarchy.simplify()
                # NOTE(review): every errored path ends up in .errored -- see
                # the TODO above about duplicates.
                self.errored.append(p)
                continue

            self._step_path(p)
            if len(p.successors) == 0 and len(p.unconstrained_successor_states) == 0:
                l.debug("Path %s has deadended.", p)
                self.suspend_path(p)
                self.deadended.append(p)
            else:
                if self._enable_veritesting: # and len(p.successors) > 1:
                    # Try to use Veritesting!
                    if hasattr(self, '_find') and hasattr(self, '_avoid'):
                        # pylint: disable=no-member
                        # subclasses like Explorer carry find/avoid targets;
                        # pass them to Veritesting as boundaries
                        boundaries = [ ]
                        if self._find is not None:
                            boundaries.extend(list(self._find))
                        if self._avoid is not None:
                            boundaries.extend(list(self._avoid))
                        veritesting = self._project.analyses.Veritesting(p,
                                                                         boundaries=boundaries,
                                                                         **self._veritesting_options)
                    else:
                        veritesting = self._project.analyses.Veritesting(p,
                                                                         **self._veritesting_options)
                    if veritesting.result and veritesting.final_path_group:
                        # Veritesting succeeded: absorb its path group and tick
                        # the successful/deviated paths as this path's successors
                        pg = veritesting.final_path_group
                        self.deadended.extend(pg.deadended)
                        self.errored.extend(pg.errored)
                        successors = pg.successful + pg.deviated
                        for suc in successors:
                            l.info('Veritesting yields a new IP: 0x%x', suc.addr)
                        successors = self._tick_path(p, successors=successors)
                    else:
                        successors = self.tick_path(p)
                else:
                    successors = self.tick_path(p)
                new_active.extend(successors)

            if len(p.unconstrained_successor_states) > 0:
                self.unconstrained.append(p)

        self.active = new_active
        return self

    def _step_path(self, p): #pylint:disable=no-self-use
        # advance the path one step; its successors become available on p
        p.step()

    def _tick_path(self, p, successors=None):
        """
        Ticks a single path forward using the given successors (or the path's
        own), registers them with the path hierarchy, and returns the filtered
        successor paths.
        """
        if successors is None:
            successors = p.successors

        l.debug("Ticking path %s", p)
        for s in successors:
            self._hierarchy.add_path(s)
        self._hierarchy.simplify()

        l.debug("... path %s has produced %d successors.", p, len(successors))
        l.debug("... addresses: %s", ["0x%x" % s.addr for s in successors])
        filtered_successors = self.filter_paths(successors)
        l.debug("Remaining: %d successors out of %d", len(filtered_successors), len(successors))

        # track the path ID for visualization
        if len(filtered_successors) == 1:
            filtered_successors[0].path_id = p.path_id
        else:
            self.split_paths[p.path_id] = [sp.path_id for sp in filtered_successors]

        return filtered_successors

    def tick_path(self, p):
        """
        Ticks a single path forward. Returns a sequence of successor paths.
        """
        return self._tick_path(p)

    def prune(self):
        """
        Prune unsat paths.
        """
        for p in list(self.active):
            if not p.reachable:
                self._hierarchy.unreachable_path(p)
                self._hierarchy.simplify()
                self.active.remove(p)
                self.pruned.append(p)

        for p in list(self.spilled):
            if not p.reachable:
                self._hierarchy.unreachable_path(p)
                self._hierarchy.simplify()
                self.spilled.remove(p)
                self.pruned.append(p)

    ###
    ### Path termination.
    ###

    def filter_path(self, p): # pylint: disable=W0613,R0201
        """
        Returns True if the given path should be kept in the analysis, False
        otherwise.
        """
        return True

    def filter_paths(self, paths):
        """
        Given a list of paths, filters them and returns the ones to keep.
        """
        return [p for p in paths if self.filter_path(p)]

    #def filter(self):
    #    """
    #    Filters the active paths, in-place.
    #    """
    #    old_active = self.active[ :: ]
    #    l.debug("before filter: %d paths", len(self.active))
    #    self.active = self.filter_paths(self.active)
    #    l.debug("after filter: %d paths", len(self.active))
    #    for a in old_active:
    #        if a not in self.active:
    #            self.deadended.append(a)

    ###
    ### State explosion control (spilling paths).
    ###

    def path_comparator(self, a, b): # pylint: disable=W0613,R0201
        """
        This function should compare paths a and b, to determine which should
        have a higher priority in the analysis. It's used as the cmp argument
        to sort.
        """
        return 0

    def prioritize_paths(self, paths):
        """
        This function is called to sort a list of paths, to prioritize
        the analysis of paths. Should return a list of paths, with higher-
        priority paths first.
        """
        # NOTE: cmp= is Python 2 only; this module predates Python 3 support
        paths.sort(cmp=self.path_comparator)
        return paths

    def spill_paths(self, active, spilled): # pylint: disable=R0201
        """
        Called with the currently active and spilled paths to spill some
        paths. Should return the new active and spilled paths.
        """
        l.debug("spill_paths received %d active and %d spilled paths.", len(active), len(spilled))
        # keep the highest-priority _max_active paths active; spill the rest
        prioritized = self.prioritize_paths(active + spilled)
        new_active = prioritized[:self._max_active]
        new_spilled = prioritized[self._max_active:]
        l.debug("... %d active and %d spilled paths.", len(new_active), len(new_spilled))
        return new_active, new_spilled

    def spill(self):
        """
        Spills/unspills paths, in-place.
        """
        new_active, new_spilled = self.spill_paths(self.active, self.spilled)

        num_suspended = 0
        num_resumed = 0

        for p in new_active:
            if p in self.spilled:
                num_resumed += 1
                #p.resume(self._project)

        for p in new_spilled:
            if p in self.active:
                num_suspended += 1
                self.suspend_path(p)

        l.debug("resumed %d and suspended %d", num_resumed, num_suspended)
        self.active, self.spilled = new_active, new_spilled

    def suspend_path(self, p): #pylint:disable=no-self-use
        """
        Suspends and returns a path.

        :param p: the path
        :returns: the path
        """
        # TODO: Path doesn't provide suspend() now. What should we replace it with?
        # p.suspend(do_pickle=self._pickle_paths)
        p.state.downsize()
        return p
# Imports are deferred to the bottom of the module to break circular imports
# (these modules import angr.surveyor themselves).
from .errors import AngrError, PathUnreachableError
from .path import Path
from .path_hierarchy import PathHierarchy
from .surveyors import all_surveyors
|
{"/angr/surveyors/caller.py": ["/angr/surveyors/explorer.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/tests/test_argv.py": ["/angr/__init__.py"], "/angr/path.py": ["/angr/errors.py", "/angr/path_history.py"], "/angr/simos.py": ["/angr/errors.py", "/angr/tablespecs.py"], "/tests/test_block_cache.py": ["/angr/__init__.py"], "/tests/test_signed_div.py": ["/angr/__init__.py"], "/angr/knowledge_base.py": ["/angr/knowledge/data.py"], "/angr/surveyors/executor.py": ["/angr/surveyor.py", "/angr/surveyors/__init__.py"], "/tests/test_self_modifying_code.py": ["/angr/__init__.py"], "/tests/test_hook.py": ["/angr/__init__.py"], "/angr/surveyors/slicecutor.py": ["/angr/surveyor.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/tests/broken_variableseekr.py": ["/angr/__init__.py"], "/tests/test_argc_sym.py": ["/angr/__init__.py"], "/tests/test_cfg_path.py": ["/angr/__init__.py"], "/angr/analyses/veritesting.py": ["/angr/errors.py", "/angr/analysis.py", "/angr/path_group.py", "/angr/path.py"], "/angr/analyses/congruency_check.py": ["/angr/analysis.py", "/angr/errors.py"], "/angr/knowledge/__init__.py": ["/angr/knowledge/data.py"], "/tests/test_scanf.py": ["/angr/__init__.py"], "/tests/test_vfg_path.py": ["/angr/__init__.py"], "/tests/test_serialization.py": ["/angr/__init__.py"], "/tests/test_mem_funcs.py": ["/angr/__init__.py"], "/angr/exploration_techniques/__init__.py": ["/angr/exploration_techniques/explorer.py", "/angr/exploration_techniques/dfs.py", "/angr/exploration_techniques/veritesting.py", "/angr/exploration_techniques/oppologist.py", "/angr/errors.py"], "/angr/surveyors/__init__.py": ["/angr/surveyors/explorer.py", "/angr/surveyors/executor.py", "/angr/surveyors/escaper.py", "/angr/surveyors/slicecutor.py", "/angr/surveyors/caller.py"], "/tests/test_static_hooker.py": ["/angr/__init__.py"], "/tests/test_cfgfast.py": ["/angr/__init__.py"], "/tests/test_str_funcs.py": ["/angr/__init__.py"], "/angr/exploration_techniques/oppologist.py": 
["/angr/errors.py", "/angr/exploration_techniques/__init__.py"], "/tests/test_argc.py": ["/angr/__init__.py"], "/angr/analyses/cdg.py": ["/angr/analysis.py"], "/angr/surveyors/escaper.py": ["/angr/surveyor.py", "/angr/surveyors/__init__.py"], "/tests/test_checkbyte.py": ["/angr/__init__.py"], "/angr/analyses/static_hooker.py": ["/angr/analysis.py", "/angr/errors.py"], "/angr/analyses/forward_analysis.py": ["/angr/errors.py"], "/angr/analyses/__init__.py": ["/angr/analyses/cdg.py", "/angr/analyses/ddg.py", "/angr/analyses/girlscout.py", "/angr/analyses/veritesting.py", "/angr/analyses/dfg.py", "/angr/analyses/congruency_check.py", "/angr/analyses/static_hooker.py"], "/angr/exploration_techniques/dfs.py": ["/angr/exploration_techniques/__init__.py"], "/angr/__init__.py": ["/angr/project.py", "/angr/regmap.py", "/angr/path.py", "/angr/errors.py", "/angr/surveyor.py", "/angr/analyses/__init__.py", "/angr/analysis.py", "/angr/tablespecs.py", "/angr/simos.py", "/angr/path_group.py", "/angr/surveyors/caller.py", "/angr/log.py"], "/tests/test_echo.py": ["/angr/__init__.py"], "/tests/test_explorer.py": ["/angr/__init__.py"], "/angr/exploration_techniques/veritesting.py": ["/angr/exploration_techniques/__init__.py"], "/angr/path_group.py": ["/angr/errors.py", "/angr/path.py", "/angr/__init__.py"], "/angr/analyses/dfg.py": ["/angr/analysis.py"], "/tests/test_cle_gdb.py": ["/angr/__init__.py"], "/angr/surveyor.py": ["/angr/errors.py", "/angr/path.py", "/angr/surveyors/__init__.py"], "/angr/surveyors/explorer.py": ["/angr/surveyor.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/angr/analysis.py": ["/angr/errors.py"], "/angr/factory.py": ["/angr/surveyors/caller.py", "/angr/lifter.py", "/angr/errors.py", "/angr/path.py", "/angr/path_group.py", "/angr/knowledge/__init__.py"], "/tests/test_strtol.py": ["/angr/__init__.py"], "/angr/exploration_techniques/explorer.py": ["/angr/exploration_techniques/__init__.py"], "/tests/test_veritesting.py": ["/angr/__init__.py"]}
|
38,475
|
MayfeelYang/angr
|
refs/heads/master
|
/angr/surveyors/explorer.py
|
#!/usr/bin/env python
from ..surveyor import Surveyor
import simuvex
import collections
import networkx
import logging
l = logging.getLogger("angr.surveyors.explorer")
class Explorer(Surveyor):
    """
    Explorer implements a symbolic exploration engine!

    WARNING: Explorers are not really maintained - Use path_group instead when possible

    found - paths where the target addresses have been found.
    avoided - paths where the to-avoid addresses have been found.
    deviating - paths that deviate from the restricted-to addresses.
    looping - paths that were detected as looping.
    """

    path_lists = Surveyor.path_lists + [ 'found', 'avoided', 'deviating', 'looping']

    def __init__(self, project, start=None, max_concurrency=None, max_active=None, pickle_paths=None,
                 find=None, avoid=None, restrict=None, min_depth=0, max_depth=None, max_repeats=10000000,
                 num_find=1, num_avoid=None, num_deviate=1, num_loop=None, cfg=None, enable_veritesting=None,
                 veritesting_options=None, keep_pruned=None):
        """
        Explores the path space until a block containing a specified address is found.

        :param project:

        The following parameters are optional :

        :param start:
        :param max_concurrency:
        :param max_active:
        :param pickle_paths:
        :param find: A tuple containing the addresses to search for.
        :param avoid: A tuple containing the addresses to avoid.
        :param restrict: A tuple containing the addresses to restrict the analysis to (avoid all others).
        :param min_depth: The minimum number of SimRuns in the resulting path.
        :param max_depth: The maximum number of SimRuns in the resulting path.
        :param num_find: The minimum number of paths to find. (default: 1)
        :param num_avoid: The minimum number of paths to avoid. (default: infinite)
        :param num_deviate: The minimum number of paths to deviate. (default: infinite)
        :param num_loop: The minimum number of paths to loop (default: infinite)
        :param cfg: A CFG to use to cut any paths that have no chance of going to the target.
        :param enable_veritesting: Whether Veritesting should be enabled or not.
        :param veritesting_options: Options that should be passed to Veritesting.
        """
        Surveyor.__init__(self,
                          project,
                          start=start,
                          max_concurrency=max_concurrency,
                          max_active=max_active,
                          pickle_paths=pickle_paths,
                          enable_veritesting=enable_veritesting,
                          veritesting_options=veritesting_options,
                          keep_pruned=keep_pruned)

        # initialize the counter
        # counts how many times each instruction address has been visited;
        # used by path_comparator to deprioritize frequently-visited code
        self._instruction_counter = collections.Counter()

        # a bare address is normalized into a one-element list
        self._find = find if not isinstance(find, (int, long)) else [find]
        self._avoid = avoid
        self._restrict = restrict
        self._max_repeats = max_repeats
        self._max_depth = max_depth
        self._min_depth = min_depth

        self.found = [ ]
        self.avoided = [ ]
        self.deviating = [ ]
        self.looping = [ ]
        self.lost = [ ]

        self._num_find = num_find
        self._num_avoid = num_avoid
        self._num_deviate = num_deviate
        self._num_loop = num_loop

        self._cfg = cfg

        # drop find targets that have no node in the CFG; according to the
        # graph they could never be reached anyway
        if self._cfg is not None and isinstance(self._find, (tuple, set, list)):
            good_find = set()
            for f in self._find:
                if self._cfg.get_any_irsb(f) is None:
                    l.warning("No node 0x%x in CFG. This will be automatically cut.", f)
                else:
                    good_find.add(f)
            self._find = good_find

        # on ARM, match both the ARM (bit 0 clear) and Thumb (bit 0 set)
        # flavors of every find address
        # NOTE(review): assumes self._find is iterable here -- confirm callers
        # always pass `find` for ARM targets
        if self._project.arch.name.startswith('ARM'):
            self._find = [x & ~1 for x in self._find] + [x | 1 for x in self._find]

    def iter_found(self, runs=None):
        """
        Generator that runs the analysis and yields found paths as they
        appear, running at most `runs` ticks (unbounded if None).
        """
        runs = -1 if runs is None else runs
        cur_found = 0
        while runs != 0:
            self.run(1)
            # yield only the paths found since the previous iteration
            for f in self.found[cur_found:]:
                l.debug("Yielding found path %s", f)
                yield f

            cur_found = len(self.found)
            runs -= 1

            if self.done:
                break
    __iter__ = iter_found

    @property
    def _f(self):
        # shorthand: first found path
        return self.found[0]

    @property
    def _av(self):
        # shorthand: first avoided path
        return self.avoided[0]

    @property
    def _dv(self):
        # shorthand: first deviating path
        return self.deviating[0]

    @property
    def _lo(self):
        # shorthand: first looping path
        return self.looping[0]

    def path_comparator(self, x, y):
        # prioritize paths sitting at less-frequently-visited addresses
        return self._instruction_counter[x.addr] - self._instruction_counter[y.addr]

    @property
    def done(self):
        """
        True when no paths remain active, or when any configured quota
        (found/avoided/deviating/looping) has been met.
        """
        if len(self.active) == 0:
            l.debug("Done because we have no active paths left!")
            return True

        if self._num_find is not None and len(self.found) >= self._num_find:
            l.debug("Done because we found the targets on %d path(s)!", len(self.found))
            return True

        if self._num_avoid is not None and len(self.avoided) >= self._num_avoid:
            l.debug("Done because we avoided on %d path(s)!", len(self.avoided))
            return True

        if self._num_deviate is not None and len(self.deviating) >= self._num_deviate:
            l.debug("Done because we deviated on %d path(s)!", len(self.deviating))
            return True

        if self._num_loop is not None and len(self.looping) >= self._num_loop:
            l.debug("Done because we looped on %d path(s)!", len(self.looping))
            return True

        return False

    def _match(self, criteria, path, imark_set): #pylint:disable=no-self-use
        """
        True if the path matches the criteria: an address or collection of
        addresses intersecting the path's instruction marks, or a callable
        predicate on the path.

        NOTE(review): if `criteria` has an unexpected type, `r` is never
        assigned and this raises UnboundLocalError.
        """
        if criteria is None:
            r = False
        elif isinstance(criteria, set):
            r = len(criteria & imark_set) > 0
        elif isinstance(criteria, (tuple, list)):
            r = len(set(criteria) & imark_set) > 0
        elif isinstance(criteria, (int, long)):
            r = criteria in imark_set
        elif hasattr(criteria, '__call__'):
            r = criteria(path)
        return r

    def _restricted(self, criteria, path, imark_set): #pylint:disable=no-self-use
        """
        True if the path has left the restricted address set (its instruction
        marks are not a subset of the criteria).

        NOTE(review): the int/long branch uses membership (mirroring _match),
        which looks inconsistent with the subset semantics of the other
        branches -- confirm intent.
        """
        if criteria is None:
            r = False
        elif isinstance(criteria, set):
            r = not imark_set.issubset(criteria)
        elif isinstance(criteria, (tuple, list)):
            r = not imark_set.issubset(set(criteria))
        elif isinstance(criteria, (int, long)):
            r = criteria in imark_set
        elif hasattr(criteria, '__call__'):
            r = criteria(path)
        return r

    def _is_lost(self, p):
        """
        True if, according to the CFG, the path can no longer reach any find
        target (and should therefore be cut by filter_path).
        """
        if self._cfg is None:
            # no CFG to reason with; nothing is ever considered lost
            return False
        elif not isinstance(self._find, (tuple, set, list)) or len(self._find) == 0:
            l.warning("Explorer ignoring CFG because find is not a sequence of addresses.")
            return False
        elif isinstance(self._cfg.get_any_irsb(p.addr), simuvex.SimProcedure):
            l.debug("Path %s is pointing to a SimProcedure. Counting as not lost.", p)
            return False
        elif p.length > 0 and self._cfg.get_any_irsb(p.addr_trace[-1]) is None:
            l.debug("not trimming, because %s is currently outside of the CFG", p)
            return False
        else:
            f = self._cfg.get_any_irsb(p.addr)
            if f is None:
                # NOTE(review): the message says the path is cut, but returning
                # False means it is NOT considered lost -- confirm intent
                l.warning("CFG has no node at 0x%x. Cutting this path.", p.addr)
                return False

            if not any(((networkx.has_path(self._cfg._graph, f, self._cfg.get_any_irsb(t)) for t in self._find))):
                l.debug("Trimming %s because it can't get to the target (according to the CFG)", p)
                return True
            else:
                l.debug("Not trimming %s, because it can still get to the target.", p)
                return False

    def filter_path(self, p):
        """
        Classifies a path: lost/avoided/found/deviating/looping/too-deep paths
        are recorded in their respective lists and removed from the active set
        (return False); otherwise the path stays active (return True).
        """
        if self._is_lost(p):
            l.debug("Cutting path %s because it's lost.", p)
            self.lost.append(p)
            return False

        # paths below the minimum depth are kept without classification
        if p.length < self._min_depth:
            l.debug("path %s has less than the minimum depth", p)
            return True

        if not self._project.is_hooked(p.addr):
            try:
                imark_set = set(self._project.factory.block(p.addr).instruction_addrs)
            except (AngrMemoryError, AngrTranslationError):
                l.debug("Cutting path because there is no code at address 0x%x", p.addr)
                self.errored.append(p)
                return False
        else:
            # hooked addresses have no lifted block; use the address itself
            imark_set = { p.addr }

        for addr in imark_set:
            self._instruction_counter[addr] += 1

        if self._match(self._avoid, p, imark_set):
            l.debug("Avoiding path %s.", p)
            self.avoided.append(p)
            return False
        elif self._match(self._find, p, imark_set):
            if not p.state.satisfiable():
                l.debug("Discarding 'found' path %s because it is unsat", p)
                self.deadended.append(p)
                return False

            l.debug("Marking path %s as found.", p)
            self.found.append(p)
            return False
        elif self._restricted(self._restrict, p, imark_set):
            l.debug("Path %s is not on the restricted addresses!", p)
            self.deviating.append(p)
            return False
        elif p.detect_loops(self._max_repeats) >= self._max_repeats:
            # discard any paths that loop too much
            l.debug("Path %s appears to be looping!", p)
            self.looping.append(p)
            return False
        elif self._max_depth is not None and p.length > self._max_depth:
            l.debug('Path %s exceeds the maximum depth(%d) allowed.', p, self._max_depth)
            return False
        else:
            l.debug("Letting path %s continue", p)
            return True

    def __repr__(self):
        return "<Explorer with paths: %s, %d found, %d avoided, %d deviating, %d looping, %d lost>" % (
            Surveyor.__repr__(self), len(self.found), len(self.avoided), len(self.deviating), len(self.looping),
            len(self.lost))
# Imported at the bottom of the module to avoid a circular import.
from ..errors import AngrMemoryError, AngrTranslationError
from . import all_surveyors

# Register this surveyor so it can be instantiated by name (see Surveyors).
all_surveyors['Explorer'] = Explorer
|
{"/angr/surveyors/caller.py": ["/angr/surveyors/explorer.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/tests/test_argv.py": ["/angr/__init__.py"], "/angr/path.py": ["/angr/errors.py", "/angr/path_history.py"], "/angr/simos.py": ["/angr/errors.py", "/angr/tablespecs.py"], "/tests/test_block_cache.py": ["/angr/__init__.py"], "/tests/test_signed_div.py": ["/angr/__init__.py"], "/angr/knowledge_base.py": ["/angr/knowledge/data.py"], "/angr/surveyors/executor.py": ["/angr/surveyor.py", "/angr/surveyors/__init__.py"], "/tests/test_self_modifying_code.py": ["/angr/__init__.py"], "/tests/test_hook.py": ["/angr/__init__.py"], "/angr/surveyors/slicecutor.py": ["/angr/surveyor.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/tests/broken_variableseekr.py": ["/angr/__init__.py"], "/tests/test_argc_sym.py": ["/angr/__init__.py"], "/tests/test_cfg_path.py": ["/angr/__init__.py"], "/angr/analyses/veritesting.py": ["/angr/errors.py", "/angr/analysis.py", "/angr/path_group.py", "/angr/path.py"], "/angr/analyses/congruency_check.py": ["/angr/analysis.py", "/angr/errors.py"], "/angr/knowledge/__init__.py": ["/angr/knowledge/data.py"], "/tests/test_scanf.py": ["/angr/__init__.py"], "/tests/test_vfg_path.py": ["/angr/__init__.py"], "/tests/test_serialization.py": ["/angr/__init__.py"], "/tests/test_mem_funcs.py": ["/angr/__init__.py"], "/angr/exploration_techniques/__init__.py": ["/angr/exploration_techniques/explorer.py", "/angr/exploration_techniques/dfs.py", "/angr/exploration_techniques/veritesting.py", "/angr/exploration_techniques/oppologist.py", "/angr/errors.py"], "/angr/surveyors/__init__.py": ["/angr/surveyors/explorer.py", "/angr/surveyors/executor.py", "/angr/surveyors/escaper.py", "/angr/surveyors/slicecutor.py", "/angr/surveyors/caller.py"], "/tests/test_static_hooker.py": ["/angr/__init__.py"], "/tests/test_cfgfast.py": ["/angr/__init__.py"], "/tests/test_str_funcs.py": ["/angr/__init__.py"], "/angr/exploration_techniques/oppologist.py": 
["/angr/errors.py", "/angr/exploration_techniques/__init__.py"], "/tests/test_argc.py": ["/angr/__init__.py"], "/angr/analyses/cdg.py": ["/angr/analysis.py"], "/angr/surveyors/escaper.py": ["/angr/surveyor.py", "/angr/surveyors/__init__.py"], "/tests/test_checkbyte.py": ["/angr/__init__.py"], "/angr/analyses/static_hooker.py": ["/angr/analysis.py", "/angr/errors.py"], "/angr/analyses/forward_analysis.py": ["/angr/errors.py"], "/angr/analyses/__init__.py": ["/angr/analyses/cdg.py", "/angr/analyses/ddg.py", "/angr/analyses/girlscout.py", "/angr/analyses/veritesting.py", "/angr/analyses/dfg.py", "/angr/analyses/congruency_check.py", "/angr/analyses/static_hooker.py"], "/angr/exploration_techniques/dfs.py": ["/angr/exploration_techniques/__init__.py"], "/angr/__init__.py": ["/angr/project.py", "/angr/regmap.py", "/angr/path.py", "/angr/errors.py", "/angr/surveyor.py", "/angr/analyses/__init__.py", "/angr/analysis.py", "/angr/tablespecs.py", "/angr/simos.py", "/angr/path_group.py", "/angr/surveyors/caller.py", "/angr/log.py"], "/tests/test_echo.py": ["/angr/__init__.py"], "/tests/test_explorer.py": ["/angr/__init__.py"], "/angr/exploration_techniques/veritesting.py": ["/angr/exploration_techniques/__init__.py"], "/angr/path_group.py": ["/angr/errors.py", "/angr/path.py", "/angr/__init__.py"], "/angr/analyses/dfg.py": ["/angr/analysis.py"], "/tests/test_cle_gdb.py": ["/angr/__init__.py"], "/angr/surveyor.py": ["/angr/errors.py", "/angr/path.py", "/angr/surveyors/__init__.py"], "/angr/surveyors/explorer.py": ["/angr/surveyor.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/angr/analysis.py": ["/angr/errors.py"], "/angr/factory.py": ["/angr/surveyors/caller.py", "/angr/lifter.py", "/angr/errors.py", "/angr/path.py", "/angr/path_group.py", "/angr/knowledge/__init__.py"], "/tests/test_strtol.py": ["/angr/__init__.py"], "/angr/exploration_techniques/explorer.py": ["/angr/exploration_techniques/__init__.py"], "/tests/test_veritesting.py": ["/angr/__init__.py"]}
|
38,476
|
MayfeelYang/angr
|
refs/heads/master
|
/tests/test_function_manager.py
|
import nose
import angr
from archinfo import ArchAMD64

import logging
l = logging.getLogger("angr.tests")

import os
# Path to the shared angr binaries repository, relative to this test file.
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
def test_amd64():
    """End-to-end check of CFGAccurate's function-manager results on the
    x86_64 fauxware binary: recovered functions, main()'s blocks, call sites,
    call targets/returns, and transition-graph edges."""
    logging.getLogger('angr.analyses.cfg').setLevel(logging.DEBUG)

    fauxware_amd64 = angr.Project(test_location + "/x86_64/fauxware")

    # Ground truth extracted from the fauxware binary (addresses < 0x500000).
    EXPECTED_FUNCTIONS = { 0x4004e0, 0x400510, 0x400520, 0x400530, 0x400540, 0x400550, 0x400560, 0x400570,
                           0x400580, 0x4005ac, 0x400640, 0x400664, 0x4006ed, 0x4006fd, 0x40071d, 0x4007e0,
                           0x400880 }
    EXPECTED_BLOCKS = { 0x40071D, 0x40073E, 0x400754, 0x40076A, 0x400774, 0x40078A, 0x4007A0, 0x4007B3, 0x4007C7,
                        0x4007C9, 0x4007BD, 0x4007D3 }
    EXPECTED_CALLSITES = { 0x40071D, 0x40073E, 0x400754, 0x40076A, 0x400774, 0x40078A, 0x4007A0, 0x4007BD, 0x4007C9 }
    EXPECTED_CALLSITE_TARGETS = { 4195600L, 4195632L, 4195632L, 4195600L, 4195632L, 4195632L, 4195940L, 4196077L,
                                  4196093L }
    EXPECTED_CALLSITE_RETURNS = { 0x40073e, 0x400754, 0x40076a, 0x400774, 0x40078a, 0x4007a0, 0x4007b3, 0x4007c7,
                                  None }

    fauxware_amd64.analyses.CFGAccurate()
    nose.tools.assert_equal(set([ k for k in fauxware_amd64.kb.functions.keys() if k < 0x500000 ]), EXPECTED_FUNCTIONS)

    main = fauxware_amd64.kb.functions.function(name='main')
    nose.tools.assert_equal(main.startpoint.addr, 0x40071D)
    nose.tools.assert_equal(set(main.block_addrs), EXPECTED_BLOCKS)
    nose.tools.assert_equal([0x4007D3], [bl.addr for bl in main.endpoints])
    nose.tools.assert_equal(set(main.get_call_sites()), EXPECTED_CALLSITES)
    nose.tools.assert_equal(set(map(main.get_call_target, main.get_call_sites())), EXPECTED_CALLSITE_TARGETS)
    nose.tools.assert_equal(set(map(main.get_call_return, main.get_call_sites())), EXPECTED_CALLSITE_RETURNS)
    nose.tools.assert_true(main.has_return)

    rejected = fauxware_amd64.kb.functions.function(name='rejected')
    nose.tools.assert_equal(rejected.returning, False)

    # transition graph
    main_g = main.transition_graph
    main_g_edges_ = main_g.edges(data=True)

    # Convert nodes those edges from blocks to addresses
    main_g_edges = [ ]
    for src_node, dst_node, data in main_g_edges_:
        main_g_edges.append((src_node.addr, dst_node.addr, data))

    nose.tools.assert_true((0x40071d, 0x400510, {'type': 'call'}) in main_g_edges)
    nose.tools.assert_true((0x40071d, 0x40073e, {'type': 'fake_return', 'confirmed': True, 'outside': False}) in
                           main_g_edges
                           )
    nose.tools.assert_true((0x40073e, 0x400530, {'type': 'call'}) in main_g_edges)
    nose.tools.assert_true((0x40073e, 0x400754, {'type': 'fake_return', 'confirmed': True, 'outside': False}) in main_g_edges)

    # rejected() does not return
    nose.tools.assert_true((0x4007c9, 0x4006fd, {'type': 'call'}) in main_g_edges)
    nose.tools.assert_true((0x4007c9, 0x4007d3, {'type': 'fake_return', 'outside': False}) in main_g_edges)

    # These tests fail for reasons of fastpath, probably
    #nose.tools.assert_true(main.bp_on_stack)
    #nose.tools.assert_equal(main.name, 'main')
    #nose.tools.assert_true(main.retaddr_on_stack)
    #nose.tools.assert_equal(0x50, main.sp_difference)

    #l.info(functions)
    # TODO: Check the result returned
    #func_man.dbg_draw()
    #l.info("PNG files generated.")
def test_call_to():
    # Registering a call edge by hand should implicitly create entries for
    # both the caller and the callee in the knowledge base's function manager.
    project = angr.Project(test_location + "/x86_64/fauxware")
    project.arch = ArchAMD64()
    functions = project.kb.functions
    functions._add_call_to(0x400000, 0x400410, 0x400420, 0x400414)
    nose.tools.assert_in(0x400000, functions.keys())
    nose.tools.assert_in(0x400420, functions.keys())
if __name__ == "__main__":
    # Allow running this test module directly, outside of a test runner.
    test_call_to()
    test_amd64()
|
{"/angr/surveyors/caller.py": ["/angr/surveyors/explorer.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/tests/test_argv.py": ["/angr/__init__.py"], "/angr/path.py": ["/angr/errors.py", "/angr/path_history.py"], "/angr/simos.py": ["/angr/errors.py", "/angr/tablespecs.py"], "/tests/test_block_cache.py": ["/angr/__init__.py"], "/tests/test_signed_div.py": ["/angr/__init__.py"], "/angr/knowledge_base.py": ["/angr/knowledge/data.py"], "/angr/surveyors/executor.py": ["/angr/surveyor.py", "/angr/surveyors/__init__.py"], "/tests/test_self_modifying_code.py": ["/angr/__init__.py"], "/tests/test_hook.py": ["/angr/__init__.py"], "/angr/surveyors/slicecutor.py": ["/angr/surveyor.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/tests/broken_variableseekr.py": ["/angr/__init__.py"], "/tests/test_argc_sym.py": ["/angr/__init__.py"], "/tests/test_cfg_path.py": ["/angr/__init__.py"], "/angr/analyses/veritesting.py": ["/angr/errors.py", "/angr/analysis.py", "/angr/path_group.py", "/angr/path.py"], "/angr/analyses/congruency_check.py": ["/angr/analysis.py", "/angr/errors.py"], "/angr/knowledge/__init__.py": ["/angr/knowledge/data.py"], "/tests/test_scanf.py": ["/angr/__init__.py"], "/tests/test_vfg_path.py": ["/angr/__init__.py"], "/tests/test_serialization.py": ["/angr/__init__.py"], "/tests/test_mem_funcs.py": ["/angr/__init__.py"], "/angr/exploration_techniques/__init__.py": ["/angr/exploration_techniques/explorer.py", "/angr/exploration_techniques/dfs.py", "/angr/exploration_techniques/veritesting.py", "/angr/exploration_techniques/oppologist.py", "/angr/errors.py"], "/angr/surveyors/__init__.py": ["/angr/surveyors/explorer.py", "/angr/surveyors/executor.py", "/angr/surveyors/escaper.py", "/angr/surveyors/slicecutor.py", "/angr/surveyors/caller.py"], "/tests/test_static_hooker.py": ["/angr/__init__.py"], "/tests/test_cfgfast.py": ["/angr/__init__.py"], "/tests/test_str_funcs.py": ["/angr/__init__.py"], "/angr/exploration_techniques/oppologist.py": 
["/angr/errors.py", "/angr/exploration_techniques/__init__.py"], "/tests/test_argc.py": ["/angr/__init__.py"], "/angr/analyses/cdg.py": ["/angr/analysis.py"], "/angr/surveyors/escaper.py": ["/angr/surveyor.py", "/angr/surveyors/__init__.py"], "/tests/test_checkbyte.py": ["/angr/__init__.py"], "/angr/analyses/static_hooker.py": ["/angr/analysis.py", "/angr/errors.py"], "/angr/analyses/forward_analysis.py": ["/angr/errors.py"], "/angr/analyses/__init__.py": ["/angr/analyses/cdg.py", "/angr/analyses/ddg.py", "/angr/analyses/girlscout.py", "/angr/analyses/veritesting.py", "/angr/analyses/dfg.py", "/angr/analyses/congruency_check.py", "/angr/analyses/static_hooker.py"], "/angr/exploration_techniques/dfs.py": ["/angr/exploration_techniques/__init__.py"], "/angr/__init__.py": ["/angr/project.py", "/angr/regmap.py", "/angr/path.py", "/angr/errors.py", "/angr/surveyor.py", "/angr/analyses/__init__.py", "/angr/analysis.py", "/angr/tablespecs.py", "/angr/simos.py", "/angr/path_group.py", "/angr/surveyors/caller.py", "/angr/log.py"], "/tests/test_echo.py": ["/angr/__init__.py"], "/tests/test_explorer.py": ["/angr/__init__.py"], "/angr/exploration_techniques/veritesting.py": ["/angr/exploration_techniques/__init__.py"], "/angr/path_group.py": ["/angr/errors.py", "/angr/path.py", "/angr/__init__.py"], "/angr/analyses/dfg.py": ["/angr/analysis.py"], "/tests/test_cle_gdb.py": ["/angr/__init__.py"], "/angr/surveyor.py": ["/angr/errors.py", "/angr/path.py", "/angr/surveyors/__init__.py"], "/angr/surveyors/explorer.py": ["/angr/surveyor.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/angr/analysis.py": ["/angr/errors.py"], "/angr/factory.py": ["/angr/surveyors/caller.py", "/angr/lifter.py", "/angr/errors.py", "/angr/path.py", "/angr/path_group.py", "/angr/knowledge/__init__.py"], "/tests/test_strtol.py": ["/angr/__init__.py"], "/angr/exploration_techniques/explorer.py": ["/angr/exploration_techniques/__init__.py"], "/tests/test_veritesting.py": ["/angr/__init__.py"]}
|
38,477
|
MayfeelYang/angr
|
refs/heads/master
|
/angr/analysis.py
|
import sys
import contextlib
from collections import defaultdict
import logging
l = logging.getLogger('angr.analysis')
import progressbar
from .errors import AngrAnalysisError
# Global registry mapping analysis names to analysis classes; consumed by
# Analyses.reload_analyses() to build per-project constructors.
registered_analyses = {}

def register_analysis(analysis, name):
    """Register *analysis* under *name* so projects expose it on ``.analyses``."""
    registered_analyses[name] = analysis
class AnalysisLogEntry(object):
    """
    A log record produced by an analysis: a message plus, optionally, the
    exception that was active when the entry was created.
    """
    def __init__(self, message, exc_info=False):
        """
        :param message:  The log message.
        :param exc_info: If True, capture the currently-handled exception
                         (type, value, traceback) from sys.exc_info().
        """
        if exc_info:
            (e_type, value, traceback) = sys.exc_info()
            self.exc_type = e_type
            self.exc_value = value
            self.exc_traceback = traceback
        else:
            self.exc_type = None
            self.exc_value = None
            self.exc_traceback = None
        self.message = message

    def __getstate__(self):
        # Exception objects and tracebacks are not reliably picklable;
        # stringify them for serialization.
        return str(self.__dict__.get("exc_type")), \
               str(self.__dict__.get("exc_value")), \
               str(self.__dict__.get("exc_traceback")), \
               self.message

    def __setstate__(self, s):
        self.exc_type, self.exc_value, self.exc_traceback, self.message = s

    def _abbreviated_message(self, limit):
        """
        Return repr(self.message) shortened to at most *limit* characters.

        When truncation happens, the repr's closing quote is cut off; re-append
        the opening quote character so the abbreviation still reads balanced.
        """
        msg_str = repr(self.message)
        if len(msg_str) > limit:
            msg_str = msg_str[:limit - 4] + '...'
            # BUGFIX: only re-balance the quote after truncating. The old code
            # appended it unconditionally, doubling the closing quote of every
            # short message (e.g. <AnalysisLogEntry 'hello''>).
            if msg_str[0] in ('"', "'"):
                msg_str += msg_str[0]
        return msg_str

    def __repr__(self):
        if self.exc_type is None:
            return '<AnalysisLogEntry %s>' % self._abbreviated_message(70)
        else:
            return '<AnalysisLogEntry %s with %s: %s>' % (
                self._abbreviated_message(40), self.exc_type.__name__, self.exc_value)
class Analyses(object):
    """
    This class contains functions for all the registered and runnable analyses,
    exposed as attributes (e.g. ``project.analyses.CFGAccurate``).
    """
    def __init__(self, p):
        """
        Creates an Analyses object

        :ivar p: A project
        :type p: angr.Project
        """
        self.project = p
        self._registered_analyses = {}
        self.reload_analyses()

    def reload_analyses(self):
        """Rebuild the project-bound analysis constructors from the global registry."""
        # .items() instead of .iteritems() keeps this working under both
        # python 2 and python 3.
        for analysis_name, analysis in registered_analyses.items():
            self._registered_analyses[analysis_name] = self._specialize_analysis(analysis, analysis_name)

    def _specialize_analysis(self, analysis, name):
        """
        Wrap the *analysis* class in a factory bound to this project.

        The factory allocates the analysis object without running its
        ``__init__`` until the common bookkeeping attributes (errors, project,
        kb, progress options, ...) are in place.
        """
        def make_analysis(*args, **kwargs):
            # Common options are stripped from kwargs before the analysis's
            # own __init__ sees them.
            fail_fast = kwargs.pop('fail_fast', False)
            kb = kwargs.pop('kb', self.project.kb)
            progress_callback = kwargs.pop('progress_callback', None)
            show_progressbar = kwargs.pop('show_progressbar', False)

            oself = analysis.__new__(analysis)
            # BUGFIX: was a plain {}, which made
            # Analysis._resilience(name=...) raise KeyError on first use; a
            # defaultdict(list) matches the Analysis class-level default.
            oself.named_errors = defaultdict(list)
            oself.errors = []
            oself.log = []

            oself._fail_fast = fail_fast
            oself._name = name
            oself.project = self.project
            oself.kb = kb
            oself._progress_callback = progress_callback

            if oself._progress_callback is not None:
                if not hasattr(oself._progress_callback, '__call__'):
                    raise AngrAnalysisError('The "progress_callback" parameter must be a None or a callable.')

            oself._show_progressbar = show_progressbar
            oself.__init__(*args, **kwargs)
            return oself

        # Surface the analysis's own constructor docs on the factory.
        make_analysis.__doc__ = analysis.__init__.__doc__
        return make_analysis

    def __getstate__(self):
        return self.project

    def __setstate__(self, s):
        self.__init__(s)

    def __getattr__(self, k):
        # Go through __getattribute__ so this does not recurse infinitely
        # when _registered_analyses itself is absent (e.g. mid-unpickling,
        # before __setstate__ has run).
        r = super(Analyses, self).__getattribute__('_registered_analyses')
        if k == '_registered_analyses':
            return r
        if k in r:
            return r[k]
        return super(Analyses, self).__getattribute__(k)

    def __dir__(self):
        # list() so the concatenation also works on python 3, where keys()
        # is a view rather than a list.
        return dir(Analyses) + list(self._registered_analyses.keys())
class Analysis(object):
    """
    This class represents an analysis on the program.

    :ivar project: The project for this analysis.
    :type project: angr.Project
    :ivar KnowledgeBase kb: The knowledgebase object.
    :ivar callable _progress_callback: A callback function for receiving the progress of this analysis. It only takes
                                       one argument, which is a float number from 0.0 to 100.0 indicating the current
                                       progress.
    :ivar bool _show_progressbar: If a progressbar should be shown during the analysis. It's independent from
                                  _progress_callback.
    :ivar progressbar.ProgressBar _progressbar: The progress bar object.
    """
    # NOTE(review): these class-level containers are shared defaults across
    # all Analysis subclasses; fresh per-instance ones are normally assigned
    # by Analyses._specialize_analysis before __init__ runs.
    project = None
    kb = None
    _fail_fast = None
    _name = None
    errors = []
    named_errors = defaultdict(list)
    _progress_callback = None
    _show_progressbar = False
    _progressbar = None

    @contextlib.contextmanager
    def _resilience(self, name=None, exception=Exception):
        """
        Context manager that logs-and-swallows *exception*, unless _fail_fast
        is set, in which case the exception propagates.

        :param name:      Optional bucket name; the error is recorded in
                          named_errors[name] instead of the flat errors list.
        :param exception: The exception type (or tuple of types) to catch.
        """
        try:
            yield
        except exception:  # pylint:disable=broad-except
            if self._fail_fast:
                raise
            else:
                error = AnalysisLogEntry("exception occurred", exc_info=True)
                l.error("Caught and logged %s with resilience: %s", error.exc_type.__name__, error.exc_value)
                if name is None:
                    self.errors.append(error)
                else:
                    # BUGFIX: setdefault makes this work even when
                    # named_errors is a plain dict (as assigned by
                    # Analyses._specialize_analysis), not only when it is the
                    # class-level defaultdict.
                    self.named_errors.setdefault(name, []).append(error)

    def _initialize_progressbar(self):
        """
        Initialize the progressbar.

        :return: None
        """
        widgets = [progressbar.Percentage(),
                   ' ',
                   progressbar.Bar(),
                   ' ',
                   progressbar.Timer(),
                   ' ',
                   progressbar.ETA()
                   ]
        # maxval matches the percentage * 10000 scaling in _update_progress.
        self._progressbar = progressbar.ProgressBar(widgets=widgets, maxval=10000 * 100).start()

    def _update_progress(self, percentage):
        """
        Update the progress with a percentage, including updating the progressbar as well as calling the progress
        callback.

        :param float percentage: Percentage of the progressbar. from 0.0 to 100.0.
        :return: None
        """
        if self._show_progressbar:
            if self._progressbar is None:
                self._initialize_progressbar()
            self._progressbar.update(percentage * 10000)

        if self._progress_callback is not None:
            self._progress_callback(percentage)  # pylint:disable=not-callable

    def _finish_progress(self):
        """
        Mark the progressbar as finished (and report 100.0 to the callback).

        :return: None
        """
        if self._show_progressbar:
            if self._progressbar is None:
                self._initialize_progressbar()
            if self._progressbar is not None:
                self._progressbar.finish()

        if self._progress_callback is not None:
            self._progress_callback(100.0)  # pylint:disable=not-callable

    def __repr__(self):
        return '<%s Analysis Result at %#x>' % (self._name, id(self))
|
{"/angr/surveyors/caller.py": ["/angr/surveyors/explorer.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/tests/test_argv.py": ["/angr/__init__.py"], "/angr/path.py": ["/angr/errors.py", "/angr/path_history.py"], "/angr/simos.py": ["/angr/errors.py", "/angr/tablespecs.py"], "/tests/test_block_cache.py": ["/angr/__init__.py"], "/tests/test_signed_div.py": ["/angr/__init__.py"], "/angr/knowledge_base.py": ["/angr/knowledge/data.py"], "/angr/surveyors/executor.py": ["/angr/surveyor.py", "/angr/surveyors/__init__.py"], "/tests/test_self_modifying_code.py": ["/angr/__init__.py"], "/tests/test_hook.py": ["/angr/__init__.py"], "/angr/surveyors/slicecutor.py": ["/angr/surveyor.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/tests/broken_variableseekr.py": ["/angr/__init__.py"], "/tests/test_argc_sym.py": ["/angr/__init__.py"], "/tests/test_cfg_path.py": ["/angr/__init__.py"], "/angr/analyses/veritesting.py": ["/angr/errors.py", "/angr/analysis.py", "/angr/path_group.py", "/angr/path.py"], "/angr/analyses/congruency_check.py": ["/angr/analysis.py", "/angr/errors.py"], "/angr/knowledge/__init__.py": ["/angr/knowledge/data.py"], "/tests/test_scanf.py": ["/angr/__init__.py"], "/tests/test_vfg_path.py": ["/angr/__init__.py"], "/tests/test_serialization.py": ["/angr/__init__.py"], "/tests/test_mem_funcs.py": ["/angr/__init__.py"], "/angr/exploration_techniques/__init__.py": ["/angr/exploration_techniques/explorer.py", "/angr/exploration_techniques/dfs.py", "/angr/exploration_techniques/veritesting.py", "/angr/exploration_techniques/oppologist.py", "/angr/errors.py"], "/angr/surveyors/__init__.py": ["/angr/surveyors/explorer.py", "/angr/surveyors/executor.py", "/angr/surveyors/escaper.py", "/angr/surveyors/slicecutor.py", "/angr/surveyors/caller.py"], "/tests/test_static_hooker.py": ["/angr/__init__.py"], "/tests/test_cfgfast.py": ["/angr/__init__.py"], "/tests/test_str_funcs.py": ["/angr/__init__.py"], "/angr/exploration_techniques/oppologist.py": 
["/angr/errors.py", "/angr/exploration_techniques/__init__.py"], "/tests/test_argc.py": ["/angr/__init__.py"], "/angr/analyses/cdg.py": ["/angr/analysis.py"], "/angr/surveyors/escaper.py": ["/angr/surveyor.py", "/angr/surveyors/__init__.py"], "/tests/test_checkbyte.py": ["/angr/__init__.py"], "/angr/analyses/static_hooker.py": ["/angr/analysis.py", "/angr/errors.py"], "/angr/analyses/forward_analysis.py": ["/angr/errors.py"], "/angr/analyses/__init__.py": ["/angr/analyses/cdg.py", "/angr/analyses/ddg.py", "/angr/analyses/girlscout.py", "/angr/analyses/veritesting.py", "/angr/analyses/dfg.py", "/angr/analyses/congruency_check.py", "/angr/analyses/static_hooker.py"], "/angr/exploration_techniques/dfs.py": ["/angr/exploration_techniques/__init__.py"], "/angr/__init__.py": ["/angr/project.py", "/angr/regmap.py", "/angr/path.py", "/angr/errors.py", "/angr/surveyor.py", "/angr/analyses/__init__.py", "/angr/analysis.py", "/angr/tablespecs.py", "/angr/simos.py", "/angr/path_group.py", "/angr/surveyors/caller.py", "/angr/log.py"], "/tests/test_echo.py": ["/angr/__init__.py"], "/tests/test_explorer.py": ["/angr/__init__.py"], "/angr/exploration_techniques/veritesting.py": ["/angr/exploration_techniques/__init__.py"], "/angr/path_group.py": ["/angr/errors.py", "/angr/path.py", "/angr/__init__.py"], "/angr/analyses/dfg.py": ["/angr/analysis.py"], "/tests/test_cle_gdb.py": ["/angr/__init__.py"], "/angr/surveyor.py": ["/angr/errors.py", "/angr/path.py", "/angr/surveyors/__init__.py"], "/angr/surveyors/explorer.py": ["/angr/surveyor.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/angr/analysis.py": ["/angr/errors.py"], "/angr/factory.py": ["/angr/surveyors/caller.py", "/angr/lifter.py", "/angr/errors.py", "/angr/path.py", "/angr/path_group.py", "/angr/knowledge/__init__.py"], "/tests/test_strtol.py": ["/angr/__init__.py"], "/angr/exploration_techniques/explorer.py": ["/angr/exploration_techniques/__init__.py"], "/tests/test_veritesting.py": ["/angr/__init__.py"]}
|
38,478
|
MayfeelYang/angr
|
refs/heads/master
|
/angr/factory.py
|
from simuvex import SimIRSB, SimProcedures, SimUnicorn, SimState, BP_BEFORE, BP_AFTER, SimUnicornError
from simuvex import s_options as o, s_cc
from simuvex.s_errors import SimSegfaultError, SimReliftException
from .surveyors.caller import Callable
import logging
l = logging.getLogger('angr.factory')
class AngrObjectFactory(object):
    """
    This factory provides access to important analysis elements.
    """
    def __init__(self, project, translation_cache=False):
        self._project = project
        self._lifter = Lifter(project, cache=translation_cache)
        # Expose the lifter's entry points directly on the factory.
        self.block = self._lifter.lift
        self.fresh_block = self._lifter.fresh_block
        self._default_cc = s_cc.DefaultCC[project.arch.name]

    def snippet(self, addr, jumpkind=None, **block_opts):
        """
        Return a code node for *addr*: a HookNode when the address is hooked
        (unless hooks are explicitly skipped via Ijk_NoHook), otherwise the
        lifted block's code node.
        """
        if self._project.is_hooked(addr) and jumpkind != 'Ijk_NoHook':
            _, kwargs = self._project._sim_procedures[addr]
            size = kwargs.get('length', 0)
            return HookNode(addr, size, self._project.hooked_by(addr))
        else:
            return self.block(addr, **block_opts).codenode  # pylint: disable=no-member

    def sim_block(self, state, stmt_whitelist=None, last_stmt=None,
                  addr=None, opt_level=None, **block_opts):
        """
        Returns a SimIRSB object with execution based on state.

        :param state: The state to tick forward with this block.

        The following parameters are optional:

        :param stmt_whitelist: A list of stmt indexes to which to confine execution.
        :param last_stmt: A statement index at which to stop execution.
        :param addr: The address at which to start the block.
        :param thumb: Whether the block should be lifted in ARM's THUMB mode.
        :param backup_state: A state to read bytes from instead of using project memory.
        :param opt_level: The VEX optimization level to use.
        :param insn_bytes: A string of bytes to use for the block instead of the project.
        :param max_size: The maximum size of the block, in bytes.
        :param num_inst: The maximum number of instructions.
        :param traceflags: traceflags to be passed to VEX. Default: 0
        """
        # THUMB mode is derived from the state (below), never passed directly.
        if 'thumb' in block_opts:
            raise AngrValueError('You are not allowed to pass in a thumb=x property to sim_block')

        if addr is None:
            addr = state.se.any_int(state.regs.ip)

        if o.STRICT_PAGE_ACCESS in state.options:
            try:
                perms = state.memory.permissions(addr)
            except KeyError:
                raise SimSegfaultError(addr, 'exec-miss')
            else:
                # Only concrete permissions are checked; bit 2 (0b100) is the
                # executable flag.
                if not perms.symbolic:
                    perms = perms.args[0]
                    if not perms & 4:
                        raise SimSegfaultError(addr, 'non-executable')

        thumb = False
        if addr % state.arch.instruction_alignment != 0:
            if state.thumb:
                thumb = True
            else:
                raise AngrExitError("Address %#x does not align to alignment %d "
                                    "for architecture %s." % (addr,
                                                              state.arch.instruction_alignment,
                                                              state.arch.name))

        if opt_level is None:
            opt_level = 1 if o.OPTIMIZE_IR in state.options else 0

        force_bbl_addr = block_opts.pop('force_bbl_addr', None)

        # Lift and execute; a SimReliftException means self-modifying code was
        # detected mid-block, so re-lift from the faulting instruction with
        # the block-size options shrunk accordingly.
        while True:
            bb = self.block(addr,
                            arch=state.arch,
                            opt_level=opt_level,
                            thumb=thumb,
                            backup_state=state,
                            **block_opts)

            try:
                return SimIRSB(state,
                               bb.vex,
                               addr=addr,
                               whitelist=stmt_whitelist,
                               last_stmt=last_stmt,
                               force_bbl_addr=force_bbl_addr)
            except SimReliftException as e:
                state = e.state
                force_bbl_addr = state.scratch.bbl_addr
                if 'insn_bytes' in block_opts:
                    raise AngrValueError("You cannot pass self-modifying code as insn_bytes!!!")
                new_ip = state.scratch.ins_addr
                if 'max_size' in block_opts:
                    block_opts['max_size'] -= new_ip - addr
                if 'num_inst' in block_opts:
                    block_opts['num_inst'] -= state.scratch.num_insns
                addr = new_ip

    def sim_run(self, state, addr=None, jumpkind=None, extra_stop_points=None, **block_opts):
        """
        Returns a simuvex SimRun object (supporting refs() and exits()), automatically choosing whether to create a
        SimIRSB or a SimProcedure.

        :param state: The state to analyze
        :param jumpkind: optional, the jumpkind of the previous exit
        :param addr: optional, an address to execute at instead of the state's ip

        Additional keyword arguments will be passed directly into factory.sim_block if appropriate.

        :param stmt_whitelist: a list of stmt indexes to which to confine execution.
        :param last_stmt: a statement index at which to stop execution.
        :param thumb: whether the block should be lifted in ARM's THUMB mode.
        :param backup_state: a state to read bytes from instead of using project memory.
        :param opt_level: the VEX optimization level to use.
        :param insn_bytes: a string of bytes to use for the block instead of the project.
        :param max_size: the maximum size of the block, in bytes.
        :param num_inst: the maximum number of instructions.
        :param extra_stop_points: addresses to stop at, other than hooked functions
        :param traceflags: traceflags to be passed to VEX. Default: 0
        """
        if addr is None:
            addr = state.se.any_int(state.regs.ip)
        if jumpkind is None:
            jumpkind = state.scratch.jumpkind

        if jumpkind == 'Ijk_Exit':
            l.debug('Execution hit exit at %#x', addr)
            return SimProcedures['stubs']['PathTerminator'](state, addr=addr)

        if jumpkind.startswith("Ijk_Sys"):
            l.debug("Invoking system call handler")
            return self._project._simos.handle_syscall(state)

        if jumpkind in ("Ijk_EmFail", "Ijk_MapFail") or "Ijk_Sig" in jumpkind:
            raise AngrExitError("Cannot create run following jumpkind %s" % jumpkind)

        if jumpkind == "Ijk_NoDecode" and not self._project.is_hooked(addr):
            # BUGFIX: the first placeholder was "#%x", rendering e.g. "#4005d0"
            # instead of "0x4005d0"; "%#x" matches the second placeholder.
            raise AngrExitError("IR decoding error at %#x. You can hook this instruction with a python replacement "
                                "using project.hook(%#x, your_function, length=length_of_instruction)." % (addr, addr))

        elif self._project.is_hooked(addr) and jumpkind != 'Ijk_NoHook':
            sim_proc_class, kwargs = self._project._sim_procedures[addr]

            l.debug("Creating SimProcedure %s (originally at %#x)",
                    sim_proc_class.__name__, addr)
            state._inspect('call', BP_BEFORE, function_name=sim_proc_class.__name__)
            r = sim_proc_class(state, addr=addr, sim_kwargs=kwargs)
            state._inspect('call', BP_AFTER, function_name=sim_proc_class.__name__)
            l.debug("... %s created", r)

        elif o.UNICORN in state.options and state.unicorn.check():
            l.info('Creating SimUnicorn at %#x', addr)
            # list() so .extend below works on both py2 and py3 key views.
            stops = list(self._project._sim_procedures.keys())
            if extra_stop_points is not None:
                stops.extend(extra_stop_points)
            try:
                r = SimUnicorn(state, stop_points=stops)
            except SimUnicornError:
                # Unicorn could not handle this state; fall back to VEX.
                r = self.sim_block(state, **block_opts)

        else:
            l.debug("Creating SimIRSB at 0x%x", addr)
            r = self.sim_block(state, addr=addr, **block_opts)

        # Peek and fix the IP for syscalls
        if r.successors and r.successors[0].scratch.jumpkind.startswith('Ijk_Sys'):
            self._fix_syscall_ip(r.successors[0])

        return r

    def blank_state(self, **kwargs):
        """
        Returns a mostly-uninitialized state object. All parameters are optional.

        :param addr: The address the state should start at instead of the entry point.
        :param initial_prefix: If this is provided, all symbolic registers will hold symbolic values with names
                               prefixed by this string.
        :param fs: A dictionary of file names with associated preset SimFile objects.
        :param concrete_fs: bool describing whether the host filesystem should be consulted when opening files.
        :param chroot: A path to use as a fake root directory, Behaves similarly to a real chroot. Used only
                       when concrete_fs is set to True.
        :param kwargs: Any additional keyword args will be passed to the SimState constructor.
        :return: The blank state.
        :rtype: simuvex.s_state.SimState
        """
        return self._project._simos.state_blank(**kwargs)

    def entry_state(self, **kwargs):
        """
        Returns a state object representing the program at its entry point. All parameters are optional.

        :param addr: The address the state should start at instead of the entry point.
        :param initial_prefix: If this is provided, all symbolic registers will hold symbolic values with names
                               prefixed by this string.
        :param fs: a dictionary of file names with associated preset SimFile objects.
        :param concrete_fs: boolean describing whether the host filesystem should be consulted when opening files.
        :param chroot: a path to use as a fake root directory, behaves similar to a real chroot. used only when
                       concrete_fs is set to True.
        :param argc: a custom value to use for the program's argc. May be either an int or a bitvector. If
                     not provided, defaults to the length of args.
        :param args: a list of values to use as the program's argv. May be mixed strings and bitvectors.
        :param env: a dictionary to use as the environment for the program. Both keys and values may be
                    mixed strings and bitvectors.
        :return: The entry state.
        :rtype: simuvex.s_state.SimState
        """
        return self._project._simos.state_entry(**kwargs)

    def full_init_state(self, **kwargs):
        """
        Very much like :meth:`entry_state()`, except that instead of starting execution at the program entry point,
        execution begins at a special SimProcedure that plays the role of the dynamic loader, calling each of the
        initializer functions that should be called before execution reaches the entry point.

        :param addr: The address the state should start at instead of the entry point.
        :param initial_prefix: If this is provided, all symbolic registers will hold symbolic values with names
                               prefixed by this string.
        :param fs: a dictionary of file names with associated preset SimFile objects.
        :param concrete_fs: boolean describing whether the host filesystem should be consulted when opening files.
        :param chroot: a path to use as a fake root directory, behaves similar to a real chroot. used only when
                       concrete_fs is set to True.
        :param argc: a custom value to use for the program's argc. May be either an int or a bitvector. If
                     not provided, defaults to the length of args.
        :param args: a list of values to use as arguments to the program. May be mixed strings and bitvectors.
        :param env: a dictionary to use as the environment for the program. Both keys and values may be
                    mixed strings and bitvectors.
        :return: The fully initialized state.
        :rtype: simuvex.s_state.SimState
        """
        return self._project._simos.state_full_init(**kwargs)

    def call_state(self, addr, *args, **kwargs):
        """
        Returns a state object initialized to the start of a given function, as if it were called with given parameters.

        :param addr: The address the state should start at instead of the entry point.
        :param args: Any additional positional arguments will be used as arguments to the function call.

        The following parametrs are optional.

        :param base_state: Use this SimState as the base for the new state instead of a blank state.
        :param cc: Optionally provide a SimCC object to use a specific calling convention.
        :param ret_addr: Use this address as the function's return target.
        :param stack_base: An optional pointer to use as the top of the stack, circa the function entry point
        :param alloc_base: An optional pointer to use as the place to put excess argument data
        :param grow_like_stack: When allocating data at alloc_base, whether to allocate at decreasing addresses
        :param toc: The address of the table of contents for ppc64
        :param initial_prefix: If this is provided, all symbolic registers will hold symbolic values with names
                               prefixed by this string.
        :param fs: A dictionary of file names with associated preset SimFile objects.
        :param concrete_fs: bool describing whether the host filesystem should be consulted when opening files.
        :param chroot: A path to use as a fake root directory, Behaves similarly to a real chroot. Used only
                       when concrete_fs is set to True.
        :param kwargs: Any additional keyword args will be passed to the SimState constructor.
        :return: The state at the beginning of the function.
        :rtype: simuvex.s_state.SimState

        The idea here is that you can provide almost any kind of python type in `args` and it'll be translated to a
        binary format to be placed into simulated memory. Lists (representing arrays) must be entirely elements of the
        same type and size, while tuples (representing structs) can be elements of any type and size.
        If you'd like there to be a pointer to a given value, wrap the value in a `SimCC.PointerWrapper`. Any value
        that can't fit in a register will be automatically put in a
        PointerWrapper.

        If stack_base is not provided, the current stack pointer will be used, and it will be updated.
        If alloc_base is not provided, the current stack pointer will be used, and it will be updated.
        You might not like the results if you provide stack_base but not alloc_base.

        grow_like_stack controls the behavior of allocating data at alloc_base. When data from args needs to be wrapped
        in a pointer, the pointer needs to point somewhere, so that data is dumped into memory at alloc_base. If you
        set alloc_base to point to somewhere other than the stack, set grow_like_stack to False so that sequencial
        allocations happen at increasing addresses.
        """
        return self._project._simos.state_call(addr, *args, **kwargs)

    def path(self, state=None, **options):
        """
        Constructs a new path.

        :param state: Optional - The state to start the new path at. If not provided, an
                      :meth:`entry_state()` will be constructed using any additional keyword arguments
                      provided.
        :return: The new path.
        :rtype: angr.path.Path
        """
        if state is None:
            state = self.entry_state(**options)
        return Path(self._project, state)

    def path_group(self, thing=None, **kwargs):
        """
        Constructs a new path group.

        :param thing: Optional - What to put in the new path group's active stash.
        :param kwargs: Any additional keyword arguments will be passed to the PathGroup constructor
        :returns: The new path group
        :rtype: angr.path_group.PathGroup

        Many different types can be passed to this method:

        * If nothing is passed in, the path group is seeded with a path containing a state initialized for the program
          entry point, i.e. :meth:`entry_state()`.
        * If a :class:`simuvex.s_state.SimState` is passed in, the path group is seeded with a path wrapping that state.
        * If a :class:`angr.path.Path` is passed in, the path group is seeded with that path.
        * If a list is passed in, the list must contain only SimStates and Paths, each SimState will be wrapped in a
          Path, and the whole list will be used to seed the path group.
        """
        if thing is None:
            thing = [self.path()]
        if isinstance(thing, (list, tuple)):
            # Normalize the seed list in place: wrap raw states in Paths.
            thing = list(thing)
            for i, val in enumerate(thing):
                if isinstance(val, SimState):
                    thing[i] = self.path(val)
                elif not isinstance(val, Path):
                    raise AngrError("Bad type to initialize path group: %s" % repr(val))
        elif isinstance(thing, Path):
            thing = [thing]
        elif isinstance(thing, SimState):
            thing = [self.path(thing)]
        else:
            # BUGFIX: message typos ("BadType ... initialze") fixed to match
            # the wording of the list-element error above.
            raise AngrError("Bad type to initialize path group: %s" % repr(thing))
        return PathGroup(self._project, active_paths=thing, **kwargs)

    def callable(self, addr, concrete_only=False, perform_merge=True, base_state=None, toc=None, cc=None):
        """
        A Callable is a representation of a function in the binary that can be interacted with like a native python
        function.

        :param addr: The address of the function to use
        :param concrete_only: Throw an exception if the execution splits into multiple paths
        :param perform_merge: Merge all result states into one at the end (only relevant if concrete_only=False)
        :param base_state: The state from which to do these runs
        :param toc: The address of the table of contents for ppc64
        :param cc: The SimCC to use for a calling convention
        :returns: A Callable object that can be used as a interface for executing guest code like a
                  python function.
        :rtype: angr.surveyors.caller.Callable
        """
        return Callable(self._project,
                        addr=addr,
                        concrete_only=concrete_only,
                        perform_merge=perform_merge,
                        base_state=base_state,
                        toc=toc,
                        cc=cc)

    def cc(self, args=None, ret_val=None, sp_delta=None, func_ty=None):
        """
        Return a SimCC (calling convention) parametrized for this project and, optionally, a given function.

        :param args: A list of argument storage locations, as SimFunctionArguments.
        :param ret_val: The return value storage location, as a SimFunctionArgument.
        :param sp_delta: Does this even matter??
        :param func_ty: The protoype for the given function, as a SimType.

        Relevant subclasses of SimFunctionArgument are SimRegArg and SimStackArg, and shortcuts to them can be found on
        this `cc` object.

        For stack arguments, offsets are relative to the stack pointer on function entry.
        """
        return self._default_cc(arch=self._project.arch,
                                args=args,
                                ret_val=ret_val,
                                sp_delta=sp_delta,
                                func_ty=func_ty)

    # Convenience shortcuts hung off the method objects themselves, so callers
    # can write e.g. factory.cc.SimRegArg without importing simuvex.
    cc.SimRegArg = s_cc.SimRegArg
    cc.SimStackArg = s_cc.SimStackArg
    _default_cc = None
    callable.PointerWrapper = s_cc.PointerWrapper
    call_state.PointerWrapper = s_cc.PointerWrapper

    #
    # Private methods
    #

    def _fix_syscall_ip(self, state):
        """
        Resolve syscall information from the state, get the IP address of the syscall SimProcedure, and set the IP of
        the state accordingly. Don't do anything if the resolution fails.

        :param simuvex.s_state.SimState state: the program state.
        :return: None
        """
        try:
            _, syscall_addr, _, _ = self._project._simos.syscall_info(state)

            # Fix the IP
            state.ip = syscall_addr

        except AngrUnsupportedSyscallError:
            # the syscall is not supported. don't do anything
            pass
from .lifter import Lifter
from .errors import AngrExitError, AngrError, AngrValueError, AngrUnsupportedSyscallError
from .path import Path
from .path_group import PathGroup
from .knowledge import HookNode
|
{"/angr/surveyors/caller.py": ["/angr/surveyors/explorer.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/tests/test_argv.py": ["/angr/__init__.py"], "/angr/path.py": ["/angr/errors.py", "/angr/path_history.py"], "/angr/simos.py": ["/angr/errors.py", "/angr/tablespecs.py"], "/tests/test_block_cache.py": ["/angr/__init__.py"], "/tests/test_signed_div.py": ["/angr/__init__.py"], "/angr/knowledge_base.py": ["/angr/knowledge/data.py"], "/angr/surveyors/executor.py": ["/angr/surveyor.py", "/angr/surveyors/__init__.py"], "/tests/test_self_modifying_code.py": ["/angr/__init__.py"], "/tests/test_hook.py": ["/angr/__init__.py"], "/angr/surveyors/slicecutor.py": ["/angr/surveyor.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/tests/broken_variableseekr.py": ["/angr/__init__.py"], "/tests/test_argc_sym.py": ["/angr/__init__.py"], "/tests/test_cfg_path.py": ["/angr/__init__.py"], "/angr/analyses/veritesting.py": ["/angr/errors.py", "/angr/analysis.py", "/angr/path_group.py", "/angr/path.py"], "/angr/analyses/congruency_check.py": ["/angr/analysis.py", "/angr/errors.py"], "/angr/knowledge/__init__.py": ["/angr/knowledge/data.py"], "/tests/test_scanf.py": ["/angr/__init__.py"], "/tests/test_vfg_path.py": ["/angr/__init__.py"], "/tests/test_serialization.py": ["/angr/__init__.py"], "/tests/test_mem_funcs.py": ["/angr/__init__.py"], "/angr/exploration_techniques/__init__.py": ["/angr/exploration_techniques/explorer.py", "/angr/exploration_techniques/dfs.py", "/angr/exploration_techniques/veritesting.py", "/angr/exploration_techniques/oppologist.py", "/angr/errors.py"], "/angr/surveyors/__init__.py": ["/angr/surveyors/explorer.py", "/angr/surveyors/executor.py", "/angr/surveyors/escaper.py", "/angr/surveyors/slicecutor.py", "/angr/surveyors/caller.py"], "/tests/test_static_hooker.py": ["/angr/__init__.py"], "/tests/test_cfgfast.py": ["/angr/__init__.py"], "/tests/test_str_funcs.py": ["/angr/__init__.py"], "/angr/exploration_techniques/oppologist.py": 
["/angr/errors.py", "/angr/exploration_techniques/__init__.py"], "/tests/test_argc.py": ["/angr/__init__.py"], "/angr/analyses/cdg.py": ["/angr/analysis.py"], "/angr/surveyors/escaper.py": ["/angr/surveyor.py", "/angr/surveyors/__init__.py"], "/tests/test_checkbyte.py": ["/angr/__init__.py"], "/angr/analyses/static_hooker.py": ["/angr/analysis.py", "/angr/errors.py"], "/angr/analyses/forward_analysis.py": ["/angr/errors.py"], "/angr/analyses/__init__.py": ["/angr/analyses/cdg.py", "/angr/analyses/ddg.py", "/angr/analyses/girlscout.py", "/angr/analyses/veritesting.py", "/angr/analyses/dfg.py", "/angr/analyses/congruency_check.py", "/angr/analyses/static_hooker.py"], "/angr/exploration_techniques/dfs.py": ["/angr/exploration_techniques/__init__.py"], "/angr/__init__.py": ["/angr/project.py", "/angr/regmap.py", "/angr/path.py", "/angr/errors.py", "/angr/surveyor.py", "/angr/analyses/__init__.py", "/angr/analysis.py", "/angr/tablespecs.py", "/angr/simos.py", "/angr/path_group.py", "/angr/surveyors/caller.py", "/angr/log.py"], "/tests/test_echo.py": ["/angr/__init__.py"], "/tests/test_explorer.py": ["/angr/__init__.py"], "/angr/exploration_techniques/veritesting.py": ["/angr/exploration_techniques/__init__.py"], "/angr/path_group.py": ["/angr/errors.py", "/angr/path.py", "/angr/__init__.py"], "/angr/analyses/dfg.py": ["/angr/analysis.py"], "/tests/test_cle_gdb.py": ["/angr/__init__.py"], "/angr/surveyor.py": ["/angr/errors.py", "/angr/path.py", "/angr/surveyors/__init__.py"], "/angr/surveyors/explorer.py": ["/angr/surveyor.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/angr/analysis.py": ["/angr/errors.py"], "/angr/factory.py": ["/angr/surveyors/caller.py", "/angr/lifter.py", "/angr/errors.py", "/angr/path.py", "/angr/path_group.py", "/angr/knowledge/__init__.py"], "/tests/test_strtol.py": ["/angr/__init__.py"], "/angr/exploration_techniques/explorer.py": ["/angr/exploration_techniques/__init__.py"], "/tests/test_veritesting.py": ["/angr/__init__.py"]}
|
38,479
|
MayfeelYang/angr
|
refs/heads/master
|
/tests/test_strtol.py
|
import nose
import angr
import simuvex
import subprocess
import logging
l = logging.getLogger('angr.tests.strtol')
import os
test_location = str(os.path.dirname(os.path.realpath(__file__)))
def run_strtol(threads):
    """
    Symbolically explore the strtol_test binary until every expected output
    message is found, then replay each discovered stdin through the concrete
    binary and check the real output matches the symbolic one.

    :param threads: thread count for the path group (None = single-threaded).
    """
    binary_path = os.path.join(test_location, "../../binaries/tests/x86_64/strtol_test")
    project = angr.Project(binary_path)
    start_state = project.factory.entry_state(remove_options={simuvex.o.LAZY_SOLVES})
    group = project.factory.path_group(thing=start_state, immutable=False, threads=threads)
    # Every distinct message the binary can print before reaching the end of main.
    remaining = {"base 8 worked\n", "base +8 worked\n", "0x worked\n", "+0x worked\n", "base +10 worked\n",
                 "base 10 worked\n", "base -8 worked\n", "-0x worked\n", "base -10 worked\n", "Nope\n"}
    # 0x400804 is the end of main; find one path per expected message.
    group.explore(find=0x400804, num_find=len(remaining))
    nose.tools.assert_equal(len(group.found), len(remaining))
    # Replay each symbolic stdin through the real binary and compare stdout.
    for found_path in group.found:
        stdin_bytes = found_path.state.posix.dumps(0)
        stdout_bytes = found_path.state.posix.dumps(1)
        remaining.remove(stdout_bytes)
        proc = subprocess.Popen(binary_path, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, stdin=subprocess.PIPE)
        concrete_out = proc.communicate(stdin_bytes)[0]
        nose.tools.assert_equal(concrete_out, stdout_bytes)
    # Every expected message must have been observed exactly once.
    nose.tools.assert_equal(len(remaining), 0)
def test_strtol():
    # Nose test generator: run the single-threaded configuration.
    yield run_strtol, None
    # Threaded variant is deliberately disabled (kept for reference).
    # yield run_strtol, 8
if __name__ == "__main__":
    # Direct invocation exercises the multi-threaded (4-thread) configuration.
    run_strtol(4)
|
{"/angr/surveyors/caller.py": ["/angr/surveyors/explorer.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/tests/test_argv.py": ["/angr/__init__.py"], "/angr/path.py": ["/angr/errors.py", "/angr/path_history.py"], "/angr/simos.py": ["/angr/errors.py", "/angr/tablespecs.py"], "/tests/test_block_cache.py": ["/angr/__init__.py"], "/tests/test_signed_div.py": ["/angr/__init__.py"], "/angr/knowledge_base.py": ["/angr/knowledge/data.py"], "/angr/surveyors/executor.py": ["/angr/surveyor.py", "/angr/surveyors/__init__.py"], "/tests/test_self_modifying_code.py": ["/angr/__init__.py"], "/tests/test_hook.py": ["/angr/__init__.py"], "/angr/surveyors/slicecutor.py": ["/angr/surveyor.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/tests/broken_variableseekr.py": ["/angr/__init__.py"], "/tests/test_argc_sym.py": ["/angr/__init__.py"], "/tests/test_cfg_path.py": ["/angr/__init__.py"], "/angr/analyses/veritesting.py": ["/angr/errors.py", "/angr/analysis.py", "/angr/path_group.py", "/angr/path.py"], "/angr/analyses/congruency_check.py": ["/angr/analysis.py", "/angr/errors.py"], "/angr/knowledge/__init__.py": ["/angr/knowledge/data.py"], "/tests/test_scanf.py": ["/angr/__init__.py"], "/tests/test_vfg_path.py": ["/angr/__init__.py"], "/tests/test_serialization.py": ["/angr/__init__.py"], "/tests/test_mem_funcs.py": ["/angr/__init__.py"], "/angr/exploration_techniques/__init__.py": ["/angr/exploration_techniques/explorer.py", "/angr/exploration_techniques/dfs.py", "/angr/exploration_techniques/veritesting.py", "/angr/exploration_techniques/oppologist.py", "/angr/errors.py"], "/angr/surveyors/__init__.py": ["/angr/surveyors/explorer.py", "/angr/surveyors/executor.py", "/angr/surveyors/escaper.py", "/angr/surveyors/slicecutor.py", "/angr/surveyors/caller.py"], "/tests/test_static_hooker.py": ["/angr/__init__.py"], "/tests/test_cfgfast.py": ["/angr/__init__.py"], "/tests/test_str_funcs.py": ["/angr/__init__.py"], "/angr/exploration_techniques/oppologist.py": 
["/angr/errors.py", "/angr/exploration_techniques/__init__.py"], "/tests/test_argc.py": ["/angr/__init__.py"], "/angr/analyses/cdg.py": ["/angr/analysis.py"], "/angr/surveyors/escaper.py": ["/angr/surveyor.py", "/angr/surveyors/__init__.py"], "/tests/test_checkbyte.py": ["/angr/__init__.py"], "/angr/analyses/static_hooker.py": ["/angr/analysis.py", "/angr/errors.py"], "/angr/analyses/forward_analysis.py": ["/angr/errors.py"], "/angr/analyses/__init__.py": ["/angr/analyses/cdg.py", "/angr/analyses/ddg.py", "/angr/analyses/girlscout.py", "/angr/analyses/veritesting.py", "/angr/analyses/dfg.py", "/angr/analyses/congruency_check.py", "/angr/analyses/static_hooker.py"], "/angr/exploration_techniques/dfs.py": ["/angr/exploration_techniques/__init__.py"], "/angr/__init__.py": ["/angr/project.py", "/angr/regmap.py", "/angr/path.py", "/angr/errors.py", "/angr/surveyor.py", "/angr/analyses/__init__.py", "/angr/analysis.py", "/angr/tablespecs.py", "/angr/simos.py", "/angr/path_group.py", "/angr/surveyors/caller.py", "/angr/log.py"], "/tests/test_echo.py": ["/angr/__init__.py"], "/tests/test_explorer.py": ["/angr/__init__.py"], "/angr/exploration_techniques/veritesting.py": ["/angr/exploration_techniques/__init__.py"], "/angr/path_group.py": ["/angr/errors.py", "/angr/path.py", "/angr/__init__.py"], "/angr/analyses/dfg.py": ["/angr/analysis.py"], "/tests/test_cle_gdb.py": ["/angr/__init__.py"], "/angr/surveyor.py": ["/angr/errors.py", "/angr/path.py", "/angr/surveyors/__init__.py"], "/angr/surveyors/explorer.py": ["/angr/surveyor.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/angr/analysis.py": ["/angr/errors.py"], "/angr/factory.py": ["/angr/surveyors/caller.py", "/angr/lifter.py", "/angr/errors.py", "/angr/path.py", "/angr/path_group.py", "/angr/knowledge/__init__.py"], "/tests/test_strtol.py": ["/angr/__init__.py"], "/angr/exploration_techniques/explorer.py": ["/angr/exploration_techniques/__init__.py"], "/tests/test_veritesting.py": ["/angr/__init__.py"]}
|
38,480
|
MayfeelYang/angr
|
refs/heads/master
|
/angr/exploration_techniques/explorer.py
|
from . import ExplorationTechnique
class Explorer(ExplorationTechnique):
    """
    Search for up to "num_find" paths that satisfy condition "find", avoiding condition "avoid".
    Stashes found paths into "find_stash" and avoided paths into "avoid_stash".

    The "find" and "avoid" parameters may be any of:

    - An address to find
    - A set or list of addresses to find
    - A function that takes a path and returns whether or not it matches.

    If an angr CFG is passed in as the "cfg" parameter and "find" is either a number or a list or
    a set, then any paths which cannot possibly reach a success state without going through a
    failure state will be preemptively avoided.

    If either the "find" or "avoid" parameter is a function returning a boolean, and a path
    triggers both conditions, it will be added to the find stash, unless "avoid_priority" is set
    to True.
    """
    def __init__(self, find=None, avoid=None, find_stash='found', avoid_stash='avoid', cfg=None, num_find=1, avoid_priority=False):
        super(Explorer, self).__init__()
        self.find = self._condition_to_lambda(find)
        self.avoid = self._condition_to_lambda(avoid)
        self.find_stash = find_stash
        self.avoid_stash = avoid_stash
        self.cfg = cfg
        self.ok_blocks = set()
        self.num_find = num_find
        self.avoid_priority = avoid_priority
        if cfg is not None:
            # Normalize avoid/find into address tuples for the CFG walk.
            if isinstance(avoid, (int, long)):
                avoid = (avoid,)
            if not isinstance(avoid, (list, tuple)):
                avoid = ()
            if isinstance(find, (int, long)):
                find = (find,)
            if not isinstance(find, (list, tuple)):
                # A callable "find" cannot be mapped onto CFG nodes; disable pruning.
                self.cfg = None
                return
            # not a queue but a stack... it's just a worklist!
            # Walk backwards from every find target, collecting all blocks that
            # can reach a target without passing through an avoid address.
            queue = sum((cfg.get_all_nodes(f) for f in find), [])
            while queue:
                n = queue.pop()
                if n.addr in self.ok_blocks:
                    continue
                if n.addr in avoid:
                    continue
                self.ok_blocks.add(n.addr)
                queue.extend(n.predecessors)

    def setup(self, pg):
        # Make sure both stashes exist on the path group.
        if self.find_stash not in pg.stashes: pg.stashes[self.find_stash] = []
        if self.avoid_stash not in pg.stashes: pg.stashes[self.avoid_stash] = []

    def filter(self, path):
        """Route *path* into the find/avoid/unsat stashes, or None to keep it active."""
        rFind = self.find(path)
        if rFind:
            if not path.reachable:
                return 'unsat'
            rAvoid = self.avoid(path)
            if rAvoid:
                # Conflict: the path matches both conditions.
                if self.avoid_priority and (type(rFind) is not set or type(rAvoid) is not set):
                    # With avoid_priority and one of the conditions not a set,
                    # avoid wins outright.
                    return self.avoid_stash
            if type(rAvoid) is not set:
                # rAvoid is False or self.avoid_priority is False.
                # Setting rAvoid to {} simplifies the rest of the code.
                rAvoid = {}
            if type(rFind) is set:
                # Single-step forward until the path actually sits on a find
                # address, bailing to avoid if it crosses an avoid address first.
                while path.addr not in rFind:
                    if path.addr in rAvoid:
                        return self.avoid_stash
                    path = path.step(num_inst=1)[0]
                if self.avoid_priority and path.addr in rAvoid:
                    # Only occurs if the intersection of rAvoid and rFind is not empty.
                    return self.avoid_stash
            return (self.find_stash, path)
        if self.avoid(path): return self.avoid_stash
        # CFG-based pruning: paths outside the reachable-block set can never succeed.
        if self.cfg is not None and self.cfg.get_any_node(path.addr) is not None:
            if path.addr not in self.ok_blocks: return self.avoid_stash
        return None

    def complete(self, pg):
        # Exploration is done once enough paths have landed in the find stash.
        return len(pg.stashes[self.find_stash]) >= self.num_find
|
{"/angr/surveyors/caller.py": ["/angr/surveyors/explorer.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/tests/test_argv.py": ["/angr/__init__.py"], "/angr/path.py": ["/angr/errors.py", "/angr/path_history.py"], "/angr/simos.py": ["/angr/errors.py", "/angr/tablespecs.py"], "/tests/test_block_cache.py": ["/angr/__init__.py"], "/tests/test_signed_div.py": ["/angr/__init__.py"], "/angr/knowledge_base.py": ["/angr/knowledge/data.py"], "/angr/surveyors/executor.py": ["/angr/surveyor.py", "/angr/surveyors/__init__.py"], "/tests/test_self_modifying_code.py": ["/angr/__init__.py"], "/tests/test_hook.py": ["/angr/__init__.py"], "/angr/surveyors/slicecutor.py": ["/angr/surveyor.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/tests/broken_variableseekr.py": ["/angr/__init__.py"], "/tests/test_argc_sym.py": ["/angr/__init__.py"], "/tests/test_cfg_path.py": ["/angr/__init__.py"], "/angr/analyses/veritesting.py": ["/angr/errors.py", "/angr/analysis.py", "/angr/path_group.py", "/angr/path.py"], "/angr/analyses/congruency_check.py": ["/angr/analysis.py", "/angr/errors.py"], "/angr/knowledge/__init__.py": ["/angr/knowledge/data.py"], "/tests/test_scanf.py": ["/angr/__init__.py"], "/tests/test_vfg_path.py": ["/angr/__init__.py"], "/tests/test_serialization.py": ["/angr/__init__.py"], "/tests/test_mem_funcs.py": ["/angr/__init__.py"], "/angr/exploration_techniques/__init__.py": ["/angr/exploration_techniques/explorer.py", "/angr/exploration_techniques/dfs.py", "/angr/exploration_techniques/veritesting.py", "/angr/exploration_techniques/oppologist.py", "/angr/errors.py"], "/angr/surveyors/__init__.py": ["/angr/surveyors/explorer.py", "/angr/surveyors/executor.py", "/angr/surveyors/escaper.py", "/angr/surveyors/slicecutor.py", "/angr/surveyors/caller.py"], "/tests/test_static_hooker.py": ["/angr/__init__.py"], "/tests/test_cfgfast.py": ["/angr/__init__.py"], "/tests/test_str_funcs.py": ["/angr/__init__.py"], "/angr/exploration_techniques/oppologist.py": 
["/angr/errors.py", "/angr/exploration_techniques/__init__.py"], "/tests/test_argc.py": ["/angr/__init__.py"], "/angr/analyses/cdg.py": ["/angr/analysis.py"], "/angr/surveyors/escaper.py": ["/angr/surveyor.py", "/angr/surveyors/__init__.py"], "/tests/test_checkbyte.py": ["/angr/__init__.py"], "/angr/analyses/static_hooker.py": ["/angr/analysis.py", "/angr/errors.py"], "/angr/analyses/forward_analysis.py": ["/angr/errors.py"], "/angr/analyses/__init__.py": ["/angr/analyses/cdg.py", "/angr/analyses/ddg.py", "/angr/analyses/girlscout.py", "/angr/analyses/veritesting.py", "/angr/analyses/dfg.py", "/angr/analyses/congruency_check.py", "/angr/analyses/static_hooker.py"], "/angr/exploration_techniques/dfs.py": ["/angr/exploration_techniques/__init__.py"], "/angr/__init__.py": ["/angr/project.py", "/angr/regmap.py", "/angr/path.py", "/angr/errors.py", "/angr/surveyor.py", "/angr/analyses/__init__.py", "/angr/analysis.py", "/angr/tablespecs.py", "/angr/simos.py", "/angr/path_group.py", "/angr/surveyors/caller.py", "/angr/log.py"], "/tests/test_echo.py": ["/angr/__init__.py"], "/tests/test_explorer.py": ["/angr/__init__.py"], "/angr/exploration_techniques/veritesting.py": ["/angr/exploration_techniques/__init__.py"], "/angr/path_group.py": ["/angr/errors.py", "/angr/path.py", "/angr/__init__.py"], "/angr/analyses/dfg.py": ["/angr/analysis.py"], "/tests/test_cle_gdb.py": ["/angr/__init__.py"], "/angr/surveyor.py": ["/angr/errors.py", "/angr/path.py", "/angr/surveyors/__init__.py"], "/angr/surveyors/explorer.py": ["/angr/surveyor.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/angr/analysis.py": ["/angr/errors.py"], "/angr/factory.py": ["/angr/surveyors/caller.py", "/angr/lifter.py", "/angr/errors.py", "/angr/path.py", "/angr/path_group.py", "/angr/knowledge/__init__.py"], "/tests/test_strtol.py": ["/angr/__init__.py"], "/angr/exploration_techniques/explorer.py": ["/angr/exploration_techniques/__init__.py"], "/tests/test_veritesting.py": ["/angr/__init__.py"]}
|
38,481
|
MayfeelYang/angr
|
refs/heads/master
|
/tests/test_veritesting.py
|
import sys
import nose
import angr
import logging
l = logging.getLogger('angr_tests.veritesting')
import os
location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
# Per-architecture target addresses used as the Explorer "find" goal for
# each test binary (the address marking successful completion).
addresses_veritesting_a = {
    'x86_64': 0x400674
}
addresses_veritesting_b = {
    'x86_64': 0x4006af
}
def run_veritesting_a(arch):
    """
    Explore the veritesting_a binary with veritesting enabled and verify that
    every discovered input contains exactly ten 'B' characters.
    """
    # TODO: Added timeout control, since a failed state merging will result in running for a long time
    #logging.getLogger('angr.analyses.sse').setLevel(logging.DEBUG)
    binary = os.path.join(os.path.join(location, arch), "veritesting_a")
    project = angr.Project(binary,
                           load_options={'auto_load_libs': False},
                           use_sim_procedures=True
                           )
    explorer = project.surveyors.Explorer(find=(addresses_veritesting_a[arch], ), enable_veritesting=True)
    result = explorer.run()
    nose.tools.assert_not_equal(len(result.found), 0)
    # Make sure the input makes sense
    for found_path in result.found:
        stdin_dump = found_path.state.plugins['posix'].dumps(0)
        nose.tools.assert_equal(stdin_dump.count('B'), 10)
def run_veritesting_b(arch):
    """
    Explore the veritesting_b binary with veritesting and function inlining
    enabled, verifying each discovered input contains exactly 35 'B's.
    """
    #logging.getLogger('angr.analyses.sse').setLevel(logging.DEBUG)
    #logging.getLogger('angr.surveyor').setLevel(logging.DEBUG)
    #logging.getLogger('angr.surveyors.explorer').setLevel(logging.DEBUG)
    binary = os.path.join(os.path.join(location, arch), "veritesting_b")
    project = angr.Project(binary,
                           load_options={'auto_load_libs': False},
                           use_sim_procedures=True
                           )
    explorer = project.surveyors.Explorer(find=(addresses_veritesting_b[arch], ),
                                          enable_veritesting=True,
                                          veritesting_options={'enable_function_inlining': True})
    result = explorer.run()
    nose.tools.assert_not_equal(len(result.found), 0)
    # Make sure the input makes sense
    for found_path in result.found:
        stdin_dump = found_path.state.plugins['posix'].dumps(0)
        nose.tools.assert_equal(stdin_dump.count('B'), 35)
def test_veritesting_a():
    """Nose generator: run the basic veritesting test for each supported arch."""
    # This is the most basic test
    # Iterate the dict directly -- calling .keys() is redundant.
    for arch in addresses_veritesting_a:
        yield run_veritesting_a, arch
def test_veritesting_b():
    """Nose generator: run the advanced veritesting test for each supported arch."""
    # Advanced stuff - it tests for the ability to inline simple functions
    # as well as simple syscalls like read/write
    # Iterate the dict directly -- calling .keys() is redundant.
    for arch in addresses_veritesting_b:
        yield run_veritesting_b, arch
if __name__ == "__main__":
    #logging.getLogger('angr.analyses.veritesting').setLevel(logging.DEBUG)
    # "python test_veritesting.py a" (or "b") runs a single suite by name;
    # with no argument, both generators run back to back.
    if len(sys.argv) > 1:
        for test_func, arch_name in globals()['test_%s' % sys.argv[1]]():
            test_func(arch_name)
    else:
        for test_func, arch_name in test_veritesting_a():
            test_func(arch_name)
        for test_func, arch_name in test_veritesting_b():
            test_func(arch_name)
|
{"/angr/surveyors/caller.py": ["/angr/surveyors/explorer.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/tests/test_argv.py": ["/angr/__init__.py"], "/angr/path.py": ["/angr/errors.py", "/angr/path_history.py"], "/angr/simos.py": ["/angr/errors.py", "/angr/tablespecs.py"], "/tests/test_block_cache.py": ["/angr/__init__.py"], "/tests/test_signed_div.py": ["/angr/__init__.py"], "/angr/knowledge_base.py": ["/angr/knowledge/data.py"], "/angr/surveyors/executor.py": ["/angr/surveyor.py", "/angr/surveyors/__init__.py"], "/tests/test_self_modifying_code.py": ["/angr/__init__.py"], "/tests/test_hook.py": ["/angr/__init__.py"], "/angr/surveyors/slicecutor.py": ["/angr/surveyor.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/tests/broken_variableseekr.py": ["/angr/__init__.py"], "/tests/test_argc_sym.py": ["/angr/__init__.py"], "/tests/test_cfg_path.py": ["/angr/__init__.py"], "/angr/analyses/veritesting.py": ["/angr/errors.py", "/angr/analysis.py", "/angr/path_group.py", "/angr/path.py"], "/angr/analyses/congruency_check.py": ["/angr/analysis.py", "/angr/errors.py"], "/angr/knowledge/__init__.py": ["/angr/knowledge/data.py"], "/tests/test_scanf.py": ["/angr/__init__.py"], "/tests/test_vfg_path.py": ["/angr/__init__.py"], "/tests/test_serialization.py": ["/angr/__init__.py"], "/tests/test_mem_funcs.py": ["/angr/__init__.py"], "/angr/exploration_techniques/__init__.py": ["/angr/exploration_techniques/explorer.py", "/angr/exploration_techniques/dfs.py", "/angr/exploration_techniques/veritesting.py", "/angr/exploration_techniques/oppologist.py", "/angr/errors.py"], "/angr/surveyors/__init__.py": ["/angr/surveyors/explorer.py", "/angr/surveyors/executor.py", "/angr/surveyors/escaper.py", "/angr/surveyors/slicecutor.py", "/angr/surveyors/caller.py"], "/tests/test_static_hooker.py": ["/angr/__init__.py"], "/tests/test_cfgfast.py": ["/angr/__init__.py"], "/tests/test_str_funcs.py": ["/angr/__init__.py"], "/angr/exploration_techniques/oppologist.py": 
["/angr/errors.py", "/angr/exploration_techniques/__init__.py"], "/tests/test_argc.py": ["/angr/__init__.py"], "/angr/analyses/cdg.py": ["/angr/analysis.py"], "/angr/surveyors/escaper.py": ["/angr/surveyor.py", "/angr/surveyors/__init__.py"], "/tests/test_checkbyte.py": ["/angr/__init__.py"], "/angr/analyses/static_hooker.py": ["/angr/analysis.py", "/angr/errors.py"], "/angr/analyses/forward_analysis.py": ["/angr/errors.py"], "/angr/analyses/__init__.py": ["/angr/analyses/cdg.py", "/angr/analyses/ddg.py", "/angr/analyses/girlscout.py", "/angr/analyses/veritesting.py", "/angr/analyses/dfg.py", "/angr/analyses/congruency_check.py", "/angr/analyses/static_hooker.py"], "/angr/exploration_techniques/dfs.py": ["/angr/exploration_techniques/__init__.py"], "/angr/__init__.py": ["/angr/project.py", "/angr/regmap.py", "/angr/path.py", "/angr/errors.py", "/angr/surveyor.py", "/angr/analyses/__init__.py", "/angr/analysis.py", "/angr/tablespecs.py", "/angr/simos.py", "/angr/path_group.py", "/angr/surveyors/caller.py", "/angr/log.py"], "/tests/test_echo.py": ["/angr/__init__.py"], "/tests/test_explorer.py": ["/angr/__init__.py"], "/angr/exploration_techniques/veritesting.py": ["/angr/exploration_techniques/__init__.py"], "/angr/path_group.py": ["/angr/errors.py", "/angr/path.py", "/angr/__init__.py"], "/angr/analyses/dfg.py": ["/angr/analysis.py"], "/tests/test_cle_gdb.py": ["/angr/__init__.py"], "/angr/surveyor.py": ["/angr/errors.py", "/angr/path.py", "/angr/surveyors/__init__.py"], "/angr/surveyors/explorer.py": ["/angr/surveyor.py", "/angr/errors.py", "/angr/surveyors/__init__.py"], "/angr/analysis.py": ["/angr/errors.py"], "/angr/factory.py": ["/angr/surveyors/caller.py", "/angr/lifter.py", "/angr/errors.py", "/angr/path.py", "/angr/path_group.py", "/angr/knowledge/__init__.py"], "/tests/test_strtol.py": ["/angr/__init__.py"], "/angr/exploration_techniques/explorer.py": ["/angr/exploration_techniques/__init__.py"], "/tests/test_veritesting.py": ["/angr/__init__.py"]}
|
38,520
|
d4rkspir1t/groupdetection-dgmg
|
refs/heads/main
|
/json_to_csv.py
|
import json
import csv
# Column layout of the output CSV: one row per detected bounding box per frame.
header = ['fr_no', 'x', 'y', 'w', 'h', 'cx', 'cy', 'cen_dep', 'dep_avg', 'orient', 'group']

# Load the per-frame detection dictionary produced by the YOLO pipeline.
# Use a context manager so the handle is closed even if json.load raises.
with open('yolo_db_orientation_20210810_cd_1.json') as f:
    data = json.load(f)
print(data)

# newline='' is the documented requirement for csv.writer targets; without it
# rows come out double-spaced on Windows.
with open('yolo_db_orientation_20210810_cd_1.csv', 'w', newline='') as f:
    csv_file = csv.writer(f, delimiter=';')
    csv_file.writerow(header)
    for item in data:
        print(item)
        print(data[item])
        for box in data[item]:
            # First column is the frame number (the JSON key).
            row_data = [int(item)]
            print(box)
            for idx, value in enumerate(box):
                # Columns 6-8 (cen_dep, dep_avg, orient) are floats;
                # everything else is integral.
                if idx in (6, 7, 8):
                    val = float(value)
                else:
                    val = int(value)
                row_data.append(val)
            print(row_data)
            csv_file.writerow(row_data)
|
{"/growl_tests_code.py": ["/growl_utils/utils.py"]}
|
38,521
|
d4rkspir1t/groupdetection-dgmg
|
refs/heads/main
|
/build_graph_rica.py
|
import pandas as pd
import os
import csv
import copy
from pprint import pprint
import math
import dgl
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import scipy.sparse as sp
from dgl.nn import SAGEConv
import itertools
from sklearn.metrics import roc_auc_score, accuracy_score
import dgl.dataloading as dgl_dl
import random
import datetime
import time
# Locations of the SALSA dataset annotations.
# NOTE(review): both halves point at the same salsa_cpp directory; the
# poster-session path presumably should reference a salsa_ps folder -- confirm.
base_path_cpp = 'salsa/Annotation/salsa_cpp/'
base_path_ps = 'salsa/Annotation/salsa_cpp/'
# Per-person geometry CSVs live under this subdirectory.
person_log = 'geometryGT/'
# Ground-truth F-formation (group membership) annotation file.
fformation_log = 'fformationGT.csv'
class GraphSAGE(nn.Module):
    """Two-layer GraphSAGE encoder producing h_feats-dimensional node embeddings."""

    def __init__(self, in_feats, h_feats):
        super(GraphSAGE, self).__init__()
        # Both layers use mean neighbourhood aggregation.
        self.conv1 = SAGEConv(in_feats, h_feats, 'mean')
        self.conv2 = SAGEConv(h_feats, h_feats, 'mean')

    def forward(self, g, in_feat):
        """Return node embeddings for graph ``g`` given input features ``in_feat``."""
        hidden = F.relu(self.conv1(g, in_feat))
        return self.conv2(g, hidden)
class MLPPredictor(nn.Module):
    """Edge scorer: a two-layer MLP over concatenated endpoint embeddings."""

    def __init__(self, h_feats):
        super().__init__()
        self.W1 = nn.Linear(h_feats * 2, h_feats)
        self.W2 = nn.Linear(h_feats, 1)

    def apply_edges(self, edges):
        """
        Computes a scalar score for each edge of the given graph.

        Parameters
        ----------
        edges :
            Has three members ``src``, ``dst`` and ``data``, each of
            which is a dictionary representing the features of the
            source nodes, the destination nodes, and the edges
            themselves.

        Returns
        -------
        dict
            New edge features: raw ``score`` logits and thresholded
            binary ``label`` values.
        """
        pair = torch.cat([edges.src['h'], edges.dst['h']], 1)
        score = self.W2(F.relu(self.W1(pair))).squeeze(1)
        # label = 1 when sigmoid(score) >= 0.5
        label = torch.round(torch.sigmoid(score))
        return {'score': score, 'label': label}

    def forward(self, g, h):
        """Score every edge of ``g`` using node embeddings ``h``."""
        with g.local_scope():
            g.ndata['h'] = h
            g.apply_edges(self.apply_edges)
            # Snapshot edge data inside the local scope before it is restored.
            return dict(g.edata), g
def compute_loss(pos_score, neg_score):
    """Binary cross-entropy loss: positive edges are labelled 1, negatives 0."""
    all_scores = torch.cat([pos_score, neg_score])
    all_labels = torch.cat([torch.ones(len(pos_score)), torch.zeros(len(neg_score))])
    return F.binary_cross_entropy_with_logits(all_scores, all_labels)
def compute_auc(pos_score, neg_score):
    """ROC-AUC of positive-edge scores against negative-edge scores."""
    joined = torch.cat([pos_score, neg_score]).numpy()
    truth = torch.cat(
        [torch.ones(pos_score.shape[0]), torch.zeros(neg_score.shape[0])]).numpy()
    return roc_auc_score(truth, joined)
def fetch_person_data(person_id, frame_ts, base_path):
    """
    Return the geometry row (all columns after the timestamp) for *person_id*
    at timestamp *frame_ts*, or None if no matching row exists.

    Reads <base_path>/geometryGT/<zero-padded id>.csv, where column 0 holds
    the frame timestamp.
    """
    csv_name = str(person_id).rjust(2, '0') + '.csv'
    csv_path = os.path.join(base_path, person_log, csv_name)
    with open(csv_path, 'r') as fh:
        for row in csv.reader(fh, delimiter=','):
            if float(row[0]) == frame_ts:
                return row[1:]
def read_frame_data(base_p, extra_t=0):
    """
    Parse fformationGT.csv under *base_p* into {frame_ts_str: [group lists]}.

    *extra_t* is added to every timestamp so that two recordings can be merged
    into a single dict without key collisions.  Tokens that fail integer
    conversion are reported and skipped.
    """
    ff_path = os.path.join(base_p, fformation_log)
    frames = {}
    with open(ff_path, 'r') as fh:
        for row in csv.reader(fh, delimiter=','):
            key = str(float(row[0]) + extra_t)
            members = []
            for token in row[1:]:
                try:
                    members.append(int(token))
                except ValueError:
                    print('BAD INPUT: ', token)
            frames.setdefault(key, []).append(members)
    return frames
def get_clusters(nodec, srcn, dstn):
    """
    Assign a cluster id to each of *nodec* nodes from parallel edge lists
    (srcn[i] -> dstn[i]).

    Each node that appears as an edge source receives a fresh cluster id,
    which is then copied onto every destination of its outgoing edges.
    Nodes never touched keep the sentinel id -1.
    """
    assignment = {}
    next_id = 0
    sources = list(srcn)
    for node in range(nodec):
        # Default to "unclustered" without clobbering an inherited id.
        assignment.setdefault(node, -1)
        if node in srcn:
            assignment[node] = next_id
            next_id += 1
            for pos, src in enumerate(sources):
                if src == node:
                    assignment[int(dstn[pos])] = assignment[node]
    return assignment
def swap_clusters(clusters):
    """Invert ``{node: cluster_id}`` into ``{cluster_id: [node, ...]}``."""
    by_cluster = {}
    for node, cluster_id in clusters.items():
        by_cluster.setdefault(cluster_id, []).append(node)
    return by_cluster
# --- Assemble per-frame node and edge tables from the SALSA annotations ---
# PS frames are offset by `extra_time` so the two sequences' timestamp keys
# cannot collide when the dicts are merged.
extra_time = 10000
frame_data_cpp = read_frame_data(base_path_cpp, 0)
frame_data_ps = read_frame_data(base_path_ps, extra_time)
frame_data = {**frame_data_cpp, **frame_data_ps}
# pprint(frame_data)
frame_node_data = {}   # frame_id -> [[person, group_id, x, y, head_pose], ...]
frame_edge_data = {}   # frame_id -> [[src, dst, distance, effort], ...]
for frame_id, frame_info in frame_data.items():
    node_data = []
    group_id_tracker = 0
    for group in frame_info:
        # Singleton "groups" are marked unassigned (-1); real groups get
        # sequential ids within the frame.
        if len(group) == 1:
            group_id = -1
        else:
            group_id = group_id_tracker
            group_id_tracker += 1
        for person in group:
            # PS frame keys carry the extra_time offset; undo it before
            # looking the person up in the PS logs.
            if float(frame_id) > extra_time:
                c_frame_id = round(float(frame_id)-extra_time, 2)
                data = fetch_person_data(person, c_frame_id, base_path_ps)
            else:
                data = fetch_person_data(person, float(frame_id), base_path_cpp)
            pos_x = float(data[0])
            pos_y = float(data[1])
            body_pose = float(data[3])
            rel_head_pose = float(data[4])
            # Head pose is logged relative to the body orientation.
            head_pose = body_pose + rel_head_pose
            # math.degrees() for degrees instead of radians
            # person_id 0, group_id 1, posx 2, posy 3, bodyp 4, rheadp 5, headp 6
            # node_data.append([person, group_id, pos_x, pos_y, body_pose, rel_head_pose, round(head_pose, 4)])
            node_data.append([person, group_id, pos_x, pos_y, round(head_pose, 4)])
    # pprint(node_data)
    # print(len(node_data))
    frame_node_data[frame_id] = node_data
    edge_data = []
    # Connect every ordered pair of distinct people that share a (non -1)
    # group; edge features are Euclidean distance and the "effort" angle.
    for person_data in node_data:
        person = person_data[0]
        group = person_data[1]
        for idx in range(len(node_data)):
            if node_data[idx][0] != person and node_data[idx][1] != -1:
                if group == node_data[idx][1]:
                    # src dst distance effort
                    distance = math.dist([person_data[2], person_data[3]], [node_data[idx][2], node_data[idx][3]])
                    # Fold the head-pose difference into (-2pi, 2pi)...
                    angle_diff = person_data[-1] - (node_data[idx][-1] - math.pi)
                    if angle_diff > math.pi * 2:
                        # print('bullshit +\t', angle_diff)
                        angle_diff = angle_diff % (math.pi * 2)
                        # print('\tcorrected: ', angle_diff)
                    elif angle_diff < math.pi * -2:
                        # print('bullshit -\t', angle_diff)
                        angle_diff = angle_diff % (math.pi * 2)
                        # print('\tcorrected: ', angle_diff)
                    # ...then into [0, 2pi) as the final effort value.
                    if angle_diff < 0:
                        effort = math.pi * 2 + angle_diff
                    else:
                        effort = angle_diff
                    # src dst dist eff
                    edge_data.append([person, node_data[idx][0], distance, effort])
    # pprint(edge_data)
    # print(len(edge_data))
    frame_edge_data[frame_id] = edge_data
    # break
# print(df_ff.head())
# for idx in df_ff.index:
#     print(idx, df_ff.loc[idx]['time'], df_ff.loc[idx]['group'])
# print(len(frame_data_cpp.keys()), len(frame_data_ps.keys()), len(frame_node_data.keys()), len(frame_edge_data.keys()))
# --- Build one fixed-size (18-node) DGL graph per frame ---
iters_cpp = 0
iters_ps = 0
all_graphs = []
for frame_id, val in frame_edge_data.items():
    # print('FR ID: ', frame_id)
    srcs = []
    dsts = []
    pos = {}   # 0-based person id -> [x, y], used for debug plotting
    for entry in val:
        # Person ids are 1-based in the annotations; graph nodes are 0-based.
        srcs.append(entry[0]-1)
        dsts.append(entry[1]-1)
    feats = []
    for person in frame_node_data[frame_id]:
        pos[person[0]-1] = [person[2], person[3]]
        feat = person[2:5]   # x, y, head_pose
        # print(person[0])
        feats.append(feat)
    feats = torch.from_numpy(np.array(feats))
    graph = dgl.graph((srcs, dsts), num_nodes=18)
    missing = []
    for idx in range(0, 18):
        if idx not in pos.keys():
            missing.append(idx)
            # print(idx)
            node_list = graph.nodes().tolist()
            node_list.remove(idx)
            # print(node_list)
            graph = graph.subgraph(node_list)
    # Frames without all 18 people are dropped entirely, so `feats` always
    # lines up 1:1 with the graph's nodes below.
    if len(missing) > 0:
        continue
    # print(graph.number_of_nodes(), len(feats), len(frame_node_data[frame_id]))
    draw_graph = False
    graph.ndata['feat'] = feats.float()
    # print(graph.ndata['feat'][:10])
    # print('# nodes: %d, # edges: %d' % (graph.number_of_nodes(), graph.number_of_edges()))
    if draw_graph:
        # Optional debug rendering of the frame graph to disk.
        nx_g = graph.to_networkx().to_undirected()
        # pos = nx.kamada_kawai_layout(nx_g)
        print(pos)
        # should assign pos on -1:1 scale based on coordinates
        try:
            nx.draw(nx_g, pos, with_labels=True, node_color="#A0CBE2")
        except nx.exception.NetworkXError:
            # Missing people get a dummy origin position and a distinct colour.
            node_cs = []
            for i in range(18):
                if i not in pos.keys():
                    pos[i] = [0, 0]
                    node_cs.append('#541E1B')
                else:
                    node_cs.append("#A0CBE2")
            nx.draw(nx_g, pos, with_labels=True, node_color=node_cs)
        if float(frame_id) < extra_time:
            base_path = 'salsa/cpp_graphs'
            iters_cpp += 1
            name = '%d.png' % iters_cpp
            graph_path = os.path.join(base_path, name.rjust(9, '0'))
        else:
            base_path = 'salsa/ps_graphs'
            iters_ps += 1
            name = '%d.png' % iters_ps
            graph_path = os.path.join(base_path, name.rjust(9, '0'))
        plt.savefig(graph_path)
        plt.close()
    # print('Edge count: %d, Node count: %d, Feature count: %d' % (graph.num_edges(), graph.num_nodes(), len(graph.ndata['feat'][0])))
    all_graphs.append(graph)
    # break
# --- 60/40 train/test split over the shuffled frame graphs ---
random.shuffle(all_graphs)
split_idx = math.ceil(len(all_graphs)*0.6)
train_graphs = all_graphs[:split_idx]
test_graphs = all_graphs[split_idx:]
train_bg = dgl.batch(train_graphs)
test_bg = dgl.batch(test_graphs)
# print(train_bg.batch_size)
# print(test_bg.batch_size)
# Hyper-parameter grid; `param_opt` switches between the AUC-only search
# and the F1-based evaluation branch below.
param_opt = False
h_feats_list = [2, 3, 4, 5, 6, 10, 15, 20]
epochs_list = [10, 15, 20, 25, 30, 40, 50, 100, 150, 200, 250]
total = len(h_feats_list)*len(epochs_list)
iter_count = 10
count = 1
if param_opt:
    # Grid search scored by link-prediction AUC only.
    for h_feats in h_feats_list:
        for epochs in epochs_list:
            # h_feats = 3False
            # epochs = 100
            model = GraphSAGE(train_bg.ndata['feat'].shape[1], h_feats)
            # # You can replace DotPredictor with MLPPredictor.
            pred = MLPPredictor(h_feats)
            #
            # # ----------- 3. set up loss and optimizer -------------- #
            # # in this case, loss will in training loop
            optimizer = torch.optim.Adam(itertools.chain(model.parameters(), pred.parameters()), lr=0.01)
            auc_scores = []
            for _ in range(iter_count):
                for batched_graph in train_graphs:
                    u, v = batched_graph.edges()
                    # Dense complement of the adjacency (minus self loops)
                    # supplies the negative edge samples.
                    adj = sp.coo_matrix((np.ones(len(u)), (u.numpy(), v.numpy())))
                    try:
                        adj_neg = 1 - adj.todense() - np.eye(batched_graph.number_of_nodes())
                    except ValueError:
                        continue
                    neg_u, neg_v = np.where(adj_neg != 0)
                    train_pos_u, train_pos_v = u, v
                    train_neg_u, train_neg_v = neg_u, neg_v
                    train_pos_g = dgl.graph((train_pos_u, train_pos_v), num_nodes=batched_graph.number_of_nodes())
                    train_neg_g = dgl.graph((train_neg_u, train_neg_v), num_nodes=batched_graph.number_of_nodes())
                    #
                    # # ----------- 4. training -------------------------------- #
                    all_logits = []
                    for e in range(epochs):
                        # forward
                        h = model(batched_graph, batched_graph.ndata['feat'])
                        pos_score = pred(train_pos_g, h)[0]['score']
                        neg_score = pred(train_neg_g, h)[0]['score']
                        loss = compute_loss(pos_score, neg_score)
                        # backward
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()
                        # if e % 5 == 0:
                        #     print('In epoch {}, loss: {}'.format(e, loss))
                #
                # # ----------- 5. check results ------------------------ #
                #
                # NOTE(review): evaluation reuses `h` from the last trained
                # batch rather than re-encoding each test graph — confirm
                # this is intentional.
                for batched_graph in test_graphs:
                    u, v = batched_graph.edges()
                    adj = sp.coo_matrix((np.ones(len(u)), (u.numpy(), v.numpy())))
                    try:
                        adj_neg = 1 - adj.todense() - np.eye(batched_graph.number_of_nodes())
                    except ValueError:
                        continue
                    neg_u, neg_v = np.where(adj_neg != 0)
                    test_pos_u, test_pos_v = u, v
                    test_neg_u, test_neg_v = neg_u, neg_v
                    test_pos_g = dgl.graph((test_pos_u, test_pos_v), num_nodes=batched_graph.number_of_nodes())
                    test_neg_g = dgl.graph((test_neg_u, test_neg_v), num_nodes=batched_graph.number_of_nodes())
                    with torch.no_grad():
                        pos_score = pred(test_pos_g, h)[0]['score']
                        neg_score = pred(test_neg_g, h)[0]['score']
                        auc = compute_auc(pos_score, neg_score)
                        # print('AUC', auc)
                        auc_scores.append(auc)
            print('#%d of %d\t%d, %d\tTested on: %d, Avg AUC: %.4f, Stdev: %.4f' % (count, total, h_feats, epochs,
                                                                                    len(auc_scores), np.mean(auc_scores),
                                                                                    np.std(auc_scores)))
            count += 1
            # Append this grid point's summary row to the tracker CSV.
            model_output_tracker = pd.DataFrame(
                list(zip([datetime.datetime.now()], [h_feats], [epochs], [len(auc_scores)], [np.mean(auc_scores)], [np.std(auc_scores)])),
                columns=['time', 'feature_count', 'epoch_count', 'test_length', 'mean_auc', 'std_auc'])
            if os.path.exists('model_output_tracker.csv'):
                model_output_tracker.to_csv('model_output_tracker.csv', mode='a', index=False, header=False)
            else:
                model_output_tracker.to_csv('model_output_tracker.csv', mode='w', index=False, header=True)
else:
    # Smaller grid, scored by group-detection precision/recall/F1:
    # clusters recovered from predicted edges vs ground-truth groups.
    h_feats_list = [2, 3, 4]
    epochs_list = [15, 25, 100, 150]
    total = len(h_feats_list) * len(epochs_list)
    count = 0
    for h_feats in h_feats_list:
        for epochs in epochs_list:
            iteration = 0
            # Repeat until 10 runs each produced at least one F1 score.
            while iteration != 10:
                # h_feats = 3
                # epochs = 20
                model = GraphSAGE(train_bg.ndata['feat'].shape[1], h_feats)
                # # You can replace DotPredictor with MLPPredictor.
                pred = MLPPredictor(h_feats)
                #
                # # ----------- 3. set up loss and optimizer -------------- #
                # # in this case, loss will in training loop
                optimizer = torch.optim.Adam(itertools.chain(model.parameters(), pred.parameters()), lr=0.01)
                start_t = datetime.datetime.now()
                for batched_graph in train_graphs:
                    u, v = batched_graph.edges()
                    adj = sp.coo_matrix((np.ones(len(u)), (u.numpy(), v.numpy())))
                    try:
                        adj_neg = 1 - adj.todense() - np.eye(batched_graph.number_of_nodes())
                    except ValueError:
                        continue
                    neg_u, neg_v = np.where(adj_neg != 0)
                    train_pos_u, train_pos_v = u, v
                    train_neg_u, train_neg_v = neg_u, neg_v
                    train_pos_g = dgl.graph((train_pos_u, train_pos_v), num_nodes=batched_graph.number_of_nodes())
                    train_neg_g = dgl.graph((train_neg_u, train_neg_v), num_nodes=batched_graph.number_of_nodes())
                    #
                    # # ----------- 4. training -------------------------------- #
                    all_logits = []
                    for e in range(epochs):
                        # forward
                        h = model(batched_graph, batched_graph.ndata['feat'])
                        pos_score = pred(train_pos_g, h)[0]['score']
                        neg_score = pred(train_neg_g, h)[0]['score']
                        loss = compute_loss(pos_score, neg_score)
                        # backward
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()
                        # if e % 5 == 0:
                        #     print('In epoch {}, loss: {}'.format(e, loss))
                # print('Training took: ', datetime.datetime.now()-start_t)
                #
                # # ----------- 5. check results ------------------------ #
                #
                start_t = datetime.datetime.now()
                plot_tests = False
                auc_scores = []
                precision_scores = []
                recall_scores = []
                f1_scores = []
                for batched_graph in test_graphs:
                    # Edge-less copy of the test graph: its dense complement
                    # (all candidate pairs) is what the model labels below.
                    test_graph = copy.copy(batched_graph)
                    # print('Test graph', test_graph.ndata['feat'])
                    test_eids = test_graph.edges(form='eid')
                    test_graph.remove_edges(test_eids)
                    # print('Test graph', test_graph.num_nodes(), test_graph.num_edges())
                    # print(batched_graph.num_nodes(), batched_graph.num_edges())
                    # print(batched_graph.nodes())
                    u, v = batched_graph.edges()
                    u_t, v_t = test_graph.edges()
                    adj = sp.coo_matrix((np.ones(len(u)), (u.numpy(), v.numpy())))
                    # adj_t = sp.coo_matrix((np.ones(len(u_t)), (u_t.numpy(), v_t.numpy())))
                    try:
                        adj_neg = 1 - adj.todense() - np.eye(batched_graph.number_of_nodes())
                        adj_t_neg = 1 - np.eye(test_graph.number_of_nodes())
                    except ValueError:
                        continue
                    neg_u, neg_v = np.where(adj_neg != 0)
                    neg_t_u, neg_t_v = np.where(adj_t_neg != 0)
                    test_pos_u, test_pos_v = u, v
                    test_neg_u, test_neg_v = neg_u, neg_v
                    test_pos_g = dgl.graph((test_pos_u, test_pos_v), num_nodes=batched_graph.number_of_nodes())
                    test_neg_g = dgl.graph((test_neg_u, test_neg_v), num_nodes=batched_graph.number_of_nodes())
                    test_full_graph = dgl.graph((neg_t_u, neg_t_v), num_nodes=test_graph.number_of_nodes())
                    # test_full_graph.ndata['feat'] = test_graph.ndata['feat']
                    # print('Test graph negative stats', test_full_graph.num_nodes(), test_full_graph.num_edges())
                    with torch.no_grad():
                        pos_out, pos_graph_out = pred(test_pos_g, h)
                        neg_out, neg_graph_out = pred(test_neg_g, h)
                        test_out, test_graph_out = pred(test_full_graph, h)
                        pos_score = pos_out['score']
                        neg_score = neg_out['score']
                        pos_labels = pos_out['label']
                        neg_labels = neg_out['label']
                        test_labels = test_out['label']
                        # print('Test labels: ', len(test_labels), test_labels)
                        pred_labels = torch.cat([pos_labels, neg_labels]).numpy()
                        scores = torch.cat([pos_score, neg_score]).numpy()
                        labels = torch.cat(
                            [torch.ones(pos_score.shape[0]), torch.zeros(neg_score.shape[0])]).numpy()
                        auc = roc_auc_score(labels, scores)
                        # print(len(scores), '\n', pred_labels[:len(pos_labels)], '\n', pred_labels[len(pos_labels):])
                        # print('AUC', auc)
                        auc_scores.append(auc)
                        # Keep only edges the model labelled positive.
                        to_remove = []
                        for i in range(len(test_labels)):
                            if test_labels[i] == 0:
                                to_remove.append(i)
                        if plot_tests:
                            fig, (ax1, ax2) = plt.subplots(1, 2)
                        test_graph_out.remove_edges(to_remove)
                        original_nodec = batched_graph.num_nodes()
                        original_u, original_v = batched_graph.edges()
                        pred_nodec = test_graph_out.num_nodes()
                        pred_u, pred_v = test_graph_out.edges()
                        original_clusters = get_clusters(original_nodec, original_u, original_v)
                        pred_clusters = get_clusters(pred_nodec, pred_u, pred_v)
                        # pprint(original_clusters)
                        # pprint(pred_clusters)
                        swap_original_clusters = swap_clusters(original_clusters)
                        swap_pred_clusters = swap_clusters(pred_clusters)
                        # pprint(swap_original_clusters)
                        # pprint(swap_pred_clusters)
                        # Group-level scoring: a GT group counts as a true
                        # positive when its best-overlapping predicted group
                        # covers >= 2/3 of it and misses <= 1/3.
                        tp = 0
                        fp = 0
                        fn = 0
                        t = 2/3
                        t_ = 1-t
                        used_pred_clusters = [-1]
                        for key, cluster in swap_original_clusters.items():
                            if key == -1:
                                continue
                            else:
                                matched_clusters = {}
                                fullsize = len(cluster)
                                for pred_key, pred_cluster in swap_pred_clusters.items():
                                    if pred_key == -1:
                                        continue
                                    match = 0
                                    miss = 0
                                    for node in cluster:
                                        if node in pred_cluster:
                                            match += 1
                                        else:
                                            miss += 1
                                    if match > 0:
                                        matched_clusters[pred_key] = [match, miss]
                                max_match = 0
                                best_match = {}
                                for match_key, match_val in matched_clusters.items():
                                    if match_val[0] > max_match:
                                        max_match = match_val[0]
                                        best_match = {match_key: match_val}
                                if len(list(best_match.keys())) == 0:
                                    continue
                                used_pred_clusters.append(list(best_match.keys())[0])
                                best_match_val = list(best_match.values())[0]
                                match = best_match_val[0]
                                miss = best_match_val[1]
                                if match / fullsize >= t and miss / fullsize <= t_:
                                    tp += 1
                                    verdict = 'tp'
                                else:
                                    fn += 1
                                    verdict = 'fn'
                                # print(key, match, miss, fullsize, verdict)
                        # Predicted groups never matched to any GT group are
                        # false positives.
                        for key in swap_pred_clusters.keys():
                            if key not in used_pred_clusters:
                                fp += 1
                        # print('TP: %d, FN: %d, FP: %d' % (tp, fn, fp))
                        if tp + fp == 0:
                            precision = 0
                        else:
                            precision = tp / (tp + fp)
                        if tp + fn == 0:
                            recall = 0
                        else:
                            recall = tp / (tp + fn)
                        if precision + recall == 0:
                            continue
                        else:
                            f1 = 2 * (precision * recall) / (precision + recall)
                        precision_scores.append(precision)
                        recall_scores.append(recall)
                        f1_scores.append(f1)
                        if plot_tests:
                            nx_g = test_graph_out.to_networkx().to_undirected()
                            # pos = nx.kamada_kawai_layout(nx_g)
                            ax1 = plt.subplot(1,2,1)
                            nx.draw(nx_g, pos, with_labels=True, node_color="#A0CBE2")
                            # ax1.margin(5)
                            ax2 = plt.subplot(1,2,2)
                            nx_g_original = batched_graph.to_networkx().to_undirected()
                            nx.draw(nx_g_original, pos, with_labels=True, node_color="#A0CBE2")
                            # ax2.margin(5)
                            # plt.show()
                            plt.close()
                # if count == 3:
                #     break
                # print('%d, %d\tTested on: %d, Avg AUC: %.4f, Stdev: %.4f' % (h_feats, epochs,
                #                                                              len(auc_scores), np.mean(auc_scores),
                #                                                              np.std(auc_scores)))
                # print('Testing took: ', datetime.datetime.now()-start_t)
                # print('Precision: %.4f, Recall: %.4f, F1: %.4f' % (np.mean(precision_scores), np.mean(recall_scores), np.mean(f1_scores)))
                if len(f1_scores) > 0:
                    iteration += 1
                    model_output_tracker = pd.DataFrame(
                        list(zip([datetime.datetime.now()], [h_feats], [epochs], [len(f1_scores)],
                                 [np.mean(precision_scores)], [np.mean(recall_scores)], [np.mean(f1_scores)])),
                        columns=['time', 'feature_count', 'epoch_count', 'test_length', 'mean_precision', 'mean_recall', 'mean_f1'])
                    if os.path.exists('model_output_tracker_f1_sel_onlyheadv2.csv'):
                        model_output_tracker.to_csv('model_output_tracker_f1_sel_onlyheadv2.csv', mode='a', index=False, header=False)
                    else:
                        model_output_tracker.to_csv('model_output_tracker_f1_sel_onlyheadv2.csv', mode='w', index=False, header=True)
            count += 1
            print('#%d of %d\t%d, %d' % (count, total, h_feats, epochs))
|
{"/growl_tests_code.py": ["/growl_utils/utils.py"]}
|
38,522
|
d4rkspir1t/groupdetection-dgmg
|
refs/heads/main
|
/growl_utils/utils.py
|
import os
import csv
from sklearn.metrics import roc_auc_score
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn import SAGEConv
person_log = 'geometryGT/'
fformation_log = 'fformationGT.csv'
class GraphSAGE(nn.Module):
    """Two-layer GraphSAGE encoder producing h_feats-dim node embeddings."""

    def __init__(self, in_feats, h_feats):
        super(GraphSAGE, self).__init__()
        # Mean aggregation on both layers; attribute names are kept so
        # existing state_dicts / checkpoints keep loading.
        self.conv1 = SAGEConv(in_feats, h_feats, 'mean')
        self.conv2 = SAGEConv(h_feats, h_feats, 'mean')

    def forward(self, g, in_feat):
        """Encode node features ``in_feat`` on graph ``g``."""
        hidden = F.relu(self.conv1(g, in_feat))
        return self.conv2(g, hidden)
class MLPPredictor(nn.Module):
    """Two-layer MLP that scores graph edges from endpoint embeddings."""

    def __init__(self, h_feats):
        super().__init__()
        # Concatenated (src, dst) embedding -> hidden -> scalar logit.
        self.W1 = nn.Linear(h_feats * 2, h_feats)
        self.W2 = nn.Linear(h_feats, 1)

    def apply_edges(self, edges):
        """
        Computes a scalar score for each edge of the given graph.

        Parameters
        ----------
        edges :
            Has three members ``src``, ``dst`` and ``data``, each of
            which is a dictionary representing the features of the
            source nodes, the destination nodes, and the edges
            themselves.

        Returns
        -------
        dict
            New edge features: the raw logit under 'score' and its hard
            0/1 rounded-sigmoid value under 'label'.
        """
        pair = torch.cat([edges.src['h'], edges.dst['h']], 1)
        logits = self.W2(F.relu(self.W1(pair))).squeeze(1)
        hard = torch.round(torch.sigmoid(logits))
        return {'score': logits, 'label': hard}

    def forward(self, g, h):
        """Score all edges of ``g``; returns (edge-feature dict, graph)."""
        with g.local_scope():
            g.ndata['h'] = h
            g.apply_edges(self.apply_edges)
            # Copy edata out before local_scope reverts it.
            return dict(g.edata), g
def compute_loss(pos_score, neg_score):
    """BCE-with-logits loss: positive edges labelled 1, negatives 0."""
    all_scores = torch.cat([pos_score, neg_score])
    all_labels = torch.cat(
        [torch.ones(pos_score.shape[0]), torch.zeros(neg_score.shape[0])])
    return F.binary_cross_entropy_with_logits(all_scores, all_labels)
def compute_loss_posonly(pos_score):
    """BCE-with-logits loss over positive edges only (all labels are 1).

    Behaviour-equivalent to the original, minus the redundant
    single-element ``torch.cat([...])`` wrappers around the scores and
    labels.
    """
    labels = torch.ones(pos_score.shape[0])
    return F.binary_cross_entropy_with_logits(pos_score, labels)
def compute_auc(pos_score, neg_score):
    """Area under the ROC curve; positive edges are the positive class."""
    scores = torch.cat([pos_score, neg_score]).numpy()
    truth = torch.cat(
        [torch.ones(pos_score.shape[0]), torch.zeros(neg_score.shape[0])]).numpy()
    return roc_auc_score(truth, scores)
def fetch_person_data(person_id, frame_ts, base_path):
    """Return the logged fields for ``person_id`` at ``frame_ts``.

    Scans the person's geometry CSV and returns everything after the
    timestamp column of the first row whose timestamp equals
    ``round(frame_ts, 5)``; falls through (None) when nothing matches.
    """
    target = round(frame_ts, 5)
    fname = str(person_id).rjust(2, '0') + '.csv'
    with open(os.path.join(base_path, person_log, fname), 'r') as csvf:
        for row in csv.reader(csvf, delimiter=','):
            if float(row[0]) == target:
                return row[1:]
def read_frame_data(base_p, extra_t=0):
    """Parse the f-formation ground-truth CSV under ``base_p``.

    Returns ``{str(timestamp + extra_t): [group, ...]}`` where each
    group is a list of integer person ids; unparseable id fields are
    reported and dropped.
    """
    result = {}
    path = os.path.join(base_p, fformation_log)
    with open(path, 'r') as csvf:
        for row in csv.reader(csvf, delimiter=','):
            key = str(float(row[0]) + extra_t)
            members = []
            for idx in row[1:]:
                try:
                    members.append(int(idx))
                except ValueError:
                    print('BAD INPUT: ', idx)
            result.setdefault(key, []).append(members)
    return result
def read_rica_frdata(bpath, rel_side_dist=1, extra_t=0,):
    """Parse the RICA ground-truth CSV at ``bpath``.

    Returns ``{frame: [[frame, x, y, rotation, label], ...]}`` where
    frame is column 0 plus ``extra_t`` and x is column 5 scaled by
    ``rel_side_dist``. The header row is skipped.
    """
    frame_data = {}
    with open(bpath, 'r') as csvf:
        for line_no, row in enumerate(csv.reader(csvf, delimiter=';')):
            if line_no == 0:
                continue  # header
            frame = int(row[0]) + extra_t
            record = [frame, int(row[5]) * rel_side_dist,
                      float(row[7]), float(row[9]), int(row[10])]
            frame_data.setdefault(frame, []).append(record)
    return frame_data
def get_clusters(nodec, srcn, dstn):
    """Assign a cluster id to each of ``nodec`` nodes from edge lists.

    Nodes default to cluster -1; each source node opens a new cluster id
    and every destination it points at inherits that id (later sources
    overwrite earlier assignments).
    """
    assignment = {}
    fresh = 0
    for node in range(nodec):
        if node not in assignment:
            assignment[node] = -1
        if node in srcn:
            assignment[node] = fresh
            fresh += 1
            for pos, source in enumerate(list(srcn)):
                if source == node:
                    assignment[int(dstn[pos])] = assignment[node]
    return assignment
def swap_clusters(clusters):
    """Group node ids by their cluster id: ``{cluster: [nodes...]}``."""
    inverted = {}
    for node, cid in clusters.items():
        if cid not in inverted:
            inverted[cid] = []
        inverted[cid].append(node)
    return inverted
|
{"/growl_tests_code.py": ["/growl_utils/utils.py"]}
|
38,523
|
d4rkspir1t/groupdetection-dgmg
|
refs/heads/main
|
/build_graph.py
|
import pandas as pd
import os
import csv
import copy
from pprint import pprint
import math
import dgl
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import scipy.sparse as sp
from dgl.nn import SAGEConv
import itertools
from sklearn.metrics import roc_auc_score, accuracy_score
import dgl.dataloading as dgl_dl
import random
import datetime
import time
base_path_cpp = 'salsa/Annotation/salsa_cpp/'
base_path_ps = 'salsa/Annotation/salsa_ps/'
base_path_rica = './gt_db_orientation_20210412_cd_1.csv'
person_log = 'geometryGT/'
fformation_log = 'fformationGT.csv'
class GraphSAGE(nn.Module):
    """GraphSAGE node encoder: two mean-aggregating SAGEConv layers."""

    def __init__(self, in_feats, h_feats):
        super(GraphSAGE, self).__init__()
        # Attribute names conv1/conv2 preserved for state_dict compatibility.
        self.conv1 = SAGEConv(in_feats, h_feats, 'mean')
        self.conv2 = SAGEConv(h_feats, h_feats, 'mean')

    def forward(self, g, in_feat):
        """Return h_feats-dim embeddings for the nodes of ``g``."""
        return self.conv2(g, F.relu(self.conv1(g, in_feat)))
class MLPPredictor(nn.Module):
    """Edge scorer: an MLP over concatenated endpoint embeddings."""

    def __init__(self, h_feats):
        super().__init__()
        self.W1 = nn.Linear(h_feats * 2, h_feats)
        self.W2 = nn.Linear(h_feats, 1)

    def apply_edges(self, edges):
        """
        Computes a scalar score for each edge of the given graph.

        Parameters
        ----------
        edges :
            Has three members ``src``, ``dst`` and ``data``, each of
            which is a dictionary representing the features of the
            source nodes, the destination nodes, and the edges
            themselves.

        Returns
        -------
        dict
            A dictionary of new edge features: logit under 'score',
            rounded sigmoid (0/1) under 'label'.
        """
        joint = torch.cat([edges.src['h'], edges.dst['h']], 1)
        score = self.W2(F.relu(self.W1(joint))).squeeze(1)
        label = torch.sigmoid(score).round()
        return {'score': score, 'label': label}

    def forward(self, g, h):
        """Score every edge of ``g``; returns (edge-feature dict, graph)."""
        with g.local_scope():
            g.ndata['h'] = h
            g.apply_edges(self.apply_edges)
            return dict(g.edata), g
def compute_loss(pos_score, neg_score):
    """Logistic loss over concatenated positive/negative edge logits."""
    target = torch.cat(
        [torch.ones(pos_score.shape[0]), torch.zeros(neg_score.shape[0])])
    logits = torch.cat([pos_score, neg_score])
    return F.binary_cross_entropy_with_logits(logits, target)
def compute_auc(pos_score, neg_score):
    """ROC-AUC with positive edges as the positive class."""
    all_scores = torch.cat([pos_score, neg_score]).numpy()
    all_labels = torch.cat(
        [torch.ones(pos_score.shape[0]), torch.zeros(neg_score.shape[0])]).numpy()
    return roc_auc_score(all_labels, all_scores)
def fetch_person_data(person_id, frame_ts, base_path):
    """Look up one person's logged row for a given frame timestamp.

    Reads ``<base_path>/<person_log>/NN.csv`` (NN = zero-padded person id)
    and returns the fields after the timestamp column for the first row
    whose timestamp equals ``frame_ts``, or ``None`` when no row matches
    (the original fell off the end and returned None implicitly).

    NOTE(review): exact float equality on the parsed timestamp — callers
    must pass the same rounding the log files use.
    """
    f_name = str(person_id).rjust(2, '0') + '.csv'
    f_path = os.path.join(base_path, person_log, f_name)
    with open(f_path, 'r') as csvf:
        for row in csv.reader(csvf, delimiter=','):
            if float(row[0]) == frame_ts:
                return row[1:]
    return None  # frame not present in this person's log
def read_frame_data(base_p, extra_t=0):
    """Parse the f-formation ground-truth CSV under ``base_p``.

    Returns a dict mapping ``str(timestamp + extra_t)`` to a list of
    groups, each group being a list of integer person ids. Fields that
    fail int() conversion are reported and skipped.
    """
    ff_path = os.path.join(base_p, fformation_log)
    frame_data = {}
    with open(ff_path, 'r') as csvf:
        csvrdr = csv.reader(csvf, delimiter=',')
        for row in csvrdr:
            # Key is a *string* so CPP and offset PS frames share one dict.
            frame = str(float(row[0]) + extra_t)
            if frame not in frame_data.keys():
                frame_data[frame] = []
            group = []
            for idx in row[1:]:
                try:
                    group.append(int(idx))
                except ValueError:
                    print('BAD INPUT: ', idx)
            frame_data[frame].append(group)
    return frame_data
def read_rica_frdata(bpath, rel_side_dist=1, extra_t=0,):
    """Parse the RICA ground-truth CSV at ``bpath``.

    Returns ``{frame: [[frame, x, y, rotation, label], ...]}``; frame is
    column 0 plus ``extra_t``, x is column 5 scaled by ``rel_side_dist``,
    and the header row is skipped.
    """
    parsed = {}
    with open(bpath, 'r') as csvf:
        for row_no, row in enumerate(csv.reader(csvf, delimiter=';'), start=1):
            if row_no == 1:
                continue  # skip header row
            frame = int(row[0]) + extra_t
            entry = [frame,
                     int(row[5]) * rel_side_dist,
                     float(row[7]),
                     float(row[9]),
                     int(row[10])]
            parsed.setdefault(frame, []).append(entry)
    return parsed
def get_clusters(nodec, srcn, dstn):
    """Map each node to a cluster id (-1 = unclustered) from edge lists.

    A node appearing as an edge source opens a fresh cluster; all of its
    destinations inherit that cluster id.
    """
    labels = {}
    cid = 0
    for node in range(nodec):
        if node not in labels:
            labels[node] = -1
        if node in srcn:
            labels[node] = cid
            cid += 1
            for j, s in enumerate(list(srcn)):
                if s == node:
                    labels[int(dstn[j])] = labels[node]
    return labels
def swap_clusters(clusters):
    """Invert a node->cluster mapping into cluster->node-list."""
    out = {}
    for member, cid in clusters.items():
        bucket = out.get(cid)
        if bucket is None:
            bucket = []
            out[cid] = bucket
        bucket.append(member)
    return out
# --- PS-only variant of the frame-table + graph building pipeline ---
extra_time = 10000
# frame_data_cpp = read_frame_data(base_path_cpp, 0)
frame_data_ps = read_frame_data(base_path_ps, extra_time)
# frame_data = {**frame_data_cpp, **frame_data_ps}
frame_data = frame_data_ps
# pprint(frame_data)
frame_node_data = {}   # frame_id -> [[person, group_id, x, y, head_pose], ...]
frame_edge_data = {}   # frame_id -> [[src, dst, distance, effort], ...]
max_side_dist = 0      # largest observed x, used later for RICA scaling
for frame_id, frame_info in frame_data.items():
    node_data = []
    group_id_tracker = 0
    for group in frame_info:
        # Singleton "groups" stay unassigned (-1).
        if len(group) == 1:
            group_id = -1
        else:
            group_id = group_id_tracker
            group_id_tracker += 1
        for person in group:
            # PS frame keys carry the extra_time offset; undo it first.
            if float(frame_id) > extra_time:
                c_frame_id = round(float(frame_id)-extra_time, 2)
                data = fetch_person_data(person, c_frame_id, base_path_ps)
            else:
                data = fetch_person_data(person, float(frame_id), base_path_cpp)
            pos_x = float(data[0])
            if pos_x > max_side_dist:
                max_side_dist = pos_x
            pos_y = float(data[1])
            body_pose = float(data[3])
            rel_head_pose = float(data[4])
            # Head pose is logged relative to the body orientation.
            head_pose = body_pose + rel_head_pose
            # math.degrees() for degrees instead of radians
            # person_id 0, group_id 1, posx 2, posy 3, bodyp 4, rheadp 5, headp 6
            # node_data.append([person, group_id, pos_x, pos_y, body_pose, rel_head_pose, round(head_pose, 4)])
            # ABLATION
            # ===============================================================
            node_data.append([person, group_id, pos_x, pos_y, round(head_pose, 4)])
            # node_data.append([person, group_id, pos_x, pos_y])
            # ===============================================================
    # pprint(node_data)
    # print(len(node_data))
    frame_node_data[frame_id] = node_data
    edge_data = []
    # Connect every ordered pair of distinct people sharing a real group.
    for person_data in node_data:
        person = person_data[0]
        group = person_data[1]
        for idx in range(len(node_data)):
            if node_data[idx][0] != person and node_data[idx][1] != -1:
                if group == node_data[idx][1]:
                    # src dst distance effort
                    distance = math.dist([person_data[2], person_data[3]], [node_data[idx][2], node_data[idx][3]])
                    # ABLATION
                    # ===============================================================
                    # Fold head-pose difference into [0, 2pi) as "effort".
                    angle_diff = person_data[-1] - (node_data[idx][-1] - math.pi)
                    if angle_diff > math.pi * 2:
                        # print('bullshit +\t', angle_diff)
                        angle_diff = angle_diff % (math.pi * 2)
                        # print('\tcorrected: ', angle_diff)
                    elif angle_diff < math.pi * -2:
                        # print('bullshit -\t', angle_diff)
                        angle_diff = angle_diff % (math.pi * 2)
                        # print('\tcorrected: ', angle_diff)
                    if angle_diff < 0:
                        effort = math.pi * 2 + angle_diff
                    else:
                        effort = angle_diff
                    # src dst dist eff
                    edge_data.append([person, node_data[idx][0], distance, effort])
                    # edge_data.append([person, node_data[idx][0], distance])
                    # ===============================================================
    # pprint(edge_data)
    # print(len(edge_data))
    frame_edge_data[frame_id] = edge_data
    # break
# print(df_ff.head())
# for idx in df_ff.index:
#     print(idx, df_ff.loc[idx]['time'], df_ff.loc[idx]['group'])
# print(len(frame_data_cpp.keys()), len(frame_data_ps.keys()), len(frame_node_data.keys()), len(frame_edge_data.keys()))
# --- Build per-frame DGL graphs (variable node count, unlike the 18-node version) ---
iters_cpp = 0
iters_ps = 0
all_graphs = []
skipped = 0
for frame_id, val in frame_edge_data.items():
    print('FR ID: ', frame_id)
    # PS vs CPP differ in participant count (21 vs 18); currently unused below.
    if float(frame_id) >= extra_time:
        custom_node_count = 21
    else:
        # continue
        custom_node_count = 18
    srcs = []
    dsts = []
    pos = {}
    for entry in val:
        # 1-based person ids -> 0-based node ids.
        srcs.append(entry[0]-1)
        dsts.append(entry[1]-1)
    feats = []
    for person in frame_node_data[frame_id]:
        pos[person[0]-1] = [person[2], person[3]]
        feat = person[2:5]   # x, y, head_pose
        # print(person[0])
        feats.append(feat)
    feats = torch.from_numpy(np.array(feats))
    try:
        graph = dgl.graph((srcs, dsts), num_nodes=len(frame_node_data[frame_id]))
    except dgl._ffi.base.DGLError:
        # Edge endpoint id exceeds the node count for this frame — skip it.
        skipped += 1
        continue
    print(graph.number_of_nodes(), len(feats), len(frame_node_data[frame_id]))
    draw_graph = False
    graph.ndata['feat'] = feats.float()
    # print(graph.ndata['feat'][:10])
    # print('# nodes: %d, # edges: %d' % (graph.number_of_nodes(), graph.number_of_edges()))
    if draw_graph:
        # Optional debug rendering of the frame graph to disk.
        nx_g = graph.to_networkx().to_undirected()
        # pos = nx.kamada_kawai_layout(nx_g)
        print(pos)
        # should assign pos on -1:1 scale based on coordinates
        try:
            nx.draw(nx_g, pos, with_labels=True, node_color="#A0CBE2")
        except nx.exception.NetworkXError:
            node_cs = []
            for i in range(graph.number_of_nodes()):
                if i not in pos.keys():
                    pos[i] = [0, 0]
                    node_cs.append('#541E1B')
                else:
                    node_cs.append("#A0CBE2")
            nx.draw(nx_g, pos, with_labels=True, node_color=node_cs)
        if float(frame_id) < extra_time:
            base_path = 'salsa/cpp_graphs'
            iters_cpp += 1
            name = '%d.png' % iters_cpp
            graph_path = os.path.join(base_path, name.rjust(9, '0'))
        else:
            base_path = 'salsa/ps_graphs'
            iters_ps += 1
            name = '%d.png' % iters_ps
            graph_path = os.path.join(base_path, name.rjust(9, '0'))
        plt.savefig(graph_path)
        plt.close()
    # print('Edge count: %d, Node count: %d, Feature count: %d' % (graph.num_edges(), graph.num_nodes(), len(graph.ndata['feat'][0])))
    all_graphs.append([frame_id, graph])
    # break
print('Skipped: ', skipped)
# exit()
# --- Deterministic shuffle; first 10% of frames become the training set ---
random.seed(21)
random.shuffle(all_graphs)
split_idx = math.ceil(len(all_graphs)*0.1)
train_graphs = []
# test_graphs = []
train_idx = []
test_idx = []
for x in range(len(all_graphs)):
    if x < split_idx:
        train_graphs.append(all_graphs[x][1])
        train_idx.append(all_graphs[x][0])
    # else:
    #     test_graphs.append(all_graphs[x][1])
    #     test_idx.append(all_graphs[x][0])
train_frame_node_data = {}
for frame_id, frame_val in frame_node_data.items():
    if frame_id in train_idx:
        train_frame_node_data[frame_id] = frame_val
train_bg = dgl.batch(train_graphs)
# test_bg = dgl.batch(test_graphs)
print('idx outputs', len(train_idx), len(test_idx))
# print(train_bg.batch_size)
# print(test_bg.batch_size)
output_mats = True
if output_mats:
frame_node_data = {}
frame_edge_data = {}
trained = 0
not_trained = 0
frame_data_cpp = read_frame_data(base_path_cpp, 0)
frame_data_ps = read_frame_data(base_path_ps, extra_time)
rica_test = False
if max_side_dist == 0:
rel_dist = 1
else:
rel_dist = max_side_dist/640
# frame_data_rica = read_rica_frdata(base_path_rica, rel_dist, extra_time*2)
# frame_data = frame_data_rica
# exit()
# frame_data = {**frame_data_cpp, **frame_data_ps}
# frame_data = frame_data_cpp
frame_data = frame_data_ps
if rica_test:
for frame_id, frame_info in frame_data.items():
not_trained += 1
node_data = []
person_count = 1
for person in frame_info:
# print(frame, int(row[5])*rel_side_dist, float(row[7]), row[9], row[10]) # or 8 , frno, x, y, rot, label
pos_x = person[1]
pos_y = person[2]
head_pose = person[3]
group_id = int(person[4])
if group_id == 0:
group_id = -1
# ABLATION
# ===============================================================
node_data.append([person_count, group_id, round(pos_x, 2), round(pos_y, 2), round(head_pose, 4)])
# node_data.append([person_count, group_id, round(pos_x, 2), round(pos_y, 2)])
# ===============================================================
person_count += 1
frame_node_data[frame_id] = node_data
edge_data = []
for person_data in node_data:
person = person_data[0]
group = person_data[1]
for idx in range(len(node_data)):
if node_data[idx][0] != person and node_data[idx][1] != -1:
if group == node_data[idx][1]:
# src dst distance effort
distance = math.dist([person_data[2], person_data[3]],
[node_data[idx][2], node_data[idx][3]])
# ABLATION
# ===============================================================
angle_diff = person_data[-1] - (node_data[idx][-1] - math.pi)
if angle_diff > math.pi * 2:
# print('bullshit +\t', angle_diff)
angle_diff = angle_diff % (math.pi * 2)
# print('\tcorrected: ', angle_diff)
elif angle_diff < math.pi * -2:
# print('bullshit -\t', angle_diff)
angle_diff = angle_diff % (math.pi * 2)
# print('\tcorrected: ', angle_diff)
if angle_diff < 0:
effort = math.pi * 2 + angle_diff
else:
effort = angle_diff
# src dst dist eff
edge_data.append([person, node_data[idx][0], distance, effort])
# edge_data.append([person, node_data[idx][0], distance])
# ===============================================================
frame_edge_data[frame_id] = edge_data
# exit()
else:
for frame_id, frame_info in frame_data.items():
if frame_id in train_idx:
trained += 1
else:
not_trained += 1
node_data = []
group_id_tracker = 0
for group in frame_info:
if len(group) == 1:
group_id = -1
else:
group_id = group_id_tracker
group_id_tracker += 1
for person in group:
if float(frame_id) > extra_time:
c_frame_id = round(float(frame_id)-extra_time, 2)
data = fetch_person_data(person, c_frame_id, base_path_ps)
else:
data = fetch_person_data(person, float(frame_id), base_path_cpp)
pos_x = float(data[0])
pos_y = float(data[1])
body_pose = float(data[3])
rel_head_pose = float(data[4])
head_pose = body_pose + rel_head_pose
# math.degrees() for degrees instead of radians
# person_id 0, group_id 1, posx 2, posy 3, bodyp 4, rheadp 5, headp 6
# node_data.append([person, group_id, pos_x, pos_y, body_pose, rel_head_pose, round(head_pose, 4)])
# ABLATION
# ===============================================================
node_data.append([person, group_id, pos_x, pos_y, round(head_pose, 4)])
# node_data.append([person, group_id, pos_x, pos_y])
# ===============================================================
# pprint(node_data)
# print(len(node_data))
frame_node_data[frame_id] = node_data
edge_data = []
for person_data in node_data:
person = person_data[0]
group = person_data[1]
for idx in range(len(node_data)):
if node_data[idx][0] != person and node_data[idx][1] != -1:
if group == node_data[idx][1]:
# src dst distance effort
distance = math.dist([person_data[2], person_data[3]],
[node_data[idx][2], node_data[idx][3]])
# ABLATION
# ===============================================================
angle_diff = person_data[-1] - (node_data[idx][-1] - math.pi)
if angle_diff > math.pi * 2:
# print('bullshit +\t', angle_diff)
angle_diff = angle_diff % (math.pi * 2)
# print('\tcorrected: ', angle_diff)
elif angle_diff < math.pi * -2:
# print('bullshit -\t', angle_diff)
angle_diff = angle_diff % (math.pi * 2)
# print('\tcorrected: ', angle_diff)
if angle_diff < 0:
effort = math.pi * 2 + angle_diff
else:
effort = angle_diff
# src dst dist eff
edge_data.append([person, node_data[idx][0], distance, effort])
# edge_data.append([person, node_data[idx][0], distance])
# ===============================================================
# pprint(edge_data)
# print(len(edge_data))
frame_edge_data[frame_id] = edge_data
# Report how many frames fell into the train vs. not-train buckets (split sanity check).
print(trained, not_trained)
# ------------------------------------------------------------------------------------------------------------------
# OUTPUT TO MAT
# ------------------------------------------------------------------------------------------------------------------
# import scipy.io as sio
# gt_timestamp_list = range(len(list(frame_node_data.keys())))
# # timestamp_list = range(len(list(train_frame_node_data.keys())) + len(list(frame_node_data.keys())))
# groundtruth_mat_holder = {'GTtimestamp': gt_timestamp_list}
# features_mat_holder = {'timestamp': gt_timestamp_list}
#
# groups_per_frame = []
# features_per_frame = []
#
# for frame_id, people_val in frame_node_data.items():
# groups_dict = {}
# groups_list = []
# features = []
# individual_start_idx = 100
# for person in people_val:
# # print('Person ', person)
# if person[1] == -1:
# groups_dict[individual_start_idx] = [person[0]]
# individual_start_idx += 1
# else:
# if person[1] not in groups_dict.keys():
# groups_dict[person[1]] = []
# groups_dict[person[1]].append(person[0])
# # if person[1] not in groups_dict.keys():
# # groups_dict[person[1]] = []
# # groups_dict[person[1]].append(person[0])
# features.append([person[0], person[2], person[3], math.radians(person[4])])
# # for group_val in groups_dict.values():
# # groups_list.append(np.array(list(group_val)))
# # groups_list = np.array(groups_list)
# groups_list = np.array(list(groups_dict.values()))
# groups_per_frame.append(groups_list)
# features_per_frame.append(np.array(features))
# # break
# groundtruth_mat_holder['GTgroups'] = np.array(groups_per_frame)
# features_mat_holder['features'] = np.array(features_per_frame)
# print('SAVING!')
# # print(groups_per_frame[716], '\n---\n', groups_per_frame[930])
# sio.savemat('groundtruth.mat', groundtruth_mat_holder)
# sio.savemat('features.mat', features_mat_holder)
# exit()
# ------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
# Build one DGL graph per frame from the node/edge tables assembled above.
# Node features are [pos_x, pos_y, head_pose] (columns 2:5 of each node row);
# edges connect members of the same group. Frames whose edge list produces an
# invalid graph (DGLError) are counted in `skipped` and dropped.
test_graphs = []
skipped = 0
for frame_id, val in frame_edge_data.items():
    print('FR ID: ', frame_id)
    srcs = []
    dsts = []
    pos = {}  # node index -> [x, y], used only for optional plotting below
    for entry in val:
        # print(val)
        # print(entry)
        # Person ids are 1-based in the tables; DGL node ids are 0-based.
        srcs.append(entry[0] - 1)
        dsts.append(entry[1] - 1)
    feats = []
    for person in frame_node_data[frame_id]:
        pos[person[0] - 1] = [person[2], person[3]]
        feat = person[2:5]
        # print(person[0])
        feats.append(feat)
    feats = torch.from_numpy(np.array(feats))
    try:
        graph = dgl.graph((srcs, dsts), num_nodes=len(frame_node_data[frame_id]))
    except dgl._ffi.base.DGLError:
        # e.g. an edge references a node id >= num_nodes; drop the frame.
        skipped += 1
        continue
    # print(graph.number_of_nodes(), len(feats), len(frame_node_data[frame_id]))
    draw_graph = False  # flip on to dump a PNG of each frame's graph
    if draw_graph:
        nx_g = graph.to_networkx().to_undirected()
        # pos = nx.kamada_kawai_layout(nx_g)
        print(pos)
        # should assign pos on -1:1 scale based on coordinates
        try:
            nx.draw(nx_g, pos, with_labels=True, node_color="#A0CBE2")
        except nx.exception.NetworkXError:
            # Some nodes had no position entry; give them a dummy position
            # and a distinct colour so they are visible in the plot.
            node_cs = []
            for i in range(graph.number_of_nodes()):
                if i not in pos.keys():
                    pos[i] = [0, 0]
                    node_cs.append('#541E1B')
                else:
                    node_cs.append("#A0CBE2")
            nx.draw(nx_g, pos, with_labels=True, node_color=node_cs)
        # NOTE(review): iters_cpp / iters_ps are assumed to be initialised
        # earlier in the file (outside this view) — confirm before enabling.
        if float(frame_id) < extra_time:
            base_path = 'rica_graphs'
            iters_cpp += 1
            name = '%d.png' % iters_cpp
            graph_path = os.path.join(base_path, name.rjust(9, '0'))
        else:
            base_path = 'rica_graphs'
            iters_ps += 1
            name = '%d.png' % iters_ps
            graph_path = os.path.join(base_path, name.rjust(9, '0'))
        plt.savefig(graph_path)
        plt.close()
    graph.ndata['feat'] = feats.float()
    # print(graph.ndata['feat'][:10])
    test_graphs.append(graph)
    # break
print('Skipped: ', skipped)
test_bg = dgl.batch(test_graphs)
print('final test graph count ', len(test_graphs))
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# Grid search over hidden sizes and epoch counts; each combination is retrained
# up to `iter_count` times (a retry loop — an iteration only counts when it
# produced at least one F1 score). Results are appended to a CSV tracker.
iter_count = 15
# h_feats_list = [2, 3, 4, 5, 6, 10, 15, 20]
# epochs_list = [10, 15, 20, 25, 30, 40, 50, 100, 150, 200, 250]
h_feats_list = [15]
epochs_list = [150]
total = len(h_feats_list) * len(epochs_list)
count = 0
for h_feats in h_feats_list:
    for epochs in epochs_list:
        # if h_feats != 20 and h_feats != 15:
        #     continue
        # if h_feats == 15 and epochs != 250:
        #     continue
        # if h_feats in [2, 3, 4] and epochs in [15, 25, 100, 150]:
        #     continue
        iteration = 0
        while iteration != iter_count:
            # h_feats = 3
            # epochs = 20
            # model = GraphSAGE(test_bg.ndata['feat'].shape[1], h_feats)
            # NOTE(review): train_bg / GraphSAGE / MLPPredictor / compute_loss /
            # get_clusters / swap_clusters are defined earlier in the file
            # (outside this view).
            model = GraphSAGE(train_bg.ndata['feat'].shape[1], h_feats)
            # model = GraphSAGE(3, h_feats)
            # # You can replace DotPredictor with MLPPredictor.
            pred = MLPPredictor(h_feats)
            #
            # # ----------- 3. set up loss and optimizer -------------- #
            # # in this case, loss will in training loop
            optimizer = torch.optim.Adam(itertools.chain(model.parameters(), pred.parameters()), lr=0.01)
            start_t = datetime.datetime.now()
            # Train per graph: positive examples are the graph's edges, negative
            # examples are every absent (non-self) node pair.
            for batched_graph in train_graphs:
                u, v = batched_graph.edges()
                adj = sp.coo_matrix((np.ones(len(u)), (u.numpy(), v.numpy())))
                try:
                    adj_neg = 1 - adj.todense() - np.eye(batched_graph.number_of_nodes())
                except ValueError:
                    # adjacency shape mismatch (edge ids beyond node count); skip graph
                    continue
                neg_u, neg_v = np.where(adj_neg != 0)
                train_pos_u, train_pos_v = u, v
                train_neg_u, train_neg_v = neg_u, neg_v
                train_pos_g = dgl.graph((train_pos_u, train_pos_v), num_nodes=batched_graph.number_of_nodes())
                train_neg_g = dgl.graph((train_neg_u, train_neg_v), num_nodes=batched_graph.number_of_nodes())
                #
                # # ----------- 4. training -------------------------------- #
                all_logits = []
                for e in range(epochs):
                    # forward
                    # print('FEAT COUNT', len(batched_graph.ndata['feat']))
                    h = model(batched_graph, batched_graph.ndata['feat'])
                    pos_score = pred(train_pos_g, h)[0]['score']
                    neg_score = pred(train_neg_g, h)[0]['score']
                    loss = compute_loss(pos_score, neg_score)
                    # backward
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                    # if e % 5 == 0:
                    #     print('In epoch {}, loss: {}'.format(e, loss))
            # print('Training took: ', datetime.datetime.now()-start_t)
            #
            # # ----------- 5. check results ------------------------ #
            #
            start_t = datetime.datetime.now()
            plot_tests = True
            auc_scores = []
            precision_scores = []
            recall_scores = []
            f1_scores = []
            print('Starting tests', len(test_graphs))
            test_c = 0
            for batched_graph in test_graphs:
                test_c += 1
                # Work on a copy with all edges removed: the model must
                # re-predict the links from node features alone.
                test_graph = copy.copy(batched_graph)
                # print('Test graph', test_graph.ndata['feat'])
                test_eids = test_graph.edges(form='eid')
                test_graph.remove_edges(test_eids)
                print('Test graph', test_graph.num_nodes(), test_graph.num_edges(), len(test_graph.ndata['feat']),
                      'Bat graph', batched_graph.num_nodes(), batched_graph.num_edges(), len(batched_graph.ndata['feat']))
                # print(batched_graph.num_nodes(), batched_graph.num_edges())
                # print(batched_graph.nodes())
                u, v = batched_graph.edges()
                u_t, v_t = test_graph.edges()
                # print('Test graph', test_graph.num_nodes(), test_graph.num_edges(), len(test_graph.ndata['feat']))
                # continue
                try:
                    adj = sp.coo_matrix((np.ones(len(u)), (u.numpy(), v.numpy())))
                except ValueError:
                    continue
                # adj_t = sp.coo_matrix((np.ones(len(u_t)), (u_t.numpy(), v_t.numpy())))
                try:
                    adj_neg = 1 - adj.todense() - np.eye(batched_graph.number_of_nodes())
                    # Candidate set for prediction: every non-self node pair.
                    adj_t_neg = 1 - np.eye(test_graph.number_of_nodes())
                except ValueError:
                    continue
                neg_u, neg_v = np.where(adj_neg != 0)
                neg_t_u, neg_t_v = np.where(adj_t_neg != 0)
                test_pos_u, test_pos_v = u, v
                test_neg_u, test_neg_v = neg_u, neg_v
                test_pos_g = dgl.graph((test_pos_u, test_pos_v), num_nodes=batched_graph.num_nodes())
                test_neg_g = dgl.graph((test_neg_u, test_neg_v), num_nodes=batched_graph.num_nodes())
                test_full_graph = dgl.graph((neg_t_u, neg_t_v), num_nodes=test_graph.num_nodes())
                # test_full_graph.ndata['feat'] = test_graph.ndata['feat']
                # print('Test graph negative stats', test_full_graph.num_nodes(), test_full_graph.num_edges())
                with torch.no_grad():
                    # NOTE(review): `h` here is the embedding left over from the
                    # LAST training graph — it is not recomputed for this test
                    # graph (the recompute lines are commented out). Confirm
                    # this is intentional.
                    # print(h)
                    # h = model(batched_graph, batched_graph.ndata['feat'])
                    # pos_out, pos_graph_out = pred(test_pos_g, h)
                    # neg_out, neg_graph_out = pred(test_neg_g, h)
                    # h = model(test_graph, test_graph.ndata['feat'])
                    test_out, test_graph_out = pred(test_full_graph, h)
                    # pos_score = pos_out['score']
                    # neg_score = neg_out['score']
                    # pos_labels = pos_out['label']
                    # neg_labels = neg_out['label']
                    test_labels = test_out['label']
                    # print('Test labels: ', len(test_labels), test_labels)
                    # pred_labels = torch.cat([pos_labels, neg_labels]).numpy()
                    # scores = torch.cat([pos_score, neg_score]).numpy()
                    # labels = torch.cat(
                    #     [torch.ones(pos_score.shape[0]), torch.zeros(neg_score.shape[0])]).numpy()
                    # auc = roc_auc_score(labels, scores)
                    # print(len(scores), '\n', pred_labels[:len(pos_labels)], '\n', pred_labels[len(pos_labels):])
                    # print('AUC', auc)
                    # auc_scores.append(auc)
                # Drop candidate edges the predictor labelled 0, leaving the
                # predicted group graph in test_graph_out.
                to_remove = []
                for i in range(len(test_labels)):
                    if test_labels[i] == 0:
                        to_remove.append(i)
                # if plot_tests and test_c in [1, 150, 300]:
                #     fig, (ax1, ax2) = plt.subplots(1, 2)
                test_graph_out.remove_edges(to_remove)
                original_nodec = batched_graph.num_nodes()
                original_u, original_v = batched_graph.edges()
                pred_nodec = test_graph_out.num_nodes()
                pred_u, pred_v = test_graph_out.edges()
                original_clusters = get_clusters(original_nodec, original_u, original_v)
                pred_clusters = get_clusters(pred_nodec, pred_u, pred_v)
                # pprint(original_clusters)
                # pprint(pred_clusters)
                swap_original_clusters = swap_clusters(original_clusters)
                swap_pred_clusters = swap_clusters(pred_clusters)
                # pprint(swap_original_clusters)
                # pprint(swap_pred_clusters)
                # Group-level scoring with the standard 2/3 overlap threshold:
                # a ground-truth group counts as TP if its best-matching
                # predicted group covers >= 2/3 of it and misses <= 1/3.
                tp = 0
                fp = 0
                fn = 0
                t = 2/3
                t_ = 1-t
                used_pred_clusters = [-1]
                for key, cluster in swap_original_clusters.items():
                    if key == -1:
                        # -1 holds singletons (people not in any group)
                        continue
                    else:
                        matched_clusters = {}
                        fullsize = len(cluster)
                        for pred_key, pred_cluster in swap_pred_clusters.items():
                            if pred_key == -1:
                                continue
                            match = 0
                            miss = 0
                            for node in cluster:
                                if node in pred_cluster:
                                    match += 1
                                else:
                                    miss += 1
                            if match > 0:
                                matched_clusters[pred_key] = [match, miss]
                        # Pick the predicted cluster with the highest overlap.
                        max_match = 0
                        best_match = {}
                        for match_key, match_val in matched_clusters.items():
                            if match_val[0] > max_match:
                                max_match = match_val[0]
                                best_match = {match_key: match_val}
                        if len(list(best_match.keys())) == 0:
                            continue
                        used_pred_clusters.append(list(best_match.keys())[0])
                        best_match_val = list(best_match.values())[0]
                        match = best_match_val[0]
                        miss = best_match_val[1]
                        if match / fullsize >= t and miss / fullsize <= t_:
                            tp += 1
                            verdict = 'tp'
                        else:
                            fn += 1
                            verdict = 'fn'
                        # print(key, match, miss, fullsize, verdict)
                # Any predicted cluster never matched to ground truth is a FP.
                for key in swap_pred_clusters.keys():
                    if key not in used_pred_clusters:
                        fp += 1
                # print('TP: %d, FN: %d, FP: %d' % (tp, fn, fp))
                if tp + fp == 0:
                    precision = 0
                else:
                    precision = tp / (tp + fp)
                if tp + fn == 0:
                    recall = 0
                else:
                    recall = tp / (tp + fn)
                if precision + recall == 0:
                    # Frame contributes no score at all (note: unlike the
                    # sibling script, which records f1 = 0.01 here).
                    continue
                else:
                    f1 = 2 * (precision * recall) / (precision + recall)
                    precision_scores.append(precision)
                    recall_scores.append(recall)
                    f1_scores.append(f1)
                if plot_tests:
                    nx_g = test_graph_out.to_networkx().to_undirected()
                    # pos = nx.kamada_kawai_layout(nx_g)
                    # NOTE(review): `pos` is the leftover dict from the graph
                    # construction loop above, i.e. the LAST frame's positions —
                    # plots of other frames likely use stale coordinates; confirm.
                    print(pos)
                    # should assign pos on -1:1 scale based on coordinates
                    try:
                        nx.draw(nx_g, pos, with_labels=True, node_color="#A0CBE2")
                    except nx.exception.NetworkXError:
                        pass
                        # node_cs = []
                        # for i in range(graph.number_of_nodes()):
                        #     if i not in pos.keys():
                        #         pos[i] = [0, 0]
                        #         node_cs.append('#541E1B')
                        #     else:
                        #         node_cs.append("#A0CBE2")
                        # nx.draw(nx_g, pos, with_labels=True, node_color=node_cs)
                    plt.savefig(os.path.join('./quickplots', '%d.png' % test_c))
                    # plt.show()
                    plt.close()
                # nx_g = test_graph_out.to_networkx().to_undirected()
                # # pos = nx.kamada_kawai_layout(nx_g)
                # ax1 = plt.subplot(1,2,1)
                # nx.draw(nx_g, pos, with_labels=True, node_color="#A0CBE2")
                # # ax1.margin(5)
                # ax2 = plt.subplot(1,2,2)
                # nx_g_original = batched_graph.to_networkx().to_undirected()
                # nx.draw(nx_g_original, pos, with_labels=True, node_color="#A0CBE2")
                #
                # # ax2.margin(5)
                # plt.show()
                # plt.close()
            # if count == 3:
            #     break
            # print('%d, %d\tTested on: %d, Avg AUC: %.4f, Stdev: %.4f' % (h_feats, epochs,
            #                                                              len(auc_scores), np.mean(auc_scores),
            #                                                              np.std(auc_scores)))
            # print('Testing took: ', datetime.datetime.now()-start_t)
            # print('Precision: %.4f, Recall: %.4f, F1: %.4f' % (np.mean(precision_scores), np.mean(recall_scores), np.mean(f1_scores)))
            if len(f1_scores) > 0:
                # Only runs that produced scores count towards iter_count.
                iteration += 1
                model_output_tracker = pd.DataFrame(
                    list(zip([datetime.datetime.now()], [h_feats], [epochs], [len(f1_scores)],
                             [np.mean(precision_scores)], [np.mean(recall_scores)], [np.mean(f1_scores)])),
                    columns=['time', 'feature_count', 'epoch_count', 'test_length', 'mean_precision', 'mean_recall', 'mean_f1'])
                # Append to the tracker CSV, writing the header only on first creation.
                if os.path.exists('model_output_tracker_f1_sel_onlyheadv2_20210430_ps.csv'):
                    model_output_tracker.to_csv('model_output_tracker_f1_sel_onlyheadv2_20210430_ps.csv', mode='a', index=False, header=False)
                else:
                    model_output_tracker.to_csv('model_output_tracker_f1_sel_onlyheadv2_20210430_ps.csv', mode='w', index=False, header=True)
                # Early exit for this combo once a good-enough model is found.
                if np.mean(f1_scores) > 0.64:
                    break
        count += 1
        print('#%d of %d\t%d, %d' % (count, total, h_feats, epochs))
|
{"/growl_tests_code.py": ["/growl_utils/utils.py"]}
|
38,524
|
d4rkspir1t/groupdetection-dgmg
|
refs/heads/main
|
/growl_tests_code.py
|
import pandas as pd
import os
import csv
import copy
from pprint import pprint
import math
import dgl
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import scipy.sparse as sp
from dgl.nn import SAGEConv
import itertools
from sklearn.metrics import roc_auc_score, accuracy_score
import dgl.dataloading as dgl_dl
import random
import datetime
import time
import growl_utils.utils as ut
import json
# Dataset locations: SALSA cocktail-party (cpp) and poster-session (ps)
# annotations, plus RICA ground-truth / YOLO-detection CSVs.
base_path_cpp = 'salsa/Annotation/salsa_cpp/'
base_path_ps = 'salsa/Annotation/salsa_ps/'
base_path_rica_gt = './gt_db_orientation_20210412_cd_1.csv'
base_path_rica = './yolo_db_orientation_20210810_cd_1.csv'
person_log = 'geometryGT/'
fformation_log = 'fformationGT.csv'
# Offset added to cpp frame ids so they cannot collide with ps frame ids.
extra_time = 10000
frame_data_ps = ut.read_frame_data(base_path_ps, 0)
frame_data_cpp = ut.read_frame_data(base_path_cpp, extra_time)
salsa_ps_keys = list(frame_data_ps.keys())
# print(salsa_ps_keys)
# Random 60/40 train/test split of the ps frames (unseeded — differs per run).
random.shuffle(salsa_ps_keys)
split_idx = math.ceil(len(salsa_ps_keys)*0.6)
# print(salsa_ps_keys)
train_set_keys = salsa_ps_keys[:split_idx]
test_set_keys = salsa_ps_keys[split_idx:]
# print(len(train_set_keys), len(test_set_keys))
train_dict = dict((k, frame_data_ps[k]) for k in train_set_keys)
# test_dict = dict((k, frame_data_ps[k]) for k in test_set_keys)
# remaining_dict = dict((k, frame_data_ps[k]) for k in test_set_keys)
# test_dict = {**remaining_dict, **frame_data_cpp}
# test_dict = frame_data_cpp
# print(len(train_dict.keys()), len(test_dict.keys()))
train_node_data = {}
train_edge_data = {}
# Largest x coordinate seen in the training data; later used to scale RICA data.
max_side_dist = 0
# Build per-frame node and edge tables for the training split.
# Node rows: [person_id, group_id, pos_x, pos_y, head_pose]; group_id -1 marks
# singletons. Edge rows connect every ordered pair within the same group with
# [src, dst, distance, effort] where effort is the head-turn angle needed to
# face the partner.
for frame_id, frame_info in train_dict.items():
    node_data = []
    group_id_tracker = 0
    for group in frame_info:
        if len(group) == 1:
            group_id = -1
        else:
            group_id = group_id_tracker
            group_id_tracker += 1
        for person in group:
            data = ut.fetch_person_data(person, float(frame_id), base_path_ps)
            pos_x = float(data[0])
            if pos_x > max_side_dist:
                max_side_dist = pos_x
            pos_y = float(data[1])
            body_pose = float(data[3])
            rel_head_pose = float(data[4])
            # Head pose is stored relative to the body pose.
            head_pose = body_pose + rel_head_pose
            # math.degrees() for degrees instead of radians
            # person_id 0, group_id 1, posx 2, posy 3, bodyp 4, rheadp 5, headp 6
            # node_data.append([person, group_id, pos_x, pos_y, body_pose, rel_head_pose, round(head_pose, 4)])
            # ABLATION
            # ===============================================================
            node_data.append([person, group_id, pos_x, pos_y, round(head_pose, 4)])
            # node_data.append([person, group_id, pos_x, pos_y])
            # ===============================================================
    # pprint(node_data)
    # print(len(node_data))
    train_node_data[frame_id] = node_data
    edge_data = []
    for person_data in node_data:
        person = person_data[0]
        group = person_data[1]
        for idx in range(len(node_data)):
            if node_data[idx][0] != person and node_data[idx][1] != -1:
                if group == node_data[idx][1]:
                    # src dst distance effort
                    distance = math.dist([person_data[2], person_data[3]], [node_data[idx][2], node_data[idx][3]])
                    # ABLATION
                    # ===============================================================
                    # Angle between this person's gaze and facing the partner.
                    angle_diff = person_data[-1] - (node_data[idx][-1] - math.pi)
                    if angle_diff > math.pi * 2:
                        # print('bullshit +\t', angle_diff)
                        angle_diff = angle_diff % (math.pi * 2)
                        # print('\tcorrected: ', angle_diff)
                    elif angle_diff < math.pi * -2:
                        # print('bullshit -\t', angle_diff)
                        # NOTE(review): Python's % with a positive modulus is
                        # already non-negative, so the `angle_diff < 0` branch
                        # below is unreachable for this path — confirm intent.
                        angle_diff = angle_diff % (math.pi * 2)
                        # print('\tcorrected: ', angle_diff)
                    # Normalise to [0, 2*pi).
                    if angle_diff < 0:
                        effort = math.pi * 2 + angle_diff
                    else:
                        effort = angle_diff
                    # src dst dist eff
                    edge_data.append([person, node_data[idx][0], distance, effort])
                    # edge_data.append([person, node_data[idx][0], distance])
                    # ===============================================================
    # pprint(edge_data)
    # print(len(edge_data))
    train_edge_data[frame_id] = edge_data
# Evaluate on the RICA detections rather than the SALSA hold-out split.
rica_test = True
# Scale factor mapping RICA coordinates into the SALSA coordinate range
# (presumably 640 is the RICA image width — confirm); fall back to 1 when
# no SALSA x-extent was observed.
rel_dist = 1 if max_side_dist == 0 else max_side_dist / 640
frame_data_rica = ut.read_rica_frdata(base_path_rica, rel_dist, 0)
frame_data_rica_gt = ut.read_rica_frdata(base_path_rica_gt, rel_dist, 0)
test_dict = frame_data_rica
test_node_data = {}
test_edge_data = {}
# Build per-frame node/edge tables for the test set. The `rica_test` branch
# consumes RICA rows (already [id?, x, y, head_pose, group_label]); the else
# branch mirrors the SALSA training build, routing cpp frames (offset by
# extra_time) to the cpp annotation directory.
for frame_id, frame_info in test_dict.items():
    if rica_test:
        node_data = []
        person_count = 1  # RICA rows carry no person id; number them 1..N per frame
        for person in frame_info:
            # print(frame, int(row[5])*rel_side_dist, float(row[7]), row[9], row[10]) # or 8 , frno, x, y, rot, label
            pos_x = person[1]
            pos_y = person[2]
            head_pose = person[3]
            group_id = int(person[4])
            # RICA uses label 0 for "no group"; map it to the -1 singleton marker.
            if group_id == 0:
                group_id = -1
            # ABLATION
            # ===============================================================
            node_data.append([person_count, group_id, round(pos_x, 2), round(pos_y, 2), round(head_pose, 4)])
            # node_data.append([person_count, group_id, round(pos_x, 2), round(pos_y, 2)])
            # ===============================================================
            person_count += 1
        test_node_data[frame_id] = node_data
        edge_data = []
        for person_data in node_data:
            person = person_data[0]
            group = person_data[1]
            for idx in range(len(node_data)):
                if node_data[idx][0] != person and node_data[idx][1] != -1:
                    if group == node_data[idx][1]:
                        # src dst distance effort
                        distance = math.dist([person_data[2], person_data[3]],
                                             [node_data[idx][2], node_data[idx][3]])
                        # ABLATION
                        # ===============================================================
                        angle_diff = person_data[-1] - (node_data[idx][-1] - math.pi)
                        if angle_diff > math.pi * 2:
                            # print('bullshit +\t', angle_diff)
                            angle_diff = angle_diff % (math.pi * 2)
                            # print('\tcorrected: ', angle_diff)
                        elif angle_diff < math.pi * -2:
                            # print('bullshit -\t', angle_diff)
                            angle_diff = angle_diff % (math.pi * 2)
                            # print('\tcorrected: ', angle_diff)
                        # Normalise to [0, 2*pi).
                        if angle_diff < 0:
                            effort = math.pi * 2 + angle_diff
                        else:
                            effort = angle_diff
                        # src dst dist eff
                        edge_data.append([person, node_data[idx][0], distance, effort])
                        # edge_data.append([person, node_data[idx][0], distance])
                        # ===============================================================
        test_edge_data[frame_id] = edge_data
    else:
        node_data = []
        group_id_tracker = 0
        for group in frame_info:
            if len(group) == 1:
                group_id = -1
            else:
                group_id = group_id_tracker
                group_id_tracker += 1
            for person in group:
                # Frames below extra_time come from the ps recording; above it,
                # undo the offset and read from the cpp annotations.
                if float(frame_id) < extra_time:
                    data = ut.fetch_person_data(person, float(frame_id), base_path_ps)
                else:
                    sub_id = float(frame_id)-extra_time
                    # print(sub_id)
                    data = ut.fetch_person_data(person, float(sub_id), base_path_cpp)
                # print(data)
                pos_x = float(data[0])
                if pos_x > max_side_dist:
                    max_side_dist = pos_x
                pos_y = float(data[1])
                body_pose = float(data[3])
                rel_head_pose = float(data[4])
                head_pose = body_pose + rel_head_pose
                # math.degrees() for degrees instead of radians
                # person_id 0, group_id 1, posx 2, posy 3, bodyp 4, rheadp 5, headp 6
                # node_data.append([person, group_id, pos_x, pos_y, body_pose, rel_head_pose, round(head_pose, 4)])
                # ABLATION
                # ===============================================================
                node_data.append([person, group_id, pos_x, pos_y, round(head_pose, 4)])
                # node_data.append([person, group_id, pos_x, pos_y])
                # ===============================================================
        # pprint(node_data)
        # print(len(node_data))
        test_node_data[frame_id] = node_data
        edge_data = []
        for person_data in node_data:
            person = person_data[0]
            group = person_data[1]
            for idx in range(len(node_data)):
                if node_data[idx][0] != person and node_data[idx][1] != -1:
                    if group == node_data[idx][1]:
                        # src dst distance effort
                        distance = math.dist([person_data[2], person_data[3]], [node_data[idx][2], node_data[idx][3]])
                        # ABLATION
                        # ===============================================================
                        angle_diff = person_data[-1] - (node_data[idx][-1] - math.pi)
                        if angle_diff > math.pi * 2:
                            # print('bullshit +\t', angle_diff)
                            angle_diff = angle_diff % (math.pi * 2)
                            # print('\tcorrected: ', angle_diff)
                        elif angle_diff < math.pi * -2:
                            # print('bullshit -\t', angle_diff)
                            angle_diff = angle_diff % (math.pi * 2)
                            # print('\tcorrected: ', angle_diff)
                        if angle_diff < 0:
                            effort = math.pi * 2 + angle_diff
                        else:
                            effort = angle_diff
                        # src dst dist eff
                        edge_data.append([person, node_data[idx][0], distance, effort])
                        # edge_data.append([person, node_data[idx][0], distance])
                        # ===============================================================
        # pprint(edge_data)
        # print(len(edge_data))
        test_edge_data[frame_id] = edge_data
# Turn the node/edge tables into DGL graphs: one loop for the training frames,
# one for the test frames. test_graph_frame_ids records the frame id of each
# surviving test graph so evaluation can look up the matching RICA frames.
train_graphs = []
test_graphs = []
test_graph_frame_ids = []
skipped = 0
iters_ps = 0
for frame_id, val in train_edge_data.items():
    # print('FR ID: ', frame_id)
    # custom_node_count appears unused below (21 people in cpp, 18 in ps).
    if float(frame_id) >= extra_time:
        custom_node_count = 21
    else:
        # continue
        custom_node_count = 18
    srcs = []
    dsts = []
    pos = {}  # node index -> [x, y], used only for optional plotting
    for entry in val:
        # Person ids are 1-based; DGL node ids are 0-based.
        srcs.append(entry[0]-1)
        dsts.append(entry[1]-1)
    feats = []
    for person in train_node_data[frame_id]:
        pos[person[0]-1] = [person[2], person[3]]
        feat = person[2:5]
        # print(person[0])
        feats.append(feat)
    feats = torch.from_numpy(np.array(feats))
    try:
        graph = dgl.graph((srcs, dsts), num_nodes=len(train_node_data[frame_id]))
    except dgl._ffi.base.DGLError:
        skipped += 1
        continue
    # print(graph.number_of_nodes(), len(feats), len(train_node_data[frame_id]))
    draw_graph = False
    graph.ndata['feat'] = feats.float()
    # print(graph.ndata['feat'][:10])
    # print('# nodes: %d, # edges: %d' % (graph.number_of_nodes(), graph.number_of_edges()))
    if draw_graph:
        nx_g = graph.to_networkx().to_undirected()
        # pos = nx.kamada_kawai_layout(nx_g)
        print(pos)
        # should assign pos on -1:1 scale based on coordinates
        try:
            nx.draw(nx_g, pos, with_labels=True, node_color="#A0CBE2")
        except nx.exception.NetworkXError:
            # Nodes missing a position get a dummy one and a distinct colour.
            node_cs = []
            for i in range(graph.number_of_nodes()):
                if i not in pos.keys():
                    pos[i] = [0, 0]
                    node_cs.append('#541E1B')
                else:
                    node_cs.append("#A0CBE2")
            nx.draw(nx_g, pos, with_labels=True, node_color=node_cs)
        base_path = 'salsa/ps_graphs'
        iters_ps += 1
        name = '%d.png' % iters_ps
        graph_path = os.path.join(base_path, name.rjust(9, '0'))
        plt.savefig(graph_path)
        plt.close()
    train_graphs.append(graph)
print('Skipped: ', skipped)
skipped = 0
iters_ps = 0
for frame_id, val in test_edge_data.items():
    # print('FR ID: ', frame_id)
    if float(frame_id) >= extra_time:
        custom_node_count = 21
    else:
        # continue
        custom_node_count = 18
    srcs = []
    dsts = []
    pos = {}
    for entry in val:
        srcs.append(entry[0]-1)
        dsts.append(entry[1]-1)
    feats = []
    for person in test_node_data[frame_id]:
        pos[person[0]-1] = [person[2], person[3]]
        feat = person[2:5]
        # print(person[0])
        feats.append(feat)
    feats = torch.from_numpy(np.array(feats))
    try:
        graph = dgl.graph((srcs, dsts), num_nodes=len(test_node_data[frame_id]))
    except dgl._ffi.base.DGLError:
        skipped += 1
        continue
    # print(graph.number_of_nodes(), len(feats), len(train_node_data[frame_id]))
    draw_graph = False
    graph.ndata['feat'] = feats.float()
    # print(graph.ndata['feat'][:10])
    # print('# nodes: %d, # edges: %d' % (graph.number_of_nodes(), graph.number_of_edges()))
    if draw_graph:
        nx_g = graph.to_networkx().to_undirected()
        # pos = nx.kamada_kawai_layout(nx_g)
        print(pos)
        # should assign pos on -1:1 scale based on coordinates
        try:
            nx.draw(nx_g, pos, with_labels=True, node_color="#A0CBE2")
        except nx.exception.NetworkXError:
            node_cs = []
            for i in range(graph.number_of_nodes()):
                if i not in pos.keys():
                    pos[i] = [0, 0]
                    node_cs.append('#541E1B')
                else:
                    node_cs.append("#A0CBE2")
            nx.draw(nx_g, pos, with_labels=True, node_color=node_cs)
        base_path = 'salsa/ps_graphs'
        iters_ps += 1
        name = '%d.png' % iters_ps
        graph_path = os.path.join(base_path, name.rjust(9, '0'))
        plt.savefig(graph_path)
        plt.close()
    test_graphs.append(graph)
    test_graph_frame_ids.append(frame_id)
print('Skipped: ', skipped)
# Single fixed configuration (h_feats=20, epochs=100): train a GraphSAGE
# encoder + MLP edge predictor per training graph, with positives = existing
# edges and negatives = all absent non-self node pairs.
count = 0
plot_tests = False
train_set = train_graphs
test_set = test_graphs
h_feats = 20
epochs = 100
# Any training graph works here — only the feature dimensionality is needed.
random_graph = random.sample(train_set, 1)[0]
# print(random_graph)
model = ut.GraphSAGE(random_graph.ndata['feat'].shape[1], h_feats)
pred = ut.MLPPredictor(h_feats)
optimizer = torch.optim.Adam(itertools.chain(model.parameters(), pred.parameters()), lr=0.01)
pos_edge_count = 0
neg_edge_count = 0
for single_train_graph in train_set:
    u, v = single_train_graph.edges()
    # ABLATION 2
    # ===============================================================
    adj = sp.coo_matrix((np.ones(len(u)), (u.numpy(), v.numpy())))
    try:
        adj_neg = 1 - adj.todense() - np.eye(single_train_graph.num_nodes())
    except ValueError:
        # adjacency/node-count shape mismatch; skip this graph
        continue
    neg_u, neg_v = np.where(adj_neg != 0)
    train_pos_u, train_pos_v = u, v
    train_neg_u, train_neg_v = neg_u, neg_v
    train_pos_g = dgl.graph((train_pos_u, train_pos_v), num_nodes=single_train_graph.num_nodes())
    train_neg_g = dgl.graph((train_neg_u, train_neg_v), num_nodes=single_train_graph.num_nodes())
    pos_edge_count += len(u)
    neg_edge_count += len(neg_u)
    # train_pos_u, train_pos_v = u, v
    # train_pos_g = dgl.graph((train_pos_u, train_pos_v), num_nodes=single_train_graph.num_nodes())
    # ===============================================================
    #
    # # ----------- 4. training -------------------------------- #
    all_logits = []
    for e in range(epochs):
        # forward
        # print('FEAT COUNT', len(batched_graph.ndata['feat']))
        h = model(single_train_graph, single_train_graph.ndata['feat'])
        pos_score = pred(train_pos_g, h)[0]['score']
        # ABLATION 2
        # ===============================================================
        neg_score = pred(train_neg_g, h)[0]['score']
        loss = ut.compute_loss(pos_score, neg_score)
        # loss = ut.compute_loss_posonly(pos_score)
        # ===============================================================
        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
print('+ edge c', pos_edge_count)
print('- edge c', neg_edge_count)
# exit()
# TEST
# Evaluation: for each test graph, strip its edges, score every candidate node
# pair with the trained predictor, rebuild the predicted group graph, and score
# predicted vs. ground-truth clusters with the 2/3 overlap criterion. Results
# are appended to a CSV tracker.
auc_scores = []
precision_scores = []
recall_scores = []
f1_scores = []
print('Starting tests', len(test_set))
test_c = 0
for single_val_idx, single_val_graph in enumerate(test_set):
    test_c += 1
    val_graph = copy.copy(single_val_graph)
    # print('Test graph', test_graph.ndata['feat'])
    test_eids = val_graph.edges(form='eid')
    val_graph.remove_edges(test_eids)
    u, v = single_val_graph.edges()
    u_t, v_t = val_graph.edges()
    # ABLATION 2
    # ===============================================================
    try:
        adj = sp.coo_matrix((np.ones(len(u)), (u.numpy(), v.numpy())))
    except ValueError:
        continue
    try:
        adj_neg = 1 - adj.todense() - np.eye(single_val_graph.num_nodes())
        # Candidate set for prediction: every non-self node pair.
        adj_t_neg = 1 - np.eye(val_graph.num_nodes())
    except ValueError:
        continue
    neg_u, neg_v = np.where(adj_neg != 0)
    neg_t_u, neg_t_v = np.where(adj_t_neg != 0)
    test_pos_u, test_pos_v = u, v
    test_neg_u, test_neg_v = neg_u, neg_v
    test_full_graph = dgl.graph((neg_t_u, neg_t_v), num_nodes=val_graph.num_nodes())
    # test_full_graph = dgl.graph((u_t, v_t), num_nodes=val_graph.num_nodes())
    # ===============================================================
    with torch.no_grad():
        # Embed the (original, edge-bearing) graph, then classify all
        # candidate pairs.
        h = model(single_val_graph, single_val_graph.ndata['feat'])
        test_out, test_graph_out = pred(test_full_graph, h)
        test_labels = test_out['label']
    # Remove candidate edges the predictor labelled 0, leaving the predicted
    # group graph in test_graph_out.
    to_remove = []
    for i in range(len(test_labels)):
        if test_labels[i] == 0:
            to_remove.append(i)
    test_graph_out.remove_edges(to_remove)
    original_nodec = single_val_graph.num_nodes()
    original_u, original_v = single_val_graph.edges()
    pred_nodec = test_graph_out.num_nodes()
    pred_u, pred_v = test_graph_out.edges()
    original_clusters = ut.get_clusters(original_nodec, original_u, original_v)
    pred_clusters = ut.get_clusters(pred_nodec, pred_u, pred_v)
    swap_original_clusters = ut.swap_clusters(original_clusters)
    swap_pred_clusters = ut.swap_clusters(pred_clusters)
    # Group-level scoring: a GT group is a TP when its best-matching predicted
    # group covers >= 2/3 of it and misses <= 1/3; -1 holds singletons.
    tp = 0
    fp = 0
    fn = 0
    t = 2 / 3
    t_ = 1 - t
    used_pred_clusters = [-1]
    for key, cluster in swap_original_clusters.items():
        if key == -1:
            continue
        else:
            matched_clusters = {}
            fullsize = len(cluster)
            for pred_key, pred_cluster in swap_pred_clusters.items():
                if pred_key == -1:
                    continue
                match = 0
                miss = 0
                for node in cluster:
                    if node in pred_cluster:
                        match += 1
                    else:
                        miss += 1
                # ==================================================================================================
                # INCL. MISSES DUE TO YOLO NOT RECOGNISING PEOPLE
                # ==================================================================================================
                # for rgt_key, rgt_val in frame_data_rica_gt.items():
                #     print(rgt_key, len(rgt_val))
                # People present in the RICA ground truth but absent from the
                # YOLO detections count as misses for every candidate cluster.
                rgt_node_count = len(frame_data_rica_gt[test_graph_frame_ids[single_val_idx]])
                ryo_node_count = len(frame_data_rica[test_graph_frame_ids[single_val_idx]])
                miss += rgt_node_count - ryo_node_count
                # ==================================================================================================
                # ==================================================================================================
                if match > 0:
                    matched_clusters[pred_key] = [match, miss]
            # Pick the predicted cluster with the highest overlap.
            max_match = 0
            best_match = {}
            for match_key, match_val in matched_clusters.items():
                if match_val[0] > max_match:
                    max_match = match_val[0]
                    best_match = {match_key: match_val}
            if len(list(best_match.keys())) == 0:
                continue
            used_pred_clusters.append(list(best_match.keys())[0])
            best_match_val = list(best_match.values())[0]
            match = best_match_val[0]
            miss = best_match_val[1]
            if match / fullsize >= t and miss / fullsize <= t_:
                tp += 1
                verdict = 'tp'
            else:
                fn += 1
                verdict = 'fn'
            # print(key, match, miss, fullsize, verdict)
    # Predicted clusters never matched to any GT group are false positives.
    for key in swap_pred_clusters.keys():
        if key not in used_pred_clusters:
            fp += 1
    # print('TP: %d, FN: %d, FP: %d' % (tp, fn, fp))
    if tp + fp == 0:
        precision = 0
    else:
        precision = tp / (tp + fp)
    if tp + fn == 0:
        recall = 0
    else:
        recall = tp / (tp + fn)
    if precision + recall == 0:
        # Degenerate frame still contributes a (near-zero) F1, unlike the
        # sibling script which skips such frames entirely.
        f1 = 0.01
        precision_scores.append(precision)
        recall_scores.append(recall)
        f1_scores.append(f1)
    else:
        f1 = 2 * (precision * recall) / (precision + recall)
        precision_scores.append(precision)
        recall_scores.append(recall)
        f1_scores.append(f1)
    if plot_tests:
        nx_g = test_graph_out.to_networkx().to_undirected()
        # pos = nx.kamada_kawai_layout(nx_g)
        # print(pos)
        # should assign pos on -1:1 scale based on coordinates
        # NOTE(review): `pos` is the leftover dict from the last
        # graph-construction loop above, i.e. the LAST frame's coordinates —
        # plots of other frames likely use stale positions; confirm.
        try:
            nx.draw(nx_g, pos, with_labels=True, node_color="#A0CBE2")
        except nx.exception.NetworkXError:
            pass
        plt.savefig(os.path.join('./quickplots', '%d.png' % test_c))
        # plt.show()
        plt.close()
# Persist the aggregate metrics; append to the tracker CSV, writing the header
# only when the file is first created.
if len(f1_scores) > 0:
    tracker_file_path = 'growl_param_analysis/test_rica_20100_model_output_f1_20210812_misses.csv'
    model_output_tracker = pd.DataFrame(
        list(zip([datetime.datetime.now()], [h_feats], [epochs], [len(f1_scores)],
                 [np.mean(precision_scores)], [np.mean(recall_scores)], [np.mean(f1_scores)])),
        columns=['time', 'feature_count', 'epoch_count', 'test_length', 'mean_precision', 'mean_recall',
                 'mean_f1'])
    if os.path.exists(tracker_file_path):
        model_output_tracker.to_csv(tracker_file_path, mode='a',
                                    index=False, header=False)
    else:
        model_output_tracker.to_csv(tracker_file_path, mode='w',
                                    index=False, header=True)
|
{"/growl_tests_code.py": ["/growl_utils/utils.py"]}
|
38,525
|
d4rkspir1t/groupdetection-dgmg
|
refs/heads/main
|
/extract_orientation.py
|
import csv
import json
import math
import numpy as np
import os
import matplotlib.pyplot as plt
from PIL import Image
import cv2
from pprint import pprint
from deep_orientation import beyer # noqa # pylint: disable=unused-import
from deep_orientation import mobilenet_v2 # noqa # pylint: disable=unused-import
from deep_orientation import beyer_mod_relu # noqa # pylint: disable=unused-import
from deep_orientation.inputs import INPUT_TYPES
from deep_orientation.inputs import INPUT_DEPTH, INPUT_RGB, INPUT_DEPTH_AND_RGB
from deep_orientation.outputs import OUTPUT_TYPES
from deep_orientation.outputs import (OUTPUT_REGRESSION, OUTPUT_CLASSIFICATION,
OUTPUT_BITERNION)
import deep_orientation.preprocessing as pre
import deep_orientation.postprocessing as post
import tensorflow as tf
import tensorflow.keras.backend as K
import utils.img as img_utils
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
tf.get_logger().setLevel('ERROR')
def load_network(model_name, weights_filepath,
input_type, input_height, input_width,
output_type,
sampling=False,
**kwargs):
# load model --------------------------------------------------------------
model_module = globals()[model_name]
model_kwargs = {}
if model_name == 'mobilenet_v2' and 'mobilenet_v2_alpha' in kwargs:
model_kwargs['alpha'] = kwargs.get('mobilenet_v2_alpha')
if output_type == OUTPUT_CLASSIFICATION:
assert 'n_classes' in kwargs
model_kwargs['n_classes'] = kwargs.get('n_classes')
model = model_module.get_model(input_type=input_type,
input_shape=(input_height, input_width),
output_type=output_type,
sampling=sampling,
**model_kwargs)
# load weights ------------------------------------------------------------
model.load_weights(weights_filepath)
return model
def calc_weighted_avg_depth(path, w, h, cx , cy):
# print(path)
image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
image_arr = np.asanyarray(image)
redu_width = w * 0.9
redu_height = h * 0.9
x_start = int(cx - (redu_width / 2))
x_end = int(cx + (redu_width / 2))
if x_end+1 >= 640:
x_end = 638
y_start = int(cy - (redu_height / 2))
y_end = int(cy + (redu_height / 2))
if y_end+1 >= 480:
y_end = 478
values = []
weights = []
centroid_depth = []
for i in range(y_start, y_end+1):
for j in range(x_start, x_end+1):
values.append(image_arr[i][j])
distx = abs(cx - j)
disty = abs(cy - i)
dist = np.sqrt((distx*distx+disty*disty))
if dist == 0:
weights.append(1.1)
centroid_depth = image_arr[i][j]
else:
weights.append(1 / dist)
weighted_avg = np.average(values, weights=weights)
# print(weighted_avg)
return weighted_avg, centroid_depth
def calc_orientation(dep_path, x, y, ex, ey):
image = cv2.imread(dep_path, cv2.IMREAD_GRAYSCALE)
# get width and height of the image
h, w = image.shape[:2]
person = image[y: ey, x: ex]
shape = (126, 48)
mask = person > 0
mask_resized = pre.resize_mask(mask.astype('uint8')*255, shape) > 0
depth = pre.resize_depth_img(person, shape)
depth = depth[..., None]
depth = pre.preprocess_img(
depth,
mask=mask_resized,
scale01='standardize' == 'scale01',
standardize='standardize'== 'standardize',
zero_mean=True,
unit_variance=True)
# cv2.imshow('person', depth)
# cv2.waitKey(0)
if K.image_data_format() == 'channels_last':
axes = 'b01c'
else:
axes = 'bc01'
depth = img_utils.dimshuffle(depth, '01c', axes)
global model
dep_out = model.predict(depth, batch_size=1)
output = post.biternion2deg(dep_out)
# print('\t\tOUTPUT:', output)
return output
def write_data(fr_no, x, y, w, h, cx, cy, cen_dep, dep_avg, orientation, group):
output = 'gt_db_orientation_20210412_cd_1.csv'
# output = 'yolov4_adaria_db.csv'
if not os.path.isfile(output):
with open(output, 'w') as csv_f:
csv_writer = csv.writer(csv_f, delimiter=';', lineterminator='\n')
header = ['fr_no', 'x', 'y', 'w', 'h', 'cx', 'cy', 'cen_dep', 'dep_avg', 'orient', 'group']
csv_writer.writerow(header)
with open(output, 'a') as csv_f:
csv_writer = csv.writer(csv_f, delimiter=';', lineterminator='\n')
info = [fr_no, x, y, w, h, cx, cy, cen_dep, dep_avg, orientation, group]
csv_writer.writerow(info)
def get_iou_score(box_a, box_b):
# determine the (x, y)-coordinates of the intersection rectangle
x_a = max(box_a[0], box_b[0])
y_a = max(box_a[1], box_b[1])
x_b = min(box_a[2], box_b[2])
y_b = min(box_a[3], box_b[3])
# compute the area of intersection rectangle
interArea = max(0, x_b - x_a + 1) * max(0, y_b - y_a + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
boxBArea = (box_b[2] - box_b[0] + 1) * (box_b[3] - box_b[1] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
model = load_network('beyer_mod_relu', '/home/viktor/Desktop/deep-orientation/trained_networks/beyer_mod_relu__depth__126x48__biternion__0_030000__1/weights_valid_0268',
'depth', 126, 48,
'biternion',
sampling=1 > 1,
n_classes=8,
mobilenet_v2_alpha=1.0)
# path = '../skeleton_tests/images/rndRICA'
path = '/home/viktor/Documents/annotated_segs/'
dep_path = '/home/viktor/Documents/exports/rgbd_dep_ts'
files = []
for (dirpath, dirnames, filenames) in os.walk(path):
files.extend(filenames)
rgb_to_dep = {}
img_rel = '/home/viktor/Desktop/group-recognition-scripts-kclsair/data_merging/export_files/rgb_to_dep.csv'
with open('/home/viktor/Desktop/group-recognition-scripts-kclsair/data_extraction/gtmerged_frdata.json') as json_file:
gt_frdata = json.load(json_file)
with open('/home/viktor/Desktop/group-recognition-scripts-kclsair/human_tracking/yolo_db_20210304.csv') as infile:
reader = csv.reader(infile)
yolo_frdata = {}
first = True
for row in reader:
if first:
first = False
else:
# print(row)
bbox_data = row[0].split(';')
frno = bbox_data[0]
if frno not in yolo_frdata.keys():
yolo_frdata[int(frno)] = []
yolo_frdata[int(frno)].append(bbox_data[1:])
# print(yolo_frdata)
with open(img_rel, 'r') as csv_f:
csv_reader = csv.reader(csv_f, delimiter=';')
for row in csv_reader:
try:
rgb_frno = int(row[1].split('frame')[-1].split('.')[0])
except:
continue
rgb_to_dep[rgb_frno] = row[3]
# print(rgb_to_dep.keys())
# gt_frame_list = list(gt_frdata.keys())
# print(gt_frame_list)
# count = 0
# for file in files:
# count += 1
# if count % 1000 == 0:
# print('Done with %d of %d' % (count, len(files)))
# frame_no = int(os.path.splitext(file)[0].split('frame')[-1])
# if str(frame_no) not in gt_frame_list:
# continue
# depth_image_path = os.path.join(dep_path, rgb_to_dep[file])
# depth_image_raw = cv2.imread(depth_image_path, cv2.IMREAD_GRAYSCALE)
depth_path_prefix = '/home/viktor/Documents/exports/rgbd_dep_ts'
output_dict = {}
for fr_no, boxes in gt_frdata.items():
print(fr_no)
fr_no = int(fr_no)
output_dict[fr_no] = []
for bbox in boxes:
try:
x = int(bbox['x1'])
y = int(bbox['y1'])
w = int(bbox['x2'])-x
h = int(bbox['y2'])-y
label = int(bbox['label'])
except TypeError:
x = int(bbox[0])
y = int(bbox[1])
w = int(bbox[2])
h = int(bbox[3])
label = int(bbox[7])
cx = int((x + (x + w)) / 2.0)
cy = int((y + (y + h)) / 2.0)
print('\t', x, y, w, h, cx, cy, label)
dep_path = os.path.join(depth_path_prefix, rgb_to_dep[fr_no])
# ===========================================================
# YOLO OUTPUT CHANGES
# w_avg, cdep = calc_weighted_avg_depth(dep_path, w, h, cx, cy)
# ori = calc_orientation(dep_path, x, y, x+w, y+h)[0]
# print('\t\t\t', w_avg, ori)
# group = label
# output_dict[fr_no].append(list(
# [str(x), str(y), str(w), str(h), str(cx), str(cy), "%.2f" % cdep, "%.2f" % w_avg, "%.2f" % ori,
# str(group)]))
# ===========================================================
try:
yolo_frame = yolo_frdata[fr_no]
except:
continue
for ybox in yolo_frame:
yo_x = int(ybox[0])
yo_y = int(ybox[1])
yo_w = int(ybox[2])
yo_h = int(ybox[3])
yo_cx = int(ybox[4])
yo_cy = int(ybox[5])
w_avg = float(ybox[6])
_, cdep = calc_weighted_avg_depth(dep_path, yo_w, yo_h, yo_cx, yo_cy)
box_a = [x, y, x + w, y + h]
box_b = [yo_x, yo_y, yo_x + yo_w, yo_y + yo_h]
iou = get_iou_score(box_a, box_b)
# print('IOU', iou)
if iou > 0.15:
print('FOUND ONE')
try:
ori = calc_orientation(dep_path, yo_x, yo_y, yo_x + yo_w, yo_y + yo_h)[0]
except:
continue
print('\t\t\t', w_avg, ori)
group = label
output_dict[fr_no].append(list([str(yo_x), str(yo_y), str(yo_w), str(yo_h), str(yo_cx), str(yo_cy), "%.2f" % cdep, "%.2f" % w_avg, "%.2f" % ori, str(group)]))
# ===========================================================
# ===========================================================
# break
print('---')
# print(output_dict)
with open('yolo_db_orientation_20210810_cd_1.json', 'w') as outfile:
json.dump(output_dict, outfile, indent=4)
|
{"/growl_tests_code.py": ["/growl_utils/utils.py"]}
|
38,526
|
d4rkspir1t/groupdetection-dgmg
|
refs/heads/main
|
/retest_growl.py
|
from subprocess import Popen
import sys
import time
filename = 'growl_tests_code.py'
iter = 0
while True:
print("\nStarting " + filename)
p = Popen("python " + filename, shell=True)
p.wait()
iter += 1
time.sleep(60)
print('DONE ', iter, ' STEPS')
if iter == 30:
break
|
{"/growl_tests_code.py": ["/growl_utils/utils.py"]}
|
38,539
|
vongola12324/HWReviewers
|
refs/heads/master
|
/main.py
|
import importlib.util
import os
import codecs
from subprocess import *
from pathlib import Path
from stu import *
from chardet.universaldetector import UniversalDetector
from datetime import datetime
# Homework config
# TODO: Use a json to save config
availableExt = ('.c',)
compiler = {
'.c': 'gcc-6',
'.cpp': 'g++-6'
}
# Select input folder
source_path = None
if importlib.util.find_spec('tkinter') is not None:
import tkinter.filedialog
import tkinter
# print("Please select your input folder:")
# while source_path is None:
root = tkinter.Tk()
root.update()
source_path = tkinter.filedialog.askdirectory(title="Choose Homework source", mustexist=True)
root.destroy()
else:
while source_path is None:
source_path = input('Please select your input folder: ')
if not Path(source_path).exists():
source_path = None
output_path = str(Path(source_path).joinpath('out'))
Path(output_path).mkdir(exist_ok=True)
print("Input: " + source_path)
print("Output: " + output_path)
# Get all homework file
files = []
for ext in availableExt:
files.extend(Path(source_path).glob('*' + ext))
# For each file, Compile and Run
students = []
for file in files:
file_path = str(file)
# Add user data
students.append(Student(file.stem))
# Check encoding
detector = UniversalDetector()
with open(file_path, 'rb') as fin:
line = fin.read()
detector.reset()
detector.feed(line)
if detector.done:
break
detector.close()
file_encoding = str(detector.result.get("encoding"))
if file_encoding.lower() != "utf-8":
BLOCKSIZE = 1048576 # or some other, desired size in bytes
with codecs.open(file_path, "r", file_encoding.lower()) as sourceFile:
with codecs.open(file_path + "utf8.c", "w", "utf-8") as targetFile:
while True:
contents = sourceFile.read(BLOCKSIZE)
if not contents:
break
targetFile.write(contents)
os.remove(file_path)
os.rename(file_path + "utf8.c", file_path)
# Compile
pipeline = None
binary = str(Path(output_path).joinpath(students[-1].nid + '.bin'))
print("===============================")
print(students[-1].name + " " + students[-1].nid + " (" + str(files.index(file)+1) + "/" + str(len(files)) + ")")
print("===============================")
try:
pipeline = run(
[compiler[file.suffix] + " \"" + file_path + "\" -o " + binary],
stdout=PIPE, shell=True)
if pipeline.stdout:
print(pipeline.stdout.decode("utf-8"))
if pipeline.returncode == 0:
students[-1].status = Status.compile_success
print("Compile successfully.")
else:
raise Exception('Compile Error!')
except:
students[-1].status = Status.compile_fail
# Run
writefile = False
if students[-1].status == Status.compile_success:
os.system(binary)
students[-1].status = Status.run_success
# Check Success
if students[-1].status == Status.run_success:
if writefile:
# TODO: add auto current
pass
else:
print()
current = input('Does this homework current?[Y/n] ')
if current == 'n' or current == 'N':
students[-1].status = Status.run_not_current
else:
students[-1].status = Status.run_current
# Log all students
print()
print(">> Status: ")
fout = open(str(Path(output_path).joinpath(str(datetime.now())+".log")), 'w+')
for student in students:
fout.write("{0}{1}: {2}\n".format(student.nid, student.name, student.status))
print("{0}{1}: {2}".format(student.nid, student.name, student.status))
fout.close()
|
{"/main.py": ["/stu.py"]}
|
38,540
|
vongola12324/HWReviewers
|
refs/heads/master
|
/stu.py
|
from enum import Enum
class Student:
def __init__(self, filename=None):
self.filename = filename
temp = filename.split('_')
self.name = temp[0]
self.nid = temp[-1]
self.status = Status.waiting
class Status(Enum):
# Error:
can_not_compile = -1
can_not_run = -2
compile_fail = -3
run_fail = -4
other = -99
# Info
waiting = 0
compile_success = 1
run_success = 2
run_current = 3
run_not_current = 4
|
{"/main.py": ["/stu.py"]}
|
38,563
|
zw-team/UNet-Crowd
|
refs/heads/master
|
/eval/eval_whole.py
|
import random
import math
import numpy as np
import sys
from PIL import Image
import time
import torch
from utils import show
import scipy.io as scio
def eval_model(config, eval_loader, modules, if_show_sample=False):
net = modules['model'].eval()
ae_batch = modules['ae']
se_batch = modules['se']
ground_truth_dir_path=config['gt_path_t']
MAE_ = []
MSE_ = []
rand_number = random.randint(0, config['eval_num'] - 1)
counter = 0
time_cost = 0
for eval_img_index, eval_img, eval_gt in eval_loader:
eval_gt_shape = eval_gt.shape
start = time.time()
with torch.no_grad():
eval_prediction = net(eval_img)
torch.cuda.empty_cache()
torch.cuda.synchronize()
end = time.time()
time_cost += (end - start)
gt_path = ground_truth_dir_path + "/GT_IMG_" + str(eval_img_index.cpu().numpy()[0]) + ".mat"
gt_counts = len(scio.loadmat(gt_path)['image_info'][0][0][0][0][0])
batch_ae = ae_batch(eval_prediction, gt_counts).data.cpu().numpy()
batch_se = se_batch(eval_prediction, gt_counts).data.cpu().numpy()
validate_pred_map = np.squeeze(eval_prediction.permute(0, 2, 3, 1).data.cpu().numpy())
validate_gt_map = np.squeeze(eval_gt.permute(0, 2, 3, 1).data.cpu().numpy())
pred_counts = np.sum(validate_pred_map)
if rand_number == counter and if_show_sample:
origin_image = Image.open(config['img_path_t'] + "/IMG_" + str(eval_img_index.numpy()[0]) + ".jpg")
show(origin_image, validate_gt_map, validate_pred_map, eval_img_index.numpy()[0])
sys.stdout.write('The gt counts of the above sample:{}, and the pred counts:{}\n'.format(gt_counts, pred_counts))
MAE_.append(batch_ae)
MSE_.append(batch_se)
counter += 1
# calculate the validate loss, validate MAE and validate RMSE
MAE_ = np.reshape(MAE_, [-1])
MSE_ = np.reshape(MSE_, [-1])
validate_MAE = np.mean(MAE_)
validate_RMSE = np.sqrt(np.mean(MSE_))
return validate_MAE, validate_RMSE, time_cost
|
{"/net/Encoder.py": ["/net/MagnifyCell.py"], "/net/MNet.py": ["/net/MagnifyCell.py", "/net/Decoder.py", "/net/Encoder.py"], "/net/Decoder.py": ["/net/MagnifyCell.py"]}
|
38,564
|
zw-team/UNet-Crowd
|
refs/heads/master
|
/net/Encoder.py
|
import torch
import torch.nn as nn
from net.MagnifyCell import BasicMagnifyCell
from net.BasicConv2d import BasicConv2d
class Encoder(nn.Module):
def __init__(self, pretrain=True, IF_CELL=True, IF_BN=True, **kwargs):
super(Encoder, self).__init__()
self.B1_C2 = nn.Sequential(
BasicMagnifyCell(3, 64, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(3, 64, 3, 1, 1, if_Bn=IF_BN),
BasicMagnifyCell(64, 64, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(64, 64, 3, 1, 1, if_Bn=IF_BN)
)
self.B2_C2 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2),
BasicMagnifyCell(64, 128, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(64, 128, 3, 1, 1, if_Bn=IF_BN),
BasicMagnifyCell(128, 128, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(128, 128, 3, 1, 1, if_Bn=IF_BN)
)
self.B3_C3 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2),
BasicMagnifyCell(128, 256, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(128, 256, 3, 1, 1, if_Bn=IF_BN),
BasicMagnifyCell(256, 256, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(256, 256, 3, 1, 1, if_Bn=IF_BN),
BasicMagnifyCell(256, 256, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(256, 256, 3, 1, 1, if_Bn=IF_BN)
)
self.B4_C3 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2),
BasicMagnifyCell(256, 512, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(256, 512, 3, 1, 1, if_Bn=IF_BN),
BasicMagnifyCell(512, 512, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(512, 512, 3, 1, 1, if_Bn=IF_BN),
BasicMagnifyCell(512, 512, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(512, 512, 3, 1, 1, if_Bn=IF_BN)
)
self.B5_C3 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2),
BasicMagnifyCell(512, 512, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(512, 512, 3, 1, 1, if_Bn=IF_BN),
BasicMagnifyCell(512, 512, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(512, 512, 3, 1, 1, if_Bn=IF_BN),
BasicMagnifyCell(512, 512, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(512, 512, 3, 1, 1, if_Bn=IF_BN)
)
if pretrain == False:
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
else:
if IF_BN:
self.load_state_dict(
torch.load(
"/home/zzn/PycharmProjects/MagNet/StateDicts/vggbn16_10conv2d_statedict.pkl"
)
)
else:
self.load_state_dict(
torch.load(
"/home/zzn/PycharmProjects/MagNet/StateDicts/vgg16_10conv2d_statedict.pkl"
)
)
def forward(self, x):
B1_C2_output = self.B1_C2(x)
B2_C2_output = self.B2_C2(B1_C2_output)
B3_C3_output = self.B3_C3(B2_C2_output)
B4_C3_output = self.B4_C3(B3_C3_output)
B5_C3_output = self.B5_C3(B4_C3_output)
return B5_C3_output, B4_C3_output, B3_C3_output, B2_C2_output
|
{"/net/Encoder.py": ["/net/MagnifyCell.py"], "/net/MNet.py": ["/net/MagnifyCell.py", "/net/Decoder.py", "/net/Encoder.py"], "/net/Decoder.py": ["/net/MagnifyCell.py"]}
|
38,565
|
zw-team/UNet-Crowd
|
refs/heads/master
|
/op_wrapper/HybridMSE.py
|
import torch
import torch.nn as nn
import torch.nn.functional as functional
from torch.autograd import Function
from torch.nn import Module
class HybridMSEFunction(Function):
@staticmethod
def forward(ctx, *args):
if len(args) != 3:
print("wrong input parameters number, check the input")
return
pred = args[0]
gt = args[1]
ctx.resolutions = args[2]
# loss = torch.pow(pred.sum() - gt.sum(), 2) / 160000
loss = torch.pow(pred - gt, 2).sum()
ctx.save_for_backward(pred, gt)
return loss
@staticmethod
def backward(ctx, *grad_outputs):
if len(grad_outputs) != 1:
print("Wrong output number, check your output")
return
pred, gt = ctx.saved_tensors
grad_weights = pred - gt
grad_pred = grad_weights
for i in ctx.resolutions:
ds = functional.avg_pool2d(grad_weights, kernel_size=i, stride=i) * i
up = functional.interpolate(ds, scale_factor=i, mode='nearest')
sign = ((torch.abs(grad_pred) - torch.abs(up)) < 0).float()
grad_pred = grad_pred * sign + up * (1 - sign)
grad_pred *= grad_outputs[0]
return grad_pred, None, None
class HybridMSELoss(Module):
def __init__(self, resolutions):
super(HybridMSELoss, self).__init__()
self.resolutions = resolutions
def forward(self, pred, gt):
return HybridMSEFunction.apply(pred, gt, self.resolutions)
|
{"/net/Encoder.py": ["/net/MagnifyCell.py"], "/net/MNet.py": ["/net/MagnifyCell.py", "/net/Decoder.py", "/net/Encoder.py"], "/net/Decoder.py": ["/net/MagnifyCell.py"]}
|
38,566
|
zw-team/UNet-Crowd
|
refs/heads/master
|
/eval/eval_by_cropping.py
|
import random
import math
import numpy as np
import sys
from PIL import Image
from utils import *
import time
import torch
import scipy.io as scio
def eval_model(config, eval_loader, modules, if_show_sample=False):
net = modules['model'].eval()
ae_batch = modules['ae']
se_batch = modules['se']
atten = modules['shape']
criterion = modules['loss']
MAE_ = []
MSE_ = []
loss_ = []
time_cost = 0
rand_number = random.randint(0, config['eval_num'] - 1)
counter = 0
for eval_img_index, eval_img, eval_gt in eval_loader:
start = time.time()
eval_patchs = torch.squeeze(eval_img)
if config['stage'] == 'shape':
eval_gt = (eval_gt > 0.001).float()
eval_gt_shape = eval_gt.shape
prediction_map = torch.zeros(eval_gt_shape).cuda()
with torch.no_grad():
if config['stage'] == 'shape':
eval_prediction = net(eval_patchs).clamp(0, 1)
else:
eval_prediction = net(eval_patchs)
eval_patchs_shape = eval_prediction.shape
torch.cuda.empty_cache()
# print(eval_patchs_shape, eval_gt_shape)
for i in range(3):
for j in range(3):
start_h = math.floor(eval_patchs_shape[2] / 4)
start_w = math.floor(eval_patchs_shape[3] / 4)
valid_h = eval_patchs_shape[2] // 2
valid_w = eval_patchs_shape[3] // 2
h_pred = math.floor(3 * eval_patchs_shape[2] /
4) + (eval_patchs_shape[2] // 2) * (i - 1)
w_pred = math.floor(3 * eval_patchs_shape[3] /
4) + (eval_patchs_shape[3] // 2) * (j - 1)
if i == 0:
valid_h = math.floor(3 * eval_patchs_shape[2] / 4)
start_h = 0
h_pred = 0
elif i == 2:
valid_h = math.ceil(3 * eval_patchs_shape[2] / 4)
if j == 0:
valid_w = math.floor(3 * eval_patchs_shape[3] / 4)
start_w = 0
w_pred = 0
elif j == 2:
valid_w = math.ceil(3 * eval_patchs_shape[3] / 4)
prediction_map[:, :, h_pred:h_pred + valid_h, w_pred:w_pred +
valid_w] += eval_prediction[
i * 3 + j:i * 3 + j +
1, :, start_h:start_h +
valid_h, start_w:start_w + valid_w]
torch.cuda.synchronize()
end = time.time()
time_cost += (end - start)
gt_path = config['gt_path_t'] + "/GT_IMG_" + str(
eval_img_index.cpu().numpy()[0]) + ".mat"
loss = criterion(prediction_map, eval_gt)
loss_.append(loss.data.item())
gt_counts = len(scio.loadmat(gt_path)['image_info'][0][0][0][0][0])
batch_ae = ae_batch(prediction_map, gt_counts).data.cpu().numpy()
batch_se = se_batch(prediction_map, gt_counts).data.cpu().numpy()
validate_pred_map = np.squeeze(
prediction_map.permute(0, 2, 3, 1).data.cpu().numpy())
validate_gt_map = np.squeeze(
eval_gt.permute(0, 2, 3, 1).data.cpu().numpy())
pred_counts = np.sum(validate_pred_map)
# random show 1 sample
rate = abs(gt_counts - pred_counts) / gt_counts
# if rand_number == counter and if_show_sample:
if rate > 0.3:
origin_image = Image.open(config['img_path_t'] + "/IMG_" +
str(eval_img_index.numpy()[0]) + ".jpg")
show(origin_image, validate_gt_map, validate_pred_map,
eval_img_index.numpy()[0])
sys.stdout.write(
'The gt counts of the above sample:{}, and the pred counts:{}\n'
.format(gt_counts, pred_counts))
MAE_.append(batch_ae)
MSE_.append(batch_se)
counter += 1
# calculate the validate loss, validate MAE and validate RMSE
MAE_ = np.reshape(MAE_, [-1])
MSE_ = np.reshape(MSE_, [-1])
loss_ = np.reshape(loss_, [-1])
validate_MAE = np.mean(MAE_)
validate_RMSE = np.sqrt(np.mean(MSE_))
validate_loss = np.mean(loss_)
return validate_MAE, validate_RMSE, validate_loss, time_cost
|
{"/net/Encoder.py": ["/net/MagnifyCell.py"], "/net/MNet.py": ["/net/MagnifyCell.py", "/net/Decoder.py", "/net/Encoder.py"], "/net/Decoder.py": ["/net/MagnifyCell.py"]}
|
38,567
|
zw-team/UNet-Crowd
|
refs/heads/master
|
/net/MNet.py
|
import torch
import torch.nn as nn
import torchvision.models as model
import torch.nn.functional as functional
from net.MagnifyCell import BasicMagnifyCell
from net.Decoder import Decoder
from net.Encoder import Encoder
class MNet(nn.Module):
def __init__(self, pretrain=True, IF_EN_CELL=False, IF_DE_CELL=False, IF_BN=True, **kwargs):
super(MNet, self).__init__()
self.encoder = Encoder(pretrain, IF_EN_CELL, IF_BN, **kwargs)
self.decoder = Decoder(IF_DE_CELL, IF_BN, **kwargs)
self.stage = kwargs['stage']
if self.stage == 'shape':
self.output = torch.nn.Sigmoid()
else:
self.output = torch.nn.ReLU(inplace=True)
def forward(self, x):
B5_C3, B4_C3, B3_C3, B2_C2 = self.encoder(x)
output = self.decoder(B5_C3, B4_C3, B3_C3, B2_C2)
output = self.output(output)
return output
|
{"/net/Encoder.py": ["/net/MagnifyCell.py"], "/net/MNet.py": ["/net/MagnifyCell.py", "/net/Decoder.py", "/net/Encoder.py"], "/net/Decoder.py": ["/net/MagnifyCell.py"]}
|
38,568
|
zw-team/UNet-Crowd
|
refs/heads/master
|
/net/MagnifyCell.py
|
import torch
import torch.nn as nn
import torch.nn.functional as functional
class BasicMagnifyCell(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, magnify_factor=1, pooling_size=2, if_Bn=True, activation=nn.ReLU(inplace=True)):
super(BasicMagnifyCell, self).__init__()
self.mag_factor = magnify_factor
self.downSampling = torch.nn.MaxPool2d(kernel_size=pooling_size, stride=pooling_size)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
self.if_Bn = if_Bn
if self.if_Bn:
self.Bn = nn.BatchNorm2d(out_channels)
self.activation = activation
def forward(self, x):
down_x = self.downSampling(x)
coarse_grain = functional.interpolate(down_x, size=x.shape[2:4], mode="bilinear", align_corners=True)
fine_grain = x - coarse_grain
mag_x = x + fine_grain * self.mag_factor
output = self.conv2d(mag_x)
if self.if_Bn:
output = self.Bn(output)
output = self.activation(output)
return output
|
{"/net/Encoder.py": ["/net/MagnifyCell.py"], "/net/MNet.py": ["/net/MagnifyCell.py", "/net/Decoder.py", "/net/Encoder.py"], "/net/Decoder.py": ["/net/MagnifyCell.py"]}
|
38,569
|
zw-team/UNet-Crowd
|
refs/heads/master
|
/net/Decoder.py
|
import torch
import torch.nn as nn
import torch.nn.functional as functional
from net.MagnifyCell import BasicMagnifyCell
from net.BasicConv2d import BasicConv2d
class Decoder(nn.Module):
def __init__(self, IF_CELL=True, IF_BN=True, **kwargs):
super(Decoder, self).__init__()
self.Decoder_Block_1 = nn.Sequential(
BasicConv2d(1024, 256, 1, 1, 0, if_Bn=IF_BN),
BasicMagnifyCell(256, 256, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(256, 256, 3, 1, 1, if_Bn=IF_BN)
)
self.Decoder_Block_2 = nn.Sequential(
BasicConv2d(512, 128, 1, 1, 0, if_Bn=IF_BN),
BasicMagnifyCell(128, 128, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(128, 128, 3, 1, 1, if_Bn=IF_BN)
)
self.Decoder_Block_3 = nn.Sequential(
BasicConv2d(256, 64, 1, 1, 0, if_Bn=IF_BN),
BasicMagnifyCell(64, 64, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(64, 64, 3, 1, 1, if_Bn=IF_BN),
BasicMagnifyCell(64, 32, 3, 1, 1, if_Bn=IF_BN, **kwargs) if IF_CELL else BasicConv2d(64, 32, 3, 1, 1, if_Bn=IF_BN),
nn.Conv2d(32, 1, 1, 1, 0)
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, B5_C3, B4_C3, B3_C3, B2_C2):
concat_1 = torch.cat(
[functional.interpolate(
B5_C3,
size=B4_C3.shape[2:4],
mode="bilinear",
align_corners=True),
B4_C3],
dim=1)
concat_2 = torch.cat(
[functional.interpolate(
self.Decoder_Block_1(concat_1),
size=B3_C3.shape[2:4],
mode="bilinear",
align_corners=True),
B3_C3],
dim=1)
concat_3 = torch.cat(
[functional.interpolate(
self.Decoder_Block_2(concat_2),
size=B2_C2.shape[2:4],
mode="bilinear",
align_corners=True),
B2_C2],
dim=1)
return self.Decoder_Block_3(concat_3)
|
{"/net/Encoder.py": ["/net/MagnifyCell.py"], "/net/MNet.py": ["/net/MagnifyCell.py", "/net/Decoder.py", "/net/Encoder.py"], "/net/Decoder.py": ["/net/MagnifyCell.py"]}
|
38,601
|
panzhigang1989/pyNew
|
refs/heads/master
|
/tests/conftest.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/7/11 11:06 下午
# @Author : Aries
# @File : conftest.py
# @Software: IntelliJ IDEA
# 参数和结合fixture使用
# 1。传入值和数据
# 2。传入fixture方法 将数据传入到fixture方法中,
# fixture使用request接受数据,方法体中使用request.param使用数据
import pytest
import yaml
@pytest.fixture()
def calc():
print("开始计算")
# yield 激活fixture的teardown方法
# print(request.param[0])
yield
print("计算结束")
# session每个项目执行一次 module每个py文件执行一次
@pytest.fixture()
def login():
print("登陆")
# yield 激活fixture的teardown方法
# print(request.param[0])
yield ['username', 'password']
print("teardown")
|
{"/tests/test_calc.py": ["/pythoncode/calc.py"]}
|
38,602
|
panzhigang1989/pyNew
|
refs/heads/master
|
/pythoncode/calc.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/7/11 10:47 上午
# @Author : Aries
# @File : calc.py
# @Software: IntelliJ IDEA
# 计算器功能
from decimal import Decimal
class Calculator:
# 加法
def add(self, a, b):
return Decimal(str(a)) + Decimal(str(b))
# 除法
def div(self, a, b):
if b == 0:
print('除数不能为0')
return 0
else:
return Decimal(a / b).quantize(Decimal('0.00'))
# 减法
def decrease(self, a, b):
return Decimal(a-b).quantize(Decimal('0.00'))
# 乘法
def multiplication(self, a, b):
return Decimal(a * b).quantize(Decimal('0.00'))
|
{"/tests/test_calc.py": ["/pythoncode/calc.py"]}
|
38,603
|
panzhigang1989/pyNew
|
refs/heads/master
|
/pythoncode/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/7/11 11:08 上午
# @Author : Aries
# @File : __init__.py.py
# @Software: IntelliJ IDEA
#
|
{"/tests/test_calc.py": ["/pythoncode/calc.py"]}
|
38,604
|
panzhigang1989/pyNew
|
refs/heads/master
|
/tests/test_calc.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/7/11 11:46 下午
# @Author : Aries
# @File : test_calc.py
# @Software: IntelliJ IDEA
#
import pytest
import sys
import yaml
sys.path.append("..")
print(sys.path)
from pythoncode.calc import Calculator
from decimal import Decimal
class TestCalc:
    """pytest suite for Calculator; cases come from ./../calc.yml at import time."""
    # Runs once before any test in the class (pytest class-level setup).
    def setup_class(self):
        print("类级别setup")
        self.calc = Calculator()
    def teardown_class(self):
        print("类级别teardown")
    # NOTE(review): every test also declares a `calc` argument, presumably a
    # pytest fixture defined in conftest.py, yet the bodies use self.calc —
    # confirm whether the fixture parameter is still needed.
    # NOTE(review): open() here is never closed explicitly; the handle is left
    # to the garbage collector. The yaml file is read once per parametrize
    # decorator, at module import time.
    @pytest.mark.parametrize('list1', yaml.safe_load(open('./../calc.yml'))['add'])
    def test_add(self, list1, calc):
        # list1 is [operand_a, operand_b, expected_result].
        add_result = self.calc.add(list1[0], list1[1])
        print(f'实际结果为:{add_result}, 类型为{type(add_result)}')
        assert Decimal(list1[2]).quantize(Decimal('0.00')) == add_result
    @pytest.mark.parametrize('list1', yaml.safe_load(open('./../calc.yml'))['div'])
    def test_div(self, list1, calc):
        assert Decimal(list1[2]).quantize(Decimal('0.00')) == self.calc.div(list1[0], list1[1])
    @pytest.mark.parametrize('list1', yaml.safe_load(open('./../calc.yml'))['decrease'])
    def test_decrease(self, list1, calc):
        assert Decimal(list1[2]).quantize(Decimal('0.00')) == self.calc.decrease(list1[0], list1[1])
    @pytest.mark.parametrize('list1', yaml.safe_load(open('./../calc.yml'))['multiplication'])
    def test_multiplication(self, list1, calc):
        print(list1)
        assert Decimal(list1[2]).quantize(Decimal('0.00')) == self.calc.multiplication(list1[0], list1[1])
|
{"/tests/test_calc.py": ["/pythoncode/calc.py"]}
|
38,634
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/ux/components/configuration_wizard/params_feeder.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parameters feeder module."""
import os
from typing import Any, Dict, List, Optional, Union
from lpot.ux.utils.exceptions import ClientErrorException
from lpot.ux.utils.utils import (
check_module,
framework_extensions,
is_model_file,
load_dataloader_config,
load_help_lpot_params,
load_model_config,
load_transforms_config,
)
class Feeder:
    """Parameters feeder class.

    Resolves the list of possible values for a single configuration
    parameter (framework, domain, model, metric, ...) given the context
    already selected by the user.
    """

    def __init__(self, data: Dict[str, Any]) -> None:
        """Initialize parameters feeder class."""
        # Name of the parameter to resolve, e.g. "framework" or "metric".
        self.param: Optional[str] = data.get("param")
        # Context already chosen by the user (framework, domain, ...).
        self.config: Dict[str, Any] = data.get("config", {})

    def feed(self) -> Dict[str, Any]:
        """Feed the parameters.

        Returns:
            Dict mapping the requested parameter name to its possible values.

        Raises:
            ClientErrorException: if no parameter was requested or it is unknown.
        """
        # Dispatch table: parameter name -> provider method.
        param_mapper = {
            "framework": self.get_frameworks,
            "domain": self.get_domains,
            "model": self.get_models,
            "dataloader": self.get_dataloaders,
            "transform": self.get_transforms,
            "objective": self.get_objectives,
            "strategy": self.get_strategies,
            "quantization_approach": self.get_quantization_approaches,
            "metric": self.get_metrics,
        }
        if self.param is None:
            raise ClientErrorException("Parameter not defined.")
        get_param = param_mapper.get(self.param, None)
        if get_param is None:
            raise ClientErrorException(
                f"Could not found method for {self.param} parameter.",
            )
        return {
            self.param: get_param(),
        }

    @staticmethod
    def get_frameworks() -> List[dict]:
        """Get list of available frameworks."""
        frameworks = []
        models_config = load_model_config()
        for framework in models_config.keys():
            # "__help__<name>" keys store descriptions, not frameworks.
            if framework.startswith("__help__"):
                continue
            # Only expose frameworks with a registered file extension.
            if framework not in framework_extensions.keys():
                continue
            help_msg = models_config.get(f"__help__{framework}", "")
            frameworks.append({"name": framework, "help": help_msg})
        return frameworks

    def get_domains(self) -> List[Dict[str, Any]]:
        """Get list of available domains for the configured framework."""
        if self.config is None:
            raise ClientErrorException("Config not found.")
        framework = self.config.get("framework", None)
        if framework is None:
            raise ClientErrorException("Framework not set.")
        models_config = load_model_config()
        domains = []
        for domain in models_config.get(framework, {}).keys():
            if domain.startswith("__help__"):
                continue
            help_msg = models_config.get(framework, {}).get(f"__help__{domain}", "")
            domains.append(
                {
                    "name": domain,
                    "help": help_msg,
                },
            )
        return domains

    def get_models(self) -> List[Dict[str, Any]]:
        """Get list of models for the configured framework and domain."""
        if self.config is None:
            raise ClientErrorException("Config not found.")
        framework = self.config.get("framework", None)
        if framework is None:
            raise ClientErrorException("Framework not set.")
        domain = self.config.get("domain", None)
        if domain is None:
            raise ClientErrorException("Domain not set.")
        models_config = load_model_config()
        raw_models_dict = models_config.get(framework, {}).get(domain, {})
        models = []
        for model in raw_models_dict.keys():
            if model.startswith("__help__"):
                continue
            help_msg = raw_models_dict.get(f"__help__{model}", "")
            models.append({"name": model, "help": help_msg})
        return models

    def get_available_models(self, workspace_path: str) -> List[str]:
        """Get list of available models in workspace."""
        available_models = []
        # BUGFIX: get_models() returns dicts ({"name": ..., "help": ...});
        # membership must be tested against the names, not the raw dict list,
        # otherwise no filename stem could ever match and the result was
        # always empty.
        model_names = {model["name"] for model in self.get_models()}
        for filename in os.listdir(workspace_path):
            name = os.path.splitext(filename)[0]
            if (
                os.path.isfile(os.path.join(workspace_path, filename))
                and name in model_names
                and is_model_file(filename)
            ):
                available_models.append(filename)
        return available_models

    def get_dataloaders(self) -> List[Dict[str, Any]]:
        """Get available dataloaders for the configured framework."""
        if self.config is None:
            raise ClientErrorException("Config not found.")
        framework = self.config.get("framework", None)
        if framework is None:
            raise ClientErrorException("Framework not set.")
        for fw_dataloader in load_dataloader_config():
            if fw_dataloader.get("name") == framework:
                return fw_dataloader.get("params", [])
        return []

    def get_transforms(self) -> List[Dict[str, Any]]:
        """Get available transforms for the configured framework."""
        if self.config is None:
            raise ClientErrorException("Config not found.")
        framework = self.config.get("framework", None)
        if framework is None:
            raise ClientErrorException("Framework not set.")
        for fw_transforms in load_transforms_config():
            if fw_transforms.get("name") == framework:
                return fw_transforms.get("params", [])
        return []

    @staticmethod
    def get_objectives() -> List[dict]:
        """Get list of supported objectives."""
        check_module("lpot")
        from lpot.objective import OBJECTIVES

        help_dict = load_help_lpot_params("objectives")
        objectives = []
        for objective in OBJECTIVES.keys():
            help_msg = help_dict.get(f"__help__{objective}", "")
            objectives.append({"name": objective, "help": help_msg})
        return objectives

    @staticmethod
    def get_strategies() -> List[Dict[str, Any]]:
        """Get list of supported strategies."""
        check_module("lpot")
        from lpot.strategy import STRATEGIES

        help_dict = load_help_lpot_params("strategies")
        strategies = []
        for strategy in STRATEGIES.keys():
            help_msg = help_dict.get(f"__help__{strategy}", "")
            strategies.append({"name": strategy, "help": help_msg})
        return strategies

    def get_quantization_approaches(self) -> List[Dict[str, Any]]:
        """Get list of supported quantization approaches."""
        # Static quantization is always available.
        approaches = [
            {
                "name": "post_training_static_quant",
                "help": "help placeholder for post_training_static_quant",
            },
        ]
        framework = self.config.get("framework", None)
        # Dynamic quantization is only offered for pytorch and onnxrt.
        if framework in ["pytorch", "onnxrt"]:
            approaches.append(
                {
                    "name": "post_training_dynamic_quant",
                    "help": f"help placeholder for {framework} post_training_dynamic_quant",
                },
            )
        return approaches

    def get_metrics(self) -> List[Dict[str, Any]]:
        """Get list of possible metrics for the configured framework."""
        check_module("lpot")
        framework = self.config.get("framework", None)
        if framework is None:
            raise ClientErrorException("Framework not set.")
        # pytorch metrics come from the ignite package.
        if framework == "pytorch":
            check_module("ignite")
        else:
            check_module(framework)
        from lpot.metric.metric import framework_metrics

        help_dict = load_help_lpot_params("metrics")
        # onnxrt metrics are registered under the qlinearops backend name.
        if framework == "onnxrt":
            raw_metric_list = list(
                framework_metrics.get("onnxrt_qlinearops")().metrics.keys(),
            )
        else:
            raw_metric_list = list(framework_metrics.get(framework)().metrics.keys())
        raw_metric_list += ["custom"]
        metrics_updated = update_metric_parameters(raw_metric_list)
        # Attach "__help__" entries for every metric and metric parameter;
        # iterate over copies because the dicts are mutated in the loop.
        for metric, value in metrics_updated.copy().items():
            if isinstance(value, dict):
                for key in value.copy().keys():
                    help_msg_key = f"__help__{key}"
                    metrics_updated[metric][help_msg_key] = help_dict.get(
                        metric,
                        {},
                    ).get(help_msg_key, "")
            metrics_updated[f"__help__{metric}"] = help_dict.get(
                f"__help__{metric}",
                "",
            )
        return self._parse_help_in_dict(metrics_updated)

    def _parse_help_in_dict(self, data: dict) -> list:
        """Convert a {name: value, __help__name: msg} dict into a list of entries."""
        parsed_list = []
        for key, value in data.items():
            if key.startswith("__help__"):
                continue
            if isinstance(value, dict):
                # Nested dicts become "params" entries, parsed recursively.
                parsed_list.append(
                    {
                        "name": key,
                        "help": data.get(f"__help__{key}", ""),
                        "params": self._parse_help_in_dict(value),
                    },
                )
            else:
                parsed_list.append(
                    {
                        "name": key,
                        "help": data.get(f"__help__{key}", ""),
                        "value": value,
                    },
                )
        return parsed_list
def update_metric_parameters(metric_list: List[str]) -> Dict[str, Any]:
    """Add parameters to metrics.

    Maps each metric name to its default parameter dict, or None when the
    metric takes no parameters.
    """
    metrics: Dict[str, Any] = {}
    for name in metric_list:
        if name == "topk":
            params: Any = {"k": [1, 5]}
        elif name == "COCOmAP":
            params = {"anno_path": ""}
        elif name in ("MSE", "RMSE", "MAE"):
            params = {"compare_label": True}
        else:
            params = None
        metrics[name] = params
    return metrics
def get_possible_values(data: dict) -> Dict[str, List[Any]]:
    """
    Get list of possible values for specified scenario.
    Example expected data:
        {
            "param": "dataloader",
            "config": {
                "framework": "tensorflow"
            }
        }
    """
    # Resolve the parameter, then strip the "help" fields for the v1 API.
    return convert_to_v1_api(Feeder(data).feed())
def convert_to_v1_api(data: Dict[str, Any]) -> Dict[str, Any]:
    """Convert new API into old (without "help")."""
    # Only list values carry "help" entries that need flattening;
    # everything else passes through unchanged.
    return {
        key: _convert_to_v1_api_list(value) if isinstance(value, list) else value
        for key, value in data.items()
    }
def _convert_to_v1_api_list(data: list) -> Union[List[str], Dict[str, Any]]:
"""Convert values in list with "help" args into dict or list, based on content."""
data_v1_dict = {}
data_v1_list = []
for item in data:
if isinstance(item, dict):
if "params" in item.keys():
params = item["params"]
if isinstance(params, list):
data_v1_dict[item["name"]] = _convert_to_v1_api_list(params)
else:
raise TypeError(
f"Type of params could be only type of list, not {type(params)}.",
)
elif "value" in item.keys():
data_v1_dict[item["name"]] = item["value"]
else:
data_v1_list.append(item["name"])
if data_v1_dict and not data_v1_list:
return data_v1_dict
elif data_v1_list and not data_v1_dict:
return data_v1_list
else:
raise Exception("Could not determine return type, error in input data.")
def get_possible_values_v2(data: dict) -> Dict[str, List[Any]]:
    """Get list of possible values for specified scenario with "help" information."""
    # Unlike get_possible_values, the "help" entries are kept as-is.
    return Feeder(data).feed()
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,635
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/data/datasets/coco_dataset.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from PIL import Image
from lpot.utils.utility import LazyImport
from .dataset import dataset_registry, IterableDataset, Dataset
tf = LazyImport('tensorflow')
mx = LazyImport('mxnet')
torch = LazyImport('torch')
@dataset_registry(dataset_type="COCORecord", framework="tensorflow", dataset_format='')
class COCORecordDataset(IterableDataset):
    """COCO dataset read from a TFRecord file for the TensorFlow backend.

    Builds and returns a tf.data pipeline in __new__ (no instance of this
    class is actually created).
    """
    # NOTE(review): `filter` defaults to the *builtin* filter function, which
    # is always truthy, so the ds.filter branch below runs unless filter=None
    # is passed explicitly — confirm this is intended.
    def __new__(cls, root, num_cores=28, transform=None, filter=filter):
        # Peek at the first record to validate it was produced by the official
        # create_coco_tf_record.py tool (must carry class text/label features).
        record_iterator = tf.compat.v1.python_io.tf_record_iterator(root)
        example = tf.train.SequenceExample()
        for element in record_iterator:
            example.ParseFromString(element)
            break
        feature = example.context.feature
        if len(feature['image/object/class/text'].bytes_list.value) == 0 \
            and len(feature['image/object/class/label'].int64_list.value) == 0:
            raise ValueError("Tfrecord format is incorrect, please refer\
                'https://github.com/tensorflow/models/blob/master/research/\
                object_detection/dataset_tools/create_coco_tf_record.py' to\
                create correct tfrecord")
        # pylint: disable=no-name-in-module
        from tensorflow.python.data.experimental import parallel_interleave
        # Read the record file with parallel, sloppy (unordered) interleaving
        # for throughput.
        tfrecord_paths = [root]
        ds = tf.data.TFRecordDataset.list_files(tfrecord_paths)
        ds = ds.apply(
            parallel_interleave(tf.data.TFRecordDataset,
                                cycle_length=num_cores,
                                block_length=5,
                                sloppy=True,
                                buffer_output_elements=10000,
                                prefetch_input_elements=10000))
        if transform is not None:
            ds = ds.map(transform, num_parallel_calls=None)
        if filter is not None:
            ds = ds.filter(filter)
        ds = ds.prefetch(buffer_size=1000)
        # NOTE(review): batch() returns a new dataset; its result is discarded
        # here, so the returned pipeline is unbatched — confirm whether
        # `ds = ds.batch(1)` was intended.
        ds.batch(1)
        return ds
@dataset_registry(dataset_type="COCORaw", framework="onnxrt_qlinearops, \
                  onnxrt_integerops", dataset_format='')
class COCORaw(Dataset):
    """COCO raw-image dataset backed by pycocotools annotations.

    Eagerly scans the annotation file at construction time and keeps, per
    image, the file path plus normalized bounding boxes and encoded labels.
    Images with no boxes or with a missing file on disk are skipped.
    """
    def __init__(self, root, img_dir='val2017', \
            anno_dir='annotations/instances_val2017.json', transform=None, filter=filter):
        """Load annotations from root/anno_dir and index images in root/img_dir."""
        import json
        import os
        import numpy as np
        from pycocotools.coco import COCO
        from lpot.metric.coco_label_map import category_map
        self.image_list = []
        self.transform = transform
        self.filter = filter
        img_path = os.path.join(root, img_dir)
        anno_path = os.path.join(root, anno_dir)
        coco = COCO(anno_path)
        img_ids = coco.getImgIds()
        cat_ids = coco.getCatIds()
        for idx, img_id in enumerate(img_ids):
            # NOTE(review): img_info and ids are populated but never used
            # afterwards — candidates for removal, confirm nothing reads them.
            img_info = {}
            bboxes = []
            labels = []
            ids = []
            img_detail = coco.loadImgs(img_id)[0]
            ids.append(img_detail['file_name'].encode('utf-8'))
            pic_height = img_detail['height']
            pic_width = img_detail['width']
            ann_ids = coco.getAnnIds(imgIds=img_id,catIds=cat_ids)
            anns = coco.loadAnns(ann_ids)
            for ann in anns:
                bbox = ann['bbox']
                if len(bbox) == 0:
                    continue
                # Normalize [x, y, w, h] by image size, then store as
                # [ymin, xmin, ymax, xmax] in relative coordinates.
                bbox = [bbox[0]/float(pic_width), bbox[1]/float(pic_height),\
                    bbox[2]/float(pic_width), bbox[3]/float(pic_height)]
                bboxes.append([bbox[1], bbox[0], bbox[1]+bbox[3], bbox[0]+bbox[2]])
                labels.append(category_map[ann['category_id']].encode('utf8'))
            img_file = os.path.join(img_path, img_detail['file_name'])
            # Skip images that are missing on disk or have no annotations.
            if not os.path.exists(img_file) or len(bboxes) == 0:
                continue
            self.image_list.append(
                (img_file, [np.array(bboxes), np.array(labels), np.array([]),\
                np.array(img_detail['file_name'].encode('utf-8'))]))
    def __len__(self):
        """Return the number of usable images."""
        return len(self.image_list)
    def __getitem__(self, index):
        """Return (image_array, label) for the image at *index*, RGB-converted."""
        sample = self.image_list[index]
        label = sample[1]
        with Image.open(sample[0]) as image:
            image = np.array(image.convert('RGB'))
            if self.transform is not None:
                image, label = self.transform((image, label))
        return (image, label)
@dataset_registry(dataset_type="COCORaw", framework="pytorch", dataset_format='')
class PytorchCOCORaw(COCORaw):
    """COCORaw variant for PyTorch: the transform runs on the PIL image,
    and the numpy conversion happens afterwards."""

    def __getitem__(self, index):
        """Return (image_array, label) for the entry at *index*."""
        img_path, label = self.image_list[index]
        with Image.open(img_path) as pil_img:
            rgb_img = pil_img.convert('RGB')
            if self.transform is not None:
                rgb_img, label = self.transform((rgb_img, label))
            arr = np.array(rgb_img)
        return (arr, label)
@dataset_registry(dataset_type="COCORaw", framework="mxnet", dataset_format='')
class MXNetCOCORaw(COCORaw):
    """COCORaw variant for MXNet: images are decoded with mx.image.imread."""

    def __getitem__(self, index):
        """Return (image, label) for the entry at *index*, transformed if configured."""
        img_path, label = self.image_list[index]
        image = mx.image.imread(img_path)
        if self.transform is not None:
            image, label = self.transform((image, label))
        return (image, label)
@dataset_registry(dataset_type="COCORaw", framework="tensorflow", dataset_format='')
class TensorflowCOCORaw(COCORaw):
    """COCORaw variant for TensorFlow: transform output may be a TF tensor,
    which is materialized back into a numpy array before returning."""
    def __getitem__(self, index):
        """Return (image_array, label) for the entry at *index*."""
        sample = self.image_list[index]
        label = sample[1]
        with Image.open(sample[0]) as image:
            # Note: unlike COCORaw.__getitem__, no RGB conversion here.
            image = np.array(image)
            if self.transform is not None:
                image, label = self.transform((image, label))
            # The transform may hand back a TF tensor; evaluate graph-mode
            # tensors in a throwaway session, eager tensors via .numpy().
            if type(image).__name__ == 'Tensor':
                with tf.compat.v1.Session() as sess:
                    image = sess.run(image)
            elif type(image).__name__ == 'EagerTensor':
                image = image.numpy()
        return (image, label)
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,636
|
SnehalA/lpot
|
refs/heads/master
|
/examples/helloworld/tf_example5/test.py
|
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
import numpy as np
def main():
    """Quantize a frozen MobileNet v1 pb with LPOT, then optionally benchmark it."""
    import lpot
    from lpot import common

    # Quantize the FP32 frozen graph according to conf.yaml.
    quant = lpot.Quantization('./conf.yaml')
    quant.model = common.Model("./mobilenet_v1_1.0_224_frozen.pb")
    int8_model = quant()

    # Optional, run benchmark
    from lpot import Benchmark
    bench = Benchmark('./conf.yaml')
    bench.model = common.Model(int8_model)
    results = bench()
    batch_size = 1
    for mode, result in results.items():
        # Each result is (accuracy, batch_size, per-batch latencies).
        acc, batch_size, result_list = result
        latency = np.array(result_list).mean() / batch_size
        print('Accuracy is {:.3f}'.format(acc))
        print('Latency: {:.3f} ms'.format(latency * 1000))

if __name__ == "__main__":
    main()
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,637
|
SnehalA/lpot
|
refs/heads/master
|
/examples/helloworld/tf_example3/test.py
|
import tensorflow as tf
import time
import numpy as np
tf.compat.v1.disable_eager_execution()
def main():
    """Quantize an Inception v1 slim checkpoint with LPOT."""
    import lpot
    from lpot import common
    quantizer = lpot.Quantization('./conf.yaml')
    # Get graph from slim checkpoint
    from tf_slim.nets import inception
    # NOTE(review): model_func, arg_scope, kwargs and inputs_shape appear
    # unused, and `images` only creates a placeholder as a side effect on the
    # TF default graph — confirm whether the checkpoint loading below relies
    # on that graph state before removing any of them.
    model_func = inception.inception_v1
    arg_scope = inception.inception_v1_arg_scope()
    kwargs = {'num_classes': 1001}
    inputs_shape = [None, 224, 224, 3]
    images = tf.compat.v1.placeholder(name='input', \
                                      dtype=tf.float32, shape=inputs_shape)
    # Do quantization
    quantizer.model = common.Model('./inception_v1.ckpt')
    # NOTE(review): the quantized model is not saved or used further here.
    quantized_model = quantizer()
if __name__ == "__main__":
    main()
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,638
|
SnehalA/lpot
|
refs/heads/master
|
/test/test_tensorflow_query_yaml.py
|
#
# -*- coding: utf-8 -*-
#
import unittest
import yaml
import os
from lpot.adaptor.tensorflow import TensorflowQuery
class TestTFQueryYaml(unittest.TestCase):
    """Sanity checks for the TensorFlow capability yaml consumed by TensorflowQuery."""

    @classmethod
    def setUpClass(cls):
        """Load the shipped tensorflow.yaml once for the whole test class."""
        # Build the path with proper join components instead of passing one
        # pre-concatenated string to os.path.join (which was a no-op).
        cls.tf_yaml_path = os.path.join(
            os.getcwd(), "..", "lpot", "adaptor", "tensorflow.yaml",
        )
        with open(cls.tf_yaml_path) as f:
            cls.content = yaml.safe_load(f)
        cls.query_handler = TensorflowQuery(local_config_file=cls.tf_yaml_path)

    def test_unique_version(self):
        """Every TF version entry in the yaml must be unique."""
        registered_version_name = [i['version']['name'] for i in self.content]
        self.assertEqual(len(registered_version_name), len(set(registered_version_name)))

    def test_int8_sequences(self):
        """The int8 fusion pattern table must cover the expected op types."""
        patterns = self.query_handler.get_eightbit_patterns()
        has_conv2d = bool('Conv2D' in patterns)
        has_matmul = bool('MatMul' in patterns)
        self.assertEqual(has_conv2d, True)
        self.assertEqual(has_matmul, True)
        self.assertGreaterEqual(len(patterns['Conv2D']), 13)
        self.assertGreaterEqual(len(patterns['MatMul']), 3)
        self.assertEqual(len(patterns['ConcatV2']), 1)
        self.assertEqual(len(patterns['MaxPool']), 1)
        self.assertEqual(len(patterns['AvgPool']), 1)

    def test_convert_internal_patterns(self):
        """Internal pattern generation must include the known fusion shapes."""
        internal_patterns = self.query_handler.generate_internal_patterns()
        self.assertEqual([['MaxPool']] in internal_patterns, True)
        self.assertEqual([['ConcatV2']] in internal_patterns, True)
        self.assertEqual([['AvgPool']] in internal_patterns, True)
        self.assertEqual([['MatMul'], ('BiasAdd',), ('Relu',)] in internal_patterns, True)

if __name__ == '__main__':
    unittest.main()
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,639
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/policy/policy.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Registry of pruning policies, keyed by the lowercase class-name prefix.
POLICIES = {}

def policy_registry(cls):
    """The class decorator used to register all PrunePolicy subclasses.

    Args:
        cls (class): The class of register.

    Returns:
        cls: The class of register.
    """
    suffix = 'PrunePolicy'
    assert cls.__name__.endswith(suffix), \
        "The name of subclass of PrunePolicy should end with \'PrunePolicy\' substring."
    # "GradientPrunePolicy" registers under "gradient".
    registry_key = cls.__name__[:-len(suffix)].lower()
    if registry_key in POLICIES:
        raise ValueError('Cannot have two policies with the same name')
    POLICIES[registry_key] = cls
    return cls
class PrunePolicy:
    """Base class of prune policies.

    Resolves per-instance settings from ``local_config`` with fallbacks to
    the global pruning section, and defines the training-loop hooks that
    concrete policies must implement.
    """

    def __init__(self, model, local_config, global_config):
        """Initialize the policy.

        Args:
            model (object): The original model (currently PyTorchModel instance).
            local_config (Conf): configs specific for this pruning instance
            global_config (Conf): global configs which may be overwritten by
                                  local_config
        """
        self.model = model
        # Only 4-D weight tensors are considered by default.
        self.tensor_dims = [4]
        # Instance settings win over the global pruning section.
        self.method = local_config.method if local_config.method else "per_tensor"
        self.init_sparsity = (
            local_config["init_sparsity"]
            if local_config.init_sparsity
            else global_config.pruning["init_sparsity"]
        )
        self.target_sparsity = (
            local_config.target_sparsity
            if local_config.target_sparsity
            else global_config.pruning.target_sparsity
        )
        # Schedule parameters always come from the global section.
        self.start_epoch = global_config.pruning["start_epoch"]
        self.end_epoch = global_config.pruning["end_epoch"]
        self.freq = global_config.pruning["frequency"]
        # Default to pruning every weight tensor of the model.
        self.weights = (
            local_config.weights
            if local_config.weights
            else self.model.get_all_weight_names()
        )
        self.is_last_epoch = False
        self.masks = {}

    def on_epoch_begin(self, epoch):
        """Hook invoked at the start of each epoch; must be overridden."""
        raise NotImplementedError

    def on_batch_begin(self, batch_id):
        """Hook invoked before each batch; must be overridden."""
        raise NotImplementedError

    def on_epoch_end(self):
        """Hook invoked at the end of each epoch; must be overridden."""
        raise NotImplementedError

    def on_batch_end(self):
        """Hook invoked after each batch; must be overridden."""
        raise NotImplementedError

    def update_sparsity(self, epoch):
        """Update sparsity goals according to epoch numbers.

        Args:
            epoch (int): the epoch number

        Returns:
            float: sparsity target in this epoch
        """
        if self.start_epoch == self.end_epoch:
            return self.init_sparsity
        if epoch < self.start_epoch:
            return 0
        if epoch > self.end_epoch:
            return self.target_sparsity
        # Sparsity ramps linearly from init to target, advancing only in
        # whole steps of `freq` epochs.
        elapsed_steps = (epoch - self.start_epoch) // self.freq
        return self.init_sparsity + (self.target_sparsity - self.init_sparsity) * \
            elapsed_steps * self.freq / (self.end_epoch - self.start_epoch)
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,640
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/ux/utils/workload/workload.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Workload module."""
import json
import os
from pathlib import Path
from typing import Any, Dict
from lpot.ux.utils.exceptions import ClientErrorException
from lpot.ux.utils.json_serializer import JsonSerializer
from lpot.ux.utils.logger import log
from lpot.ux.utils.utils import (
get_file_extension,
get_framework_from_path,
get_predefined_config_path,
)
from lpot.ux.utils.workload.config import Config
class Workload(JsonSerializer):
    """Workload class.

    Encapsulates a single tuning workload: model location, workspace layout,
    dataset paths and the LPOT yaml configuration. The constructor creates
    the workspace directories and loads/patches the config as a side effect.
    """
    def __init__(self, data: Dict[str, Any]):
        """Initialize Workload class.

        Raises:
            ClientErrorException: when the id, model path, domain, datasets
                or model file are missing/invalid.
        """
        super().__init__()
        self.config: Config = Config()
        self.id: str = str(data.get("id", ""))
        if not self.id:
            raise ClientErrorException("Workload ID not specified.")
        self.model_path: str = data.get("model_path", "")
        if not self.model_path:
            raise ClientErrorException("Model path is not defined!")
        self.model_name = Path(self.model_path).stem
        self.domain: str = data.get("domain", None)
        if not self.domain:
            raise ClientErrorException("Domain is not defined!")
        # NOTE(review): dict.get evaluates the fallback eagerly, so
        # get_framework_from_path / get_predefined_config_path run even when
        # the key is present — confirm those helpers are side-effect free.
        self.framework: str = data.get(
            "framework",
            get_framework_from_path(self.model_path),
        )
        self.predefined_config_path = data.get(
            "config_path",
            get_predefined_config_path(self.framework, self.domain),
        )
        # Workspace defaults to the model's directory; each workload gets its
        # own subdirectory named <model>_<id>.
        self.workspace_path = data.get(
            "workspace_path",
            os.path.dirname(self.model_path),
        )
        self.workload_path = data.get(
            "workload_path",
            os.path.join(
                self.workspace_path,
                "workloads",
                f"{self.model_name}_{self.id}",
            ),
        )
        self.set_workspace()
        self.config_name = "config.yaml"
        self.config_path = os.path.join(
            self.workload_path,
            self.config_name,
        )
        model_output_name = (
            self.model_name + "_int8." + get_file_extension(self.model_path)
        )
        self.model_output_path = os.path.join(
            self.workload_path,
            model_output_name,
        )
        self.eval_dataset_path: str = data.get("eval_dataset_path", "")
        # NOTE(review): the calibration default also reads "eval_dataset_path"
        # — possibly an intentional fallback to the evaluation dataset, but
        # confirm it is not a copy-paste of the line above.
        self.calib_dataset_path: str = data.get("eval_dataset_path", "")
        self.set_dataset_paths(data)
        # "no_dataset_location" is a sentinel meaning "skip the existence check".
        for dataset_path in [self.eval_dataset_path, self.calib_dataset_path]:
            if dataset_path != "no_dataset_location" and not os.path.exists(
                dataset_path,
            ):
                raise ClientErrorException(
                    f'Could not found dataset in specified location: "{dataset_path}".',
                )
        if not os.path.isfile(self.model_path):
            raise ClientErrorException(
                f'Could not found model in specified location: "{self.model_path}".',
            )
        self.accuracy_goal: float = data.get("accuracy_goal", 0.01)
        # Prefer a previously saved per-workload config; otherwise start from
        # the predefined template for this framework/domain.
        if not os.path.isfile(self.config_path):
            self.config.load(self.predefined_config_path)
        else:
            self.config.load(self.config_path)
        self.config.model.name = self.model_name
        self.config.set_evaluation_dataset_path(self.eval_dataset_path)
        self.config.set_quantization_dataset_path(self.calib_dataset_path)
        self.config.set_workspace(self.workload_path)
        self.config.set_accuracy_goal(self.accuracy_goal)
    def set_dataset_paths(self, data: dict) -> None:
        """Set calibration and evaluation dataset path.

        Precedence: per-section path ("evaluation"/"quantization"), then the
        shared "dataset_path", then whatever defaults were set in __init__.
        """
        if data.get("evaluation", {}).get("dataset_path"):
            self.eval_dataset_path = data.get("evaluation", {}).get("dataset_path")
        if data.get("quantization", {}).get("dataset_path"):
            self.calib_dataset_path = data.get("quantization", {}).get("dataset_path")
        if not self.eval_dataset_path:
            self.eval_dataset_path = data.get("dataset_path", "")
        if not self.calib_dataset_path:
            self.calib_dataset_path = data.get("dataset_path", "")
    def set_workspace(self) -> None:
        """Create (if missing) necessary folders for workloads."""
        os.makedirs(self.workspace_path, exist_ok=True)
        os.makedirs(self.workload_path, exist_ok=True)
    def dump(self) -> None:
        """Dump workload to yaml."""
        # Despite the docstring, the serialized form is written as JSON.
        json_path = os.path.join(self.workload_path, "workload.json")
        with open(json_path, "w") as f:
            json.dump(self.serialize(), f, indent=4)
        log.debug(f"Successfully saved workload to {json_path}")
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,641
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/adaptor/tf_utils/quantize_graph/quantize_graph_common.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import tensor_util
class QuantizeGraphHelper:
    """Helper utilities for building and rewriting TensorFlow GraphDefs
    during quantization.

    Most functionality is exposed as staticmethods; the two class-level
    dicts below cache name lookups and are shared by all callers.
    """
    # Maps raw input names (possibly "^name:port") -> canonical node names.
    node_name_cache = {}
    # Maps node names -> names guaranteed to carry an explicit ":<port>".
    node_name_port_cache = {}

    def __init__(self):
        pass

    def _recursive_graph_sorting(self, node_name):
        """Depth-first post-order visit: append `node_name` to self.op_list
        only after all of its producers present in the graph were appended."""
        if node_name in self.op_list or not self.node_name_mapping[
                node_name].input:
            return

        for input_name in self.node_name_mapping[node_name].input:
            if input_name not in self.node_name_mapping:
                # Input produced outside the mapped graph; nothing to sort.
                continue
            else:
                self._recursive_graph_sorting(input_name)

        if node_name not in self.op_list:
            self.op_list.append(node_name)
        return

    def _get_op_list(self, output_node_names):
        """Populate self.op_list with a topologically sorted closure of outputs."""
        for output_name in output_node_names:
            self._recursive_graph_sorting(output_name)

    def get_sorted_graph(self, input_graph, input_node_names, output_node_names):
        """Return a sorted graphdef object. Sometimes the input graphdef is composed of
        randomly ordered nodedef objects; we reorder the graph to make parsing easier.

        Args:
            input_graph (graphdef): the input graphdef object
            input_node_names (string list): the input node names
            output_node_names (string list): the output node names
        Returns:
            graphdef: a new GraphDef whose nodes are topologically sorted
        """
        self.node_name_mapping = {}
        self.op_list = list(input_node_names)
        for node in input_graph.node:
            self.node_name_mapping[node.name] = node

        self._get_op_list(output_node_names)
        # Nodes unreachable from the outputs are appended (in arbitrary order)
        # so the returned graph still contains every original node.
        self.op_list.extend(
            set(self.node_name_mapping.keys()) - set(self.op_list))

        self.out_graph_def = graph_pb2.GraphDef()
        for name in self.op_list:
            new_node = node_def_pb2.NodeDef()
            new_node.CopyFrom(self.node_name_mapping[name])
            self.out_graph_def.node.extend([new_node])

        return self.out_graph_def

    @staticmethod
    def split_shared_inputs(input_graph_def):
        """Split shared Const inputs (like weights and bias) of the graph so
        each consumer after the first gets its own copy.

        :param input_graph_def: input graphdef object.
        :return: new GraphDef with duplicated Const nodes, or the original
                 graph unchanged when no Const input is shared.
        """
        node_map = {}
        for node in input_graph_def.node:
            if node.name not in node_map:
                node_map[node.name] = node

        output_graph_def = graph_pb2.GraphDef()
        is_shared_input = False
        # map of: input_name - op_name
        input_map = {}
        for node_name, node in node_map.items():
            for input_idx, input_node_name in enumerate(node.input):
                if node_map[QuantizeGraphHelper.node_name_from_input(
                        input_node_name)].op == 'Const':
                    # is shared and current node is not the first one
                    # sharing the input
                    if input_node_name in input_map:
                        is_shared_input = True
                        input_map[input_node_name].append(node.name)
                        new_input_node = node_def_pb2.NodeDef()
                        new_input_node.CopyFrom(node_map[input_node_name])
                        new_input_node.name = input_node_name + '_' + str(
                            len(input_map[input_node_name]))
                        node.input[input_idx] = new_input_node.name
                        output_graph_def.node.extend([new_input_node])
                    else:
                        input_map[input_node_name] = [node.name]
            output_graph_def.node.extend([node])

        return output_graph_def if is_shared_input else input_graph_def

    @staticmethod
    def remove_training_nodes(input_graph, protected_nodes=None,
                              types_to_splice=None):
        """Prunes out nodes that aren't needed for inference.

        Args:
            input_graph: Model to analyze and prune.
            protected_nodes: node names that must never be spliced out.
            types_to_splice: An optional list of types of nodes to be removed
                unconditionally.
        Returns:
            An optimized graphdef object.
        """
        # None sentinels instead of mutable default arguments; behavior of
        # the historical defaults ([], ['Identity', 'CheckNumerics']) is kept.
        if protected_nodes is None:
            protected_nodes = []
        if types_to_splice is None:
            types_to_splice = ['Identity', 'CheckNumerics']

        input_nodes = input_graph.node
        control_input_names = set()
        node_names_with_control_input = set()
        for node in input_nodes:
            for node_input in node.input:
                if "^" in node_input:
                    control_input_names.add(node_input.replace("^", ""))
                    node_names_with_control_input.add(node.name)

        names_to_splice = {}
        for node in input_nodes:
            if node.op in types_to_splice:
                # We don't want to remove nodes that have control edge inputs, because
                # they might be involved in subtle dependency issues that removing them
                # will jeopardize.
                if node.name not in node_names_with_control_input:
                    names_to_splice[node.name] = node.input[0]

        # We also don't want to remove nodes which are used as control edge inputs.
        names_to_splice = {
            name: value
            for name, value in names_to_splice.items()
            if name not in control_input_names
        }

        nodes_after_splicing = []
        for node in input_nodes:
            if node.name in names_to_splice and node.name not in protected_nodes:
                continue
            # NOTE(review): this compares node.name against op *types*;
            # node.op looks like the intended check — confirm.
            if node.name in protected_nodes and node.name in types_to_splice:
                nodes_after_splicing.append(node)
                continue
            new_node = node_def_pb2.NodeDef()
            new_node.CopyFrom(node)
            input_before_removal = node.input
            del new_node.input[:]
            for full_input_name in input_before_removal:
                input_name = re.sub(r"^\^", "", full_input_name)
                # Follow chains of spliced nodes to the surviving producer.
                while input_name in names_to_splice:
                    full_input_name = names_to_splice[input_name]
                    input_name = re.sub(r"^\^", "", full_input_name)
                new_node.input.append(full_input_name)
            nodes_after_splicing.append(new_node)

        output_graph = graph_pb2.GraphDef()
        output_graph.node.extend(nodes_after_splicing)
        return output_graph

    @staticmethod
    def create_node(op, name, inputs):
        """Create a nodedef object.

        Args:
            op (string): op type
            name (string): op name
            inputs (string list): op's inputs name
        Returns:
            nodedef: the created nodedef object
        """
        new_node = node_def_pb2.NodeDef()
        new_node.op = op
        new_node.name = name
        for input_name in inputs:
            new_node.input.extend([input_name])
        return new_node

    @staticmethod
    def create_constant_node(name, value, dtype, shape=None, device='cpu'):
        """Create a constant node.

        Args:
            name (string): op name
            value (np.array): input data
            dtype (datatype): data type of the input value
            shape (int list, optional): the value's shape. Defaults to None.
            device (str, optional): the device type, it may be the 'cpu' or 'gpu'.
                Defaults to 'cpu'.
        Returns:
            nodedef: a Const (or HostConst for non-cpu devices) node
        """
        node = QuantizeGraphHelper.create_node("Const" if device == 'cpu' else "HostConst", name,
                                               [])
        QuantizeGraphHelper.set_attr_dtype(node, "dtype", dtype)
        QuantizeGraphHelper.set_attr_tensor(node, "value", value, dtype, shape)
        return node

    @staticmethod
    def copy_attr(node, key, attr_value):
        """Copy the specified attr value to node.

        Args:
            node (nodedef): a nodedef object
            key (string): string name
            attr_value (any): the specified attribute value
        """
        node.attr[key].CopyFrom(attr_value)

    @staticmethod
    def set_attr_dtype(node, key, value):
        """Set the attribute data type."""
        node.attr[key].CopyFrom(attr_value_pb2.AttrValue(type=value.as_datatype_enum))

    @staticmethod
    def set_attr_tensor(node, key, value, dtype, shape=None):
        """Set the tensor value to specified attribute field.

        Args:
            node (nodedef): the target nodedef object
            key (string): attribute name
            value (np.array): the content
            dtype (dtypes): data type
            shape (int list, optional): the input tensor's shape. Defaults to None.
        """
        node.attr[key].CopyFrom(
            attr_value_pb2.AttrValue(
                tensor=tensor_util.make_tensor_proto(value, dtype=dtype, shape=shape)))

    @staticmethod
    def set_attr_string(node, key, value):
        """Set the node's attr which data type is string."""
        node.attr[key].CopyFrom(attr_value_pb2.AttrValue(s=value))

    @staticmethod
    def set_attr_bool(node, key, value):
        """Set the node's attr which data type is bool."""
        node.attr[key].CopyFrom(attr_value_pb2.AttrValue(b=value))

    @staticmethod
    def set_attr_int(node, key, value):
        """Set the node's attr which data type is int."""
        node.attr[key].CopyFrom(attr_value_pb2.AttrValue(i=value))

    @staticmethod
    def set_attr_float(node, key, value):
        """Set the node's attr which data type is float."""
        node.attr[key].CopyFrom(attr_value_pb2.AttrValue(f=value))

    @staticmethod
    def node_name_from_input(node_name):
        """Static method that gets the valid node name from an input name.

        Strips a leading control-edge marker "^" and a trailing ":<port>";
        results are memoized in node_name_cache.

        Args:
            node_name (string): node name defined in the input field.
        Returns:
            string: node's name
        """
        if node_name not in QuantizeGraphHelper.node_name_cache:
            key = node_name
            if node_name.startswith("^"):
                node_name = node_name[1:]
            m = re.search(r"(.*):\d+$", node_name)
            if m:
                node_name = m.group(1)
            QuantizeGraphHelper.node_name_cache[key] = node_name
            return node_name

        return QuantizeGraphHelper.node_name_cache[node_name]

    @staticmethod
    def unique_node_name_from_input(node_name):
        """Get the node name from another node name's input field."""
        return node_name.replace(":", "__port__").replace("^", "__hat__")

    @staticmethod
    def ensure_tensor_name_has_port(node_name):
        """Makes sure that a tensor name has :0 if no explicit port exists."""
        if node_name not in QuantizeGraphHelper.node_name_port_cache:
            key = node_name
            m = re.search(r"(.*):\d+$", node_name)
            if not m:
                node_name = node_name + ":0"
            QuantizeGraphHelper.node_name_port_cache[key] = node_name
            return node_name

        return QuantizeGraphHelper.node_name_port_cache[node_name]
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,642
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/adaptor/onnxrt.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import yaml
import logging
from collections import OrderedDict
import numpy as np
from .adaptor import adaptor_registry, Adaptor
from .query import QueryBackendCapability
from ..utils.utility import LazyImport, dump_elapsed_time
# Heavy dependencies are imported lazily so this module can be loaded
# without onnx / onnxruntime being installed.
onnx = LazyImport("onnx")
ort = LazyImport("onnxruntime")
# NOTE(review): getLogger() with no name returns the root logger — confirm intended.
logger = logging.getLogger()
class ONNXRTAdaptor(Adaptor):
    """The ONNXRT adaptor layer, do onnx-rt quantization, calibration, inspect layer tensors.
    Args:
        framework_specific_info (dict): framework specific configuration for quantization.
    """

    def __init__(self, framework_specific_info):
        """Initialize adaptor state from the framework-specific config dict."""
        super(ONNXRTAdaptor, self).__init__(framework_specific_info)
        self.__config_dict = {}
        self.quantizable_ops = []
        self.logger = logger
        # "post_training_static_quant" requires calibration data; other
        # approaches (dynamic) do not.
        self.static = framework_specific_info["approach"] == "post_training_static_quant"
        self.backend = framework_specific_info["backend"]
        self.work_space = framework_specific_info["workspace_path"]
        if not os.path.exists(self.work_space):
            os.makedirs(self.work_space)
        self.pre_optimized_model = None
        self.quantizable_op_types = self._query_quantizable_op_types()
        self.evaluate_nums = 0

    @dump_elapsed_time("Pass quantize model")
    def quantize(self, tune_cfg, model, dataLoader, q_func=None):
        """The function is used to do calibration and quanitization in post-training
           quantization.
        Args:
            tune_cfg (dict): quantization config.
            model (object): model need to do quantization.
            dataloader (object): calibration dataset.
            q_func (optional): training function for quantization aware training mode,
                unimplement yet for onnx.
        Returns:
            (dict): quantized model
        """
        model = self.pre_optimized_model if self.pre_optimized_model else model
        ort_version = [int(i) for i in ort.__version__.split(".")]
        if ort_version < [1, 5, 2]:
            logger.warning('quantize input need onnxruntime version > 1.5.2')
            return model
        # NOTE(review): only warns on opset < 11 but still proceeds with
        # quantization — confirm the fall-through is intended.
        if model.model.opset_import[0].version < 11:
            logger.warning('quantize input need model opset >= 11')
        from .ox_utils.onnx_quantizer import ONNXQuantizer
        from onnxruntime.quantization.quant_utils import QuantizationMode
        backend = QuantizationMode.QLinearOps if self.backend == \
            "qlinearops" else QuantizationMode.IntegerOps
        # Deep-copy so the caller's (pre-optimized) model is left untouched.
        model = copy.deepcopy(model)
        self.quantizable_ops = self._query_quantizable_ops(model.model)
        q_config = self._cfg_to_qconfig(tune_cfg)
        if self.static:
            quantize_params = self._get_quantize_params(model.model, dataLoader, q_config)
        else:
            quantize_params = None
        quantizer = ONNXQuantizer(model.model,
                                  q_config,
                                  backend,
                                  self.static,
                                  quantize_params,
                                  self.quantizable_op_types)
        quantizer.quantize_model()
        model.model = quantizer.model.model
        return model

    def _get_quantize_params(self, model, dataloader, q_config):
        """Run calibration to collect per-tensor quantization parameters.

        Nodes configured as 'fp32' are excluded (black_nodes); all others
        are calibrated (white_nodes).
        """
        from .ox_utils.onnx_calibrate import calibrate
        black_nodes = [node for node in q_config if q_config[node]=='fp32']
        white_nodes = [node for node in q_config if q_config[node]!='fp32']
        quantize_params = calibrate(model, dataloader, self.quantizable_op_types, \
                                    black_nodes=black_nodes, white_nodes=white_nodes, \
                                    augmented_model_path=os.path.join(self.work_space, 'augmented_model.onnx'))
        return quantize_params

    def _pre_optimize(self, model, level=1):
        """Run onnxruntime graph optimizations and Gemm->MatMul rewriting;
        caches the result in self.pre_optimized_model."""
        # TODO hardcoded to GraphOptimizationLevel.ORT_ENABLE_EXTENDED
        sess_options = ort.SessionOptions()
        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
        sess_options.optimized_model_filepath = os.path.join(self.work_space, \
            "Optimized_model.onnx")
        session = ort.InferenceSession(model.model.SerializeToString(), sess_options)
        tmp_model = onnx.load(sess_options.optimized_model_filepath)
        model.model = self._replace_gemm_with_matmul(tmp_model).model
        self.pre_optimized_model = model

    def _replace_gemm_with_matmul(self, model):
        """Rewrite Gemm nodes as MatMul (+ Add when a bias input exists),
        only when alpha == beta == 1.0 and transA == 0; transB is resolved
        by transposing the initializer or inserting a Transpose node."""
        new_nodes = []
        from .ox_utils.onnx_model import ONNXModel
        model = ONNXModel(model)
        for node in model.nodes():
            if node.op_type == 'Gemm':
                alpha = 1.0
                beta = 1.0
                transA = 0
                transB = 0
                for attr in node.attribute:
                    if attr.name == 'alpha':
                        alpha = onnx.helper.get_attribute_value(attr)
                    elif attr.name == 'beta':
                        beta = onnx.helper.get_attribute_value(attr)
                    elif attr.name == 'transA':
                        transA = onnx.helper.get_attribute_value(attr)
                    elif attr.name == 'transB':
                        transB = onnx.helper.get_attribute_value(attr)
                if alpha == 1.0 and beta == 1.0 and transA == 0:
                    inputB = node.input[1]
                    if transB == 1:
                        B = model.get_initializer(node.input[1])
                        if B:
                            # assume B is not used by any other node
                            B_array = onnx.numpy_helper.to_array(B)
                            B_trans = onnx.numpy_helper.from_array(B_array.T)
                            B_trans.name = B.name
                            model.remove_initializer(B)
                            model.add_initializer(B_trans)
                        else:
                            inputB += '_Transposed'
                            transpose_node = onnx.helper.make_node('Transpose',
                                                                inputs=[node.input[1]],
                                                                outputs=[inputB],
                                                                name=node.name+'_Transpose')
                            new_nodes.append(transpose_node)

                    matmul_node = onnx.helper.make_node('MatMul',
                            inputs=[node.input[0], inputB],
                            outputs=[node.output[0] + ('_MatMul' if len(node.input)>2 else '')],
                            name=node.name + '_MatMul')
                    new_nodes.append(matmul_node)

                    if len(node.input) > 2:
                        add_node = onnx.helper.make_node('Add',
                            inputs=[node.output[0] + '_MatMul', node.input[2]],
                            outputs=node.output,
                            name=node.name + '_Add')
                        new_nodes.append(add_node)

                # unsupported
                else:
                    new_nodes.append(node)

            # not GEMM
            else:
                new_nodes.append(node)

        model.graph().ClearField('node')
        model.graph().node.extend(new_nodes)

        return model

    def query_fw_capability(self, model):
        """The function is used to query framework capability.
        TODO: will be replaced by framework query API
        Args:
            model: onnx model
        Returns:
            (dict): quantization capability
        """
        # optype_wise and op_wise capability
        self._pre_optimize(model)
        quantizable_ops = self._query_quantizable_ops(self.pre_optimized_model.model)
        optype_wise = OrderedDict()
        special_config_types = list(self.query_handler.get_quantization_capability()\
            ['int8'].keys())  # pylint: disable=no-member
        default_config = self.query_handler.get_quantization_capability()[\
            'int8']['default']  # pylint: disable=no-member
        op_wise = OrderedDict()
        for _, op in enumerate(quantizable_ops):
            # Op types without a specialized entry fall back to 'default'.
            if op.op_type not in special_config_types:
                op_capability = default_config
            else:
                op_capability = \
                    self.query_handler.get_quantization_capability()[\
                    'int8'][op.op_type]  # pylint: disable=no-member
            if op.op_type not in optype_wise.keys():
                optype_wise[op.op_type] = copy.deepcopy(op_capability)

            op_wise.update(
                {(op.name, op.op_type): copy.deepcopy(op_capability)})

        return {'optypewise': optype_wise, 'opwise': op_wise}

    def _cfg_to_qconfig(self, tune_cfg):
        """Convert a lpot tune_cfg into a per-node config dict: either the
        string 'fp32' or a per-tensor dict with granularity/algorithm/scheme
        defaults filled in and dtype mapped to an onnx TensorProto enum."""
        nodes_config = {}
        granularity = 'per_tensor'
        algorithm = 'minmax'
        scheme = 'sym'
        from onnx import onnx_pb as onnx_proto
        for _, op in enumerate(self.quantizable_ops):
            if tune_cfg['op'][(op.name, op.op_type)
                              ]['activation']['dtype'] == 'fp32':
                nodes_config[op.name] = 'fp32'
            else:
                node_config = copy.deepcopy(tune_cfg['op'][(op.name, op.op_type)])
                for tensor, config in tune_cfg['op'][(op.name, op.op_type)].items():
                    if 'granularity' not in config:
                        node_config[tensor]['granularity'] = granularity
                    if 'algorithm' not in config:
                        node_config[tensor]['algorithm'] = algorithm
                    if 'scheme' not in config:
                        node_config[tensor]['scheme'] = scheme
                    node_config[tensor]['dtype'] = onnx_proto.TensorProto.INT8 \
                        if config['dtype'] == "int8" else onnx_proto.TensorProto.UINT8
                nodes_config[op.name] = node_config

        return nodes_config

    def _query_quantizable_ops(self, model):
        """Collect graph nodes whose op type is quantizable.

        NOTE(review): appends to self.quantizable_ops without clearing it,
        so repeated calls accumulate duplicates — confirm intended.
        """
        for node in model.graph.node:
            if node.op_type in self.quantizable_op_types:
                self.quantizable_ops.append(node)

        return self.quantizable_ops

    def _query_quantizable_op_types(self):
        """Return the op types the backend supports at int8 precision."""
        quantizable_op_types = self.query_handler.get_op_types_by_precision( \
            precision='int8')  # pylint: disable=no-member
        return quantizable_op_types

    def evaluate(self, input_graph, dataloader, postprocess=None,
                 metric=None, measurer=None, iteration=-1,
                 tensorboard=False, fp32_baseline=False):
        """The function is for evaluation if no given eval func
        Args:
            input_graph      : onnx model for evaluation
            dataloader       : dataloader for evaluation. lpot.data.dataloader.ONNXDataLoader
            postprocess      : post-process for evalution. lpot.data.transform.ONNXTransforms
            metrics:         : metrics for evaluation. lpot.metric.ONNXMetrics
            measurer         : lpot.objective.Measurer
            iteration(int)   : max iterations of evaluaton.
            tensorboard(bool): whether to use tensorboard for visualizaton
            fp32_baseline (boolen, optional): only for compare_label=False pipeline
        Returns:
            (float) evaluation results. acc, f1 e.g.
        """
        session = ort.InferenceSession(input_graph.model.SerializeToString(), None)
        len_outputs = len(session.get_outputs())
        # compare_label=False metrics compare raw output tensors against a
        # previously dumped fp32 baseline instead of ground-truth labels.
        if metric:
            if hasattr(metric, "compare_label"):
                if not metric.compare_label:
                    results = [[] for _ in range(len_outputs)]
                    if not os.path.exists(os.path.join(self.work_space, "output_tensors")):
                        os.makedirs(os.path.join(self.work_space, "output_tensors"))
        ort_inputs = {}
        len_inputs = len(session.get_inputs())
        inputs_names = [session.get_inputs()[i].name for i in range(len_inputs)]
        for idx, batch in enumerate(dataloader):
            labels = batch[1]
            if measurer is not None:
                for i in range(len_inputs):
                    # in case dataloader contains non-array input
                    if not isinstance(batch[i], np.ndarray):
                        ort_inputs.update({inputs_names[i]: np.array(batch[i])})
                    else:
                        ort_inputs.update({inputs_names[i]: batch[i]})
                # Only the session.run call is timed by the measurer.
                measurer.start()
                predictions = session.run(None, ort_inputs)
                measurer.end()
            else:
                for i in range(len_inputs):
                    ort_inputs.update({inputs_names[i]: batch[i]})
                predictions = session.run(None, ort_inputs)
            if metric:
                if hasattr(metric, "compare_label"):
                    if not metric.compare_label:
                        for i in range(len_outputs):
                            results[i].append(predictions[i])
            if postprocess is not None:
                predictions, labels = postprocess((predictions, labels))
            if metric is not None:
                if not hasattr(metric, "compare_label"):
                    metric.update(predictions, labels)
                elif hasattr(metric, "compare_label") and metric.compare_label:
                    metric.update(predictions, labels)
            if idx + 1 == iteration:
                break
        if metric:
            if hasattr(metric, "compare_label"):
                if not metric.compare_label:
                    metric.reset()
                    results = [np.array(result) for result in results]
                    if fp32_baseline:
                        # Baseline run: dump outputs and compare against themselves.
                        np.savez(os.path.join(self.work_space,"output_tensors", "fp32.npz"),
                                 *results)
                        metric.update(results, results)
                    else:
                        # int8 run: compare outputs against the saved fp32 dump.
                        np.savez(os.path.join(self.work_space,"output_tensors", "int8.npz"),
                                 *results)
                        reference_file = np.load(os.path.join(self.work_space, \
                            "output_tensors", "fp32.npz"), allow_pickle=True)
                        reference = [reference_file[key] for key in reference_file]
                        metric.update(reference, results)
        acc = metric.result() if metric is not None else 0
        return acc

    def save(self, model, path):
        """Save the model under `path` as best_model.onnx."""
        model.save(os.path.join(path, "best_model.onnx"))
@adaptor_registry
class ONNXRT_QLinearOpsAdaptor(ONNXRTAdaptor):
    """ONNXRT adaptor specialized for the QLinearOps quantization backend.
    Args:
        framework_specific_info (dict): framework specific configuration for quantization.
    """

    def __init__(self, framework_specific_info):
        """Load the QLinear capability yaml, then delegate to the base adaptor."""
        config_file = os.path.join(os.path.dirname(__file__), "onnxrt_qlinear.yaml")
        self.query_handler = ONNXRTQuery(local_config_file=config_file)
        self.backend = "qlinearops"
        super().__init__(framework_specific_info)
@adaptor_registry
class ONNXRT_IntegerOpsAdaptor(ONNXRTAdaptor):
    """ONNXRT adaptor specialized for the IntegerOps quantization backend.
    Args:
        framework_specific_info (dict): framework specific configuration for quantization.
    """

    def __init__(self, framework_specific_info):
        """Load the IntegerOps capability yaml, then delegate to the base adaptor."""
        config_file = os.path.join(os.path.dirname(__file__), "onnxrt_integer.yaml")
        self.query_handler = ONNXRTQuery(local_config_file=config_file)
        self.backend = "integerops"
        super().__init__(framework_specific_info)
class ONNXRTQuery(QueryBackendCapability):
    """Loads per-version ONNX Runtime capability data from a yaml file and
    answers capability queries against the matching section."""

    def __init__(self, local_config_file=None):
        """Record the runtime version and eagerly load the yaml config."""
        import onnxruntime as ort
        super().__init__()
        self.version = ort.__version__
        self.cfg = local_config_file
        self.cur_config = None
        self._one_shot_query()

    def _one_shot_query(self):
        """Read the yaml once and cache the section for the running version."""
        with open(self.cfg) as f:
            content = yaml.safe_load(f)
        try:
            self.cur_config = self._get_specified_version_cfg(content)
        except Exception as e:  # pragma: no cover
            # NOTE(review): self.logger is not assigned in this class —
            # confirm the QueryBackendCapability base provides it.
            self.logger.info("Failed to parse {} due to {}".format(self.cfg, str(e)))
            self.cur_config = None
            raise ValueError("Please check the {} format.".format(self.cfg))

    def _get_specified_version_cfg(self, data):
        """Get the configuration for the current runtime.
        If there's no matched configuration in the input yaml, we'll
        use the `default` field of yaml.

        Args:
            data (Yaml content): input yaml file.
        Returns:
            [dictionary]: the content for specific version.
        """
        fallback = None
        for section in data:
            version_name = section['version']['name']
            if version_name == self.version:
                return section
            if version_name == 'default':
                fallback = section
        return fallback

    def get_version(self):
        """Return the version name recorded in the selected config section."""
        return self.cur_config['version']['name']

    def get_precisions(self):
        """Return the list of precision names supported by this backend."""
        return self.cur_config['precisions']['names']

    def get_op_types(self):
        """Return the mapping of precision -> supported op types."""
        return self.cur_config['ops']

    def get_fuse_patterns(self):
        """Return the mapping of precision -> supported fuse patterns."""
        return self.cur_config['patterns']

    def get_quantization_capability(self):
        """Return per-op-type quantization capability, keyed by precision."""
        return self.cur_config['capabilities']

    def get_op_types_by_precision(self, precision):
        """Return the op types supported at the given precision.

        Args:
            precision (string): precision name
        Returns:
            [string list]: A list composed of op type.
        """
        known_precisions = list(self.cur_config['ops'].keys())
        assert precision in known_precisions
        return self.cur_config['ops'][precision]
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,643
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/ux/components/benchmark/benchmark_model.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic benchmark script."""
import argparse
import logging as log
from typing import Any, Dict, List
# Best-effort TensorFlow import: force graph (non-eager) mode when TF is
# present; print the error and continue when it is not installed, so the
# script remains usable for non-TF frameworks.
try:
    import tensorflow as tf
    tf.compat.v1.disable_eager_execution()
except Exception as err:
    print(err)
def parse_args() -> Any:
    """Parse and return the benchmark script's command-line arguments."""
    arg_specs = [
        ("--config", {"type": str, "required": True, "help": "Path to yaml config."}),
        ("--input-graph", {"type": str, "required": False, "help": "Path to model."}),
        (
            "--mode",
            {
                "type": str,
                "default": "performance",
                "choices": ["accuracy", "performance"],
                "help": "Benchmark mode.",
            },
        ),
        ("--framework", {"type": str, "required": False, "help": "Framework to use."}),
    ]
    parser = argparse.ArgumentParser()
    for flag, options in arg_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()
def benchmark_model(
    input_graph: str,
    config: str,
    benchmark_mode: str,
    framework: str,
    datatype: str = "",
) -> List[Dict[str, Any]]:
    """Run the lpot Benchmark on a model and return per-mode result dicts.

    Only results whose mode matches `benchmark_mode` are logged and collected.
    """
    from lpot import Benchmark, common

    collected: List[Dict[str, Any]] = []
    if framework == "onnxrt":
        # ONNX models are handed to lpot as loaded protobufs, not paths.
        import onnx

        input_graph = onnx.load(input_graph)
    evaluator = Benchmark(config)
    evaluator.model = common.Model(input_graph)
    for mode, result in evaluator().items():
        if benchmark_mode != mode:
            continue
        log.info(f"Mode: {mode}")
        acc, batch_size, result_list = result
        latency = (sum(result_list) / len(result_list)) / batch_size
        log.info(f"Batch size: {batch_size}")
        if mode == "accuracy":
            log.info(f"Accuracy: {acc:.3f}")
        elif mode == "performance":
            log.info(f"Latency: {latency * 1000:.3f} ms")
            log.info(f"Throughput: {1. / latency:.3f} images/sec")
        collected.append(
            {
                "precision": datatype,
                "mode": mode,
                "batch_size": batch_size,
                "accuracy": acc,
                "latency": latency * 1000,
                "throughput": 1.0 / latency,
            },
        )
    return collected
# Script entry point: forward the parsed CLI options to benchmark_model.
if __name__ == "__main__":
    args = parse_args()
    benchmark_model(
        input_graph=args.input_graph,
        config=args.config,
        benchmark_mode=args.mode,
        framework=args.framework,
    )
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,644
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/ux/utils/json_serializer.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JsonSerializer module."""
import re
from typing import Any, Dict, List, Union
class JsonSerializer:
    """Base class for objects that can serialize themselves to a dict."""

    def __init__(self) -> None:
        """Initialize the serializer with its default skip-list."""
        # Attribute names listed here are never included in the output.
        self._skip = ["_skip"]

    def serialize(
        self,
        serialization_type: str = "default",
    ) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
        """
        Serialize this object's attributes into a dict.

        :param serialization_type: serialization type, defaults to "default"
        :type serialization_type: str, optional
        :return: serialized class
        :rtype: Union[dict, List[dict]]
        """
        serialized: Dict[str, Any] = {}
        for attr_name, attr_value in self.__dict__.items():
            # Skip-listed and unset (None) attributes are omitted entirely.
            if attr_name in self._skip or attr_value is None:
                continue
            # A single leading underscore is stripped from the public key.
            public_name = re.sub(r"^_", "", attr_name)
            if isinstance(attr_value, list):
                if not attr_value:
                    continue
                # NOTE(review): list items that are not JsonSerializer
                # subclasses are silently dropped — matches original behavior.
                # pylint: disable=maybe-no-member
                serialized[public_name] = [
                    element.serialize(serialization_type)
                    for element in attr_value
                    if issubclass(type(element), JsonSerializer)
                ]
            elif issubclass(type(attr_value), JsonSerializer):
                # pylint: disable=maybe-no-member
                serialized[public_name] = attr_value.serialize(serialization_type)
            else:
                serialized[public_name] = self.serialize_item(attr_value)
        return serialized

    def serialize_item(self, value: Any) -> Any:
        """
        Serialize objects that don't support json dump.

        i.e datetime object can't be serialized to JSON format and throw an TypeError exception
        TypeError: datetime.datetime(2016, 4, 8, 11, 22, 3, 84913) is not JSON serializable
        To handle that override method serialize_item to convert object

        >>> serialize_item(datetime)
        "2016-04-08T11:22:03.084913"

        For all other cases it should return serializable object i.e. str, int float

        :param value: Any type
        :return: Value that can be handled by json.dump
        """
        return value
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,645
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/ux/web/router.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Connector between api.py and components."""
import os
from threading import Thread
from lpot.ux.components.benchmark.execute_benchmark import execute_benchmark
from lpot.ux.components.configuration_wizard.get_boundary_nodes import (
get_boundary_nodes,
)
from lpot.ux.components.configuration_wizard.get_configuration import (
get_predefined_configuration,
)
from lpot.ux.components.configuration_wizard.params_feeder import (
get_possible_values,
get_possible_values_v2,
)
from lpot.ux.components.configuration_wizard.save_workload import save_workload
from lpot.ux.components.file_browser.file_browser import get_directory_entries
from lpot.ux.components.manage_workspace import (
get_default_path,
get_workloads_list,
set_workspace,
)
from lpot.ux.components.model_zoo.download_config import download_config
from lpot.ux.components.model_zoo.download_model import download_model
from lpot.ux.components.model_zoo.list_models import list_models
from lpot.ux.components.tune.execute_tune import execute_tuning
from lpot.ux.utils.templates.workdir import Workdir
from lpot.ux.web.communication import Request, Response, create_simple_response
from lpot.ux.web.exceptions import ServiceNotFoundException
class Router:
    """Connector between api.py and components."""

    def __init__(self) -> None:
        """Initialize object.

        Workloads left in "wip" state by a previous (possibly interrupted)
        run are reset on startup so stale in-progress entries disappear.
        """
        clean_workloads_wip_status()

    def handle(self, request: Request) -> Response:
        """Run operation on requested component and return result.

        :param request: incoming request carrying ``operation`` and ``data``
        :raises ServiceNotFoundException: when ``request.operation`` has no
            registered component entry point
        :return: simple response wrapping the component's return value
        """
        # Maps an operation name to the component callable that serves it.
        # Long-running operations (tune/benchmark/downloads/...) go through
        # process_request_for_* wrappers, which run on a daemon thread and
        # reply immediately with a 102/processing acknowledgement.
        operation_map = {
            "filesystem": get_directory_entries,
            "save_workload": save_workload,
            "configuration": get_predefined_configuration,
            "tune": process_request_for_tuning,
            "benchmark": process_request_for_benchmark,
            "get_default_path": get_default_path,
            "set_workspace": set_workspace,
            "get_workloads_list": get_workloads_list,
            "get_boundary_nodes": process_request_for_boundary_nodes,
            "get_possible_values": get_possible_values,
            "get_possible_values_v2": get_possible_values_v2,
            "download_model": process_request_for_model_download,
            "list_model_zoo": list_models,
            "download_config": process_request_for_model_config,
        }

        operation = operation_map.get(request.operation)
        if operation is None:
            raise ServiceNotFoundException(f"Unable to find {request.operation}")
        data = operation(request.data)
        return create_simple_response(data)
def _start_daemon_thread(target, data: dict) -> dict:
    """Run *target* with *data* on a daemon thread; acknowledge immediately.

    Shared helper for all asynchronous component entry points — the five
    process_request_for_* functions previously repeated this boilerplate.

    :param target: callable executed asynchronously with ``data``
    :param data: request payload forwarded to ``target``
    :return: immediate 102/processing acknowledgement for the client
    """
    t = Thread(target=target, args=(data,))
    # Daemon threads do not block interpreter shutdown.
    t.daemon = True
    t.start()
    return {"exit_code": 102, "message": "processing"}


def process_request_for_tuning(data: dict) -> dict:
    """Execute tuning (and follow-up benchmark) asynchronously."""
    return _start_daemon_thread(_execute_tuning_benchmark, data)


def process_request_for_benchmark(data: dict) -> dict:
    """Execute benchmark asynchronously."""
    return _start_daemon_thread(execute_benchmark, data)


def process_request_for_boundary_nodes(data: dict) -> dict:
    """Detect model boundary nodes asynchronously."""
    return _start_daemon_thread(get_boundary_nodes, data)


def process_request_for_model_download(data: dict) -> dict:
    """Download a model asynchronously."""
    return _start_daemon_thread(download_model, data)


def process_request_for_model_config(data: dict) -> dict:
    """Download a model configuration asynchronously."""
    return _start_daemon_thread(download_config, data)
def _execute_tuning_benchmark(data: dict) -> None:
    """Execute both tuning and benchmark.

    Runs tuning synchronously, then benchmarks the original fp32 model and
    the int8 model produced by tuning.

    :param data: request payload; ``id`` and ``workspace_path`` are forwarded
        to the benchmark step
    """
    tuning_data = execute_tuning(data)
    # Model paths come out of the tuning step's execution details; missing
    # keys degrade to a None path rather than raising.
    benchmark_data = {
        "id": data.get("id"),
        "models": [
            {
                "precision": "fp32",
                "path": tuning_data.get("execution_details", {})
                .get("tuning", {})
                .get("model_path"),
            },
            {
                "precision": "int8",
                "path": tuning_data.get("execution_details", {})
                .get("tuning", {})
                .get(
                    "model_output_path",
                ),
            },
        ],
        "workspace_path": data.get("workspace_path"),
    }

    # NOTE(review): benchmark is skipped for custom dataloaders — presumably
    # because the benchmark component cannot reproduce them; confirm against
    # execute_benchmark's requirements.
    if not tuning_data.get("is_custom_dataloader", None):
        execute_benchmark(benchmark_data)
def clean_workloads_wip_status() -> None:
    """Reset every "wip" workload status in workloads_list.json."""
    # The workspace lives under the user's home directory; clearing "wip"
    # removes in-progress markers left behind by interrupted runs.
    workspace = Workdir(workspace_path=os.environ["HOME"])
    workspace.clean_status(status_to_clean="wip")
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,646
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/adaptor/pytorch.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from collections import OrderedDict
import yaml
from lpot.utils.utility import dump_elapsed_time
from .adaptor import adaptor_registry, Adaptor
from ..utils.utility import LazyImport, CpuInfo
from ..utils import logger
from .query import QueryBackendCapability
# Heavyweight dependencies are imported lazily: they are only resolved on
# first attribute access, so importing this adaptor module does not require
# them to be installed.
torch = LazyImport('torch')
ipex = LazyImport('intel_pytorch_extension')
json = LazyImport('json')

# Observers reduce the quantized value range by one bit on CPUs without
# VNNI support — presumably to avoid int8 saturation on pre-VNNI hardware;
# see the `reduce_range` argument of torch observers. (Was the double
# negative `False if CpuInfo().vnni else True`.)
REDUCE_RANGE = not CpuInfo().vnni
# Lazy %-formatting keeps the value out of the hot path when DEBUG is off;
# replaces two separate eager debug calls.
logger.debug("reduce range: %s", REDUCE_RANGE)
def _cfg_to_qconfig(tune_cfg, is_insert_fakequant=False):
    """Convert tune configure to quantization config for each op.

    Args:
        tune_cfg (dict): dictionary of tune configure for each op
        is_insert_fakequant (bool, optional): specify if the module to insert is
                                              fake quantization module.

    Returns:
        op_qcfgs (dict): dictionary of quantization configure for each op

    tune_cfg should be a format like below::

        {
          'fuse': {'int8': [['CONV2D', 'RELU', 'BN'], ['CONV2D', 'RELU']],
                   'fp32': [['CONV2D', 'RELU', 'BN']]},
          'calib_iteration': 10,
          'op': {
             ('op1', 'CONV2D'): {
               'activation':  {'dtype': 'uint8', 'algorithm': 'minmax',
                               'scheme': 'sym', 'granularity': 'per_tensor'},
               'weight': {'dtype': 'int8', 'algorithm': 'kl',
                          'scheme': 'asym', 'granularity': 'per_channel'}
             },
             ('op2', 'RELU'): {
               'activation': {'dtype': 'int8', 'scheme': 'asym',
                              'granularity': 'per_tensor', 'algorithm': 'minmax'}
             },
             ('op3', 'CONV2D'): {
               'activation':  {'dtype': 'fp32'},
               'weight': {'dtype': 'fp32'}
             },
             ...
          }
        }
    """
    op_qcfgs = OrderedDict()
    for op_key, op_cfg in tune_cfg['op'].items():
        assert isinstance(op_cfg, dict)
        assert 'activation' in op_cfg
        if op_cfg['activation']['dtype'] == 'fp32':
            # fp32 ops carry no qconfig; their weight (if present) must be
            # fp32 as well.
            if 'weight' in op_cfg:
                assert op_cfg['weight']['dtype'] == 'fp32'
            op_qcfgs[op_key] = None
            continue
        # QAT inserts fake-quantize modules; PTQ inserts plain observers.
        make_module = _fake_quantize if is_insert_fakequant else _observer
        weight_module = None
        if 'weight' in op_cfg:
            w_cfg = op_cfg['weight']
            weight_module = make_module(
                w_cfg['algorithm'], w_cfg['scheme'], w_cfg['granularity'], w_cfg['dtype'])
        a_cfg = op_cfg['activation']
        activation_module = make_module(
            a_cfg['algorithm'], a_cfg['scheme'], a_cfg['granularity'], a_cfg['dtype'])
        op_qcfgs[op_key] = torch.quantization.QConfig(
            activation=activation_module, weight=weight_module)
    return op_qcfgs
def _observer(algorithm, scheme, granularity, dtype):
    """Construct an observer module, In forward, observer will update the statistics of
    the observed Tensor. And they should provide a `calculate_qparams` function
    that computes the quantization parameters given the collected statistics.

    Args:
        algorithm (string): What algorithm for computing the quantization parameters based on.
        scheme (string): Quantization scheme to be used.
        granularity (string): What granularity to computing the quantization parameters,
                              per channel or per tensor.
        dtype (string): Quantized data type

    Returns:
        oberser (object)
    """
    if algorithm == 'minmax':
        if granularity == 'per_channel':
            observer = torch.quantization.PerChannelMinMaxObserver
            if scheme == 'sym':
                qscheme = torch.per_channel_symmetric
            else:
                # Only 'sym' and 'asym' schemes are supported.
                assert scheme == 'asym'
                qscheme = torch.per_channel_affine
        else:
            assert granularity == 'per_tensor'
            observer = torch.quantization.MinMaxObserver
            if scheme == 'sym':
                qscheme = torch.per_tensor_symmetric
            else:
                assert scheme == 'asym'
                qscheme = torch.per_tensor_affine
    else:
        # 'kl' calibration uses a histogram observer and only supports
        # per-tensor granularity.
        assert algorithm == 'kl'
        observer = torch.quantization.HistogramObserver
        assert granularity == 'per_tensor'
        if scheme == 'sym':
            qscheme = torch.per_tensor_symmetric
        else:
            assert scheme == 'asym'
            qscheme = torch.per_tensor_affine

    # Map the string dtype onto the corresponding torch quantized dtype.
    # (The `dtype` name is deliberately rebound from str to torch.dtype.)
    if dtype == 'int8':
        dtype = torch.qint8
    else:
        assert dtype == 'uint8'
        dtype = torch.quint8

    # reduce_range shrinks the quantized range by one bit on non-VNNI CPUs;
    # it is only applied for asymmetric schemes here.
    return observer.with_args(qscheme=qscheme, dtype=dtype,
                              reduce_range=(REDUCE_RANGE and scheme == 'asym'))
def _fake_quantize(algorithm, scheme, granularity, dtype):
    """Construct a fake quantize module, In forward, fake quantize module will update
    the statistics of the observed Tensor and fake quantize the input.
    They should also provide a `calculate_qparams` function
    that computes the quantization parameters given the collected statistics.

    Args:
        algorithm (string): What algorithm for computing the quantization parameters based on.
        scheme (string): Quantization scheme to be used.
        granularity (string): What granularity to computing the quantization parameters,
                              per channel or per tensor.
        dtype (sting): Quantized data type

    Return:
        fake quantization (object)
    """
    fake_quant = torch.quantization.FakeQuantize
    if algorithm == 'minmax':
        if granularity == 'per_channel':
            # Moving-average observers are used here (QAT) so statistics
            # adapt across training steps, unlike the plain observers in
            # `_observer`.
            observer = torch.quantization.MovingAveragePerChannelMinMaxObserver
            if scheme == 'sym':
                qscheme = torch.per_channel_symmetric
            else:
                assert scheme == 'asym'
                qscheme = torch.per_channel_affine
        else:
            assert granularity == 'per_tensor'
            observer = torch.quantization.MovingAverageMinMaxObserver
            if scheme == 'sym':
                qscheme = torch.per_tensor_symmetric
            else:
                assert scheme == 'asym'
                qscheme = torch.per_tensor_affine
    else:
        # 'kl' calibration uses a histogram observer, per-tensor only.
        assert algorithm == 'kl'
        observer = torch.quantization.HistogramObserver
        assert granularity == 'per_tensor'
        if scheme == 'sym':
            qscheme = torch.per_tensor_symmetric
        else:
            assert scheme == 'asym'
            qscheme = torch.per_tensor_affine

    # Map the string dtype to the torch quantized dtype plus its full
    # integer range. (`dtype` is rebound from str to torch.dtype.)
    if dtype == 'int8':
        qmin = -128
        qmax = 127
        dtype = torch.qint8
    else:
        assert dtype == 'uint8'
        qmin = 0
        qmax = 255
        dtype = torch.quint8

    return fake_quant.with_args(observer=observer, quant_min=qmin, quant_max=qmax,
                                dtype=dtype, qscheme=qscheme,
                                reduce_range=(REDUCE_RANGE and scheme == 'asym'))
def _propagate_qconfig(model, op_qcfgs):
    """Propagate qconfig through the module hierarchy and assign `qconfig`
    attribute on each leaf module

    Args:
        model (object): input model
        op_qcfgs (dict): dictionary that maps from name or type of submodule to
                         quantization configuration, qconfig applies to all submodules of a
                         given module unless qconfig for the submodules are specified (when
                         the submodule already has qconfig attribute)

    Return:
        None, module is modified inplace with qconfig attached
    """
    fallback_ops = []
    # Quantizable op types = torch's propagate white list minus the entries
    # torch only includes for qconfig propagation (private torch API).
    WHITE_LIST = torch.quantization.default_mappings.DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST \
        - torch.quantization.default_mappings._INCLUDE_QCONFIG_PROPAGATE_LIST
    for k, v in op_qcfgs.items():
        # k is an (op_name, op_type) tuple; a None qconfig means fp32.
        if v is None and k[1] != 'QuantStub' \
                and k[1] != 'DeQuantStub':
            # fp32 op (other than the quant/dequant stubs): handled later as
            # a fallback op.
            fallback_ops.append(k[0])
        else:
            if v is None:
                # Quant/DeQuant stubs with no explicit config get a default
                # minmax observer pair.
                weights_observer = _observer('minmax', 'asym',
                                             'per_channel', 'int8')
                activation_observer = _observer('minmax', 'sym',
                                                'per_tensor', 'uint8')
                v = torch.quantization.QConfig(
                    activation=activation_observer, weight=weights_observer)
            op_qcfg = {k[0]: v}
            _propagate_qconfig_recursively(model, '', op_qcfg, white_list=WHITE_LIST)

    if fallback_ops:
        _fallback_quantizable_ops_recursively(model, '', fallback_ops)
def _propagate_qconfig_recursively(model, prefix, op_qcfg, white_list, qconfig_parent=None):
    """This is a helper function for `propagate_qconfig`

    Args:
        model (object): input model
        prefix (string): prefix of op name
        op_qcfg (dict): dictionary that maps from name or type of submodule to
                        quantization configuration
        white_list (list): list of quantizable op types in pytorch
        qconfig_parent (object, optional): qconfig of parent module

    Returns:
        None
    """
    for name, child in model.named_children():
        model_qconfig = qconfig_parent
        op_name = prefix + name
        if op_name in op_qcfg:
            # Exact-name match: the op's own qconfig wins and also becomes
            # the parent qconfig propagated to its children.
            child.qconfig = op_qcfg[op_name]
            model_qconfig = op_qcfg[op_name]
        elif model_qconfig is not None and type(child) in white_list:
            # Quantizable child with no explicit entry inherits the parent's.
            child.qconfig = model_qconfig
        _propagate_qconfig_recursively(
            child, op_name + '.', op_qcfg, white_list, model_qconfig)
def _find_quantized_op_num(model, white_list, op_count=0):
    """Count the quantizable ops contained in *model*, recursively.

    Helper for `_fallback_quantizable_ops_recursively`.

    Args:
        model (object): module whose children are inspected
        white_list (list): list of quantizable op types in pytorch
        op_count (int, optional): running total to add onto

    Returns:
        int: the quantizable op quantity in this module (plus *op_count*)
    """
    total = op_count
    for _, submodule in model.named_children():
        is_stub = isinstance(submodule, (torch.quantization.QuantStub,
                                         torch.quantization.DeQuantStub))
        if type(submodule) in white_list and not is_stub:
            total += 1
        else:
            # Containers and non-quantizable modules are searched depth-first.
            total = _find_quantized_op_num(submodule, white_list, total)
    return total
def _fallback_quantizable_ops_recursively(model, prefix, fallback_ops):
    """Handle all fallback ops(fp32 ops)

    Args:
        model (object): input model
        prefix (string): the prefix of op name
        fallback_ops (list): list of fallback ops(fp32 ops)

    Returns:
        None
    """
    class DequantQuantWrapper(torch.nn.Module):
        """A wrapper class that wraps the input module, adds DeQuantStub and
        surround the call to module with call to dequant.
        this is used by fallback layer when the data type of quantized op
        is  input:int8/output:int8.

        This is used by the fallback utility functions to add the dequant and
        quant modules, before `convert` function `QuantStub` will just be observer,
        it observes the input tensor, after `convert`, `QuantStub`
        will be swapped to `nnq.Quantize` which does actual quantization. Similarly
        for `DeQuantStub`.
        """

        def __init__(self, module, observer=None):
            super(DequantQuantWrapper, self).__init__()
            # If the wrapped module has no qconfig, fabricate a default
            # minmax one so the surrounding QuantStub can observe.
            if not module.qconfig and observer:
                weights_observer = observer('minmax', 'asym', 'per_channel', 'int8')
                activation_observer = observer('minmax', 'sym', 'per_tensor', 'uint8')
                module.qconfig = torch.quantization.QConfig(
                    activation=activation_observer, weight=weights_observer)
            self.add_module('quant', torch.quantization.QuantStub(module.qconfig))
            self.add_module('dequant', torch.quantization.DeQuantStub())
            self.add_module('module', module)
            # The inner module itself stays fp32.
            module.qconfig = None
            self.train(module.training)

        def forward(self, X):
            # dequantize input -> run fp32 module -> re-quantize output.
            X = self.dequant(X)
            X = self.module(X)
            return self.quant(X)

        def add(self, x, y):
            # type: (Tensor, Tensor) -> Tensor
            x = self.dequant(x)
            y = self.dequant(y)
            r = self.module.add(x, y)
            return self.quant(r)

        def add_scalar(self, x, y):
            # type: (Tensor, float) -> Tensor
            x = self.dequant(x)
            r = self.module.add_scalar(x, y)
            return self.quant(r)

        def mul(self, x, y):
            # type: (Tensor, Tensor) -> Tensor
            x = self.dequant(x)
            y = self.dequant(y)
            r = self.module.mul(x, y)
            return self.quant(r)

        def mul_scalar(self, x, y):
            # type: (Tensor, float) -> Tensor
            x = self.dequant(x)
            r = self.module.mul_scalar(x, y)
            return self.quant(r)

        def cat(self, x, dim=0):
            # type: (List[Tensor], int) -> Tensor
            X = [self.dequant(x_) for x_ in x]
            r = self.module.cat(X, dim)
            return self.quant(r)

        def add_relu(self, x, y):
            # type: (Tensor, Tensor) -> Tensor
            x = self.dequant(x)
            y = self.dequant(y)
            r = self.module.add_relu(x, y)
            return self.quant(r)

    # Quantizable op types per torch's (private) propagation lists.
    WHITE_LIST = torch.quantization.default_mappings.DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST \
        - torch.quantization.default_mappings._INCLUDE_QCONFIG_PROPAGATE_LIST
    for name, child in model.named_children():
        op_name = prefix + name
        if op_name in fallback_ops:
            child.qconfig = None
            quantize_op_num = _find_quantized_op_num(model, white_list=WHITE_LIST)
            if quantize_op_num == 1:
                # This fallback op is the only quantizable op in the parent:
                # drop the parent's Quant/DeQuant stubs if present, otherwise
                # wrap the child so it runs in fp32 between dequant/quant.
                found = False
                for name_tmp, child_tmp in model.named_children():
                    if isinstance(
                            child_tmp, torch.quantization.QuantStub) or isinstance(
                            child_tmp, torch.quantization.DeQuantStub):
                        model._modules[name_tmp] = torch.nn.Identity()
                        found = True
                if not found:
                    model._modules[name] = DequantQuantWrapper(
                        child, observer=_observer)
            else:
                model._modules[name] = DequantQuantWrapper(
                    child, observer=_observer)
        else:
            _fallback_quantizable_ops_recursively(
                child, op_name + '.', fallback_ops)
@adaptor_registry
class TemplateAdaptor(Adaptor):
    """Template adaptor of PyTorch framework.

    Args:
        framework_specific_info (dict): dictionary of tuning configure from yaml file.
    """
    # (The original class placed this docstring AFTER the class attribute
    # below, so it was a dead string statement instead of ``__doc__``; it is
    # now a real docstring, with the "Tample" typo fixed.)

    # Subclasses override this with a mapping from fused/framework op-type
    # names to canonical op-type names; None means "no unification".
    unify_op_type_mapping = None

    def __init__(self, framework_specific_info):
        super(TemplateAdaptor, self).__init__(framework_specific_info)
        # Seed torch's RNG so calibration/tuning runs are reproducible.
        random_seed = framework_specific_info['random_seed']
        torch.manual_seed(random_seed)

        self.approach = framework_specific_info['approach']
        self.device = framework_specific_info['device']
        self.q_dataloader = framework_specific_info['q_dataloader']
        # 'benchmark' is optional in the config; default to False.
        self.benchmark = framework_specific_info.get('benchmark', False)
        # Baseline (fp32) evaluation only happens outside benchmark mode.
        self.is_baseline = not self.benchmark
        self.query_handler = None

        # Select the float->quantized module swap map for the approach.
        if framework_specific_info['approach'] == "post_training_static_quant":
            self.q_mapping = torch.quantization.default_mappings.DEFAULT_MODULE_MAPPING
        elif framework_specific_info['approach'] == "quant_aware_training":
            self.q_mapping = torch.quantization.default_mappings.DEFAULT_QAT_MODULE_MAPPING
        else:
            assert False, "Unsupport quantization approach: {}".format(self.approach)

    def _get_quantizable_ops_recursively(self, model, prefix, quantizable_ops):
        """This is a helper function for `query_fw_capability`,
        and it will get all quantizable ops from model.

        Args:
            model (object): input model
            prefix (string): prefix of op name
            quantizable_ops (list): list of quantizable ops from model include op name and type.

        Returns:
            None
        """
        # Framework-specific subclasses must implement the traversal.
        raise NotImplementedError

    @dump_elapsed_time("Pass query framework capability")
    def query_fw_capability(self, model):
        """This is a helper function to get all quantizable ops from model.

        Args:
            model (object): input model which is LPOT model

        Returns:
            q_capability (dictionary): tuning capability for each op from model.
        """
        quantizable_ops = []
        self._get_quantizable_ops_recursively(model.model, '', quantizable_ops)
        capability = self.query_handler.get_quantization_capability()['int8']

        q_capability = {}
        q_capability['optypewise'] = OrderedDict()
        q_capability['opwise'] = OrderedDict()

        for q_op in quantizable_ops:
            op_type = q_op[1]
            # Unknown op types fall back to the 'default' capability.
            # deepcopy each entry separately so opwise and optypewise
            # configurations can be tuned independently.
            q_capability['opwise'][q_op] = copy.deepcopy(
                capability.get(op_type, capability['default']))
            if op_type not in q_capability['optypewise']:
                q_capability['optypewise'][op_type] = copy.deepcopy(
                    capability.get(op_type, capability['default']))

        return q_capability
@adaptor_registry
class PyTorchAdaptor(TemplateAdaptor):
unify_op_type_mapping = {
"ConvReLU2d": "Conv2d",
"ConvReLU3d": "Conv3d",
"LinearReLU": "Linear",
"ConvBn2d": "Conv2d",
"ConvBnReLU2d": "Conv2d"
}
"""Adaptor of PyTorch framework, all PyTorch API is in this class.
Args:
framework_specific_info (dict): dictionary of tuning configure from yaml file.
"""
def __init__(self, framework_specific_info):
super(PyTorchAdaptor, self).__init__(framework_specific_info)
"""
# Map for swapping float module to quantized ones,
# and this dictionary will change with different PoTorch versions
DEFAULT_MODULE_MAPPING = {
nn.Linear: nnq.Linear,
nn.ReLU: nnq.ReLU,
nn.ReLU6: nnq.ReLU6,
nn.Conv2d: nnq.Conv2d,
nn.Conv3d: nnq.Conv3d,
QuantStub: nnq.Quantize,
DeQuantStub: nnq.DeQuantize,
# Wrapper Modules:
nnq.FloatFunctional: nnq.QFunctional,
# Intrinsic modules:
nni.ConvReLU2d: nniq.ConvReLU2d,
nni.ConvReLU3d: nniq.ConvReLU3d,
nni.LinearReLU: nniq.LinearReLU,
nniqat.ConvReLU2d: nniq.ConvReLU2d,
nniqat.LinearReLU: nniq.LinearReLU,
nniqat.ConvBn2d: nnq.Conv2d,
nniqat.ConvBnReLU2d: nniq.ConvReLU2d,
# QAT modules:
nnqat.Linear: nnq.Linear,
nnqat.Conv2d: nnq.Conv2d,
}
"""
if framework_specific_info['approach'] == "post_training_static_quant":
self.q_mapping = torch.quantization.default_mappings.DEFAULT_MODULE_MAPPING
elif framework_specific_info['approach'] == "quant_aware_training":
self.q_mapping = torch.quantization.default_mappings.DEFAULT_QAT_MODULE_MAPPING
else:
assert False, "Unsupport quantization approach: {}".format(self.approach)
self.tune_cfg = None
if self.device == "cpu":
query_config_file = "pytorch_cpu.yaml"
elif self.device == "gpu":
query_config_file = "pytorch_gpu.yaml"
else:
assert False, "Unsupport this device {}".format(self.device)
self.query_handler = PyTorchQuery(local_config_file=os.path.join(
os.path.dirname(__file__), query_config_file))
self.white_list = \
torch.quantization.default_mappings.DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST \
- torch.quantization.default_mappings._INCLUDE_QCONFIG_PROPAGATE_LIST
# for tensorboard
self.dump_times = 0
self.fused_op = ['nni.ConvReLU1d',
'nni.ConvReLU2d',
'nni.ConvReLU3d',
'nni.LinearReLU',
'nni.BNReLU2d',
'nni.BNReLU3d',
'nniqat.ConvReLU2d',
'nniqat.ConvBn2d',
'nniqat.ConvBnReLU2d',
'nni.LinearReLU']
self.fused_dict = {}
if framework_specific_info['approach'] == "post_training_static_quant":
self.q_mapping = torch.quantization.default_mappings.DEFAULT_MODULE_MAPPING
elif framework_specific_info['approach'] == "quant_aware_training":
self.q_mapping = torch.quantization.default_mappings.DEFAULT_QAT_MODULE_MAPPING
else:
assert False, "Unsupport quantization approach: {}".format(self.approach)
def model_calibration(self, q_model, dataloader, iterations=1):
assert iterations > 0
with torch.no_grad():
for idx, (input, label) in enumerate(dataloader):
if isinstance(input, dict):
if self.device == "gpu":
for inp in input.keys():
input[inp] = input[inp].to("dpcpp")
output = q_model(**input)
elif isinstance(input, list) or isinstance(input, tuple):
if self.device == "gpu":
input = [inp.to("dpcpp") for inp in input]
output = q_model(*input)
else:
if self.device == "gpu":
input = input.to("dpcpp")
output = q_model(input)
if idx >= iterations - 1:
break
    @dump_elapsed_time("Pass quantize model")
    def quantize(self, tune_cfg, model, dataloader, q_func=None):
        """Execute the quantize process on the specified model.

        Args:
            tune_cfg (dict): quantization config.
            model (object): model need to do quantization.
            dataloader (object): calibration dataset.
            q_func (objext, optional): training function for quantization aware training mode.

        Returns:
            (dict): quantized model
        """
        assert isinstance(model.model, torch.nn.Module), \
            "The model passed in is not the instance of torch.nn.Module"

        # Work on a deep copy so the caller's model is left untouched.
        q_model = copy.deepcopy(model)
        if self.approach == 'quant_aware_training':
            q_model.model.train()

        # For tensorboard display
        self.tune_cfg = tune_cfg

        # QAT inserts fake-quantize modules; PTQ inserts observers.
        op_cfgs = _cfg_to_qconfig(
            tune_cfg, (self.approach == 'quant_aware_training'))
        _propagate_qconfig(q_model.model, op_cfgs)
        # sanity check common API misusage
        if not any(hasattr(m, 'qconfig') and m.qconfig for m in q_model.model.modules()):
            # NOTE(review): logger.warn is deprecated in favor of
            # logger.warning.
            logger.warn("None of the submodule got qconfig applied. Make sure you "
                        "passed correct configuration through `qconfig_dict` or "
                        "by assigning the `.qconfig` attribute directly on submodules")
        torch.quantization.add_observer_(q_model.model)

        if self.approach == 'post_training_static_quant':
            # Collect activation statistics over the calibration set.
            iterations = tune_cfg.get('calib_iteration', 1)
            self.model_calibration(q_model.model, dataloader, iterations)
        elif self.approach == 'quant_aware_training':
            # Swap to QAT modules first, then let the user training function
            # fine-tune before the final conversion below.
            torch.quantization.convert(q_model.model, self.q_mapping, inplace=True)
            if q_func is None:
                assert False, "quantization aware training mode requires q_function to train"
            else:
                q_func(q_model.model)
            q_model.model.eval()

        # Replace observed modules with their quantized counterparts.
        torch.quantization.convert(q_model.model, inplace=True)
        q_model.tune_cfg = copy.deepcopy(self.tune_cfg)

        # The first quantize call marks the baseline as consumed.
        if self.is_baseline:
            self.is_baseline = False

        return q_model
    def evaluate(self, model, dataloader, postprocess=None,
                 metric=None, measurer=None, iteration=-1,
                 tensorboard=False, fp32_baseline=False):
        """Execute the evaluate process on the specified model.

        Args:
            model (object): model to run evaluation.
            dataloader (object): evaluation dataset.
            postprocess (object, optional): process function after evaluation.
            metric (object, optional): metric function.
            measurer (object, optional): measurer function.
            iteration (int, optional): number of iterations to evaluate.
            tensorboard (bool, optional): dump output tensor to tensorboard summary files.
            fp32_baseline (boolen, optional): only for compare_label=False pipeline

        Returns:
            (dict): quantized model
        """
        if tensorboard:
            # Install recording hooks so activations can be dumped.
            model = self._pre_eval_hook(model)
        model_ = model.model
        assert isinstance(
            model_, torch.nn.Module), "The model passed in is not the instance of torch.nn.Module"
        model_.eval()
        if self.device == "cpu":
            model_.to("cpu")
        elif self.device == "gpu":
            # Only the fp32 baseline run is moved to the "dpcpp" device.
            if self.is_baseline:
                model_.to("dpcpp")

        with torch.no_grad():
            # NOTE(review): `input` shadows the builtin of the same name.
            for idx, (input, label) in enumerate(dataloader):
                if measurer is not None:
                    measurer.start()

                # Dispatch on batch shape: dict -> kwargs, sequence ->
                # positional args, otherwise a single input.
                if isinstance(input, dict):
                    if self.device == "gpu":
                        for inp in input.keys():
                            input[inp] = input[inp].to("dpcpp")
                    output = model_(**input)
                elif isinstance(input, list) or isinstance(input, tuple):
                    if self.device == "gpu":
                        input = [inp.to("dpcpp") for inp in input]
                    output = model_(*input)
                else:
                    if self.device == "gpu":
                        input = input.to("dpcpp")
                    output = model_(input)
                if self.device == "gpu":
                    # Bring the result back to host for metric computation.
                    output = output.to("cpu")

                if measurer is not None:
                    measurer.end()

                if postprocess is not None:
                    output, label = postprocess((output, label))
                if metric is not None:
                    metric.update(output, label)
                # iteration == -1 means iterate over the whole dataloader.
                if idx + 1 == iteration:
                    break
        acc = metric.result() if metric is not None else 0

        if tensorboard:
            self._post_eval_hook(model, accuracy=acc)
        return acc
def _get_quantizable_ops_recursively(self, model, prefix, quantizable_ops):
"""This is a helper function for `query_fw_capability`,
and it will get all quantizable ops from model.
Args:
model (object): input model
prefix (string): prefix of op name
quantizable_ops (list): list of quantizable ops from model include op name and type.
Returns:
None
"""
for name, child in model.named_children():
op_name = prefix + name
if type(child) in self.white_list:
quantizable_ops.append((
op_name, self.unify_op_type_mapping[str(child.__class__.__name__)]
if str(child.__class__.__name__) in self.unify_op_type_mapping else
str(child.__class__.__name__)))
else:
self._get_quantizable_ops_recursively(
child, op_name + '.', quantizable_ops)
def _pre_eval_hook(self, model):
    """The function is used to do some preprocession before evaluation phase.

    Here, it used to add hook for dump output tensor for quantizable ops.
    The model's quantizable leaves get a ``_RecordingObserver`` attached as
    ``activation_post_process`` plus a forward hook that feeds outputs to it.

    Args:
        model (object): input model (wrapper exposing ``.model``)

    Returns:
        model (object): model with hook
    """
    from abc import ABCMeta

    # Build an ABC base class that works under both Python 2 and 3.
    ABC = ABCMeta(str("ABC"), (object, ),
                  {})  # compatible with Python 2 *and* 3:

    class _RecordingObserver(ABC, torch.nn.Module):
        """The module is mainly for debug and records the tensor values during runtime.

        Args:
            iteration_list (list, optional): indexs of iteration which to dump tensor.
        """
        def __init__(self, iteration_list=None, **kwargs):
            super(_RecordingObserver, self).__init__(**kwargs)
            # iteration index -> recorded output tensor (on CPU).
            self.output_tensors_dict = OrderedDict()
            self.current_iter = 0
            self.iteration_list = iteration_list

        def forward(self, x):
            # Record only iteration 0 when no list was given, otherwise
            # exactly the requested iterations.
            if (self.iteration_list is None and self.current_iter == 0) or \
                    (self.iteration_list is not None and
                     self.current_iter in self.iteration_list):
                # Move to CPU if needed; clone so later in-place ops on x
                # don't change the recorded value.
                self.output_tensors_dict[self.current_iter] = x.to("cpu") \
                    if x.device != "cpu" else x.clone()
            self.current_iter += 1
            return x

        @torch.jit.export
        def get_tensor_value(self):
            # Returns the per-iteration recordings for later dumping.
            return self.output_tensors_dict

    def _observer_forward_hook(module, input, output):
        """Forward hook that calls observer on the output

        Args:
            module (object): input module
            input (object): module input
            output (object): module output

        Returns:
            module output tensor (object)
        """
        return module.activation_post_process(output)

    def _add_observer_(module, op_list=None, prefix=""):
        """Add observer for the leaf child of the module.

        This function insert observer module to all leaf child module that
        has a valid qconfig attribute.

        Args:
            module (object): input module with qconfig attributes for all the leaf modules that
                             we want to dump tensor
            op_list (list, optional): list of ops which to be dumped in module
            prefix (string): name of module

        Returns:
            None, module is modified inplace with added observer modules and forward_hooks
        """
        for name, child in module.named_children():
            op_name = name if prefix == "" else prefix + "." + name
            # FloatFunctional has no regular forward, so it only gets the
            # observer attribute, not a forward hook.
            if isinstance(child, torch.nn.quantized.FloatFunctional):
                if hasattr(child,
                           'qconfig') and child.qconfig is not None and (
                               op_list is None or op_name in op_list):
                    child.activation_post_process = \
                        child.qconfig.activation()
            else:
                _add_observer_(child, op_list, op_name)

        # Insert observers only for leaf nodes
        if hasattr(module, 'qconfig') and module.qconfig is not None and \
                len(module._modules) == 0 and not isinstance(module, torch.nn.Sequential) and \
                (op_list is None or prefix in op_list):
            # observer and hook will be gone after we swap the module
            module.add_module(
                'activation_post_process',
                module.qconfig.activation())
            module.register_forward_hook(_observer_forward_hook)

    def is_fused_module(module):
        """This is a helper function for `_propagate_qconfig_helper` to detecte
           if this module is fused.

        Args:
            module (object): input module

        Returns:
            (bool): is fused or not
        """
        # Derive a short 'nni.<ClassName>' tag from the repr of the type and
        # check it against the known fused-op names.
        op_type = str(type(module))
        op_type = op_type[op_type.rfind('.') + 1:].strip('>').strip('\'')
        op_type = 'nni.' + op_type
        if op_type in self.fused_op:
            return True
        else:
            return False

    def _propagate_qconfig_helper(module,
                                  qconfig_dict,
                                  white_list=None,
                                  qconfig_parent=None,
                                  prefix='',
                                  fused=False):
        """This is a helper function for `propagate_qconfig_`

        Args:
            module (object): input module
            qconfig_dict (dictionary): dictionary that maps from name of submodule to
                                       quantization configuration
            white_list (list, optional): list of quantizable modules
            qconfig_parent (object, optional): config of parent module, we will fallback to
                                               this config when there is no specified config
                                               for current module
            prefix (string, optional): corresponding prefix of the current module,
                                       used as key in qconfig_dict
            fused (bool, optional): Indicates whether the module is fused or not
                                    (received but not read in this body)

        Return:
            None, module is modified inplace with qconfig attached
        """
        # TODO: Add test
        if white_list is None:
            white_list = \
                torch.quantization.default_mappings.DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST

        # Resolution order: type-keyed entry, then name-keyed entry, then an
        # explicit .qconfig already on the module, else the parent's config.
        module_qconfig = qconfig_dict.get(type(module), qconfig_parent)
        module_qconfig = qconfig_dict.get(prefix, module_qconfig)
        module_qconfig = getattr(module, 'qconfig', module_qconfig)

        if type(module) in white_list:
            module.qconfig = module_qconfig
        for name, child in module.named_children():
            module_prefix = prefix + '.' + name if prefix else name
            # Record fused parents and their children in self.fused_dict so
            # the dump phase can merge child tags onto the parent op.
            if is_fused_module(module):
                if prefix in self.fused_dict:
                    self.fused_dict[prefix] = [self.fused_dict[prefix], module_prefix]
                else:
                    self.fused_dict[prefix] = module_prefix
                _fused = True
            else:
                _fused = False

            _propagate_qconfig_helper(child, qconfig_dict, white_list,
                                      module_qconfig, module_prefix, fused=_fused)

    def _prepare(model, inplace=True, op_list=[], white_list=None):
        """The model will be attached with observer or fake quant modules, and qconfig
           will be propagated.

        NOTE(review): op_list has a mutable default; safe here because it is
        only read, never mutated.

        Args:
            model (object): input model to be modified in-place
            inplace (bool, optional): carry out model transformations in-place,
                                      the original module is mutated
            op_list (list, optional): list of ops which to be dumped in module
            white_list (list, optional): list of quantizable modules

        Returns:
            model (object): model with qconfig
        """
        if not inplace:
            model = copy.deepcopy(model)
        _propagate_qconfig_helper(model,
                                  qconfig_dict={},
                                  white_list=white_list)
        # sanity check common API misusage
        if not any(
                hasattr(m, 'qconfig') and m.qconfig
                for m in model.modules()):
            logger.warn(
                "None of the submodule got qconfig applied. Make sure you "
                "passed correct configuration through `qconfig_dict` or "
                "by assigning the `.qconfig` attribute directly on submodules"
            )
        _add_observer_(model, op_list=op_list)
        return model

    # create properties
    # Union of the adaptor white list with all default static/QAT/dynamic
    # mapped module types.
    white_list = self.white_list | \
        (set(torch.quantization.default_mappings.DEFAULT_MODULE_MAPPING.values()) |
         set(torch.quantization.default_mappings.DEFAULT_QAT_MODULE_MAPPING.values()) |
         set(torch.quantization.default_mappings.DEFAULT_DYNAMIC_MODULE_MAPPING.values()))

    # Only the baseline pass works on a copy; tuned passes mutate in place.
    model = copy.deepcopy(model) if self.is_baseline else model
    model.model.qconfig = torch.quantization.QConfig(
        weight=torch.quantization.default_weight_observer,
        activation=_RecordingObserver)
    _prepare(model.model, op_list=None, white_list=white_list)

    return model
def is_fused_child(self, op_name):
    """Return True if *op_name* names a child of a fused op.

    A child is recognized when the parent part (text before the last dot)
    is a key of ``self.fused_dict`` and the final component is numeric.

    Args:
        op_name (string): op name

    Returns:
        (bool): if this op is fused
    """
    dot = op_name.rfind('.')
    parent, suffix = op_name[:dot], op_name[dot + 1:]
    return parent in self.fused_dict and suffix.isdigit()
def is_fused_op(self, op_name):
    """Return True if the parent of *op_name* is a fused module.

    Args:
        op_name (string): op name

    Returns:
        (bool): if this op is fused
    """
    parent = op_name[:op_name.rfind('.')]
    return parent in self.fused_dict
def is_last_fused_child(self, op_name):
    """Return True if *op_name* matches the last fused entry of its parent.

    NOTE(review): membership is checked with ``in`` against
    ``self.fused_dict[parent][-1]``; when that entry is a string this is a
    substring test — behavior preserved as-is. Raises KeyError when the
    parent is not present in ``self.fused_dict``.

    Args:
        op_name (string): op name

    Returns:
        (bool): if this op is last fused op
    """
    parent = op_name[:op_name.rfind('.')]
    return op_name in self.fused_dict[parent][-1]
def _post_eval_hook(self, model, **args):
    """The function is used to do some post process after complete evaluation.

    Here, it used to dump quantizable op's output tensor (recorded by the
    observers attached in ``_pre_eval_hook``) plus the model weights to
    TensorBoard summary files.

    Args:
        model (object): input model (wrapper exposing ``.model``)
        **args: optional data; ``accuracy`` (when present) is embedded in
            the summary-writer run directory name.

    Returns:
        summary (OrderedDict): recorded tensors keyed by "<op>.output"
    """
    from torch.utils.tensorboard import SummaryWriter
    from torch.quantization import get_observer_dict

    model = model.model

    if args is not None and 'accuracy' in args:
        accuracy = args['accuracy']
    else:
        accuracy = ''

    # dump_times == 0 marks the FP32 baseline run; later dumps are tune runs.
    if self.dump_times == 0:
        writer = SummaryWriter('runs/eval/baseline' +
                               '_acc' + str(accuracy), model)
    else:
        writer = SummaryWriter('runs/eval/tune_' +
                               str(self.dump_times) +
                               '_acc' + str(accuracy), model)

    if self.dump_times == 0:
        # Trace the graph once using the first calibration batch.
        for (input, _) in self.q_dataloader:
            if isinstance(input, dict):
                if self.device == "gpu":
                    for inp in input.keys():
                        input[inp] = input[inp].to("dpcpp")
            elif isinstance(input, list) or isinstance(input, tuple):
                if self.device == "gpu":
                    input = [inp.to("dpcpp") for inp in input]
            else:
                if self.device == "gpu":
                    input = input.to("dpcpp")
            writer.add_graph(model, input)
            break

    summary = OrderedDict()
    observer_dict = {}
    get_observer_dict(model, observer_dict)
    for key in observer_dict:
        if isinstance(observer_dict[key],
                      torch.nn.modules.linear.Identity):
            continue
        # Fixed: the original used key.strip(".activation_post_process"),
        # but str.strip removes any leading/trailing characters from that
        # SET and can corrupt op names (e.g. names ending in 'c', 's', ...).
        # Remove the exact suffix instead.
        suffix = ".activation_post_process"
        op_name = key[:-len(suffix)] if key.endswith(suffix) else key
        summary[op_name + ".output"] = observer_dict[key].get_tensor_value()
        for step in summary[op_name + ".output"]:
            # Only collect last fused child output; intermediate fused
            # children are skipped, and the tag is merged onto the parent.
            if self.is_fused_child(op_name):
                if not self.is_last_fused_child(op_name):
                    continue
                op = op_name[:op_name.rfind('.')]
            else:
                op = op_name
            if summary[op_name + ".output"][step].is_quantized:
                writer.add_histogram(
                    op + "/Output/int8",
                    torch.dequantize(summary[op_name +
                                             ".output"][step]))
            else:
                writer.add_histogram(
                    op + "/Output/fp32",
                    summary[op_name + ".output"][step])

    state_dict = model.state_dict()
    for key in state_dict:
        if not isinstance(state_dict[key], torch.Tensor):
            continue

        op = key[:key.rfind('.')]
        if self.is_fused_child(op):
            # fused child tensorboard tag will be merge
            weight = key[key.rfind('.') + 1:]
            op = op[:op.rfind('.')] + '/' + weight
        else:
            weight = key[key.rfind('.') + 1:]
            op = key[:key.rfind('.')] + '/' + weight

        # To merge ._packed_params
        op = op.replace('._packed_params', '')

        if state_dict[key].is_quantized:
            writer.add_histogram(op + "/int8",
                                 torch.dequantize(state_dict[key]))
        else:
            writer.add_histogram(op + "/fp32", state_dict[key])

    writer.close()
    self.dump_times = self.dump_times + 1

    return summary
@dump_elapsed_time("Pass save quantized model")
def save(self, model, path=None):
    """Intentionally a no-op: this adaptor has nothing to persist here.

    Args:
        model (object): model to save (unused)
        path (string, optional): destination path (unused)

    Returns:
        None
    """
    pass
@adaptor_registry
class PyTorch_IPEXAdaptor(TemplateAdaptor): # pragma: no cover
    # Map IPEX fused-op names back to their base op type.
    unify_op_type_mapping = {
        "Convolution_Relu": "Convolution",
        "Convolution_Sum_Relu": "Convolution",
        "Convolution_BatchNorm": "Convolution",
        "Linear_Relu": "Linear"
    }
    """Adaptor of PyTorch framework with Intel PyTorch Extension,
       all PyTorch IPEX API is in this class.

    Args:
        framework_specific_info (dict): dictionary of tuning configure from yaml file.
    """
    def __init__(self, framework_specific_info):
        """Set up workspace, capability query handler and the temporary
        IPEX quantization-config path (removed if left over from a
        previous run)."""
        super(PyTorch_IPEXAdaptor, self).__init__(framework_specific_info)

        self.workspace_path = framework_specific_info['workspace_path']

        query_config_file = "pytorch_ipex.yaml"
        self.query_handler = PyTorchQuery(local_config_file=os.path.join(
            os.path.dirname(__file__), query_config_file))
        self.cfgs = None

        self.ipex_config_path = \
            os.path.join(self.workspace_path, 'ipex_config_tmp.json')
        if os.path.exists(self.ipex_config_path):
            os.remove(self.ipex_config_path)

    def model_calibration(self, q_model, dataloader, iterations=1, conf=None):
        """Run *iterations* calibration batches through *q_model* under
        IPEX AutoMixPrecision in 'calibration' mode.

        Args:
            q_model (object): model to calibrate
            dataloader (object): yields (input, label) batches
            iterations (int): number of batches to run; must be positive
            conf (ipex.AmpConf, optional): IPEX mixed-precision config

        Returns:
            None (calibration statistics accumulate inside conf)
        """
        assert iterations > 0
        with torch.no_grad():
            for idx, (input, label) in enumerate(dataloader):
                # Move inputs to the IPEX device; dict/list/tuple/tensor
                # inputs are each handled explicitly.
                if isinstance(input, dict):
                    for inp in input.keys():
                        input[inp] = input[inp].to(ipex.DEVICE)
                    with ipex.AutoMixPrecision(conf, running_mode='calibration'):
                        output = q_model(**input)
                elif isinstance(input, list) or isinstance(input, tuple):
                    input = [inp.to(ipex.DEVICE) for inp in input]
                    with ipex.AutoMixPrecision(conf, running_mode='calibration'):
                        output = q_model(*input)
                else:
                    input = input.to(ipex.DEVICE)  # pylint: disable=no-member
                    with ipex.AutoMixPrecision(conf, running_mode='calibration'):
                        output = q_model(input)
                if idx >= iterations - 1:
                    break

    @dump_elapsed_time("Pass quantize model")
    def quantize(self, tune_cfg, model, dataloader, q_func=None):
        """Execute the quantize process on the specified model.

        Args:
            tune_cfg (dict): quantization config.
            model (object): model need to do quantization, it is LPOT model.
            dataloader (object): calibration dataset.
            q_func (objext, optional): training function for quantization aware training mode.

        Returns:
            (dict): quantized model
        """
        model_ = copy.deepcopy(model)
        # Prefer scripting, fall back to tracing with one batch, and as a
        # last resort run the eager model.
        # NOTE(review): bare excepts preserved — any scripting/tracing error
        # silently triggers the fallback.
        try:
            q_model = torch.jit.script(model_.model.eval().to(ipex.DEVICE))
        except:
            try:
                for input, _ in dataloader:
                    q_model = torch.jit.trace(model_.model.eval().to(ipex.DEVICE),
                                              input.to(ipex.DEVICE)).to(ipex.DEVICE)
                    break
            except:
                logger.info("This model can't convert to Script model")
                q_model = model_.model.eval().to(ipex.DEVICE)

        # Writes per-op quantize/no-quantize flags to self.ipex_config_path
        # (requires self.cfgs, populated by query_fw_capability).
        self._cfg_to_qconfig(tune_cfg)

        if self.approach == 'post_training_static_quant':
            iterations = tune_cfg.get('calib_iteration', 1)
            ipex_conf = ipex.AmpConf(torch.int8, configure_file=self.ipex_config_path)
            self.model_calibration(q_model, dataloader, iterations, conf=ipex_conf)
            ipex_conf.save(self.ipex_config_path)

        assert self.approach != 'quant_aware_training', "Intel PyTorch Extension didn't support \
                            quantization aware training mode"
        model_.model = q_model
        model_.tune_cfg = copy.deepcopy(self.cfgs)

        if self.is_baseline:
            self.is_baseline = False
        return model_

    def _cfg_to_qconfig(self, tune_cfg):
        """Convert tune configure to quantization config for each op.

        Args:
            tune_cfg (dict): dictionary of tune configure for each op
            ipex_config_path: configure file of Intel PyTorch Extension

            tune_cfg should be a format like below:
            {
              'calib_iteration': 10,
              'op': {
                 ('op1', 'CONV2D'): {
                   'activation':  {'dtype': 'uint8',
                                   'algorithm': 'minmax',
                                   'scheme':'sym',
                                   'granularity': 'per_tensor'},
                   'weight': {'dtype': 'int8',
                              'algorithm': 'kl',
                              'scheme':'asym',
                              'granularity': 'per_channel'}
                 },
                 ('op2', 'RELU): {
                   'activation': {'dtype': 'int8',
                   'scheme': 'asym',
                   'granularity': 'per_tensor',
                   'algorithm': 'minmax'}
                 },
                 ('op3', 'CONV2D'): {
                   'activation':  {'dtype': 'fp32'},
                   'weight': {'dtype': 'fp32'}
                 },
                 ...
              }
            }
        """
        assert self.cfgs is not None, "No configure for IPEX int8 model..."
        for key in tune_cfg['op']:
            value = tune_cfg['op'][key]
            assert isinstance(value, dict)
            assert 'activation' in value
            # fp32 activation means the op is left unquantized; anything
            # else flips its "quantized" flag on in the IPEX config.
            if value['activation']['dtype'] == 'fp32':
                if 'weight' in value:
                    assert value['weight']['dtype'] == 'fp32'
                for op_cfg in self.cfgs:
                    if op_cfg["id"] == key[0]:
                        op_cfg["quantized"] = False
            else:
                for op_cfg in self.cfgs:
                    if op_cfg["id"] == key[0]:
                        op_cfg["quantized"] = True
        with open(self.ipex_config_path, 'w') as write_f:
            json.dump(self.cfgs, write_f)

    def evaluate(self, model, dataloader, postprocess=None,
                 metric=None, measurer=None, iteration=-1,
                 tensorboard=False, fp32_baseline=False):
        """Execute the evaluate process on the specified model.

        Args:
            model (object): LPOT model to run evaluation.
            dataloader (object): evaluation dataset.
            postprocess (object, optional): process function after evaluation.
            metric (object, optional): metric function.
            measurer (object, optional): measurer function.
            iteration (int, optional): number of iterations to evaluate.
            tensorboard (bool, optional): dump output tensor to tensorboard summary
                                          files(IPEX unspport).
            fp32_baseline (boolen, optional): only for compare_label=False pipeline

        Returns:
            (dict): quantized model
        """
        assert not tensorboard, "Intel PyTorch Extension didn't tensor dump"

        model_ = model.model
        model_.eval()
        if self.is_baseline:
            model_.to(ipex.DEVICE)

        # Tuning uses the temp config; benchmark uses the saved best one.
        ipex_config = self.ipex_config_path if not self.benchmark else \
            os.path.join(self.workspace_path, 'best_configure.json')
        conf = ipex.AmpConf(torch.int8, configure_file=ipex_config) \
            if not self.is_baseline else ipex.AmpConf(None)

        with torch.no_grad():
            for idx, (input, label) in enumerate(dataloader):
                if measurer is not None:
                    measurer.start()

                if isinstance(input, dict):
                    for inp in input.keys():
                        input[inp] = input[inp].to(ipex.DEVICE)
                    with ipex.AutoMixPrecision(conf, running_mode='inference'):
                        output = model_(**input)
                elif isinstance(input, list) or isinstance(input, tuple):
                    input = [inp.to(ipex.DEVICE) for inp in input]
                    with ipex.AutoMixPrecision(conf, running_mode='inference'):
                        output = model_(*input)
                else:
                    input = input.to(ipex.DEVICE)  # pylint: disable=no-member
                    with ipex.AutoMixPrecision(conf, running_mode='inference'):
                        output = model_(input)
                # NOTE(review): label is moved to the IPEX device every
                # iteration regardless of input type.
                label = label.to(ipex.DEVICE)
                if measurer is not None:
                    measurer.end()
                if postprocess is not None:
                    output, label = postprocess((output, label))
                if metric is not None:
                    metric.update(output, label)
                if idx + 1 == iteration:
                    break
        acc = metric.result() if metric is not None else 0

        return acc

    def _get_quantizable_ops_recursively(self, model, prefix, quantizable_ops):
        """This is a helper function for `query_fw_capability`,
           and it will get all quantizable ops from model.

        Args:
            model (object): input model
            prefix (string): prefix of op name
            quantizable_ops (list): list of quantizable ops from model include op name and type.

        Returns:
            None
        """
        # Generate the IPEX config by calibrating once, unless a config
        # file already exists from a previous step.
        if not os.path.exists(self.ipex_config_path):
            assert isinstance(model, torch.nn.Module), \
                    "The model passed in is not the instance of torch.nn.Module"

            model_ = copy.deepcopy(model)
            model_.eval().to(ipex.DEVICE)
            # NOTE(review): bare excepts preserved — script, then trace,
            # then fall back to the eager model.
            try:
                init_model = torch.jit.script(model_)
            except:
                try:
                    for input, _ in self.q_dataloader:
                        init_model = torch.jit.trace(model_, input.to(ipex.DEVICE))
                        break
                except:
                    logger.info("This model can't convert to Script model")
                    init_model = model_

            # create a quantization config file for intel pytorch extension model
            os.makedirs(os.path.dirname(self.ipex_config_path), exist_ok=True)
            ipex_conf = ipex.AmpConf(torch.int8)
            self.model_calibration(init_model, self.q_dataloader, conf=ipex_conf)
            ipex_conf.save(self.ipex_config_path)

        with open(self.ipex_config_path, 'r') as f:
            self.cfgs = json.load(f)
            # Each config entry becomes one quantizable op, with fused-op
            # names unified back to their base type.
            for op_cfg in self.cfgs:
                quantizable_ops.append((op_cfg["id"],
                                       self.unify_op_type_mapping[op_cfg["name"]]
                                       if op_cfg["name"] in self.unify_op_type_mapping else
                                       op_cfg["name"]))
        # The temp config is regenerated later by _cfg_to_qconfig.
        os.remove(self.ipex_config_path)

    @dump_elapsed_time("Pass save quantized model")
    def save(self, model, path=None):
        """The function is used by tune strategy class for set best configure in LPOT model.

           Args:
               model (object): The LPOT model which is best results.
               path (string): No used.

        Returns:
            None
        """
        pass
class PyTorchQuery(QueryBackendCapability):
    """Query PyTorch quantization capability from a versioned yaml config."""

    def __init__(self, local_config_file=None):
        import torch
        super().__init__()
        # Drop any local build suffix, e.g. "1.8.0+cpu" -> "1.8.0".
        self.version = torch.__version__.split('+')[0]
        self.cfg = local_config_file
        self.cur_config = None
        self._one_shot_query()

    @staticmethod
    def _version_tuple(version):
        """Best-effort numeric tuple for a dotted version ('1.10.2' -> (1, 10, 2)).

        Stops at the first non-numeric component.
        """
        parts = []
        for part in version.split('.'):
            if not part.isdigit():
                break
            parts.append(int(part))
        return tuple(parts)

    def _get_specified_version_cfg(self, data):
        """Get the configuration for the current runtime.

        If there's no matched configuration in the input yaml, we'll
        use the `default` field of yaml.

        Args:
            data (Yaml content): input yaml file.

        Returns:
            [dictionary]: the content for specific version.
        """
        # Fixed: the original compared float(version) values, so '1.10'
        # parsed as 1.1 and sorted BELOW '1.8'; compare component-wise.
        version = self._version_tuple(self.version)
        for sub_data in data:
            name = sub_data['version']['name']
            if name == 'default':
                return sub_data
            if version >= self._version_tuple(name):
                return sub_data

    def _one_shot_query(self):
        """Load the yaml config once and cache the matching version entry."""
        with open(self.cfg) as f:
            content = yaml.safe_load(f)
            try:
                self.cur_config = self._get_specified_version_cfg(content)
            except Exception as e:
                # Fixed: the original referenced the undefined attribute
                # ``self.logger``; use the module-level logger instead.
                logger.info("Failed to parse {} due to {}".format(self.cfg, str(e)))
                self.cur_config = None
                raise ValueError("Please check the {} format.".format(self.cfg))

    def get_quantization_capability(self):
        """Get the supported op types' quantization capability.

        Returns:
            [dictionary list]: A list composed of dictionary which key is precision
            and value is a dict that describes all op types' quantization capability.
        """
        return self.cur_config['capabilities']
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,647
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/ux/utils/parser.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser for tuning."""
import re
from typing import Any, Dict
from lpot.ux.utils.logger import log
from lpot.ux.utils.templates.metric import Metric
class Parser:
    """Parser class is responsible for parsing log files."""

    def __init__(self, logs: list) -> None:
        """Initialize object."""
        self._logs = logs
        self.metric = Metric()

    def process(self) -> Dict[str, Any]:
        """Process files.

        Scans every log file line-by-line for the metric patterns and
        collects the first capture group of each match into the Metric.

        Returns:
            serialized metric dictionary.
        """
        # Compile each pattern once up front; the original re.compile'd
        # inside the inner loop, i.e. once per line per pattern.
        compiled = {key: re.compile(pattern)
                    for key, pattern in self.patterns.items()}
        for log_file in self._logs:
            log.debug(f"Read from {log_file}")
            with open(log_file) as f:
                for line in f:
                    for key, prog in compiled.items():
                        match = prog.search(line)
                        if match:
                            self.metric.insert_data(key, match.group(1))
        parsed_data: Dict[str, Any] = self.metric.serialize()  # type: ignore
        return parsed_data

    @property
    def patterns(self) -> dict:
        """Set patterns to get metrics from lines."""
        return {
            "acc_fp32": r".*FP32 baseline is: \[(\d+.\d+),",
            "acc_int8": r".*Best tune result is: \[(\d+.\d+),",
            "perf_latency_fp32": r"Latency:\s+(\d+(\.\d+)?)",
            "perf_latency_int8": r"Latency:\s+(\d+(\.\d+)?)",
            "perf_throughput_fp32": r"Throughput:\s+(\d+(\.\d+)?)",
            "perf_throughput_int8": r"Throughput:\s+(\d+(\.\d+)?)",
        }
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,648
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/ux/utils/workload/workloads_list.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Workloads list class."""
import logging
import os
import re
from typing import Any, Dict, List, Optional, Union
from lpot.ux.utils.json_serializer import JsonSerializer
from lpot.ux.utils.templates.metric import Metric
from lpot.ux.utils.utils import get_size
logging.basicConfig(level=logging.INFO)
class WorkloadInfo(JsonSerializer):
    """Create template for workload_list entity."""

    def __init__(
        self,
        request_id: Optional[str],
        workload_path: Optional[str],
        model_path: Optional[str],
        model_output_path: Optional[str],
        metric: Optional[Union[Metric, dict]],
        status: Optional[str],
        code_template_path: Optional[str],
        execution_details: Optional[Dict[str, dict]] = None,
    ) -> None:
        """Initialize configuration WorkloadInfo class."""
        super().__init__()
        self._id = request_id
        self._model_path = model_path
        self._model_output_path = model_output_path
        self._workload_path = workload_path
        self._status = status
        self._metric = metric
        self._code_template_path = code_template_path
        self._config_path: Optional[str] = None
        self._log_path: Optional[str] = None
        self._execution_details = execution_details
        if self._workload_path:
            self._config_path = os.path.join(
                self._workload_path,
                "config.yaml",
            )
            self._log_path = os.path.join(self._workload_path, "output.txt")
            # Create an initial log file so later readers always find one.
            if not os.path.isfile(self._log_path):
                os.makedirs(os.path.dirname(self._log_path), exist_ok=True)
                with open(self._log_path, "w") as log_file:
                    log_file.write("Configuration created.\n")
        if self._model_path and self._metric:
            # Backfill the FP32 model size when the caller did not set it.
            if isinstance(self._metric, dict) and not self._metric.get("size_fp32"):
                self._metric["size_fp32"] = get_size(self._model_path)
            if isinstance(self._metric, Metric) and not self._metric.size_fp32:
                self._metric.insert_data("size_fp32", str(get_size(self._model_path)))

    def insert_data(self, data: dict) -> None:
        """
        Set all available properties from workload_info dict.

        param: data
        type: dict
        """
        # Fixed: iterating a dict directly yields only keys, so the
        # original ``for key, value in data:`` raised ValueError on any
        # ordinary dict; iterate items() instead.
        for key, value in data.items():
            attribute = "_" + key
            if attribute in self.__dict__:
                self.__setattr__(attribute, value)

    def serialize(
        self,
        serialization_type: str = "default",
    ) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
        """
        Serialize class to dict.

        :param serialization_type: serialization type, defaults to "default"
        :type serialization_type: str, optional
        :return: serialized class
        :rtype: Union[dict, List[dict]]
        """
        result = {}
        for key, value in self.__dict__.items():
            variable_name = re.sub(r"^_", "", key)
            if key in self._skip:
                continue
            elif issubclass(type(value), JsonSerializer):
                # pylint: disable=maybe-no-member
                result[variable_name] = value.serialize(serialization_type)
            else:
                result[variable_name] = self.serialize_item(value)
        if result.get("metric", None):
            # Flatten metric fields into the top-level result dict.
            result.update(result["metric"])
        return result
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,649
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/ux/components/tune/execute_tune.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Execute tune."""
import json
import os
from typing import Any, Dict
from lpot.ux.components.tune.tuning import Tuning
from lpot.ux.utils.exceptions import ClientErrorException
from lpot.ux.utils.executor import Executor
from lpot.ux.utils.logger import log
from lpot.ux.utils.parser import Parser
from lpot.ux.utils.templates.workdir import Workdir
from lpot.ux.utils.utils import get_size, load_json
from lpot.ux.web.communication import MessageQueue
mq = MessageQueue()
def execute_tuning(data: Dict[str, Any]) -> dict:
    """Execute tuning for the workload identified by ``data["id"]``.

    Loads the stored workload, runs the tuning command via Executor,
    parses the produced log and posts progress/result messages on the
    message queue.

    Args:
        data: request payload; must contain a non-empty "id".

    Returns:
        dict with parsed metrics and execution details.

    Raises:
        ClientErrorException: when the request id is missing or tuning fails.
    """
    from lpot.ux.utils.workload.workload import Workload

    if not str(data.get("id", "")):
        message = "Missing request id."
        mq.post_error(
            "tuning_finish",
            {"message": message, "code": 404},
        )
        # Consistency fix: raise the same client-error type used by the
        # failure path at the end of this function instead of bare Exception.
        raise ClientErrorException(message)

    request_id: str = data["id"]
    workdir = Workdir(request_id=request_id)
    workload_path: str = workdir.workload_path

    try:
        workload_data = load_json(
            os.path.join(workload_path, "workload.json"),
        )
    except Exception as err:
        mq.post_error(
            "tuning_finish",
            {"message": repr(err), "code": 404, "id": request_id},
        )
        raise err

    workload = Workload(workload_data)
    tuning: Tuning = Tuning(workload, workdir.workload_path, workdir.template_path)
    send_data = {
        "message": "started",
        "id": request_id,
        "size_fp32": get_size(tuning.model_path),
    }

    workdir.clean_logs()
    workdir.update_data(
        request_id=request_id,
        model_path=tuning.model_path,
        model_output_path=tuning.model_output_path,
        status="wip",
    )

    executor = Executor(
        workspace_path=workload_path,
        subject="tuning",
        data=send_data,
        log_name="output",
    )

    proc = executor.call(
        tuning.command,
    )

    tuning_time = executor.process_duration
    if tuning_time:
        tuning_time = round(tuning_time, 2)
    log.debug(f"Elapsed time: {tuning_time}")

    logs = [os.path.join(workload_path, "output.txt")]
    parser = Parser(logs)
    if proc.is_ok:
        response_data = parser.process()
        if isinstance(response_data, dict):
            response_data["id"] = request_id
            response_data["tuning_time"] = tuning_time
            response_data["size_int8"] = get_size(tuning.model_output_path)
            response_data["model_output_path"] = tuning.model_output_path
            response_data["size_fp32"] = get_size(tuning.model_path)
            response_data["is_custom_dataloader"] = bool(workdir.template_path)

            workdir.update_data(
                request_id=request_id,
                model_path=tuning.model_path,
                model_output_path=tuning.model_output_path,
                metric=response_data,
                status="success",
                execution_details={"tuning": tuning.serialize()},
            )
            response_data["execution_details"] = {"tuning": tuning.serialize()}

        log.debug(f"Parsed data is {json.dumps(response_data)}")
        mq.post_success("tuning_finish", response_data)
        return response_data
    else:
        log.debug("FAIL")
        workdir.update_data(
            request_id=request_id,
            model_path=tuning.model_path,
            status="error",
        )
        mq.post_failure("tuning_finish", {"message": "failed", "id": request_id})
        raise ClientErrorException("Tuning failed during execution.")
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,650
|
SnehalA/lpot
|
refs/heads/master
|
/examples/tensorflow/nlp/bert/tune_squad.py
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD 1.1 and SQuAD 2.0."""
import tensorflow as tf
import numpy as np
flags = tf.compat.v1.flags
FLAGS = flags.FLAGS

## Required parameters
# NOTE: module-level flag registration — each DEFINE_string call is a side
# effect on the global FLAGS registry and must run at import time.
flags.DEFINE_string(
    "input_model", None,
    "Run inference with specified pb graph.")

flags.DEFINE_string(
    "output_model", None,
    "The output model of the quantized model.")

# NOTE(review): the help text mentions 'accuracy', but main() only
# implements 'benchmark' and 'tune' — confirm whether 'accuracy' is handled
# elsewhere.
flags.DEFINE_string(
    "mode", 'tune',
    "One of three options: 'benchmark'/'tune'/'accuracy'.")

flags.DEFINE_string(
    "config", 'bert.yaml', "yaml configuration of the model")
def main(_):
    """Entry point: benchmark or quantize the BERT SQuAD frozen graph.

    Dispatches on FLAGS.mode: 'benchmark' evaluates the input model and
    prints accuracy/latency/throughput; 'tune' runs LPOT quantization and
    saves the quantized model. Other mode values are silently ignored.
    """
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
    run_mode = FLAGS.mode
    if run_mode == 'benchmark':
        from lpot import Benchmark
        evaluator = Benchmark(FLAGS.config)
        evaluator.model = FLAGS.input_model
        for bench_mode, outcome in evaluator().items():
            acc, batch_size, result_list = outcome
            # Mean per-sample latency in seconds.
            latency = np.array(result_list).mean() / batch_size
            print('\n{} mode benchmark result:'.format(bench_mode))
            print('Accuracy is {:.3f}'.format(acc))
            print('Batch size = {}'.format(batch_size))
            print('Latency: {:.3f} ms'.format(latency * 1000))
            print('Throughput: {:.3f} images/sec'.format(1./ latency))
    elif run_mode == 'tune':
        from lpot.quantization import Quantization
        quantizer = Quantization(FLAGS.config)
        quantizer.model = FLAGS.input_model
        quantized_model = quantizer()
        quantized_model.save(FLAGS.output_model)

if __name__ == "__main__":
    tf.compat.v1.app.run()
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,651
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/ux/utils/templates/metric.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tuning class."""
from typing import Optional
from lpot.ux.utils.json_serializer import JsonSerializer
DIGITS = 4  # number of decimal places kept by all rounded metric values


class Metric(JsonSerializer):
    """Metric represents data which is sent from Tuning and Benchmark."""

    # TODO: Split into Accuracy, Performance if necessary for Benchmark
    def __init__(self) -> None:
        """Initialize configuration Dataset class."""
        super().__init__()
        self._acc_fp32: Optional[float] = None
        self._acc_int8: Optional[float] = None
        self._perf_latency_fp32: Optional[float] = None
        self._perf_latency_int8: Optional[float] = None
        self._perf_throughput_int8: Optional[float] = None
        self._perf_throughput_fp32: Optional[float] = None
        self._tuning_time: Optional[float] = None
        self._size_fp32: Optional[float] = None
        self._size_int8: Optional[float] = None

    @property
    def acc_fp32(self) -> Optional[float]:
        """Accuracy for float32."""
        return self._acc_fp32

    @acc_fp32.setter
    def acc_fp32(self, value: str) -> None:
        """Set accuracy fp32 from value."""
        float_value = float(value)
        # Values above 1 are treated as percentages and scaled to [0, 1].
        if float_value > 1:
            float_value /= 100
        self._acc_fp32 = round(float_value, DIGITS)

    @property
    def acc_int8(self) -> Optional[float]:
        """Accuracy for int8."""
        return self._acc_int8

    @acc_int8.setter
    def acc_int8(self, value: str) -> None:
        """Set accuracy int8 from value."""
        float_value = float(value)
        # Values above 1 are treated as percentages and scaled to [0, 1].
        if float_value > 1:
            float_value /= 100
        self._acc_int8 = round(float_value, DIGITS)

    @property
    def perf_latency_fp32(self) -> Optional[float]:
        """Latency for fp32."""
        return self._perf_latency_fp32

    @perf_latency_fp32.setter
    def perf_latency_fp32(self, value: str) -> None:
        """Set latency for fp32."""
        self._perf_latency_fp32 = round(float(value), DIGITS)
        # Derive throughput from latency unless it was already reported.
        if not self.perf_throughput_fp32:
            self.perf_throughput_fp32 = self.calculate_throughput(
                self._perf_latency_fp32,
            )

    @property
    def perf_latency_int8(self) -> Optional[float]:
        """Latency for int8."""
        return self._perf_latency_int8

    @perf_latency_int8.setter
    def perf_latency_int8(self, value: str) -> None:
        """Set latency for int8."""
        self._perf_latency_int8 = round(float(value), DIGITS)
        # Derive throughput from latency unless it was already reported.
        if not self.perf_throughput_int8:
            self.perf_throughput_int8 = self.calculate_throughput(
                self._perf_latency_int8,
            )

    @property
    def perf_throughput_int8(self) -> Optional[float]:
        """Throughput for int8 model."""
        return self._perf_throughput_int8

    @perf_throughput_int8.setter
    def perf_throughput_int8(self, value: str) -> None:
        """Set throughput from value for int8 model."""
        self._perf_throughput_int8 = round(float(value), DIGITS)

    @property
    def perf_throughput_fp32(self) -> Optional[float]:
        """Throughput for fp32 model."""
        return self._perf_throughput_fp32

    @perf_throughput_fp32.setter
    def perf_throughput_fp32(self, value: str) -> None:
        """Set throughput from value for fp32 model."""
        self._perf_throughput_fp32 = round(float(value), DIGITS)

    def insert_data(self, attribute: str, value: str) -> None:
        """Set attribute value."""
        self.__setattr__(attribute, value)

    @staticmethod
    def calculate_throughput(value: float) -> float:
        """
        Calculate throughput based on latency.

        Right now 1000 represents number of images in dataset.
        TODO: change 1000 to the batch size when Benchmark is ready
        """
        return 1000 / value

    @property
    def size_fp32(self) -> Optional[float]:
        """Model size for float32."""
        return self._size_fp32

    @size_fp32.setter
    def size_fp32(self, value: str) -> None:
        """Set model size fp32 from value."""
        self._size_fp32 = float(value)

    @property
    def size_int8(self) -> Optional[float]:
        """Model size for int8."""
        return self._size_int8

    @size_int8.setter
    def size_int8(self, value: str) -> None:
        """Set model size int8 from value."""
        self._size_int8 = float(value)

    @property
    def tuning_time(self) -> Optional[float]:
        """Tuning time."""
        # BUG FIX: the original returned self.tuning_time here, re-entering
        # this property and recursing until RecursionError; read the
        # backing field instead.
        return self._tuning_time

    @tuning_time.setter
    def tuning_time(self, value: str) -> None:
        """Set tuning_time value."""
        # BUG FIX: the original assigned self.tuning_time, re-entering this
        # setter recursively; write to the backing field instead.
        self._tuning_time = float(value)
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,652
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/data/transforms/imagenet_transform.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from lpot.utils.utility import LazyImport
from .transform import transform_registry, BaseTransform
tf = LazyImport('tensorflow')
cv2 = LazyImport('cv2')
@transform_registry(transform_type="ParseDecodeImagenet", \
                    process="preprocess", framework="tensorflow")
class ParseDecodeImagenetTransform(BaseTransform):
    """Parse a serialized ImageNet Example proto into (image, label).

    The image is JPEG-decoded to a 3-channel tensor; the label is cast to
    int32. Bounding-box fields are declared in the feature map but are not
    returned.
    """

    def __call__(self, sample):
        # Dense features in Example proto.
        feature_map = {
            'image/encoded': tf.io.FixedLenFeature([], dtype=tf.string, default_value=''),
            'image/class/label': tf.io.FixedLenFeature([1], dtype=tf.int64, default_value=-1)}
        sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32)
        # Sparse features in Example proto.
        feature_map.update(
            {k: sparse_float32 for k in ['image/object/bbox/xmin',
                                         'image/object/bbox/ymin',
                                         'image/object/bbox/xmax',
                                         'image/object/bbox/ymax']})
        features = tf.io.parse_single_example(serialized=sample, features=feature_map)
        label = tf.cast(features['image/class/label'], dtype=tf.int32)
        image = features['image/encoded']
        # NOTE(review): fancy_upscaling=False + INTEGER_FAST trade a little
        # decode fidelity for speed — presumably intentional; confirm.
        image = tf.image.decode_jpeg(
            image, channels=3, fancy_upscaling=False, dct_method='INTEGER_FAST')
        return (image, label)
@transform_registry(transform_type="ResizeCropImagenet", \
                    process="preprocess", framework="tensorflow")
class TensorflowResizeCropImagenetTransform(BaseTransform):
    """Resize the shorter side to `resize_side` (keeping aspect ratio),
    crop to height x width, then normalize as (image - mean_value) * scale.

    :param height: output crop height
    :param width: output crop width
    :param random_crop: crop at a random offset instead of the center
    :param resize_side: target length of the shorter image side
    :param random_flip_left_right: randomly mirror the crop horizontally
    :param mean_value: per-channel mean subtracted after cropping
    :param scale: multiplier applied after mean subtraction
    """

    def __init__(self, height, width, random_crop=False, resize_side=256, \
                 random_flip_left_right=False, mean_value=[0.0,0.0,0.0], scale=1.0):
        # NOTE: the list default is never mutated, so sharing it across
        # instances is safe; kept as-is for interface compatibility.
        self.height = height
        self.width = width
        self.mean_value = mean_value
        self.scale = scale
        self.random_crop = random_crop
        self.random_flip_left_right = random_flip_left_right
        self.resize_side = resize_side

    # sample is (images, labels)
    def __call__(self, sample):
        image, label = sample
        shape = tf.shape(input=image)
        height = tf.cast(shape[0], dtype=tf.float32)
        width = tf.cast(shape[1], dtype=tf.float32)
        # Scale so the shorter side becomes resize_side.
        scale = tf.cond(pred=tf.greater(height, width), \
                        true_fn=lambda: self.resize_side / width,
                        false_fn=lambda: self.resize_side / height,)
        scale = tf.cast(scale, dtype=tf.float32)
        new_height = tf.cast(tf.math.rint(height*scale), dtype=tf.int32)
        new_width = tf.cast(tf.math.rint(width*scale), dtype=tf.int32)
        image = tf.expand_dims(image, 0)
        image = tf.image.resize(image, [new_height, new_width],
                                method=tf.image.ResizeMethod.BILINEAR)
        image = tf.squeeze(image)
        shape = tf.shape(input=image)
        if self.random_crop:
            # BUG FIX: np.random.uniform returns floats, but
            # tf.image.crop_to_bounding_box requires integer offsets; draw
            # integer offsets instead.
            y0 = np.random.randint(low=0, high=(shape[0] - self.height + 1))
            x0 = np.random.randint(low=0, high=(shape[1] - self.width + 1))
        else:
            y0 = (shape[0] - self.height) // 2
            x0 = (shape[1] - self.width) // 2
        image = tf.image.crop_to_bounding_box(image, y0, x0, self.height, self.width)
        image.set_shape([self.height, self.width, 3])
        if self.random_flip_left_right:
            image = tf.image.random_flip_left_right(image)
        means = tf.broadcast_to(self.mean_value, tf.shape(input=image))
        image = (image - means) * self.scale
        return (image, label)
@transform_registry(transform_type="QuantizedInput", \
                    process="preprocess", framework="tensorflow")
class QuantizedInput(BaseTransform):
    """Scale an input image and cast it to a quantized integer dtype.

    :param dtype: target dtype name, one of 'uint8' or 'int8'
    :param scale: quantization scale; when None the transform is a no-op
        (the scale is not yet known while tuning)
    """

    def __init__(self, dtype, scale=None):
        self.dtype_map = {'uint8': tf.uint8, 'int8': tf.int8}
        # Idiom fix: membership test directly on the dict, not .keys().
        assert dtype in self.dtype_map, \
            'only support cast dtype {}'.format(self.dtype_map.keys())
        self.dtype = dtype
        self.scale = scale

    def __call__(self, sample):
        # scale is not known when tuning; in that case this transform
        # does nothing — it is only applied once scale is set.
        # Idiom fix: identity comparison with None ("is"), not "==".
        if self.scale is None:
            return sample
        image, label = sample
        image = image * self.scale
        if self.dtype == 'uint8':
            # Shift the zero point into the unsigned range.
            image = image + 128
        image = tf.dtypes.cast(image, dtype=self.dtype_map[self.dtype])
        return image, label
@transform_registry(transform_type="LabelShift", \
                    process="postprocess", framework="tensorflow")
class LabelShift(BaseTransform):
    """Subtract a constant offset from every label in a batch."""

    def __init__(self, label_shift=0):
        # Offset removed from each label (e.g. to align 1-based dataset
        # labels with 0-based model outputs).
        self.label_shift = label_shift

    def __call__(self, sample):
        predictions, raw_labels = sample
        shifted_labels = np.array(raw_labels) - self.label_shift
        return predictions, shifted_labels
@transform_registry(transform_type="BilinearImagenet", \
                    process="preprocess", framework="tensorflow")
class BilinearImagenetTransform(BaseTransform):
    """Inception-style preprocessing: central crop, bilinear resize to
    height x width, map pixels to [-1, 1], then apply (image - mean) * scale.
    """

    def __init__(self, height, width, central_fraction=0.875,
                 mean_value=[0.0,0.0,0.0], scale=1.0):
        # Output size of the resized crop.
        self.height = height
        self.width = width
        # Per-channel mean and post-subtraction multiplier.
        self.mean_value = mean_value
        self.scale = scale
        # Fraction of the image area kept by the central crop.
        self.central_fraction = central_fraction

    # sample is (images, labels)
    def __call__(self, sample):
        image, label = sample
        # Work in float32 [0, 1] regardless of the input dtype.
        if image.dtype is not tf.float32:
            image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        # Crop the central region of the image containing 87.5% area of the original image.
        if self.central_fraction:
            image = tf.image.central_crop(image, central_fraction=self.central_fraction)
        if self.height and self.width:
            # Resize the image to the specified height and width.
            image = tf.expand_dims(image, 0)
            image = tf.image.resize(image, [self.height, self.width], \
                                    method=tf.image.ResizeMethod.BILINEAR)
            image = tf.squeeze(image, [0])
        # Rescale [0, 1] pixels to [-1, 1].
        image = tf.subtract(image, 0.5)
        image = tf.multiply(image, 2.0)
        means = tf.broadcast_to(self.mean_value, tf.shape(input=image))
        image = (image - means) * self.scale
        return (image, label)
@transform_registry(transform_type="ResizeCropImagenet", \
                    process="preprocess", framework="onnxrt_qlinearops, onnxrt_integerops")
class ONNXResizeCropImagenetTransform(BaseTransform):
    """Resize the shorter side to `resize_side`, crop to height x width,
    and normalize with mean/std. Returns a CHW float32 array for ONNX
    Runtime.

    :param height: output crop height
    :param width: output crop width
    :param random_crop: crop at a random offset instead of the center
    :param resize_side: target length of the shorter image side
    :param mean_value: per-channel mean (applied after /255 scaling)
    :param std_value: per-channel std divisor
    """

    def __init__(self, height, width, random_crop=False, resize_side=256, \
                 mean_value=[0.0,0.0,0.0], std_value=[0.229, 0.224, 0.225]):
        self.height = height
        self.width = width
        self.mean_value = mean_value
        self.std_value = std_value
        self.random_crop = random_crop
        self.resize_side = resize_side

    # sample is (images, labels)
    def __call__(self, sample):
        image, label = sample
        height, width = image.shape[0], image.shape[1]
        # Scale so the shorter side becomes resize_side.
        scale = self.resize_side / width if height > width else self.resize_side / height
        new_height = int(height*scale)
        new_width = int(width*scale)
        # BUG FIX: cv2.resize takes dsize as (width, height); the original
        # passed (new_height, new_width), swapping the output dimensions
        # for non-square images.
        image = cv2.resize(image, (new_width, new_height))
        image = image / 255.
        shape = image.shape
        if self.random_crop:
            # BUG FIX: np.random.uniform returns floats, which are invalid
            # as slice indices below; draw integer offsets instead.
            y0 = np.random.randint(low=0, high=(shape[0] - self.height + 1))
            x0 = np.random.randint(low=0, high=(shape[1] - self.width + 1))
        else:
            y0 = (shape[0] - self.height) // 2
            x0 = (shape[1] - self.width) // 2
        # Grayscale input: replicate the single channel into 3 (HWC layout).
        if len(image.shape) == 2:
            image = np.array([image])
            image = np.repeat(image, 3, axis=0)
            image = image.transpose(1, 2, 0)
        image = image[y0:y0+self.height, x0:x0+self.width, :]
        image = ((image - self.mean_value)/self.std_value).astype(np.float32)
        # HWC -> CHW for ONNX Runtime.
        return (image.transpose(2, 0, 1), label)
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,653
|
SnehalA/lpot
|
refs/heads/master
|
/test/test_onnx_model.py
|
import sys
import onnx
from onnx import helper, TensorProto, numpy_helper
import unittest
import numpy as np
sys.path.append('..')
from lpot.adaptor.ox_utils.onnx_model import ONNXModel
from lpot.adaptor.ox_utils.onnx_calibrate import CalibrationDataReader
def get_onnx_model():
    """Export a torchvision ResNet-18 to ./resnet18.onnx.

    Helper for manually regenerating a real model file; it is not called
    by the tests in this module.
    """
    # BUG FIX: torch/torchvision/Variable were never imported at module
    # level, so calling this raised NameError. Import locally (the rest of
    # the file does not need torch); a plain tensor replaces the
    # long-deprecated Variable wrapper with identical export behavior.
    import torch
    import torchvision
    model = torchvision.models.resnet18()
    x = torch.randn(1, 3, 224, 224)
    torch.onnx.export(model, x, "resnet18.onnx", export_params=True, verbose=True)
def generate_input_initializer(tensor_shape, tensor_dtype, input_name):
    '''
    Helper function to generate initializers for test inputs
    '''
    # Random values in [0, 1), cast to the requested dtype, wrapped as an
    # ONNX TensorProto named `input_name`.
    data = np.random.ranf(tensor_shape).astype(tensor_dtype)
    return numpy_helper.from_array(data, input_name)
class TestDataReader(CalibrationDataReader):
    '''for test purpose'''

    def __init__(self):
        # No state needed: this reader always reports an exhausted stream.
        pass

    def get_next(self):
        # Always signal end-of-data to the calibrator.
        return None
class TestDataReaderSecond(CalibrationDataReader):
    '''for test purpose'''

    def __init__(self):
        self.preprocess_flag = True
        self.enum_data_dicts = []

    def get_next(self):
        """Yield three fixed 1x3x1x3 float32 batches, then None.

        The batches are built lazily on the first call and drained one per
        call; `next(..., None)` marks exhaustion.
        """
        if self.preprocess_flag:
            self.preprocess_flag = False
            batches = [
                np.array([[[[0.45,0.60,0.75]],
                           [[0.25,0.50,0.75]],
                           [[0.90,0.70,0.50]]]]).astype(np.float32),
                np.array([[[[0.62,0.94,0.38]],
                           [[0.70,0.13,0.07]],
                           [[0.89,0.75,0.84]]]]).astype(np.float32),
                np.array([[[[0.64,0.24,0.97]],
                           [[0.82,0.58,0.27]],
                           [[0.019,0.34,0.02]]]]).astype(np.float32),
            ]
            self.enum_data_dicts = iter(
                [{'input0': batch} for batch in batches])
        return next(self.enum_data_dicts, None)
class TestOnnxModel(unittest.TestCase):
    """Exercises ONNXModel's graph query/edit helpers on a small graph."""

    def setUp(self):
        """Build the 6-node graph below, round-trip it through disk, and
        wrap it in ONNXModel as self.model."""
        # Relu
        #  |  \
        # Conv \
        #  |    \
        # Relu   |
        #  |    Conv
        # Conv  /
        #   \  /
        #    |
        #   Add
        input0 = helper.make_tensor_value_info('input0', TensorProto.FLOAT, [1, 3, 1, 3])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 1, 3])
        # Random weight/bias initializers for the three Conv nodes.
        X1_weight = generate_input_initializer([3, 3, 1, 1], np.float32, 'X1_weight')
        X1_bias = generate_input_initializer([3], np.float32, 'X1_bias')
        X3_weight = generate_input_initializer([3, 3, 1, 1], np.float32, 'X3_weight')
        X3_bias = generate_input_initializer([3],np.float32, 'X3_bias')
        X5_weight = generate_input_initializer([3, 3, 1, 1], np.float32, 'X5_weight')
        X5_bias = generate_input_initializer([3],np.float32,'X5_bias')
        relu_node_1 = onnx.helper.make_node('Relu', ['input0'], ['X1'], name='Relu1')
        conv_node_1 = onnx.helper.make_node('Conv', ['X1', 'X1_weight', 'X1_bias'], ['X2'], name='Conv1')
        relu_node_2 = onnx.helper.make_node('Relu', ['X2'], ['X3'], name= 'Relu2')
        conv_node_2 = onnx.helper.make_node('Conv', ['X3', 'X3_weight', 'X3_bias'], ['X4'], name='Conv2')
        conv_node_3 = onnx.helper.make_node('Conv', ['X1', 'X5_weight', 'X5_bias'], ['X5'], name='Conv3')
        add_node = onnx.helper.make_node('Add', ['X4', 'X5'], ['output'], name='Add')
        graph = helper.make_graph([relu_node_1, conv_node_1, relu_node_2, conv_node_2, conv_node_3, add_node], 'test_graph_6', [input0], [output])
        graph.initializer.add().CopyFrom(X1_weight)
        graph.initializer.add().CopyFrom(X1_bias)
        graph.initializer.add().CopyFrom(X3_weight)
        graph.initializer.add().CopyFrom(X3_bias)
        graph.initializer.add().CopyFrom(X5_weight)
        graph.initializer.add().CopyFrom(X5_bias)
        model = helper.make_model(graph)
        # Save and reload so the wrapped model matches the on-disk path
        # used elsewhere.
        test_model_path = './test_model_6.onnx'
        onnx.save(model, test_model_path)
        model = onnx.load(test_model_path)
        self.model = ONNXModel(model)

    def test_nodes(self):
        """All six named nodes are reachable through nodes()."""
        self.assertEqual(len(self.model.nodes()), 6)
        nodes_name = [node.name for node in self.model.nodes()]
        nodes = ["Relu1", "Conv1", "Relu2", "Conv2", "Conv3", "Add"]
        for node in nodes:
            self.assertTrue(node in nodes_name)

    def test_initializer(self):
        """All six weight/bias initializers are present."""
        self.assertEqual(len(self.model.initializer()), 6)
        inits_name = [init.name for init in self.model.initializer()]
        inits = ['X1_weight', 'X1_bias', 'X3_weight', 'X3_bias', 'X5_weight', 'X5_bias']
        for init in inits:
            self.assertTrue(init in inits_name)

    def test_remove_node(self):
        """Removing the Add node leaves the other five intact."""
        for node in self.model.nodes():
            if node.op_type == "Add":
                self.model.remove_node(node)
        self.assertEqual(len(self.model.nodes()), 5)
        nodes_name = [node.name for node in self.model.nodes()]
        nodes = ["Relu1", "Conv1", "Relu2", "Conv2", "Conv3"]
        for node in nodes:
            self.assertTrue(node in nodes_name)

    def test_remove_nodes(self):
        """Batch removal of Conv3 and Add leaves four nodes."""
        nodes_to_remove = []
        for node in self.model.nodes():
            if node.name == "Conv3" or node.name == "Add":
                nodes_to_remove.append(node)
        self.model.remove_nodes(nodes_to_remove)
        self.assertEqual(len(self.model.nodes()), 4)
        nodes_name = [node.name for node in self.model.nodes()]
        nodes = ["Relu1", "Conv1", "Relu2", "Conv2"]
        for node in nodes:
            self.assertTrue(node in nodes_name)

    def test_add_node(self):
        """add_node appends to the end of the node list."""
        node_to_add = onnx.helper.make_node('Relu', ['output'], ['output1'], keepdims=0)
        self.model.add_node(node_to_add)
        last_node = self.model.nodes()[-1]
        self.assertEqual(last_node.op_type, 'Relu')

    def test_add_nodes(self):
        """add_nodes appends in order, preserving inputs/outputs."""
        nodes_to_add = []
        for i in range(2):
            node_to_add = onnx.helper.make_node('Relu', ["add_node{}_input".format(str(i))], ["add_node{}_output".format(str(i))], keepdims=0)
            nodes_to_add.append(node_to_add)
        self.model.add_nodes(nodes_to_add)
        self.assertEqual(self.model.nodes()[-1].input, ['add_node1_input'])
        self.assertEqual(self.model.nodes()[-2].input, ['add_node0_input'])
        self.assertEqual(self.model.nodes()[-1].output, ['add_node1_output'])
        self.assertEqual(self.model.nodes()[-2].output, ['add_node0_output'])

    def test_get_initializer(self):
        """Every initializer can be looked up by name."""
        inits = ['X1_weight', 'X1_bias', 'X3_weight', 'X3_bias', 'X5_weight', 'X5_bias']
        for init in inits:
            self.assertIsNotNone(self.model.get_initializer(init))

    def test_remove_initializer(self):
        """Removing one initializer leaves the other five."""
        for init in self.model.initializer():
            if init.name == "X1_weight":
                self.model.remove_initializer(init)
        self.assertEqual(len(self.model.initializer()), 5)
        inits_name = [init.name for init in self.model.initializer()]
        inits = ['X1_bias', 'X3_weight', 'X3_bias', 'X5_weight', 'X5_bias']
        for init in inits:
            self.assertTrue(init in inits_name)

    def test_remove_initializers(self):
        """Batch removal of all three bias initializers."""
        init_to_remove = []
        for init in self.model.initializer():
            if "bias" in init.name:
                init_to_remove.append(init)
        self.model.remove_initializers(init_to_remove)
        self.assertEqual(len(self.model.initializer()), 3)
        inits_name = [init.name for init in self.model.initializer()]
        inits = ['X1_weight', 'X3_weight', 'X5_weight']
        for init in inits:
            self.assertTrue(init in inits_name)

    def test_input_name_to_nodes(self):
        """Each tensor/initializer name maps to its consumer nodes."""
        self.assertEqual(len(self.model.input_name_to_nodes()), 12)
        ipts_name = [name for name in self.model.input_name_to_nodes()]
        ipts = ['input0', 'X1', 'X2', 'X3', 'X3_weight', 'X3_bias','X5_weight', 'X5_bias', 'X4', 'X5']
        for ipt in ipts:
            self.assertTrue(ipt in ipts_name)

    def test_output_name_to_node(self):
        """Each produced tensor name maps back to its producer node."""
        self.assertEqual(len(self.model.output_name_to_node()), 6)
        opts_name = [name for name in self.model.output_name_to_node()]
        opts = ['X1', 'X2', 'X3', 'X4', 'X5', 'output']
        for opt in opts:
            self.assertTrue(opt in opts_name)

    def test_get_children(self):
        """Relu1 feeds both Conv1 and Conv3."""
        for node in self.model.nodes():
            if node.name == "Relu1":
                children = self.model.get_children(node)
        self.assertEqual(len(children), 2)
        children_name = [child.name for child in children]
        names = ["Conv1", "Conv3"]
        for name in names:
            self.assertTrue(name in children_name)

    def test_get_parents(self):
        """The Add node's parents are Conv2 and Conv3."""
        for node in self.model.nodes():
            if node.op_type == "Add":
                parents = self.model.get_parents(node)
        self.assertEqual(len(parents), 2)
        parents_name = [parent.name for parent in parents]
        names = ["Conv2", "Conv3"]
        for name in names:
            self.assertTrue(name in parents_name)

    def test_get_parent(self):
        """get_parent resolves indexed inputs; out-of-range gives None."""
        for node in self.model.nodes():
            if node.op_type == "Add":
                node_to_get_parent = node
        # NOTE(review): the calls below use the loop variable `node`, which
        # after the loop is the last node (Add), so this works;
        # `node_to_get_parent` itself is never read.
        parent = self.model.get_parent(node, 0)
        self.assertEqual(parent.name, "Conv2")
        parent = self.model.get_parent(node, 1)
        self.assertEqual(parent.name, "Conv3")
        parent = self.model.get_parent(node, 2)
        self.assertIsNone(parent)

    def test_find_nodes_by_initializer(self):
        """Only Conv1 consumes the X1_weight initializer."""
        for init in self.model.initializer():
            if init.name == "X1_weight":
                initializer = init
        nodes = self.model.find_nodes_by_initializer(self.model.graph(), initializer)
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].name, "Conv1")

    def test_save(self):
        """The wrapped model serializes with external data format enabled."""
        self.model.save_model_to_file('./test_model_6.onnx', use_external_data_format=True)
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    unittest.main()
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,654
|
SnehalA/lpot
|
refs/heads/master
|
/test/test_filter.py
|
import unittest
import tensorflow as tf
import numpy as np
import os
from lpot.data import FILTERS, TRANSFORMS, DATASETS, DATALOADERS
from lpot.utils.create_obj_from_config import create_dataset, get_preprocess, create_dataloader
class TestCOCOFilter(unittest.TestCase):
    """Tests the LabelBalanceCOCORecord dataset filter end to end."""

    def testLabelBalanceCOCORecord(self):
        """Write a 2-example COCO TFRecord and check that a size-2 label
        balance filter keeps only the example with exactly two boxes, via
        three equivalent dataloader construction paths."""
        from PIL import Image
        tf.compat.v1.disable_eager_execution()
        # Create a random 100x100 RGB JPEG to embed in both records.
        random_array = np.random.random_sample([100,100,3]) * 255
        random_array = random_array.astype(np.uint8)
        im = Image.fromarray(random_array)
        im.save('test.jpeg')
        image = tf.compat.v1.gfile.FastGFile('test.jpeg','rb').read()
        source_id = '000000397133.jpg'.encode('utf-8')
        label = 'person'.encode('utf-8')
        # example1 carries a single bounding box ...
        example1 = tf.train.Example(features=tf.train.Features(feature={
            'image/encoded':tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[image])),
            'image/object/class/text':tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[label])),
            'image/source_id':tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[source_id])),
            'image/object/bbox/xmin':tf.train.Feature(
                float_list=tf.train.FloatList(value=[10])),
            'image/object/bbox/ymin':tf.train.Feature(
                float_list=tf.train.FloatList(value=[10])),
            'image/object/bbox/xmax':tf.train.Feature(
                float_list=tf.train.FloatList(value=[100])),
            'image/object/bbox/ymax':tf.train.Feature(
                float_list=tf.train.FloatList(value=[100])),
        }))
        # ... while example2 carries two, so only it survives the filter.
        example2 = tf.train.Example(features=tf.train.Features(feature={
            'image/encoded':tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[image])),
            'image/object/class/text':tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[label])),
            'image/source_id':tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[source_id])),
            'image/object/bbox/xmin':tf.train.Feature(
                float_list=tf.train.FloatList(value=[10, 20])),
            'image/object/bbox/ymin':tf.train.Feature(
                float_list=tf.train.FloatList(value=[10, 20])),
            'image/object/bbox/xmax':tf.train.Feature(
                float_list=tf.train.FloatList(value=[100, 200])),
            'image/object/bbox/ymax':tf.train.Feature(
                float_list=tf.train.FloatList(value=[100, 200])),
        }))
        with tf.io.TFRecordWriter('test.record') as writer:
            writer.write(example1.SerializeToString())
            writer.write(example2.SerializeToString())
        # Path 1: explicit dataset + filter objects.
        preprocesses = TRANSFORMS('tensorflow', 'preprocess')
        preprocess = get_preprocess(preprocesses, {'ParseDecodeCoco':{}})
        filters = FILTERS('tensorflow')
        # NOTE(review): `filter` shadows the builtin; kept as-is.
        filter = filters['LabelBalanceCOCORecord'](2)
        datasets = DATASETS('tensorflow')
        dataset = datasets['COCORecord']('test.record', \
            transform=preprocess, filter=filter)
        dataloader = DATALOADERS['tensorflow'](dataset=dataset, batch_size=1)
        for (inputs, labels) in dataloader:
            self.assertEqual(inputs.shape, (1,100,100,3))
            self.assertEqual(labels[0].shape, (1,2,4))
        # Path 2: config-dict driven create_dataset.
        dataset2 = create_dataset(
            'tensorflow', {'COCORecord':{'root':'test.record'}}, {'ParseDecodeCoco':{}}, {'LabelBalance':{'size':2}})
        dataloader2 = DATALOADERS['tensorflow'](dataset=dataset2, batch_size=1)
        for (inputs, labels) in dataloader2:
            self.assertEqual(inputs.shape, (1,100,100,3))
            self.assertEqual(labels[0].shape, (1,2,4))
        # Path 3: single-call create_dataloader.
        dataloader3 = create_dataloader('tensorflow', {'batch_size':1, 'dataset':{'COCORecord':{'root':'test.record'}},\
            'filter':{'LabelBalance':{'size':2}}, 'transform':{'ParseDecodeCoco':{}}})
        for (inputs, labels) in dataloader3:
            self.assertEqual(inputs.shape, (1,100,100,3))
            self.assertEqual(labels[0].shape, (1,2,4))
        # Clean up the temporary fixture files.
        os.remove('test.record')
        os.remove('test.jpeg')
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    unittest.main()
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,655
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/ux/utils/hw_info.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UX server HW info module."""
import platform
import subprocess
import psutil
class HWInfo:
    """Snapshot of platform hardware details, gathered at construction time."""

    def __init__(self) -> None:
        """Collect socket count, physical core count, RAM size and OS name."""
        # Number of CPU sockets, via the module-level lscpu helper.
        self.sockets: int = get_number_of_sockets()
        # Physical (non-hyperthreaded) core count.
        self.cores: int = psutil.cpu_count(logical=False)
        # Total RAM expressed in GiB.
        memory_bytes = psutil.virtual_memory().total
        self.total_memory: float = memory_bytes / (1024 ** 3)
        # Human-readable OS / distribution description.
        self.system: str = get_distribution()
def get_number_of_sockets() -> int:
    """Get number of sockets in platform.

    Returns the socket count reported by ``lscpu``, or 0 when it cannot
    be determined (non-Linux host, missing lscpu, unexpected output).
    """
    cmd = "lscpu | grep 'Socket(s)' | cut -d ':' -f 2"
    proc = subprocess.Popen(
        args=cmd,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=False,
    )
    # communicate() drains the pipe before waiting; the original
    # proc.wait() + PIPE combination can deadlock if the child fills
    # the pipe buffer (see subprocess docs).
    stdout, _ = proc.communicate()
    if stdout:
        for line in stdout.splitlines():
            try:
                return int(line.decode("utf-8", errors="ignore").strip())
            except ValueError:
                # Unexpected/empty lscpu output: fall through and
                # report "unknown" instead of raising.
                break
    return 0
def get_distribution() -> str:
    """
    Return system distibution.

    :return: distribution name
    :rtype: str
    """
    if psutil.WINDOWS:
        return f"{platform.system()} {platform.release()}"
    if psutil.LINUX:
        # platform.dist() was removed in Python 3.8; fall back to the
        # generic system/release pair on modern interpreters.
        try:
            return " ".join(platform.dist())
        except AttributeError:
            return f"{platform.system()} {platform.release()}"
    # Neither Windows nor Linux (e.g. macOS): generic platform string.
    return platform.platform()
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,656
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/benchmark.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .adaptor import FRAMEWORKS
from .objective import OBJECTIVES
from .conf.config import Conf
from .utils import logger
from .utils.create_obj_from_config import create_eval_func, create_dataloader
from .conf.dotdict import deep_get, deep_set
from .model import BaseModel as LpotModel
class Benchmark(object):
    """Benchmark class can be used to evaluate the model performance, with the objective
       setting, user can get the data of what they configured in yaml

       Args:
           conf_fname (string): The path to the YAML configuration file containing accuracy goal,
           tuning objective and preferred calibration & quantization tuning space etc.

    """

    def __init__(self, conf_fname):
        self.conf = Conf(conf_fname)
        self.framework = self.conf.usr_cfg.model.framework.lower()
        self._model = None
        self._b_dataloader = None

    def __call__(self):
        """Run benchmark for every mode configured under 'evaluation' in the yaml.

        Returns:
            dict: maps each evaluation mode to a tuple of
                  (accuracy, batch_size, per-iteration measurements after warmup).
        """
        cfg = self.conf.usr_cfg
        framework_specific_info = {'device': cfg.device,
                                   'approach': cfg.quantization.approach,
                                   'random_seed': cfg.tuning.random_seed}
        framework = cfg.model.framework.lower()
        if framework == 'tensorflow':
            framework_specific_info.update({"inputs": cfg.model.inputs,
                                            "outputs": cfg.model.outputs,
                                            "recipes": cfg.model.recipes,
                                            'workspace_path': cfg.tuning.workspace.path})
        if framework == 'mxnet':
            framework_specific_info.update({"b_dataloader": self._b_dataloader})
        if 'onnxrt' in framework.lower():
            framework_specific_info.update({"backend": framework.lower().split('_')[-1],
                                            'workspace_path': cfg.tuning.workspace.path})
        if framework == 'pytorch':
            framework_specific_info.update({"q_dataloader": None,
                                            "benchmark": True})
        if framework == 'pytorch_ipex':
            framework_specific_info.update({"workspace_path": cfg.tuning.workspace.path,
                                            "q_dataloader": None,
                                            "benchmark": True})

        assert isinstance(self._model, LpotModel), 'need set lpot Model for quantization....'

        adaptor = FRAMEWORKS[framework](framework_specific_info)

        assert cfg.evaluation is not None, 'benchmark need evaluation filed not be None'
        results = {}
        for mode in cfg.evaluation.keys():
            # -1 means "iterate over the whole dataloader".
            iteration = -1 if deep_get(cfg, 'evaluation.{}.iteration'.format(mode)) is None \
                else deep_get(cfg, 'evaluation.{}.iteration'.format(mode))
            metric = deep_get(cfg, 'evaluation.{}.metric'.format(mode))
            b_postprocess_cfg = deep_get(cfg, 'evaluation.{}.postprocess'.format(mode))

            # Lazily build the dataloader from the yaml config the first
            # time around; subsequent modes (or a user-supplied loader
            # via the b_dataloader setter) reuse self._b_dataloader.
            if self._b_dataloader is None:
                assert deep_get(cfg, 'evaluation.{}.dataloader'.format(mode)) is not None, \
                    'dataloader field of yaml file is missing'

                b_dataloader_cfg = deep_get(cfg, 'evaluation.{}.dataloader'.format(mode))
                self._b_dataloader = create_dataloader(self.framework, b_dataloader_cfg)
            # NOTE: the original if/else duplicated this identical call in
            # both branches; collapsed into one.
            b_func = create_eval_func(self.framework,
                                      self._b_dataloader,
                                      adaptor,
                                      metric,
                                      b_postprocess_cfg,
                                      iteration=iteration)

            objective = cfg.tuning.objective.lower()
            self.objective = OBJECTIVES[objective](cfg.tuning.accuracy_criterion,
                                                   is_measure=True)

            val = self.objective.evaluate(b_func, self._model)
            logger.info('{} mode benchmark done!'.format(mode))
            # measurer contain info not only performance(eg, memory, model_size)
            # also measurer have result list among steps
            acc, _ = val
            batch_size = self._b_dataloader.batch_size
            warmup = 0 if deep_get(cfg, 'evaluation.{}.warmup'.format(mode)) is None \
                else deep_get(cfg, 'evaluation.{}.warmup'.format(mode))

            assert len(self.objective.measurer.result_list()) > warmup, \
                'itreation should larger than warmup'

            results[mode] = acc, batch_size, \
                self.objective.measurer.result_list()[warmup:]

        return results

    @property
    def b_dataloader(self):
        # Benchmark dataloader; may be None until set or created in __call__.
        return self._b_dataloader

    @b_dataloader.setter
    def b_dataloader(self, dataloader):
        """Set Data loader for benchmark, It is iterable and the batched data
           should consists of a tuple like (input, label) or yield (input, _),
           when b_dataloader is set, user can configure postprocess(optional) and metric
           in yaml file or set postprocess and metric cls for evaluation.
           Or just get performance without label in dataloader and configure postprocess/metric.

           Args:
               dataloader(generator): user are supported to set a user defined dataloader
                                      which meet the requirements that can yield tuple of
                                      (input, label)/(input, _) batched data.
                                      Another good practice is to use lpot.common.DataLoader
                                      to initialize a lpot dataloader object.
                                      Notice lpot.common.DataLoader is just a wrapper of the
                                      information needed to build a dataloader, it can't yield
                                      batched data and only in this setter method
                                      a 'real' eval_dataloader will be created,
                                      the reason is we have to know the framework info
                                      and only after the Quantization object created then
                                      framework infomation can be known. Future we will support
                                      creating iterable dataloader from lpot.common.DataLoader
        """
        from .common import _generate_common_dataloader
        self._b_dataloader = _generate_common_dataloader(dataloader, self.framework)

    @property
    def model(self):
        # Framework-specific internal model object (or None before set).
        return self._model

    @model.setter
    def model(self, user_model):
        """Set the user model and dispatch to framework specific internal model object

        Args:
           user_model: user are supported to set model from original framework model format
                       (eg, tensorflow frozen_pb or path to a saved model), but not recommended.
                       Best practice is to set from a initialized lpot.common.Model.
                       If tensorflow model is used, model's inputs/outputs will be auto inferenced,
                       but sometimes auto inferenced inputs/outputs will not meet your requests,
                       set them manually in config yaml file. Another corner case is slim model
                       of tensorflow, be careful of the name of model configured in yaml file,
                       make sure the name is in supported slim model list.
        """
        from .common import Model as LpotModel
        from .model import MODELS
        if not isinstance(user_model, LpotModel):
            logger.warning('force convert user raw model to lpot model, \
                better initialize lpot.common.Model and set....')
            user_model = LpotModel(user_model)
        framework_model_info = {}
        cfg = self.conf.usr_cfg
        if self.framework == 'tensorflow':
            framework_model_info.update(
                {'name': cfg.model.name,
                 'input_tensor_names': cfg.model.inputs,
                 'output_tensor_names': cfg.model.outputs,
                 'workspace_path': cfg.tuning.workspace.path})
        self._model = MODELS[self.framework](
            user_model.root, framework_model_info, **user_model.kwargs)

    @property
    def metric(self):
        logger.warning('metric not support getter....')
        return None

    @metric.setter
    def metric(self, user_metric):
        """Set metric class and lpot will initialize this class when evaluation
           lpot have many built-in metrics, but user can set specific metric through
           this api. The metric class should take the outputs of the model or
           postprocess(if have) as inputs, lpot built-in metric always take
           (predictions, labels) as inputs for update,
           and user_metric.metric_cls should be sub_class of lpot.metric.BaseMetric.

           Args:
               user_metric(lpot.common.Metric): user_metric should be object initialized from
                                                lpot.common.Metric, in this method the
                                                user_metric.metric_cls will be registered to
                                                specific frameworks and initialized.
        """
        from .common import Metric as LpotMetric
        assert isinstance(user_metric, LpotMetric), \
            'please initialize a lpot.common.Metric and set....'
        metric_cfg = {user_metric.name: {**user_metric.kwargs}}
        if deep_get(self.conf.usr_cfg, "evaluation.accuracy.metric"):
            logger.warning('already set metric in yaml file, will override it...')
        deep_set(self.conf.usr_cfg, "evaluation.accuracy.metric", metric_cfg)
        from .conf.dotdict import DotDict
        self.conf.usr_cfg = DotDict(self.conf.usr_cfg)
        from .metric import METRICS
        metrics = METRICS(self.framework)
        metrics.register(user_metric.name, user_metric.metric_cls)

    @property
    def postprocess(self):
        # BUG FIX: the original getter declared an extra 'user_postprocess'
        # parameter; a property getter takes only 'self', so any attribute
        # read of .postprocess raised TypeError.
        logger.warning('postprocess not support getter....')
        return None

    @postprocess.setter
    def postprocess(self, user_postprocess):
        """Set postprocess class and lpot will initialize this class when evaluation.
           The postprocess class should take the outputs of the model as inputs, and
           output (predictions, labels) as inputs for metric update.
           user_postprocess.postprocess_cls should be sub_class of lpot.data.BaseTransform.

           Args:
               user_postprocess(lpot.common.Postprocess):
                   user_postprocess should be object initialized from lpot.common.Postprocess,
                   in this method the user_postprocess.postprocess_cls will be
                   registered to specific frameworks and initialized.
        """
        from .common import Postprocess as LpotPostprocess
        assert isinstance(user_postprocess, LpotPostprocess), \
            'please initialize a lpot.common.Postprocess and set....'
        postprocess_cfg = {user_postprocess.name: {**user_postprocess.kwargs}}
        if deep_get(self.conf.usr_cfg, "evaluation.accuracy.postprocess"):
            logger.warning('already set postprocess in yaml file, will override it...')
        deep_set(self.conf.usr_cfg, "evaluation.accuracy.postprocess.transform", postprocess_cfg)
        from .data import TRANSFORMS
        postprocesses = TRANSFORMS(self.framework, 'postprocess')
        postprocesses.register(user_postprocess.name, user_postprocess.postprocess_cls)
        logger.info("{} registered to postprocess".format(user_postprocess.name))
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,657
|
SnehalA/lpot
|
refs/heads/master
|
/test/test_onnx_calibrate.py
|
import os
import shutil
import sys
import unittest
import numpy as np
import onnx
from onnx import helper, TensorProto, numpy_helper
sys.path.append('..')
# from lpot.data.dataloaders.onnx_dataloader import ONNXDataLoader
# from lpot.data.datasets.imagenet_dataset import ImagenetDataset
# from lpot.data.transforms.imagenet_transform import ResizeCropImagenetTransform
from lpot.adaptor.ox_utils.onnx_calibrate import ONNXCalibrater, CalibrationDataReader, calibrate
from lpot.data.datasets.dataset import Dataset
def generate_input_initializer(tensor_shape, tensor_dtype, input_name):
    '''
    Helper function to generate initializers for test inputs
    '''
    # Random values in [0, 1), cast to the requested dtype, wrapped as
    # an ONNX initializer tensor named input_name.
    values = np.random.ranf(tensor_shape).astype(tensor_dtype)
    return numpy_helper.from_array(values, input_name)
class TestDataReader(CalibrationDataReader):
    '''for test purpose'''

    def __init__(self):
        # No state required: this reader never produces data.
        pass

    def get_next(self):
        # Always exhausted, so the calibration loop stops immediately.
        return None
class TestDataReaderSecond(CalibrationDataReader):
    '''for test purpose'''

    def __init__(self):
        # Data is materialized lazily on the first get_next() call.
        self.preprocess_flag = True
        self.enum_data_dicts = []

    def get_next(self):
        """Return the next {'input0': array} feed dict, or None when done."""
        if self.preprocess_flag:
            self.preprocess_flag = False
            batches = [
                np.array([[[[0.45, 0.60, 0.75]],
                           [[0.25, 0.50, 0.75]],
                           [[0.90, 0.70, 0.50]]]]).astype(np.float32),
                np.array([[[[0.62, 0.94, 0.38]],
                           [[0.70, 0.13, 0.07]],
                           [[0.89, 0.75, 0.84]]]]).astype(np.float32),
                np.array([[[[0.64, 0.24, 0.97]],
                           [[0.82, 0.58, 0.27]],
                           [[0.019, 0.34, 0.02]]]]).astype(np.float32),
            ]
            self.enum_data_dicts = iter({'input0': batch} for batch in batches)
        return next(self.enum_data_dicts, None)
class TestDataset(Dataset):
    """Configuration for Imagenet dataset."""

    def __init__(self):
        # Three fixed 5-D float32 samples used as calibration data.
        self.data_list = [
            np.array([[[[[0.45, 0.60, 0.75]],
                        [[0.25, 0.50, 0.75]],
                        [[0.90, 0.70, 0.50]]]]]).astype(np.float32),
            np.array([[[[[0.62, 0.94, 0.38]],
                        [[0.70, 0.13, 0.07]],
                        [[0.89, 0.75, 0.84]]]]]).astype(np.float32),
            np.array([[[[[0.64, 0.24, 0.97]],
                        [[0.82, 0.58, 0.27]],
                        [[0.019, 0.34, 0.02]]]]]).astype(np.float32),
        ]

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, index):
        return self.data_list[index]
class TestCalibrate(unittest.TestCase):
    """Tests for ONNXCalibrater graph augmentation and quantization params."""

    # Scratch directory holding the ONNX models generated by the tests.
    work_space = './onnxrt_calib_test'

    @classmethod
    def setUpClass(cls):
        os.makedirs(cls.work_space)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.work_space, ignore_errors=True)

    def test_augment_graph(self):
        ''' TEST_CONFIG_1'''

        #   Conv
        #    |
        #   Clip
        #    |
        #   MatMul

        A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 1, 5, 5])
        B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 1, 3, 3])
        E = helper.make_tensor_value_info('E', TensorProto.FLOAT, [1, 1, 5, 1])
        F = helper.make_tensor_value_info('F', TensorProto.FLOAT, [1, 1, 5, 1])
        conv_node = onnx.helper.make_node('Conv', ['A', 'B'], ['C'], name='Conv', kernel_shape=[3, 3], pads=[1, 1, 1, 1])
        clip_node = onnx.helper.make_node('Clip', ['C'], ['D'], name='Clip')
        matmul_node = onnx.helper.make_node('MatMul', ['D', 'E'], ['F'], name='MatMul')
        graph = helper.make_graph([conv_node, clip_node, matmul_node], 'test_graph_1', [A, B, E], [F])
        model = helper.make_model(graph)
        test_model_path = os.path.join(self.work_space, './test_model_1.onnx')
        onnx.save(model, test_model_path)
        test_model = onnx.load(test_model_path)

        # Augmenting graph
        data_reader = TestDataReader()
        augmented_model_path = os.path.join(self.work_space, './augmented_test_model_1.onnx')
        calibrater = ONNXCalibrater(test_model, data_reader, ['Conv', 'MatMul'], [], [], augmented_model_path)
        augmented_model = calibrater.augment_graph()
        onnx.save(augmented_model, augmented_model_path)

        # Checking if each added ReduceMin and ReduceMax node and its output exists
        augmented_model_node_names = [node.name for node in augmented_model.graph.node]
        augmented_model_outputs = [output.name for output in augmented_model.graph.output]
        added_node_names = ['A_ReduceMin', 'A_ReduceMax', 'B_ReduceMin', 'B_ReduceMax', 'C_ReduceMin', \
            'C_ReduceMax', 'D_ReduceMin', 'D_ReduceMax', 'F_ReduceMin', 'F_ReduceMax']
        added_outputs = ['A_ReduceMin', 'A_ReduceMax', 'B_ReduceMin', 'B_ReduceMax', 'C_ReduceMin', \
            'C_ReduceMax', 'D_ReduceMin', 'D_ReduceMax', 'F_ReduceMin', 'F_ReduceMax']
        # Original 3 nodes + 6 ReduceMin/ReduceMax pairs (12 added nodes)
        self.assertEqual(len(augmented_model_node_names), 15)
        # Original 1 graph output + 12 added outputs
        self.assertEqual(len(augmented_model_outputs), 13)
        for name in added_node_names:
            self.assertTrue(name in augmented_model_node_names)
        for output in added_outputs:
            self.assertTrue(output in augmented_model_outputs)

        print('Finished TEST_CONFIG_1')

        '''TEST_CONFIG_2'''

        #   Conv
        #    |
        #   Conv

        G = helper.make_tensor_value_info('G', TensorProto.FLOAT, [1, 1, 5, 5])
        H = helper.make_tensor_value_info('H', TensorProto.FLOAT, [1, 1, 3, 3])
        J = helper.make_tensor_value_info('J', TensorProto.FLOAT, [1, 1, 3, 3])
        K = helper.make_tensor_value_info('K', TensorProto.FLOAT, [1, 1, 5, 5])
        conv_node_1 = onnx.helper.make_node('Conv', ['G', 'H'], ['I'], name='Conv', kernel_shape=[3, 3], pads=[1, 1, 1, 1])
        conv_node_2 = onnx.helper.make_node('Conv', ['I', 'J'], ['K'], name='Conv', kernel_shape=[3, 3], pads=[1, 1, 1, 1])
        graph = helper.make_graph([conv_node_1, conv_node_2], 'test_graph_2', [G, H, J], [K])
        model = helper.make_model(graph)
        test_model_path = os.path.join(self.work_space, './test_model_2.onnx')
        onnx.save(model, test_model_path)
        test_model = onnx.load(test_model_path)

        # Augmenting graph
        data_reader = TestDataReader()
        augmented_model_path = os.path.join(self.work_space, './augmented_test_model_2.onnx')
        calibrater = ONNXCalibrater(test_model, data_reader, ['Conv', 'MatMul'], [], [], augmented_model_path)
        augmented_model = calibrater.augment_graph()
        onnx.save(augmented_model, augmented_model_path)

        augmented_model_node_names = [node.name for node in augmented_model.graph.node]
        augmented_model_outputs = [output.name for output in augmented_model.graph.output]
        added_node_names = ['I_ReduceMin', 'I_ReduceMax', 'J_ReduceMin', 'J_ReduceMax', 'H_ReduceMin', 'H_ReduceMax', \
            'G_ReduceMin', 'G_ReduceMax', 'K_ReduceMin', 'K_ReduceMax']
        added_outputs = ['I_ReduceMin', 'I_ReduceMax', 'J_ReduceMin', 'J_ReduceMax', 'H_ReduceMin', 'H_ReduceMax', \
            'G_ReduceMin', 'G_ReduceMax', 'K_ReduceMin', 'K_ReduceMax']
        # Original 2 nodes + 5 ReduceMin/ReduceMax pairs (10 added nodes)
        # (original comment said "* 4", which did not match the asserted 12)
        self.assertEqual(len(augmented_model_node_names), 12)
        # Original 1 graph output + 10 added outputs
        self.assertEqual(len(augmented_model_outputs), 11)
        for name in added_node_names:
            self.assertTrue(name in augmented_model_node_names)
        for output in added_outputs:
            self.assertTrue(output in augmented_model_outputs)

        print('Finished TEST_CONFIG_2')

        '''TEST_CONFIG_3'''

        #   Relu
        #    |  \
        #   Conv \
        #    |    |
        #   Clip  |
        #    |   /
        #   MatMul

        L = helper.make_tensor_value_info('L', TensorProto.FLOAT, [1, 1, 5, 5])
        N = helper.make_tensor_value_info('N', TensorProto.FLOAT, [1, 1, 3, 3])
        Q = helper.make_tensor_value_info('Q', TensorProto.FLOAT, [1, 1, 5, 5])
        relu_node = onnx.helper.make_node('Relu', ['L'], ['M'], name='Relu')
        conv_node = onnx.helper.make_node('Conv', ['M', 'N'], ['O'], name='Conv', kernel_shape=[3, 3], pads=[1, 1, 1, 1])
        clip_node = onnx.helper.make_node('Clip', ['O'], ['P'], name='Clip')
        matmul_node = onnx.helper.make_node('MatMul', ['P', 'M'], ['Q'], name='MatMul')
        graph = helper.make_graph([relu_node, conv_node, clip_node, matmul_node], 'test_graph_3', [L, N], [Q])
        model = helper.make_model(graph)
        test_model_path = os.path.join(self.work_space, './test_model_3.onnx')
        onnx.save(model, test_model_path)
        test_model = onnx.load(test_model_path)

        # Augmenting graph
        data_reader = TestDataReader()
        augmented_model_path = os.path.join(self.work_space, './augmented_test_model_3.onnx')
        calibrater = ONNXCalibrater(test_model, data_reader, ['Conv', 'MatMul'], [], [], augmented_model_path)
        augmented_model = calibrater.augment_graph()
        onnx.save(augmented_model, augmented_model_path)

        augmented_model_node_names = [node.name for node in augmented_model.graph.node]
        augmented_model_outputs = [output.name for output in augmented_model.graph.output]
        added_node_names = ['O_ReduceMin', 'O_ReduceMax', 'Q_ReduceMin', 'Q_ReduceMax', 'N_ReduceMin', \
            'N_ReduceMax', 'P_ReduceMin', 'P_ReduceMax', 'M_ReduceMin', 'M_ReduceMax']
        added_outputs = ['O_ReduceMin', 'O_ReduceMax', 'Q_ReduceMin', 'Q_ReduceMax', 'N_ReduceMin', \
            'N_ReduceMax', 'P_ReduceMin', 'P_ReduceMax', 'M_ReduceMin', 'M_ReduceMax']
        # Original 4 nodes + 5 ReduceMin/ReduceMax pairs (10 added nodes)
        # (original comment said "* 8", which did not match the asserted 14)
        self.assertEqual(len(augmented_model_node_names), 14)
        # Original 1 graph output + 10 added outputs
        self.assertEqual(len(augmented_model_outputs), 11)
        for name in added_node_names:
            self.assertTrue(name in augmented_model_node_names)
        for output in added_outputs:
            self.assertTrue(output in augmented_model_outputs)

        print('Finished TEST_CONFIG_3')

        '''TEST_CONFIG_4'''

        #   Attention
        #    |
        #   MatMul

        Attention_weight = helper.make_tensor_value_info('Attention_weight', TensorProto.FLOAT, [13, 7])
        Attention_bias = helper.make_tensor_value_info('Attention_bias', TensorProto.FLOAT, [13, 7])
        Attention_mask = helper.make_tensor_value_info('Attention_mask', TensorProto.INT32, [13, 7])
        S = helper.make_tensor_value_info('S', TensorProto.FLOAT, [13, 7])
        T = helper.make_tensor_value_info('T', TensorProto.FLOAT, [13, 7])
        attention_node = onnx.helper.make_node('Attention', ['Attention_weight', 'Attention_bias', 'Attention_mask'], ['R'], name='Attention')
        matmul_node = onnx.helper.make_node('MatMul', ['R', 'S'], ['T'], name='MatMul')
        graph = helper.make_graph([attention_node, matmul_node], 'test_graph_4', [Attention_weight, Attention_bias, Attention_mask, S], [T])
        model = helper.make_model(graph)
        test_model_path = os.path.join(self.work_space, './test_model_4.onnx')
        onnx.save(model, test_model_path)
        test_model = onnx.load(test_model_path)

        # Augmenting graph
        data_reader = TestDataReader()
        augmented_model_path = os.path.join(self.work_space, './augmented_test_model_4.onnx')
        calibrater = ONNXCalibrater(test_model, data_reader, ['Conv', 'MatMul', 'Attention'], [], [], augmented_model_path)
        augmented_model = calibrater.augment_graph()
        onnx.save(augmented_model, augmented_model_path)

        augmented_model_node_names = [node.name for node in augmented_model.graph.node]
        augmented_model_outputs = [output.name for output in augmented_model.graph.output]
        added_node_names = ['Attention_bias_ReduceMin', 'Attention_bias_ReduceMax', 'Attention_weight_ReduceMin', \
            'Attention_weight_ReduceMax', 'S_ReduceMin', 'S_ReduceMax', 'R_ReduceMin', 'R_ReduceMax', 'T_ReduceMin', 'T_ReduceMax']
        added_outputs = ['Attention_bias_ReduceMin', 'Attention_bias_ReduceMax', 'Attention_weight_ReduceMin', \
            'Attention_weight_ReduceMax', 'S_ReduceMin', 'S_ReduceMax', 'R_ReduceMin', 'R_ReduceMax', 'T_ReduceMin', 'T_ReduceMax']
        # Original 2 nodes + 5 ReduceMin/ReduceMax pairs (10 added nodes)
        self.assertEqual(len(augmented_model_node_names), 12)
        # Original 1 graph output + 10 added outputs
        self.assertEqual(len(augmented_model_outputs), 11)
        for name in added_node_names:
            self.assertTrue(name in augmented_model_node_names)
        for output in added_outputs:
            self.assertTrue(output in augmented_model_outputs)

        print('Finished TEST_CONFIG_4')

    def test_quant_param_calculation(self):
        '''TEST_CONFIG_5'''

        #   Relu
        #    |  \
        #   Conv \
        #    |    \
        #   Relu   |
        #    |    Conv
        #   Conv  /
        #     \  /
        #      |
        #     Add

        input0 = helper.make_tensor_value_info('input0', TensorProto.FLOAT, [1, 3, 1, 3])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 1, 3])

        X1_weight = generate_input_initializer([3, 3, 1, 1], np.float32, 'X1_weight')
        X1_bias = generate_input_initializer([3], np.float32, 'X1_bias')
        X3_weight = generate_input_initializer([3, 3, 1, 1], np.float32, 'X3_weight')
        X3_bias = generate_input_initializer([3], np.float32, 'X3_bias')
        X5_weight = generate_input_initializer([3, 3, 1, 1], np.float32, 'X5_weight')
        X5_bias = generate_input_initializer([3], np.float32, 'X5_bias')

        relu_node_1 = onnx.helper.make_node('Relu', ['input0'], ['X1'], name='Relu1')
        conv_node_1 = onnx.helper.make_node('Conv', ['X1', 'X1_weight', 'X1_bias'], ['X2'], name='Conv1')
        relu_node_2 = onnx.helper.make_node('Relu', ['X2'], ['X3'], name='Relu2')
        conv_node_2 = onnx.helper.make_node('Conv', ['X3', 'X3_weight', 'X3_bias'], ['X4'], name='Conv2')
        conv_node_3 = onnx.helper.make_node('Conv', ['X1', 'X5_weight', 'X5_bias'], ['X5'], name='Conv3')
        add_node = onnx.helper.make_node('Add', ['X4', 'X5'], ['output'], name='Add')

        graph = helper.make_graph([relu_node_1, conv_node_1, relu_node_2, conv_node_2, conv_node_3, add_node], 'test_graph_5', [input0], [output])
        graph.initializer.add().CopyFrom(X1_weight)
        graph.initializer.add().CopyFrom(X1_bias)
        graph.initializer.add().CopyFrom(X3_weight)
        graph.initializer.add().CopyFrom(X3_bias)
        graph.initializer.add().CopyFrom(X5_weight)
        graph.initializer.add().CopyFrom(X5_bias)

        model = helper.make_model(graph)
        test_model_path = os.path.join(self.work_space, './test_model_5.onnx')
        onnx.save(model, test_model_path)
        test_model = onnx.load(test_model_path)
        data_reader = TestDataset()
        augmented_model_path = os.path.join(self.work_space, './augmented_test_model_5.onnx')
        calibrater = ONNXCalibrater(test_model, data_reader, ['Conv', 'MatMul'], [], [], augmented_model_path)
        augmented_model = calibrater.augment_graph()
        onnx.save(augmented_model, augmented_model_path)

        # test calculation of quantization params
        # TO_DO: check rmin/rmax
        dict_for_quantization = calibrater.get_intermediate_outputs()
        quantization_params_dict = calibrater.calculate_quantization_params(dict_for_quantization)

        # check the size of the quantization dictionary
        self.assertEqual(len(quantization_params_dict), 11)

        # check the computation of zp and scale
        for key, value in quantization_params_dict.items():
            self.assertTrue(value is not None)
            self.assertTrue(len(value) == 2)

            thresholds = dict_for_quantization[key]
            rmin = min(thresholds[0], 0)
            rmax = max(thresholds[1], 0)
            if key == 'X2':  # next_node is Relu
                if rmin < 0: rmin = 0

            # Expected asymmetric uint8 quantization parameters from the
            # observed [rmin, rmax] range.
            scale_expected = np.float32((rmax - rmin) / 255 if rmin != rmax else 1)
            zp_expected = np.uint8(round(max(0, min(255, (0 - rmin) / scale_expected))))
            zp_actual = value[0]
            scale_actual = value[1]

            self.assertEqual(zp_expected, zp_actual)
            self.assertEqual(scale_expected, scale_actual)

        print('Finished' + ' test calculation of quantization params.')
if __name__ == '__main__':
    # Run the calibration test suite when this file is executed directly.
    unittest.main()
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,658
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/ux/web/configuration.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration module for UX server."""
import argparse
import socket
from typing import Dict
from numpy.random import randint
from lpot.utils.utility import singleton
from lpot.ux.utils.exceptions import NotFoundException
from lpot.ux.utils.utils import determine_ip
@singleton
class Configuration:
    """Configuration object for UX server."""

    # Default listening port and the number of candidates to probe.
    PORT_DEFAULT = 5000
    MAX_PORTS_TRIED = 10

    def __init__(self) -> None:
        """Set the variables."""
        self.ip = determine_ip()
        cli_args = self.get_command_line_args()
        self.port = self.determine_port(cli_args)

    def get_command_line_args(self) -> Dict:
        """Return arguments passed in command line."""
        parser = argparse.ArgumentParser(description="Run UX server.")
        parser.add_argument(
            "-p",
            "--port",
            type=int,
            help="port number to listen on",
        )
        return vars(parser.parse_args())

    def determine_port(self, args: Dict) -> int:
        """
        Return port to be used by the server.

        Will raise a NotFoundException if port is already in use.

        When port given in command line, only that port will be tried.
        When no port specified will try self.MAX_PORTS_TRIED times,
        starting with self.PORT_DEFAULT.
        """
        command_line_port = args.get("port")
        if command_line_port is not None:
            # An explicit port is all-or-nothing: fail fast when taken.
            if self.is_port_taken(command_line_port):
                raise NotFoundException(
                    f"Port {command_line_port} already in use, exiting.",
                )
            return command_line_port

        # Default port first, then random high ports (1025..65535).
        ports = [self.PORT_DEFAULT] + randint(
            1025,
            65536,
            self.MAX_PORTS_TRIED - 1,
        ).tolist()
        for candidate in ports:
            if not self.is_port_taken(candidate):
                return candidate

        raise NotFoundException(
            f"Unable to find a free port in {len(ports)} attempts, exiting.",
        )

    def is_port_taken(self, port: int) -> bool:
        """Return if given port is already in use."""
        # A successful bind means the port is free; the context manager
        # guarantees the probe socket is closed either way.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
            try:
                probe.bind((self.ip, port))
            except socket.error:
                return True
        return False
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,659
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/pruning.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .conf.config import Conf
from .policy import POLICIES
from .utils import logger
from .utils.utility import singleton
from .model import BaseModel as LpotModel
@singleton
class Pruning(object):
    """This is base class of pruning object.

       Since DL use cases vary in the accuracy metrics (Top-1, MAP, ROC etc.), loss criteria
       (<1% or <0.1% etc.) and pruning objectives (performance, memory footprint etc.).
       Pruning class provides a flexible configuration interface via YAML for users to specify
       these parameters.

    Args:
        conf_fname (string): The path to the YAML configuration file containing accuracy goal,
        pruning objective and related dataloaders etc.

    """

    def __init__(self, conf_fname):
        self.conf = Conf(conf_fname)
        self.cfg = self.conf.usr_cfg
        self.framework = self.cfg.model.framework.lower()
        self._model = None
        self._calib_func = None
        # Populated by __call__; initialized here so the epoch/batch hooks
        # are safe no-ops if they are invoked before pruning starts.
        self.policies = []

    def on_epoch_begin(self, epoch):
        """ called on the beginning of epochs"""
        for policy in self.policies:
            policy.on_epoch_begin(epoch)

    def on_batch_begin(self, batch_id):
        """ called on the beginning of batches"""
        for policy in self.policies:
            policy.on_batch_begin(batch_id)

    def on_batch_end(self):
        """ called on the end of batches"""
        for policy in self.policies:
            policy.on_batch_end()

    def on_epoch_end(self):
        """ called on the end of epochs"""
        for policy in self.policies:
            policy.on_epoch_end()
        # Report the sparsity reached so far after every epoch.
        stats, sparsity = self._model.report_sparsity()
        logger.info(stats)
        logger.info(sparsity)

    def __call__(self):
        """The main entry point of pruning.

           This interface currently only works on pytorch
           and provides three usages:
           a) Fully yaml configuration: User specifies all the info through yaml,
              including dataloaders used in calibration and evaluation phases
              and quantization tuning settings.
              For this usage, only model parameter is mandatory.
           b) Partial yaml configuration: User specifies dataloaders used in calibration
              and evaluation phase by code.
              The tool provides built-in dataloaders and evaluators, user just need provide
              a dataset implemented __iter__ or __getitem__ methods and invoke dataloader()
              with dataset as input parameter to create lpot dataloader before calling this
              function.
              After that, User specifies fp32 "model", calibration dataset "q_dataloader"
              and evaluation dataset "eval_dataloader".
              The calibrated and quantized model is evaluated with "eval_dataloader"
              with evaluation metrics specified in the configuration file. The evaluation tells
              the tuner whether the quantized model meets the accuracy criteria. If not,
              the tuner starts a new calibration and tuning flow.
              For this usage, model, q_dataloader and eval_dataloader parameters are mandatory.
           c) Partial yaml configuration: User specifies dataloaders used in calibration phase
              by code.
              This usage is quite similar with b), just user specifies a custom "eval_func"
              which encapsulates the evaluation dataset by itself.
              The calibrated and quantized model is evaluated with "eval_func".
              The "eval_func" tells the tuner whether the quantized model meets
              the accuracy criteria. If not, the Tuner starts a new calibration and tuning flow.
              For this usage, model, q_dataloader and eval_func parameters are mandatory.

        Returns:
            pruned model: best pruned model found, otherwise return None

        """
        framework_specific_info = {'device': self.cfg.device,
                                   'approach': self.cfg.quantization.approach,
                                   'random_seed': self.cfg.tuning.random_seed,
                                   'q_dataloader': None}

        if self.framework == 'tensorflow':
            framework_specific_info.update(
                {"inputs": self.cfg.model.inputs, "outputs": self.cfg.model.outputs})

        assert isinstance(self._model, LpotModel), 'need set lpot Model for quantization....'

        # Collect every configured policy instance keyed by its yaml name.
        policies = {}
        for policy in POLICIES:
            for name in self.cfg["pruning"][policy]:
                policies[name] = {"policy_name": policy,
                                  "policy_spec": self.cfg["pruning"][policy][name]}

        self.policies = []
        for policy_spec in policies.values():
            # Use the logger instead of a bare print for debug output.
            logger.debug(policy_spec)
            self.policies.append(POLICIES[policy_spec["policy_name"]](
                self._model, policy_spec["policy_spec"], self.cfg))
        return self._calib_func(self._model.model)

    @property
    def model(self):
        """Getter of the lpot-wrapped model set by the user."""
        return self._model

    @model.setter
    def model(self, user_model):
        """Only support PyTorch model, it's torch.nn.model instance.

        Args:
           user_model: user are supported to set model from original PyTorch model format
                       Best practice is to set from a initialized lpot.common.Model.

        """
        from .common import Model as LpotModel
        if not isinstance(user_model, LpotModel):
            logger.warning('force convert user raw model to lpot model, \
                better initialize lpot.common.Model and set....')
            user_model = LpotModel(user_model)
        framework_model_info = {}
        cfg = self.conf.usr_cfg
        if self.framework == 'tensorflow':
            framework_model_info.update(
                {'name': cfg.model.name,
                 'input_tensor_names': cfg.model.inputs,
                 'output_tensor_names': cfg.model.outputs,
                 'workspace_path': cfg.tuning.workspace.path})

        from .model import MODELS
        self._model = MODELS[self.framework](\
            user_model.root, framework_model_info, **user_model.kwargs)

    @property
    def q_func(self):
        """Getter is not supported; the training function is write-only."""
        logger.warning('q_func not support getter....')
        return None

    @q_func.setter
    def q_func(self, user_q_func):
        """Training function for pruning.

        Args:
            user_q_func: This function takes "model" as input parameter
                         and executes entire training process with self
                         contained training hyper-parameters. If q_func set,
                         an evaluation process must be triggered and user should
                         set eval_dataloader with metric configured or directly eval_func
                         to make evaluation of the model executed.
        """
        logger.warning('q_func is to be deprecated, please construct q_dataloader....')
        self._calib_func = user_q_func
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,660
|
SnehalA/lpot
|
refs/heads/master
|
/examples/helloworld/tf_example4/test.py
|
import tensorflow as tf
import time
import numpy as np
from tensorflow import keras
from lpot.data import DATASETS, DataLoader
from lpot import common
tf.compat.v1.disable_eager_execution()
def main():
    """Quantize an RFCN model with a dummy calibration dataset via config yaml."""
    import lpot
    quantizer = lpot.Quantization('./conf.yaml')
    # Synthetic labelled images matching the model's expected input layout.
    calib_dataset = quantizer.dataset('dummy', shape=(100, 100, 100, 3), label=True)
    quantizer.model = common.Model('./model/public/rfcn-resnet101-coco-tf/model/public/rfcn-resnet101-coco-tf/rfcn_resnet101_coco_2018_01_28/')
    quantizer.calib_dataloader = common.DataLoader(calib_dataset)
    quantizer()


if __name__ == "__main__":
    main()
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,661
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/adaptor/adaptor.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
'''The framework backends supported by lpot, including tensorflow, mxnet and pytorch.
User could add new backend support by implementing new Adaptor subclass under this directory.
The naming convention of new Adaptor subclass should be something like ABCAdaptor, user
could choose this framework backend by setting "abc" string in framework field of yaml.
FRAMEWORKS variable is used to store all implemented Adaptor subclasses of framework backends.
'''
# Maps a lowercase framework name (e.g. "tensorflow") to its Adaptor subclass.
FRAMEWORKS = {}


def adaptor_registry(cls):
    '''The class decorator used to register all Adaptor subclasses.

       Registers `cls` in FRAMEWORKS under its name with the trailing
       'Adaptor' suffix stripped and lowercased.

    Args:
        cls (class): The class of register.

    Raises:
        ValueError: if the class name does not end with 'Adaptor' or the
            derived framework name is already registered.
    '''
    # Raise instead of assert: asserts are stripped under `python -O`,
    # which would silently skip this validation.
    if not cls.__name__.endswith('Adaptor'):
        raise ValueError(
            "The name of subclass of Adaptor should end with 'Adaptor' substring.")
    framework = cls.__name__[:-len('Adaptor')].lower()
    if framework in FRAMEWORKS:
        raise ValueError('Cannot have two frameworks with the same name.')
    FRAMEWORKS[framework] = cls
    return cls
class Adaptor(object):
    '''The base class of framework adaptor layer.

       NOTE(review): methods are marked @abstractmethod but the class does not
       use abc.ABCMeta, so instantiation of incomplete subclasses is NOT
       prevented at construction time -- unimplemented methods only fail with
       NotImplementedError when called. Left as-is to avoid breaking existing
       subclasses/instantiation sites.
    '''

    def __init__(self, framework_specific_info):
        # Base class keeps no state; subclasses consume framework_specific_info.
        pass

    @abstractmethod
    def quantize(self, tune_cfg, model, dataloader, q_func=None):
        '''The function is used to do calibration and quanitization in post-training quantization.

        Args:
            tune_cfg(dict): The chosen tuning configuration.
            model (object): The model to do calibration.
            dataloader(object): The dataloader used to load calibration dataset.
            q_func (optional): training function for quantization aware training mode.
        '''
        raise NotImplementedError

    @abstractmethod
    def evaluate(self, model, dataloader, postprocess=None,
                 metric=None, measurer=None, iteration=-1, tensorboard=False):
        '''The function is used to run evaluation on validation dataset.

        Args:
            model (object): The model to do calibration.
            dataloader (generator): generate the data and labels.
            postprocess (object, optional): process the result from the model
            metric (object, optional): Depends on model category. Defaults to None.
            measurer (object, optional): for precise benchmark measurement.
            iteration(int, optional): control steps of mini-batch
            tensorboard (boolean, optional): for tensorboard inspect tensor.
        '''
        raise NotImplementedError

    @abstractmethod
    def query_fw_capability(self, model):
        '''The function is used to return framework tuning capability.

        Args:
            model (object): The model to query quantization tuning capability.
        '''
        raise NotImplementedError

    @abstractmethod
    def query_fused_patterns(self, model):
        '''The function is used to run fused patterns in framework.

        Args:
            model (object): The model to do calibration.

        Return:
            [['conv', 'relu'], ['conv', 'relu', 'bn']]
        '''
        raise NotImplementedError

    @abstractmethod
    def inspect_tensor(self, model, dataloader, op_list=None, iteration_list=None):
        '''The function is used by tune strategy class for dumping tensor info.

        Args:
            model (object): The model to do calibration.
            op_list (list, optional): ops whose tensors should be dumped;
                None is treated as an empty list. (Changed from a mutable
                `[]` default, which is shared across calls.)
            iteration_list (list, optional): iterations to dump at; None is
                treated as an empty list.

        Return:
            Numpy Array Dict
            {'op1': tensor, 'op2': tensor}
        '''
        raise NotImplementedError

    @abstractmethod
    def mapping(self, src_model, dst_model):
        '''The function is used to create a dict to map tensor name
           of src model to tensor name of dst model.

        Return:
            Dict
            {'src_op1': 'dst_op1'}
        '''
        raise NotImplementedError

    def quantize_input(self, model):
        ''' quantize the model to be able to take quantized input

        Args:
            model (object): The model to quantize input

        Return:
            model (object): The quantized input model
            scale (float): The scale for dataloader to generate quantized input
        '''
        # Base implementation is a pass-through with identity scale.
        return model, 1.

    @abstractmethod
    def _pre_eval_hook(self, model):
        '''The function is used to do some preprocession before evaluation phase.

        Return:
              model
        '''
        raise NotImplementedError

    @abstractmethod
    def _post_eval_hook(self, model, **args):
        '''The function is used to do some post process after complete evaluation.
        '''
        raise NotImplementedError

    @abstractmethod
    def save(self, model, path):
        '''The function is used by tune strategy class for saving model.

        Args:
            model (object): The model to saved.
            path (string): The path where to save.
        '''
        raise NotImplementedError
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,662
|
SnehalA/lpot
|
refs/heads/master
|
/examples/helloworld/tf_example1/test.py
|
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
import numpy as np
def main():
    """Quantize a frozen MobileNet V1 graph using only the yaml config."""
    import lpot
    from lpot import common
    quantizer = lpot.Quantization('./conf.yaml')
    quantizer.model = common.Model("./mobilenet_v1_1.0_224_frozen.pb")
    quantizer()


if __name__ == "__main__":
    main()
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,663
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/ux/web/server.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main endpoint for GUI."""
import os
from functools import wraps
from threading import Thread
from typing import Any, Callable
from flask import Flask
from flask import Request as WebRequest
from flask import Response as WebResponse
from flask import jsonify, request, send_file
from flask_cors import CORS, cross_origin
from flask_socketio import SocketIO
from lpot.ux.utils.exceptions import (
AccessDeniedException,
ClientErrorException,
NotFoundException,
)
from lpot.ux.utils.utils import determine_ip, is_development_env, verify_file_path
from lpot.ux.web.communication import MessageQueue, Request
from lpot.ux.web.router import Router
# CORS policy: restrict browsers to the UI's own origin; in development allow
# any origin so a separately served frontend can reach the API.
allowed_origin = r"http://{}:*".format(determine_ip())
if is_development_env():
    allowed_origin = "*"
app = Flask(__name__, static_url_path="")
CORS(app, origins=allowed_origin)
# Cap web-socket message size (presumably bytes -- TODO confirm against
# flask_socketio docs) to limit abuse of the channel.
socketio = SocketIO(app, max_http_buffer_size=2000)
router = Router()
# HTTP methods accepted by every route registered below.
METHODS = ["GET", "POST"]
# Suppress TensorFlow messages
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
def run_server(addr: str, port: int, token: str) -> None:
    """Run webserver on specified scheme, address and port.

    Args:
        addr: host/interface to bind to.
        port: TCP port to listen on.
        token: secret used as the Flask session key and the API auth token.
    """
    app.secret_key = token
    # Bug fix: socketio.init_app was previously called twice -- once with the
    # fixed http origin and again with the (possibly "*") development origin.
    # The first call was dead code; initialize exactly once.
    cors_allowed_origins = f"http://{addr}:{port}"
    if is_development_env():
        cors_allowed_origins = "*"
    socketio.init_app(app, cors_allowed_origins=cors_allowed_origins)
    socketio.run(app, host=addr, port=port)
@app.after_request
def block_iframe(response: WebResponse) -> WebResponse:
    """Block iframe and set others CSP."""
    security_headers = {
        "X-Frame-Options": "DENY",
        "Content-Security-Policy": (
            "frame-ancestors 'none'; font-src 'self'; img-src 'self'; script-src 'self'"
        ),
        "Access-Control-Max-Age": "-1",
    }
    for header_name, header_value in security_headers.items():
        response.headers[header_name] = header_value
    return response
@app.after_request
def block_sniffing(response: WebResponse) -> WebResponse:
    """Block MIME sniffing."""
    # Tell browsers to trust the declared Content-Type as-is.
    header_name = "X-Content-Type-Options"
    response.headers[header_name] = "nosniff"
    return response
def require_api_token(func: Callable) -> Any:
    """Validate authorization token."""

    @wraps(func)
    def check_token(*args: str, **kwargs: str) -> Any:
        """Validate that correct token was provided."""
        # The token may arrive in the Authorization header or as a
        # ?token= query argument; the header wins when both are present.
        provided_token = request.headers.get(
            "Authorization",
            request.args.to_dict().get("token", None),
        )
        if app.secret_key != provided_token:
            return (
                "Invalid token, please use the URL displayed by the server on startup",
                403,
            )
        return func(*args, **kwargs)

    return check_token
@app.route("/", methods=METHODS)
def root() -> Any:
    """Serve JS application index."""
    # The single-page app entry point lives in the static folder.
    return app.send_static_file("index.html")
@app.route("/file/<path:path>", methods=METHODS)
@cross_origin(origins=allowed_origin)
@require_api_token
def serve_from_filesystem(path: str) -> Any:
    """Serve any file from filesystem."""
    absolute_path = f"/{path}"
    try:
        # verify_file_path gates access; exceptions map to HTTP errors below.
        verify_file_path(absolute_path)
        return send_file(absolute_path, as_attachment=True, cache_timeout=0)
    except NotFoundException as err:
        return str(err), 404
    except AccessDeniedException as err:
        return str(err), 403
@app.route("/api/<path:subpath>", methods=METHODS)
@cross_origin(origins=allowed_origin)
@require_api_token
def handle_api_call(subpath: str) -> Any:
    """Handle API access."""
    try:
        # Translate the flask request into a domain Request, dispatch it,
        # and serialize the handler's response payload.
        parameters = build_parameters(subpath, request)
        return jsonify(router.handle(parameters).data)
    except ClientErrorException as err:
        return str(err), 400
    except AccessDeniedException as err:
        return str(err), 403
    except NotFoundException as err:
        return str(err), 404
@app.route("/api/<path:subpath>", methods=["OPTIONS"])
@cross_origin(origins=allowed_origin)
def allow_api_call(subpath: str) -> Any:
    """Allow for API access."""
    # CORS preflight handler: any OPTIONS request is unconditionally accepted.
    return "OK"
@app.errorhandler(404)
def page_not_found(e: Any) -> Any:
    """Serve JS application index when no static file found."""
    # Unknown paths fall through to the SPA, which handles its own routing.
    return app.send_static_file("index.html")
@app.after_request
def disable_cache(response: WebResponse) -> WebResponse:
    """Disable cache on all requests.

    Bug fix: Cache-Control was previously assigned twice; the final
    assignment ("public, max-age=0") overwrote the no-store directives and
    effectively re-enabled caching. Set it once with the intended value.
    """
    response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    response.headers["Pragma"] = "no-cache"
    response.headers["Expires"] = "0"
    return response
def build_parameters(endpoint: str, request: WebRequest) -> Request:
    """Build domain object from flask request."""
    # JSON bodies take precedence; otherwise fall back to query arguments
    # (flat=False keeps repeated keys as lists).
    if request.is_json:
        data = request.get_json()
    else:
        data = request.args.to_dict(flat=False)
    return Request(request.method, endpoint, data)
def web_socket_publisher(web_socket: SocketIO) -> None:
    """Send messages from queue via web-socket to GUI."""
    message_queue = MessageQueue()
    # Runs forever on a daemon thread; queue.get blocks until a message arrives.
    while True:
        message = message_queue.get()
        payload = {"status": message.status, "data": message.data}
        web_socket.emit(message.subject, payload, broadcast=True)
# Background daemon thread forwarding queued messages to connected clients.
publisher = Thread(
    target=web_socket_publisher,
    args=(socketio,),
    daemon=True,
)
publisher.start()
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,664
|
SnehalA/lpot
|
refs/heads/master
|
/test/test_adaptor_onnxrt.py
|
import os
import shutil
import unittest
import torch
import torchvision
import yaml
import onnx
from lpot.adaptor import FRAMEWORKS
def build_static_yaml():
    """Write a post-training static-quant config to static_yaml.yaml."""
    config_text = """
        model:
            name: imagenet
            framework: onnxrt_qlinearops
        quantization:
            approach: post_training_static_quant
            calibration:
                sampling_size: 50
            op_wise: {
                'Gather_*': {
                'activation':  {'dtype': ['fp32'], 'scheme':['sym']},
                'weight': {'dtype': ['fp32'], 'scheme':['sym']}
                }
            }
        evaluation:
            accuracy:
                metric:
                    topk: 1
        tuning:
            accuracy_criterion:
                relative: 0.01
            exit_policy:
                timeout: 0
            random_seed: 9527
    """
    with open("static_yaml.yaml", "w", encoding="utf-8") as config_file:
        config_file.write(config_text)
def build_dynamic_yaml():
    """Write a post-training dynamic-quant config to dynamic_yaml.yaml."""
    config_text = """
        model:
            name: imagenet
            framework: onnxrt_integerops
        quantization:
            approach: post_training_dynamic_quant
            calibration:
                sampling_size: 50
        evaluation:
            accuracy:
                metric:
                    topk: 1
        tuning:
            accuracy_criterion:
                relative: 0.01
            exit_policy:
                timeout: 0
            random_seed: 9527
    """
    with open("dynamic_yaml.yaml", "w", encoding="utf-8") as config_file:
        config_file.write(config_text)
def build_non_MSE_yaml():
    """Write a static-quant config using the MSE metric to non_MSE_yaml.yaml."""
    config_text = """
        model:
            name: imagenet
            framework: onnxrt_qlinearops
        quantization:
            approach: post_training_static_quant
            calibration:
                sampling_size: 50
            op_wise: {
                'Gather_*': {
                'activation':  {'dtype': ['fp32'], 'scheme':['sym']},
                'weight': {'dtype': ['fp32'], 'scheme':['sym']}
                }
            }
        evaluation:
            accuracy:
                metric:
                    MSE:
                        compare_label: False
            performance:
                warmup: 5
                iteration: 10
        tuning:
            accuracy_criterion:
                relative: 0.1
            exit_policy:
                timeout: 0
            random_seed: 9527
    """
    with open("non_MSE_yaml.yaml", "w", encoding="utf-8") as config_file:
        config_file.write(config_text)
def eval_func(model):
    """Stub evaluation function: every model scores a fixed 1.0."""
    return 1.0
def export_onnx_model(model, path):
x = torch.randn(100, 3, 224, 224, requires_grad=True)
torch_out = model(x)
# Export the model
torch.onnx.export(model, # model being run
x, # model input (or a tuple for multiple inputs)
path, # where to save the model (can be a file or file-like object)
export_params=True, # store the trained parameter weights inside the model file
opset_version=12, # the ONNX version to export the model to, please ensure at least 11.
do_constant_folding=True, # whether to execute constant folding for optimization
input_names = ["input"], # the model"s input names
output_names = ["output"], # the model"s output names
dynamic_axes={"input" : {0 : "batch_size"}, # variable lenght axes
"output" : {0 : "batch_size"}})
class TestAdaptorONNXRT(unittest.TestCase):
    """Adaptor-level tests for the ONNX Runtime backend."""

    mb_v2_export_path = "mb_v2.onnx"
    mb_v2_model = torchvision.models.mobilenet_v2()
    rn50_export_path = "rn50.onnx"
    rn50_model = torchvision.models.resnet50()

    # Fix: classmethods previously named their first parameter `self`;
    # unittest passes the class object, so the correct name is `cls`.
    @classmethod
    def setUpClass(cls):
        """Generate yaml configs and export both torch models to ONNX once."""
        build_static_yaml()
        build_dynamic_yaml()
        build_non_MSE_yaml()
        export_onnx_model(cls.mb_v2_model, cls.mb_v2_export_path)
        # From here on the class attributes hold loaded ONNX models,
        # replacing the original torch modules.
        cls.mb_v2_model = onnx.load(cls.mb_v2_export_path)
        export_onnx_model(cls.rn50_model, cls.rn50_export_path)
        cls.rn50_model = onnx.load(cls.rn50_export_path)

    @classmethod
    def tearDownClass(cls):
        """Remove generated configs, exported models and workspaces."""
        os.remove("static_yaml.yaml")
        os.remove("dynamic_yaml.yaml")
        os.remove("non_MSE_yaml.yaml")
        os.remove(cls.mb_v2_export_path)
        os.remove(cls.rn50_export_path)
        shutil.rmtree("./saved", ignore_errors=True)
        shutil.rmtree("runs", ignore_errors=True)

    def test_adaptor(self):
        """The qlinearops adaptor can be constructed from framework info."""
        framework_specific_info = {"device": "cpu",
                                   "approach": "post_training_static_quant",
                                   "random_seed": 1234,
                                   "q_dataloader": None,
                                   "backend": "qlinearops",
                                   "workspace_path": './lpot_workspace/{}/{}/'.format(
                                       'onnxrt',
                                       'imagenet')}
        framework = "onnxrt_qlinearops"
        _ = FRAMEWORKS[framework](framework_specific_info)

    def test_quantizate(self):
        """Static, dynamic and MSE-metric quantization each yield a model."""
        from lpot import Quantization, common
        for fake_yaml in ["static_yaml.yaml", "dynamic_yaml.yaml"]:
            quantizer = Quantization(fake_yaml)
            dataset = quantizer.dataset("dummy", (100, 3, 224, 224), low=0., high=1., label=True)
            quantizer.calib_dataloader = common.DataLoader(dataset)
            quantizer.eval_dataloader = common.DataLoader(dataset)
            quantizer.model = common.Model(self.rn50_model)
            q_model = quantizer()
            eval_func(q_model)
        for fake_yaml in ["non_MSE_yaml.yaml"]:
            quantizer = Quantization(fake_yaml)
            dataset = quantizer.dataset("dummy", (100, 3, 224, 224), low=0., high=1., label=True)
            quantizer.calib_dataloader = common.DataLoader(dataset)
            quantizer.eval_dataloader = common.DataLoader(dataset)
            quantizer.model = common.Model(self.mb_v2_model)
            q_model = quantizer()
            eval_func(q_model)


if __name__ == "__main__":
    unittest.main()
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,665
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/ux/components/benchmark/execute_benchmark.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Execute benchmark."""
import json
import os
from typing import Any, Dict
from lpot.ux.components.benchmark.benchmark import Benchmark
from lpot.ux.utils.exceptions import ClientErrorException
from lpot.ux.utils.executor import Executor
from lpot.ux.utils.logger import log
from lpot.ux.utils.parser import Parser
from lpot.ux.utils.templates.workdir import Workdir
from lpot.ux.utils.utils import load_json
from lpot.ux.web.communication import MessageQueue
mq = MessageQueue()
def execute_benchmark(data: Dict[str, Any]) -> None:
    """
    Execute benchmark.

    Runs a benchmark for every model in the request, publishing progress on
    the MessageQueue ("benchmark_start", one "benchmark_progress" per model,
    and a final "benchmark_finish").  Any validation or execution failure is
    posted on the queue and re-raised as ClientErrorException.

    Expected data:
    {
        "id": "configuration_id",
        "workspace_path": "/path/to/workspace",
        "models": [
            {
                "precision": "fp32",
                "path": "/localdisk/fp32.pb"
            },
            {
                "precision": "int8",
                "path": "/localdisk/int8.pb"
            }
        ]
    }
    """
    # NOTE(review): local import — presumably avoids a circular import at
    # module load time (workload imports components that import this module);
    # confirm before hoisting to the top of the file.
    from lpot.ux.utils.workload.workload import Workload
    request_id = str(data.get("id", ""))
    models = data.get("models", None)
    if not (request_id and models):
        # Report the validation failure to the UI before raising.
        message = "Missing request id or model list."
        mq.post_error(
            "benchmark_finish",
            {"message": message, "code": 404, "id": request_id},
        )
        raise ClientErrorException(message)
    # Reuse the existing working directory for this request (overwrite=False).
    workdir = Workdir(request_id=request_id, overwrite=False)
    try:
        workload_path = workdir.workload_path
        workload_data = load_json(
            os.path.join(workload_path, "workload.json"),
        )
    except Exception as err:
        mq.post_error(
            "benchmark_finish",
            {"message": repr(err), "code": 404, "id": request_id},
        )
        raise ClientErrorException(repr(err))
    workload = Workload(workload_data)
    # Accumulates metrics and per-model execution details across the loop.
    response_data: Dict[str, Any] = {"id": request_id, "execution_details": {}}
    mq.post_success(
        "benchmark_start",
        {
            "message": "started",
            "id": request_id,
        },
    )
    # 1-based index so "progress" reads as "1/N" .. "N/N".
    for idx, model_info in enumerate(models, start=1):
        model_precision = model_info.get("precision", None)
        model_path = model_info.get("path", None)
        benchmark_mode = model_info.get("mode", "performance")
        if not (model_precision and model_path):
            message = "Missing model precision or model path."
            mq.post_error(
                "benchmark_finish",
                {"message": message, "code": 404, "id": request_id},
            )
            raise ClientErrorException(message)
        benchmark: Benchmark = Benchmark(
            workload=workload,
            model_path=model_path,
            datatype=model_precision,
            mode=benchmark_mode,
        )
        log_name = f"{model_precision}_{benchmark_mode}_benchmark"
        # Executor runs the benchmark command and captures its output into
        # "<log_name>.txt" inside the workload directory.
        executor = Executor(
            workload_path,
            subject="benchmark",
            data={"id": request_id},
            send_response=False,
            log_name=log_name,
            additional_log_names=["output.txt"],
        )
        proc = executor.call(
            benchmark.command,
        )
        logs = [os.path.join(workload_path, f"{log_name}.txt")]
        if proc.is_ok:
            # Parse throughput metrics out of the captured benchmark log.
            parser = Parser(logs)
            metrics = parser.process()
            metric = {}
            execution_details: Dict[str, Any] = {}
            throughput_field = f"perf_throughput_{model_precision}"
            if isinstance(metrics, dict):
                metric = {throughput_field: metrics.get(throughput_field, "")}
                execution_details = {
                    f"{model_precision}_benchmark": benchmark.serialize(),
                }
            response_data.update({"progress": f"{idx}/{len(models)}"})
            response_data.update(metric)
            response_data["execution_details"].update(execution_details)
            # Persist metrics and details so they survive a UI reload.
            workdir.update_metrics(
                request_id=request_id,
                metric_data=metric,
            )
            workdir.update_execution_details(
                request_id=request_id,
                execution_details=execution_details,
            )
            log.debug(f"Parsed data is {json.dumps(response_data)}")
            mq.post_success("benchmark_progress", response_data)
        else:
            log.error("Benchmark failed.")
            mq.post_failure("benchmark_finish", {"message": "failed", "id": request_id})
            raise ClientErrorException("Benchmark failed during execution.")
    mq.post_success("benchmark_finish", response_data)
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,666
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/data/transforms/coco_transform.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from lpot.utils.utility import LazyImport
from .transform import transform_registry, BaseTransform
tf = LazyImport('tensorflow')
@transform_registry(transform_type="ParseDecodeCoco", \
                    process="preprocess", framework="tensorflow")
class ParseDecodeCocoTransform(BaseTransform):
    """Parse and decode one serialized COCO tf.Example record.

    Returns the decoded image tensor together with a tuple of
    (bounding boxes, class-name strings, class-id ints, source image id).
    """

    def __call__(self, sample):
        # Dense and variable-length features stored in the Example proto.
        spec = {
            'image/encoded':
                tf.compat.v1.FixedLenFeature([], dtype=tf.string, default_value=''),
            'image/object/class/text':
                tf.compat.v1.VarLenFeature(dtype=tf.string),
            'image/object/class/label':
                tf.compat.v1.VarLenFeature(dtype=tf.int64),
            'image/source_id':
                tf.compat.v1.FixedLenFeature([], dtype=tf.string, default_value=''),
        }
        # Bounding-box corner coordinates are sparse float features.
        box_keys = ('image/object/bbox/xmin', 'image/object/bbox/ymin',
                    'image/object/bbox/xmax', 'image/object/bbox/ymax')
        spec.update({key: tf.compat.v1.VarLenFeature(dtype=tf.float32)
                     for key in box_keys})
        parsed = tf.io.parse_single_example(sample, spec)
        # Assemble rows in [ymin, xmin, ymax, xmax] order, then force the
        # variable number of boxes into shape [1, num_boxes, coords].
        corners = [tf.expand_dims(parsed[key].values, 0)
                   for key in ('image/object/bbox/ymin',
                               'image/object/bbox/xmin',
                               'image/object/bbox/ymax',
                               'image/object/bbox/xmax')]
        bbox = tf.expand_dims(tf.concat(corners, 0), 0)
        bbox = tf.transpose(bbox, [0, 2, 1])
        image = tf.image.decode_image(parsed['image/encoded'], channels=3)
        image.set_shape([None, None, 3])
        return image, (bbox[0],
                       parsed['image/object/class/text'].values,
                       parsed['image/object/class/label'].values,
                       parsed['image/source_id'])
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,667
|
SnehalA/lpot
|
refs/heads/master
|
/lpot/adaptor/ox_utils/onnx_calibrate.py
|
#!/usr/bin/env python
# coding: utf-8
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
# Copyright (c) Microsoft, Intel Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import abc
import copy
import logging
import itertools
import numpy as np
import onnx
import onnxruntime
from onnxruntime.quantization.operators.base_operator import QuantOperatorBase
from onnxruntime.quantization.quant_utils import __producer__, __version__, attribute_to_kwarg, \
QuantizationMode, QuantizedValue, QuantizedValueType
from onnx import helper, TensorProto, numpy_helper
from onnx import onnx_pb as onnx_proto
logger = logging.getLogger()
class CalibrationDataReader(metaclass=abc.ABCMeta):
    """Interface for feeding calibration batches to an ONNX inference session."""

    @classmethod
    def __subclasshook__(cls, subclass):
        # Duck-typed membership: anything exposing a callable ``get_next``
        # is accepted as a virtual subclass.
        if hasattr(subclass, 'get_next') and callable(subclass.get_next):
            return True
        return NotImplemented

    @abc.abstractmethod
    def get_next(self) -> dict:
        """Generate the input data dict for ONNXinferenceSession run."""
        raise NotImplementedError
class ONNXCalibrater:
    """Collect per-tensor (min, max) ranges from an ONNX model on calibration
    data and turn them into quantization parameters (zero point, scale)."""

    def __init__(self, model, data_reader: 'CalibrationDataReader', calibrate_op_types,
                 black_nodes, white_nodes, augmented_model_path, iterations=1):
        '''
        :param model: ONNX model to calibrate
        :param data_reader: user implemented object to read in and preprocess calibration dataset
                            based on CalibrationDataReader Interface
        :param calibrate_op_types: operator types to be calibrated and quantized,
                                   default = 'Conv,MatMul'
        :param black_nodes: operator names that should not be quantized, default = ''
        :param white_nodes: operator names that force to be quantized, default = ''
        :param augmented_model_path: save augmented_model to this path
        :param iterations: number of calibration batches to run, default = 1
        '''
        self.model = model
        self.data_reader = data_reader
        self.calibrate_op_types = calibrate_op_types
        self.black_nodes = black_nodes
        self.white_nodes = white_nodes
        self.augmented_model_path = augmented_model_path
        # Lazily filled by _get_input_name_to_nodes().
        self.input_name_to_nodes = {}
        self.iterations = iterations

    def augment_graph(self):
        '''
        Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in
        model and ensures their outputs are stored as part of the graph output
        :return: augmented ONNX model
        '''
        model = copy.deepcopy(self.model)
        added_nodes = []
        added_outputs = []
        tensors_to_calibrate = set()
        for node in model.graph.node:  # pylint: disable=no-member
            # Calibrate a node when its op type is requested and it is not
            # blacklisted, or when it is explicitly whitelisted.
            should_be_calibrate = ((node.op_type in self.calibrate_op_types) and
                                   (node.name not in self.black_nodes)) or \
                                  (node.name in self.white_nodes)
            if should_be_calibrate:
                if node.op_type == "Attention":
                    logger.debug("indice input {} of attention node {} can't be calibrated"
                                 .format(node.input[-1], node.name))
                    tensors_to_calibrate.update(node.input[:-1])
                elif node.op_type == "Gather":
                    logger.debug("indice input {} of gather node {} can't be calibrated"
                                 .format(node.input[-1], node.name))
                    tensors_to_calibrate.update(node.input[:-1])
                else:
                    tensors_to_calibrate.update(node.input)
                tensors_to_calibrate.update(node.output)
        # Initializers (weights/constants) are static and need no runtime
        # calibration.  BUGFIX: the previous code tested
        # ``tensor in model.graph.initializer`` — comparing a tensor *name*
        # (str) against TensorProto objects, which never matched — and
        # mutated the set while iterating it.  Filter on initializer names.
        initializer_names = {init.name for init in
                             model.graph.initializer}  # pylint: disable=no-member
        tensors_to_calibrate -= initializer_names
        # If augmenting all ops, it's possible that some nodes' input value are 0.
        # Can't reduce on dim with value of 0 if 'keepdims' is false, therefore set keepdims to 1.
        if self.calibrate_op_types:
            keepdims_value = 0
        else:
            keepdims_value = 1
        for tensor in tensors_to_calibrate:
            # Adding ReduceMin nodes
            reduce_min_name = tensor + '_ReduceMin'
            reduce_min_node = onnx.helper.make_node('ReduceMin', [tensor], [tensor + '_ReduceMin'],
                                                    reduce_min_name,
                                                    keepdims=keepdims_value)
            added_nodes.append(reduce_min_node)
            added_outputs.append(helper.make_tensor_value_info(reduce_min_node.output[0],
                                                              TensorProto.FLOAT, ()))
            # Adding ReduceMax nodes
            reduce_max_name = tensor + '_ReduceMax'
            reduce_max_node = onnx.helper.make_node('ReduceMax', [tensor], [tensor + '_ReduceMax'],
                                                    reduce_max_name,
                                                    keepdims=keepdims_value)
            added_nodes.append(reduce_max_node)
            added_outputs.append(helper.make_tensor_value_info(reduce_max_node.output[0],
                                                              TensorProto.FLOAT, ()))
        model.graph.node.extend(added_nodes)  # pylint: disable=no-member
        model.graph.output.extend(added_outputs)  # pylint: disable=no-member
        return model

    # Using augmented outputs to generate inputs for quantization
    def get_intermediate_outputs(self, calib_mode='naive'):
        '''
        Gather intermediate model outputs after running inference
        parameter calib_mode: type 'naive' gives (ReduceMin, ReduceMax) pairs
                              for each augmented node across test data sets, where
                              the first element is a minimum of all ReduceMin values
                              and the second element is a maximum of all ReduceMax
                              values;
        :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }
        '''
        # conduct inference session and get intermediate outputs
        session = onnxruntime.InferenceSession(self.augmented_model_path, None)
        intermediate_outputs = []
        for idx, batch in enumerate(self.data_reader):
            # Feed the batch positionally: batch[i] maps to the i-th session input.
            ort_inputs = {}
            for i in range(len(session.get_inputs())):
                ort_inputs.update({session.get_inputs()[i].name: batch[i]})
            intermediate_outputs.append(session.run(None, ort_inputs))
            if idx >= self.iterations - 1:
                break
        node_output_names = [session.get_outputs()[i].name for i in
                             range(len(intermediate_outputs[0]))]
        output_dicts_list = [
            dict(zip(node_output_names, intermediate_output)) for intermediate_output in
            intermediate_outputs
        ]
        # number of outputs in original model
        model = self.model
        num_model_outputs = len(model.graph.output)
        # Merge per-batch outputs: name -> list of values across batches.
        merged_dict = {}
        for d in output_dicts_list:
            for k, v in d.items():
                merged_dict.setdefault(k, []).append(v)
        added_node_output_names = node_output_names[num_model_outputs:]
        node_names = [added_node_output_names[i].rpartition('_')[0]
                      for i in range(0, len(added_node_output_names), 2)]  # output names
        # Characterizing distribution of a node's values across test data sets.
        # PERF: hoist the first-key lookup out of the comprehension (it was
        # recomputed once per element).
        first_output_name = list(merged_dict.keys())[0]
        clean_merged_dict = dict((i, merged_dict[i]) for i in merged_dict
                                 if i != first_output_name)
        if calib_mode == 'naive':
            # ReduceMin outputs sit at even indices, ReduceMax at odd ones.
            pairs = [
                tuple([
                    float(min(clean_merged_dict[added_node_output_names[i]])),
                    float(max(clean_merged_dict[added_node_output_names[i + 1]]))
                ]) for i in range(0, len(added_node_output_names), 2)
            ]
        else:
            raise ValueError('Unknown value for calib_mode. \
                Currently only naive mode is supported.')
        final_dict = dict(zip(node_names, pairs))
        return final_dict

    def _get_input_name_to_nodes(self, model):
        '''
        Helper function to get input_name_to_nodes dictionary
        '''
        for node in model.graph.node:
            for input_name in node.input:
                if input_name not in self.input_name_to_nodes:
                    self.input_name_to_nodes[input_name] = [node]
                else:
                    self.input_name_to_nodes[input_name].append(node)

    def _get_output_name_to_nodes(self, model):
        '''
        Helper function to get output_name_to_nodes dictionary
        '''
        self.output_name_to_nodes = {}
        for node in model.graph.node:
            for output_name in node.output:
                if output_name not in self.output_name_to_nodes:
                    self.output_name_to_nodes[output_name] = [node]
                else:
                    self.output_name_to_nodes[output_name].append(node)

    def _get_node_from_name(self, name):
        '''
        Helper function to get node from name; returns None when no node matches.
        '''
        # for/else: the else branch runs only when the loop finishes without
        # returning, i.e. when no node with that name exists.
        for node in self.model.graph.node:
            if node.name == name:
                return node
        else:
            return None

    def calculate_scale_zeropoint(self, last_node, next_node, rmin, rmax):
        '''
        Compute asymmetric uint8 quantization parameters for the range [rmin, rmax].
        :param last_node: producer node of the tensor (may be None)
        :param next_node: consumer node of the tensor (may be None)
        :param rmin: observed minimum value
        :param rmax: observed maximum value
        :return: [zero_point (np.uint8), scale (np.float32)]
        '''
        zp_and_scale = []
        # adjust rmin and rmax such that 0 is included in the range. This is required
        # to make sure zero can be uniquely represented.
        rmin = min(rmin, 0)
        rmax = max(rmax, 0)
        if next_node:
            if next_node.op_type == 'Relu':
                # Relu clamps negatives away, so the effective range starts at 0.
                if rmin < 0:
                    rmin = 0
        if last_node:
            if last_node.op_type in ['Conv', 'FusedConv']:
                # A fused activation on the producing Conv further restricts
                # (Relu) or extends (Clip) the representable range.
                attrs = [attr for attr in last_node.attribute]
                attrs_names = [attr.name for attr in last_node.attribute]
                if 'activation' in attrs_names:
                    if attrs[attrs_names.index('activation')].s == b'Relu':
                        rmin = max(rmin, 0)
                    if attrs[attrs_names.index('activation')].s == b'Clip':
                        assert 'activation_params' in attrs_names, "the model contains no \
                                                                    params for clip node \
                                                                    {}".format(last_node)
                        clip_params = attrs[attrs_names.index('activation_params')].floats
                        rmin = min(rmin, clip_params[0], clip_params[1])
                        rmax = max(rmax, clip_params[0], clip_params[1])
        # Degenerate range (rmin == rmax, i.e. both 0) gets scale 1 to avoid
        # division by zero.
        scale = np.float32((rmax - rmin) / 255 if rmin != rmax else 1)
        initial_zero_point = (0 - rmin) / scale
        zero_point = np.uint8(round(max(0, min(255, initial_zero_point))))
        zp_and_scale.append(zero_point)
        zp_and_scale.append(scale)
        return zp_and_scale

    def calculate_quantization_params(self, quantization_thresholds):
        '''
        Given quantization thresholds, calculate the quantization params.
        :param quantization_thresholds:
            Dictionary specifying the min and max values for outputs of conv and matmul nodes.
            The quantization_thresholds should be specified in the following format:
                {
                    "param_name": [min, max]
                }
            example:
                {
                    'Conv_3:0': [np.float32(0), np.float32(0.5)],
                    'Conv_4:0': [np.float32(1), np.float32(3.5)]
                }
        :return: Dictionary containing the zero point and
                 scale values for outputs of conv and matmul nodes.
                 The dictionary format is
                    {
                        "param_name": [zero_point, scale]
                    }
        '''
        if quantization_thresholds is None:
            raise ValueError(
                'quantization thresholds is required to calculate quantization \
                    params (zero point and scale)')
        quantization_params = {}
        model = self.model
        self._get_input_name_to_nodes(model)
        self._get_output_name_to_nodes(model)
        for tensor_name in quantization_thresholds.keys():
            # Use the single consumer/producer (if unambiguous) so the fused
            # activation adjustments in calculate_scale_zeropoint can apply.
            child = None
            if tensor_name in self.input_name_to_nodes:
                children = self.input_name_to_nodes[tensor_name]
                if len(children) == 1:
                    child = children[0]
            parent = None
            if tensor_name in self.output_name_to_nodes:
                parents = self.output_name_to_nodes[tensor_name]
                if len(parents) == 1:
                    parent = parents[0]
            node_thresholds = quantization_thresholds[tensor_name]
            node_params = self.calculate_scale_zeropoint(parent, child, node_thresholds[0],
                                                         node_thresholds[1])
            quantization_params[tensor_name] = node_params
        return quantization_params
def calibrate(model,
              data_reader: 'CalibrationDataReader',
              op_types=None,
              black_nodes=None,
              white_nodes=None,
              augmented_model_path='augmented_model.onnx',
              iterations=1):
    '''
    Given an onnx model, augment and run the augmented model on calibration data set, \
    aggregate and calculate the quantization parameters.
    :param model: ONNX model to calibrate
    :param data_reader: user implemented object to read in and preprocess calibration dataset \
                        based on CalibrationDataReader interface
    :param op_types: operator types to be calibrated and quantized, default = ['Conv', 'MatMul']
    :param black_nodes: operator names that should not be quantized, default = []
    :param white_nodes: operator names that force to be quantized, default = []
    :param augmented_model_path: save augmented_model to this path
    :param iterations: number of calibration batches to run
    :return: dictionary mapping tensor names to [zero_point, scale]
    '''
    # BUGFIX: the defaults used to be mutable list literals, which Python
    # evaluates once at function definition and shares across all calls.
    # Use None sentinels and build fresh values per call (backward compatible).
    if op_types is None:
        op_types = ['Conv', 'MatMul']
    if black_nodes is None:
        black_nodes = []
    if white_nodes is None:
        white_nodes = []
    # 1. initialize a calibrater
    calibrater = ONNXCalibrater(model, data_reader, op_types, black_nodes, white_nodes,
                                augmented_model_path, iterations)
    # 2. augment
    augmented_model = calibrater.augment_graph()
    onnx.save(augmented_model, augmented_model_path)
    # 3. generate quantization thresholds
    dict_for_quantization = calibrater.get_intermediate_outputs()
    # 4. generate quantization parameters dict
    quantization_params_dict = calibrater.calculate_quantization_params(dict_for_quantization)
    print("Calibrated,quantized parameters calculated and returned.")
    return quantization_params_dict
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,668
|
SnehalA/lpot
|
refs/heads/master
|
/test/test_adaptor_pytorch.py
|
import torch
import torch.nn.quantized as nnq
from torch.quantization import QuantStub, DeQuantStub
import torchvision
import unittest
import os
from lpot.adaptor import FRAMEWORKS
from lpot.model import MODELS
import lpot.adaptor.pytorch as lpot_torch
from lpot import Quantization, common
import shutil
import copy
import numpy as np
try:
import intel_pytorch_extension as ipex
TEST_IPEX = True
except:
TEST_IPEX = False
def build_ptq_yaml():
    """Write a post-training static-quantization config to ``ptq_yaml.yaml``."""
    fake_yaml = '''
    model:
      name: imagenet
      framework: pytorch

    quantization:
      op_wise: {
             'quant': {
               'activation': {'dtype': ['fp32']},
               'weight': {'dtype': ['fp32']}
             },
             'layer1.0.conv1': {
               'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['asym']},
               'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['asym']}
             },
             'layer2.0.conv1': {
               'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},
               'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
             },
             'layer3.0.conv1': {
               'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},
               'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
             },
             'layer1.0.add_relu': {
               'activation': {'dtype': ['fp32']},
               'weight': {'dtype': ['fp32']}
             }
      }

    evaluation:
      accuracy:
        metric:
          topk: 1
      performance:
        warmup: 5
        iteration: 10

    tuning:
      accuracy_criterion:
        relative: 0.01
      exit_policy:
        timeout: 0
      random_seed: 9527
      workspace:
        path: saved
    '''
    with open('ptq_yaml.yaml', 'w', encoding="utf-8") as f:
        f.write(fake_yaml)
def build_ipex_yaml():
    """Write a pytorch_ipex-framework tuning config to ``ipex_yaml.yaml``."""
    fake_yaml = '''
    model:
      name: imagenet
      framework: pytorch_ipex

    evaluation:
      accuracy:
        metric:
          topk: 1
      performance:
        warmup: 5
        iteration: 10

    tuning:
      accuracy_criterion:
        relative: 0.01
      exit_policy:
        timeout: 0
      random_seed: 9527
      workspace:
        path: saved
    '''
    with open('ipex_yaml.yaml', 'w', encoding="utf-8") as f:
        f.write(fake_yaml)
def build_dump_tensors_yaml():
    """Write a config with tensorboard dumping enabled to ``dump_yaml.yaml``."""
    fake_yaml = '''
    model:
      name: imagenet
      framework: pytorch

    evaluation:
      accuracy:
        metric:
          topk: 1

    tuning:
      accuracy_criterion:
        relative: 0.01
      exit_policy:
        timeout: 0
      random_seed: 9527
      workspace:
        path: saved
      tensorboard: true
    '''
    with open('dump_yaml.yaml', 'w', encoding="utf-8") as f:
        f.write(fake_yaml)
def build_qat_yaml():
    """Write a quantization-aware-training config to ``qat_yaml.yaml``."""
    fake_yaml = '''
    model:
      name: imagenet
      framework: pytorch

    quantization:
      approach: quant_aware_training
      op_wise: {
             'quant': {
               'activation': {'dtype': ['fp32']},
               'weight': {'dtype': ['fp32']}
             },
             'layer1.0.conv1': {
               'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['asym']},
               'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['asym']}
             },
             'layer2.0.conv1': {
               'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},
               'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
             },
             'layer3.0.conv1': {
               'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},
               'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
             },
             'layer1.0.add_relu': {
               'activation': {'dtype': ['fp32']},
               'weight': {'dtype': ['fp32']}
             }
      }

    evaluation:
      accuracy:
        metric:
          topk: 1

    tuning:
      accuracy_criterion:
        relative: 0.01
      exit_policy:
        timeout: 0
      random_seed: 9527
      workspace:
        path: saved
    '''
    with open('qat_yaml.yaml', 'w', encoding="utf-8") as f:
        f.write(fake_yaml)
def eval_func(model):
    """Run one dummy forward pass in eval mode and return a fixed accuracy of 0.0."""
    model.eval()
    with torch.no_grad():
        batch = torch.randn(10, 3, 224, 224)
        # Forward pass only — the output is not scored, the call just
        # exercises the model.
        model(batch)
    return 0.0
def q_func(model):
    """Perform a single training step (forward, backward, SGD update) on dummy data."""
    optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
    # Training mode so observers/batch-norm style layers update.
    model.train()
    batch = torch.randn(1, 3, 224, 224)
    loss = model(batch).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return
@unittest.skipIf(TEST_IPEX, "TODO: Please wait to IPEX + PyTorch1.7 release")
class TestPytorchAdaptor(unittest.TestCase):
    """Tests for the plain (non-IPEX) PyTorch adaptor and model wrapper."""

    # NOTE: these class attributes execute at import time, not per test;
    # the adaptor and wrapped resnet18 are shared by every test method.
    framework_specific_info = {"device": "cpu",
                               "approach": "post_training_static_quant",
                               "random_seed": 1234,
                               "q_dataloader": None}
    framework = "pytorch"
    adaptor = FRAMEWORKS[framework](framework_specific_info)
    model = torchvision.models.quantization.resnet18()
    lpot_model = MODELS['pytorch'](model)

    @classmethod
    def setUpClass(self):
        """Generate the YAML configs consumed by the tests."""
        build_ptq_yaml()
        build_qat_yaml()
        build_dump_tensors_yaml()

    @classmethod
    def tearDownClass(self):
        """Delete generated configs and tuning working directories."""
        os.remove('ptq_yaml.yaml')
        os.remove('qat_yaml.yaml')
        os.remove('dump_yaml.yaml')
        shutil.rmtree('./saved', ignore_errors=True)
        shutil.rmtree('runs', ignore_errors=True)

    def test_get_all_weight_name(self):
        """resnet18 is expected to expose 62 named weights."""
        assert len(list(self.lpot_model.get_all_weight_names())) == 62

    def test_get_weight(self):
        """get_weight must reflect values written into the wrapped model."""
        for name, param in self.model.named_parameters():
            if name == "layer4.1.conv2.weight":
                param.data.fill_(0.0)
            if name == "fc.bias":
                param.data.fill_(0.1)
        assert int(torch.sum(self.lpot_model.get_weight("layer4.1.conv2.weight"))) == 0
        # fc.bias has 1000 entries of 0.1, so the sum is 100.
        assert torch.allclose(
            torch.sum(
                self.lpot_model.get_weight("fc.bias")),
            torch.tensor(100.))

    def test_update_weights(self):
        """update_weights must overwrite the named tensor in place."""
        self.lpot_model.update_weights("fc.bias", torch.zeros([1000]))
        assert int(torch.sum(self.lpot_model.get_weight("fc.bias"))) == 0

    def test_report_sparsity(self):
        """report_sparsity returns a per-layer table and an overall ratio."""
        df, total_sparsity = self.lpot_model.report_sparsity()
        self.assertTrue(total_sparsity > 0)
        self.assertTrue(len(df) == 22)

    def test_quantization_saved(self):
        """Quantize (QAT then PTQ), save, reload, and benchmark the model."""
        from lpot.utils.pytorch import load
        model = copy.deepcopy(self.model)
        for fake_yaml in ['qat_yaml.yaml', 'ptq_yaml.yaml']:
            # PTQ requires an eval-mode, operator-fused model.
            if fake_yaml == 'ptq_yaml.yaml':
                model.eval().fuse_model()
            quantizer = Quantization(fake_yaml)
            dataset = quantizer.dataset('dummy', (100, 3, 256, 256), label=True)
            quantizer.model = common.Model(model)
            quantizer.calib_dataloader = common.DataLoader(dataset)
            quantizer.eval_dataloader = common.DataLoader(dataset)
            if fake_yaml == 'qat_yaml.yaml':
                quantizer.q_func = q_func
            q_model = quantizer()
            q_model.save('./saved')
            # Load configure and weights by lpot.utils
            saved_model = load("./saved", model)
            eval_func(saved_model)
        from lpot import Benchmark
        evaluator = Benchmark('ptq_yaml.yaml')
        # Load configure and weights by lpot.model
        evaluator.model = common.Model(model)
        evaluator.b_dataloader = common.DataLoader(dataset)
        results = evaluator()
        evaluator.model = common.Model(model)
        fp32_results = evaluator()
        # Quantized accuracy must stay within 1% of the fp32 baseline.
        self.assertTrue((fp32_results['accuracy'][0] - results['accuracy'][0]) < 0.01)

    def test_tensor_dump(self):
        """With tensorboard enabled, tuning must write baseline event dirs
        both with an eval_func and with an eval_dataloader."""
        model = copy.deepcopy(self.lpot_model)
        model.model.eval().fuse_model()
        quantizer = Quantization('dump_yaml.yaml')
        dataset = quantizer.dataset('dummy', (100, 3, 256, 256), label=True)
        quantizer.model = common.Model(model.model)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.eval_func = eval_func
        quantizer()
        self.assertTrue(True if os.path.exists('runs/eval/baseline_acc0.0') else False)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        quantizer()
        self.assertTrue(True if os.path.exists('runs/eval/baseline_acc0.0') else False)

    def test_floatfunctions_fallback(self):
        """Quantization with fallback ops must stay close to fp32 output."""
        class ModelWithFunctionals(torch.nn.Module):
            # Exercises nnq.FloatFunctional wrappers (cat/add/mul and their
            # scalar/relu variants) around QuantStub/DeQuantStub.
            def __init__(self):
                super(ModelWithFunctionals, self).__init__()
                self.mycat = nnq.FloatFunctional()
                self.myadd = nnq.FloatFunctional()
                self.myadd_relu = nnq.FloatFunctional()
                # Tracing doesnt work yet for c10 ops with scalar inputs
                # https://github.com/pytorch/pytorch/issues/27097
                self.my_scalar_add = nnq.FloatFunctional()
                self.mymul = nnq.FloatFunctional()
                self.my_scalar_mul = nnq.FloatFunctional()
                self.quant = QuantStub()
                self.dequant = DeQuantStub()

            def forward(self, x):
                x = self.quant(x)
                y = self.mycat.cat([x, x, x])
                z = self.myadd.add(y, y)
                w = self.myadd_relu.add_relu(z, z)
                # Tracing doesnt work yet for c10 ops with scalar inputs
                # https://github.com/pytorch/pytorch/issues/27097
                w = self.my_scalar_add.add_scalar(w, -0.5)
                w = self.mymul.mul(w, w)
                w = self.my_scalar_mul.mul_scalar(w, 0.5)
                w = self.dequant(w)
                return w
        model = ModelWithFunctionals()
        model = MODELS['pytorch'](model)
        x = torch.rand(10, 1, dtype=torch.float)
        y = model.model(x)
        # Every op except 'quant' is forced to fall back to fp32.
        fallback_ops = []
        q_capability = self.adaptor.query_fw_capability(model)
        for k, v in q_capability["opwise"].items():
            if k[0] != "quant":
                fallback_ops.append(k[0])
        model.model.qconfig = torch.quantization.default_qconfig
        model.model.quant.qconfig = torch.quantization.default_qconfig
        lpot_torch._fallback_quantizable_ops_recursively(model.model, '', fallback_ops)
        torch.quantization.add_observer_(model.model)
        model.model(x)
        torch.quantization.convert(model.model, self.adaptor.q_mapping, inplace=True)
        qy = model.model(x)
        tol = {'atol': 1e-01, 'rtol': 1e-03}
        self.assertTrue(np.allclose(y, qy, **tol))
@unittest.skipIf(not TEST_IPEX, "Unsupport Intel PyTorch Extension")
class TestPytorchIPEXAdaptor(unittest.TestCase):
    """Tests for the IPEX-backed PyTorch adaptor (needs intel_pytorch_extension)."""

    @classmethod
    def setUpClass(self):
        """Generate the IPEX tuning config."""
        build_ipex_yaml()

    @classmethod
    def tearDownClass(self):
        """Delete the generated config and working directories."""
        os.remove('ipex_yaml.yaml')
        shutil.rmtree('./saved', ignore_errors=True)
        shutil.rmtree('runs', ignore_errors=True)

    def test_tuning_ipex(self):
        """Tune with IPEX, save, reload, JIT-compile, and benchmark."""
        from lpot import Quantization
        model = torchvision.models.resnet18()
        model = MODELS['pytorch_ipex'](model)
        quantizer = Quantization('ipex_yaml.yaml')
        dataset = quantizer.dataset('dummy', (100, 3, 256, 256), label=True)
        quantizer.model = common.Model(model)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        lpot_model = quantizer()
        lpot_model.save("./saved")
        # Reload the tuned configuration from the saved workspace.
        new_model = MODELS['pytorch_ipex'](model.model, {"workspace_path": "./saved"})
        new_model.model.to(ipex.DEVICE)
        try:
            script_model = torch.jit.script(new_model.model)
        except:
            # NOTE(review): bare except is a deliberate best-effort fallback —
            # scripting fails on some models, so fall back to tracing.
            script_model = torch.jit.trace(new_model.model, torch.randn(10, 3, 224, 224).to(ipex.DEVICE))
        from lpot import Benchmark
        evaluator = Benchmark('ipex_yaml.yaml')
        evaluator.model = common.Model(script_model)
        evaluator.b_dataloader = common.DataLoader(dataset)
        results = evaluator()
# Standard unittest entry point when the file is run as a script.
if __name__ == "__main__":
    unittest.main()
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,669
|
SnehalA/lpot
|
refs/heads/master
|
/test/test_transform.py
|
"""Tests for the transform module."""
import numpy as np
import random
import unittest
import os
from lpot.data import TRANSFORMS, DATALOADERS
from lpot.utils.create_obj_from_config import get_postprocess, create_dataset
from lpot.utils.utility import LazyImport
from PIL import Image
mx = LazyImport('mxnet')
tf = LazyImport('tensorflow')
torch = LazyImport('torch')
torchvision = LazyImport('torchvision')
random.seed(1)
np.random.seed(1)
class TestMetrics(unittest.TestCase):
    """Smoke test composing TensorFlow preprocess transforms."""

    def test_tensorflow_2(self):
        """Resize then random-crop a dummy batch and check the output shape."""
        registry = TRANSFORMS(framework="tensorflow", process="preprocess")
        resize_op = registry['Resize'](**{"size": [224, 224]})
        crop_op = registry['RandomCrop'](**{"size": 128})
        pipeline = registry['Compose']([resize_op, crop_op])
        batch = np.ones([1, 256, 256, 1])
        result = pipeline((batch, None))
        self.assertEqual(result[0].shape, (1, 128, 128, 1))
class TestONNXQLImagenetTransform(unittest.TestCase):
    """ResizeCropImagenet shape checks for the onnxrt_qlinearops backend."""

    @classmethod
    def setUpClass(cls):
        cls.img = np.random.random_sample([600, 600, 3]) * 255

    def testResizeCropImagenetTransform(self):
        registry = TRANSFORMS('onnxrt_qlinearops', "preprocess")
        op = registry['ResizeCropImagenet'](height=224, width=224)
        out = op((self.img, 0))[0]
        # Output is channel-first: 3 channels, 224x224 spatial dims.
        self.assertEqual(len(out), 3)
        self.assertEqual(len(out[0]), 224)
        self.assertEqual(len(out[0][0]), 224)
class TestONNXITImagenetTransform(unittest.TestCase):
    """ResizeCropImagenet shape checks for the onnxrt_integerops backend."""

    @classmethod
    def setUpClass(cls):
        cls.img = np.random.random_sample([600, 600, 3]) * 255

    def testResizeCropImagenetTransform(self):
        registry = TRANSFORMS('onnxrt_integerops', "preprocess")
        op = registry['ResizeCropImagenet'](height=224, width=224)
        out = op((self.img, 0))[0]
        # Output is channel-first: 3 channels, 224x224 spatial dims.
        self.assertEqual(len(out), 3)
        self.assertEqual(len(out[0]), 224)
        self.assertEqual(len(out[0][0]), 224)
class TestTensorflowImagenetTransform(unittest.TestCase):
    """Imagenet-style preprocess/postprocess transforms on the TF backend."""
    # Graph mode is needed so tensors can be pulled out via Session-based eval.
    tf.compat.v1.disable_v2_behavior()

    def testBilinearImagenetTransform(self):
        registry = TRANSFORMS('tensorflow', "preprocess")
        op = registry['BilinearImagenet'](height=224, width=224)
        src = np.random.random_sample([600, 600, 3]).astype(np.float32)
        out = op((src, 0))[0].eval(session=tf.compat.v1.Session())
        self.assertEqual(len(out), 224)
        self.assertEqual(len(out[0]), 224)
        self.assertEqual(len(out[0][0]), 3)

    def testResizeCropImagenetTransform(self):
        registry = TRANSFORMS('tensorflow', "preprocess")
        op = registry['ResizeCropImagenet'](height=224, width=224)
        src = np.random.random_sample([600, 600, 3]).astype(np.float32)
        out = op((src, 0))[0].eval(session=tf.compat.v1.Session())
        self.assertEqual(len(out), 224)
        self.assertEqual(len(out[0]), 224)
        self.assertEqual(len(out[0][0]), 3)

    def testLabelShift(self):
        registry = TRANSFORMS('tensorflow', "postprocess")
        shift = registry['LabelShift'](label_shift=1)
        src = np.random.random_sample([600, 600, 3]).astype(np.float32)
        # Only the label part of the (image, label) pair is shifted.
        shifted = shift((src, 1001))[1]
        self.assertEqual(shifted, 1000)

    def testQuantizedInput(self):
        registry = TRANSFORMS('tensorflow', "preprocess")
        quantize = registry['QuantizedInput'](dtype='uint8', scale=100)
        src = np.random.random_sample([600, 600, 3]).astype(np.float32)
        out = quantize((src, 1001))[0].eval(session=tf.compat.v1.Session())
        # Quantized values must stay inside the uint8 range.
        self.assertLessEqual(out.max(), 255)
        self.assertGreaterEqual(out.min(), 0)
class TestDataConversion(unittest.TestCase):
    """Conversions between numpy arrays, PIL images, torch tensors and mxnet ndarrays."""

    @classmethod
    def setUpClass(cls):
        cls.img = np.random.random_sample([10, 10, 3]) * 255
        cls.mx_trans = TRANSFORMS('mxnet', 'preprocess')
        cls.pt_trans = TRANSFORMS('pytorch', 'preprocess')

    def testToPILImage(self):
        convert = TestDataConversion.pt_trans['ToPILImage']()
        pil_img, _ = convert((TestDataConversion.img.astype(np.uint8), None))
        self.assertTrue(isinstance(pil_img, Image.Image))

    def testToTensor(self):
        convert = TestDataConversion.pt_trans['ToTensor']()
        tensor, _ = convert((TestDataConversion.img.astype(np.uint8), None))
        self.assertTrue(isinstance(tensor, torch.Tensor))
        convert = TestDataConversion.mx_trans['ToTensor']()
        nd, _ = convert((mx.nd.array(TestDataConversion.img), None))
        self.assertTrue(isinstance(nd, mx.ndarray.NDArray)) # pylint: disable=no-member

    def testToNDArray(self):
        convert = TestDataConversion.mx_trans['ToNDArray']()
        nd, _ = convert((TestDataConversion.img.astype(np.uint8), None))
        self.assertTrue(isinstance(nd, mx.ndarray.NDArray))
class TestSameTransfoms(unittest.TestCase):
    """Checks that transforms with the same name behave consistently across
    the tensorflow, pytorch, mxnet and onnxrt backends."""
    @classmethod
    def setUpClass(cls):
        cls.img = np.random.random_sample([10,10,3])*255
        cls.tf_trans = TRANSFORMS('tensorflow', 'preprocess')
        cls.pt_trans = TRANSFORMS('pytorch', 'preprocess')
        cls.mx_trans = TRANSFORMS('mxnet', 'preprocess')
        cls.ox_trans = TRANSFORMS('onnxrt_qlinearops', 'preprocess')
        cls.mx_img = mx.nd.array(cls.img.astype(np.uint8))
        cls.pt_img = Image.fromarray(cls.img.astype(np.uint8))
        # Postprocess registries are created only to exercise their
        # construction paths; the handles are discarded.
        _ = TRANSFORMS('tensorflow', 'postprocess')
        _ = TRANSFORMS('pytorch', 'postprocess')
        _ = TRANSFORMS('mxnet', 'postprocess')
        _ = TRANSFORMS('onnxrt_qlinearops' , 'postprocess')
        _ = TRANSFORMS('onnxrt_integerops', 'postprocess')
    def testCast(self):
        # TF Cast returns a graph tensor; pull the value out via a v1 Session.
        args = {'dtype': 'int64'}
        tf_func = TestSameTransfoms.tf_trans['Cast'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        self.assertEqual(tf_result[0][0][0].dtype, 'int64')
    def testNormalize(self):
        args = {}
        normalize = TestSameTransfoms.pt_trans['Normalize'](**args)
        totensor = TestSameTransfoms.pt_trans['ToTensor']()
        pt_func = TestSameTransfoms.pt_trans['Compose']([totensor, normalize])
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        # With default args Normalize leaves the value unchanged, so the
        # result equals ToTensor's uint8 -> [0, 1] scaling of the pixel.
        self.assertEqual(TestSameTransfoms.img.astype(
            np.uint8)[0][0][0]/255., pt_result[0][0][0])
        # A zero std must be rejected at construction time.
        args = {'std': [0.]}
        with self.assertRaises(ValueError):
            TestSameTransfoms.pt_trans['Normalize'](**args)
    def testRescale(self):
        ox_func = TestSameTransfoms.ox_trans['Rescale']()
        ox_result = ox_func((TestSameTransfoms.img, None))[0]
        # Rescale divides pixel values by 255.
        self.assertAlmostEqual(ox_result[1][2][0], TestSameTransfoms.img[1][2][0]/255.)
    def testTranspose(self):
        # HWC -> CHW on every backend providing Transpose.
        args = {'perm': [2, 0, 1]}
        tf_func = TestSameTransfoms.tf_trans['Transpose'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        ox_func = TestSameTransfoms.ox_trans['Transpose'](**args)
        ox_result = ox_func((TestSameTransfoms.img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['Transpose'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(tf_result.shape, (3,10,10))
        self.assertEqual(ox_result.shape, (3,10,10))
        self.assertEqual(mx_result.shape, (3,10,10))
    def testCenterCrop(self):
        # size given as an [h, w] pair.
        args = {'size':[4,4]}
        tf_func = TestSameTransfoms.tf_trans['CenterCrop'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        pt_func = TestSameTransfoms.pt_trans['CenterCrop'](**args)
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['CenterCrop'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(tf_result.shape, (4,4,3))
        self.assertEqual(pt_result.size, (4,4))
        self.assertEqual(mx_result.shape, (4,4,3))
        # All backends must crop the same region of the same source image.
        self.assertEqual(np.array(pt_result)[0][0][0], mx_result.asnumpy()[0][0][0])
        self.assertEqual(np.array(pt_result)[0][0][0], int(tf_result[0][0][0]))
        # size given as a bare int.
        args = {'size':4}
        tf_func = TestSameTransfoms.tf_trans['CenterCrop'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        pt_func = TestSameTransfoms.pt_trans['CenterCrop'](**args)
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['CenterCrop'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(tf_result.shape, (4,4,3))
        self.assertEqual(pt_result.size, (4,4))
        self.assertEqual(mx_result.shape, (4,4,3))
        self.assertEqual(np.array(pt_result)[0][0][0], mx_result.asnumpy()[0][0][0])
        self.assertEqual(np.array(pt_result)[0][0][0], int(tf_result[0][0][0]))
        # size given as a single-element list.
        args = {'size':[4]}
        tf_func = TestSameTransfoms.tf_trans['CenterCrop'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        self.assertEqual(tf_result.shape, (4,4,3))
        # A batched (4-D) input keeps its batch dimension.
        tf_result = tf_func((np.array([TestSameTransfoms.img]), None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        self.assertEqual(tf_result.shape, (1,4,4,3))
        # A rank-5 input is rejected.
        with self.assertRaises(ValueError):
            tf_func = TestSameTransfoms.tf_trans['CenterCrop'](**args)
            tf_result = tf_func((np.array([[TestSameTransfoms.img]]), None))
        # A crop larger than the image is rejected at call time.
        args = {'size':[20]}
        with self.assertRaises(ValueError):
            tf_func = TestSameTransfoms.tf_trans['CenterCrop'](**args)
            tf_result = tf_func((TestSameTransfoms.img, None))
    def testResize(self):
        tf_func = TestSameTransfoms.tf_trans['Resize'](**{'size':[4,5]})
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        pt_func = TestSameTransfoms.pt_trans['Resize'](**{'size':[4,5]})
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['Resize'](**{'size':[4,5]})
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(tf_result.shape, (4,5,3))
        # PIL reports size as (width, height), hence the transposed tuple.
        self.assertEqual(pt_result.size, (5,4))
        self.assertEqual(mx_result.shape, (4,5,3))
        args = {'size': 4}
        tf_func = TestSameTransfoms.tf_trans['Resize'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        pt_func = TestSameTransfoms.pt_trans['Resize'](**args)
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['Resize'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(tf_result.shape, (4,4,3))
        self.assertEqual(pt_result.size, (4,4))
        self.assertEqual(mx_result.shape, (4,4,3))
        args = {'size': [4]}
        tf_func = TestSameTransfoms.tf_trans['Resize'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        mx_func = TestSameTransfoms.mx_trans['Resize'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(tf_result.shape, (4,4,3))
        self.assertEqual(mx_result.shape, (4,4,3))
        # Unknown interpolation names are rejected on every backend.
        args = {'size': 4, 'interpolation':'test'}
        with self.assertRaises(ValueError):
            TestSameTransfoms.tf_trans['Resize'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.pt_trans['Resize'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.mx_trans['Resize'](**args)
    def testRandomResizedCrop(self):
        tf_func = TestSameTransfoms.tf_trans['RandomResizedCrop'](**{'size':[4,5]})
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        pt_func = TestSameTransfoms.pt_trans['RandomResizedCrop'](**{'size':[4,5]})
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['RandomResizedCrop'](**{'size':[4,5]})
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(tf_result.shape, (4,5,3))
        # PIL reports size as (width, height), hence the transposed tuple.
        self.assertEqual(pt_result.size, (5,4))
        self.assertEqual(mx_result.shape, (4,5,3))
        args = {'size': [4]}
        tf_func = TestSameTransfoms.tf_trans['RandomResizedCrop'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        self.assertEqual(tf_result.shape, (4,4,3))
        mx_func = TestSameTransfoms.mx_trans['RandomResizedCrop'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(mx_result.shape, (4,4,3))
        args = {'size': 4}
        tf_func = TestSameTransfoms.tf_trans['RandomResizedCrop'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        pt_func = TestSameTransfoms.pt_trans['RandomResizedCrop'](**args)
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['RandomResizedCrop'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(tf_result.shape, (4,4,3))
        self.assertEqual(pt_result.size, (4,4))
        self.assertEqual(mx_result.shape, (4,4,3))
        # A scale range with min > max is rejected on every backend.
        args = {'size': 4, 'scale':(0.8, 0.2)}
        with self.assertRaises(ValueError):
            TestSameTransfoms.tf_trans['RandomResizedCrop'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.pt_trans['RandomResizedCrop'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.mx_trans['RandomResizedCrop'](**args)
        # Unknown interpolation names are rejected on every backend.
        args = {'size': 4, 'interpolation':'test'}
        with self.assertRaises(ValueError):
            TestSameTransfoms.tf_trans['RandomResizedCrop'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.pt_trans['RandomResizedCrop'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.mx_trans['RandomResizedCrop'](**args)
    def testCropResize(self):
        # Crop the full 10x10 image, then resize to [h, w].
        args = {'x':0, 'y':0, 'width':10, 'height':10, 'size':[5,5]}
        tf_func = TestSameTransfoms.tf_trans['CropResize'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        mx_func = TestSameTransfoms.mx_trans['CropResize'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        ox_func = TestSameTransfoms.ox_trans['CropResize'](**args)
        ox_result = ox_func((TestSameTransfoms.img, None))[0]
        pt_func = TestSameTransfoms.pt_trans['CropResize'](**args)
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        self.assertEqual(tf_result.shape, (5,5,3))
        self.assertEqual(mx_result.shape, (5,5,3))
        self.assertEqual(ox_result.shape, (5,5,3))
        self.assertEqual(pt_result.size, (5,5))
        # size given as a bare int.
        args = {'x':0, 'y':0, 'width':10, 'height':10, 'size':5}
        tf_func = TestSameTransfoms.tf_trans['CropResize'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        mx_func = TestSameTransfoms.mx_trans['CropResize'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        ox_func = TestSameTransfoms.ox_trans['CropResize'](**args)
        ox_result = ox_func((TestSameTransfoms.img, None))[0]
        self.assertEqual(tf_result.shape, (5,5,3))
        self.assertEqual(mx_result.shape, (5,5,3))
        self.assertEqual(ox_result.shape, (5,5,3))
        # size given as a single-element list.
        args = {'x':0, 'y':0, 'width':10, 'height':10, 'size':[5]}
        tf_func = TestSameTransfoms.tf_trans['CropResize'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        mx_func = TestSameTransfoms.mx_trans['CropResize'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        ox_func = TestSameTransfoms.ox_trans['CropResize'](**args)
        ox_result = ox_func((TestSameTransfoms.img, None))[0]
        self.assertEqual(tf_result.shape, (5,5,3))
        self.assertEqual(mx_result.shape, (5,5,3))
        self.assertEqual(ox_result.shape, (5,5,3))
        # size given as an [h, w] pair.
        args = {'x':0, 'y':0, 'width':10, 'height':10, 'size':[5,5]}
        tf_func = TestSameTransfoms.tf_trans['CropResize'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        mx_func = TestSameTransfoms.mx_trans['CropResize'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        ox_func = TestSameTransfoms.ox_trans['CropResize'](**args)
        ox_result = ox_func((TestSameTransfoms.img, None))[0]
        self.assertEqual(tf_result.shape, (5,5,3))
        self.assertEqual(mx_result.shape, (5,5,3))
        self.assertEqual(ox_result.shape, (5,5,3))
        # Unknown interpolation names are rejected on every backend.
        args = {'x':0, 'y':0, 'width':10, 'height':10, 'size':5, 'interpolation':'test'}
        with self.assertRaises(ValueError):
            TestSameTransfoms.ox_trans['CropResize'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.mx_trans['CropResize'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.tf_trans['CropResize'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.pt_trans['CropResize'](**args)
    def testRandomHorizontalFlip(self):
        tf_func = TestSameTransfoms.tf_trans['RandomHorizontalFlip']()
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        pt_func = TestSameTransfoms.pt_trans['RandomHorizontalFlip']()
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['RandomHorizontalFlip']()
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        # The flip is random, so the result must equal either the original
        # image or its left-right mirror.
        self.assertTrue(
            (np.array(TestSameTransfoms.pt_img) == np.array(pt_result)).all() or
            (np.fliplr(np.array(TestSameTransfoms.pt_img)) == np.array(pt_result)).all()
        )
        self.assertTrue(
            (TestSameTransfoms.img == tf_result).all() or
            (np.fliplr(TestSameTransfoms.img) == tf_result).all()
        )
        self.assertTrue(
            (TestSameTransfoms.mx_img.asnumpy() == mx_result.asnumpy()).all() or
            (np.fliplr(TestSameTransfoms.mx_img.asnumpy()) == mx_result.asnumpy()).all()
        )
    def testRandomVerticalFlip(self):
        tf_func = TestSameTransfoms.tf_trans['RandomVerticalFlip']()
        tf_result = tf_func((TestSameTransfoms.img, None))
        tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
        pt_func = TestSameTransfoms.pt_trans['RandomVerticalFlip']()
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['RandomVerticalFlip']()
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        # The flip is random, so the result must equal either the original
        # image or its top-bottom mirror.
        self.assertTrue(
            (np.array(TestSameTransfoms.pt_img) == np.array(pt_result)).all() or
            (np.flipud(np.array(TestSameTransfoms.pt_img)) == np.array(pt_result)).all()
        )
        self.assertTrue(
            (TestSameTransfoms.img == tf_result).all() or
            (np.flipud(TestSameTransfoms.img) == tf_result).all()
        )
        self.assertTrue(
            (TestSameTransfoms.mx_img.asnumpy() == mx_result.asnumpy()).all() or
            (np.flipud(TestSameTransfoms.mx_img.asnumpy()) == mx_result.asnumpy()).all()
        )
class TestTFTransorm(unittest.TestCase):
    """TensorFlow-specific transform behaviors (graph-mode evaluation)."""

    @classmethod
    def setUpClass(cls):
        cls.img = np.ones([10, 10, 3])
        cls.transforms = TRANSFORMS('tensorflow', 'preprocess')

    def testRandomCrop(self):
        registry = TestTFTransorm.transforms
        # A crop larger than the image must fail at call time.
        oversized = registry['RandomCrop'](**{'size': [50]})
        self.assertRaises(ValueError, oversized, (TestTFTransorm.img, None))
        cropper = registry['RandomCrop'](**{'size': [5, 5]})
        cropped = cropper((TestTFTransorm.img, None))[0]
        cropped = cropped.eval(session=tf.compat.v1.Session())
        self.assertEqual(cropped.shape, (5, 5, 3))
        # Crop size equal to the image size short-circuits (no eval needed).
        same_size = registry['RandomCrop'](**{'size': [10, 10]})
        untouched = same_size((TestTFTransorm.img, None))[0]
        self.assertEqual(untouched.shape, (10, 10, 3))

    def testRescale(self):
        rescale = TestTFTransorm.transforms['Rescale']()
        scaled = rescale((TestTFTransorm.img, None))[0]
        scaled = scaled.eval(session=tf.compat.v1.Session())
        expected = np.array(TestTFTransorm.img) / 255.
        self.assertAlmostEqual(scaled[0][0][0], expected[0][0][0], places=5)

    def testNormalize(self):
        params = {'mean': [0.0, 0.0, 0.0], 'std': [0.2, 0.5, 0.1]}
        normalize = TestTFTransorm.transforms['Normalize'](**params)
        normalized = normalize((TestTFTransorm.img, None))[0]
        normalized = normalized.eval(session=tf.compat.v1.Session())
        # Zero mean, so the output is just an element-wise divide by std.
        expected = np.array(TestTFTransorm.img) / [0.2, 0.5, 0.1]
        for channel in range(3):
            self.assertAlmostEqual(normalized[0][0][channel],
                                   expected[0][0][channel], places=5)
        # A zero std must be rejected at construction time.
        with self.assertRaises(ValueError):
            TestTFTransorm.transforms["Normalize"](
                **{'mean': [0.0, 0.0, 0.0], 'std': [0, 0, 0]})

    def testRandomResizedCrop(self):
        registry = TestTFTransorm.transforms
        crop = registry["RandomResizedCrop"](**{'size': [50]})
        pipeline = registry['Compose']([crop])
        produced = pipeline((TestTFTransorm.img, None))[0]
        produced = produced.eval(session=tf.compat.v1.Session())
        self.assertEqual(produced.shape, (50, 50, 3))
        crop = registry["RandomResizedCrop"](**{'size': [100, 100]})
        pipeline = registry['Compose']([crop])
        produced = pipeline((TestTFTransorm.img, None))[0]
        produced = produced.eval(session=tf.compat.v1.Session())
        self.assertEqual(produced.shape, (100, 100, 3))
        # A scale range with min > max is rejected.
        with self.assertRaises(ValueError):
            registry["RandomResizedCrop"](
                **{'size': [100, 100], 'scale': (0.8, 0.1)})
class TestAlignImageChannel(unittest.TestCase):
    """AlignImageChannel must coerce images to the requested channel count.

    The tensorflow and onnxrt backends share identical expectations, so the
    assertions live in a single private helper instead of being duplicated
    verbatim in each test (the original had two copy-pasted bodies).
    """

    @classmethod
    def setUpClass(cls):
        cls.img1 = np.random.random_sample([100, 100, 3]) * 255   # 3-channel
        cls.img2 = np.random.random_sample([100, 100]) * 255      # no channel dim
        cls.img3 = np.random.random_sample([100, 100, 4]) * 255   # 4-channel

    def _check_align(self, transforms):
        """Run the shared AlignImageChannel assertions against one registry."""
        align = transforms['AlignImageChannel'](**{'dim': 1})
        image, _ = align((TestAlignImageChannel.img1.astype(np.uint8), None))
        self.assertEqual(image.shape[-1], 1)
        align = transforms['AlignImageChannel'](**{'dim': 1})
        image, _ = align((TestAlignImageChannel.img2.astype(np.uint8), None))
        self.assertEqual(image.shape[-1], 1)
        align = transforms['AlignImageChannel'](**{'dim': 3})
        image, _ = align((TestAlignImageChannel.img3.astype(np.uint8), None))
        self.assertEqual(image.shape[-1], 3)
        # dim=2 has no defined conversion from a 3-channel input: call fails.
        align = transforms['AlignImageChannel'](**{'dim': 2})
        self.assertRaises(ValueError, align,
                          (TestAlignImageChannel.img1.astype(np.uint8), None))
        # An unsupported dim (5) is rejected already at construction time.
        with self.assertRaises(ValueError):
            transforms['AlignImageChannel'](**{'dim': 5})

    def testTensorflow(self):
        self._check_align(TRANSFORMS('tensorflow', 'preprocess'))

    def testONNX(self):
        self._check_align(TRANSFORMS('onnxrt_qlinearops', 'preprocess'))
class TestToArray(unittest.TestCase):
    """ToArray must yield numpy arrays from PIL and mxnet inputs."""

    def testParse(self):
        pixels = (np.random.random_sample([10, 10, 3]) * 255).astype(np.uint8)
        pil_image = Image.fromarray(pixels)
        onnx_transforms = TRANSFORMS('onnxrt_qlinearops', 'preprocess')
        to_array = onnx_transforms['ToArray']()
        converted, _ = to_array((pil_image, None))
        self.assertTrue(isinstance(converted, np.ndarray))
        mxnet_transforms = TRANSFORMS('mxnet', 'preprocess')
        to_array = mxnet_transforms['ToArray']()
        converted, _ = to_array((mx.nd.array(pixels), None))
        self.assertTrue(isinstance(converted, np.ndarray))
        # Plain Python lists are not an accepted input type.
        self.assertRaises(ValueError, to_array, ([1, 2], None))
class TestMXNetTransform(unittest.TestCase):
    """MXNet-specific transform behaviors."""

    @classmethod
    def setUpClass(cls):
        raw = np.random.random_sample([100, 100, 3]) * 255
        cls.img = mx.nd.array(raw)
        cls.transforms = TRANSFORMS('mxnet', 'preprocess')

    def testRandomCrop(self):
        crop = TestMXNetTransform.transforms["RandomCrop"](**{'size': [50]})
        pipeline = TestMXNetTransform.transforms['Compose']([crop])
        result = pipeline((TestMXNetTransform.img, None))
        self.assertEqual(result[0].shape, (50, 50, 3))

    def testNormalize(self):
        params = {'mean': [0.0, 0.0, 0.0], 'std': [0.29, 0.24, 0.25]}
        normalize = TestMXNetTransform.transforms['Normalize'](**params)
        result = normalize((TestMXNetTransform.img, None))
        # With zero mean, the first channel is simply img / 0.29.
        self.assertAlmostEqual(result[0].asnumpy()[0][0][0],
                               (TestMXNetTransform.img.asnumpy()/[0.29])[0][0][0],
                               places=3)
class TestONNXTransfrom(unittest.TestCase):
    """Transform behaviors on the onnxrt_qlinearops backend (numpy in/out)."""
    @classmethod
    def setUpClass(cls):
        cls.img = np.random.random_sample([100,100,3]) * 255
        cls.transforms = TRANSFORMS('onnxrt_qlinearops', 'preprocess')
    def testResize(self):
        # Single-element size list -> square resize.
        args = {'size':[224]}
        resize = TestONNXTransfrom.transforms['Resize'](**args)
        compose = TestONNXTransfrom.transforms['Compose']([resize])
        image_result = compose((self.img, None))
        self.assertEqual(image_result[0].shape, (224,224,3))
        # Unknown interpolation names are rejected at construction time.
        args = {'size':[100, 100], 'interpolation':'test'}
        with self.assertRaises(ValueError):
            TestONNXTransfrom.transforms['Resize'](**args)
        # Bare int size -> square resize as well.
        args = {'size':224}
        resize = TestONNXTransfrom.transforms['Resize'](**args)
        compose = TestONNXTransfrom.transforms['Compose']([resize])
        image_result = compose((self.img, None))
        self.assertEqual(image_result[0].shape, (224,224,3))
        # Explicit [h, w] pair.
        args = {'size':[224,224]}
        resize = TestONNXTransfrom.transforms['Resize'](**args)
        compose = TestONNXTransfrom.transforms['Compose']([resize])
        image_result = compose((self.img, None))
        self.assertEqual(image_result[0].shape, (224,224,3))
    def testNormalize(self):
        args = {'mean':[0.0,0.0,0.0], 'std':[0.29, 0.24, 0.25]}
        normalize = TestONNXTransfrom.transforms['Normalize'](**args)
        compose = TestONNXTransfrom.transforms['Compose']([normalize])
        image_result = compose((TestONNXTransfrom.img, None))
        # Zero mean, so the output is just an element-wise divide by std.
        self.assertTrue(
            (image_result[0] == np.array(TestONNXTransfrom.img)/[0.29, 0.24, 0.25]).all())
        # A zero std must be rejected at construction time.
        args = {'mean':[0.0,0.0,0.0], 'std':[0,0,0]}
        with self.assertRaises(ValueError):
            TestONNXTransfrom.transforms["Normalize"](**args)
    def testRandomCrop(self):
        args = {'size':[50]}
        randomcrop = TestONNXTransfrom.transforms["RandomCrop"](**args)
        compose = TestONNXTransfrom.transforms['Compose']([randomcrop])
        image_result = compose((TestONNXTransfrom.img, None))
        self.assertEqual(image_result[0].shape, (50,50,3))
        # A crop larger than the image fails at call time.
        args = {'size':[1000, 1000]}
        with self.assertRaises(ValueError):
            trans = TestONNXTransfrom.transforms["RandomCrop"](**args)
            trans((TestONNXTransfrom.img, None))
        args = {'size':50}
        randomcrop = TestONNXTransfrom.transforms["RandomCrop"](**args)
        compose = TestONNXTransfrom.transforms['Compose']([randomcrop])
        image_result = compose((TestONNXTransfrom.img, None))
        self.assertEqual(image_result[0].shape, (50,50,3))
        # Crop size equal to the image size leaves the shape unchanged.
        args = {'size':[100,100]}
        randomcrop = TestONNXTransfrom.transforms["RandomCrop"](**args)
        compose = TestONNXTransfrom.transforms['Compose']([randomcrop])
        image_result = compose((TestONNXTransfrom.img, None))
        self.assertEqual(image_result[0].shape, (100,100,3))
    def testCenterCrop(self):
        args = {'size':[100]}
        centercrop = TestONNXTransfrom.transforms["CenterCrop"](**args)
        compose = TestONNXTransfrom.transforms['Compose']([centercrop])
        image_result = compose((TestONNXTransfrom.img, None))
        self.assertEqual(image_result[0].shape, (100,100,3))
        args = {'size': 5}
        centercrop = TestONNXTransfrom.transforms["CenterCrop"](**args)
        image_result = centercrop((TestONNXTransfrom.img, None))
        self.assertEqual(image_result[0].shape, (5,5,3))
        # Non-square crops are supported via an [h, w] pair.
        args = {'size': [5, 6]}
        centercrop = TestONNXTransfrom.transforms["CenterCrop"](**args)
        image_result = centercrop((TestONNXTransfrom.img, None))
        self.assertEqual(image_result[0].shape, (5,6,3))
        # A crop larger than the image fails at call time.
        args = {'size':[150]}
        centercrop = TestONNXTransfrom.transforms["CenterCrop"](**args)
        with self.assertRaises(ValueError):
            centercrop((TestONNXTransfrom.img, None))
    def testRandomResizedCrop(self):
        args = {'size':[150]}
        randomresizedcrop = TestONNXTransfrom.transforms["RandomResizedCrop"](**args)
        compose = TestONNXTransfrom.transforms['Compose']([randomresizedcrop])
        image_result = compose((TestONNXTransfrom.img, None))
        self.assertEqual(image_result[0].shape, (150,150,3))
        # A scale range with min > max is rejected.
        args = {'size':[150, 150], 'scale':(0.9, 0.3)}
        with self.assertRaises(ValueError):
            TestONNXTransfrom.transforms["RandomResizedCrop"](**args)
        # Unknown interpolation names are rejected.
        args = {'size':150, 'interpolation':'test'}
        with self.assertRaises(ValueError):
            TestONNXTransfrom.transforms["RandomResizedCrop"](**args)
class TestBertSquad(unittest.TestCase):
    """SquadV1PostTransform + SquadF1 metric, and the TF BERT dataset loader.

    setUpClass downloads the BERT WWM checkpoint zip and the SQuAD v1.1 dev
    labels into /tmp/.lpot (cached between runs) and copies the vocab and
    label files into ./bert for the tests to read.
    """
    @classmethod
    def setUpClass(cls):
        bert_url = 'https://storage.googleapis.com/bert_models/2019_05_30/wwm_uncased_L-24_H-1024_A-16.zip'
        label_url = 'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json'
        model_path = '/tmp/.lpot/wwm_uncased_L-24_H-1024_A-16.zip'
        model_unzip = '/tmp/.lpot/wwm_uncased_L-24_H-1024_A-16'
        label_path = '/tmp/.lpot/dev-v1.1.json'
        # Download/unzip only when the cached artifacts are missing.
        if not os.path.exists(model_path):
            os.system('mkdir -p /tmp/.lpot && wget {} -O {}'.format(bert_url, model_path))
        if not os.path.exists(label_path):
            os.system('mkdir -p /tmp/.lpot && wget {} -O {}'.format(label_url, label_path))
        if not os.path.exists(model_unzip):
            os.system('unzip {} -d /tmp/.lpot'.format(model_path))
        os.system('mkdir -p bert && cp {}/vocab.txt bert'.format(model_unzip))
        os.system('cp {} bert'.format(label_path))
        cls.label_file = 'bert/dev-v1.1.json'
        cls.vocab_file = 'bert/vocab.txt'
    def testSquadV1PostAndF1(self):
        from lpot.data.transforms.transform import SquadV1PostTransform
        squad_post = SquadV1PostTransform(self.label_file, self.vocab_file)
        # Synthetic predictions: one id per dev example, constant logits.
        unique_ids=np.arange(1000000000, 1000010833)
        start_logits=np.ones((10833, 384), np.float32)
        end_logits=np.ones((10833, 384), np.float32)
        preds = [unique_ids, start_logits, end_logits]
        def get_labels(label_file):
            # Parse the SQuAD json and return its article list.
            import json
            with open(label_file) as lf:
                label_json = json.load(lf)
                assert label_json['version'] == '1.1', 'only support squad 1.1'
                return label_json['data']
        labels = get_labels(self.label_file)
        preds, labels = squad_post((preds, labels))
        from lpot.metric.metric import SquadF1
        squad_metric = SquadF1()
        squad_metric.update(preds, labels)
        result = squad_metric.result()
        # Constant logits produce a small, deterministic F1 score.
        self.assertEqual(round(result, 2), 1.92)
        squad_metric.reset()
    def testBertDataLoader(self):
        from lpot.data.datasets.bert_dataset import TensorflowBertDataset
        fake_record='fake.tf_record'
        bert_dataset = TensorflowBertDataset(fake_record, self.label_file)
        # The dataset wraps a single record path.
        self.assertEqual(len(bert_dataset), 1)
        get_record, _ = bert_dataset[0]
        self.assertEqual(fake_record, get_record)
        from lpot.data.dataloaders.tensorflow_dataloader import TensorflowDataLoader
        bert_dataloader = TensorflowDataLoader(bert_dataset, batch_size=1)
        iterator = iter(bert_dataloader)
        # Each batch yields ((record_path, batch_size), label).
        (get_record, batch_size), _ = next(iterator)
        self.assertEqual(fake_record, get_record)
class TestImagenetTransform(unittest.TestCase):
    """ParseDecodeImagenet must round-trip an image/label pair via a TFRecord."""

    def testParseDecodeImagenet(self):
        pixels = (np.random.random_sample([100, 100, 3]) * 255).astype(np.uint8)
        im = Image.fromarray(pixels)
        im.save('test.jpeg')
        encoded = tf.compat.v1.gfile.FastGFile('test.jpeg', 'rb').read()
        label = 10

        # Small local builders keep the Example construction readable.
        def _bytes(values):
            return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))

        def _int64(values):
            return tf.train.Feature(int64_list=tf.train.Int64List(value=values))

        def _floats(values):
            return tf.train.Feature(float_list=tf.train.FloatList(value=values))

        example = tf.train.Example(features=tf.train.Features(feature={
            'image/encoded': _bytes([encoded]),
            'image/class/label': _int64([label]),
            'image/object/bbox/xmin': _floats([10]),
            'image/object/bbox/ymin': _floats([20]),
            'image/object/bbox/xmax': _floats([100]),
            'image/object/bbox/ymax': _floats([200]),
        }))
        with tf.io.TFRecordWriter('test.record') as writer:
            writer.write(example.SerializeToString())
        eval_dataset = create_dataset(
            'tensorflow', {'TFRecordDataset':{'root':'test.record'}}, {'ParseDecodeImagenet':{}}, None)
        dataloader = DATALOADERS['tensorflow'](dataset=eval_dataset, batch_size=1)
        # Only the first batch is needed to verify the decode output.
        for (inputs, labels) in dataloader:
            self.assertEqual(inputs.shape, (1, 100, 100, 3))
            self.assertEqual(labels[0][0], 10)
            break
        os.remove('test.record')
        os.remove('test.jpeg')
class TestCOCOTransform(unittest.TestCase):
    """Exercises ParseDecodeCoco on synthetic COCO-style TFRecords."""

    def testCOCODecode(self):
        from lpot.data.transforms.coco_transform import ParseDecodeCocoTransform
        tf.compat.v1.disable_eager_execution()

        # Create a random 100x100 RGB JPEG on disk and read back its bytes.
        pixels = (np.random.random_sample([100, 100, 3]) * 255).astype(np.uint8)
        Image.fromarray(pixels).save('test.jpeg')
        encoded = tf.compat.v1.gfile.FastGFile('test.jpeg', 'rb').read()

        source_id = '000000397133.jpg'.encode('utf-8')
        label = 'person'.encode('utf-8')

        def bbox_feature(value):
            # Single-value float feature used for every bbox coordinate.
            return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))

        full_features = {
            'image/encoded': tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[encoded])),
            'image/object/class/text': tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[label])),
            'image/source_id': tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[source_id])),
            'image/object/bbox/xmin': bbox_feature(10),
            'image/object/bbox/ymin': bbox_feature(10),
            'image/object/bbox/xmax': bbox_feature(100),
            'image/object/bbox/ymax': bbox_feature(100),
        }
        example = tf.train.Example(
            features=tf.train.Features(feature=full_features))
        with tf.io.TFRecordWriter('test.record') as writer:
            writer.write(example.SerializeToString())

        # A record with class text decodes into image batches plus one bbox.
        eval_dataset = create_dataset(
            'tensorflow', {'COCORecord': {'root': 'test.record'}},
            {'ParseDecodeCoco': {}}, None)
        dataloader = DATALOADERS['tensorflow'](dataset=eval_dataset, batch_size=1)
        for inputs, labels in dataloader:
            self.assertEqual(inputs.shape, (1, 100, 100, 3))
            self.assertEqual(labels[0].shape, (1, 1, 4))

        # The standalone transform should decode the raw serialized example too.
        func = ParseDecodeCocoTransform()
        out = func(example.SerializeToString())
        self.assertEqual(
            out[0].eval(session=tf.compat.v1.Session()).shape, (100, 100, 3))

        # Without 'image/object/class/text' the dataset must be rejected.
        partial_features = dict(full_features)
        del partial_features['image/object/class/text']
        example = tf.train.Example(
            features=tf.train.Features(feature=partial_features))
        with tf.io.TFRecordWriter('test2.record') as writer:
            writer.write(example.SerializeToString())
        self.assertRaises(ValueError, create_dataset,
                          'tensorflow', {'COCORecord': {'root': 'test2.record'}},
                          None, None)
        os.remove('test2.record')
        os.remove('test.record')
        os.remove('test.jpeg')
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
{"/lpot/ux/utils/workload/workload.py": ["/lpot/ux/utils/json_serializer.py"], "/lpot/adaptor/onnxrt.py": ["/lpot/adaptor/adaptor.py", "/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/router.py": ["/lpot/ux/components/benchmark/execute_benchmark.py", "/lpot/ux/components/configuration_wizard/params_feeder.py", "/lpot/ux/components/tune/execute_tune.py"], "/lpot/adaptor/pytorch.py": ["/lpot/adaptor/adaptor.py"], "/lpot/ux/utils/parser.py": ["/lpot/ux/utils/templates/metric.py"], "/lpot/ux/utils/workload/workloads_list.py": ["/lpot/ux/utils/json_serializer.py", "/lpot/ux/utils/templates/metric.py"], "/lpot/ux/components/tune/execute_tune.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/lpot/ux/utils/templates/metric.py": ["/lpot/ux/utils/json_serializer.py"], "/test/test_onnx_model.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/test/test_onnx_calibrate.py": ["/lpot/adaptor/ox_utils/onnx_calibrate.py"], "/lpot/ux/web/server.py": ["/lpot/ux/web/router.py"], "/lpot/ux/components/benchmark/execute_benchmark.py": ["/lpot/ux/utils/parser.py", "/lpot/ux/utils/workload/workload.py"], "/test/test_adaptor_pytorch.py": ["/lpot/adaptor/pytorch.py"], "/test/test_transform.py": ["/lpot/data/transforms/coco_transform.py"]}
|
38,699
|
cyuanli/DRL_Seminar_BlackJack
|
refs/heads/master
|
/evaluation.py
|
# Evaluate the DQN agent on several Blackjack environment variants and
# report the average reward (win rate) achieved on each.
from Blackjack import BlackjackEnv
from blackjack_dqn import BlackjackDqn

# Training/evaluation hyper-parameters shared by every configuration.
n_train_episodes = 200000
n_test_episodes = 10000
n_batch_size = 100
t_train_interval = 10

# Four deck configurations: a trivial one-card dealer, the standard game,
# a uniform deck of 2s, and a randomised deck of mixed card values.
environment_configurations = [{"one_card_dealer": True},
                              {},
                              {"card_values": [2] * 52},
                              {"card_values": [3, 1, 3, 9, 6, 0, 7, -2, 2, 6, 8, 1, 3,
                                               4, -1, 4, 3, 9, -1, 4, 0, 4, 7, -2, -1, 5,
                                               2, 6, -3, -1, 2, 2, -1, 7, 1, 0, 7, 8, 4,
                                               5, 3, -1, 0, 3, -1, 3, 0, 6, -2, 4, -3, 4]}]

achieved_win_rates = []
for env_config in environment_configurations:
    print("Config: {}".format(env_config))
    # Fresh environment and agent per configuration; train then evaluate.
    env = BlackjackEnv(env_config)
    agent = BlackjackDqn(env)
    agent.train(n_train_episodes, n_batch_size, t_train_interval)
    avg_reward = agent.test(n_test_episodes)
    print('Average reward: {}'.format(avg_reward))
    achieved_win_rates.append(avg_reward)
print("DQN achieved the following win rates: {}".format(achieved_win_rates))

# OUTPUT:
# DQN achieved the following win rates:
# [0.9977, 0.4078, 1.0, 0.4448]
|
{"/evaluation.py": ["/blackjack_dqn.py"], "/blackjack_dqn.py": ["/Agents/dqn.py"]}
|
38,700
|
cyuanli/DRL_Seminar_BlackJack
|
refs/heads/master
|
/blackjack_dqn.py
|
import argparse
import Blackjack
import gym
import numpy as np
from tqdm import trange
from Agents.dqn import DqnAgent
class BlackjackDqn(DqnAgent):
    """DQN agent specialised for the Blackjack environment.

    States are re-encoded before use: the dealer card index (state[1]) is
    turned into a 52-way one-hot vector and appended to state[0], giving the
    104-dimensional network input expected by the base class.
    """

    def __init__(self, env):
        DqnAgent.__init__(self, env, state_dim=104, action_space=2)

    def _transform_state(self, state):
        # One-hot encode the dealer card index over the 52-card deck.
        dealer_one_hot = [False] * 52
        dealer_one_hot[state[1]] = True
        return np.concatenate([state[0], dealer_one_hot])

    def train(self, n_episodes, n_batch_size, t_train_interval):
        """Run epsilon-greedy episodes; train every t_train_interval episodes
        on replay batches of n_batch_size once enough data is stored."""
        for episode_idx in trange(n_episodes, desc='Training', ncols=100):
            state = self._transform_state(self.env.reset())
            transitions = []
            terminal = False
            while not terminal:
                action = self.get_action(state)
                raw_next, reward, terminal, _ = self.env.step(action)
                next_state = self._transform_state(raw_next)
                transitions.append(
                    np.array([state, action, reward, next_state, terminal]))
                state = next_state
            self.replay_buffer.add(transitions)
            if episode_idx % t_train_interval == 0 and len(self.replay_buffer) >= n_batch_size:
                self.q_network.train(
                    self.replay_buffer.sample(n_batch_size), self.gamma)

    def test(self, n_episodes):
        """Play greedily for n_episodes and return the mean episode reward."""
        episode_rewards = []
        for _ in trange(n_episodes, desc='Testing', ncols=100):
            state = self._transform_state(self.env.reset())
            total = 0
            terminal = False
            while not terminal:
                action = self.get_action(state, explore=False)
                raw_state, reward, terminal, _ = self.env.step(action)
                state = self._transform_state(raw_state)
                total += reward
            episode_rewards.append(total)
        return np.mean(episode_rewards)
def main(n_train_episodes, n_batch_size, t_train_interval, n_test_episodes):
    """Train a BlackjackDqn on gym's Blackjack-v1, then report test reward."""
    agent = BlackjackDqn(gym.make('Blackjack-v1'))
    agent.train(n_train_episodes, n_batch_size, t_train_interval)
    average_reward = agent.test(n_test_episodes)
    print('Average reward: {}'.format(average_reward))
# CLI entry point: parse hyper-parameters and launch training + evaluation.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='DQN for coding challenge of DRL seminar')
    parser.add_argument('--n_train', dest='n_train_episodes', default=20000,
                        type=int, help='number of episodes for training')
    parser.add_argument('--batch_size', dest='n_batch_size', default=100,
                        type=int, help='size of training batches')
    parser.add_argument('--train_interval', dest='t_train_interval', default=10,
                        type=int, help='interval between trainings in episodes')
    parser.add_argument('--n_test', dest='n_test_episodes', default=2000,
                        type=int, help='number of episodes for testing')
    args = parser.parse_args()
    main(args.n_train_episodes, args.n_batch_size,
         args.t_train_interval, args.n_test_episodes)
|
{"/evaluation.py": ["/blackjack_dqn.py"], "/blackjack_dqn.py": ["/Agents/dqn.py"]}
|
38,701
|
cyuanli/DRL_Seminar_BlackJack
|
refs/heads/master
|
/Agents/dqn.py
|
import numpy as np
import random
import tensorflow as tf
from tqdm import trange
# tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
class QNetwork():
    """Fully-connected Q-value approximator: two 64-unit ReLU hidden layers
    mapping a state vector to two action values, trained with SGD."""

    def __init__(self, state_dim):
        self.model = tf.keras.Sequential([
            tf.keras.layers.Input(state_dim),
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dense(2, activation='linear'),
        ])
        self.loss = tf.keras.losses.MeanSquaredError()
        self.optimizer = tf.keras.optimizers.SGD(learning_rate=0.2)

    def predict(self, state):
        """Return Q-values; a single state is promoted to a batch of one."""
        batch = np.expand_dims(state, axis=0) if state.ndim == 1 else state
        return self.model(batch)

    def train(self, experiences, gamma):
        """One SGD step on a batch of (s, a, r, s', terminal) rows."""
        states = np.stack(experiences[:, 0])
        actions = experiences[:, 1]
        rewards = experiences[:, 2]
        states_next = np.stack(experiences[:, 3])
        terminals = experiences[:, 4]
        # Bootstrapped TD target: r + gamma * max_a' Q(s', a'),
        # with the bootstrap term zeroed on terminal transitions.
        q_next = self.predict(states_next)
        targets = rewards + (1 - terminals) * gamma * np.max(q_next, axis=1)
        with tf.GradientTape() as tape:
            q = self.predict(states)
            # Copy predictions and overwrite only the taken action's value,
            # so the loss penalises just the actions that were experienced.
            q_train = np.copy(q)
            for row, action in enumerate(actions):
                q_train[row][action] = targets[row]
            loss = self.loss(q, q_train)
        grads = tape.gradient(loss, self.model.trainable_variables)
        self.optimizer.apply_gradients(
            zip(grads, self.model.trainable_variables))
class ReplayBuffer():
    """Fixed-capacity FIFO store of experience tuples.

    When adding new experiences would exceed the configured capacity, the
    oldest entries are evicted first so the newest data is always retained.
    """

    def __init__(self, size=5000):
        # `size` is the maximum number of experiences kept at once.
        self.buffer = []
        self.size = size

    def __len__(self):
        return len(self.buffer)

    def add(self, experiences):
        """Append `experiences`, dropping the oldest entries on overflow."""
        overflow = len(self.buffer) + len(experiences) - self.size
        if overflow > 0:
            del self.buffer[:overflow]
        self.buffer.extend(experiences)
        assert len(self.buffer) <= self.size

    def sample(self, n):
        """Return `n` experiences sampled uniformly without replacement."""
        return np.array(random.sample(self.buffer, n))
class DqnAgent():
    """Generic epsilon-greedy DQN agent over a discrete action space.

    NOTE(review): this base class unpacks env.step() into three values, so
    it expects a wrapper environment (not a raw gym env, which returns four);
    subclasses such as BlackjackDqn override train/test — confirm callers.
    """

    def __init__(self, env, state_dim, action_space):
        self.state_dim = state_dim
        self.action_space = action_space
        self.epsilon = 0.2   # exploration probability
        self.gamma = 1.      # undiscounted returns
        self.env = env
        self.q_network = QNetwork(state_dim)
        self.replay_buffer = ReplayBuffer()

    def get_action(self, state, explore=True):
        """Epsilon-greedy action selection; purely greedy when explore=False."""
        if explore and np.random.rand() < self.epsilon:
            return np.random.randint(self.action_space)
        return np.argmax(self.q_network.predict(state)[0])

    def train(self, n_episodes, n_batch_size, t_train_interval):
        """Collect episodes into the replay buffer; train every
        t_train_interval episodes once n_batch_size samples are stored."""
        for episode_idx in trange(n_episodes, desc='Training', ncols=100):
            state = self.env.reset()
            transitions = []
            terminal = False
            while not terminal:
                action = self.get_action(state)
                next_state, reward, terminal = self.env.step(action)
                transitions.append(
                    np.array([state, action, reward, next_state, terminal]))
                state = next_state
            self.replay_buffer.add(transitions)
            if episode_idx % t_train_interval == 0 and len(self.replay_buffer) >= n_batch_size:
                self.q_network.train(
                    self.replay_buffer.sample(n_batch_size), self.gamma)

    def test(self, n_episodes):
        """Run greedy episodes and return the mean cumulative reward."""
        totals = []
        for _ in trange(n_episodes, desc='Testing', ncols=100):
            state = self.env.reset()
            terminal = False
            total = 0
            while not terminal:
                action = self.get_action(state, explore=False)
                state, reward, terminal = self.env.step(action)
                total += reward
            totals.append(total)
        return np.mean(totals)
|
{"/evaluation.py": ["/blackjack_dqn.py"], "/blackjack_dqn.py": ["/Agents/dqn.py"]}
|
38,702
|
cyuanli/DRL_Seminar_BlackJack
|
refs/heads/master
|
/dqn.py
|
import argparse
import Blackjack
from datetime import datetime
import gym
import numpy as np
import random
import tensorflow as tf
from tqdm import trange
# tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
class QNetworkModel(tf.keras.Model):
    """Q-network with a shared feature extractor applied to each state half.

    The input is split into two 52-dim parts (presumably player hand and
    dealer-card one-hot — TODO confirm against BlackjackEnv); both pass
    through the same feature layers before the concatenated features are
    mapped to two Q-values.
    """

    def __init__(self):
        super(QNetworkModel, self).__init__()
        self.feature_1 = tf.keras.layers.Dense(11, activation='linear')
        self.feature_2 = tf.keras.layers.Dense(1, activation='relu')
        self.q_value_1 = tf.keras.layers.Dense(2, activation='relu')
        self.q_value_2 = tf.keras.layers.Dense(2, activation='linear')

    def call(self, state):
        # Weight sharing: the same feature stack encodes both state halves.
        first_half = self.feature_2(self.feature_1(state[:, :52]))
        second_half = self.feature_2(self.feature_1(state[:, 52:]))
        merged = tf.concat([first_half, second_half], 1)
        return self.q_value_2(self.q_value_1(merged))
class QNetwork():
    """Owns the Q-model, loss, optimizer and timestamped checkpoint naming."""

    def __init__(self, state_dim, action_space):
        # NOTE(review): state_dim/action_space are accepted for interface
        # symmetry but the model dimensions are currently hard-coded.
        self.model = QNetworkModel()
        self.loss = tf.keras.losses.MeanSquaredError()
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        self.timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')

    def __call__(self, state):
        """Return Q-values; a single state is promoted to a batch of one."""
        batch = np.expand_dims(state, axis=0) if state.ndim == 1 else state
        return self.model(batch)

    def train(self, experiences, gamma):
        """One Adam step on a batch of (s, a, r, s', terminal) rows."""
        states = np.stack(experiences[:, 0])
        actions = experiences[:, 1]
        rewards = experiences[:, 2]
        states_next = np.stack(experiences[:, 3])
        terminals = experiences[:, 4]
        # Bootstrapped TD target, zeroed on terminal transitions.
        q_next = self.__call__(states_next)
        targets = rewards + (1 - terminals) * gamma * np.max(q_next, axis=1)
        with tf.GradientTape() as tape:
            q = self.__call__(states)
            # Overwrite only the taken action's value so the loss penalises
            # just the actions that were actually experienced.
            q_train = np.copy(q)
            for row, action in enumerate(actions):
                q_train[row][action] = targets[row]
            loss = self.loss(q, q_train)
        grads = tape.gradient(loss, self.model.trainable_variables)
        self.optimizer.apply_gradients(
            zip(grads, self.model.trainable_variables))

    def save(self):
        """Persist model weights under a timestamped file name."""
        self.model.save_weights(
            'saved_models/dqn-{}.h5'.format(self.timestamp))
class ReplayBuffer():
    """Bounded FIFO experience store; oldest entries are evicted on overflow."""

    def __init__(self, size=5000):
        self.buffer = []
        self.size = size  # capacity in experiences

    def __len__(self):
        return len(self.buffer)

    def add(self, experiences):
        """Append experiences, trimming the oldest to respect capacity."""
        excess = len(self.buffer) + len(experiences) - self.size
        if excess > 0:
            del self.buffer[:excess]
        self.buffer.extend(experiences)
        assert len(self.buffer) <= self.size

    def sample(self, n):
        """Draw n experiences uniformly without replacement, as an ndarray."""
        return np.array(random.sample(self.buffer, n))
class BlackjackEnv():
    """Thin wrapper around gym's Blackjack-v1 exposing a flat 104-dim state."""

    def __init__(self):
        self.env = gym.make('Blackjack-v1')
        self.state_dim = 104
        self.action_space = 2

    def _transform_state(self, state):
        # Append a 52-way one-hot of the dealer card index to state[0].
        dealer_one_hot = [False] * 52
        dealer_one_hot[state[1]] = True
        return np.concatenate([state[0], dealer_one_hot])

    def reset(self):
        return self._transform_state(self.env.reset())

    def step(self, action):
        # Drop gym's info dict; callers only need (state, reward, terminal).
        state, reward, terminal, _ = self.env.step(action)
        return self._transform_state(state), reward, terminal
class Agent():
    """Epsilon-greedy DQN agent for the wrapped Blackjack environment."""

    def __init__(self, env):
        self.epsilon = 0.2   # exploration probability
        self.gamma = 1.      # undiscounted returns
        self.env = env
        self.q_network = QNetwork(env.state_dim, env.action_space)
        self.replay_buffer = ReplayBuffer()

    def get_action(self, state, explore=True):
        """Epsilon-greedy action selection; purely greedy when explore=False."""
        if explore and np.random.rand() < self.epsilon:
            return np.random.randint(self.env.action_space)
        return np.argmax(self.q_network(state)[0])

    def train(self, n_episodes, n_batch_size, t_train_interval):
        """Collect episodes, train periodically, then checkpoint the network."""
        for episode_idx in trange(n_episodes, desc='Training', ncols=80):
            state = self.env.reset()
            transitions = []
            terminal = False
            while not terminal:
                action = self.get_action(state)
                next_state, reward, terminal = self.env.step(action)
                transitions.append(
                    np.array([state, action, reward, next_state, terminal]))
                state = next_state
            self.replay_buffer.add(transitions)
            if episode_idx % t_train_interval == 0 and len(self.replay_buffer) >= n_batch_size:
                self.q_network.train(
                    self.replay_buffer.sample(n_batch_size), self.gamma)
        self.q_network.save()

    def test(self, n_episodes):
        """Run greedy episodes and print the mean cumulative reward."""
        totals = []
        for _ in trange(n_episodes, desc='Testing', ncols=80):
            state = self.env.reset()
            terminal = False
            total = 0
            while not terminal:
                action = self.get_action(state, explore=False)
                state, reward, terminal = self.env.step(action)
                total += reward
            totals.append(total)
        print('Average reward: {}'.format(np.mean(totals)))
def main(n_train_episodes, n_batch_size, t_train_interval, n_test_episodes):
    """Build the env/agent pair, train the agent, then evaluate it."""
    agent = Agent(BlackjackEnv())
    agent.train(n_train_episodes, n_batch_size, t_train_interval)
    agent.test(n_test_episodes)
# CLI entry point: parse hyper-parameters and launch training + evaluation.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='DQN for coding challenge of DRL seminar')
    parser.add_argument('--n_train', dest='n_train_episodes', default=20000,
                        type=int, help='number of episodes for training')
    parser.add_argument('--batch_size', dest='n_batch_size', default=200,
                        type=int, help='size of training batches')
    parser.add_argument('--train_interval', dest='t_train_interval', default=10,
                        type=int, help='interval between trainings in episodes')
    parser.add_argument('--n_test', dest='n_test_episodes', default=10000,
                        type=int, help='number of episodes for testing')
    args = parser.parse_args()
    main(args.n_train_episodes, args.n_batch_size,
         args.t_train_interval, args.n_test_episodes)
|
{"/evaluation.py": ["/blackjack_dqn.py"], "/blackjack_dqn.py": ["/Agents/dqn.py"]}
|
38,733
|
Dortov/Chart-Loader
|
refs/heads/main
|
/main.py
|
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QMenu, QVBoxLayout, QSizePolicy, QMessageBox, QWidget, QPushButton, QLabel, QFileDialog
from PyQt5.QtGui import QIcon
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from collections import OrderedDict
import datetime
from datetime import datetime
from datetime import date
from dateutil.relativedelta import relativedelta
from dl_charts import PlotCanvas # импортируем графики
from dl_gui import Ui_MainWindow # импортирем оболочку QT
class App(QMainWindow, Ui_MainWindow):  # combine our window class with the Qt-designer form
    """Main Data Loader window: chart canvas on the left, chart list on the right."""

    def __init__(self, parent=None, *args, **kwargs):
        QMainWindow.__init__(self)
        self.setupUi(self)  # required boilerplate that instantiates the designer form
        self.setWindowTitle("Data Loader")
        # Currently selected chart from the list; 0 means no chart selected yet.
        self.item = 0
        # Place a PlotCanvas instance into the widget; self.widget is the parent window.
        self.chart = PlotCanvas(self.widget, width=6, height=4,
                                chart_type='Zero')
        # Build the matplotlib toolbar, passing the PlotCanvas instance as argument.
        self.toolbar = NavigationToolbar(self.chart, self)
        # Chart and toolbar go into the QVBoxLayout inside the widget.
        self.v_box_chart.addWidget(self.chart)
        self.v_box_chart.addWidget(self.toolbar)
        # Wire up the actionImport_data_file entry of the top menu bar.
        self.actionImport_data_file.triggered.connect(self.show_import_dialog)
        # Wire up actionConnect_to_DB; the lambda is needed to pass the message argument.
        self.actionConnect_to_DB.triggered.connect(lambda: self.show_msg("Unavailable in this version"))
        # Wire up actionCurrent_chart_to_PDF.
        self.actionCurrent_chart_to_PDF.triggered.connect(self.show_current_file_export_dialog)
        # Wire up actionAll_charts_to_PDF.
        self.actionAll_charts_to_PDF.triggered.connect(self.show_all_charts_export_dialog)
        # Populate the QListWidget with the available charts; selecting an
        # entry draws the corresponding chart on the left side of the window.
        self.list.insertItem(0, '1. Loan Issue Values per Month (9 months)')
        self.list.insertItem(1, '2. Loan Issue Values per Year')
        self.list.insertItem(2, '3. Loan Issue Values in Cities per Month (9 months)')
        self.list.insertItem(3, '4. Loan Quantity Values per Year')
        self.list.insertItem(4, '5. Loan Issue Values Currency per Month (9 months)')
        self.list.insertItem(5, '6. Current Portfolio, Sum per Product')
        self.list.insertItem(6, '7. Current Portfolio, Loans Quantity per City')
        self.list.insertItem(7, '8. Current Portfolio, Moscow')
        self.list.insertItem(8, '9. Current Portfolio, Sankt-Petersburg')
        self.list.insertItem(9, '10. Current Portfolio, Ekaterinburg')
        self.list.insertItem(10, '11. Current Portfolio, Novosibirsk')
        self.list.insertItem(11, '12. Current Portfolio, Sum per City')
        self.list.insertItem(12, '13. Current Portfolio, Currency')
        self.list.clicked.connect(self.listview_clicked)
        self.show()

    def listview_clicked(self):
        """Draw the chart selected in the list on the right-hand side."""
        if self.chart.file_loaded == True:  # has a data file been loaded?
            self.item = str(self.list.currentItem().text())
            self.chart.plot(chart_type=self.item)
        else:
            # Without a loaded file, list clicks are no-ops; show a hint instead.
            self.show_msg("Please, import the data file or connect to DB")

    def show_import_dialog(self):
        """Load portfolio data from a CSV file chosen by the user."""
        file_pass = QFileDialog.getOpenFileName(self, 'Open file', '/home', "*")[0]
        data = pd.read_csv(file_pass, delimiter='|')  # read the file into a dataframe
        self.chart.calculation(df=data)  # run the chart pre-computations inside PlotCanvas

    def show_current_file_export_dialog(self):
        """Export the currently displayed chart to a PDF file."""
        if self.item != 0:  # a chart has been selected (implies a data file was loaded)
            PDF_file_name = QFileDialog.getSaveFileName(self, "Save File", self.item + ".pdf", "PDF files (*.pdf)")[0]
            self.chart.fig.savefig(PDF_file_name)
        else:
            # No chart selected (also covers the no-data-file case); show a hint.
            self.show_msg("Please, choose the chart to PDF in the list")

    def show_all_charts_export_dialog(
            self):
        """Export all charts onto a single PDF sheet.

        Bypasses PlotCanvas.plot because routing through it broke the
        figure settings.
        """
        if self.chart.file_loaded == True:  # has a data file been loaded?
            PDF_file_name = \
                QFileDialog.getSaveFileName(self, "Save File", "All portfolio charts.pdf", "PDF files (*.pdf)")[
                    0]  # pick path and name for the output file
            self.chart.prepare_all_charts(
                PDF_file_name)  # sheet preparation and the actual export happen inside that method
        else:
            # Without a loaded file, show a hint instead of exporting.
            self.show_msg("Please, import the data file or connect to DB")

    def show_msg(self, msg_text):
        """Pop up an information dialog showing msg_text (hints from buttons/actions)."""
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setWindowTitle("Information")
        msg.setText(msg_text)
        # msg.setInformativeText("InformativeText")
        # msg.setDetailedText("DetailedText")
        okButton = msg.addButton("Ок", QMessageBox.AcceptRole)
        # msg.addButton("I don't care!", QMessageBox.RejectRole)
        msg.exec()
        # if msg.clickedButton() == okButton:
        #     pass
# Application entry point: create the Qt app and show the main window.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
|
{"/main.py": ["/dl_charts.py", "/dl_gui.py"]}
|
38,734
|
Dortov/Chart-Loader
|
refs/heads/main
|
/dl_charts.py
|
# файл Charts c графиками
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from collections import OrderedDict
import datetime
from datetime import datetime
from datetime import date
from dateutil.relativedelta import relativedelta
from PyQt5.QtWidgets import QSizePolicy
import random
class PlotCanvas(FigureCanvas): # определяем бумагу, на которой ниже будем рисовать график и которую будем помещать в виджет в окне
def __init__(self, parent=None, width=6, height=4, dpi=100, chart_type = 'Zero'):
    # `chart_type` selects which chart is drawn into the widget initially.
    # tight_layout prevents chart titles from being clipped inside the figure.
    self.fig = Figure(figsize=(width, height), dpi=dpi, tight_layout=True)
    FigureCanvas.__init__(self, self.fig)
    self.setParent(parent)
    FigureCanvas.setSizePolicy(self,
                               QSizePolicy.Expanding,
                               QSizePolicy.Expanding)
    FigureCanvas.updateGeometry(self)
    self.plot(chart_type)
    # Tracks whether a data file has been loaded; consulted by
    # listview_clicked in the main window and flipped inside calculation().
    self.file_loaded = False
def plot(self, chart_type):
    """Draw the chart selected in the list view onto this canvas."""
    # Rebuild the figure from scratch: axes.clear() is not enough because
    # drawing a pie chart leaves the axes in a state where later framed
    # charts lose their spines and axis labels.
    self.fig.clear()
    self.fig.set_facecolor('seashell')
    self.axes = self.fig.add_subplot(111)
    self.axes.set_facecolor('seashell')
    if chart_type == 'Zero':
        # Startup placeholder: hide frame and axes, show usage instructions.
        for side in ('right', 'top', 'left', 'bottom'):
            self.axes.spines[side].set_color('none')
        self.axes.get_xaxis().set_visible(False)
        self.axes.get_yaxis().set_visible(False)
        self.axes.text(0,0.4,'To START please import the data file (csv): \n\nMenu – Import Data File \n\nor connect to data base: \n\nMenu – Connect to DB \n\n\n\nThank U for using DataLoader!', fontsize=8)
        self.draw()
        return
    if chart_type == 'All charts':
        self.prepare_all_charts()
        self.draw()
        return
    # Each list entry maps to its dedicated chart-preparation method.
    dispatch = {
        '1. Loan Issue Values per Month (9 months)': self.prepare_chart_1,
        '2. Loan Issue Values per Year': self.prepare_chart_2,
        '3. Loan Issue Values in Cities per Month (9 months)': self.prepare_chart_3,
        '4. Loan Quantity Values per Year': self.prepare_chart_4,
        '5. Loan Issue Values Currency per Month (9 months)': self.prepare_chart_5,
        '6. Current Portfolio, Sum per Product': self.prepare_chart_6,
        '7. Current Portfolio, Loans Quantity per City': self.prepare_chart_7,
        '8. Current Portfolio, Moscow': self.prepare_chart_8,
        '9. Current Portfolio, Sankt-Petersburg': self.prepare_chart_9,
        '10. Current Portfolio, Ekaterinburg': self.prepare_chart_10,
        '11. Current Portfolio, Novosibirsk': self.prepare_chart_11,
        '12. Current Portfolio, Sum per City': self.prepare_chart_12,
        '13. Current Portfolio, Currency': self.prepare_chart_13,
    }
    if chart_type in dispatch:
        dispatch[chart_type](self.axes)
        self.draw()
# прорисовку каждого графика прописываем отдельно в своей функции, чтобы помещать несколько графиков в одной figure
def prepare_chart_1(self, axes):
    """Stacked bar chart: monthly loan issue sums by product over 9 months."""
    axes.axis([0,9,0,1.8])
    index = np.arange(len(self.x_issue_9m))
    axes.set(title = 'Loan Issue Values per Month',
             xlabel = 'Issue Month',
             ylabel = 'Sum, billion rub.',
             xticks = index+0.8,
             # xticklabels = self.x_issue_9m
             )
    axes.set_xticklabels(self.x_issue_9m, fontsize = 7)
    axes.bar(index+0.4, self.mortgage_sum_9m, color='b', alpha = 0.8, label='Mortgage')
    axes.bar(index+0.4, self.car_sum_9m, color='g', alpha = 0.8, bottom = np.array(self.mortgage_sum_9m), label='Car lending')
    axes.bar(index+0.4, self.consumer_sum_9m, color='r', alpha = 0.7, bottom = (np.array(self.car_sum_9m) + np.array(self.mortgage_sum_9m)), label='Consumer lending')
    # Annotate each bar segment with its value.
    for x, y in zip(index, self.mortgage_sum_9m):  # zip pairs up the coordinate and value sequences
        axes.text(x + 0.4, y - 0.02, y, ha='center', va = 'top', color = 'white')
    # The y-coordinate of the car-lending label is the running total
    # (mortgage + car), so build a third sequence of y-positions for zip.
    car_sum_y = np.array(self.car_sum_9m) + np.array(self.mortgage_sum_9m)
    for x, y, s in zip(index, car_sum_y, self.car_sum_9m):
        axes.text(x + 0.4, y - 0.02, s, ha='center', va = 'top', color = 'white')
    consumer_sum_y = np.array(self.car_sum_9m) + np.array(self.mortgage_sum_9m) + np.array(self.consumer_sum_9m)
    for x, y, s in zip(index, consumer_sum_y, self.consumer_sum_9m):
        axes.text(x + 0.4, y, s, ha='center', va = 'bottom')
    # Colour the x-axis tick labels per the x_issue_9m_colors list.
    for i,t in zip(self.x_issue_9m_colors,axes.xaxis.get_ticklabels()):
        t.set_color(i)
    axes.legend(loc=0)
    axes.grid(alpha = 0.3)
def prepare_chart_2(self, axes):
    """Stacked bar chart: yearly loan issue sums by product."""
    axes.axis([0,9,0,23])
    index = np.arange(len(self.x_issue_years))
    # index = list(range(len(x_issue_years)))  # does not work: wrong resulting type
    axes.set(title = 'Loan Issue Values per Year',
             xlabel = 'Issue Year',
             ylabel = 'Sum, billion rub.',
             xticks = index+0.8,
             xticklabels = self.x_issue_years
             )
    axes.bar(index+0.4, self.mortgage_sum, color='b', alpha = 0.8, label='Mortgage')
    axes.bar(index+0.4, self.car_sum, color='g', alpha = 0.8, bottom = np.array(self.mortgage_sum), label='Car lending')
    axes.bar(index+0.4, self.consumer_sum, color='r', alpha = 0.7, bottom = (np.array(self.car_sum) + np.array(self.mortgage_sum)), label='Consumer lending')
    # Annotate each bar segment with its value.
    for x, y in zip(index, self.mortgage_sum):  # zip pairs up the coordinate and value sequences
        axes.text(x + 0.4, y - 0.02, y, ha='center', va = 'top', color = 'white')
    # The car-lending label sits at the running total (mortgage + car),
    # so build a dedicated sequence of y-positions for zip.
    car_sum_y = np.array(self.car_sum) + np.array(self.mortgage_sum)
    for x, y, s in zip(index, car_sum_y, self.car_sum):
        axes.text(x + 0.4, y - 0.02, s, ha='center', va = 'top', color = 'white')
    consumer_sum_y = np.array(self.car_sum) + np.array(self.mortgage_sum) + np.array(self.consumer_sum)
    for x, y, s in zip(index, consumer_sum_y, self.consumer_sum):
        axes.text(x + 0.4, y, s, ha='center', va = 'bottom')
    axes.legend(loc = 3)
    axes.grid(alpha = 0.3)
def prepare_chart_3(self, axes):
    """Line chart: monthly loan issue sums per city over 9 months."""
    axes.axis([0, 9, 0, 1])
    index = np.arange(len(self.x_issue_9m))
    axes.set(title='Loan Issue Values in Cities per Month',
             xlabel='Issue Month',
             ylabel='Sum, billion rub.',
             xticks=index + 0.2,
             # xticklabels=self.x_issue_9m
             )
    axes.set_xticklabels(self.x_issue_9m, fontsize = 7)
    axes.plot(index + 0.2, self.moscow_sum_9m, 'b', alpha=0.8, label='Moscow')
    axes.plot(index + 0.2, self.piter_sum_9m, color='g', alpha=0.8, label='Sankt-Petersburg')
    axes.plot(index + 0.2, self.ekat_sum_9m, color='r', alpha=0.9, label='Ekaterinburg')
    axes.plot(index + 0.2, self.novosib_sum_9m, color='m', alpha=0.9, label='Novosibirsk')
    # Annotate each data point with its value, colour-matched to its line.
    for x, y in zip(index, self.moscow_sum_9m):
        axes.text(x + 0.4, y + 0.02, y, ha='center', va='bottom', color='b')
    for x, y in zip(index, self.piter_sum_9m):
        axes.text(x + 0.4, y + 0.02, y, ha='center', va='bottom', color='g')
    for x, y in zip(index, self.ekat_sum_9m):
        axes.text(x + 0.4, y - 0.02, y, ha='left', va='top', color='r')
    for x, y in zip(index, self.novosib_sum_9m):
        axes.text(x + 0.4, y - 0.02, y, ha='right', va='top', color='m')
    # Colour the x-axis tick labels using the x_issue_9m_colors list
    # that was built while preparing the second chart.
    for i, t in zip(self.x_issue_9m_colors, axes.xaxis.get_ticklabels()):
        t.set_color(i)
    axes.legend(loc=0)
    axes.grid(alpha=0.3)
def prepare_chart_4(self, axes):
    """Stacked bar chart: loan counts per year, split by product.

    Reads the ``*_quantity_y`` series (thousands of loans) and the
    ``x_issue_years`` labels prepared by ``calculation``.
    """
    axes.axis([0, 9, 0, 9])
    index = np.arange(len(self.x_issue_years))
    axes.set(title='Loan Quantity Values per Year',
             xlabel='Issue Year',
             ylabel='Quantity, thousand loans',
             xticks=index + 0.8,
             xticklabels=self.x_issue_years
             )
    # Segments are stacked via `bottom=`: mortgage at the base, then car,
    # then consumer on top.
    axes.bar(index + 0.4, self.mortgage_quantity_y, color='b', alpha=0.8, label='Mortgage')
    axes.bar(index + 0.4, self.car_quantity_y, color='g', alpha=0.8, bottom=np.array(self.mortgage_quantity_y),
             label='Car lending')
    axes.bar(index + 0.4, self.consumer_quantity_y, color='r', alpha=0.7,
             bottom=(np.array(self.car_quantity_y) + np.array(self.mortgage_quantity_y)), label='Consumer lending')
    # Write each segment's value just below the segment's top edge.
    for x, y in zip(index, self.mortgage_quantity_y):
        axes.text(x + 0.4, y - 0.02, y, ha='center', va='top', color='white')
    # The y coordinate of the car-segment label is the running stack total
    # (mortgage + car), so a third sequence with those y coordinates is
    # zipped alongside the displayed value.
    car_q_y = np.array(self.car_quantity_y) + np.array(self.mortgage_quantity_y)
    for x, y, s in zip(index, car_q_y, self.car_quantity_y):
        axes.text(x + 0.4, y - 0.02, s, ha='center', va='top', color='white')
    consumer_q_y = np.array(self.car_quantity_y) + np.array(self.mortgage_quantity_y) + np.array(self.consumer_quantity_y)
    for x, y, s in zip(index, consumer_q_y, self.consumer_quantity_y):
        axes.text(x + 0.4, y - 0.02, s, ha='center', va='top', color='white')
    axes.legend(loc=9)
    axes.grid(alpha=0.3)
def prepare_chart_5(self, axes):
    """Line chart: issued sums per month for the last 9 months, by currency.

    Reads the ``rub/usd/eur_sum_9m`` series and the ``x_issue_9m`` labels
    prepared by ``calculation``.
    """
    axes.axis([0, 9, 0, 1.6])
    index = np.arange(len(self.x_issue_9m))
    axes.set(title='Loan Issue Values Currency per Month',
             xlabel='Issue Month',
             ylabel='Sum, billion rub.',
             xticks=index + 0.2,
             # xticklabels=self.x_issue_9m
             )
    axes.set_xticklabels(self.x_issue_9m, fontsize = 7)
    axes.plot(index + 0.2, self.rub_sum_9m, 'b', alpha=0.8, label='RUB')
    axes.plot(index + 0.2, self.usd_sum_9m, color='g', alpha=0.8, label='USD')
    axes.plot(index + 0.2, self.eur_sum_9m, color='r', alpha=0.9, label='EUR')
    # Annotate each data point with its value; offsets/alignments are tuned
    # per series so the three label sets stay readable.
    for x, y in zip(index, self.rub_sum_9m):
        axes.text(x + 0.4, y - 0.02, y, ha='center', va='top', color='b')
    for x, y in zip(index, self.usd_sum_9m):
        axes.text(x + 0.4, y + 0.1, y, ha='right', va='bottom', color='g')
    for x, y in zip(index, self.eur_sum_9m):
        axes.text(x + 0.4, y + 0.07, y, ha='left', va='bottom', color='r')
    # Colour each month tick label by season, using the x_issue_9m_colors
    # list built while preparing the chart-2 data.
    for i, t in zip(self.x_issue_9m_colors, axes.xaxis.get_ticklabels()):
        t.set_color(i)
    axes.legend(loc=0)
    axes.grid(alpha=0.3)
def prepare_chart_6(self, axes):
    """Pie chart: current outstanding sum of the whole portfolio, by product."""
    # Offsets pulling the 2nd and 3rd wedges out of the pie for emphasis.
    wedge_offsets = [0, 0.05, 0.2]
    # 1% of the total portfolio in billions of rubles, shown in the axis label.
    one_pct_bn = round(sum(self.current_common_products.values()) / 100 / 1000000000, 2)
    axes.set(
        title='Whole Portfolio, Current Sum',
        ylabel='Sum, 1% = {} billion rub.'.format(one_pct_bn),
    )
    # startangle rotates the pie (keeps the title and labels from
    # overlapping); autopct prints a percentage on each wedge; shadow adds a
    # drop shadow under the pie.
    axes.pie(
        list(self.current_common_products.values()),
        labels=list(self.current_common_products.keys()),
        colors=self.colors,
        explode=wedge_offsets,
        shadow=True,
        autopct='%1.1f%%',
        startangle=55,
    )
    axes.axis('equal')  # equal aspect ratio keeps the pie circular
    axes.legend(loc=0)
def prepare_chart_7(self, axes):
    """Pie chart: current loan counts of the whole portfolio, by product.

    Consistency fix: the wedge labels are now taken from the same dict that
    supplies the values (``current_common_products_q``) rather than from
    ``current_common_products``. Both dicts share the same keys today, so
    rendering is unchanged, but labels and values can no longer drift apart
    if one dict's key set changes.
    """
    # 1% of the total loan count, in thousands, shown in the axis label.
    one_percent_add = round((sum(self.current_common_products_q.values()) / 100 / 1000), 2)
    axes.set(title='Current Quantity',
             xlabel='Quantity, 1% = {} thousand loans'.format(one_percent_add)
             )
    axes.pie(list(self.current_common_products_q.values()),
             labels=list(self.current_common_products_q.keys()),
             colors=self.colors,
             shadow=True,
             autopct='%1.1f%%',
             startangle=190
             )
    axes.axis('equal')  # equal aspect ratio keeps the pie circular
    axes.legend(loc=0)
def prepare_chart_8(self, axes):
    """Pie chart: current outstanding sum of the Moscow portfolio, by product."""
    # Pull the 2nd and 3rd wedges out of the pie for emphasis.
    slice_offsets = [0, 0.05, 0.2]
    # 1% of the Moscow portfolio in billions of rubles, shown in the axis label.
    pct_bn = round(sum(self.current_moscow_products.values()) / 100 / 1000000000, 2)
    axes.set(
        title='MOSCOW, Current Sum',
        ylabel='Sum, 1% = {} billion rub.'.format(pct_bn),
    )
    axes.pie(
        list(self.current_moscow_products.values()),
        labels=list(self.current_moscow_products.keys()),
        colors=self.colors,
        explode=slice_offsets,
        shadow=True,
        autopct='%1.1f%%',
        startangle=55,
    )
    axes.axis('equal')  # equal aspect ratio keeps the pie circular
    axes.legend(loc=0)
def prepare_chart_9(self, axes):
    """Pie chart: current outstanding sum of the St. Petersburg portfolio, by product."""
    # Pull the 2nd and 3rd wedges out of the pie for emphasis.
    slice_offsets = [0, 0.05, 0.2]
    # 1% of this city's portfolio in billions of rubles, shown in the axis label.
    pct_bn = round(sum(self.current_piter_products.values()) / 100 / 1000000000, 2)
    axes.set(
        title='SANKT-PETERSBURG, Current Sum',
        ylabel='Sum, 1% = {} billion rub.'.format(pct_bn),
    )
    axes.pie(
        list(self.current_piter_products.values()),
        labels=list(self.current_piter_products.keys()),
        colors=self.colors,
        explode=slice_offsets,
        shadow=True,
        autopct='%1.1f%%',
        startangle=55,
    )
    axes.axis('equal')  # equal aspect ratio keeps the pie circular
    axes.legend(loc=0)
def prepare_chart_10(self, axes):
    """Pie chart: current outstanding sum of the Ekaterinburg portfolio, by product."""
    # Pull the 2nd and 3rd wedges out of the pie for emphasis.
    slice_offsets = [0, 0.05, 0.2]
    # 1% of this city's portfolio in billions of rubles, shown in the axis label.
    pct_bn = round(sum(self.current_ekat_products.values()) / 100 / 1000000000, 2)
    axes.set(
        title='EKATERINBURG, Current Sum',
        ylabel='Sum, 1% = {} billion rub.'.format(pct_bn),
    )
    axes.pie(
        list(self.current_ekat_products.values()),
        labels=list(self.current_ekat_products.keys()),
        colors=self.colors,
        explode=slice_offsets,
        shadow=True,
        autopct='%1.1f%%',
        startangle=55,
    )
    axes.axis('equal')  # equal aspect ratio keeps the pie circular
    axes.legend(loc=0)
def prepare_chart_11(self, axes):
    """Pie chart: current outstanding sum of the Novosibirsk portfolio, by product."""
    # Pull the 2nd and 3rd wedges out of the pie for emphasis.
    slice_offsets = [0, 0.05, 0.2]
    # 1% of this city's portfolio in billions of rubles, shown in the axis label.
    pct_bn = round(sum(self.current_novosib_products.values()) / 100 / 1000000000, 2)
    axes.set(
        title='NOVOSIBIRSK, Current Sum',
        ylabel='Sum, 1% = {} billion rub.'.format(pct_bn),
    )
    axes.pie(
        list(self.current_novosib_products.values()),
        labels=list(self.current_novosib_products.keys()),
        colors=self.colors,
        explode=slice_offsets,
        shadow=True,
        autopct='%1.1f%%',
        startangle=55,
    )
    axes.axis('equal')  # equal aspect ratio keeps the pie circular
    axes.legend(loc=0)
def prepare_chart_12(self, axes):
    """Pie chart: current outstanding sum of the whole portfolio, by city."""
    # No wedge is pulled out on this chart (all offsets are zero).
    slice_offsets = [0, 0, 0, 0]
    # 1% of the total portfolio in billions of rubles, shown in the axis label.
    pct_bn = round(sum(self.current_common_cities.values()) / 100 / 1000000000, 2)
    axes.set(
        title='Whole Portfolio, Current Sum in Cities',
        ylabel='Sum, 1% = {} billion rub.'.format(pct_bn),
    )
    axes.pie(
        list(self.current_common_cities.values()),
        labels=list(self.current_common_cities.keys()),
        colors=['lime', 'green', 'red', 'blue'],  # one colour per city
        explode=slice_offsets,
        shadow=True,
        autopct='%1.1f%%',
        startangle=85,
    )
    axes.axis('equal')  # equal aspect ratio keeps the pie circular
    axes.legend(loc=0)
def prepare_chart_13(self, axes):
    """Pie chart: current outstanding sum of the whole portfolio, by currency."""
    # Pull the 2nd and 3rd wedges out of the pie for emphasis.
    slice_offsets = [0, 0.05, 0.3]
    # 1% of the total portfolio in billions of rubles, shown in the axis label.
    pct_bn = round(sum(self.current_common_currency.values()) / 100 / 1000000000, 2)
    axes.set(
        title='Whole Portfolio, Currency',
        ylabel='Sum, 1% = {} billion rub.'.format(pct_bn),
        facecolor='white',
    )
    axes.pie(
        list(self.current_common_currency.values()),
        labels=list(self.current_common_currency.keys()),
        colors=['green', 'lime', 'pink'],  # one colour per currency
        explode=slice_offsets,
        shadow=True,
        autopct='%1.1f%%',
        startangle=0,
    )
    axes.axis('equal')  # equal aspect ratio keeps the pie circular
    axes.legend(loc=0)
def prepare_all_charts(self, PDF_file_name):
    """Render all 13 charts onto one large figure and save it to *PDF_file_name*.

    *PDF_file_name* is the output path handed over by the main module.
    """
    fig = plt.figure()
    fig.set_figwidth(28)
    fig.set_figheight(14)
    fig.subplots_adjust(wspace=0.2, hspace=0.5, left=0.1, right=0.9, top=0.9, bottom=0.1)
    # One entry per panel: the (rows, cols, position) subplot spec and the
    # method that draws on it. Order matters: axes are created, coloured and
    # rendered in exactly this sequence.
    panels = [
        ((4, 3, 2), self.prepare_chart_2),    # first two rows of the grid
        ((4, 3, 3), self.prepare_chart_3),
        ((4, 3, 5), self.prepare_chart_4),
        ((4, 3, 6), self.prepare_chart_5),
        ((4, 4, 10), self.prepare_chart_8),   # second pair of rows
        ((4, 4, 11), self.prepare_chart_9),
        ((4, 4, 12), self.prepare_chart_12),
        ((4, 4, 14), self.prepare_chart_10),
        ((4, 4, 15), self.prepare_chart_11),
        ((4, 4, 16), self.prepare_chart_13),
        ((2, 3, 1), self.prepare_chart_1),    # stand-alone panels
        ((2, 4, 5), self.prepare_chart_6),
        ((6, 8, 42), self.prepare_chart_7),
    ]
    # Create every axes first, then paint the backgrounds, then render — the
    # same order the statements followed before this refactor.
    axes_list = [fig.add_subplot(*spec) for spec, _ in panels]
    for ax in axes_list:
        ax.set_facecolor('seashell')
    for ax, (_, render) in zip(axes_list, panels):
        render(ax)
    # Dump the composed figure to the requested file.
    fig.savefig(PDF_file_name)
def calculation(self, df):
    """Build every dataset the charts draw from, out of the loaded dataframe.

    Expects *df* with columns ``ID``, ``type`` ('mortgage' / 'car loan' /
    'consumer'), ``city``, ``currency`` ('RUB'/'USD'/'EUR'), ``sum_issue``,
    ``date_issue`` ('%Y-%m-%d' strings) and ``sum_today_RUB``.
    NOTE(review): ``sum_today_RUB`` is read but never created here —
    presumably supplied by the data file; confirm against the loader.
    Populates the ``self.*`` aggregates consumed by prepare_chart_1..13.

    Fixes vs the previous revision:
      * the year-unification loop chained its checks with ``elif``, so a year
        missing from more than one product dict was only backfilled into the
        first; backfilled years were also appended at the end of the dicts,
        misaligning ``.values()`` with the sorted x axis — now independent
        ``if``s followed by a re-sort;
      * the sorted 9-month *currency* dicts were assigned back to the *city*
        attributes (moscow/piter/ekat), clobbering the city data and leaving
        the currency series unsorted — now assigned to rub/usd/eur.
    """
    self.file_loaded = True  # remember that a data file has been loaded
    # NOTE(review): no validation of the loaded file is implemented.
    # Fixed internal FX rates used for group-wide reporting; not expected to change.
    usd = 77
    eur = 91

    def billion(x):
        # Scale to billions of rubles, rounded to 2 decimals.
        return round(x / 1000000000, 2)

    def thousand(x):
        # Scale to thousands, rounded to 2 decimals.
        return round(x / 1000, 2)

    # Add a column with the issued amount converted to rubles at the fixed rates.
    df['sum_issue_RUB'] = 0
    for i in range(len(df['ID'])):
        if df.loc[i, 'currency'] == 'EUR':
            df.loc[i, 'sum_issue_RUB'] = df.loc[i, 'sum_issue'] * eur
        elif df.loc[i, 'currency'] == 'USD':
            df.loc[i, 'sum_issue_RUB'] = df.loc[i, 'sum_issue'] * usd
        else:
            df.loc[i, 'sum_issue_RUB'] = df.loc[i, 'sum_issue']

    # 1. Chart 1 data: issued sums per year, by product.
    # 1.1 One dict per product keyed by issue year. Keys are discovered from
    # the data because the covered period is not known up front, and the three
    # products may have been issued over different year ranges.
    self.mortgage_issue = {}
    self.car_issue = {}
    self.consumer_issue = {}
    for i in range(len(df['sum_issue_RUB'])):
        d = datetime.strptime(df.loc[i, 'date_issue'], "%Y-%m-%d").year
        if (df.loc[i, 'type'] == 'mortgage' and d not in self.mortgage_issue):
            self.mortgage_issue.setdefault(d, 0)
        elif (df.loc[i, 'type'] == 'consumer' and d not in self.consumer_issue):
            self.consumer_issue.setdefault(d, 0)
        elif (df.loc[i, 'type'] == 'car loan' and d not in self.car_issue):
            self.car_issue.setdefault(d, 0)
    # Accumulate issued ruble sums per year and product.
    for i in range(len(df['sum_issue_RUB'])):
        d = datetime.strptime(df.loc[i, 'date_issue'], "%Y-%m-%d").year
        if df.loc[i, 'type'] == 'mortgage':
            self.mortgage_issue[d] += df.loc[i, 'sum_issue_RUB']
        elif df.loc[i, 'type'] == 'car loan':
            self.car_issue[d] += df.loc[i, 'sum_issue_RUB']
        elif df.loc[i, 'type'] == 'consumer':
            self.consumer_issue[d] += df.loc[i, 'sum_issue_RUB']
    # 1.2 Merge the three key sets into one sorted, de-duplicated x-axis list.
    self.mortgage_years = list(self.mortgage_issue.keys())
    self.car_years = list(self.car_issue.keys())
    self.consumer_years = list(self.consumer_issue.keys())
    self.x_issue_years = sorted(set(self.mortgage_years + self.car_years + self.consumer_years))
    # Unify the dicts: make sure every year appears in all three, adding
    # missing years with a 0 value.
    # BUGFIX: these checks were chained with elif, so a year absent from
    # several dicts was only backfilled into the first that missed it.
    for i in self.x_issue_years:
        if i not in self.mortgage_issue:
            self.mortgage_issue[i] = 0
        if i not in self.car_issue:
            self.car_issue[i] = 0
        if i not in self.consumer_issue:
            self.consumer_issue[i] = 0
    # Sort by year ascending AFTER backfilling, so .values() below is aligned
    # with x_issue_years (previously backfilled years were appended last).
    self.mortgage_issue = dict(sorted(self.mortgage_issue.items(), key=lambda t: t[0]))
    self.consumer_issue = dict(sorted(self.consumer_issue.items(), key=lambda t: t[0]))
    self.car_issue = dict(sorted(self.car_issue.items(), key=lambda t: t[0]))
    # y-axis series, scaled to billions; aligned with x_issue_years.
    self.mortgage_sum = list(map(billion, self.mortgage_issue.values()))
    self.car_sum = list(map(billion, self.car_issue.values()))
    self.consumer_sum = list(map(billion, self.consumer_issue.values()))

    # 2. Chart 2 data: issued sums for the last 9 months, by product.
    # 2.1 One dict per product, keyed by the last 9 month dates counted back
    # from today. Three dicts, in case some product had no issues in a month.
    self.mortgage_issue_9m = {}
    self.car_issue_9m = {}
    self.consumer_issue_9m = {}
    day = date.today()
    for i in range(9):
        self.mortgage_issue_9m.setdefault(day, 0)
        self.car_issue_9m.setdefault(day, 0)
        self.consumer_issue_9m.setdefault(day, 0)
        day = day - relativedelta(months=1)
    # The three dicts share the same keys, so each row is matched (by year and
    # month) against one dict's keys and routed to the dict for its product.
    for i in range(len(df['sum_issue_RUB'])):
        y = datetime.strptime(df.loc[i, 'date_issue'], "%Y-%m-%d").year
        m = datetime.strptime(df.loc[i, 'date_issue'], "%Y-%m-%d").month
        for j in self.mortgage_issue_9m:
            if (j.month == m and j.year == y):
                if df.loc[i, 'type'] == 'mortgage':
                    self.mortgage_issue_9m[j] += df.loc[i, 'sum_issue_RUB']
                elif df.loc[i, 'type'] == 'car loan':
                    self.car_issue_9m[j] += df.loc[i, 'sum_issue_RUB']
                elif df.loc[i, 'type'] == 'consumer':
                    self.consumer_issue_9m[j] += df.loc[i, 'sum_issue_RUB']
    # Sort by month key ascending.
    self.mortgage_issue_9m = dict(sorted(self.mortgage_issue_9m.items(), key=lambda t: t[0]))
    self.consumer_issue_9m = dict(sorted(self.consumer_issue_9m.items(), key=lambda t: t[0]))
    self.car_issue_9m = dict(sorted(self.car_issue_9m.items(), key=lambda t: t[0]))
    # 2.2 x-axis labels from the (shared) keys, reformatted as "Mon YYYY".
    self.x_issue_9m = sorted(list(self.mortgage_issue_9m.keys()))
    for i in range(len(self.x_issue_9m)):
        self.x_issue_9m[i] = datetime.strftime(self.x_issue_9m[i], "%b %Y")
    # Colour for each month label, by season.
    self.x_issue_9m_colors = []
    for i in self.x_issue_9m:
        m = datetime.strptime(i, "%b %Y").month
        if m in [12, 1, 2]:
            self.x_issue_9m_colors.append('blue')
        elif m in [3, 4, 5]:
            self.x_issue_9m_colors.append('dimgrey')
        elif m in [6, 7, 8]:
            self.x_issue_9m_colors.append('green')
        else:
            self.x_issue_9m_colors.append('darkorange')
    # y-axis series, scaled to billions.
    self.mortgage_sum_9m = list(map(billion, self.mortgage_issue_9m.values()))
    self.car_sum_9m = list(map(billion, self.car_issue_9m.values()))
    self.consumer_sum_9m = list(map(billion, self.consumer_issue_9m.values()))

    # 3. Chart data: loan counts per year, by product.
    # 3.1 Year keys are reused from x_issue_years — no need to re-parse dates.
    self.mortgage_issue_quantity_y = {}
    self.consumer_issue_quantity_y = {}
    self.car_issue_quantity_y = {}
    for i in self.x_issue_years:
        self.mortgage_issue_quantity_y.setdefault(i, 0)
        self.car_issue_quantity_y.setdefault(i, 0)
        self.consumer_issue_quantity_y.setdefault(i, 0)
    # Count loans per year and product.
    for i in range(len(df['sum_issue_RUB'])):
        d = datetime.strptime(df.loc[i, 'date_issue'], "%Y-%m-%d").year
        if df.loc[i, 'type'] == 'mortgage':
            self.mortgage_issue_quantity_y[d] += 1
        elif df.loc[i, 'type'] == 'car loan':
            self.car_issue_quantity_y[d] += 1
        elif df.loc[i, 'type'] == 'consumer':
            self.consumer_issue_quantity_y[d] += 1
    # 3.2 y-axis series, scaled to thousands.
    self.mortgage_quantity_y = list(map(thousand, self.mortgage_issue_quantity_y.values()))
    self.car_quantity_y = list(map(thousand, self.car_issue_quantity_y.values()))
    self.consumer_quantity_y = list(map(thousand, self.consumer_issue_quantity_y.values()))

    # 4. Issued sums for the last 9 months, by city (analogous to section 2).
    self.moscow_issue_9m = {}
    self.piter_issue_9m = {}
    self.ekat_issue_9m = {}
    self.novosib_issue_9m = {}
    day = date.today()
    for i in range(9):
        self.moscow_issue_9m.setdefault(day, 0)
        self.piter_issue_9m.setdefault(day, 0)
        self.ekat_issue_9m.setdefault(day, 0)
        self.novosib_issue_9m.setdefault(day, 0)
        day = day - relativedelta(months=1)
    for i in range(len(df['sum_issue_RUB'])):
        y = datetime.strptime(df.loc[i, 'date_issue'], "%Y-%m-%d").year
        m = datetime.strptime(df.loc[i, 'date_issue'], "%Y-%m-%d").month
        for j in self.moscow_issue_9m:
            if (j.month == m and j.year == y):
                if df.loc[i, 'city'] == 'Москва':
                    self.moscow_issue_9m[j] += df.loc[i, 'sum_issue_RUB']
                elif df.loc[i, 'city'] == 'Санкт-Петербург':
                    self.piter_issue_9m[j] += df.loc[i, 'sum_issue_RUB']
                elif df.loc[i, 'city'] == 'Новосибирск':
                    self.novosib_issue_9m[j] += df.loc[i, 'sum_issue_RUB']
                elif df.loc[i, 'city'] == 'Екатеринбург':
                    self.ekat_issue_9m[j] += df.loc[i, 'sum_issue_RUB']
    # Sort by month key ascending.
    self.moscow_issue_9m = dict(sorted(self.moscow_issue_9m.items(), key=lambda t: t[0]))
    self.piter_issue_9m = dict(sorted(self.piter_issue_9m.items(), key=lambda t: t[0]))
    self.ekat_issue_9m = dict(sorted(self.ekat_issue_9m.items(), key=lambda t: t[0]))
    self.novosib_issue_9m = dict(sorted(self.novosib_issue_9m.items(), key=lambda t: t[0]))
    # 4.2 y-axis series, scaled to billions (x labels reuse x_issue_9m).
    self.moscow_sum_9m = list(map(billion, self.moscow_issue_9m.values()))
    self.piter_sum_9m = list(map(billion, self.piter_issue_9m.values()))
    self.ekat_sum_9m = list(map(billion, self.ekat_issue_9m.values()))
    self.novosib_sum_9m = list(map(billion, self.novosib_issue_9m.values()))

    # 5. Issued sums for the last 9 months, by currency (analogous to section 2).
    self.rub_issue_9m = {}
    self.usd_issue_9m = {}
    self.eur_issue_9m = {}
    day = date.today()
    for i in range(9):
        self.rub_issue_9m.setdefault(day, 0)
        self.usd_issue_9m.setdefault(day, 0)
        self.eur_issue_9m.setdefault(day, 0)
        day = day - relativedelta(months=1)
    for i in range(len(df['sum_issue_RUB'])):
        y = datetime.strptime(df.loc[i, 'date_issue'], "%Y-%m-%d").year
        m = datetime.strptime(df.loc[i, 'date_issue'], "%Y-%m-%d").month
        for j in self.rub_issue_9m:
            if (j.month == m and j.year == y):
                if df.loc[i, 'currency'] == 'RUB':
                    self.rub_issue_9m[j] += df.loc[i, 'sum_issue_RUB']
                elif df.loc[i, 'currency'] == 'USD':
                    self.usd_issue_9m[j] += df.loc[i, 'sum_issue_RUB']
                elif df.loc[i, 'currency'] == 'EUR':
                    self.eur_issue_9m[j] += df.loc[i, 'sum_issue_RUB']
    # BUGFIX: the sorted currency dicts were previously assigned back to the
    # city attributes (moscow/piter/ekat _issue_9m), clobbering the city data
    # and leaving the currency dicts unsorted.
    self.rub_issue_9m = dict(sorted(self.rub_issue_9m.items(), key=lambda t: t[0]))
    self.usd_issue_9m = dict(sorted(self.usd_issue_9m.items(), key=lambda t: t[0]))
    self.eur_issue_9m = dict(sorted(self.eur_issue_9m.items(), key=lambda t: t[0]))
    # 5.2 y-axis series, scaled to billions (x labels reuse x_issue_9m).
    self.rub_sum_9m = list(map(billion, self.rub_issue_9m.values()))
    self.usd_sum_9m = list(map(billion, self.usd_issue_9m.values()))
    self.eur_sum_9m = list(map(billion, self.eur_issue_9m.values()))

    # Charts 6-13 (pie charts): current portfolio breakdowns. The key sets
    # are fixed and known up front, so the dicts are pre-filled with zeros.
    self.current_common_products = {'mortgage':0, 'car loan':0, 'consumer':0}    # whole portfolio: current sums by product
    self.current_common_products_q = {'mortgage':0, 'car loan':0, 'consumer':0}  # whole portfolio: loan counts by product
    self.current_moscow_products = {'mortgage':0, 'car loan':0, 'consumer':0}    # Moscow: current sums by product
    self.current_piter_products = {'mortgage':0, 'car loan':0, 'consumer':0}     # St. Petersburg: current sums by product
    self.current_ekat_products = {'mortgage':0, 'car loan':0, 'consumer':0}      # Ekaterinburg: current sums by product
    self.current_novosib_products = {'mortgage':0, 'car loan':0, 'consumer':0}   # Novosibirsk: current sums by product
    self.current_common_cities = {'Москва':0,'Санкт-Петербург':0,'Новосибирск':0,'Екатеринбург':0}  # current sums by city
    self.current_common_currency = {'USD':0,'EUR':0,'RUB':0}  # current sums by currency
    self.colors = ['deepskyblue', 'lime', 'red']  # shared wedge colours for the product pies
    # Accumulate current ruble balances into every breakdown.
    for i in range(len(df['sum_issue_RUB'])):
        self.current_common_products[df.loc[i,'type']] += int(df.loc[i,'sum_today_RUB'])
        self.current_common_products_q[df.loc[i,'type']] += 1
        self.current_common_cities[df.loc[i,'city']] += int(df.loc[i,'sum_today_RUB'])
        self.current_common_currency[df.loc[i,'currency']] += int(df.loc[i,'sum_today_RUB'])
        if df.loc[i,'city'] == 'Москва':
            self.current_moscow_products[df.loc[i,'type']] += int(df.loc[i,'sum_today_RUB'])
        if df.loc[i,'city'] == 'Санкт-Петербург':
            self.current_piter_products[df.loc[i,'type']] += int(df.loc[i,'sum_today_RUB'])
        if df.loc[i,'city'] == 'Новосибирск':
            self.current_novosib_products[df.loc[i,'type']] += int(df.loc[i,'sum_today_RUB'])
        if df.loc[i,'city'] == 'Екатеринбург':
            self.current_ekat_products[df.loc[i,'type']] += int(df.loc[i,'sum_today_RUB'])
|
{"/main.py": ["/dl_charts.py", "/dl_gui.py"]}
|
38,735
|
Dortov/Chart-Loader
|
refs/heads/main
|
/dl_gui.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dl_gui.ui'
#
# Created by: PyQt5 UI code generator 5.12.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated (pyuic5) widget layout for the application main window.

    NOTE: this file is regenerated from dl_gui.ui — any edits (including these
    comments) are lost on regeneration; change the .ui file instead.
    """
    def setupUi(self, MainWindow):
        """Create and lay out all widgets on *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(848, 436)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Left-hand area with a vertical layout (v_box_chart) — presumably
        # where the chart canvas is embedded by the main module; confirm there.
        self.widget = QtWidgets.QWidget(self.centralwidget)
        self.widget.setGeometry(QtCore.QRect(10, 0, 581, 391))
        self.widget.setObjectName("widget")
        self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.widget)
        self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(0, 0, 581, 391))
        self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
        self.v_box_chart = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
        self.v_box_chart.setContentsMargins(0, 0, 0, 0)
        self.v_box_chart.setObjectName("v_box_chart")
        # Right-hand area: a vertical layout (v_box) holding a list widget.
        self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(600, 0, 241, 391))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.v_box = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.v_box.setContentsMargins(0, 0, 0, 0)
        self.v_box.setObjectName("v_box")
        self.list = QtWidgets.QListWidget(self.verticalLayoutWidget)
        self.list.setObjectName("list")
        self.v_box.addWidget(self.list)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar: Menu -> {Import data file, Connect to DB, Export -> {...}}.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 848, 21))
        self.menubar.setObjectName("menubar")
        self.menuMenu = QtWidgets.QMenu(self.menubar)
        self.menuMenu.setObjectName("menuMenu")
        self.menuExport = QtWidgets.QMenu(self.menuMenu)
        self.menuExport.setObjectName("menuExport")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Menu actions; their handlers are connected elsewhere.
        self.actionImport_data_file = QtWidgets.QAction(MainWindow)
        self.actionImport_data_file.setObjectName("actionImport_data_file")
        self.actionConnect_to_DB = QtWidgets.QAction(MainWindow)
        self.actionConnect_to_DB.setObjectName("actionConnect_to_DB")
        self.actionCurrent_chart_to_PDF = QtWidgets.QAction(MainWindow)
        self.actionCurrent_chart_to_PDF.setObjectName("actionCurrent_chart_to_PDF")
        self.actionAll_charts_to_PDF = QtWidgets.QAction(MainWindow)
        self.actionAll_charts_to_PDF.setObjectName("actionAll_charts_to_PDF")
        self.menuExport.addAction(self.actionCurrent_chart_to_PDF)
        self.menuExport.addAction(self.actionAll_charts_to_PDF)
        self.menuMenu.addAction(self.actionImport_data_file)
        self.menuMenu.addAction(self.actionConnect_to_DB)
        self.menuMenu.addAction(self.menuExport.menuAction())
        self.menubar.addAction(self.menuMenu.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (kept separate for Qt translation)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.menuMenu.setTitle(_translate("MainWindow", "Menu"))
        self.menuExport.setTitle(_translate("MainWindow", "Export"))
        self.actionImport_data_file.setText(_translate("MainWindow", "Import data file"))
        self.actionConnect_to_DB.setText(_translate("MainWindow", "Connect to DB"))
        self.actionCurrent_chart_to_PDF.setText(_translate("MainWindow", "Current chart to PDF"))
        self.actionAll_charts_to_PDF.setText(_translate("MainWindow", "All charts to PDF"))
if __name__ == "__main__":
    # Manual preview entry point: show the generated window stand-alone.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
|
{"/main.py": ["/dl_charts.py", "/dl_gui.py"]}
|
38,737
|
Nguyephi/ticketbox_python
|
refs/heads/master
|
/migrations/versions/dadbd821288e_.py
|
"""empty message
Revision ID: dadbd821288e
Revises: 1778db3149ec
Create Date: 2019-07-07 16:34:16.930812
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dadbd821288e'       # this migration's id
down_revision = '1778db3149ec'  # parent migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Apply: make like_event FK columns nullable and add the foreign keys."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('like_event', 'event_id',
                    existing_type=sa.INTEGER(),
                    nullable=True)
    op.alter_column('like_event', 'user_id',
                    existing_type=sa.INTEGER(),
                    nullable=True)
    # NOTE(review): constraint name None lets the DB / naming convention pick
    # one — verify a naming convention is configured, otherwise the matching
    # drop_constraint(None, ...) in downgrade() cannot identify it.
    op.create_foreign_key(None, 'like_event', 'users', ['user_id'], ['id'])
    op.create_foreign_key(None, 'like_event', 'events', ['event_id'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Revert: drop the foreign keys and make the columns NOT NULL again."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): drop_constraint with a None name is likely to fail at
    # runtime unless Alembic can resolve the auto-generated constraint names
    # via a naming convention — confirm before relying on this downgrade.
    op.drop_constraint(None, 'like_event', type_='foreignkey')
    op.drop_constraint(None, 'like_event', type_='foreignkey')
    op.alter_column('like_event', 'user_id',
                    existing_type=sa.INTEGER(),
                    nullable=False)
    op.alter_column('like_event', 'event_id',
                    existing_type=sa.INTEGER(),
                    nullable=False)
    # ### end Alembic commands ###
|
{"/app.py": ["/components/users/__init__.py", "/components/events/__init__.py", "/models/model.py", "/components/events/forms/forms.py"], "/components/users/__init__.py": ["/components/users/forms/forms.py", "/models/model.py"], "/components/users/forms/forms.py": ["/models/model.py"], "/components/events/__init__.py": ["/models/model.py", "/components/events/forms/forms.py"], "/components/events/forms/forms.py": ["/models/model.py"]}
|
38,738
|
Nguyephi/ticketbox_python
|
refs/heads/master
|
/app.py
|
import os
from flask import Flask, render_template, redirect, url_for, request, flash
from components.users import users_blueprint
from components.events import events_blueprint
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import login_user, login_required, logout_user, LoginManager, UserMixin, current_user
from werkzeug.security import generate_password_hash, check_password_hash
from models.model import db, User, RatingUser, TicketType
from components.events.forms.forms import Event, UploadPhoto
# Application and extension wiring.
app = Flask(__name__)
# NOTE(review): hard-coded secret key — session/CSRF security depends on it;
# it should come from the environment in production.
app.config['SECRET_KEY'] = 'super secret'
# Event cover images are stored under ./static/cover_img relative to the CWD.
app.config['UPLOADED_PHOTOS_DEST'] = os.getcwd() + '/static/cover_img'
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
patch_request_class(app)  # caps upload size at the flask_uploads default
db.init_app(app)
migrate = Migrate(app, db, compare_type=True)
login_manager = LoginManager(app)
login_manager.login_view = 'users.login'  # redirect target for @login_required
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: fetch the user for the id stored in the session.

    Returns None when no such user exists. NOTE(review): *user_id* arrives as
    a string — confirm the primary-key lookup accepts it, or cast with int().
    """
    return User.query.get(user_id)
# Database credentials come from the environment; os.environ[...] raises
# KeyError at import time if any of these is unset.
POSTGRES = {
    'user': os.environ['POSTGRES_USER'],
    'pw': os.environ['POSTGRES_PWD'],
    'db': os.environ['POSTGRES_DB'],
    'host': os.environ['POSTGRES_HOST'],
    'port': os.environ['POSTGRES_PORT'],
}
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://%(user)s:%(pw)s@%(host)s:\
%(port)s/%(db)s' % POSTGRES
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
@app.route('/', methods=['GET', 'POST'])
def index():
    """Home page: list every event.

    NOTE(review): ``Event`` is imported here from
    components.events.forms.forms, which re-exports the model from
    models.model -- importing it from models.model directly would be clearer.
    """
    events = Event.query.all()
    return render_template('index.html', events=events)
# Mount the feature blueprints under their URL prefixes.
app.register_blueprint(users_blueprint, url_prefix='/users')
app.register_blueprint(events_blueprint, url_prefix='/events')
if __name__ == '__main__':
    # Dev server only; debug must be off in production.
    app.run(debug=True)
|
{"/app.py": ["/components/users/__init__.py", "/components/events/__init__.py", "/models/model.py", "/components/events/forms/forms.py"], "/components/users/__init__.py": ["/components/users/forms/forms.py", "/models/model.py"], "/components/users/forms/forms.py": ["/models/model.py"], "/components/events/__init__.py": ["/models/model.py", "/components/events/forms/forms.py"], "/components/events/forms/forms.py": ["/models/model.py"]}
|
38,739
|
Nguyephi/ticketbox_python
|
refs/heads/master
|
/components/users/__init__.py
|
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import login_user, login_required, logout_user, LoginManager, UserMixin, current_user
from components.users.forms.forms import SignUpForm, LogInForm, EditProfile
from models.model import User, db, Event
from flask import Blueprint, render_template, redirect, url_for, flash
from flask_login import current_user
from flask_wtf.file import FileField, FileAllowed
users_blueprint = Blueprint('users', __name__, template_folder='templates')
@users_blueprint.route('/signup', methods=['GET', 'POST'])
def signup():
    """Render the sign-up form; on a valid submit create the account and
    redirect to the index page."""
    form = SignUpForm()
    if not form.validate_on_submit():
        # Plain GET or failed validation: show the form (with errors) again.
        return render_template('signup.html', form=form)
    account = User(name=form.name.data, username=form.username.data,
                   email=form.email.data)
    account.set_password(form.password.data)
    flash(
        f'Hey {form.username.data.capitalize()}, you have successfully created a new account! Please login to create and buy events.', 'success')
    db.session.add(account)
    db.session.commit()
    return redirect(url_for('index'))
@users_blueprint.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user; re-render the login page with field errors on failure."""
    form = LogInForm()
    if form.validate_on_submit():
        # preprocess username
        username = form.username.data.strip()
        log_user = User.query.filter_by(username=username).first()
        if log_user is None:
            # flash('Invalid Username')
            form.username.errors.append("Invalid Username")
            return render_template('login.html', form=form, signup_modal_form=SignUpForm())
        if not log_user.check_password(form.password.data):
            form.password.errors.append("Invalid password")
            return render_template('login.html', form=form, signup_modal_form=SignUpForm())
        login_user(log_user)
        # NOTE(review): renders welcome.html directly instead of redirecting
        # (POST/redirect/GET), so a browser refresh re-submits credentials.
        return render_template('welcome.html')
    return render_template('login.html', form=form, signup_modal_form=SignUpForm())
@users_blueprint.route('/welcome')
@login_required
def welcome():
    """Post-login landing page (login required)."""
    return render_template('welcome.html')
@users_blueprint.route('/logout')
def logout():
    """End the session and return to the index page."""
    logout_user()
    return redirect(url_for('index'))
@users_blueprint.route('/editprofile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    """Update the logged-in user's name/username/email from the form.

    NOTE(review): the form is not pre-populated with the current values on
    GET, and the success flash fires before the commit.
    """
    form = EditProfile()
    if form.validate_on_submit():
        current_user.name = form.name.data
        # Usernames are normalized to lowercase on save.
        current_user.username = form.username.data.lower()
        current_user.email = form.email.data
        flash(f'Your account has been updated!', 'success')
        db.session.commit()
    return render_template('profile.html', form=form)
@users_blueprint.route('/<int:user_id>/delete')
@login_required
def delete_user(user_id):
    """Delete the account identified by user_id, but only when it belongs
    to the logged-in user.  Always redirects back to the index page.
    """
    user = User.query.filter_by(id=user_id).first()
    # BUG FIX: .first() returns None for an unknown id; the original code
    # then raised AttributeError on ``user.id``.
    if user is not None and user.id == current_user.id:
        flash('Account deleted!', 'success')
        db.session.delete(user)
        db.session.commit()
    return redirect(url_for('index'))
@users_blueprint.route('/eventscreated')
@login_required
def users_events():
    """List the events created by the logged-in user.

    ``creator_id`` on User is a relationship to Event (see
    models.model.User), so it yields Event rows, not an integer id.
    """
    user = current_user
    events = user.creator_id
    return render_template('users_events.html', events=events, user=user)
|
{"/app.py": ["/components/users/__init__.py", "/components/events/__init__.py", "/models/model.py", "/components/events/forms/forms.py"], "/components/users/__init__.py": ["/components/users/forms/forms.py", "/models/model.py"], "/components/users/forms/forms.py": ["/models/model.py"], "/components/events/__init__.py": ["/models/model.py", "/components/events/forms/forms.py"], "/components/events/forms/forms.py": ["/models/model.py"]}
|
38,740
|
Nguyephi/ticketbox_python
|
refs/heads/master
|
/migrations/versions/1778db3149ec_.py
|
"""empty message
Revision ID: 1778db3149ec
Revises: 0ea6fdd0bc88
Create Date: 2019-07-07 16:14:44.629456
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1778db3149ec'
down_revision = '0ea6fdd0bc88'
branch_labels = None
depends_on = None
def upgrade():
    """Move the like counter onto events and add the per-user isLiked flag."""
    # ### commands auto generated by Alembic - please adjust! ###
    # BUG FIX: like_count is NOT NULL, so a server_default is required for the
    # ALTER to succeed on a table that already contains rows (autogenerate
    # omits it).  '0' matches the model default (models.model.Event.like_count).
    op.add_column('events', sa.Column('like_count', sa.Integer(), nullable=False, server_default='0'))
    op.drop_column('events', 'like')
    op.add_column('like_event', sa.Column('isLiked', sa.Integer(), nullable=True))
    op.drop_column('like_event', 'like_count')
    # ### end Alembic commands ###
def downgrade():
    """Reverse of upgrade: restore like_event.like_count and events.like."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('like_event', sa.Column('like_count', sa.INTEGER(), autoincrement=False, nullable=True))
    op.drop_column('like_event', 'isLiked')
    # NOTE(review): adding a NOT NULL column without a server_default fails on
    # a table that already has rows -- confirm before running this downgrade.
    op.add_column('events', sa.Column('like', sa.INTEGER(), autoincrement=False, nullable=False))
    op.drop_column('events', 'like_count')
    # ### end Alembic commands ###
|
{"/app.py": ["/components/users/__init__.py", "/components/events/__init__.py", "/models/model.py", "/components/events/forms/forms.py"], "/components/users/__init__.py": ["/components/users/forms/forms.py", "/models/model.py"], "/components/users/forms/forms.py": ["/models/model.py"], "/components/events/__init__.py": ["/models/model.py", "/components/events/forms/forms.py"], "/components/events/forms/forms.py": ["/models/model.py"]}
|
38,741
|
Nguyephi/ticketbox_python
|
refs/heads/master
|
/migrations/versions/29986b354dbb_.py
|
"""empty message
Revision ID: 29986b354dbb
Revises: dadbd821288e
Create Date: 2019-07-08 11:20:03.678173
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '29986b354dbb'
down_revision = 'dadbd821288e'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ticket_types and orders tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    # ticket_types must exist first: orders FK-references it below.
    op.create_table('ticket_types',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('event_id', sa.Integer(), nullable=True),
    sa.Column('ticket_type', sa.String(length=15), nullable=True),
    sa.Column('ticket_price', sa.Integer(), nullable=True),
    sa.Column('quantity', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('orders',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('tickettype_id', sa.Integer(), nullable=True),
    sa.Column('buyer_id', sa.Integer(), nullable=True),
    sa.Column('event_id', sa.Integer(), nullable=True),
    sa.Column('quantity', sa.Integer(), nullable=True),
    sa.Column('total_bill', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['buyer_id'], ['users.id'], ),
    sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
    sa.ForeignKeyConstraint(['tickettype_id'], ['ticket_types.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the two tables in reverse dependency order (orders references
    ticket_types)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('orders')
    op.drop_table('ticket_types')
    # ### end Alembic commands ###
|
{"/app.py": ["/components/users/__init__.py", "/components/events/__init__.py", "/models/model.py", "/components/events/forms/forms.py"], "/components/users/__init__.py": ["/components/users/forms/forms.py", "/models/model.py"], "/components/users/forms/forms.py": ["/models/model.py"], "/components/events/__init__.py": ["/models/model.py", "/components/events/forms/forms.py"], "/components/events/forms/forms.py": ["/models/model.py"]}
|
38,742
|
Nguyephi/ticketbox_python
|
refs/heads/master
|
/components/users/forms/forms.py
|
from flask_wtf import FlaskForm
from flask_login import current_user
from wtforms.fields import StringField, PasswordField, SubmitField
from wtforms.validators import InputRequired, DataRequired, ValidationError
from models.model import User
class SignUpForm(FlaskForm):
    """Registration form; rejects usernames/e-mails already in the DB.

    NOTE(review): pass_confirm is never compared against password (no
    EqualTo validator) -- confirm whether that check is intended.
    """
    name = StringField('Name', validators=[InputRequired()])
    username = StringField('Username', validators=[InputRequired()])
    email = StringField('Email', validators=[InputRequired()])
    password = PasswordField('Password', validators=[InputRequired()])
    pass_confirm = PasswordField(
        'Confirm Password', validators=[DataRequired()])
    submit = SubmitField('Sign Up')

    # WTForms invokes validate_<fieldname> hooks automatically.
    def validate_username(self, field):
        if User.query.filter_by(username=field.data).first():
            raise ValidationError("Your username has been registered!")

    def validate_email(self, field):
        if User.query.filter_by(email=field.data).first():
            raise ValidationError("Email has been used!")
class LogInForm(FlaskForm):
    """Simple username/password login form."""
    username = StringField('Username', validators=[
                           DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    submit = SubmitField('Login')
class EditProfile(FlaskForm):
    """Profile-editing form.

    Uniqueness checks must fire only when a value actually *changed*;
    otherwise saving an unmodified profile is rejected as a duplicate.
    """
    name = StringField('Name', validators=[InputRequired()])
    username = StringField('Username', validators=[InputRequired()])
    email = StringField('Email', validators=[InputRequired()])
    submit = SubmitField('Update Profile')

    def validate_username(self, username):
        # BUG FIX: the original compared with `==`, so the check ran only
        # when the username was UNchanged (always flagging the user's own
        # name as taken) and let a *changed* username collide unchecked.
        if username.data != current_user.username:
            if User.query.filter_by(username=username.data).first():
                raise ValidationError("Your username has been registered!")

    def validate_email(self, email):
        # Same inverted comparison as validate_username, same fix.
        if email.data != current_user.email:
            if User.query.filter_by(email=email.data).first():
                raise ValidationError("Email has been used!")
|
{"/app.py": ["/components/users/__init__.py", "/components/events/__init__.py", "/models/model.py", "/components/events/forms/forms.py"], "/components/users/__init__.py": ["/components/users/forms/forms.py", "/models/model.py"], "/components/users/forms/forms.py": ["/models/model.py"], "/components/events/__init__.py": ["/models/model.py", "/components/events/forms/forms.py"], "/components/events/forms/forms.py": ["/models/model.py"]}
|
38,743
|
Nguyephi/ticketbox_python
|
refs/heads/master
|
/components/events/__init__.py
|
import os
import secrets
from flask import Blueprint, render_template, redirect, url_for, flash, current_app
from flask_login import login_user, login_required, logout_user, LoginManager, UserMixin, current_user
from flask_wtf.file import FileField, FileAllowed
from flask_uploads import UploadSet, IMAGES
from models.model import db, Event, User, LikeEvent, TicketType
from components.events.forms.forms import CreateEventForm, EditEventForm, UploadPhoto, AddTicketType, BuyTicket
from models.model import Orders
events_blueprint = Blueprint('events', __name__, template_folder='templates')
photos = UploadSet('photos', IMAGES)
@events_blueprint.route('/create', methods=['GET', 'POST'])
@login_required
def createevent():
    """Show the create-event form; on a valid submit persist the event
    (owned by the current user) and redirect home."""
    form = CreateEventForm()
    if not form.validate_on_submit():
        # GET or validation failure: redisplay the form.
        return render_template('create_event.html', form=form)
    event_row = Event(title=form.title.data, creator_id=current_user.id,
                      description=form.description.data, location=form.location.data,
                      start_date=form.start_date.data)
    flash(
        f'You have successfully created a new event', 'success')
    db.session.add(event_row)
    db.session.commit()
    return redirect(url_for('index'))
@events_blueprint.route('<int:event_id>/addtickettype', methods=['GET', 'POST'])
@login_required
def add_ticket_type(event_id):
    """Attach a new ticket tier (type/price/quantity) to an event.

    NOTE(review): any logged-in user can add tickets to any event -- there
    is no creator check like delete_event has.
    """
    form = AddTicketType()
    event = Event.query.filter_by(id=event_id).one()
    if form.validate_on_submit():
        add_ticket = TicketType(ticket_type=form.ticket_type.data,
                                ticket_price=form.ticket_price.data, quantity=form.quantity.data)
        # Appending to the relationship links the ticket to this event.
        event.ticket_type.append(add_ticket)
        flash(
            f'You added {form.ticket_type.data} ticket to the {event.title.capitalize()} event!', 'success')
        db.session.add(add_ticket)
        db.session.commit()
        return redirect(url_for('index'))
    return render_template('add_ticket_type.html', form=form)
@events_blueprint.route('/<int:event_id>', methods=['GET', 'POST'])
def eventcard(event_id):
    """Event detail page: the event, its creator, and its ticket tiers."""
    event = Event.query.filter_by(id=event_id).one()
    creator = User.query.filter_by(id=event.creator_id).one()
    ticket_list = TicketType.query.filter_by(event_id=event.id).all()
    return render_template('eventpage.html', eventcard=event, user=creator,
                           tickets=ticket_list)
@events_blueprint.route('/<int:event_id>/edit', methods=['GET', 'POST'])
@login_required
def edit_event(event_id):
    """Edit an event's core fields; the cover image has its own route.

    NOTE(review): the form is not pre-populated on GET, and there is no
    check that the current user is the event's creator.
    """
    event = Event.query.filter_by(id=event_id).one()
    form = EditEventForm()
    photo_form = UploadPhoto()
    if form.validate_on_submit():
        event.title = form.title.data
        event.description = form.description.data
        event.location = form.location.data
        event.start_date = form.start_date.data
        event.end_date = form.end_date.data
        db.session.commit()
        flash(
            f'Your event has been updated', 'success')
        return redirect(url_for('index'))
    return render_template('editevent.html', form=form, event=event, photo_form=photo_form)
@events_blueprint.route('/<int:event_id>/upload_image', methods=['GET', 'POST'])
def upload_image(event_id):
    """Save an uploaded cover image for the event via flask-uploads.

    NOTE(review): unlike the other mutating event routes this one has no
    @login_required -- confirm whether that is intentional.
    """
    photo_form = UploadPhoto()
    event = Event.query.filter_by(id=event_id).one()
    if photo_form.validate_on_submit():
        filename = photos.save(photo_form.cover_img.data)
        file_url = photos.url(filename)  # computed but never used
        flash(f'Your image has been uploaded to your event', 'success')
        event.cover_img = filename
        db.session.commit()
    else:
        # This branch is also taken on a plain GET, so merely visiting the
        # URL flashes the failure message.
        file_url = None
        flash(f'File not accepted', 'danger')
    return redirect(url_for('index'))
@events_blueprint.route('/<int:event_id>/delete')
@login_required
def delete_event(event_id):
    """Delete an event if the logged-in user created it; redirect home."""
    event = Event.query.filter_by(id=event_id).first()
    # BUG FIX: .first() returns None for an unknown id; the original then
    # raised AttributeError on ``event.creator_id``.
    if event is not None and event.creator_id == current_user.id:
        flash('Event deleted!', 'success')
        db.session.delete(event)
        db.session.commit()
    return redirect(url_for('index'))
@events_blueprint.route('/<event_id>/buyticket', methods=['POST', 'GET'])
@login_required
def buy_ticket(event_id):
    """Sell tickets for an event.

    The choice labels advertise "Quantity left", so the purchase must also
    check and decrement the remaining stock.
    """
    event = Event.query.filter_by(id=event_id).one()
    tickets = event.ticket_type
    form = BuyTicket()
    ticket_types = [(ticket.id, ticket.ticket_type + " - Voucher price: $" + str(
        ticket.ticket_price) + " - Quantity left: " + str(ticket.quantity)) for ticket in tickets]
    form.ticket_type.choices = ticket_types
    if form.validate_on_submit():
        ticket = TicketType.query.filter_by(id=form.ticket_type.data).first()
        # BUG FIX: stock was displayed but never checked or decremented, so
        # events could oversell indefinitely.
        if ticket is None or form.quantity.data > ticket.quantity:
            flash(f'Not enough tickets left!', 'danger')
            return render_template('buy_ticket.html', event=event, tickets=tickets, form=form)
        total_amount = form.quantity.data * ticket.ticket_price
        purchase = Orders(quantity=form.quantity.data,
                          buyer_id=current_user.id,
                          event_id=event_id,
                          tickettype_id=form.ticket_type.data,
                          total_bill=total_amount)
        ticket.quantity -= form.quantity.data
        db.session.add(purchase)
        db.session.commit()
        flash(f'Purchase successful!', 'success')
        return redirect(url_for('index'))
    return render_template('buy_ticket.html', event=event, tickets=tickets, form=form)
@events_blueprint.route('likeevent/<int:event_id>')
@login_required
def like_event(event_id):
    """Record a like for the event, at most once per user, and bump the
    event's like counter.
    """
    like = LikeEvent.query.filter_by(
        event_id=event_id, user_id=current_user.id).first()
    event = Event.query.get(event_id)
    # BUG FIX: the original tested ``event.like_count == None`` (never true
    # for the NOT NULL column defaulting to 0) and then did ``+= 1``, which
    # would raise TypeError if it ever *were* None.  It also constructed
    # ``LikeEvent(..., like=1)`` -- the model has no ``like`` attribute
    # (the column is ``isLiked``), which raises TypeError.
    if like is None:
        event.like_count = (event.like_count or 0) + 1
        is_liked = LikeEvent(user_id=current_user.id,
                             event_id=event.id, isLiked=1)
        db.session.add(is_liked)
        db.session.commit()
    return redirect(url_for('index'))
|
{"/app.py": ["/components/users/__init__.py", "/components/events/__init__.py", "/models/model.py", "/components/events/forms/forms.py"], "/components/users/__init__.py": ["/components/users/forms/forms.py", "/models/model.py"], "/components/users/forms/forms.py": ["/models/model.py"], "/components/events/__init__.py": ["/models/model.py", "/components/events/forms/forms.py"], "/components/events/forms/forms.py": ["/models/model.py"]}
|
38,744
|
Nguyephi/ticketbox_python
|
refs/heads/master
|
/models/model.py
|
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
# from components.users.forms.forms import SignUpForm
db = SQLAlchemy()
class RatingUser(db.Model):
    """Association row: one user (rater) scoring another user (target)."""
    __tablename__ = 'rating_users'
    # NOTE(review): three primary_key=True columns make a *composite* PK of
    # (id, rater_id, target_user_id) -- confirm this is intended.
    id = db.Column(db.Integer, primary_key=True)
    rater_id = db.Column(db.Integer, db.ForeignKey(
        'users.id'), primary_key=True)
    target_user_id = db.Column(
        db.Integer, db.ForeignKey('users.id'), primary_key=True)
    rating = db.Column(db.Integer, nullable=False)
class LikeEvent(db.Model):
    """One row per (user, event) like; isLiked is a 0/1 flag."""
    __tablename__ = 'like_event'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    event_id = db.Column(db.Integer, db.ForeignKey("events.id"))
    # 0 by default; NOTE(review): nothing prevents duplicate (user, event)
    # rows at the schema level -- uniqueness is enforced only in the view.
    isLiked = db.Column(db.Integer, default=0)
class TicketType(db.Model):
    """A ticket tier (name, price, remaining quantity) belonging to one event."""
    __tablename__ = 'ticket_types'
    id = db.Column(db.Integer, primary_key=True)
    event_id = db.Column(db.Integer, db.ForeignKey('events.id'))
    ticket_type = db.Column(db.String(15))
    ticket_price = db.Column(db.Integer)
    quantity = db.Column(db.Integer)
    # Orders placed against this tier.
    order = db.relationship('Orders', backref='ticket_types')
class Orders(db.Model):
    """A ticket purchase: buyer, event, tier, quantity and total price."""
    __tablename__ = 'orders'
    id = db.Column(db.Integer, primary_key=True)
    tickettype_id = db.Column(db.Integer, db.ForeignKey('ticket_types.id'))
    buyer_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    event_id = db.Column(db.Integer, db.ForeignKey('events.id'))
    quantity = db.Column(db.Integer, default=0)
    total_bill = db.Column(db.Integer, nullable=False)
class Event(db.Model):
    """An event with a creator, cover image, schedule, likes and tickets."""
    __tablename__ = 'events'
    id = db.Column(db.Integer, primary_key=True)
    creator_id = db.Column(db.Integer, db.ForeignKey(
        'users.id'))
    title = db.Column(db.String(255), nullable=False)
    # Falls back to a bundled placeholder image.
    cover_img = db.Column(db.String(500), default=('download.jpeg'))
    description = db.Column(db.String(500), nullable=False)
    location = db.Column(db.String(255), nullable=False)
    start_date = db.Column(db.DateTime, nullable=False)
    end_date = db.Column(db.DateTime)
    # Denormalized counter, incremented alongside LikeEvent rows.
    like_count = db.Column(db.Integer, nullable=False, default=0)
    likes = db.relationship('LikeEvent', backref='events')
    ticket_type = db.relationship('TicketType', backref='events')
    # NOTE(review): attribute named 'event' but it holds this event's Orders.
    event = db.relationship('Orders', backref='events')
class User(UserMixin, db.Model):
    """Application user; UserMixin supplies the Flask-Login interface."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), nullable=False, unique=True)
    name = db.Column(db.String(255), nullable=False)
    email = db.Column(db.String(255), nullable=False, unique=True)
    # Stores a werkzeug password *hash*, never the plaintext.
    password = db.Column(db.String(128), nullable=False)
    # Ratings this user gave / received (self-referencing via RatingUser).
    rater_id = db.relationship(
        'RatingUser', primaryjoin=(id == RatingUser.rater_id))
    target_user_id = db.relationship(
        'RatingUser', primaryjoin=(id == RatingUser.target_user_id))
    # NOTE(review): named 'creator_id' but it is a relationship yielding the
    # Event rows this user created, not an id.
    creator_id = db.relationship(
        'Event', primaryjoin=(id == Event.creator_id))
    likes = db.relationship('LikeEvent', backref='users')
    buyer = db.relationship('Orders', backref='users')

    def __repr__(self):
        return '<User {}>'.format(self.username)

    def set_password(self, password):
        """Hash and store the given plaintext password."""
        self.password = generate_password_hash(password)

    def check_password(self, password):
        """Return True if the plaintext matches the stored hash."""
        return check_password_hash(self.password, password)
|
{"/app.py": ["/components/users/__init__.py", "/components/events/__init__.py", "/models/model.py", "/components/events/forms/forms.py"], "/components/users/__init__.py": ["/components/users/forms/forms.py", "/models/model.py"], "/components/users/forms/forms.py": ["/models/model.py"], "/components/events/__init__.py": ["/models/model.py", "/components/events/forms/forms.py"], "/components/events/forms/forms.py": ["/models/model.py"]}
|
38,745
|
Nguyephi/ticketbox_python
|
refs/heads/master
|
/components/events/forms/forms.py
|
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms.fields import StringField, PasswordField, SubmitField, TextAreaField, IntegerField, SelectField
from wtforms.fields.html5 import DateField
from wtforms.validators import InputRequired, DataRequired, ValidationError
from models.model import Event
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
# Local upload-set handle so FileAllowed below can reference it; mirrors the
# 'photos' set configured against the app in app.py.
photos = UploadSet('photos', IMAGES)
class CreateEventForm(FlaskForm):
    """Minimal event-creation form (end date is set later via EditEventForm)."""
    title = StringField('Title', validators=[InputRequired()])
    description = TextAreaField('Description', validators=[InputRequired()])
    location = StringField('Location', validators=[InputRequired()])
    start_date = DateField('Start Date')
    submit = SubmitField('Create')
class AddTicketType(FlaskForm):
    """Form for attaching a ticket tier (name/price/quantity) to an event."""
    ticket_type = StringField('Ticket Type', validators=[InputRequired()])
    # BUG FIX: the price field's label was a copy-paste of 'Ticket Type'.
    ticket_price = IntegerField('Ticket Price', validators=[InputRequired()])
    quantity = IntegerField('Quantity', validators=[InputRequired()])
    submit = SubmitField('Add Tickets')
class BuyTicket(FlaskForm):
    """Purchase form; ticket_type choices are filled per-event in the
    buy_ticket view before validation."""
    ticket_type = SelectField('Select Ticket Type', coerce=int)
    quantity = IntegerField('Quantity', validators=[InputRequired()])
    submit = SubmitField('Buy Tickets')
class EditEventForm(FlaskForm):
    """Event-editing form; superset of CreateEventForm (adds end_date)."""
    title = StringField('Title', validators=[InputRequired()])
    description = TextAreaField('Description', validators=[InputRequired()])
    location = StringField('Location', validators=[InputRequired()])
    start_date = DateField('Start Date')
    end_date = DateField('End Date')
    submit = SubmitField('Update Event Info')
class UploadPhoto(FlaskForm):
    """Cover-image upload form; restricted to the 'photos' (image) upload set."""
    cover_img = FileField(validators=[FileAllowed(
        photos, u'Image only!'), FileRequired(u'File was empty!')])
    submit = SubmitField(u'Upload')
|
{"/app.py": ["/components/users/__init__.py", "/components/events/__init__.py", "/models/model.py", "/components/events/forms/forms.py"], "/components/users/__init__.py": ["/components/users/forms/forms.py", "/models/model.py"], "/components/users/forms/forms.py": ["/models/model.py"], "/components/events/__init__.py": ["/models/model.py", "/components/events/forms/forms.py"], "/components/events/forms/forms.py": ["/models/model.py"]}
|
38,768
|
Monish99/Invoice
|
refs/heads/master
|
/invoice/views.py
|
import mysql
from django.shortcuts import HttpResponse, render
from mysql.connector import Error
from .inv import someRandom
from .sndMail import det
def create_connection(hn, un, pw, dbn):
    """Open a MySQL connection with the given host/user/password/database.

    Returns the live connection, or None when connecting failed (the error
    is printed, not raised).
    """
    conn = None
    try:
        conn = mysql.connector.connect(
            host=hn,
            user=un,
            passwd=pw,
            database=dbn
        )
        print("MySQL Database connection successful")
    except Error as err:
        print(f"Error: '{err}'")
    return conn
def read_query(con, que):
    """Execute `que` on connection `con` and return all rows.

    Returns None (implicitly) when the query raises a MySQL Error; the
    error is printed rather than propagated.
    """
    cur = con.cursor()
    rows = None
    try:
        cur.execute(que)
        rows = cur.fetchall()
        return rows
    except Error as err:
        print(f"Error:", err)
def trainerBankDet(n):
    """Return all trainer rows whose Name equals `n` (None on DB error).

    SECURITY FIX: `n` originates from request parameters; the original
    built the SQL by string concatenation (injectable).  Use a
    parameterized query via the driver instead.
    """
    cursor = conn.cursor()
    TBD = None
    try:
        cursor.execute("SELECT * FROM trainer where Name=%s", (n,))
        TBD = cursor.fetchall()
    except Error as err:
        print(f"Error:", err)
    print(TBD)
    return TBD
# Module-level DB connection and dropdown data, fetched once at import time.
conn = create_connection("localhost", "root", "", "genesis")
q = "SELECT Name from collage"
q1 = "SELECT Name from trainer"
# Rows are stringified and split on single quotes; odd-indexed fragments are
# the values.  NOTE(review): fragile -- a name containing an apostrophe
# breaks this parsing; iterating the row tuples directly would be safer.
res = str(read_query(conn, q1))
colName = str(read_query(conn, q))
l = colName.split("'")
cn = []   # college names for the form dropdown
for i in range(len(l)):
    if i % 2 == 1:
        cn.append(l[i])
tn = []   # trainer names for the form dropdown
l1 = res.split("'")
for i in range(len(l1)):
    if i % 2 == 1:
        tn.append(l1[i])
print("\n", colName)
def calcDays(sd, ed):
    """Return the number of days between two ISO 'YYYY-MM-DD' date strings.

    BUG FIX: the original subtracted day-of-month values and approximated
    every month as 30 days while ignoring the year entirely, so any range
    crossing a month (or year) boundary was wrong.  datetime.date gives the
    exact calendar difference.  (The 'YYYY-MM-DD' layout is confirmed by
    inv.someRandom, which splits the same string into [year, month, day].)
    """
    from datetime import date
    return (date.fromisoformat(ed) - date.fromisoformat(sd)).days
def home(request):
    """Render the invoice form, pre-filled with college and trainer names."""
    return render(request, "home.html", {'colName': cn, 'trName': tn})
def display(request):
    """Handle the submitted form: compute the training duration, generate
    the invoice document (someRandom) and e-mail it (det).

    NOTE(review): a state-changing action driven by GET parameters --
    should be a POST.
    """
    trainerName = request.GET["trName"]
    collegeName = request.GET["colName"]
    domain = request.GET["dom"]  # read but never used below
    #noOfDays = request.GET["Day"]
    start_date = request.GET["SD"]
    end_date = request.GET["ED"]
    t_hrs = request.GET["TH"]
    mode = request.GET["mode"]
    print(start_date, "\n", end_date)
    noOfDays = calcDays(start_date, end_date)
    print(noOfDays)
    print("type of NOD", type(noOfDays))
    #result = "Duration of the training " + str(noOfDays)
    #TBD = trainerBankDet(trainerName)
    someRandom(trainerName, collegeName, mode, noOfDays, start_date)
    det(trainerName, collegeName, t_hrs, noOfDays, mode, start_date, end_date)
    return render(request, "home.html", {'message': 'Email Sent'})
|
{"/invoice/views.py": ["/invoice/inv.py", "/invoice/sndMail.py"]}
|
38,769
|
Monish99/Invoice
|
refs/heads/master
|
/invoice/sndMail.py
|
import smtplib
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
import mysql
from mysql.connector import Error
def create_connection(hn, un, pw, dbn):
    """Open a MySQL connection; returns None on failure (error printed).

    NOTE(review): duplicated verbatim in views.py and inv.py -- should live
    in one shared module.
    """
    connection = None
    try:
        connection = mysql.connector.connect(
            host=hn,
            user=un,
            passwd=pw,
            database=dbn
        )
        print("MySQL Database connection successful")
    except Error as err:
        print(f"Error: '{err}'")
    return connection
def read_query(con, que):
    """Execute `que` and return all rows; returns None on MySQL Error.

    NOTE(review): duplicated in views.py and inv.py.
    """
    cursor = con.cursor()
    result = None
    try:
        cursor.execute(que)
        result = cursor.fetchall()
        return result
    except Error as err:
        print(f"Error:", err)
def det(tn, cn, NoHrs, NoD, mode, stD, eDT):
    """Look up the trainer's pay and e-mail plus the college's food policy,
    then dispatch the confirmation mail.

    NOTE(review): SQL is built by concatenating `tn`/`cn`, which come from
    request parameters upstream -- injectable; parameterize these queries.
    The result rows are parsed by splitting their str() repr, which breaks
    on values containing quotes.
    """
    conn = create_connection("localhost", "root", "", "genesis")
    # Selecting PAy from trainer table
    q = "SELECT Payment from trainer where Name='" + tn + "'"
    res = str(read_query(conn, q))
    paa = res.split("(")
    paya = paa[1].split(",")
    pay = paya[0]
    # Fetching Food col from DB
    q = "SELECT Food from collage where Name='" + cn + "'"
    res = str(read_query(conn, q))
    fo = res.split("'")
    food = fo[1]
    # Fetching email of the trainer
    q = "SELECT Email from trainer where Name='" + tn + "'"
    res = str(read_query(conn, q))
    #print(res)
    ma = res.split("'")
    mail = ma[1]
    #print(mail, "type", type(mail))
    send_test_mail(cn, pay, NoHrs, NoD, mode, stD, eDT, food, mail)
def send_test_mail(cn, pay, NoHrs, NoD, mode, stD, eDT, food, mail):
    """Compose the confirmation e-mail (with invoice.pdf attached) and send
    it to `mail` (the trainer address resolved by det())."""
    # setting up the email
    sender_email = "teamvoid35@gmail.com"
    receiver_email = mail
    msg = MIMEMultipart()
    msg['Subject'] = 'Confirmation Mail'
    msg['From'] = sender_email
    msg['To'] = receiver_email
    # formatting the email as req
    colName = "Name of the college : " + cn
    fee = "Remuneration " + pay + "/- per day incl of TDS"
    hrs = "Total number of hours : " + NoHrs
    days = "Totoal number of days : " + str(NoD)
    mod = "Mode of training : " + mode
    da = "Date : " + stD + " to " + eDT
    foo = "Food : " + food
    msgText = MIMEText('Greeting from Genesis!!!\n This is an email confirmation post of our telephonic conversation '
                       'about you associating with Genesis for our forthcoming project on the contractual basis. '
                       'PBF the details about the project.\n\n ' + colName + "\n" + fee + "\n" + hrs + "\n" + days + "\n" + mod + "\n" + da + "\n" + foo + "\n")
    msg.attach(msgText)
    # Attaching the invoice PDF generated by inv.invGen
    pdf = MIMEApplication(open("invoice.pdf", 'rb').read())
    pdf.add_header('Content-Disposition', 'attachment', filename="invoice.pdf")
    msg.attach(pdf)
    try:
        s = smtplib.SMTP('smtp.gmail.com', 587)
        s.starttls()
        # SECURITY NOTE(review): credentials are hard-coded in source; move
        # them to environment variables / a secrets store.
        s.login(sender_email, "Gmail@123")
        # BUG FIX: the original always sent to a hard-coded personal address
        # ("monishraj350@gmail.com") instead of the recipient in msg['To'].
        s.sendmail(sender_email, receiver_email, msg.as_string())
        s.quit()
    except Exception as e:
        print(e)
|
{"/invoice/views.py": ["/invoice/inv.py", "/invoice/sndMail.py"]}
|
38,770
|
Monish99/Invoice
|
refs/heads/master
|
/invoice/inv.py
|
from docx import Document
import mysql
from mysql.connector import Error
from docx2pdf import convert
from docx.enum.text import WD_ALIGN_PARAGRAPH
def create_connection(hn, un, pw, dbn):
    """Open a MySQL connection; returns None on failure (error printed).

    NOTE(review): third verbatim copy of this helper (also in views.py and
    sndMail.py) -- consolidate into one module.
    """
    connection = None
    try:
        connection = mysql.connector.connect(
            host=hn,
            user=un,
            passwd=pw,
            database=dbn
        )
        print("MySQL Database connection successful")
    except Error as err:
        print(f"Error: '{err}'")
    return connection
def read_query(con, que):
    """Execute `que` and return all rows; returns None on MySQL Error.

    NOTE(review): duplicated in views.py and sndMail.py.
    """
    cursor = con.cursor()
    result = None
    try:
        cursor.execute(que)
        result = cursor.fetchall()
        return result
    except Error as err:
        print(f"Error:", err)
def someRandom(tr, col, mode, noDays, sd):
    """Gather trainer/college data from MySQL and hand it to invGen to
    build the invoice document.

    NOTE(review): SQL is concatenated from `tr`/`col` (request-derived
    upstream) -- injectable; and results are parsed by splitting their
    str() repr, which breaks on values containing quotes.
    """
    print("type of triner ", type(tr))
    conn = create_connection("localhost", "root", "", "genesis")
    # Fetching all details of trainer
    que = "SELECT * FROM trainer where Name='" + tr + "'"
    que1 = "SELECT BName FROM trainer where Name='" + tr + "'"
    TBD = str(read_query(conn, que))
    # bn = str(read_query(conn, que1))
    print(TBD)
    # Quote-split repr: odd indices hold the column values consumed by invGen.
    l = TBD.split("'")
    for i in range(len(l)):
        print(i, "vlaue ; ", l[i])
    # Fetching trainer and college lacation for cal travel allowance
    que = "SELECT Location FROM collage where Name='" + col + "'"
    que1 = "SELECT Location FROM trainer where Name='" + tr + "'"
    colLoc = str(read_query(conn, que))
    trLoc = str(read_query(conn, que1))
    colLoc1 = colLoc.split("'")
    trLoc1 = trLoc.split("'")
    colLoc = colLoc1[1]
    trLoc = trLoc1[1]
    print("col ; ", colLoc, "trloc ; ", trLoc)
    # Fetching food details from DB
    que1 = "SELECT Food FROM collage where Name='" + col + "'"
    fo = str(read_query(conn, que1))
    fo1 = fo.split("'")
    fo = fo1[1]
    print("foo : ", fo, "type", type(fo))
    # Selecting Pay of trainer
    que1 = "SELECT Payment FROM trainer where Name='" + tr + "'"
    pa = str(read_query(conn, que1))
    print("PA : ", pa)
    pa1 = pa.split("'")
    pa2 = pa1[0].split("(")
    print("pa2 ", pa2)
    pa1 = pa2[1].split(",")
    print("pa1 ; ", pa1)
    pa = pa1[0]
    print("Start date", sd)
    invGen(l, trLoc, colLoc, mode, fo, noDays, pa, sd, col)
def invGen(lst1, TLoc, CLoc, mode, foo, NoD, pay, stDate, col):
    """Build the invoice .docx (header image, bank details, per-day fee
    table with travel/food allowances and totals) and convert it to PDF.

    lst1 is the quote-split repr of the trainer row produced by someRandom;
    the odd indices addressed below (1, 3, 5, 9, 11, 13, 15) are its
    column values.  NOTE(review): extremely position-dependent -- any
    schema change silently shifts every field.
    """
    doc = Document()
    my_image = doc.add_picture('invoice/some.png')  # return value unused
    last_paragraph = doc.paragraphs[-1]
    last_paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
    Phone = lst1[2].split(',')
    print("aa : ", Phone)
    # Adding Heading to docx
    doc.add_heading('Genplus Training and Consulting', 0)
    # Defining the dynamic properties that have to be printed in the docx like trainer name etc..
    name = "Name(As given in Bank) : " + lst1[1]
    bacc = "Bank Account Number : " + lst1[9]
    ifsc = "IFSC : " + lst1[11]
    pan = "PAN Number : " + lst1[13]
    bn = "Bank Name : " + lst1[15]
    ph = "Phone Number : " + Phone[1]
    em = "Email : " + lst1[3]
    loc = "Based Location : " + lst1[5]
    # Adding each of the dynamic functionality like name
    para = doc.add_paragraph()
    run2 = para.add_run()
    run2.add_text(name)
    # Adding Acc no
    para = doc.add_paragraph()
    run2 = para.add_run()
    run2.add_text(bacc)
    # Adding IFSC code
    para = doc.add_paragraph()
    run2 = para.add_run()
    run2.add_text(ifsc)
    # Adding PAN
    para = doc.add_paragraph()
    run2 = para.add_run()
    run2.add_text(pan)
    # Adding Bank Name
    para = doc.add_paragraph()
    run2 = para.add_run()
    run2.add_text(bn)
    para = doc.add_paragraph()
    run2 = para.add_run()
    run2.add_text(ph)
    para = doc.add_paragraph()
    run2 = para.add_run()
    run2.add_text(em)
    para = doc.add_paragraph()
    run2 = para.add_run()
    run2.add_text(loc)
    # Calculating Food and travel Allowance
    # Allowances apply only to offline trainings: travel when locations
    # differ, food when the college does not provide it.
    food = "No"
    travel = "No"
    if mode == "offline":
        if TLoc != CLoc:
            travel = "1000"
        else:
            travel = "No"
        if foo == "No":
            food = '200'
        else:
            food = 'No'
    # Formatting start_date as a str
    stDate1 = stDate.split("-") # strdate[y, mon, day]
    day = int(stDate1[2])
    mon = stDate1[1]
    yr = stDate1[0]
    nod = int(NoD)
    # Assigning all the data to be put into the table to a Tuple
    # NOTE(review): ``day`` is incremented naively, so ranges crossing a
    # month boundary produce dates like "32/04/..." -- confirm acceptable.
    data = []
    for i in range(nod + 1):
        date = str(day) + "/" + mon + "/" + yr
        data1 = (date, col, pay, travel, food)
        data.append(data1)
        day = day + 1
    fday = day
    print()
    da = tuple(data)  # NOTE(review): unused
    table = doc.add_table(rows=1, cols=5)
    # Adding heading in the 1st row of the table
    row = table.rows[0].cells
    row[0].text = 'Date'
    row[1].text = 'College'
    row[2].text = 'Fees/day'
    row[3].text = 'Travel Allowance'
    row[4].text = 'Food Allowance'
    # Adding data from the list to the table
    day = int(stDate1[2])
    print('day : ', day, "stdate[2] ; ", stDate1[2], "Fday ; ", fday)
    # Travel allowance is shown only on the first and last day.
    for d, c, f, t, fo in data:
        if t == "1000":
            if day == int(stDate1[2]) or day == fday - 1:
                t = "1000"
            else:
                t = 'No'
        row = table.add_row().cells
        row[0].text = d
        row[1].text = c
        row[2].text = f
        row[3].text = t
        row[4].text = fo
        day = day + 1
    # Empty row
    row = table.add_row().cells
    row = table.add_row().cells
    # Calculating Total amount of each col
    tot_fee = int(pay) * (int(NoD) + 1)
    row[1].text = "Total"
    row[2].text = str(tot_fee)
    tot_travel = 0
    if travel == '1000':
        row[3].text = '2000'
        tot_travel = 2000
    tot_food = 0
    if food == '200':
        tot_food = 200 * (int(NoD) + 1)
        row[4].text = str(tot_food)
    row = table.add_row().cells
    row = table.add_row().cells
    # Calculating the Grand_total
    G_tot = tot_fee + tot_food + tot_travel
    row[3].text = "Grand Total"
    row[4].text = str(G_tot)
    # Styling the table
    table.style = 'Light Grid Accent 1'
    # NOTE(review): absolute, machine-specific output path.
    filePath = 'C:\\Users\\Monish\\invoiceGen\\Team_Void\\invoice.docx'
    doc.save(filePath)
    #print("This is random : ")
    convert(filePath)
# someRandom('Monish', 'SJBIT', "offline", '6', "2021-04-05")
|
{"/invoice/views.py": ["/invoice/inv.py", "/invoice/sndMail.py"]}
|
38,771
|
Monish99/Invoice
|
refs/heads/master
|
/invoice/urls.py
|
# URL routing for the invoice app.
from django.urls import path, include
from . import views

# Map each route to its view callable.
urlpatterns = [
    path('', views.home),            # landing page
    path('display', views.display),  # invoice display page
]
|
{"/invoice/views.py": ["/invoice/inv.py", "/invoice/sndMail.py"]}
|
38,772
|
Monish99/Invoice
|
refs/heads/master
|
/invoice/admin.py
|
from django.contrib import admin
from .models import Trainer, college

# Expose the invoice models in the Django admin site.
for _model in (Trainer, college):
    admin.site.register(_model)
|
{"/invoice/views.py": ["/invoice/inv.py", "/invoice/sndMail.py"]}
|
38,777
|
taehyeonjeon07/sueorang-sign-language
|
refs/heads/master
|
/dataset_split.py
|
import os
from util import make_dir
import shutil

# Split the gesture image dataset: move the first n_of_valid_set images of
# every digit-named class folder into a parallel 'gestures/valid' tree.
dataset_folder = 'gestures'
gestures_list = os.listdir(dataset_folder)
gestures_list.sort()
print('gesture folder list -->', gestures_list)
print('\n', len(gestures_list), 'of classes will be split(none class folder skip)')

n_of_valid_set = 100
make_dir(os.path.join(dataset_folder, 'valid'))

for gesture in gestures_list:
    # Only digit-named folders are gesture classes; skip everything else
    # (including the 'valid' folder we just created).
    if not gesture.isdigit():
        continue
    print('gesture :', gesture, 'is moving')
    dest_path = os.path.join(dataset_folder, 'valid', gesture)
    make_dir(dest_path)
    origin_path = os.path.join(dataset_folder, gesture)
    # enumerate index == number of images already moved, so this stops
    # after exactly n_of_valid_set moves, same as the original counter.
    for moved, image in enumerate(os.listdir(origin_path)):
        if moved >= n_of_valid_set:
            break
        shutil.move(os.path.join(origin_path, image), os.path.join(dest_path, image))
|
{"/dataset_split.py": ["/util.py"]}
|
38,778
|
taehyeonjeon07/sueorang-sign-language
|
refs/heads/master
|
/util.py
|
import errno
import os
def make_dir(dir_name):
    """Create *dir_name* if it does not already exist, tolerating races.

    Args:
        dir_name: path of the directory to create.

    Raises:
        OSError: if creation fails for any reason other than the directory
            already existing (EEXIST is swallowed because another process
            may create the directory between the isdir() check and
            makedirs()).
    """
    try:
        if not os.path.isdir(dir_name):
            os.makedirs(os.path.join(dir_name))
    except OSError as e:
        # Bug fix: `errno` was referenced here without ever being imported,
        # so this handler itself raised NameError instead of filtering.
        if e.errno != errno.EEXIST:
            print("Failed to create directory!!!!!")
            raise
|
{"/dataset_split.py": ["/util.py"]}
|
38,797
|
DmitryPorotov/FileWatcher
|
refs/heads/master
|
/ssh_client.py
|
from paramiko import transport, RSAKey, SFTPClient, client
import socket
import util
class SshClient:
    """Thin SFTP/SSH wrapper that mirrors file operations onto a remote host.

    Relative paths passed to the operation methods are resolved against the
    configured remote base (`path_to`) and local base (`path_from`).
    """

    def __init__(self, server: str, path_to: str, path_from: str, key_file: str = '~/.ssh/id_rsa'):
        # *server* is given as "user@host"
        self._sftp_client = None  # type: SFTPClient
        parts = server.split("@")
        self.server = parts[1]
        self.user = parts[0]
        self._path_to = util.resolve_home_dir(path_to, is_dir=True)
        self._path_from = util.resolve_home_dir(path_from, is_dir=True)
        self._p_key = RSAKey.from_private_key_file(util.resolve_home_dir(key_file))
        self.key_file_name = key_file
        self._connect()

    def _connect(self):
        """Open the TCP socket, SSH transport and SFTP channel (key auth only)."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((self.server, 22))
        tr = transport.Transport(s)
        tr.connect(username=self.user, pkey=self._p_key)
        self._sftp_client = tr.open_sftp_client()

    def put(self, rel_file_name, callback=None):
        """Upload one file; invoke *callback* once the transfer completes."""
        def cb(cur, total):
            # paramiko invokes this repeatedly with progress; only fire the
            # caller's callback when the transfer has fully finished.
            if cur == total and callback:
                callback()
        self._sftp_client.put(self._path_from + rel_file_name, self._path_to + rel_file_name, cb)

    def mkdir(self, rel_dir_name, mask, callback):
        """Create a remote directory with the given permission mask."""
        self._sftp_client.mkdir(self._path_to + rel_dir_name, mask)
        callback()

    def remove(self, rel_file_name, callback):
        """Delete a remote file."""
        self._sftp_client.remove(self._path_to + rel_file_name)
        callback()

    def rmdir(self, rel_dir_name, callback):
        """Delete a remote directory.

        Bug fix: this previously called SFTPClient.remove(), which fails on
        directories; SFTPClient.rmdir() is the correct SFTP operation.
        """
        self._sftp_client.rmdir(self._path_to + rel_dir_name)
        callback()

    def move(self, rel_dir_name_from, rel_file_name):
        """Rename/move a remote file or directory."""
        self._sftp_client.rename(self._path_to + rel_dir_name_from, self._path_to + rel_file_name)

    def symlink(self, destination_dir_name, original, callback):
        """Create a remote symlink (`ln -s`) through a one-shot SSH session."""
        cl = client.SSHClient()
        cl.load_system_host_keys()
        cl.connect(self.server, username=self.user)
        try:
            stdin, stdout, stderr = cl.exec_command('ln -s {} {}'.format(original,
                                                    self._path_to + destination_dir_name))
            # Wait for the remote command to finish so the link exists
            # before the callback (and any DB cleanup it does) runs.
            stdout.channel.recv_exit_status()
        finally:
            # Bug fix: the ad-hoc SSH session was never closed (leak).
            cl.close()
        callback()

    def close(self):
        """Close the SFTP channel."""
        self._sftp_client.close()
|
{"/ssh_client.py": ["/util.py"], "/test_sock.py": ["/ssh_client.py"], "/watcher.py": ["/path_watcher.py", "/db_handler.py", "/ssh_client.py"], "/path_watcher.py": ["/ssh_client.py", "/db_handler.py", "/util.py"]}
|
38,798
|
DmitryPorotov/FileWatcher
|
refs/heads/master
|
/db_handler.py
|
import sqlite3
import uuid
from _datetime import datetime
from pathlib import Path
class DbHandler:
    """SQLite-backed journal of pending sync operations.

    Each row records one file-system event that still has to be replayed on
    the remote side; rows are deleted once the transfer succeeds.
    """

    def __init__(self):
        self.conn = sqlite3.connect(str(Path.home()) + '/.local/share/FileWatcher/sync.db')
        self.cur = self.conn.cursor()
        # One row per pending action; `cookie` pairs the two halves of an
        # inotify move event.
        self.cur.execute('''
        CREATE TABLE IF NOT EXISTS syncs (
          id TEXT PRIMARY KEY,
          time TEXT,
          path_to TEXT,
          filename TEXT,
          is_dir INTEGER,
          action TEXT,
          server TEXT,
          retries INTEGER,
          cookie INTEGER)
        ''')
        self.conn.commit()

    def save(self, db_row) -> list:
        """Insert one pending sync row; return its generated id in a list."""
        _id = uuid.uuid4().hex
        self.cur.execute('INSERT INTO syncs VALUES (?,?,?,?,?,?,?,?,?)', (_id, datetime.now(), *db_row))
        self.conn.commit()
        return [_id]

    def get_by_cookie(self, cookie):
        """Return all rows recorded for the given inotify move cookie."""
        self.cur.execute('SELECT * FROM syncs WHERE cookie = ?', (cookie,))
        return self.cur.fetchall()

    def delete(self, ids: list):
        """Delete the rows whose primary keys are listed in *ids*.

        Bug fix: the previous version interpolated hand-quoted ids straight
        into the SQL text and mutated the caller's list in place;
        placeholders avoid both problems.
        """
        if ids:
            placeholders = ','.join('?' * len(ids))
            self.cur.execute('DELETE FROM syncs WHERE id IN ({})'.format(placeholders), tuple(ids))
            self.conn.commit()

    def close(self):
        """Close the underlying SQLite connection."""
        self.conn.close()
|
{"/ssh_client.py": ["/util.py"], "/test_sock.py": ["/ssh_client.py"], "/watcher.py": ["/path_watcher.py", "/db_handler.py", "/ssh_client.py"], "/path_watcher.py": ["/ssh_client.py", "/db_handler.py", "/util.py"]}
|
38,799
|
DmitryPorotov/FileWatcher
|
refs/heads/master
|
/test_sock.py
|
# Ad-hoc manual smoke test for SshClient (not an automated test).
import ssh_client
# NOTE(review): SshClient.__init__ takes (server, path_to, path_from, ...)
# and expects the server string as "user@host"; this call passes only two
# arguments and no user prefix, so construction will fail — confirm intent.
cl = ssh_client.SshClient('dmitrydev.adquant.net', '/root/test')
# NOTE(review): SshClient has no public connect(); __init__ already calls
# _connect(), so this line raises AttributeError if reached.
cl.connect()
|
{"/ssh_client.py": ["/util.py"], "/test_sock.py": ["/ssh_client.py"], "/watcher.py": ["/path_watcher.py", "/db_handler.py", "/ssh_client.py"], "/path_watcher.py": ["/ssh_client.py", "/db_handler.py", "/util.py"]}
|
38,800
|
DmitryPorotov/FileWatcher
|
refs/heads/master
|
/watcher.py
|
import json
from path_watcher import PathWatcher
from db_handler import DbHandler
from pathlib import Path
from ssh_client import SshClient
import signal
db = None
ssh_clients = []
def main():
    """Read the config, install signal handlers, then start one watcher per sync entry."""
    conf = read_config()
    path_watchers = []
    global db
    db = DbHandler()
    # Bug fix: install the handlers *before* building the watchers.
    # PathWatcher's constructor enters asyncio's run_forever(), so any code
    # placed after the construction loop was never executed.
    signal.signal(signal.SIGINT, clean_up)
    signal.signal(signal.SIGTERM, clean_up)
    for c in conf['sync']:
        ssh_clients.append(SshClient(c['server'], c['dirs']['to'], c['dirs']['from'], c['key_file']))
        path_watchers.append(PathWatcher(c['dirs']['from'],
                                         ssh_clients[-1],
                                         db))
def clean_up(signum=None, frame=None):
    """Signal handler: close the DB and all SSH clients, then exit.

    Bug fix: signal handlers are invoked as handler(signum, frame); the
    previous zero-argument signature raised TypeError when a signal was
    actually delivered.  The defaults keep direct calls working too.
    """
    db.close()
    for c in ssh_clients:
        c.close()
    exit(0)
def read_config():
    """Load the JSON config; scaffold a template and exit(1) if it is missing.

    Returns:
        dict parsed from ~/.local/share/FileWatcher/config.json
    """
    config_path = str(Path.home()) + '/.local/share/FileWatcher/config.json'
    try:
        # with-statement: the file handle was previously never closed
        with open(config_path, 'r') as fd:
            conf = json.load(fd)
    except IOError:
        # First run: write an empty template for the user to fill in.
        # 'key_file' is included because main() reads c['key_file'].
        o = {
            'sync': [{
                'server': '',
                'key_file': '~/.ssh/id_rsa',
                'dirs': {
                    'from': '',
                    'to': ''
                }
            }]
        }
        p = Path(str(Path.home()) + '/.local/share/FileWatcher/')
        # parents=True: also create ~/.local/share if it does not exist yet
        p.mkdir(parents=True, exist_ok=True)
        with open(config_path, 'w+') as fd:
            json.dump(o, fd)
        print('Please configure directories to sync at ~/.local/share/FileWatcher/config.json')
        exit(1)
    return conf
if __name__ == "__main__":
main()
|
{"/ssh_client.py": ["/util.py"], "/test_sock.py": ["/ssh_client.py"], "/watcher.py": ["/path_watcher.py", "/db_handler.py", "/ssh_client.py"], "/path_watcher.py": ["/ssh_client.py", "/db_handler.py", "/util.py"]}
|
38,801
|
DmitryPorotov/FileWatcher
|
refs/heads/master
|
/util.py
|
from pathlib import Path
def resolve_home_dir(path, is_dir=False):
    """Expand '~' in *path* to the current user's home directory.

    Keeps the original semantics: the text following the first '~' (up to a
    second '~', if any) is appended to the home directory.  When *is_dir*
    is true the result is suffixed with '/'.
    """
    if '~' in path:
        home = str(Path.home())
        path = home + path.split('~')[1]
    if is_dir:
        path = path + '/'
    return path
|
{"/ssh_client.py": ["/util.py"], "/test_sock.py": ["/ssh_client.py"], "/watcher.py": ["/path_watcher.py", "/db_handler.py", "/ssh_client.py"], "/path_watcher.py": ["/ssh_client.py", "/db_handler.py", "/util.py"]}
|
38,802
|
DmitryPorotov/FileWatcher
|
refs/heads/master
|
/path_watcher.py
|
import asyncio
import os
import stat
import inotify.adapters
from ssh_client import SshClient
from db_handler import DbHandler
import util
import sys
from pathlib import Path
class PathWatcher:
    """Watches a directory tree with inotify and mirrors each relevant
    file-system event to a remote host through an SshClient, journaling the
    pending operations in a DbHandler so they can be cleaned up on success.
    """

    # Editor/stream temp-file markers whose events should be ignored.
    _temp_files = ['___jb_old___', '___jb_tmp___', 'goutputstream-']
    _attrib_event = 'IN_ATTRIB'
    _copy_events = ['IN_CREATE', 'IN_MODIFY']
    _delete_event = 'IN_DELETE'
    _move_events = ['IN_MOVED_TO', 'IN_MOVED_FROM']
    _all_events = [_attrib_event, _delete_event, *_copy_events, *_move_events]

    def __init__(self, path_from, ssh_client, db):
        self.path_from = util.resolve_home_dir(path_from, is_dir=True)
        self.db = db  # type: DbHandler
        self._sftp = ssh_client  # type: SshClient
        self.evt_loop = asyncio.get_event_loop()
        # Maps inotify move-cookies to futures so the two halves of a move
        # event can be paired up.
        self.cookie_to_future_map = {}
        try:
            asyncio.ensure_future(self._start_watcher())
            # NOTE: blocks here until the loop is stopped.
            self.evt_loop.run_forever()
        except FileNotFoundError:
            print('Watch directory does not exist ' + self.path_from)
            exit(1)

    async def _start_watcher(self):
        """Main inotify loop: classify each event, journal it, hand it off."""
        i = inotify.adapters.InotifyTree(self.path_from)
        for evt in i.event_gen():
            if evt is not None and not self._is_temp_file(evt[3]) and PathWatcher._is_sync_needed(evt[1]):
                print(evt)
                is_dir = 1 if (len(evt[1]) > 1 and evt[1][1] == 'IN_ISDIR') else 0
                # Bug fix: compare string content with ==, not identity (is).
                path = evt[2] if evt[2][-1] == '/' else evt[2] + '/'
                action = 'copy'
                if evt[1][0] == PathWatcher._delete_event:
                    action = 'delete'
                elif evt[1][0] == PathWatcher._attrib_event:
                    action = 'attrib'
                elif evt[1][0] == PathWatcher._move_events[0]:
                    action = 'move_to'
                elif evt[1][0] == PathWatcher._move_events[1]:
                    action = 'move_from'
                ids = self.db.save((
                    path,
                    evt[3],
                    is_dir,
                    action,
                    self._sftp.user + '@' + self._sftp.server,
                    0,
                    evt[0].cookie
                ))

                def cb():
                    # on success, drop the journal row(s) for this event
                    self.db.delete(ids)
                if action in ['copy', 'delete', 'attrib']:
                    self._execute_transaction(action, path, evt[3], cb, is_dir)
                else:
                    # moves need their matching counterpart event first
                    self._try_defer(evt[0].cookie, cb)
            # yield to the event loop between inotify events
            await asyncio.sleep(0)

    def _try_defer(self, cookie, cb):
        """Handle a move half: wait briefly for its counterpart, or pair up."""
        rows = self.db.get_by_cookie(cookie)
        future = self.evt_loop.create_future()
        self.cookie_to_future_map[str(cookie)] = future
        if len(rows) == 1:
            asyncio.ensure_future(self._defer(cookie, cb))
        elif len(rows) == 2:
            self._handle_move(rows)
            self._clean_up_future_by_cookie(cookie)
        else:
            pass  # TODO: handle db corruption

    async def _defer(self, cookie, cb):
        """After a grace period, treat a lone move half as a copy/delete."""
        await asyncio.sleep(0.05)
        rows = self.db.get_by_cookie(cookie)
        if len(rows) == 1:
            # counterpart never arrived: a move into/out of the watched tree
            self._execute_transaction('copy' if rows[0][5] == 'move_to' else 'delete', rows[0][2],
                                      rows[0][3], cb, bool(rows[0][4]))
        elif len(rows) == 2:
            self._handle_move(rows)
        self._clean_up_future_by_cookie(cookie)

    def _clean_up_future_by_cookie(self, cookie):
        """Resolve and discard the pairing future for *cookie*, if any."""
        if str(cookie) in self.cookie_to_future_map:
            self.cookie_to_future_map[str(cookie)].set_result(None)
            del self.cookie_to_future_map[str(cookie)]

    def _handle_move(self, rows):
        """Replay a complete (from, to) move pair as one remote rename."""
        if rows[0][5] == 'move_to':
            row_to = rows[0]
            row_from = rows[1]
        else:
            row_to = rows[1]
            row_from = rows[0]
        self._execute_transaction('move', row_to[2], row_to[3],
                                  lambda: None, bool(rows[0][4]), row_from[2], row_from[3])
        self.db.delete(list(map(lambda r: r[0], rows)))

    def _execute_transaction(self, action, path, file_name, cb, is_dir, path_move_from='', file_name_move_from=''):
        """Dispatch one journaled action to the SSH client."""
        try:
            rel_full_path = self._get_relative_path(path) + file_name
            if action == 'copy':
                if not is_dir:
                    if Path(path + file_name).is_symlink():
                        original = os.readlink(path + file_name)
                        # NOTE(review): destination omits the link's own name —
                        # confirm this reproduces the link correctly remotely.
                        self._sftp.symlink(self._get_relative_path(path), original, cb)
                    else:
                        self._sftp.put(rel_full_path, cb)
                else:
                    # NOTE(review): stats the *relative* path, so this relies
                    # on the process CWD being the watch root — confirm.
                    mask = stat.S_IMODE(os.stat(rel_full_path).st_mode)
                    self._sftp.mkdir(rel_full_path, mask, cb)
            elif action == 'delete':
                if not is_dir:
                    self._sftp.remove(rel_full_path, cb)
                else:
                    self._sftp.rmdir(rel_full_path, cb)
            elif action == 'move':
                self._sftp.move(self._get_relative_path(path_move_from) + file_name_move_from, rel_full_path)
            elif action == 'attrib':
                pass
        except Exception:
            e = sys.exc_info()[0]
            print(e)
            # Bug fix: bare `raise` preserves the original exception and its
            # traceback; `raise e` re-raised the exception *class* only.
            raise
        # TODO: give user a warning

    def _is_temp_file(self, filename: str) -> bool:
        """True if *filename* matches a known editor temp-file marker."""
        for f in self._temp_files:
            if f in filename:
                return True
        return False

    @staticmethod
    def _is_sync_needed(evt_types: list) -> bool:
        """True if the first inotify event type is one we replicate."""
        if evt_types[0] in PathWatcher._all_events:
            return True
        return False

    def _get_relative_path(self, path):
        """Strip the watch-root prefix from an absolute event path."""
        return path.split(self.path_from)[1]
|
{"/ssh_client.py": ["/util.py"], "/test_sock.py": ["/ssh_client.py"], "/watcher.py": ["/path_watcher.py", "/db_handler.py", "/ssh_client.py"], "/path_watcher.py": ["/ssh_client.py", "/db_handler.py", "/util.py"]}
|
38,804
|
kapitsa2811/semisupervised-vae-metric-embedding
|
refs/heads/master
|
/model/SSVAE.py
|
"""
---------------------------------------------------------------------
-- Author: Jhosimar George Arias Figueroa
---------------------------------------------------------------------
Semisupervised generative model with metric embedding auxiliary task
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from networks.CatVAENetwork import *
from losses.LossFunctions import *
from utils.partition import *
from utils.assignment import *
class SSVAE:
def __init__(self, params):
    """Store hyperparameters and build the TF session and sub-networks.

    Args:
        params: object exposing every hyperparameter as an attribute
            (batch sizes, loss weights, temperature schedule, learning-rate
            schedule, ...).
    """
    # batching / schedule parameters
    self.batch_size = params.batch_size
    self.batch_size_val = params.batch_size_val
    self.initial_temperature = params.temperature
    self.decay_temperature = params.decay_temperature
    self.num_epochs = params.num_epochs
    self.loss_type = params.loss_type
    self.num_classes = params.num_classes
    # static loss weights
    self.w_gauss = params.w_gaussian
    self.w_categ = params.w_categorical
    self.w_recon = params.w_reconstruction
    self.decay_temp_rate = params.decay_temp_rate
    self.gaussian_size = params.gaussian_size
    self.feature_size = params.feature_size
    self.min_temperature = params.min_temperature
    self.temperature = params.temperature  # current temperature
    self.verbose = params.verbose
    # TF session plus encoder/decoder network and loss helpers
    self.sess = tf.Session()
    self.network = CatVAENetwork(params)
    self.losses = LossFunctions()
    self.w_assign = params.w_assign
    self.num_labeled = params.num_labeled
    self.knn = params.knn
    self.metric_loss = params.metric_loss
    # metric-embedding weight is fed per step because it can be annealed
    self.w_metric = tf.placeholder(tf.float32, [])
    self.initial_w_metric = params.w_metric
    self._w_metric = params.w_metric
    self.anneal_metric_loss = params.anneal_w_metric
    # learning rate is fed per step because it can decay
    self.learning_rate = tf.placeholder(tf.float32, [])
    self.lr = params.learning_rate
    self.decay_epoch = params.decay_epoch
    self.lr_decay = params.lr_decay
    self.pretrain = params.pretrain
    self.num_labeled_batch = params.num_labeled_batch
    self.dataset = params.dataset
    self.metric_margin = params.metric_margin
def create_dataset(self, is_training, data, labels, batch_size, x_labeled = None, y_labeled = None):
    """Create dataset given input data

    Args:
        is_training: (bool) whether to use the train or test pipeline.
                     At training, we shuffle the data and have multiple epochs
        data: (array) corresponding array containing the input data
        labels: (array) corresponding array containing the labels of the input data
        batch_size: (int) size of each batch to consider from the data
        x_labeled: (array) corresponding array containing the labeled input data
        y_labeled: (array) corresponding array containing the labeles of the labeled input data

    Returns:
        output: (dict) contains what will be the input of the tensorflow graph
    """
    num_samples = data.shape[0]
    # create dataset object
    if labels is None:
        dataset = tf.data.Dataset.from_tensor_slices(data)
    else:
        dataset = tf.data.Dataset.from_tensor_slices((data, labels))
    # shuffle data in training phase
    if is_training:
        dataset = dataset.shuffle(num_samples).repeat()
    dataset = dataset.batch(batch_size)
    dataset = dataset.prefetch(1)
    # create reinitializable iterator from dataset
    iterator = dataset.make_initializable_iterator()
    labeled_data = False
    if labels is None:
        data = iterator.get_next()
    else:
        data, labels = iterator.get_next()
    # append labeled data to each batch
    if x_labeled is not None:
        labeled_data = True
        _data = tf.concat([data, x_labeled], 0)
        _labels = tf.concat([labels, y_labeled], 0)
    iterator_init = iterator.initializer
    if labeled_data:
        output = {'data': _data, 'labels': _labels, 'iterator_init': iterator_init}
        # keep the labeled slice separate for the assignment loss
        output['labels_semisupervised'] = y_labeled
    else:
        output = {'data': data, 'labels': labels, 'iterator_init': iterator_init}
        output['labels_semisupervised'] = None
    return output
def create_model(self, is_training, inputs, output_size):
    """Model function defining the graph operations.

    Args:
        is_training: (bool) whether we are in training phase or not
        inputs: (dict) contains the inputs of the graph (features, labels...)
                this can be `tf.placeholder` or outputs of `tf.data`
        output_size: (int) size of the output layer

    Returns:
        model_spec: (dict) contains the graph operations or nodes needed for training / evaluation

    Raises:
        ValueError: if the configured reconstruction or metric loss name is
            not one of the supported options.
    """
    data, _labels = inputs['data'], inputs['labels']
    # create network and obtain latent vectors that will be used in loss functions
    latent_spec = self.network.encoder(data, self.num_classes, is_training)
    gaussian, mean, logVar = latent_spec['gaussian'], latent_spec['mean'], latent_spec['logVar']
    categorical, prob, log_prob = latent_spec['categorical'], latent_spec['prob_cat'], latent_spec['log_prob_cat']
    _logits, features = latent_spec['logits'], latent_spec['features']
    output = self.network.decoder(gaussian, categorical, output_size, is_training)
    # reconstruction loss
    if self.loss_type == 'bce':
        loss_rec = self.losses.binary_cross_entropy(data, output)
    elif self.loss_type == 'mse':
        loss_rec = tf.losses.mean_squared_error(data, output)
    else:
        # Bug fix: `raise "string"` is a TypeError in Python 3 — raise a
        # proper exception type instead.
        raise ValueError("invalid loss function... try bce or mse...")
    # kl-divergence loss
    loss_kl = self.losses.kl_gaussian(mean, logVar)
    loss_kl_cat = self.losses.kl_categorical(prob, log_prob, self.num_classes)
    # auxiliary task to assign labels and regularize the feature space
    if _labels is not None:
        labeled_ss = inputs['labels_semisupervised']
        if labeled_ss is not None:
            # assignment loss only if labeled data is available (training phase)
            predicted_labels = assign_labels_semisupervised(features, labeled_ss, self.num_labeled_batch,
                                                            self.batch_size, self.num_classes, self.knn)
            # use assigned labels and logits to calculate cross entropy loss
            loss_assign = tf.losses.sparse_softmax_cross_entropy(labels=predicted_labels, logits=_logits)
        else:
            # predict labels from logits or softmax(logits) (validation/testing phase)
            loss_assign = tf.constant(0.)
            predicted_labels = tf.argmax(prob, axis=1)
        # calculate accuracy using the predicted and true labels
        accuracy = tf.reduce_mean( tf.cast( tf.equal(_labels, predicted_labels), tf.float32 ) )
        # metric embedding loss
        if self.metric_loss == 'triplet':
            loss_metric = tf.contrib.losses.metric_learning.triplet_semihard_loss(predicted_labels, features, margin=self.metric_margin)
        elif self.metric_loss == 'lifted':
            loss_metric = tf.contrib.losses.metric_learning.lifted_struct_loss(predicted_labels, features, margin=self.metric_margin)
        else:
            # Bug fix: same string-raise problem as above.
            raise ValueError("invalid metric loss... currently we support triplet and lifted loss")
    else:
        accuracy = tf.constant(0.)
        loss_assign = tf.constant(0.)
        loss_metric = tf.constant(0.)
        predicted_labels = tf.constant(0.)
    # variational autoencoder loss
    loss_vae = self.w_recon * loss_rec
    loss_vae += self.w_gauss * loss_kl
    loss_vae += self.w_categ * loss_kl_cat
    # total loss
    loss_total = loss_vae + self.w_assign * loss_assign + self.w_metric * loss_metric
    if is_training:
        # use adam for optimization
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        # needed for batch normalization layer
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op_vae = optimizer.minimize(loss_vae)
            train_op_tot = optimizer.minimize(loss_total)
    # create model specification
    model_spec = inputs
    model_spec['variable_init_op'] = tf.global_variables_initializer()
    model_spec['output'] = output
    model_spec['features'] = features
    model_spec['predicted_labels'] = predicted_labels
    model_spec['true_labels'] = _labels
    model_spec['loss_rec'] = loss_rec
    model_spec['loss_kl'] = loss_kl
    model_spec['loss_kl_cat'] = loss_kl_cat
    model_spec['loss_total'] = loss_total
    model_spec['loss_metric'] = loss_metric
    model_spec['loss_assign'] = loss_assign
    model_spec['accuracy'] = accuracy
    # optimizers are only available in training phase
    if is_training:
        model_spec['train_op'] = train_op_tot
        model_spec['train_vae'] = train_op_vae
    return model_spec
def evaluate_dataset(self, is_training, num_batches, model_spec, labeled_data = None, labeled_labels = None):
    """Evaluate the model

    Args:
        is_training: (bool) whether we are training or not
        num_batches: (int) number of batches to train/test
        model_spec: (dict) contains the graph operations or nodes needed for evaluation
        labeled_data: (array) corresponding array containing the labeled input data
        labeled_labels: (array) corresponding array containing the labeles of the labeled input data

    Returns:
        (dic) average of loss functions and metrics for the given number of batches
    """
    avg_accuracy = 0.0
    # NOTE(review): avg_nmi is never accumulated or returned — dead variable
    avg_nmi = 0.0
    avg_loss_rec = 0.0
    avg_loss_kl = 0.0
    avg_loss_cat = 0.0
    avg_loss_total = 0.0
    avg_loss_metric = 0.0
    avg_loss_assign = 0.0
    # initialize dataset iterator
    self.sess.run(model_spec['iterator_init'])
    if is_training:
        # pretraining will train only the variational autoencoder losses
        if self.pretrain < 1:
            train_optimizer = model_spec['train_op']
        else:
            train_optimizer = model_spec['train_vae']
        # training phase
        for j in range(num_batches):
            # select randomly subsets of labeled data according to the batch size
            _x_labeled, _y_labeled, _, _ = create_semisupervised_dataset(labeled_data, labeled_labels,
                                                                         self.num_classes, self.num_labeled_batch)
            # run the tensorflow flow graph
            _, loss_rec, loss_kl, loss_metric, loss_assign, loss_cat, loss_total, accuracy = self.sess.run([train_optimizer,
                model_spec['loss_rec'], model_spec['loss_kl'],
                model_spec['loss_metric'], model_spec['loss_assign'],
                model_spec['loss_kl_cat'], model_spec['loss_total'],
                model_spec['accuracy']],
                feed_dict={self.network.temperature: self.temperature
                , self.w_metric: self._w_metric
                , self.learning_rate: self.lr
                , self.x_labeled: _x_labeled
                , self.y_labeled: _y_labeled})
            # accumulate values
            avg_accuracy += accuracy
            avg_loss_rec += loss_rec
            avg_loss_kl += loss_kl
            avg_loss_cat += loss_cat
            avg_loss_total += loss_total
            avg_loss_metric += loss_metric
            avg_loss_assign += loss_assign
    else:
        # validation phase: no optimizer op and no labeled-batch feed
        for j in range(num_batches):
            # run the tensorflow flow graph
            loss_rec, loss_kl, loss_metric, loss_assign, loss_cat, loss_total, accuracy = self.sess.run([
                model_spec['loss_rec'], model_spec['loss_kl'],
                model_spec['loss_metric'], model_spec['loss_assign'],
                model_spec['loss_kl_cat'], model_spec['loss_total'],
                model_spec['accuracy']],
                feed_dict={self.network.temperature: self.temperature
                ,self.w_metric: self._w_metric
                ,self.learning_rate: self.lr})
            # accumulate values
            avg_accuracy += accuracy
            avg_loss_rec += loss_rec
            avg_loss_kl += loss_kl
            avg_loss_cat += loss_cat
            avg_loss_total += loss_total
            avg_loss_metric += loss_metric
            avg_loss_assign += loss_assign
    # average values by the given number of batches
    avg_loss_rec /= num_batches
    avg_loss_kl /= num_batches
    avg_accuracy /= num_batches
    avg_loss_cat /= num_batches
    avg_loss_total /= num_batches
    avg_loss_metric /= num_batches
    avg_loss_assign /= num_batches
    return {'avg_loss_rec': avg_loss_rec, 'avg_loss_kl': avg_loss_kl, 'avg_loss_cat': avg_loss_cat,
            'total_loss': avg_loss_total, 'avg_accuracy': avg_accuracy,
            'avg_loss_metric': avg_loss_metric, 'avg_loss_assign': avg_loss_assign}
def train(self, train_data, train_labels, val_data, val_labels, labeled_data, labeled_labels):
    """Train the model

    Args:
        train_data: (array) corresponding array containing the training data
        train_labels: (array) corresponding array containing the labels of the training data
        val_data: (array) corresponding array containing the validation data
        val_labels: (array) corresponding array containing the labels of the validation data
        labeled_data: (array) corresponding array containing the labeled input data
        labeled_labels: (array) corresponding array containing the labeles of the labeled input data

    Returns:
        output: (dict) contains the history of train/val loss
    """
    train_history_loss, val_history_loss = [], []
    train_history_acc, val_history_acc = [], []
    train_history_nmi, val_history_nmi = [], []
    # placeholders for the labeled data
    self.x_labeled = tf.placeholder(tf.float32, shape = [self.num_labeled_batch, labeled_data.shape[1]])
    self.y_labeled = tf.placeholder(tf.int64, shape = [self.num_labeled_batch])
    # create training and validation dataset; each training batch reserves
    # num_labeled_batch slots for the labeled examples
    train_dataset = self.create_dataset(True, train_data, train_labels,
                                        self.batch_size - self.num_labeled_batch, self.x_labeled, self.y_labeled)
    val_dataset = self.create_dataset(True, val_data, val_labels, self.batch_size_val)
    self.output_size = train_data.shape[1]
    # create train and validation models
    train_model = self.create_model(True, train_dataset, self.output_size)
    val_model = self.create_model(False, val_dataset, self.output_size)
    # set number of batches
    num_train_batches = int(np.ceil(train_data.shape[0] / (1.0 * (self.batch_size - self.num_labeled_batch))))
    num_val_batches = int(np.ceil(val_data.shape[0] / (1.0 * self.batch_size_val)))
    # initialize global variables
    self.sess.run( train_model['variable_init_op'] )
    # training and validation phases
    print('Training phase...')
    for i in range(self.num_epochs):
        # pretraining at each epoch
        if self.pretrain > 0:
            self.pretrain = self.pretrain - 1
        # decay learning rate according to decay_epoch parameter
        if self.decay_epoch > 0 and (i + 1) % self.decay_epoch == 0:
            self.lr = self.lr * self.lr_decay
            print('Decaying learning rate: %lf' % self.lr)
        # evaluate train and validation datasets
        train_loss = self.evaluate_dataset(True, num_train_batches, train_model, labeled_data, labeled_labels)
        val_loss = self.evaluate_dataset(False, num_val_batches, val_model)
        # get training results for printing
        train_loss_rec = train_loss['avg_loss_rec']
        train_loss_kl = train_loss['avg_loss_kl']
        train_loss_cat = train_loss['avg_loss_cat']
        train_loss_ass = train_loss['avg_loss_assign']
        train_loss_met = train_loss['avg_loss_metric']
        train_accuracy = train_loss['avg_accuracy']
        train_total_loss = train_loss['total_loss']
        # get validation results for printing
        val_loss_rec = val_loss['avg_loss_rec']
        val_loss_kl = val_loss['avg_loss_kl']
        val_loss_cat = val_loss['avg_loss_cat']
        val_loss_ass = val_loss['avg_loss_assign']
        val_loss_met = val_loss['avg_loss_metric']
        val_accuracy = val_loss['avg_accuracy']
        val_total_loss = val_loss['total_loss']
        # if verbose then print specific information about training
        if self.verbose == 1:
            print("(Epoch %d / %d) REC=Train: %.5lf; Val: %.5lf KL=Train: %.5lf; Val: %.5lf KL-Cat=Train: %.5lf; Val: %.5lf MET=Train: %.5lf; Val: %.5lf ASS=Train: %.5lf; Val: %.5lf ACC=Train %.5lf; Val %.5lf" % \
                  (i + 1, self.num_epochs, train_loss_rec, val_loss_rec, train_loss_kl, val_loss_kl, train_loss_cat, val_loss_cat, train_loss_met, val_loss_met, train_loss_ass, val_loss_ass, train_accuracy, val_accuracy))
        else:
            print("(Epoch %d / %d) Train Loss: %.5lf; Val Loss: %.5lf Train Accuracy: %.5lf; Val Accuracy: %.5lf" % \
                  (i + 1, self.num_epochs, train_total_loss, val_total_loss, train_accuracy, val_accuracy))
        # save loss and accuracy of each epoch
        train_history_loss.append(train_total_loss)
        val_history_loss.append(val_total_loss)
        train_history_acc.append(train_accuracy)
        val_history_acc.append(val_accuracy)
        if self.anneal_metric_loss == 1:
            #anneal loss from initial_w_metric to 1 in the first 100 epochs
            self._w_metric = np.minimum(self.initial_w_metric * np.exp(0.06908*(i+1)),1)
            if self.verbose == 1:
                print('Metric Weight: %.5lf' % self._w_metric)
        if self.decay_temperature == 1:
            # decay temperature of gumbel-softmax
            self.temperature = np.maximum(self.initial_temperature*np.exp(-self.decay_temp_rate*(i + 1) ),self.min_temperature)
            if self.verbose == 1:
                print("Gumbel Temperature: %.5lf" % self.temperature)
    return {'train_history_loss' : train_history_loss, 'val_history_loss': val_history_loss,
            'train_history_acc': train_history_acc, 'val_history_acc': val_history_acc}
def test(self, test_data, test_labels, batch_size = -1):
    """Test the model with new data

    Args:
        test_data: (array) corresponding array containing the testing data
        test_labels: (array) corresponding array containing the labels of the testing data
        batch_size: (int) batch size used to run the model

    Return:
        accuracy for the given test data
    """
    # if batch_size is not specified then use all data
    if batch_size == -1:
        batch_size = test_data.shape[0]
    # create dataset
    test_dataset = self.create_dataset(False, test_data, test_labels, batch_size)
    true_labels = test_dataset['labels']
    # perform a forward call on the encoder to obtain predicted labels
    latent = self.network.encoder(test_dataset['data'], self.num_classes)
    logits = latent['prob_cat']
    predicted_labels = tf.argmax(logits, axis=1)
    # calculate accuracy given the predicted and true labels
    accuracy = tf.reduce_mean( tf.cast( tf.equal(true_labels, predicted_labels), tf.float32 ) )
    # initialize dataset iterator
    self.sess.run(test_dataset['iterator_init'])
    # calculate number of batches given batch size
    num_batches = int(np.ceil(test_data.shape[0] / (1.0 * batch_size)))
    # evaluate the model
    avg_accuracy = 0.0
    for j in range(num_batches):
        _accuracy = self.sess.run(accuracy,
                                  feed_dict={self.network.temperature: self.temperature
                                  ,self.w_metric: self._w_metric
                                  ,self.learning_rate: self.lr})
        avg_accuracy += _accuracy
    # average the accuracy
    avg_accuracy /= num_batches
    return avg_accuracy
def latent_features(self, data, batch_size=-1):
    """Obtain latent features learnt by the model

    Args:
        data: (array) corresponding array containing the data
        batch_size: (int) size of each batch to consider from the data

    Returns:
        features: (array) array containing the features from the data
    """
    # if batch_size is not specified then use all data
    if batch_size == -1:
        batch_size = data.shape[0]
    # create dataset
    dataset = self.create_dataset(False, data, None, batch_size)
    # we will use only the encoder network
    latent = self.network.encoder(dataset['data'], self.num_classes)
    encoder = latent['features']
    # obtain the features from the input data
    self.sess.run(dataset['iterator_init'])
    # NOTE(review): floor division drops a trailing partial batch, leaving
    # those rows of `features` as zeros — confirm this is intended.
    num_batches = data.shape[0] // batch_size
    features = np.zeros((data.shape[0], self.feature_size))
    for j in range(num_batches):
        features[j*batch_size:j*batch_size + batch_size] = self.sess.run(encoder,
            feed_dict={self.network.temperature: self.temperature
            ,self.w_metric: self._w_metric
            ,self.learning_rate: self.lr})
    return features
def reconstruct_data(self, data, batch_size=-1):
    """Reconstruct Data

    Args:
        data: (array) corresponding array containing the data
        batch_size: (int) size of each batch to consider from the data

    Returns:
        reconstructed: (array) array containing the reconstructed data
    """
    # if batch_size is not specified then use all data
    if batch_size == -1:
        batch_size = data.shape[0]
    # create dataset
    dataset = self.create_dataset(False, data, None, batch_size)
    # reuse model used in training
    model_spec = self.create_model(False, dataset, data.shape[1])
    # obtain the reconstructed data
    self.sess.run(model_spec['iterator_init'])
    # NOTE(review): floor division drops a trailing partial batch, leaving
    # those rows of `reconstructed` as zeros — confirm this is intended.
    num_batches = data.shape[0] // batch_size
    reconstructed = np.zeros(data.shape)
    pos = 0
    for j in range(num_batches):
        reconstructed[pos:pos + batch_size] = self.sess.run(model_spec['output'],
            feed_dict={self.network.temperature: self.temperature
            ,self.w_metric: self._w_metric
            ,self.learning_rate:self.lr})
        pos += batch_size
    return reconstructed
def plot_latent_space(self, data, labels, save=False):
    """Plot the latent space learnt by the model

    Args:
        data: (array) corresponding array containing the data
        labels: (array) corresponding array containing the labels
        save: (bool) whether to save the latent space plot

    Returns:
        fig: (figure) plot of the latent space
    """
    # obtain the latent features
    features = self.latent_features(data)
    # plot only the first 2 dimensions, colored by label
    fig = plt.figure(figsize=(8, 6))
    plt.scatter(features[:, 0], features[:, 1], c=labels, marker='o',
                edgecolor='none', cmap=plt.cm.get_cmap('jet', 10), s = 10)
    plt.colorbar()
    if(save):
        fig.savefig('latent_space.png')
    return fig
def generate_data(self, num_elements=1, category=0):
    """Generate data for a specified category

    Args:
        num_elements: (int) number of elements to generate
        category: (int) category from which we will generate data

    Returns:
        generated data according to num_elements
    """
    # gaussian noise for each element
    noise = tf.random_normal([num_elements, self.gaussian_size],mean = 0, stddev = 1, dtype= tf.float32)
    indices = (np.ones(num_elements)*category).astype(int).tolist()
    # category is specified with a one-hot array
    categorical = tf.one_hot(indices, self.num_classes)
    # use the gaussian noise and category to generate data from the generator
    # NOTE(review): self.output_size is only set inside train() — calling
    # this before training will raise AttributeError; confirm intended use.
    out = self.network.decoder(noise, categorical, self.output_size)
    return self.sess.run(out, feed_dict={self.network.temperature: self.temperature
                         ,self.w_metric: self._w_metric
                         ,self.learning_rate:self.lr})
def random_generation(self, num_elements=1):
    """Randomly generate `num_elements` samples for every category.

    Args:
        num_elements: (int) number of elements to generate per category

    Returns:
        (array) generated data, grouped by category (num_elements rows each)
    """
    # one noise vector per generated sample, across all categories
    noise = tf.random_normal([num_elements * self.num_classes, self.gaussian_size],
                             mean=0, stddev=1, dtype=tf.float32)
    # category ids [0,...,0, 1,...,1, ...] — num_elements copies of each class
    class_indices = np.repeat(np.arange(self.num_classes), num_elements).astype(int).tolist()
    one_hot = tf.one_hot(class_indices, self.num_classes)
    # decode noise + categories into data space
    generated = self.network.decoder(noise, one_hot, self.output_size)
    feed = {self.network.temperature: self.temperature,
            self.w_metric: self._w_metric,
            self.learning_rate: self.lr}
    return self.sess.run(generated, feed_dict=feed)
def style_generation(self, data):
    """Style-transfer generation: keep each input's gaussian "style" code and
    decode it once per category.

    Args:
        data: (array) [num_elem, n_features] input examples providing the style

    Returns:
        (array) generated data; for each input row there are num_classes
        consecutive output rows, one per category
    """
    # repeat every input row num_classes times so each copy can be decoded
    # with a different category
    num_elem = data.shape[0]
    data = np.repeat(data, self.num_classes, axis=0)
    tf_data = tf.convert_to_tensor(data)
    # encoder is called with is_training=False, so variables are reused from
    # the training graph (reuse=not is_training inside the network scopes)
    latent = self.network.encoder(tf_data, self.num_classes)
    gaussian = latent['gaussian']
    # category ids [0..num_classes-1] tiled once per input element, matching
    # the repeat above
    indices = np.tile(range(self.num_classes), num_elem)
    categorical = tf.one_hot(indices, self.num_classes)
    # decode the per-input style with every category label
    out = self.network.decoder(gaussian, categorical, self.output_size)
    return self.sess.run(out, feed_dict={self.network.temperature: self.temperature
                         ,self.w_metric: self._w_metric
                         ,self.learning_rate:self.lr})
|
{"/model/SSVAE.py": ["/networks/CatVAENetwork.py", "/losses/LossFunctions.py", "/utils/partition.py", "/utils/assignment.py"]}
|
38,805
|
kapitsa2811/semisupervised-vae-metric-embedding
|
refs/heads/master
|
/utils/partition.py
|
"""
---------------------------------------------------------------------
-- Author: Jhosimar George Arias Figueroa
---------------------------------------------------------------------
Util functions for partitioning input data
"""
import numpy as np
def partition_train_val(x_train, y_train, proportion, num_classes, shuffle=True):
    """
    Partition data into train and validation splits, stratified by class.

    For every class, a random `proportion` of that class's examples goes to
    the train split and the remainder to the validation split.

    Args:
        x_train: (array) corresponding array containing the input data
        y_train: (array) corresponding array containing the labels of the input data
        proportion: (float) proportion of examples to consider in the train dataset (0.0-1.0)
        num_classes: (int) number of labels
        shuffle: (bool) whether to shuffle each split after the per-class
            concatenation (otherwise splits are grouped by class)

    Returns:
        train_data: (array) corresponding array containing partitioned train data
        train_labels: (array) corresponding array containing partitioned labels of train data
        val_data: (array) corresponding array containing partitioned validation data
        val_labels: (array) corresponding array containing partitioned labels of validation data
    """
    # accumulate the selected row indices per split
    train_data_indices = np.array([], dtype=np.int32)
    val_data_indices = np.array([], dtype=np.int32)
    for i in range(num_classes):
        # row indices belonging to class i
        subdata = np.where(y_train == i)[0]
        num_samples = subdata.shape[0]
        # randomly partition this class's indices by the requested proportion
        indices = np.random.permutation(num_samples)
        train_size = int(proportion * num_samples)
        train_indices, val_indices = indices[:train_size], indices[train_size:]
        train_subdata, val_subdata = subdata[train_indices], subdata[val_indices]
        # concatenate indices across classes
        train_data_indices = np.hstack([train_data_indices, train_subdata])
        val_data_indices = np.hstack([val_data_indices, val_subdata])
    if shuffle:
        # break the class-grouped ordering produced above
        np.random.shuffle(train_data_indices)
        np.random.shuffle(val_data_indices)
    # materialize the two splits from the selected indices
    train_data, train_labels = x_train[train_data_indices], y_train[train_data_indices]
    val_data, val_labels = x_train[val_data_indices], y_train[val_data_indices]
    return train_data, train_labels, val_data, val_labels
def create_semisupervised_dataset(x, y, num_classes, num_labeled=100):
    """
    Partition data into labeled and unlabeled sets with a class-balanced
    labeled subset.

    Args:
        x: (array) corresponding array containing the input data
        y: (array) corresponding array containing the labels of the input data
        num_classes: (int) number of classes
        num_labeled: (int) total number of labeled examples (must be divisible
            by num_classes so each class contributes the same amount)

    Returns:
        x_labeled: (array) partitioned labeled data
        y_labeled: (array) labels of the labeled data
        x_unlabeled: (array) partitioned unlabeled data
        y_unlabeled: (array) labels of the unlabeled data (kept for evaluation)

    Raises:
        ValueError: if num_labeled is not divisible by num_classes
    """
    # BUGFIX: raising a plain string is a TypeError in Python 3; raise a real
    # exception instead so callers can actually catch the condition.
    if num_labeled % num_classes != 0:
        raise ValueError("wished number of labeled samples not divisible by number of classes")
    # number of labeled examples taken from each class (balanced partition)
    labeled_per_class = num_labeled // num_classes
    x_labeled = [0] * num_classes
    y_labeled = [0] * num_classes
    x_unlabeled = [0] * num_classes
    y_unlabeled = [0] * num_classes
    for i in range(num_classes):
        # indices of class i, randomly shuffled before the split
        indices = np.where(y == i)[0]
        indices = np.random.permutation(indices)
        x_labeled[i] = x[indices[:labeled_per_class]]
        y_labeled[i] = y[indices[:labeled_per_class]]
        x_unlabeled[i] = x[indices[labeled_per_class:]]
        y_unlabeled[i] = y[indices[labeled_per_class:]]
    return np.vstack(x_labeled), np.hstack(y_labeled), np.vstack(x_unlabeled), np.hstack(y_unlabeled)
def flatten_array(x):
    """
    Collapse all trailing axes of an array into one, yielding a 2-D array.

    Args:
        x: (array) input array of shape (n, d1, d2, ...)

    Returns:
        (array) the same data reshaped to (n, d1*d2*...)
    """
    # total size of the non-batch dimensions
    trailing = np.prod(x.shape[1:])
    return np.reshape(x, (-1, trailing))
|
{"/model/SSVAE.py": ["/networks/CatVAENetwork.py", "/losses/LossFunctions.py", "/utils/partition.py", "/utils/assignment.py"]}
|
38,806
|
kapitsa2811/semisupervised-vae-metric-embedding
|
refs/heads/master
|
/networks/CatVAENetwork.py
|
"""
---------------------------------------------------------------------
-- Author: Jhosimar George Arias Figueroa
---------------------------------------------------------------------
Categorical Variational Autoencoder Networks
"""
import tensorflow as tf
class CatVAENetwork:
    """Encoder/decoder networks for a categorical (Gumbel-Softmax) VAE.

    Provides convolutional architectures for 28x28x1 (mnist) and 32x32x3
    inputs plus a fully connected variant. Every encoder produces a gaussian
    latent (reparameterization trick) and a categorical latent sampled with
    the Gumbel-Softmax trick; every decoder maps both latents back to data
    space, with the output activation chosen by the configured loss type.
    """

    # small constant for numerical stability inside log/sqrt
    eps = 1e-8

    def __init__(self, params):
        """Store architecture hyper-parameters.

        Args:
            params: configuration object exposing feature_size, gaussian_size,
                hard_gumbel, loss_type and dataset; may be None, in which case
                no attributes are initialized.
        """
        if params is not None:
            # annealed Gumbel-Softmax temperature, fed at run time
            self.temperature = tf.placeholder(tf.float32, [])
            self.feature_size = params.feature_size
            self.gaussian_size = params.gaussian_size
            self.hard_gumbel = params.hard_gumbel
            self.loss_type = params.loss_type
            self.dataset = params.dataset

    def latent_gaussian(self, hidden, gaussian_size):
        """Sample from the Gaussian distribution via the reparameterization trick.

        Args:
            hidden: (array) [batch_size, n_features] features obtained by the encoder
            gaussian_size: (int) size of the gaussian sample vector

        Returns:
            (dict) contains the nodes of the mean, log of variance and gaussian
        """
        out = hidden
        mean = tf.layers.dense(out, units=gaussian_size)
        logVar = tf.layers.dense(out, units=gaussian_size)
        noise = tf.random_normal(tf.shape(mean), mean=0, stddev=1, dtype=tf.float32)
        # z = mu + sigma * noise; eps keeps sqrt away from 0
        z = mean + tf.sqrt(tf.exp(logVar) + self.eps) * noise
        return {'mean': mean, 'logVar': logVar, 'gaussian': z}

    def sample_gumbel(self, shape):
        """Sample from Gumbel(0, 1).

        Args:
            shape: (array) containing the dimensions of the specified sample
        """
        U = tf.random_uniform(shape, minval=0, maxval=1)
        return -tf.log(-tf.log(U + self.eps) + self.eps)

    def gumbel_softmax(self, logits, temperature, hard=False):
        """Sample from the Gumbel-Softmax distribution and optionally discretize.

        Args:
            logits: (array) [batch_size, n_class] unnormalized log-probs
            temperature: (float) non-negative scalar
            hard: (boolean) if True, take argmax, but differentiate w.r.t. soft sample y

        Returns:
            y: (array) [batch_size, n_class] sample from the Gumbel-Softmax distribution.
            If hard=True, then the returned sample will be one-hot, otherwise it will
            be a probability distribution that sums to 1 across classes
        """
        gumbel_softmax_sample = logits + self.sample_gumbel(tf.shape(logits))
        # BUGFIX: divide by the `temperature` argument instead of always
        # reading self.temperature (the parameter used to be ignored; all
        # current call sites pass self.temperature, so behavior is unchanged).
        y = tf.nn.softmax(gumbel_softmax_sample / temperature)
        if hard:
            # straight-through estimator: forward pass uses the one-hot
            # argmax, gradients flow through the soft sample y
            y_hard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims=True)), y.dtype)
            y = tf.stop_gradient(y_hard - y) + y
        return y

    def _latent_heads(self, features, num_classes):
        """Shared gaussian + categorical heads used by every encoder variant.

        Args:
            features: (array) [batch_size, feature_size] deterministic features
            num_classes: (int) number of classification classes

        Returns:
            (dict) contains the features, gaussian and categorical information
        """
        # gaussian head is built first to preserve the original layer order
        output = self.latent_gaussian(features, self.gaussian_size)
        logits = tf.layers.dense(features, units=num_classes)
        categorical = self.gumbel_softmax(logits, self.temperature, self.hard_gumbel)
        prob = tf.nn.softmax(logits)
        # keep every graph node the loss functions need
        output['categorical'] = categorical
        output['prob_cat'] = prob
        output['log_prob_cat'] = tf.log(prob + self.eps)
        output['features'] = features
        output['logits'] = logits
        return output

    def _output_layer(self, out, output_size):
        """Final reconstruction layer, activation chosen by the loss type."""
        # bce reconstruction expects outputs in [0, 1]; other losses are linear
        if self.loss_type == 'bce':
            return tf.layers.dense(out, units=output_size, activation=tf.nn.sigmoid)
        return tf.layers.dense(out, units=output_size)

    def encoder_conv32x32(self, input_data, num_classes, is_training=False):
        """Convolutional inference network for 32x32x3 input images.

        Args:
            input_data: (array) [batch_size, n_features=3072] input images
            num_classes: (int) number of classification classes
            is_training: (bool) whether we are in training phase or not

        Returns:
            (dict) contains the features, gaussian and categorical information
        """
        # variables are created at training time and reused afterwards
        with tf.variable_scope('encoder_conv32x32', reuse=not is_training):
            out = tf.reshape(input_data, [-1, 32, 32, 3])
            # number of filters, kernels and stride per conv layer
            filters = [16, 32, 64, 128, 256, self.feature_size]
            kernels = [5, 5, 5, 3, 4, 8]
            strides = [1, 1, 1, 1, 2, 1]
            for i, num_filters in enumerate(filters):
                out = tf.layers.conv2d(out, num_filters, kernel_size=kernels[i],
                                       strides=strides[i], padding="valid")
                out = tf.layers.batch_normalization(out, training=is_training)
                out = tf.nn.relu(out)
            out = tf.layers.flatten(out)
            return self._latent_heads(out, num_classes)

    def decoder_conv32x32(self, gaussian, categorical, output_size, is_training=False):
        """Convolutional generative network for 32x32x3 input images.

        Args:
            gaussian: (array) [batch_size, gaussian_size] latent gaussian vector
            categorical: (array) [batch_size, num_classes] latent categorical vector
            output_size: (int) size of the output image
            is_training: (bool) whether we are in training phase or not

        Returns:
            (array) array containing the generated/reconstructed image
        """
        with tf.variable_scope('decoder_conv32x32', reuse=not is_training):
            # project both latents to the same size and merge them by addition
            gaussian = tf.layers.dense(gaussian, units=self.feature_size)
            categorical = tf.layers.dense(categorical, units=self.feature_size)
            out = gaussian + categorical
            out = tf.reshape(out, [-1, 1, 1, self.feature_size])
            # mirror of the encoder architecture
            filters = [256, 128, 64, 32, 16, 3]
            kernels = [8, 4, 3, 5, 5, 5]
            strides = [1, 2, 1, 1, 1, 1]
            for i, num_filters in enumerate(filters):
                out = tf.layers.conv2d_transpose(out, num_filters, kernel_size=kernels[i],
                                                 strides=strides[i], padding='valid')
                out = tf.layers.batch_normalization(out, training=is_training)
                out = tf.nn.relu(out)
            out = tf.reshape(out, [-1, 32 * 32 * 3])
            return self._output_layer(out, output_size)

    def encoder_conv(self, input_data, num_classes, is_training=False):
        """Convolutional inference network for 28x28x1 input images.

        Args:
            input_data: (array) [batch_size, n_features=784] input images
            num_classes: (int) number of classification classes
            is_training: (bool) whether we are in training phase or not

        Returns:
            (dict) contains the features, gaussian and categorical information
        """
        with tf.variable_scope('encoder_cnn', reuse=not is_training):
            out = tf.reshape(input_data, [-1, 28, 28, 1])
            # number of filters, kernels and stride per conv layer
            filters = [16, 32, 64, 128, self.feature_size]
            kernels = [5, 5, 4, 5, 3]
            strides = [1, 1, 2, 2, 1]
            for i, num_filters in enumerate(filters):
                out = tf.layers.conv2d(out, num_filters, kernel_size=kernels[i],
                                       strides=strides[i], padding="valid")
                out = tf.layers.batch_normalization(out, training=is_training)
                out = tf.nn.relu(out)
            out = tf.layers.flatten(out)
            return self._latent_heads(out, num_classes)

    def decoder_conv(self, gaussian, categorical, output_size, is_training=False):
        """Convolutional generative network for 28x28x1 input images.

        Args:
            gaussian: (array) [batch_size, gaussian_size] latent gaussian vector
            categorical: (array) [batch_size, num_classes] latent categorical vector
            output_size: (int) size of the output image
            is_training: (bool) whether we are in training phase or not

        Returns:
            (array) array containing the generated/reconstructed image
        """
        with tf.variable_scope('decoder_conv', reuse=not is_training):
            # project both latents to the same size and merge them by addition
            gaussian = tf.layers.dense(gaussian, units=self.feature_size)
            categorical = tf.layers.dense(categorical, units=self.feature_size)
            out = gaussian + categorical
            out = tf.reshape(out, [-1, 1, 1, self.feature_size])
            # mirror of the encoder architecture
            filters = [128, 64, 32, 16, 1]
            kernels = [3, 5, 4, 5, 5]
            strides = [1, 2, 2, 1, 1]
            for i, num_filters in enumerate(filters):
                out = tf.layers.conv2d_transpose(out, num_filters, kernel_size=kernels[i],
                                                 strides=strides[i], padding='valid')
                out = tf.layers.batch_normalization(out, training=is_training)
                out = tf.nn.relu(out)
            out = tf.reshape(out, [-1, 28 * 28])
            return self._output_layer(out, output_size)

    def encoder_fc(self, input_data, num_classes, is_training=False):
        """Fully connected inference network.

        NOTE: not selected by encoder(); kept as an alternative architecture.

        Args:
            input_data: (array) [batch_size, n_features=784] input images
            num_classes: (int) number of classification classes
            is_training: (bool) whether we are in training phase or not

        Returns:
            (dict) contains the features, gaussian and categorical information
        """
        with tf.variable_scope('encoder_fc', reuse=not is_training):
            out = input_data
            out = tf.layers.dense(out, units=500)
            out = tf.layers.batch_normalization(out, training=is_training)
            out = tf.nn.relu(out)
            out = tf.layers.dense(out, units=500)
            out = tf.layers.batch_normalization(out, training=is_training)
            out = tf.nn.relu(out)
            out = tf.layers.dense(out, units=self.feature_size)
            return self._latent_heads(out, num_classes)

    def decoder_fc(self, gaussian, categorical, output_size, is_training=False):
        """Fully connected generative network.

        NOTE: not selected by decoder(); kept as an alternative architecture.

        Args:
            gaussian: (array) [batch_size, gaussian_size] latent gaussian vector
            categorical: (array) [batch_size, num_classes] latent categorical vector
            output_size: (int) size of the output image
            is_training: (bool) whether we are in training phase or not

        Returns:
            (array) array containing the generated/reconstructed image
        """
        with tf.variable_scope('decoder_fc', reuse=not is_training):
            # project both latents to the same size and merge them by addition
            gaussian = tf.layers.dense(gaussian, units=self.feature_size)
            categorical = tf.layers.dense(categorical, units=self.feature_size)
            out = gaussian + categorical
            out = tf.layers.dense(out, units=self.feature_size)
            out = tf.layers.batch_normalization(out, training=is_training)
            out = tf.nn.relu(out)
            out = tf.layers.dense(out, units=500)
            out = tf.layers.batch_normalization(out, training=is_training)
            out = tf.nn.relu(out)
            out = tf.layers.dense(out, units=500)
            out = tf.layers.batch_normalization(out, training=is_training)
            out = tf.nn.relu(out)
            return self._output_layer(out, output_size)

    def encoder(self, input_data, num_classes, is_training=False):
        """Inference/Encoder network.

        Args:
            input_data: (array) [batch_size, n_features] input images
            num_classes: (int) number of classification classes
            is_training: (bool) whether we are in training phase or not

        Returns:
            (dict) contains the features, gaussian and categorical information
        """
        if self.dataset == 'mnist':
            # mnist uses the 28x28x1 convolutional network
            latent_spec = self.encoder_conv(input_data, num_classes, is_training)
        else:
            # any other dataset (e.g. svhn) uses the 32x32x3 convolutional network
            latent_spec = self.encoder_conv32x32(input_data, num_classes, is_training)
        return latent_spec

    def decoder(self, gaussian, categorical, output_size, is_training=False):
        """Generative/Decoder network of our model.

        Args:
            gaussian: (array) [batch_size, gaussian_size] latent gaussian vector
            categorical: (array) [batch_size, num_classes] latent categorical vector
            output_size: (int) size of the output image
            is_training: (bool) whether we are in training phase or not

        Returns:
            (array) array containing the generated/reconstructed image
        """
        if self.dataset == 'mnist':
            # mnist uses the 28x28x1 convolutional network
            output = self.decoder_conv(gaussian, categorical, output_size, is_training)
        else:
            # any other dataset (e.g. svhn) uses the 32x32x3 convolutional network
            output = self.decoder_conv32x32(gaussian, categorical, output_size, is_training)
        return output
|
{"/model/SSVAE.py": ["/networks/CatVAENetwork.py", "/losses/LossFunctions.py", "/utils/partition.py", "/utils/assignment.py"]}
|
38,807
|
kapitsa2811/semisupervised-vae-metric-embedding
|
refs/heads/master
|
/utils/assignment.py
|
"""
---------------------------------------------------------------------
-- Author: Jhosimar George Arias Figueroa
---------------------------------------------------------------------
Util functions for assignment of unlabeled data
"""
import tensorflow as tf
def bincount_matrix(x, num_classes):
    """Count occurrences of each value per row of a matrix of non-negative ints.

    Args:
        x: (array) corresponding array containing non-negative values
        num_classes: (int) number of classification classes

    Returns:
        output: (array) [num_rows, num_classes] occurrence counts per row
    """
    x = tf.cast(x, tf.int32)
    offset = tf.constant(num_classes, dtype=tf.int32)
    # shift each row's values into a disjoint bucket range so one flat
    # bincount can count every row independently
    row_ids = tf.range(tf.shape(x)[0])[:, None]
    shifted = x + offset * row_ids
    counts = tf.bincount(tf.layers.flatten(shifted),
                         minlength=offset * tf.shape(x)[0])
    return tf.reshape(counts, [-1, num_classes])
def assign_labels_semisupervised(features, labels, num_labeled, batch_size, num_classes, knn):
    """Assign labels to unlabeled data based on the k-nearest-neighbors
    Args:
        features: (array) corresponding array containing the features of the input data
        labels: (array) corresponding array containing the labels of the labeled data
        num_labeled: (int) num of labeled data per batch
        batch_size: (int) training batch size
        num_classes: (int) number of classification classes
        knn: (int) number of k-nearest neighbors to use
    Returns:
        output: (array) labels assigned to the unlabeled examples followed by
            the provided labels of the labeled examples
    """
    # Gram matrix of the embeddings, basis of the pairwise distance formula
    dot_product = tf.matmul(features, tf.transpose(features))
    # get squared L2 norm for each embedding. We can just take the diagonal of `dot_product`.
    # this also provides more numerical stability (the diagonal of the result will be exactly 0).
    # shape (batch_size,)
    square_norm = tf.diag_part(dot_product)
    # compute the pairwise distance matrix as we have:
    # ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2
    # shape (batch_size, batch_size)
    distances = tf.expand_dims(square_norm, 1) - 2.0 * dot_product + tf.expand_dims(square_norm, 0)
    # because of computation errors, some distances might be negative so we put everything >= 0.0
    distances = tf.maximum(distances, 0.0)
    # NOTE(review): this slice keeps rows [0, batch_size - num_labeled) and
    # columns [batch_size - num_labeled, batch_size), i.e. it assumes the
    # unlabeled examples come FIRST in the batch and the labeled ones LAST —
    # confirm against the input pipeline that builds the batches.
    distances = tf.slice(distances, [0, batch_size - num_labeled], [batch_size - num_labeled, -1])
    # negate distances so top_k (which selects largest) yields the closest ones
    neg_one = tf.constant(-1.0, dtype=tf.float32)
    neg_distances = tf.multiply(distances, neg_one)
    # get top K largest distances, because we negated the distances, we will get the closest ones
    _, idx = tf.nn.top_k(neg_distances, knn)
    # get the true labels of the K-nearest neighbors
    knn_labels = tf.gather(labels, idx)
    # count how often each class appears among the K neighbors of each row
    count = bincount_matrix(knn_labels, num_classes)
    # assign the label of the maximum obtained from k-nn (majority vote)
    assignment = tf.argmax(count, axis=1)
    # return the assigned labels for the unlabeled data and labels of the labeled data
    return tf.concat([assignment, labels], 0)
|
{"/model/SSVAE.py": ["/networks/CatVAENetwork.py", "/losses/LossFunctions.py", "/utils/partition.py", "/utils/assignment.py"]}
|
38,808
|
kapitsa2811/semisupervised-vae-metric-embedding
|
refs/heads/master
|
/losses/LossFunctions.py
|
# -*- coding: utf-8 -*-
"""
---------------------------------------------------------------------
-- Author: Jhosimar George Arias Figueroa
---------------------------------------------------------------------
Loss functions used for training our model
"""
import tensorflow as tf
class LossFunctions:
    """Collection of loss terms used to train the categorical VAE."""

    # small constant added inside logs/squares for numerical stability
    eps = 1e-8

    def binary_cross_entropy(self, real, predictions, average=True):
        """Binary Cross Entropy between the true and predicted outputs
        loss = (1/n) * -Σ(real*log(predicted) + (1 - real)*log(1 - predicted))
        Args:
            real: (array) corresponding array containing the true labels
            predictions: (array) corresponding array containing the predicted labels
            average: (bool) whether to average the per-sample losses
        Returns:
            output: (float) mean of the per-sample losses when average is True,
                otherwise the summed loss over the whole batch
        """
        # per-sample BCE summed over the feature axis; eps avoids log(0)
        loss = -tf.reduce_sum( real * tf.log(predictions + self.eps) +
                              (1 - real) * tf.log(1 - predictions + self.eps), axis = 1 )
        if average:
            return tf.reduce_mean(loss)
        else:
            return tf.reduce_sum(loss)

    def mean_squared_error(self, real, predictions, average=True):
        """Mean Squared Error between the true and predicted outputs
        loss = (1/n)*Σ(real - predicted)^2
        Args:
            real: (array) corresponding array containing the true labels
            predictions: (array) corresponding array containing the predicted labels
            average: (bool) whether to average the element-wise losses
        Returns:
            output: (float) mean of the element-wise squared errors when average
                is True, otherwise their total sum
        """
        loss = tf.square(real - predictions)
        if average:
            return tf.reduce_mean(loss)
        else:
            return tf.reduce_sum(loss)

    def kl_gaussian(self, mean, logVar, average=True):
        """KL Divergence between the posterior and a prior gaussian distribution (N(0,1))
        loss = (1/n) * -0.5 * Σ(1 + log(σ^2) - σ^2 - μ^2)
        Args:
            mean: (array) corresponding array containing the mean of our inference model
            logVar: (array) corresponding array containing the log(variance) of our inference model
            average: (bool) whether to average the per-sample KL terms
        Returns:
            output: (float) mean of the per-sample KL terms when average is True,
                otherwise their total sum
        """
        # NOTE(review): eps is added to the mean *inside* the square, slightly
        # biasing the μ² term; verify this is intentional.
        loss = -0.5 * tf.reduce_sum(1 + logVar - tf.exp(logVar) - tf.square(mean + self.eps), 1 )
        if average:
            return tf.reduce_mean(loss)
        else:
            return tf.reduce_sum(loss)

    def kl_categorical(self, qx, log_qx, k, average=True):
        """KL Divergence between the posterior and a prior uniform distribution (U(0,1))
        loss = (1/n) * Σ(qx * log(qx/px)), because we use a uniform prior px = 1/k
        loss = (1/n) * Σ(qx * (log(qx) - log(1/k)))
        Args:
            qx: (array) corresponding array containing the probs of our inference model
            log_qx: (array) corresponding array containing the log(probs) of our inference model
            k: (int) number of classes
            average: (bool) whether to average the per-sample KL terms
        Returns:
            output: (float) mean of the per-sample KL terms when average is True,
                otherwise their total sum
        """
        loss = tf.reduce_sum(qx * (log_qx - tf.log(1.0/k)), 1)
        if average:
            return tf.reduce_mean(loss)
        else:
            return tf.reduce_sum(loss)
|
{"/model/SSVAE.py": ["/networks/CatVAENetwork.py", "/losses/LossFunctions.py", "/utils/partition.py", "/utils/assignment.py"]}
|
38,831
|
MhdMousaHAMAD/collaborative-filtering
|
refs/heads/master
|
/recommenders/collaborative_filtering.py
|
from math import sqrt
from sklearn.cross_validation import KFold
from sklearn.metrics import mean_squared_error
import csv
import time
# Return Pearson Correlation Coefficient for rows at key1 and key2 in dataset dictionary
# Rows represent users or items when the dictionary is transposed
# Pearson correlation between the rows stored under key1 and key2, computed
# over their co-rated items only, and linearly rescaled from [-1, 1] to [0, 1].
# Rows represent users or items when the dictionary is transposed.
def similarity_pearson(dataset, key1, key2):
    ratings1, ratings2 = dataset[key1], dataset[key2]
    # items rated by both keys
    common = [item for item in ratings1 if item in ratings2]
    # no overlap means no evidence of correlation
    if not common:
        return 0
    n = len(common)
    v1 = [ratings1[item] for item in common]
    v2 = [ratings2[item] for item in common]
    # sums, sums of squares and sum of products over the co-rated items
    sum1, sum2 = sum(v1), sum(v2)
    sum1_sq = sum(r * r for r in v1)
    sum2_sq = sum(r * r for r in v2)
    prod_sum = sum(a * b for a, b in zip(v1, v2))
    # computational formula for Pearson's r
    numerator = prod_sum - (sum1 * sum2 / n)
    denominator = sqrt((sum1_sq - pow(sum1, 2) / n) * (sum2_sq - pow(sum2, 2) / n))
    if denominator == 0:
        return 0
    score = numerator / denominator
    # rescale r from [-1, 1] to [0, 1]
    return (score + 1) / 2
# Return Cosine Similarity for items at key1 and key2 in dataset dictionary
def similarity_cosine(dataset, key1, key2):
# Get mutual items
mutual_items = {}
for item in dataset[key1]:
if item in dataset[key2]:
mutual_items[item] = 1
# If there are no ratings in common, return 0
if len(mutual_items) == 0:
return 0
# Sum of the rating squares
sum1_squares = sum([pow(dataset[key1][item], 2) for item in dataset[key1]])
sum2_squares = sum([pow(dataset[key2][item], 2) for item in dataset[key2]])
# Sum of the products
sum_product = sum([dataset[key1][item] * dataset[key2][item] for item in mutual_items])
# Calculate score
numerator = sum_product
denominator = sqrt(sum1_squares) * sqrt(sum2_squares)
if denominator == 0:
return 0
score = numerator / denominator
return score
class CollaborativeFiltering:
# Transform rows into columns and vice versa
# Transform dataset from user-centric to item-centric and vice-versa
# Returns the transposed dataset
@staticmethod
def __transpose_dataset(dataset):
transposed_dataset = {}
for item_i in dataset:
for item_j in dataset[item_i]:
transposed_dataset.setdefault(item_j, {})
transposed_dataset[item_j][item_i] = dataset[item_i][item_j]
return transposed_dataset
    # Cross validate item-based collaborative filtering
    # "external_similarities" is an optional parameter to pass item-item (user-user) similarities ...
    # These similarities could be computed using some external resources like items' (users') meta-data
    # These similarities should be provided in a dictionary of key01-key02 keys,
    # where key01, key02 are user_ids in UBCF or item_ids in IBCF
    @staticmethod
    def cross_validate(path,
                       item_based=True,
                       k=30,
                       similarity=similarity_cosine,
                       n_folds=10,
                       models_directory='',
                       load_models=False,
                       external_similarities=None,
                       alpha=0.5):
        """Run n-fold cross validation of the CF model over the ratings file at
        `path` (tab-separated user/item/rating triples) and return the mean
        RMSE across folds. Per-fold similarity models can be saved to or
        loaded from `models_directory` to speed up repeated runs.
        """
        # accumulated RMSE over all folds; divided by n_folds at the end
        rmse = 0
        model_name = ''
        if item_based:
            model_name = 'items'
        else:
            model_name = 'users'
        # Read dataset and split it
        dataset = []
        with open(path, newline='') as file:
            reader = csv.reader(file, delimiter='\t', quotechar='|')
            for row in reader:
                (user_id, movie_id, rating) = (int(row[0]), int(row[1]), float(row[2]))
                dataset.append((user_id, movie_id, rating))
        ''' ***
        # This code generates one similarity model for all folds
        # It is faster but not the best choice,
        # as it uses ratings from the testset in computing similarity
        if not load_models:
            cf = CollaborativeFiltering(k, similarity)
            cf.set_dataset(dataset)
            cf.train(item_based=item_based)
            cf.save_model(models_directory + '/{}_model_f{}_sim_{}.csv'.format(model_name, 'All', similarity.__name__))
        '''
        # Use shuffle=True to shuffle the folds selection
        folds = KFold(n=len(dataset), n_folds=n_folds, shuffle=True)
        fold = 0
        for train_indices, test_indices in folds:
            fold += 1
            training_set = [dataset[i] for i in train_indices]
            test_set = [dataset[i] for i in test_indices]
            print("Fold (%d) started (%s)" % (fold, time.strftime('%y_%m_%d_%H_%M_%S')))
            # a fresh model per fold, trained only on this fold's training set
            cf = CollaborativeFiltering(k, similarity)
            cf.set_dataset(training_set)
            # Saving models for later faster use to test the implementation
            # '''***
            if load_models:
                if models_directory:
                    cf.load_model(models_directory + '/{}_model_f{}_sim_{}.csv'.format(model_name, fold, similarity.__name__))
            else:
                cf.train(item_based=item_based)
                if models_directory:
                    cf.save_model(models_directory + '/{}_model_f{}_sim_{}.csv'.format(model_name, fold, similarity.__name__))
            # '''
            '''***'''
            # if models_directory:
            #     cf.load_model(models_directory + '/{}_model_f{}_sim_{}.csv'.format(model_name, 'All', similarity.__name__))
            # Inject the external similarities if they were provided
            if external_similarities is not None:
                cf.modify_pairwise_similarity(external_similarities, alpha=alpha)
            cf.predict_missing_ratings(item_based=item_based)
            predict_set = cf.predict_for_set(test_set)
            # RMSE of this fold's predictions against the held-out ratings
            rmse += mean_squared_error([rec[2] for rec in test_set], [rec[2] for rec in predict_set]) ** 0.5
            print("Fold (%d) finished with accumulated RMSE of (%f) (%s)" % (fold, rmse, time.strftime('%y_%m_%d_%H_%M_%S')))
        return rmse / float(n_folds)
def cross_validate_item_based(path,
k=30,
similarity=similarity_cosine,
n_folds=10,
models_directory='',
load_models=False,
external_similarities=None,
alpha=0.5):
CollaborativeFiltering.cross_validate(path=path,
item_based=True,
k=k,
similarity=similarity,
n_folds=n_folds,
models_directory=models_directory,
load_models=load_models,
external_similarities=external_similarities,
alpha=alpha)
def cross_validate_user_based(path,
k=30,
similarity=similarity_cosine,
n_folds=10,
models_directory='',
load_models=False,
external_similarities=None,
alpha=0.5):
CollaborativeFiltering.cross_validate(path=path,
item_based=False,
k=k,
similarity=similarity,
n_folds=n_folds,
models_directory=models_directory,
load_models=load_models,
external_similarities=external_similarities,
alpha=alpha)
    # Constructor
    def __init__(self, k=25, similarity=similarity_pearson):
        """Create a collaborative-filtering model.

        Args:
            k: (int) number of nearest neighbours considered
            similarity: (callable) pairwise similarity function
        """
        # user -> {item -> rating} mapping, filled by set_dataset/load_dataset
        self.__dataset = {}
        # cached pairwise similarity scores (user-user or item-item)
        self.__pairwise_similarity = {}
        # user -> mean rating, used for normalization
        self.__mean_user_ratings = {}
        self.k = k
        self.similarity = similarity
# Normalize dataset by subtracting mean user ratings
def __normalize_dataset(self):
for user in self.__dataset:
for item in self.__dataset[user]:
self.__dataset[user][item] -= self.__mean_user_ratings[user]
# Denormalize dataset by adding mean user ratings
def __denormalize_dataset(self):
for user in self.__dataset:
for item in self.__dataset[user]:
self.__dataset[user][item] += self.__mean_user_ratings[user]
# Set the dataset from a triples list
# The triples must be in the following format (user, item, rating)
def set_dataset(self, dataset):
self.__dataset = {}
for (user_id, movie_id, rating) in dataset:
self.__dataset.setdefault(int(user_id), {})
self.__dataset[int(user_id)][int(movie_id)] = float(rating)
# Set mean user ratings
self.__mean_user_ratings = {}
for user in self.__dataset:
self.__mean_user_ratings[user] = sum(self.__dataset[user].values()) / len(self.__dataset[user].values())
# Load dataset from a csv file that is formatted as triples
# The triples must be in the following format (user, item, rating)
def load_dataset(self, path):
dataset = []
with open(path, newline='') as file:
reader = csv.reader(file, delimiter='\t', quotechar='|')
for row in reader:
(user_id, movie_id, rating) = (int(row[0]), int(row[1]), float(row[2]))
dataset.append((user_id, movie_id, rating))
self.set_dataset(dataset)
return dataset
# Calculate pairwise similarity scores
# user-user similarity for UBCF
# item-item similarity for IBCF
def calculate_pairwise_similarity(self, item_based=True):
self.__pairwise_similarity = {}
dataset_centered = self.__dataset
# If the algorithm it item-based collaborative filtering,
# invert the dataset to be item-centric
if item_based:
dataset_centered = CollaborativeFiltering.__transpose_dataset(self.__dataset)
c = 0
# key_i, key_j are user_ids in UBCF or item_ids in IBCF
for key_i in dataset_centered:
# Status updates for large datasets
c += 1
if c % 100 == 0:
print("Pairwise_Similarity: %d / %d (%s)" % (c, len(dataset_centered), time.strftime('%y_%m_%d_%H_%M_%S')))
self.__pairwise_similarity.setdefault(key_i, {})
# Calculate how similar this object to other objects
for key_j in dataset_centered:
# If the similarity is calculated before, don't calculate it again
if key_j in self.__pairwise_similarity:
if key_i in self.__pairwise_similarity[key_j]:
self.__pairwise_similarity[key_i][key_j] = self.__pairwise_similarity[key_j][key_i]
continue
# If key_i is item_j set the similarity to one
if key_i == key_j:
self.__pairwise_similarity[key_i][key_j] = 1
continue
self.__pairwise_similarity[key_i][key_j] = self.similarity(dataset_centered, key_i, key_j)
# Train the model
# This method is simply calling calculate_pairwise_similarity
def train_item_based(self):
self.calculate_pairwise_similarity(item_based=True)
def train_user_based(self):
self.calculate_pairwise_similarity(item_based=False)
def train(self, item_based=True):
self.calculate_pairwise_similarity(item_based=item_based)
# Save the trained model into a CSV file as triples
# The triples are in the following format (key01, key02, similarity_score),
# where key is user_id in UBCF or item_id in IBCF
# The trained model is the pairwise similarity
def save_model(self, path):
with open(path, 'w', newline='') as file:
writer = csv.writer(file, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for key_i in self.__pairwise_similarity:
for key_j in self.__pairwise_similarity[key_i]:
writer.writerow([key_i, key_j, self.__pairwise_similarity[key_i][key_j]])
# Load a trained model from a CSV file that is formatted as triples
# The triples must be in the following format (key01, key02, similarity_score),
# where key is user_id in UBCF or item_id in IBCF
# The trained model is the pairwise similarity
def load_model(self, path):
self.__pairwise_similarity = {}
with open(path, newline='') as file:
reader = csv.reader(file, delimiter='\t', quotechar='|')
for row in reader:
key_i = int(row[0])
key_j = int(row[1])
similarity = float(row[2])
self.__pairwise_similarity.setdefault(key_i, {})
self.__pairwise_similarity[key_i][key_j] = similarity
    # Predict missing ratings in the dataset
    def predict_missing_ratings_item_based(self):
        """Fill in missing ratings using item-based k-nearest-neighbour averaging.

        For every (user, item) pair the user has not rated, predicts the rating
        as the similarity-weighted mean of the user's ratings for the k most
        similar items and writes it back into the dataset. Because predictions
        are written into ``self.__dataset`` while the loops run, an item
        predicted earlier can contribute to later predictions for the same
        user (the exact effect depends on dict iteration order).

        Requires the item-item similarity matrix to be populated first
        (via ``calculate_pairwise_similarity(item_based=True)`` or
        ``load_model``).
        """
        # For each item in pairwise_similarity, sort its similar items
        # according to the similarity scores
        pairwise_similarity_sorted = {}
        print("Sorting started (%s)" % (time.strftime('%y_%m_%d_%H_%M_%S')))
        for item in self.__pairwise_similarity:
            pairwise_similarity_sorted[item] = sorted(self.__pairwise_similarity[item].items(),
                                                      key=lambda rec: rec[1],
                                                      reverse=True)
        print("Sorting finished (%s)" % (time.strftime('%y_%m_%d_%H_%M_%S')))
        # Loop over all users
        c = 0
        for user in self.__dataset:
            # Status updates for large datasets
            c += 1
            if c % 100 == 0:
                print("Missing_Ratings: %d / %d (%s)" % (c, len(self.__dataset), time.strftime('%y_%m_%d_%H_%M_%S')))
            # Loop over all items
            for item in pairwise_similarity_sorted:
                # Ignore if this user has already rated this item
                if item in self.__dataset[user]:
                    continue
                neighbours = 0
                weighted_similarity = 0
                similarities_sum = 0
                # Loop over similar items (most similar first)
                for (similar_item, similarity) in pairwise_similarity_sorted[item]:
                    # Check if the similar item is the item itself
                    if similar_item == item:
                        continue
                    # We are only interested in items that have been rated by the user
                    if similar_item not in self.__dataset[user]:
                        continue
                    neighbours += 1
                    # We are only interested in the k nearest neighbours;
                    # increment-then-check means at most k items contribute.
                    if neighbours > self.k:
                        break
                    weighted_similarity += similarity * self.__dataset[user][similar_item]
                    similarities_sum += similarity
                # Skip the prediction when the accumulated similarity is not
                # positive (also guards against division by zero).
                if similarities_sum > 0:
                    self.__dataset[user][item] = weighted_similarity / similarities_sum
    def predict_missing_ratings_user_based(self):
        """Fill in missing ratings using user-based k-nearest-neighbour averaging.

        For every (item, user) pair where the user has not rated the item,
        predicts the rating as the similarity-weighted mean of the ratings
        given to that item by the k most similar users. Unlike the item-based
        variant, the loops read from a one-off transposed copy of the dataset
        while predictions are written into ``self.__dataset``, so earlier
        predictions do not feed into later ones.

        NOTE(review): the final assignment assumes every user key in the
        similarity matrix also exists in ``self.__dataset`` (KeyError
        otherwise) — confirm against how the model is trained/loaded.
        """
        # For each user in pairwise_similarity, sort its similar users
        # according to the similarity scores
        pairwise_similarity_sorted = {}
        print("Sorting started (%s)" % (time.strftime('%y_%m_%d_%H_%M_%S')))
        for user in self.__pairwise_similarity:
            pairwise_similarity_sorted[user] = sorted(self.__pairwise_similarity[user].items(),
                                                      key=lambda rec: rec[1],
                                                      reverse=True)
        print("Sorting finished (%s)" % (time.strftime('%y_%m_%d_%H_%M_%S')))
        # Invert the dataset to be item-centric
        dataset_item_centric = CollaborativeFiltering.__transpose_dataset(self.__dataset)
        # Loop over all items
        c = 0
        for item in dataset_item_centric:
            # Status updates for large datasets
            c += 1
            if c % 100 == 0:
                print("Missing_Ratings: %d / %d (%s)" % (c, len(dataset_item_centric), time.strftime('%y_%m_%d_%H_%M_%S')))
            # Loop over all users
            for user in pairwise_similarity_sorted:
                # Ignore if this user has already rated this item
                if user in dataset_item_centric[item]:
                    continue
                neighbours = 0
                weighted_similarity = 0
                similarities_sum = 0
                # Loop over similar users (most similar first)
                for (similar_user, similarity) in pairwise_similarity_sorted[user]:
                    # Check if the similar user is the user itself
                    if similar_user == user:
                        continue
                    # We are only interested in users that have rated this item
                    if similar_user not in dataset_item_centric[item]:
                        continue
                    neighbours += 1
                    # We are only interested in the k nearest neighbours;
                    # increment-then-check means at most k users contribute.
                    if neighbours > self.k:
                        break
                    weighted_similarity += similarity * dataset_item_centric[item][similar_user]
                    similarities_sum += similarity
                # Skip the prediction when the accumulated similarity is not
                # positive (also guards against division by zero).
                if similarities_sum > 0:
                    self.__dataset[user][item] = weighted_similarity / similarities_sum
def predict_missing_ratings(self, item_based=True):
if item_based:
self.predict_missing_ratings_item_based()
else:
self.predict_missing_ratings_user_based()
# Predict how the user would rate the item in each tuple in the list
# The tuples must be in one of the following formats (user, item) or (user, item, rating)
# If the rating is provided it will be overwritten
def predict_for_set(self, predict_set):
result = []
# Remove the rating if it is already provided
predict_set = [(rec[0], rec[1]) for rec in predict_set]
for (user, item) in predict_set:
rating = 0
if user in self.__dataset:
if item in self.__dataset[user]:
rating = self.__dataset[user][item]
else:
# Set average user ratings in case of any problem
rating = self.__mean_user_ratings[user]
# Post-process rating in case of any problems
if rating < 1:
rating = 1
if rating > 5:
rating = 5
result.append((user, item, rating))
return result
# Load dataset from a csv file and predicts how the user would rate the item in each tuple in the file
# The tuples must be in the following format (user, item)
def predict_for_set_with_path(self, path):
# Read dataset
dataset = []
with open(path, newline='') as file:
reader = csv.reader(file, delimiter='\t', quotechar='|')
for row in reader:
(user_id, movie_id) = (int(row[0]), int(row[1]))
dataset.append((user_id, movie_id))
# Predict
return self.predict_for_set(dataset)
# Predict how a user would rate an item
def predict(self, user, item):
rating = 0
if user in self.__dataset:
if item in self.__dataset[user]:
rating = self.__dataset[user][item]
else:
# Set average user ratings in case of any problem
rating = self.__mean_user_ratings[user]
# Post-process rating in case of any problems
if rating < 1:
rating = 1
if rating > 5:
rating = 5
return rating
# Modify pairwise similarity by external similarities
# These similarities could be computed using other resources like the text describing an item (a user)
# These similarities should be provided in a dictionary of key01-key02 keys,
# where key01, key02 are user_ids in UBCF or item_ids in IBCF
# The modification is based on the weighted sum
# sim = ((1 - alpha) * sim) + (alpha * external_sim)
# pairwise_similarity should be computed before calling this function
def modify_pairwise_similarity(self, external_similarities, alpha=0.5):
for key_i in self.__pairwise_similarity:
# If key_i doesn't have similarity scores in external_similarities, skip it
if key_i not in external_similarities:
continue
for key_j in self.__pairwise_similarity[key_i]:
# If key_j doesn't have similarity score with key_i in external_similarities, skip it
if key_j not in external_similarities[key_i]:
continue
self.__pairwise_similarity[key_i][key_j] = ((1 - alpha) * self.__pairwise_similarity[key_i][key_j]) + \
(alpha * external_similarities[key_i][key_j])
# Set the pairwise similarity matrix to some external similarities matrix
# This function should normally not be used
# It is just for testing purposes
def set_pairwise_similarity(self, external_similarities):
self.__pairwise_similarity = external_similarities
|
{"/main.py": ["/recommenders/collaborative_filtering.py"]}
|
38,832
|
MhdMousaHAMAD/collaborative-filtering
|
refs/heads/master
|
/main.py
|
from recommenders.collaborative_filtering import CollaborativeFiltering
from recommenders.collaborative_filtering import similarity_cosine
from recommenders.collaborative_filtering import similarity_pearson
import csv
# Read dataset
def read_dataset(path):
    """Parse tab-separated ``(user, item, rating)`` rows from *path*.

    NOTE(review): user/movie ids are kept as strings here (only the rating is
    converted to float); ``CollaborativeFiltering.set_dataset`` coerces them
    to int later.
    """
    with open(path, newline='') as file:
        return [(row[0], row[1], float(row[2]))
                for row in csv.reader(file, delimiter='\t', quotechar='|')]
# Write dataset
def write_dataset(dataset, path):
    """Write *dataset* rows to *path* as tab-separated values; returns the
    dataset unchanged so calls can be chained."""
    with open(path, 'w', newline='') as file:
        writer = csv.writer(file, delimiter='\t', quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerows(dataset)
    return dataset
# Demo driver: cross-validate an item-based CF model, then train on the full
# training data and write predictions for data/predict.dat.
# NOTE(review): k=2 and n_folds=2 look like quick smoke-test settings rather
# than tuned values — confirm before trusting the reported RMSE.
# Cross validation
rmse = CollaborativeFiltering.cross_validate('data/training.dat',
                                             item_based=True,
                                             k=2,
                                             similarity=similarity_cosine,
                                             n_folds=2,
                                             # models_directory='models', # when provided, the models would be saved
                                             models_directory='',
                                             load_models=False)
print("RMSE: %.3f" % rmse)
# Prediction: train an item-based model on the whole training set, fill in
# the missing ratings, then score the (user, item) pairs in predict.dat.
ibcf = CollaborativeFiltering(k=2, similarity=similarity_pearson)
ibcf.load_dataset('data/training.dat')
ibcf.train(item_based=True)
# Model can be trained and saved and then loaded again without training
# ibcf.save_model('models/model_sim_{}.csv'.format(ibcf.similarity.__name__))
# ibcf.load_model('models/model_sim_{}.csv'.format(ibcf.similarity.__name__))
ibcf.predict_missing_ratings(item_based=True)
predictions = ibcf.predict_for_set_with_path('data/predict.dat')
# Persist the predictions as tab-separated (user, item, rating) triples.
write_dataset(predictions, 'output/predictions.dat')
print(predictions)
|
{"/main.py": ["/recommenders/collaborative_filtering.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.