code
stringlengths
17
6.64M
class Node(Common):
    """
    Provide methods for calculating 'Plan Points' and 'Actual Points'
    for each node of a query-plan tree.
    """

    # Node types whose points are computed by __simpleOp with the O(N)
    # cost function.
    _SIMPLE_NODES = frozenset((
        'Result', 'Seq Scan', 'Sample Scan', 'Index Scan', 'Index Only Scan',
        'Bitmap Index Scan', 'Bitmap Heap Scan', 'Tid Scan', 'Function Scan',
        'Table Function Scan', 'Values Scan', 'CTE Scan',
        'Named Tuplestore Scan', 'WorkTable Scan', 'Foreign Scan',
        'Aggregate', 'SetOp', 'Limit',
    ))

    # Node types handled by __simpleListOp; the value selects the cost
    # function: True -> O(N log N), False -> O(N).
    _LIST_NODES = {
        'Hash': False, 'ProjectSet': False, 'Subquery Scan': False,
        'Custom Scan': False, 'Materialize': False,
        'Sort': True, 'Incremental Sort': True,
        'Gather': False, 'Gather Merge': False, 'LockRows': False,
        'Unique': False, 'WindowAgg': False,
    }

    def __init__(self, regression=False, log_level=Log.error):
        self.LogLevel = log_level
        self.regression = regression

    #
    # Cost functions applied to a row count.
    #
    def __O_N(self, n):
        return n

    def __O_1(self, n):
        # Not referenced by calc() at present; kept for completeness of
        # the cost-function set.
        return 1

    def __O_NlogN(self, n):
        # NOTE(review): math.log raises ValueError for n == 0; callers
        # appear to pass positive row counts -- confirm.
        return n * math.log(int(n))

    def __simpleOp(self, target, plan, f):
        """
        Set 'PlanPoints' and 'ActualPoints' on target with cost function f.

        'plan' supplies the row estimates used when no regression
        parameters are available.
        """
        _removedRows = self.count_removed_rows(target)
        if self.regression:
            target['PlanPoints'] = f(max(target['Plan Rows'], target['Actual Rows']))
            target['ActualPoints'] = f(target['Actual Rows'])
            return
        # No regression parameters: estimate the accurate 'Plan Rows' on
        # the fly even during query processing. 'ExpectedRows' stores the
        # estimated 'Plan Rows'.
        if target['CurrentState'] == State.FINISHED:
            target['ExpectedRows'] = plan['Actual Rows']
            target['PlanPoints'] = f(target['Actual Rows'] + _removedRows)
            # NOTE(review): f is applied to an already-computed point value
            # here (f of f); verify the double application is intended.
            target['ActualPoints'] = f(target['PlanPoints'])
        else:
            target['ExpectedRows'] = max(plan['Plan Rows'], plan['Actual Rows'])
            target['PlanPoints'] = f(target['ExpectedRows'] + _removedRows)
            target['ActualPoints'] = f(target['Actual Rows'] + _removedRows)

    def __simpleListOp(self, plan, f):
        """Apply __simpleOp to plan, paired with each of its children."""
        if 'Plans' not in plan:
            self.__simpleOp(plan, plan, f)
            return
        _plans = plan['Plans']
        if isinstance(_plans, list):
            for p in _plans:
                self.__simpleOp(plan, p, f)
        else:
            self.__simpleOp(plan, _plans, f)

    #
    # Join operations
    #
    def __outer(self, outer, inner):
        # Point estimator that uses only the outer side (Merge Join).
        return outer

    def __heuristics(self, plan_rows, estimate_rows, actual_rows):
        # Row estimator: the larger of planned and already-seen rows.
        # 'estimate_rows' is deliberately ignored.
        return max(plan_rows, actual_rows)

    def __joinOp_with_estimating_plan_rows_on_the_fly(self, plan, estimate_rows,
                                                      estimatePoints, removed_rows):
        """
        Called when no regression parameters are used, so the accurate
        'Plan Rows' is estimated on the fly even during query processing.

        'ExpectedRows' stores the estimated 'Plan Rows'; the estimating
        method is heuristic.
        """
        _plans = plan['Plans']
        if not isinstance(_plans, list):
            print('******Never reach********')
            return
        # Only the children's 'ExpectedRows' are needed here; the original
        # also collected their PlanPoints/ActualPoints but never used them.
        _expected_rows = [p['ExpectedRows'] for p in _plans]
        _estimated_points = estimatePoints(_expected_rows[0], _expected_rows[1])
        plan['PlanPoints'] = _estimated_points
        if plan['CurrentState'] == State.FINISHED:
            plan['ExpectedRows'] = plan['PlanPoints']
            plan['ActualPoints'] = plan['Actual Rows']
        elif (_estimated_points - removed_rows) == plan['Actual Rows']:
            plan['ExpectedRows'] = plan['Actual Rows']
            plan['ActualPoints'] = _estimated_points
        else:
            plan['ExpectedRows'] = estimate_rows(plan['Plan Rows'],
                                                 _estimated_points - removed_rows,
                                                 plan['Actual Rows'])
            plan['ActualPoints'] = plan['Actual Rows'] + removed_rows

    def __joinOp_with_estimated_plan_rows(self, plan, removed_rows):
        """
        Called when the 'Plan Rows' in all nodes are already replaced by
        the estimates from the regression parameters, so 'ExpectedRows'
        is not needed.
        """
        def get_children(plan):
            # Split the two children into outer/inner by 'Parent Relationship'.
            _PR = [[], []]
            _AR = [[], []]
            for i in range(0, 2):
                p = plan['Plans'][i]
                k = 0 if p['Parent Relationship'] == 'Outer' else 1
                _PR[k] = p['Plan Rows']
                _AR[k] = p['Actual Rows']
            return (_PR[0], _PR[1], _AR[0], _AR[1])

        (PlanOuter, PlanInner, ActualOuter, ActualInner) = get_children(plan)
        # The plan can never finish below the rows already produced.
        if PlanOuter < ActualOuter:
            PlanOuter = ActualOuter
        if PlanInner < ActualInner:
            PlanInner = ActualInner
        if plan['Node Type'] in ('Hash Join', 'Merge Join'):
            # 'Coefficient' / 'Intercept' are read but unused; keeping the
            # lookups preserves the original KeyError on malformed input.
            _coef = plan['Coefficient']
            _coef2 = plan['Coefficient2']
            _intercept = plan['Intercept'][0]
            if _coef2 > 0:
                plan['PlanPoints'] = PlanOuter * PlanInner
                plan['ActualPoints'] = ActualOuter * ActualInner + removed_rows
            else:
                plan['PlanPoints'] = PlanOuter + PlanInner
                plan['ActualPoints'] = ActualOuter + ActualInner + removed_rows
        else:
            plan['PlanPoints'] = PlanOuter * PlanInner
            plan['ActualPoints'] = ActualOuter * ActualInner + removed_rows
        if Log.debug1 <= self.LogLevel:
            print('Debug1: __joinOp3 type={} PlanPoints={} ActualPoints={}'.format(
                plan['Node Type'], plan['PlanPoints'], plan['ActualPoints']))

    def __joinOp(self, plan, estimate_rows, estimatePoints):
        """Dispatch a join-type node to the proper point calculator."""
        removed_rows = self.count_removed_rows(plan)
        if self.regression:
            self.__joinOp_with_estimated_plan_rows(plan, removed_rows)
        else:
            self.__joinOp_with_estimating_plan_rows_on_the_fly(
                plan, estimate_rows, estimatePoints, removed_rows)

    #
    # Public method
    #
    def calc(self, plan, regression):
        """
        Calculate 'Plan Points' and 'Actual Points' for one node,
        dispatching on 'Node Type'. Unknown node types are ignored,
        exactly as the original elif chain did.
        """
        self.regression = regression
        _nodeType = plan['Node Type']
        if _nodeType in self._SIMPLE_NODES:
            self.__simpleOp(plan, plan, self.__O_N)
            return
        if _nodeType in self._LIST_NODES:
            _f = self.__O_NlogN if self._LIST_NODES[_nodeType] else self.__O_N
            self.__simpleListOp(plan, _f)
            return
        # Join-type nodes: (row estimator, point estimator) per node type.
        _join = {
            'Append': (max, operator.add),
            'Merge Append': (max, operator.add),
            'Recursive Union': (max, operator.mul),
            'Nested Loop': (max, operator.mul),
            'Merge Join': (self.__heuristics, self.__outer),
            'Hash Join': (self.__heuristics, operator.add),
            'BitmapAnd': (max, operator.add),
            'BitmapOr': (max, operator.add),
        }
        if _nodeType in _join:
            _estimate_rows, _estimate_points = _join[_nodeType]
            self.__joinOp(plan, _estimate_rows, _estimate_points)
class CalcNode(Node):
    """
    Provide methods to estimate the progress of the query.

    Estimation Outline:

    To estimate the progress of the query, this module defines a concept
    internally called 'point'. It is a non-dimensional value, like the
    concept of cost in the optimizer of a database system.

    The point is simpler than the cost: it increments 1 when a row is read
    by scan type operations and when a row is processed by join and scan
    type operations.
    Examples:
      1. If 100 rows are read by Seq Scan, the point increments 100.
      2. If Nested Loop Join processes the outer 100 rows and the inner
         20 rows, the point increments 2000.

    When estimating the progress of the query, 'Plan Points' and
    'Actual Points' are calculated in each node by the calc_node() method,
    where 'Plan Points' is the estimated point to get eventually, and
    'Actual Points' is the actual point.

    Then, all 'Plan Points' and 'Actual Points' are counted up over all
    nodes by the count_points() method, which returns
    'Actual Points' / 'Plan Points' as the progress of the query.
    """

    def __init__(self, log_level=Log.error):
        self.LogLevel = log_level
        self.regression = False

    def __set_objects(self, plan, state, is_outer_running):
        """
        Add four objects to each node: 'ExpectedRows', 'ActualPoints',
        'PlanPoints' and 'CurrentState'; set the appropriate state to the
        'CurrentState' object and return the state to propagate downward.
        """
        # Delete unnecessary objects.
        for _i in ('Startup Cost', 'Total Cost', 'Plan Width',
                   'Actual Startup Time', 'Actual duration Time',
                   'BufferUsage_Start', 'WalUsage_Start',
                   'BufferUsage', 'WalUsage'):
            if _i in plan:
                del plan[_i]
        # Nodes without 'Actual Rows' carry no progress information:
        # pass the state through unchanged.
        if 'Actual Rows' not in plan:
            return state
        # Add 'ExpectedRows', 'ActualPoints' and 'PlanPoints'.
        plan.update(ExpectedRows=0, ActualPoints=0, PlanPoints=0)
        if Log.debug5 <= self.LogLevel:
            print('Debug5: NodeType={}'.format(plan['Node Type']))
        # Add 'CurrentState' and set the appropriate state.
        #
        # If the regression parameters are not used, CurrentState may still
        # be changed by the rules applied after this step, so this setting
        # is tentative. If the regression parameters are used, the value of
        # CurrentState is not consulted at all.
        if state == State.WAITING:
            if (plan['Actual Rows'] == 0 and plan['Actual Loops'] == 0
                    and self.count_removed_rows(plan) == 0):
                if Log.debug5 <= self.LogLevel:
                    print('Debug5: WAITING -> WAITING ActualRows == 0 '
                          + 'AND ActualLoops == 0 AND RemovedRows == 0')
                plan.update(CurrentState=state)
                return state
            else:
                if Log.debug5 <= self.LogLevel:
                    print('Debug5: WAITING -> RUNNING ActualRows != 0 '
                          + 'OR ActualLoops != 0 OR RemovedRows != 0')
                plan.update(CurrentState=State.RUNNING)
                return State.RUNNING
        elif state == State.RUNNING:
            if is_outer_running and self.isScan(plan):
                if Log.debug5 <= self.LogLevel:
                    print('Debug5: RUNNING -> FINISHED Outer running AND scan')
                plan.update(CurrentState=State.FINISHED)
                return State.FINISHED
            else:
                if Log.debug5 <= self.LogLevel:
                    # Fixed misspelled debug message (was 'RUNING -> RUNING').
                    print('Debug5: RUNNING -> RUNNING !Outer running OR !scan')
                plan.update(CurrentState=State.RUNNING)
                return state
        else:
            if Log.debug5 <= self.LogLevel:
                print('Debug5: FINISHED -> FINISHED')
            plan.update(CurrentState=State.FINISHED)
            return state

    #
    # Public methods
    #
    def prepare_calc_node(self, plans, regression=False):
        """
        Prepare to execute the calc_node() method: add the four objects
        described in __set_objects() to all nodes of plans, and set the
        appropriate state to the 'CurrentState' object.
        """
        def op(Plans, state):
            if isinstance(Plans, list):
                _is_outer_running = False
                for plan in Plans:
                    _state = self.__set_objects(plan, state, _is_outer_running)
                    if Log.debug3 <= self.LogLevel:
                        print('Debug3: NodeType={} CurrentState={}'.format(
                            plan['Node Type'], plan['CurrentState']))
                    # Each sibling sees whether its predecessor has started.
                    _is_outer_running = True if plan['Actual Loops'] > 0 else False
                    if 'Plans' in plan:
                        op(plan['Plans'], _state)
                return
            else:
                _state = self.__set_objects(Plans, state, True)
                if Log.debug3 <= self.LogLevel:
                    print('Debug3: NodeType={} CurrentState={}'.format(
                        Plans['Node Type'], Plans['CurrentState']))
                if 'Plans' in Plans:
                    op(Plans['Plans'], _state)
                return

        # With regression parameters the state machine is bypassed: every
        # node starts out FINISHED.
        if regression:
            op(plans, State.FINISHED)
        else:
            op(plans, State.WAITING)
        return plans

    def calc_node(self, plans, depth, regression):
        """
        Dig down the plan tree to the node specified by depth, and
        calculate its 'Plan Points' and 'Actual Points'.
        """
        def incr(plan):
            if 'Node Type' in plan:
                self._count += 1

        def op(Plans):
            if isinstance(Plans, list):
                for plan in Plans:
                    incr(plan)
                    if self._depth == self._count:
                        self.calc(plan, self.regression)
                        return
                    elif 'Plans' in plan:
                        op(plan['Plans'])
            else:
                incr(Plans)
                if self._depth == self._count:
                    self.calc(Plans, self.regression)
                    return
                elif 'Plans' in Plans:
                    op(Plans['Plans'])

        self._depth = depth
        self._count = 0
        self.regression = regression
        op(plans)

    def count_points(self, plans):
        """
        Count up the 'Plan Points' and the 'Actual Points', and return the
        proportion of the actual and planned points (clamped to 1).
        """
        def op(Plans):
            # Count up the planned and actual points recursively.
            self._i += 1
            if isinstance(Plans, list):
                for plan in Plans:
                    if 'Plans' in plan:
                        if 'ActualPoints' in plan:
                            self._plan_points += plan['PlanPoints']
                            self._actual_points += plan['ActualPoints']
                        op(plan['Plans'])
            elif 'Plans' in Plans:
                if 'ActualPoints' in Plans:
                    self._plan_points += Plans['PlanPoints']
                    self._actual_points += Plans['ActualPoints']
                op(Plans['Plans'])
            return

        self._i = 0
        self._actual_points = 0
        self._plan_points = 0
        op(plans)
        if self._plan_points == 0:
            return 0.0
        return min(self._actual_points / self._plan_points, 1)
class QueryProgress(MergePlan, Replace, Rules, CalcNode):
    """Top-level facade that estimates the progress of running queries."""

    def __init__(self, base_dir='.', log_level=Log.error):
        self.set_base_dir(base_dir)
        self.LogLevel = log_level

    def _progress(self, merged_plan, serverId=None, queryid=None, planid=None):
        """
        Estimate the progress of merged_plan.
        This is called by query_progress() and check()@tools/sampling_plan.py.

        Parameters
        ----------
        merged_plan : dict

        Returns
        -------
        progress : float
            0.0 <= progress <= 1.0
        """
        def delete_objects(plan):
            # Strip attributes that are irrelevant to point calculation.
            # Bug fix: the original list contained '"Hash Buckets' (stray
            # leading quote), so 'Hash Buckets' was never deleted.
            for _i in ('Parallel Aware', 'Relation Name', 'Alias',
                       'Startup Cost', 'Total Cost', 'Plan Width',
                       'Scan Direction', 'Index Name', 'Index Cond',
                       'Heap Fetches', 'Original Hash Buckets', 'Hash Batches',
                       'Original Hash Batches', 'Hash Buckets', 'Hash Cond',
                       'Join Filter', 'Peak Memory Usage', 'Inner Unique'):
                if _i in plan:
                    del plan[_i]
            return plan

        Plans = merged_plan['Plan']
        self.delete_unnecessary_objects(delete_objects, Plans)
        _numNode = self.count_nodes(Plans)
        _regression = False
        if serverId is not None and queryid is not None and planid is not None:
            _reg_param = self.get_regression_param(serverId, queryid, planid)
        else:
            _reg_param = None
        if _reg_param is not None:
            if self.check_formatted_regression_params(serverId, queryid):
                if Log.info <= self.LogLevel:
                    print('Info: Using formatted regression params.')
            else:
                _regression = True
                # There are already regression parameters for this query:
                # replace the 'Plan Rows' with the rows estimated from them.
                self.replace_plan_rows(merged_plan['Plan'], _reg_param['Plan'],
                                       _numNode, queryid, planid)
                if Log.info <= self.LogLevel:
                    print('Info: Using regression params.')
        elif Log.info <= self.LogLevel:
            print('Info: Using rules.')
        # Cut the top node if it is 'ModifyTable', i.e. the query is
        # INSERT, DELETE or UPDATE.
        if Plans['Node Type'] == 'ModifyTable':
            _numNode -= 1
            if 'Plans' in Plans:
                _plans = Plans['Plans']
                # Descend to the first (only) child of ModifyTable.
                Plans = _plans[0] if isinstance(_plans, list) else _plans
        # Prepare the nodes of Plans to calculate the points.
        Plans = self.prepare_calc_node(Plans, _regression)
        if not _regression:
            self.apply_rules(Plans)
        # Calculate 'Plan Points' and 'Actual Points' in order, from the
        # bottom node to the top node.
        _i = _numNode
        while 0 < _i:
            self.calc_node(Plans, _i, _regression)
            _i -= 1
        # Count up the points and return the progress ratio.
        return round(self.count_points(Plans), 6)

    #
    # Public methods
    #
    def make_progress_bar(self, percent, small=False):
        """
        Make the value of percent with progress-bar.

        Parameters
        ----------
        percent : int or float
            0.0 <= percent <= 100.0
        small : bool
            If true, the range of progress-bar is 25 [byte];
            otherwise 50 [byte].

        Returns
        -------
        _p_bar : str
            progress-bar.
        """
        qSteps = [' ', u'▎', u'▍', u'▋']
        percent = max(0, percent)
        percent = min(100, percent)
        _p = int(percent)
        _p_bar = str(u'▎') if _p == 0 else ''
        # The original wrapped these computations in inline lambdas; they
        # are unfolded here into named locals with identical values.
        _filled = (_p // 4) if small else (_p // 2)
        for _i in range(0, _filled):
            _p_bar += str(u'█')
        _p_bar += str(qSteps[(_p % 4) if small else (_p % 2) * 2])
        _start = 2 if _p == 0 else _filled + 1
        _width = 25 if small else 50
        for _i in range(_start, _width):
            _p_bar += ' '
        if _p < 100:
            _p_bar += str(u'▏')
        return _p_bar

    def query_progress(self, plan_list, server_id=None):
        """
        Estimate the progresses of the plans in plan_list.

        Note: Don't use for EXPLAIN ANALYZE statement because this method
        will crash.

        Parameters
        ----------
        plan_list : [results, ...]
            A list of the results of pg_query_plan().
            results := [worker_type ->str, queryid ->int, planid ->int,
                        plan_json ->str, hash_query ->int]

        Returns
        -------
        progress : [(queryid, float), ...]
            A list of (queryid, progress) pairs.
        """
        def set_queryid_to_parallel_worker(plan_list):
            # In PG version 13, the queryIds of parallel workers are always
            # 0, so set the leader's queryId to the parallel workers'
            # queryIds. The hash of the query string associates the
            # leader's query with its workers' queries (needed when
            # nested_level > 0).
            _hash = {}
            for _plan in plan_list:
                _queryid = _plan[1]
                _hash_query = _plan[4]
                if _queryid != 0:
                    _hash[str(_hash_query)] = _queryid
            for _plan in plan_list:
                _queryid = _plan[1]
                _hash_query = _plan[4]
                if _queryid == 0:
                    _plan[1] = _hash[str(_hash_query)]

        def get_unique_queryid(plan_list):
            # Return the sorted unique queryIds in plan_list: a leader and
            # its parallel workers share one id; plans at different nested
            # levels are distinct queries.
            _tmp = [int(_plan[1]) for _plan in plan_list]
            return sorted(set(_tmp))

        def prepare_merge_plans(queryid, plan_list):
            # Separate the leader's plan and the workers' plans from
            # plan_list, parse the JSON, and return them with the planId.
            _worker_plans = []
            for _plan in plan_list:
                _worker_type = _plan[0]
                _queryid = _plan[1]
                _plan_json = _plan[3]
                if _queryid == queryid:
                    if _worker_type == 'leader':
                        _planid = _plan[2]
                        _leader_plan = json.loads(_plan_json)
                    else:
                        _worker_plans.append(json.loads(_plan_json))
            # If there is no parallel worker, _worker_plans is empty.
            # NOTE(review): _planid/_leader_plan are unbound if no leader
            # row exists for queryid -- confirm callers guarantee one.
            return (_planid, _leader_plan, _worker_plans)

        set_queryid_to_parallel_worker(plan_list)
        _ret = []
        for _queryid in get_unique_queryid(plan_list):
            (_planid, _leader_plan, _worker_plans) = \
                prepare_merge_plans(_queryid, plan_list)
            _merged_plan = self.merge_plans(_leader_plan, _worker_plans)
            _progress = self._progress(_merged_plan, server_id, _queryid, _planid)
            if Log.debug1 <= self.LogLevel:
                print('Debug1: queryid={} => progress={}'.format(_queryid, _progress))
            _ret.append((_queryid, _progress))
        return _ret
class CalcRegression():
    """
    Functions to calculate the regression params.
    """

    def set_log_level(self, log_level):
        self.LogLevel = log_level

    def __rss(self, X, Y):
        """Root of the summed squared residuals, normalized by len(X)."""
        assert len(X) != 0
        return math.sqrt(sum((x - y) ** 2 for x, y in zip(X, Y))) / len(X)

    def scan(self, X, Y):
        """
        Linear regression for scan-type nodes.
          * Model (no bias): y = a * x
          * Loss function: Mean Square Error
        Returns (coefficient, intercept).
        """
        _sumY = sum(Y)
        _sumX = sum(X)
        if Log.debug3 <= self.LogLevel:
            print('Debug3: ----- SCAN ----')
            print('Debug3: ===> X = {}'.format(X))
            print('Debug3: ===> Y = {}'.format(Y))
        if (250 * _sumY) < _sumX:
            # Planned rows dwarf actual rows: model the output as a
            # constant (zero coefficient, mean intercept).
            if Log.debug3 <= self.LogLevel:
                print('Debug3: ==> coef = 0 intercept = {}'.format(
                    float(round(_sumY / len(Y), 5))))
            return (0.0, float(round(_sumY / len(Y), 5)))
        if Log.debug3 <= self.LogLevel:
            if _sumX == 0:
                print('Debug3: ==> coef = 0 intercept = {}'.format(
                    float(round(_sumY / len(Y), 5))))
            else:
                print('Debug3: ==> coef = {} intercept = 0'.format(
                    float(round(_sumY / _sumX, 5))))
        if _sumX == 0:
            return (0.0, float(round(_sumY / len(Y), 5)))
        return (float(_sumY / _sumX), 0.0)

    def gather(self, X, Y):
        """
        Linear regression for Gather nodes.
          * Model (no bias): y = a * x
          * Loss function: Mean Square Error
        Returns (coefficient, intercept).
        """
        _sumY = sum(Y)
        _sumX = sum(X)
        if Log.debug3 <= self.LogLevel:
            print('Debug3: ---- GATHER ----')
            print('Debug3: ===> X = {}'.format(X))
            print('Debug3: ===> Y = {}'.format(Y))
        if Log.debug3 <= self.LogLevel:
            if _sumX == 0:
                print('Debug3: ==> coef = 0 intercept = {}'.format(
                    float(round(_sumY / len(Y), 5))))
            else:
                print('Debug3: ==> coef = {} intercept = 0'.format(
                    float(round(_sumY / _sumX, 5))))
        if _sumX == 0:
            return (0.0, float(round(_sumY / len(Y), 5)))
        return (float(_sumY / _sumX), 0.0)

    def nested_loop(self, Xouter, Xinner, Y):
        """
        Multiple linear regression for nested-loop-like joins.
          * Model (no bias): Y = a * Xinner * Xouter
          * Loss function: Mean Square Error
        Returns the single coefficient a.
        """
        # _sumY = sum(y_i * xi_i * xo_i); _sumX = sum(xi_i^2 * xo_i^2)
        _sumY = sum(map(operator.mul, map(operator.mul, Xinner, Xouter), Y))
        _sumX = sum(map(operator.mul,
                        (x ** 2 for x in Xinner),
                        (x ** 2 for x in Xouter)))
        if Log.debug3 <= self.LogLevel:
            print('Debug3: +++++ NESTED LOOP JOIN +++++')
            print('Debug3: ===> Xouter = {}'.format(Xouter))
            print('Debug3: ===> Xinner = {}'.format(Xinner))
            print('Debug3: ===> Y = {}'.format(Y))
            if _sumX == 0:
                print('Debug3: ==> coef=1')
            else:
                print('Debug3: ==> coef={}'.format(str(round(_sumY / _sumX, 5))))
        return 1.0 if _sumX == 0 else float(_sumY / _sumX)

    def merge_or_hash_join(self, Xouter, Xinner, Y, add_bias_0=True):
        """
        Regression for Merge Join / Hash Join nodes.

        Tries a two-variable regression and the two single-variable
        regressions, and keeps the one with the lowest RMSE.
        Returns (coef_list, reg, intercept).
        """
        def multi_regression(Xouter, Xinner, Y, add_bias_0=True):
            # Multiple linear regression, Y = a1 * Xouter + a2 * Xinner,
            # MSE loss; optionally anchor through the origin by adding a
            # (0, 0) -> 0 sample.
            _X = []
            _Y = []
            for i in range(0, len(Y)):
                _Y.append(Y[i])
                _X.append([Xouter[i], Xinner[i]])
            if add_bias_0:
                _X.append([0.0, 0.0])
                _Y.append(0.0)
            if Log.debug3 <= self.LogLevel:
                print('Debug3: ****MERGE OR HASH JOIN*****')
                print('Debug3: ===> Xouter = {}'.format(Xouter))
                print('Debug3: ===> Xinner = {}'.format(Xinner))
                print('Debug3: ===> X ={}'.format(_X))
                print('Debug3: ===> Plan Rows ={} Y={}'.format(Y, _Y))
            scireg = LinearRegression()
            scireg.fit(_X, _Y)
            _list = scireg.coef_.tolist()
            _coef = [float(round(_list[n], 5)) for n in range(len(_list))]
            _intercept = float(round(scireg.intercept_ + 0.0, 5))
            # Predict and calculate RMSE.
            _y_pred = scireg.predict(_X)
            _rmse = np.sqrt(mean_squared_error(_Y, _y_pred))
            del scireg
            return (_coef, _intercept, _rmse)

        def single_regression(X, Y, add_bias_0=True):
            # Simple linear regression, Y = a * X + b, MSE loss.
            _X = []
            _Y = []
            for i in range(0, len(Y)):
                _Y.append(Y[i])
                _X.append([X[i]])
            if add_bias_0:
                _X.append([0.0])
                _Y.append(0.0)
            if Log.debug3 <= self.LogLevel:
                # Fixed misspelled debug message (was 'MERGE OR HASHOIN').
                print('Debug3: ****MERGE OR HASH JOIN*****')
                print('Debug3: ===> X={}'.format(X))
                print('Debug3: ===> Plan Rows ={} Y={}'.format(Y, _Y))
            scireg = LinearRegression()
            scireg.fit(_X, _Y)
            _list = scireg.coef_.tolist()
            _coef = [float(round(_list[n], 5)) for n in range(len(_list))]
            _intercept = float(round(scireg.intercept_ + 0.0, 5))
            # Predict and calculate RMSE.
            _y_pred = scireg.predict(_X)
            _rmse = np.sqrt(mean_squared_error(_Y, _y_pred))
            del scireg
            return (float(_coef[0]), float(_intercept), _rmse)

        def reg(Xouter, Xinner, Y):
            # Multiplicative model Y = a * Xouter * Xinner. Kept for
            # reference; see the commented-out note below.
            _sumY = sum(map(operator.mul, map(operator.mul, Xinner, Xouter), Y))
            _sumX = sum(map(operator.mul,
                            (x ** 2 for x in Xinner),
                            (x ** 2 for x in Xouter)))
            _coef = 1.0 if _sumX == 0 else float(_sumY / _sumX)
            _mse = (sum((y - _coef * xo * xi) ** 2
                        for y, xo, xi in zip(Y, Xouter, Xinner)) / len(Y))
            return (_coef, np.sqrt(_mse))

        # Calculate regression parameters: start with the two-variable
        # model, retrying without the origin anchor if a coefficient is
        # negative.
        (coef, intercept, rmse) = multi_regression(Xouter, Xinner, Y)
        if coef[0] < 0 or coef[1] < 0:
            (coef, intercept, rmse) = multi_regression(Xouter, Xinner, Y, False)
        _coef = [float(coef[0]), float(coef[1])]
        _reg = 0
        _intercept = float(round(intercept + 0.0, 5))
        _rmse = rmse
        # Outer-only model.
        (coef, intercept, rmse) = single_regression(Xouter, Y)
        if coef < 0:
            (coef, intercept, rmse) = single_regression(Xouter, Y, False)
        if rmse < _rmse:
            _coef = [float(coef), 0.0]
            _intercept = float(round(intercept + 0.0, 5))
            _rmse = rmse
        # Inner-only model.
        (coef, intercept, rmse) = single_regression(Xinner, Y)
        if coef < 0:
            (coef, intercept, rmse) = single_regression(Xinner, Y, False)
        if rmse < _rmse:
            _coef = [0.0, float(coef)]
            _intercept = float(round(intercept + 0.0, 5))
            _rmse = rmse
        # Note: the multiplicative model reg() is not used because it makes
        # the results significantly unstable:
        #   (coef, rmse) = reg(Xouter, Xinner, Y)
        #   if rmse < _rmse: _coef = [0, 0]; _intercept = 0; _reg = coef
        if Log.debug3 <= self.LogLevel:
            print('Debug3: ==> coef={} reg={} intercept={}'.format(
                _coef, _reg, _intercept))
        return (_coef, _reg, _intercept)
class Regression(Repository, CalcRegression):
    """Compute and persist regression parameters for grouped query plans."""

    def __init__(self, base_dir='.', log_level=Log.error):
        self.ServerId = ''
        self.Level = 0
        self.set_base_dir(base_dir)
        self.LogLevel = log_level

    def __set_serverId(self, serverId):
        self.ServerId = serverId

    # -- helpers that manage self.Level --------------------------------
    def __init_level(self):
        self.Level = 0

    def __incr_level(self):
        self.Level += 1

    def __get_level(self):
        return self.Level

    def __delete_objects(self, plan):
        """Delete all objects except 'Node Type', 'Plan(s)' and some."""
        _keep = ('Node Type', 'Plans', 'Plan', 'Relation Name', 'Schema',
                 'Alias', 'Parent Relationship', 'MergeFlag')
        # Iterate over a snapshot (list(plan)) because the dict is mutated
        # while iterating.
        for _key in list(plan):
            if _key not in _keep:
                plan.pop(_key)
        return plan

    def __calc_regression(self, plan, reg, queryid, planid, depth):
        """
        Calculate the regression parameters of plan, and set the results
        into reg.
        """
        self.__incr_level()
        _level = self.__get_level()
        _node_type = plan['Node Type']

        def store_coef(coef):
            # 'Coefficient' is always stored as a list.
            reg.update(Coefficient=coef if type(coef) is list else [coef])

        # -- nested loop type -------------------------------------------
        if _node_type in ('Append', 'Merge Append', 'Recursive Union',
                          'Nested Loop', 'BitmapAnd', 'BitmapOr'):
            (_Xouter, _Xinner, _RR) = self.get_inputs(plan)
            if Log.debug3 <= self.LogLevel:
                print('Debug3: === NodeType={}'.format(_node_type))
                print('Debug3: *** Y ActualRows={}'.format(plan['Actual Rows']))
                print('Debug3: *** Xouter ={} Xinner ={}'.format(_Xouter, _Xinner))
            _Y = plan['Actual Rows']
            store_coef(self.nested_loop(_Xouter, _Xinner, _Y))
            return

        # -- hash or merge join -----------------------------------------
        if _node_type in ('Merge Join', 'Hash Join'):
            (_Xouter, _Xinner, _RR) = self.get_inputs(plan)
            if Log.debug3 <= self.LogLevel:
                print('Debug3: HASH or MERGE depth={} RR={} queryid={} planid={}'.format(
                    depth, _RR, queryid, planid))
            if Log.debug3 <= self.LogLevel:
                print('Debug3: === NodeType={}'.format(_node_type))
                print('Debug3: *** Y ActualRows={}'.format(plan['Actual Rows']))
                print('Debug3: *** Xouter ={} Xinner ={}'.format(_Xouter, _Xinner))
            _Y = plan['Actual Rows']
            (_coef, _reg, _intercept) = self.merge_or_hash_join(_Xouter, _Xinner, _Y)
            store_coef(_coef)
            reg.update(Coefficient2=[round(_reg + 0.0, 5)])
            reg.update(Intercept=[round(_intercept + 0.0, 5)])
            return

        # -- scan type (everything else) --------------------------------
        if Log.debug3 <= self.LogLevel:
            print('Debug3: === NodeType={}'.format(_node_type))
            print('Debug3: *** Plan Rows={} NormalizeParam={} NormalizePlanParam={}'.format(
                plan['Plan Rows'], plan['NormalizeParam'], plan['NormalizePlanParam']))
            print('Debug3: *** Actual Rows={}'.format(plan['Actual Rows']))
        (_coef, _intercept) = self.scan(plan['Plan Rows'], plan['Actual Rows'])
        store_coef(_coef)
        reg.update(Intercept=[round(_intercept + 0.0, 5)])
        return

    def __set_relations(self, Plans, depth):
        """
        Set "Relation Name" in the node at the given depth by gathering the
        children's "Relation Name" up when the node does not have one.

        A single-child node inherits its child's relation name:
          "Sort" -> "Seq Scan"(tbl1)  ==>  "Sort" gets "tbl1"

        A two-child node (e.g. "Nested Loop") gets the pair of its outer
        and inner children's relation names, recursively:
          "Nested Loop" over tbl1/tbl2          ==> "[tbl1, tbl2]"
          "Merge Join" over [tbl1, tbl2]/tbl3   ==> "[[tbl1, tbl2], tbl3]"
        The same propagation is applied to "Schema" and "Alias".
        """
        def get_relations(plan):
            if 'Relation Name' in plan:
                return
            if 'Plans' in plan:
                _child = plan['Plans']
            elif 'Plan' in plan:
                _child = plan['Plan']
            else:
                return
            if isinstance(_child, list):
                _outer = _child[0]
                _inner = _child[1] if 2 <= len(_child) else None
                if _inner is None:
                    # Single child: copy its attributes up.
                    for _attr in ('Relation Name', 'Schema', 'Alias'):
                        if _attr in _outer:
                            plan.update([(_attr, _outer[_attr])])
                else:
                    # Two children: pair the outer and inner attributes.
                    for _attr in ('Relation Name', 'Schema', 'Alias'):
                        if _attr in _outer and _attr in _inner:
                            plan.update([(_attr, [_outer[_attr], _inner[_attr]])])
            else:
                for _attr in ('Relation Name', 'Schema', 'Alias'):
                    if _attr in _child:
                        plan.update([(_attr, _child[_attr])])

        def incr(plan):
            if 'Node Type' in plan:
                self._count += 1

        def op(Plans):
            if isinstance(Plans, list):
                for _node in Plans:
                    incr(_node)
                    if self._depth == self._count:
                        get_relations(_node)
                        return
                    elif 'Plans' in _node:
                        op(_node['Plans'])
            else:
                incr(Plans)
                if self._depth == self._count:
                    get_relations(Plans)
                    return
                elif 'Plans' in Plans:
                    op(Plans['Plans'])

        self._depth = depth
        self._count = 0
        op(Plans)

    def __add_relations(self, Plans):
        """Add "Relation Name" to all nodes, from the bottom node up."""
        for _i in range(self.count_nodes(Plans), 0, -1):
            self.__set_relations(Plans['Plan'], _i)

    def __regression(self, Plans, reg_param, queryid, planid):
        """
        Calculate the regression parameters of Plans, and set the results
        into reg_param.

        Parameters
        ----------
        Plans : dict
            A plan grouped with the same queryid-planid.
        reg_param : dict
            A dict type skeleton with the same structure as Plans.
        queryid : int
        planid : int

        Returns
        -------
        reg_param : dict
            A dict which contains the regression parameter in each node.
        """
        def incr(plan):
            if 'Node Type' in plan:
                self._count += 1

        def op(Plans, reg_param, queryid, planid):
            if isinstance(Plans, list):
                for _i in range(0, len(Plans)):
                    incr(Plans[_i])
                    self.__calc_regression(Plans[_i], reg_param[_i],
                                           queryid, planid, self._count)
                    if 'Plans' in Plans[_i]:
                        op(Plans[_i]['Plans'], reg_param[_i]['Plans'],
                           queryid, planid)
                return
            else:
                incr(Plans)
                self.__calc_regression(Plans, reg_param, queryid, planid,
                                       self._count)
                if 'Plans' in Plans:
                    op(Plans['Plans'], reg_param['Plans'], queryid, planid)
                return

        self._count = 0
        op(Plans, reg_param, queryid, planid)

    def __get_sort_space_used(self, Plans, queryid, planid):
        """
        Get and return the max "Sort Space Used" value if
        "Sort Space Type" is "Disk".

        Parameters
        ----------
        Plans : dict
            A plan grouped with the same queryid-planid.
        queryid : int
        planid : int

        Returns
        -------
        Max "Sort Space Used" value, or None when no node sorted on disk.
        """
        def pickup_sort_space_used(plan, queryid, planid):
            if 'Sort Space Type' not in plan:
                return
            _type = plan['Sort Space Type']
            _used = plan['Sort Space Used']
            for _i in range(len(_type)):
                if _type[_i] == 'Disk' and self._max_sort_space_used < _used[_i]:
                    self._max_sort_space_used = _used[_i]

        def op(Plans, queryid, planid):
            if isinstance(Plans, list):
                for _node in Plans:
                    pickup_sort_space_used(_node, queryid, planid)
                    if 'Plans' in _node:
                        op(_node['Plans'], queryid, planid)
                return
            else:
                pickup_sort_space_used(Plans, queryid, planid)
                if 'Plans' in Plans:
                    op(Plans['Plans'], queryid, planid)
                return

        self._max_sort_space_used = 0
        op(Plans, queryid, planid)
        return None if self._max_sort_space_used == 0 else self._max_sort_space_used

    #
    # Public method
    #
    def regression(self, serverId, work_mem=True):
        """
        Calculate the regression parameters of all serverId's query plans
        in the repository.
        """
        if self.check_serverId(serverId) == False:
            if Log.error <= self.LogLevel:
                print("Error: serverId '{}' is not registered.".format(serverId))
            sys.exit(1)
        self.__set_serverId(serverId)
        self.set_log_level(self.LogLevel)
        if Log.info <= self.LogLevel:
            print('Info: Calculating regression parameters.')
        # Compare the sequence ids of the grouping and regression stat
        # files to decide whether new grouped data needs processing.
        _grouping_seqid = self.get_seqid_from_grouping_stat(self.ServerId)
        self.check_regression_dir(self.ServerId)
        _regression_seqid = self.get_seqid_from_regression_stat(self.ServerId)
        if Log.debug3 <= self.LogLevel:
            print('Debug3: _grouping_seqid={} _regression_seqid={}'.format(
                _grouping_seqid, _regression_seqid))
        if _regression_seqid < _grouping_seqid:
            for _hash_subdir in self.get_grouping_dir_list(self.ServerId):
                _gsdirpath = self.get_grouping_subdir_path(self.ServerId,
                                                           _hash_subdir)
                if not os.path.isdir(_gsdirpath):
                    continue
                for f in self.get_grouping_subdir_list(self.ServerId,
                                                       _hash_subdir):
                    _gpath = self.path(_gsdirpath, f)
                    # File names are "<queryid>.<planid>".
                    _qp_id = str(f).split('.')
                    _queryid = _qp_id[0]
                    _planid = _qp_id[1]
                    if Log.debug3 <= self.LogLevel:
                        print('Debug3: >>>>>> gpath={}'.format(_gpath))
                    # Read the plan twice: one copy for the inputs and one
                    # to become the regression-parameter skeleton.
                    _json_dict = self.read_plan_json(_gpath)
                    _reg_param = self.read_plan_json(_gpath)
                    self.__add_relations(_reg_param)
                    self.delete_unnecessary_objects(self.__delete_objects,
                                                    _reg_param)
                    # Calculate the regression parameters in each plan and
                    # store them into _reg_param.
                    self.__init_level()
                    self.__regression(_json_dict['Plan'], _reg_param['Plan'],
                                      _queryid, _planid)
                    # Add "Sort Space Used" item if "Sort Space Type" is
                    # "Disk".
                    if work_mem == True:
                        self.__init_level()
                        _max_sort_space_used = self.__get_sort_space_used(
                            _json_dict['Plan'], _queryid, _planid)
                        if _max_sort_space_used is not None:
                            _reg_param.update(
                                {'SortSpaceUsed': _max_sort_space_used})
                    # Write the result (regression parameters) to the
                    # regression directory.
                    _rsdirpath = self.get_regression_subdir_path(
                        self.ServerId, _hash_subdir)
                    if os.path.exists(_rsdirpath) == False:
                        os.makedirs(_rsdirpath)
                    _rpath = self.path(_rsdirpath, f)
                    self.write_plan_json(_reg_param, _rpath)
                    if Log.debug3 <= self.LogLevel:
                        print('Debug3: Rpath={}'.format(_rpath))
                        print('Debug3: reg_param={}'.format(_reg_param))
            # Update stat file.
            self.update_regression_stat_file(self.ServerId, _grouping_seqid)
class Replace(Common):
    """
    Replace the optimizer's 'Plan Rows' throughout a plan tree with rows
    estimated from previously computed regression parameters.
    """

    def __init__(self, log_level=Log.info):
        # Internal work fields (not used by the methods visible here).
        self.__numNode = 0
        self.__ar = []
        self.LogLevel = log_level

    def __calc(self, plan, param, queryid, planid, depth):
        """
        Re-estimate 'Plan Rows' of the single node *plan* in place, using the
        node's regression parameters *param*.

        Join-like nodes combine the (already corrected) 'Plan Rows' of their
        outer/inner children; scan-like nodes apply a linear model to the
        node's own 'Plan Rows'.
        """

        def get_children_plan_rows(plan):
            # Split the two children into outer/inner slots using the
            # 'Parent Relationship' field of each child.
            _X = [[], []]
            _NP = [[], []]
            _NPP = [[], []]
            for i in range(0, 2):
                p = plan['Plans'][i]
                k = (0 if (p['Parent Relationship'] == 'Outer') else 1)
                _X[k] = p['Plan Rows']
                _NP[k] = p['NormalizeParam']
                _NPP[k] = p['NormalizePlanParam']
            return (_X[0], _X[1], _NP[0], _NP[1], _NPP[0], _NPP[1])

        def get_inputs(plan):
            # Gather child rows / normalization factors plus the removed-row
            # count of this node.
            (_Xouter, _Xinner, _NPouter, _NPinner, _NPPouter, _NPPinner) = get_children_plan_rows(plan)
            if (Log.debug3 <= self.LogLevel):
                print('Debug3: Xouter={} Xinner={} normalize({}, {}) normalizePlanPalam({}, {})'.format(_Xouter, _Xinner, _NPouter, _NPinner, _NPPouter, _NPPinner))
            _RR = self.count_removed_rows(plan)
            return (_Xouter, _Xinner, _NPouter, _NPinner, _NPPouter, _NPPinner, _RR)

        '''
        Start processing
        '''
        _node_type = plan['Node Type']
        if (Log.debug1 <= self.LogLevel):
            print('Debug1: count={} depth={} Node Type={}'.format(self._count, self._depth, plan['Node Type']))
        '''
        nested loop type
        '''
        for n in ('Append', 'Merge Append', 'Recursive Union', 'Nested Loop', 'BitmapAnd', 'BitmapOr'):
            if (n == _node_type):
                (_Xouter, _Xinner, _NPouter, _NPinner, _NPPouter, _NPPinner, _RR) = get_inputs(plan)
                '''
                Correct Plan Rows using the regression parameter
                and the EstimatedRows of both inner and outer nodes.
                '''
                if (Log.debug1 <= self.LogLevel):
                    print('Debug1: Y ActualRows={} NormalizeParam={}'.format(plan['Actual Rows'], plan['NormalizeParam']))
                    print('Debug1: Xouter ={} NormalizeParam={}'.format(_Xouter, _NPouter))
                    print('Debug1: Xinner ={} NormalizeParam={}'.format(_Xinner, _NPinner))
                # Model: rows = coef * Xouter * Xinner.
                _EstimatedRows = round(((param['Coefficient'][0] * _Xouter) * _Xinner))
                if (Log.debug1 <= self.LogLevel):
                    print('Debug1: EstimatedRows({}) = Coef({}) * Xouter({}) * Xinner({})'.format(_EstimatedRows, param['Coefficient'][0], _Xouter, _Xinner))
                plan.update(Coefficient=param['Coefficient'][0])
                plan.update({'Plan Rows': _EstimatedRows})
                if (Log.debug1 <= self.LogLevel):
                    # NOTE(review): this stores the already-replaced value, not
                    # the pre-replacement one -- confirm whether it should be
                    # captured before the update above.
                    plan.update(OriginalPlanRows=plan['Plan Rows'])
                return
        '''
        hash or merge join
        '''
        for n in ('Merge Join', 'Hash Join'):
            if (n == _node_type):
                (_Xouter, _Xinner, _NPouter, _NPinner, _NPPouter, _NPPinner, _RR) = get_inputs(plan)
                '''
                Correct Plan Rows using the regression parameter
                and the EstimatedRows of both inner and outer nodes.
                '''
                if (Log.debug1 <= self.LogLevel):
                    print('Debug1: Y ActualRows={} NormalizeParam={}'.format(plan['Actual Rows'], plan['NormalizeParam']))
                    print('Debug1: Xouter ={} NormalizeParam={}'.format(_Xouter, _NPouter))
                    print('Debug1: Xinner ={} NormalizeParam={}'.format(_Xinner, _NPinner))
                if ((param['Coefficient'][0] == 0) and (param['Coefficient'][1] == 0)):
                    # Degenerate linear fit: fall back to the product model
                    # rows = coef2 * Xouter * Xinner + intercept.
                    _EstimatedRows = round((((param['Coefficient2'][0] * _Xouter) * _Xinner) + param['Intercept'][0]))
                    plan.update(Coefficient=[0, 0])
                    plan.update(Coefficient2=param['Coefficient2'][0])
                    plan.update(Intercept=param['Intercept'])
                    if (Log.debug1 <= self.LogLevel):
                        print('Debug1: EstimatedRows({}) = Coef({}) * Xouter({}) * Xinner({}) + {}'.format(_EstimatedRows, param['Coefficient2'][0], _Xouter, _Xinner, param['Intercept']))
                else:
                    # Linear model: rows = c0*Xouter + c1*Xinner + intercept.
                    _EstimatedRows = round((((param['Coefficient'][0] * _Xouter) + (param['Coefficient'][1] * _Xinner)) + param['Intercept'][0]))
                    plan.update(Coefficient=param['Coefficient'])
                    plan.update(Coefficient2=0)
                    plan.update(Intercept=param['Intercept'])
                    if (Log.debug1 <= self.LogLevel):
                        print('Debug1: EstimatedRows({}) = Coef({}) * Xouter({}) + Coef({}) * Xinner({}) + {}'.format(_EstimatedRows, param['Coefficient'][0], _Xouter, param['Coefficient'][1], _Xinner, param['Intercept']))
                plan.update({'Plan Rows': _EstimatedRows})
                if (Log.debug1 <= self.LogLevel):
                    # NOTE(review): stores the already-replaced value (see above).
                    plan.update(OriginalPlanRows=plan['Plan Rows'])
                return
        '''
        scan type
        '''
        "Calculate 'EstimatedRows' using the regression parameter."
        if ('Plan Rows' in plan):
            # Linear model on the node's own estimate, then rescaled by the
            # plan/actual normalization ratio.
            _EstimatedRows = ((param['Coefficient'][0] * plan['Plan Rows']) + param['Intercept'][0])
            _EstimatedRows = round(((_EstimatedRows * plan['NormalizePlanParam']) / plan['NormalizeParam']))
            if (Log.debug1 <= self.LogLevel):
                print('Debug1: EstimatedRows({}) = [Coef({}) * PlanRows({}) + Intercept({})] * NormalizePlan({}) / Normalize({})'.format(_EstimatedRows, param['Coefficient'][0], plan['Plan Rows'], param['Intercept'][0], plan['NormalizePlanParam'], plan['NormalizeParam']))
            plan.update(Coefficient=param['Coefficient'][0])
            plan.update({'Plan Rows': _EstimatedRows})
            if (Log.debug1 <= self.LogLevel):
                # NOTE(review): stores the already-replaced value (see above).
                plan.update(OriginalPlanRows=plan['Plan Rows'])
        return

    def __replace(self, Plans, Reg_Params, depth, queryid, planid):
        """
        In the depth-th node of Plans, Replace the 'Plan Rows' of original Plans
        with the estimated 'Plan Rows' using the regression params:Reg_Params.
        """
        def incr(plan):
            # Count only real plan nodes while walking.
            if ('Node Type' in plan):
                self._count += 1

        def op(Plans, Reg_Params, queryid, planid):
            # Depth-first search for the node whose visit order == self._depth;
            # Reg_Params mirrors the structure of Plans.
            if isinstance(Plans, list):
                for i in range(0, len(Plans)):
                    incr(Plans[i])
                    if (self._depth == self._count):
                        self.__calc(Plans[i], Reg_Params[i], queryid, planid, self._count)
                        return
                    elif ('Plans' in Plans[i]):
                        op(Plans[i]['Plans'], Reg_Params[i]['Plans'], queryid, planid)
            else:
                incr(Plans)
                if (self._depth == self._count):
                    self.__calc(Plans, Reg_Params, queryid, planid, self._count)
                    return
                elif ('Plans' in Plans):
                    op(Plans['Plans'], Reg_Params['Plans'], queryid, planid)

        self._depth = depth
        self._count = 0
        op(Plans, Reg_Params, queryid, planid)

    '''
    Public method
    '''

    def replace_plan_rows(self, Plans, Reg_Params, numNode, queryid, planid):
        """
        Replace the 'Plan Rows' of original Plans with the estimated 'Plan Rows'
        using the regression parameters:Reg_Params.

        Parameters
        ----------
        Plans : dict
            A Plans

        Reg_Params : dict
            A dict that stores the regression parameters of Plans.

        numNode : int
            The number of nodes of Plans.

        queryid : int
        planid : int
        """
        """
        'Plan Rows' of the Plans are estimated and replaced from the bottom
        of the Plans to the top.

        If the node type is like Scan, e.g., Seq Scan and Index Only Scan,
        the 'Plan Rows' is replaced with the estimated 'Plan Rows' which is
        calculated using the corresponding regression parameters.

        Otherwise, that is, the node type is like Join, e.g., Nested Loop and
        Hash Join, the 'Plan Rows' is calculated using the corresponding regression
        parameters and the estimated 'Plan Rows' of both inner and outer nodes.

        The simplest example is shown below:

        This example plan is a nested loop join. The outer and inner nodes of
        the nested loop are Seq Scan and Index Only Scan, respectively.
        The 'Plan Rows' generated by the optimizer and the regression parameters
        created by repo_mgr.py of all nodes are shown below:

        ```
        Nested Loop (Plan Rows = 1) (Reg_Params[Node1][coef] = 0.05)
        -> Outer:Seq Scan (Plan Rows = 3) (Reg_Params[Node2][coef] = 2.0)
        -> Inner:Index Only Scan (Plan Rows = 10) (Reg_Params[Node3][coef] = 1.0)
        ```

        In the following, how to replace 'Plan Rows' are shown.

        (1) Estimate the inner rows.
        The estimated inner rows can be calculated as follows:
        estimated inner Plan Rows = Reg_Params[Node3][coef] * Inner Plan Rows
                                  = 1.0 * 10
                                  = 10
        In this case, the Plan Rows and the estimated Plan Rows are equal, so no need to replace.
        ```
        Nested Loop (Plan Rows = 1)
        -> Outer:Seq Scan (Plan Rows = 3)
        -> Inner:Index Only Scan (Plan Rows = 10) (Reg_Params[Node3][coef] = 1.0)
        ```

        (2) Estimate the outer rows.
        The estimated outer row can be calculated as follows:
        estimated outer Plan Rows = Reg_Params[Node2][coef] * Inner Plan Rows
                                  = 2.0 * 3
                                  = 6
        Then, replace the original 'Plan Rows' with the estimated one.
        ```
        Nested Loop (Plan Rows = 1)
        -> Outer:Seq Scan (Plan Rows = 6) (Reg_Params[Node2][coef] = 2.0)
        -> Inner:Index Only Scan (Plan Rows = 10)
        ```

        (3) Estimate the nested loop's plan rows.
        The estimated nested loop's rows can be calculated as follows:
        estimated nested loop's Rows = Reg_Params[Node1][coef] * 'estimated outer Plan Rows' * 'estimated inner Plan Rows'
                                     = 0.05 * 6 * 10
                                     = 3
        Then, replace the original 'Plan Rows' with the estimated one.
        ```
        Nested Loop (Plan Rows = 3) (Reg_Params[Node1][coef] = 0.05)
        -> Outer:Seq Scan (Plan Rows = 6)
        -> Inner:Index Only Scan (Plan Rows = 10)
        ```
        """
        # Visit nodes from the deepest (numNode) back to the root (1) so that
        # child estimates are corrected before their parents consume them.
        i = numNode
        if (Log.debug1 <= self.LogLevel):
            print('Debug1: >>> Start replace')
        while (0 < i):
            if (Log.debug1 <= self.LogLevel):
                print('Debug1: >>> replace i = {}'.format(i))
            self.__replace(Plans, Reg_Params, i, queryid, planid)
            i -= 1
class Repository(Common):
    """
    Filesystem-backed repository of query plans, grouped statistics and
    regression parameters, laid out under <base_dir>/<REPOSITORY_DIR>/.

    Fixes vs. the previous revision:
    - ``(sys, exit(1))`` typos in rename_serverId()/remove_serverId() replaced
      with ``sys.exit(1)``.
    - __get_seqid_from_stat_file() now returns 0 (instead of implicitly
      returning None or raising KeyError) when the directory, stat file or
      section is missing.
    - rename_serverId() opens the new conf file with a context manager so the
      ``finally`` clause can no longer hit an unbound ``fp_conf``.
    - is_serverId_valid() uses re.fullmatch(), which no longer raises
      AttributeError for ids without any word character.
    """

    def __init__(self, base_dir='.', log_level=Log.error):
        self.set_base_dir(base_dir)
        self.LogLevel = log_level

    # Minimum required permission modes (504 == 0o770, 416 == 0o640).
    DEFAULT_DIR_MODE = 504
    DEFAULT_HOSTS_CONF_MODE = 416

    def secure_check(self, path, ref_mode):
        """Return True if *path* is absent or its mode grants no permission
        bit beyond *ref_mode*."""
        if (os.path.exists(path) == False):
            if (Log.notice <= self.LogLevel):
                print("Notice: '{}' is not found.".format(path))
            return True
        MASK = int(511)  # 0o777: keep only the permission bits.
        _mode = int((os.stat(path).st_mode & MASK))
        # After OR-then-XOR with ref_mode, any bit left set was NOT allowed.
        _mode |= ref_mode
        _mode ^= ref_mode
        return (True if (int(_mode) == 0) else False)

    '''
    stat file related functions.
    '''

    def __create_stat_file(self, serverId, path):
        # Fresh stat file: seqid starts at 0.
        stat = configparser.ConfigParser()
        stat[serverId] = {'seqid': '0'}
        with open(path, 'w') as configfile:
            stat.write(configfile)

    def __update_stat_file(self, serverId, max_seqid, _dir):
        _dirpath = self.dirpath([serverId, _dir])
        _path = self.path(_dirpath, self.STAT_FILE)
        stat = configparser.ConfigParser()
        stat[serverId] = {'seqid': max_seqid}
        with open(_path, 'w') as configfile:
            stat.write(configfile)

    def __get_seqid_from_stat_file(self, serverId, _dir):
        """Return the recorded seqid, or 0 when dir/file/section is missing."""
        _dirpath = self.dirpath([serverId, _dir])
        _path = self.path(_dirpath, self.STAT_FILE)
        if os.path.exists(_dirpath):
            stat = configparser.ConfigParser()
            stat.read(_path)
            # has_section() avoids a KeyError when the file is empty/missing.
            if stat.has_section(serverId):
                return int(stat[serverId]['seqid'])
        return int(0)

    '''
    Check and create directory if not found.
    '''

    def __check_dir(self, serverId, _dir, additional_dir_list):
        _dirpath = self.dirpath([serverId])
        if (os.path.exists(_dirpath) == False):
            os.mkdir(_dirpath, self.DEFAULT_DIR_MODE)
        _dirpath = self.dirpath([serverId, _dir])
        _path = self.path(_dirpath, self.STAT_FILE)
        if (os.path.exists(_dirpath) == False):
            os.mkdir(_dirpath, self.DEFAULT_DIR_MODE)
            for d in additional_dir_list:
                os.mkdir((_dirpath + d), self.DEFAULT_DIR_MODE)
            self.__create_stat_file(serverId, _path)

    def __reset_dir(self, serverId, _dir, update_stat_file):
        """Remove the content of <serverId>/<_dir> and reset its seqid to 0."""
        if (self.check_serverId(serverId) == False):
            if (Log.error <= self.LogLevel):
                print("Error: serverId '{}' is not registered.".format(serverId))
            sys.exit(1)
        _rsdirpath = self.dirpath([serverId, _dir])
        if os.path.exists(_rsdirpath):
            if (_dir == self.TABLES_DIR):
                if (Log.debug2 <= self.LogLevel):
                    print("Debug2: rm dir '{}'".format(_rsdirpath))
                shutil.rmtree(_rsdirpath)
            else:
                # Only the three-digit hash subdirectories are removed.
                _d = ((str(_rsdirpath) + '/') + '[0-9][0-9][0-9]')
                _dirs = glob.glob(_d, recursive=True)
                for _subdir in _dirs:  # renamed: do not shadow the _dir param
                    if (Log.debug2 <= self.LogLevel):
                        print("Debug2: rm '{}'".format(_subdir))
                    shutil.rmtree(_subdir)
        update_stat_file(serverId, 0)

    '''
    Public methods
    '''

    def set_base_dir(self, base_dir='.'):
        self.base_dir = (base_dir + '/')

    def get_conf_file_path(self):
        """Return the hosts.conf path, exiting if its mode is too permissive."""
        _path = (((self.base_dir + self.REPOSITORY_DIR) + '/') + self.CONF_FILE)
        if os.path.exists(_path):
            if (self.secure_check(_path, self.DEFAULT_HOSTS_CONF_MODE) == False):
                print("Error: {}'s mode should be set to {} or more secure.".format(self.CONF_FILE, oct(self.DEFAULT_HOSTS_CONF_MODE)))
                sys.exit(1)
        return _path

    def check_serverId(self, serverId):
        """Return True if *serverId* has a section in hosts.conf; exit on an
        invalid id."""
        if (self.is_serverId_valid(serverId) == False):
            if (Log.error <= self.LogLevel):
                print("Error: serverId='{}' is invalid.".format(serverId))
                print('\tserverId must be the following regular expression:[A-z0-9_]+')
            sys.exit(1)
        _path = self.get_conf_file_path()
        config = configparser.ConfigParser()
        config.read(_path)
        return config.has_section(serverId)

    def get_serverId(self, host, port):
        """Return the serverId whose section matches (host, port), else None."""
        _path = self.get_conf_file_path()
        _config = configparser.ConfigParser()
        _config.read(_path)
        _ret = None
        for section in _config.sections():
            if (('host' in _config[section]) and ('port' in _config[section])):
                if ((_config[section]['host'] == host) and (_config[section]['port'] == port)):
                    _ret = section
                    break
        return _ret

    def dirpath(self, dirlist):
        """Join *dirlist* (str or list of str) under the repository root,
        with a trailing '/'."""
        _dir = ((self.base_dir + self.REPOSITORY_DIR) + '/')
        if isinstance(dirlist, list):
            for d in dirlist:
                _dir += (d + '/')
        else:
            _dir += (dirlist + '/')
        return _dir

    def path(self, dirpath, filename):
        return (dirpath + filename)

    '''
    top dir
    '''

    def create_repo(self):
        'Create a repository.'

        def create_conf_file(path):
            # Seed hosts.conf with one example server section and one stub.
            config = configparser.ConfigParser()
            config['server_1'] = {'host': 'localhost', 'port': '5432', 'username': 'postgres', 'input_password': 'false', 'password': ''}
            config['server_2'] = {}
            with open(path, 'w') as configfile:
                config.write(configfile)

        if os.path.exists((self.base_dir + self.REPOSITORY_DIR)):
            print("Error: directory '{}' already exists.".format((self.base_dir + self.REPOSITORY_DIR)))
            sys.exit(1)
        if (os.path.exists(self.base_dir) == False):
            os.mkdir(self.base_dir)
        os.mkdir((self.base_dir + self.REPOSITORY_DIR), mode=self.DEFAULT_DIR_MODE)
        _conf_file_path = self.get_conf_file_path()
        create_conf_file(_conf_file_path)
        os.chmod(_conf_file_path, self.DEFAULT_HOSTS_CONF_MODE)

    def is_serverId_valid(self, serverId):
        """Return True iff *serverId* consists only of word characters.

        re.fullmatch() replaces the former search().group() comparison, which
        raised AttributeError when no word character was present at all.
        """
        return (re.fullmatch('\\w+', serverId) is not None)

    def check_host_conf_file(self):
        """Report on hosts.conf permissions and serverId validity."""
        _path = self.get_conf_file_path()
        print('Checking hosts.conf mode....')
        if (self.secure_check(_path, self.DEFAULT_HOSTS_CONF_MODE) == True):
            print('\tReport: {} is secure.'.format(self.CONF_FILE))
        else:
            print("\tError: {}'s mode should be set to {} or more secure.".format(self.CONF_FILE, oct(self.DEFAULT_HOSTS_CONF_MODE)))
        print('Checking serverIds....')
        _config = configparser.ConfigParser()
        _config.read(_path)
        _ret = True
        for s in _config.sections():
            if (self.is_serverId_valid(s) == False):
                print("\tError: serverId '{}' is invalid name.".format(s))
                _ret = False
        if (_ret == True):
            print('\tReport: All serverIds are valid.')

    def check_dirs(self):
        """Report on the permission mode of every repository subdirectory."""
        print('Checking directories....')
        _path = ((self.base_dir + self.REPOSITORY_DIR) + '/')
        _dirlist = os.listdir(_path)
        for _dir in _dirlist:
            _dirpath = (_path + _dir)
            if os.path.isdir(_dirpath):
                if (self.secure_check(_dirpath, self.DEFAULT_DIR_MODE) == True):
                    print('\tReport: {} is secure.'.format(_dirpath))
                else:
                    print("\tError: {}'s mode should be set to {} or more secure.".format(_dirpath, oct(self.DEFAULT_DIR_MODE)))
                for subdir in (self.TABLES_DIR, self.GROUPING_DIR, self.REGRESSION_DIR, self.FORMATTED_REGRESSION_PARAMS_DIR):
                    _subdirpath = ((_dirpath + '/') + subdir)
                    if (self.secure_check(_subdirpath, self.DEFAULT_DIR_MODE) == True):
                        print('\tReport: {} is secure.'.format(_subdirpath))
                    else:
                        print("\tError: {}'s mode should be set to {} or more secure.".format(_subdirpath, oct(self.DEFAULT_DIR_MODE)))

    def rename_serverId(self, old_serverId, new_serverId):
        """Rename a serverId: move its directory and rewrite its hosts.conf
        section header; roll both back on failure."""

        def mv_dir(old_serverId, new_serverId):
            _dirpath = ((self.base_dir + self.REPOSITORY_DIR) + '/')
            os.rename((_dirpath + old_serverId), (_dirpath + new_serverId))

        if (self.is_serverId_valid(new_serverId) == False):
            print("Error: new serverId '{}' is invalid name.".format(new_serverId))
            sys.exit(1)
        if (self.check_serverId(old_serverId) == False):
            print("Error: old serverId '{}' does not exit.".format(old_serverId))
            sys.exit(1)  # was the typo ``(sys, exit(1))``
        mv_dir(old_serverId, new_serverId)
        _conf_path = self.get_conf_file_path()
        _conf_tmp_path = (_conf_path + '.tmp')
        os.rename(_conf_path, _conf_tmp_path)
        try:
            # Context managers guarantee both files are closed, even on error.
            with open(_conf_path, mode='w') as fp_conf:
                with open(_conf_tmp_path, mode='r') as fp_conf_tmp:
                    for _line in fp_conf_tmp:
                        if (str((('[' + old_serverId) + ']')) in _line):
                            _line = str(((('[' + new_serverId) + ']') + '\n'))
                        fp_conf.write(_line)
            os.remove(_conf_tmp_path)
        except Exception as e:
            # Roll back both the conf file and the directory rename.
            os.rename(_conf_tmp_path, _conf_path)
            mv_dir(new_serverId, old_serverId)
            print(e)
            print('Error: Could not rename serverId.')
        finally:
            os.chmod(_conf_path, self.DEFAULT_HOSTS_CONF_MODE)

    def remove_serverId(self, serverId):
        """Remove the directory of *serverId*; return True on success."""

        def rm_dir(serverId):
            _dirpath = (((self.base_dir + self.REPOSITORY_DIR) + '/') + serverId)
            if os.path.exists(_dirpath):
                shutil.rmtree(_dirpath)
                if (Log.debug1 <= self.LogLevel):
                    print('Debug1: Deleted {}.'.format(_dirpath))
                return True
            else:
                print('Debug1: {} Not Found.'.format(_dirpath))
                return False

        if (self.check_serverId(serverId) == False):
            print("Error: serverId '{}' does not exit.".format(serverId))
            sys.exit(1)  # was the typo ``(sys, exit(1))``
        return rm_dir(serverId)

    def show_hosts(self, verbose):
        """Print every serverId; with *verbose*, also host/port/username."""
        _path = self.get_conf_file_path()
        _config = configparser.ConfigParser()
        _config.read(_path)
        print('ServerId:')
        for section in _config.sections():
            if ('host' in _config[section]):
                print('\t{}'.format(section))
                if (verbose == True):
                    print('\t\thost = {}'.format(_config[section]['host']))
                    if ('port' in _config[section]):
                        print('\t\tport = {}'.format(_config[section]['port']))
                    if ('username' in _config[section]):
                        print('\t\tusername = {}'.format(_config[section]['username']))

    '''
    tables subdir
    '''

    def update_tables_stat_file(self, serverId, max_seqid):
        self.__update_stat_file(serverId, max_seqid, self.TABLES_DIR)

    def get_seqid_from_tables_stat(self, serverId):
        return self.__get_seqid_from_stat_file(serverId, self.TABLES_DIR)

    def check_tables_dir(self, serverId):
        self.__check_dir(serverId, self.TABLES_DIR, [])

    def reset_tables_dir(self, serverId):
        self.__reset_dir(serverId, self.TABLES_DIR, self.update_tables_stat_file)

    def get_log_csv_path(self, serverId):
        _csvdirpath = self.dirpath([serverId, self.TABLES_DIR])
        return self.path(_csvdirpath, self.TABLES_FILE)

    def get_query_dir_path(self, serverId, queryid):
        return self.dirpath([serverId, self.TABLES_DIR, self.TABLES_QUERY_DIR, self.hash_dir(queryid), str(queryid)])

    def get_plan_dir_path(self, serverId, queryid, planid):
        return self.dirpath([serverId, self.TABLES_DIR, self.TABLES_PLAN_DIR, self.hash_dir(planid), ((str(queryid) + '.') + str(planid))])

    def get_plan_json_dir_path(self, serverId, queryid, planid):
        return self.dirpath([serverId, self.TABLES_DIR, self.TABLES_PLAN_JSON_DIR, self.hash_dir(planid), ((str(queryid) + '.') + str(planid))])

    def get_plan_json_path(self, serverId, seqid, queryid, planid):
        _logdirpath = self.get_plan_json_dir_path(serverId, queryid, planid)
        return self.path(_logdirpath, str(seqid))

    def get_query(self, serverId, queryid):
        'Get Query by queryid'
        # NOTE(review): the CSV lookup below uses self.ServerId rather than
        # the serverId parameter -- confirm this is intentional (callers such
        # as Analyze set self.ServerId beforehand).
        _dirpath = self.dirpath([serverId, self.TABLES_DIR, self.TABLES_QUERY_DIR, self.hash_dir(int(queryid)), str(queryid)])
        _files = glob.glob((_dirpath + '[0-9]*'))
        for _qf in _files:
            _seqid_file = _qf.split('/')[(- 1)]
            with open(_qf) as fp:
                _query = fp.read()
            with open(self.get_log_csv_path(self.ServerId), newline='') as f:
                _reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_NONE)
                for _row in _reader:
                    _seqid = int(_row[0])
                    _database = str(_row[3])
                    _planid = int(_row[7])
                    if (int(_seqid_file) == _seqid):
                        return (_database, _query, _planid)
        return (None, None, None)

    '''
    grouping subdir
    '''

    def update_grouping_stat_file(self, serverId, max_seqid):
        self.__update_stat_file(serverId, max_seqid, self.GROUPING_DIR)

    def get_seqid_from_grouping_stat(self, serverId):
        return self.__get_seqid_from_stat_file(serverId, self.GROUPING_DIR)

    def check_grouping_dir(self, serverId):
        self.__check_dir(serverId, self.GROUPING_DIR, [])

    def reset_grouping_dir(self, serverId):
        self.__reset_dir(serverId, self.GROUPING_DIR, self.update_grouping_stat_file)

    def get_grouping_plan_dir_path(self, serverId, planid):
        return self.dirpath([str(serverId), self.GROUPING_DIR, self.hash_dir(planid)])

    def get_grouping_plan_path(self, serverId, queryid, planid):
        return self.path(self.get_grouping_plan_dir_path(serverId, planid), ((str(queryid) + '.') + str(planid)))

    def get_grouping_dir_path(self, serverId):
        return self.dirpath([serverId, self.GROUPING_DIR])

    def get_grouping_dir_list(self, serverId):
        return os.listdir(self.dirpath([serverId, self.GROUPING_DIR]))

    def get_grouping_subdir_path(self, serverId, subdir):
        return self.dirpath([serverId, self.GROUPING_DIR, subdir])

    def get_grouping_subdir_list(self, serverId, subdir):
        return os.listdir(self.dirpath([serverId, self.GROUPING_DIR, subdir]))

    '''
    regression subdir
    '''

    def update_regression_stat_file(self, serverId, max_seqid):
        self.__update_stat_file(serverId, max_seqid, self.REGRESSION_DIR)

    def get_seqid_from_regression_stat(self, serverId):
        return self.__get_seqid_from_stat_file(serverId, self.REGRESSION_DIR)

    def check_regression_dir(self, serverId):
        self.__check_dir(serverId, self.REGRESSION_DIR, [])

    def reset_regression_dir(self, serverId):
        self.__reset_dir(serverId, self.REGRESSION_DIR, self.update_regression_stat_file)

    def get_regression_subdir_path(self, serverId, subdir):
        return self.dirpath([serverId, self.REGRESSION_DIR, subdir])

    def get_regression_param(self, serverId, queryid, planid):
        """Return the stored regression parameters for queryid.planid, or
        None if not yet computed."""
        _key = ((str(queryid) + '.') + str(planid))
        _pathdir = self.dirpath([serverId, self.REGRESSION_DIR, self.hash_dir(planid)])
        _path = self.path(_pathdir, _key)
        if os.path.exists(_path):
            return self.read_plan_json(_path)
        else:
            return None

    '''
    formatted regression parameter subdir
    '''

    def check_formatted_regression_params_dir(self, serverId):
        self.__check_dir(serverId, self.FORMATTED_REGRESSION_PARAMS_DIR, [])

    def get_formatted_regression_params_subdir_path(self, serverId):
        return self.dirpath([serverId, self.FORMATTED_REGRESSION_PARAMS_DIR])

    def truncate_formatted_regression_params(self, serverId):
        _dir = self.get_formatted_regression_params_subdir_path(serverId)
        for _file_name in os.listdir(_dir):
            os.remove(((str(_dir) + '/') + str(_file_name)))

    def write_formatted_regression_params(self, serverId, queryid, param):
        _dir = self.get_formatted_regression_params_subdir_path(serverId)
        with open(((str(_dir) + '/') + str(queryid)), mode='w') as _fp:
            _fp.write(param)

    def check_formatted_regression_params(self, serverId, queryid):
        _dir = self.get_formatted_regression_params_subdir_path(serverId)
        for _file in os.listdir(_dir):
            if (str(_file) == str(queryid)):
                return True
        return False
class Rules(Common):
    """
    Heuristic state-fixup rules applied to an in-flight plan tree when no
    regression parameters are available (see apply_rules()).
    """

    def __init__(self, log_level=Log.error):
        self.LogLevel = log_level

    '''
    All rules are heuristics; there is no theoretical background.
    '''

    def __rule1(self, plan):
        # A running Hash Join with a 'Join Filter' that has already produced
        # at least its estimated rows: treat its children as finished.
        if ((plan['CurrentState'] == State.RUNNING) and (plan['Node Type'] == 'Hash Join') and ('Join Filter' in plan)):
            if (plan['Plan Rows'] <= plan['Actual Rows']):
                for p in plan['Plans']:
                    p['CurrentState'] = State.FINISHED

    def __rule2(self, plan):
        # Materialize/Hash nodes that have shown any activity are finished.
        # NOTE(review): operator precedence makes a 'Hash' node match even
        # when CurrentState is not RUNNING; confirm whether
        # RUNNING and (Materialize or Hash) was intended.
        if ((((plan['CurrentState'] == State.RUNNING) and (plan['Node Type'] == 'Materialize')) or (plan['Node Type'] == 'Hash')) and ((plan['Actual Loops'] > 0) or (plan['Actual Rows'] > 0) or (plan['MergeFlag'] == 'True'))):
            plan['CurrentState'] = State.FINISHED

    def __rule3(self, plan):
        # An inner-side scan that has looped at least once is finished.
        if ((plan['CurrentState'] == State.RUNNING) and self.isScan(plan) and (self.isInner(plan) and (plan['Actual Loops'] > 0))):
            plan['CurrentState'] = State.FINISHED

    def __rule4(self, plan):
        # An outer-side scan that already produced >= its estimate is finished.
        if ((plan['CurrentState'] == State.RUNNING) and self.isScan(plan) and self.isOuter(plan) and (plan['Plan Rows'] <= plan['Actual Rows'])):
            plan['CurrentState'] = State.FINISHED

    def __rule5(self, plan):
        # A scan that is neither an inner nor an outer child (standalone scan)
        # is considered finished.
        if ((plan['CurrentState'] == State.RUNNING) and self.isScan(plan) and ((self.isOuter(plan) == False) and (self.isInner(plan) == False))):
            plan['CurrentState'] = State.FINISHED

    def __rule6(self, plan):
        # A hash/merge join without a 'Join Filter' whose actual rows exceed
        # five times the estimate: fall back to the outer child's estimate.
        if ((plan['CurrentState'] == State.RUNNING) and ((plan['Node Type'] == 'Hash Join') or (plan['Node Type'] == 'Merge Join')) and ('Join Filter' not in plan)):
            if ((plan['Plan Rows'] * 5) < plan['Actual Rows']):
                for p in plan['Plans']:
                    if (p['Parent Relationship'] == 'Outer'):
                        plan['Plan Rows'] = p['Plan Rows']

    def __op(self, Plans):
        # Depth-first application of every rule in self.rules to every node.
        if isinstance(Plans, list):
            for plan in Plans:
                for r in self.rules:
                    r(plan)
                if ('Plans' in plan):
                    self.__op(plan['Plans'])
            return
        else:
            for r in self.rules:
                r(Plans)
            if ('Plans' in Plans):
                self.__op(Plans['Plans'])
            return

    '''
    Public method
    '''

    def apply_rules(self, plans):
        """
        Apply the rules to 'plans' if there is no regression params.
        (if there are regression params, this method is skipped in query_progress.py.)

        You can add or remove rules to suit your environment.
        """
        self.rules = [self.__rule1, self.__rule2, self.__rule3, self.__rule4, self.__rule5, self.__rule6]
        self.__op(plans)
class NN(): def __init__(self, base_dir='.', log_level=Log.error): self.ServerId = '' self.LogLevel = log_level 'For Neural Net' NN_DIR = 'nn' NN_THRESHOLD = 3 def store_params(self, serverId, queryid, planid, depth, node_type, xouter, xinner, y): _keys = ['Depth', 'Node Type', 'Xouter', 'Xinner', 'Y'] _values = [depth, node_type, xouter, xinner, y] _dict = dict(zip(_keys, _values)) _pathdir = (((self.NN_DIR + '/') + serverId) + '/') if (os.path.exists(_pathdir) == False): os.makedirs(_pathdir, self.DEFAULT_DIR_MODE) _path = self.path(_pathdir, ((((str(queryid) + '.') + str(planid)) + '.') + str(depth))) self.write_plan_json(_dict, _path)
class ExtendedStatistics(): def __init__(self, base_dir='.', log_level=Log.error): self.ServerId = '' self.LogLevel = log_level ES_FILE_PREFIX = 'es_' ES_THRESHOLD = 10 COND_LIST = ('Index Cond', 'Recheck Cond', 'TID Cond', 'Merge Cond', 'Hash Cond', 'Filter', 'Join Filter') def __check_conds(self, conds): _ret = [] for _cond in conds: if (re.search('>|<', _cond) == None): _ret.append(_cond) return (list(set(_ret)) if (len(_ret) > 0) else None) def check_es(self, plan, queryid, planid, depth): def check_conds(plan): _ret = None for n in self.COND_LIST: if (n in plan): _ret = self.__check_conds(plan[n]) return _ret _ret = None for i in range(0, len(plan['Actual Rows'])): _actual_rows = plan['Actual Rows'][i] _plan_rows = plan['Plan Rows'][i] if ((((_actual_rows * self.ES_THRESHOLD) < _plan_rows) or ((_plan_rows * self.ES_THRESHOLD) < _actual_rows)) and ((0 < _actual_rows) and (0 < _plan_rows))): _ret = check_conds(plan) if (_ret != None): return _ret if ('Plans' in plan): if isinstance(plan, list): for k in range(0, 2): _ret = check_conds(plan['Plans'][k]) if (_ret != None): return _ret else: _ret = check_conds(plan['Plans'][0]) if (_ret != None): return _ret return _ret
class Histogram():
    """
    Detects nodes whose 'Plan Rows' stays nearly constant while 'Actual Rows'
    varies widely across executions -- a hint that the column statistics
    (histogram resolution) are too coarse.
    """

    def __init__(self, base_dir='.', log_level=Log.error):
        self.ServerId = ''
        self.LogLevel = log_level

    # Prefix of the report file: hist_<serverId>.dat
    HIST_FILE_PREFIX = 'hist_'
    # Relative width of a cluster of similar 'Plan Rows' values (+/- 2.5%).
    HIST_THRESHOLD = 0.025
    # Maximum slope d(plan rows)/d(actual rows) still regarded as 'flat'.
    DIFF_THRESHOLD = 0.001
    # Actual-rows spread must exceed this factor for a cluster to count.
    RANGE_THRESHOLD = 1.75
    # Condition keys that may carry the column references to report.
    COND_LIST = ('Index Cond', 'Recheck Cond', 'TID Cond', 'Merge Cond', 'Hash Cond', 'Filter', 'Join Filter')

    def __get_bounds(self, y):
        # Lower/upper bound of the cluster that the estimate y belongs to.
        return (int(((1 - self.HIST_THRESHOLD) * y)), int(((1 + self.HIST_THRESHOLD) * y)))

    def __min_max(self, y):
        # Return (min, max) of list y.
        # NOTE(review): assumes non-negative values (_max starts at 0).
        _max = 0
        for i in range(0, len(y)):
            if (_max < y[i]):
                _max = y[i]
        _min = _max
        for i in range(0, len(y)):
            if (y[i] < _min):
                _min = y[i]
        return (_min, _max)

    def check_histogram(self, plan, queryid, planid, depth):
        """Return the list of candidate conditions of *plan*, or None.

        Executions are clustered by near-equal 'Plan Rows'; a cluster whose
        actual rows spread by more than RANGE_THRESHOLD while the estimates
        stay flat (slope below DIFF_THRESHOLD) marks the node as a candidate.
        'Plan Rows'/'Actual Rows' are parallel lists, one entry per execution.
        """
        def check_conds(conds):
            # Deduplicate the conditions (order is not preserved).
            _ret = []
            for _cond in conds:
                _ret.append(_cond)
            return (list(set(_ret)) if (len(_ret) > 0) else None)

        def create_item(_dict_list, y, x, _lower, _upper):
            # Open a new cluster seeded with estimate y and actual x.
            _d = {'y': [y], 'y_lower': _lower, 'y_upper': _upper, 'x': [x], 'x_lower': x, 'x_upper': x}
            _dict_list.append(_d)

        def append_data(_dict_list, y, x):
            # Add (y, x) to the first cluster whose bounds contain y,
            # otherwise open a new cluster.
            (_lower, _upper) = self.__get_bounds(y)
            if (len(_dict_list) == 0):
                create_item(_dict_list, y, x, _lower, _upper)
                return
            else:
                for _dict in _dict_list:
                    if ((_dict['y_lower'] <= y) and (y <= _dict['y_upper'])):
                        _dict['y'].append(y)
                        _dict['x'].append(x)
                        if (x < _dict['x_lower']):
                            _dict['x_lower'] = x
                        if (_dict['x_upper'] < x):
                            _dict['x_upper'] = x
                        return
                create_item(_dict_list, y, x, _lower, _upper)
                return

        _dict_list = []
        _ret = None
        if ('Plan Rows' in plan):
            _plan_rows = plan['Plan Rows']
            _actual_rows = plan['Actual Rows']
            for i in range(0, len(_plan_rows)):
                append_data(_dict_list, _plan_rows[i], _actual_rows[i])
            for i in range(0, len(_dict_list)):
                _dict = _dict_list[i]
                _y = _dict['y']
                _x = _dict['x']
                if (len(_y) > 1):
                    (_y_lower, _y_upper) = self.__min_max(_y)
                    _x_lower = _dict['x_lower']
                    _x_upper = _dict['x_upper']
                    _d_y = (_y_upper - _y_lower)
                    _d_x = (_x_upper - _x_lower)
                    # Skip clusters whose actual rows hardly vary.
                    if ((_x_upper <= (_x_lower * self.RANGE_THRESHOLD)) or (_d_x == 0)):
                        continue
                    # Flat estimates across a wide actual range: candidate.
                    if ((_d_y / _d_x) < self.DIFF_THRESHOLD):
                        for n in self.COND_LIST:
                            if (n in plan):
                                _ret = check_conds(plan[n])
        return _ret
class Analyze(Repository, ExtendedStatistics, NN, Histogram):
    """Walk every grouped plan stored in the repository.  For join-type nodes,
    store NN training parameters; for scan-type nodes, report candidates for
    extended statistics and histogram tuning to per-server report files.
    """

    def __init__(self, base_dir='.', log_level=Log.error):
        self.ServerId = ''
        self.Level = 0  # depth counter while walking one plan tree
        self.set_base_dir(base_dir)
        self.LogLevel = log_level

    def __set_serverId(self, serverId):
        self.ServerId = serverId

    def set_log_level(self, log_level):
        self.LogLevel = log_level

    # --- Handle self.Level value. ---
    def __init_level(self):
        self.Level = 0

    def __incr_level(self):
        self.Level += 1

    def __get_level(self):
        return self.Level

    def __calc(self, plan, queryid, planid, depth):
        """Dispatch one plan node: store NN training inputs for join-type
        nodes, otherwise check the node for extended-statistics and
        histogram candidates and append any findings to the report files.
        """
        self.__incr_level()
        _level = self.__get_level()  # NOTE(review): computed but unused here
        _node_type = plan['Node Type']
        # nested loop type
        for n in ('Append', 'Merge Append', 'Recursive Union', 'Nested Loop', 'BitmapAnd', 'BitmapOr'):
            if (n == _node_type):
                (_Xouter, _Xinner, _RR) = self.get_inputs(plan)
                _Y = plan['Actual Rows']
                # Train only when enough samples were collected.
                if (len(_Y) >= self.NN_THRESHOLD):
                    self.store_params(self.ServerId, queryid, planid, depth, n, _Xouter, _Xinner, _Y)
                return
        # hash or merge join
        for n in ('Merge Join', 'Hash Join'):
            if (n == _node_type):
                (_Xouter, _Xinner, _RR) = self.get_inputs(plan)
                _Y = plan['Actual Rows']
                if (len(_Y) >= self.NN_THRESHOLD):
                    self.store_params(self.ServerId, queryid, planid, depth, n, _Xouter, _Xinner, _Y)
                return
        # scan type
        (_database, _query, _planid) = self.get_query(self.ServerId, queryid)
        _ret = self.check_es(plan, queryid, planid, depth)
        if (_ret != None):
            self.__write_data(self.ES_FILE_PREFIX, _database, queryid, _ret, _query)
        _ret = self.check_histogram(plan, queryid, planid, depth)
        if (_ret != None):
            self.__write_data(self.HIST_FILE_PREFIX, _database, queryid, _ret, _query)
        return

    def __get_file_name(self, prefix):
        # Report file name: <prefix><serverId>.dat
        return ((prefix + str(self.ServerId)) + '.dat')

    def __write_data(self, prefix, database, queryid, ret, query):
        # Append one candidate entry to the report file.
        _str = ('Candidate:\ndatabase=' + str(database))
        _str += ('\nqueryid=' + str(queryid))
        _str += ('\nCondition:' + str(ret))
        _str += ('\nquery:' + str(query))
        _str += '\n\n'
        _file = self.__get_file_name(prefix)
        with open(_file, mode='a') as f:
            f.write(_str)

    def __analyze(self, Plans, queryid, planid):
        """Walk the plan tree depth-first, calling __calc on every node.

        Parameters
        ----------
        Plans : dict
            A plan grouped with the same queryid-planid.
        queryid : int
        planid : int
        """
        def incr(plan):
            # Count visited nodes; the running count is passed to __calc as depth.
            if ('Node Type' in plan):
                self._count += 1

        def op(Plans, queryid, planid):
            # Plans can be a single node dict or a list of sibling nodes.
            if isinstance(Plans, list):
                for i in range(0, len(Plans)):
                    incr(Plans[i])
                    self.__calc(Plans[i], queryid, planid, self._count)
                    if ('Plans' in Plans[i]):
                        op(Plans[i]['Plans'], queryid, planid)
                return
            else:
                incr(Plans)
                self.__calc(Plans, queryid, planid, self._count)
                if ('Plans' in Plans):
                    op(Plans['Plans'], queryid, planid)
                return

        self._count = 0
        op(Plans, queryid, planid)

    # --- Public method ---
    def analyze(self, serverId, command='all'):
        """Recreate the per-server report files, then analyze every grouped
        plan stored for *serverId*.  Exits the process when the serverId is
        not registered.
        """
        if (self.check_serverId(serverId) == False):
            if (Log.error <= self.LogLevel):
                print("Error: serverId '{}' is not registered.".format(serverId))
            sys.exit(1)
        self.__set_serverId(serverId)
        self.set_log_level(self.LogLevel)
        # Remove stale report files and create fresh, empty ones.
        for _prefix in (self.ES_FILE_PREFIX, self.HIST_FILE_PREFIX):
            _file = self.__get_file_name(_prefix)
            if os.path.exists(_file):
                if (Log.info <= self.LogLevel):
                    print("Info: Remove '{}'".format(_file))
                os.remove(_file)
            if (Log.info <= self.LogLevel):
                print("Info: Create '{}'".format(_file))
            with open(_file, 'w'):
                pass
        # Grouped plans are stored as <queryid>.<planid> under hashed subdirs.
        for _hash_subdir in self.get_grouping_dir_list(self.ServerId):
            _gsdirpath = self.get_grouping_subdir_path(self.ServerId, _hash_subdir)
            if os.path.isdir(_gsdirpath):
                for f in self.get_grouping_subdir_list(self.ServerId, _hash_subdir):
                    _gpath = self.path(_gsdirpath, f)
                    _qp_id = str(f).split('.')
                    _queryid = _qp_id[0]
                    _planid = _qp_id[1]
                    _json_dict = self.read_plan_json(_gpath)
                    self.__init_level()
                    self.__analyze(_json_dict['Plan'], _queryid, _planid)
def stylize():
    """Interactive stylization loop.

    Loads the transformer network from STYLE_TRANSFORM_PATH, then repeatedly
    prompts for an image path, stylizes the image (optionally preserving the
    original colors), shows it, and saves it to 'helloworld.jpg'.
    """
    device = ('cuda' if torch.cuda.is_available() else 'cpu')
    model = transformer.TransformerNetwork()
    model.load_state_dict(torch.load(STYLE_TRANSFORM_PATH))
    model = model.to(device)
    with torch.no_grad():
        while True:
            torch.cuda.empty_cache()
            print('Stylize Image~ Press Ctrl+C and Enter to close the program')
            image_path = input('Enter the image path: ')
            content = utils.load_image(image_path)
            tic = time.time()
            content_tensor = utils.itot(content).to(device)
            styled = utils.ttoi(model(content_tensor).detach())
            if PRESERVE_COLOR:
                styled = utils.transfer_color(content, styled)
            print('Transfer Time: {}'.format(time.time() - tic))
            utils.show(styled)
            utils.saveimg(styled, 'helloworld.jpg')
def stylize_folder_single(style_path, content_folder, save_folder):
    """Stylize every .jpg in *content_folder* one image at a time.

    Reads frames/pictures as follows:

        content_folder
            pic1.ext
            pic2.ext
            ...

    and saves the styled images into *save_folder* under the same names.
    """
    device = ('cuda' if torch.cuda.is_available() else 'cpu')
    model = transformer.TransformerNetwork()
    model.load_state_dict(torch.load(style_path))
    model = model.to(device)
    jpg_names = [name for name in os.listdir(content_folder) if name.endswith('.jpg')]
    with torch.no_grad():
        for name in jpg_names:
            torch.cuda.empty_cache()
            content = utils.load_image(content_folder + name)
            styled = utils.ttoi(model(utils.itot(content).to(device)).detach())
            if PRESERVE_COLOR:
                styled = utils.transfer_color(content, styled)
            utils.saveimg(styled, save_folder + name)
def stylize_folder(style_path, folder_containing_the_content_folder, save_folder, batch_size=1):
    """Stylizes images in a folder by batch.

    If the images are of different dimensions, use transform.resize() or a
    batch size of 1.
    IMPORTANT: Put content_folder inside another folder
    folder_containing_the_content_folder:

        folder_containing_the_content_folder
            content_folder
                pic1.ext
                pic2.ext
                ...

    Styled images are saved in save_folder under the same base names.
    """
    device = ('cuda' if torch.cuda.is_available() else 'cpu')
    # Scale pixel values to [0, 255] as the transformer network expects.
    transform = transforms.Compose([transforms.ToTensor(), transforms.Lambda((lambda x: x.mul(255)))])
    image_dataset = utils.ImageFolderWithPaths(folder_containing_the_content_folder, transform=transform)
    image_loader = torch.utils.data.DataLoader(image_dataset, batch_size=batch_size)
    net = transformer.TransformerNetwork()
    net.load_state_dict(torch.load(style_path))
    net = net.to(device)
    with torch.no_grad():
        for (content_batch, _, path) in image_loader:
            torch.cuda.empty_cache()
            generated_tensor = net(content_batch.to(device)).detach()
            for i in range(len(path)):
                generated_image = utils.ttoi(generated_tensor[i])
                if PRESERVE_COLOR:
                    # BUG FIX: the original referenced an undefined name
                    # 'content_image' here (NameError whenever PRESERVE_COLOR
                    # is set).  Reload the source image from its path so its
                    # colors can be transferred onto the styled output.
                    content_image = utils.load_image(path[i])
                    generated_image = utils.transfer_color(content_image, generated_image)
                image_name = os.path.basename(path[i])
                utils.saveimg(generated_image, (save_folder + image_name))
class TransformerNetwork(nn.Module):
    """Feedforward Transformation Network without Tanh.

    reference: https://arxiv.org/abs/1603.08155
    exact architecture: https://cs.stanford.edu/people/jcjohns/papers/fast-style/fast-style-supp.pdf
    """

    def __init__(self):
        super().__init__()
        # Downsampling stage: 3 -> 32 -> 64 -> 128 channels.
        self.ConvBlock = nn.Sequential(
            ConvLayer(3, 32, 9, 1), nn.ReLU(),
            ConvLayer(32, 64, 3, 2), nn.ReLU(),
            ConvLayer(64, 128, 3, 2), nn.ReLU(),
        )
        # Five residual blocks at 128 channels.
        self.ResidualBlock = nn.Sequential(*[ResidualLayer(128, 3) for _ in range(5)])
        # Upsampling stage back to 3 channels; last layer is unnormalized.
        self.DeconvBlock = nn.Sequential(
            DeconvLayer(128, 64, 3, 2, 1), nn.ReLU(),
            DeconvLayer(64, 32, 3, 2, 1), nn.ReLU(),
            ConvLayer(32, 3, 9, 1, norm='None'),
        )

    def forward(self, x):
        """Run the image batch through conv, residual, and deconv stages."""
        return self.DeconvBlock(self.ResidualBlock(self.ConvBlock(x)))
class TransformerNetworkTanh(TransformerNetwork):
    """A modification of the transformation network that uses Tanh function as output.

    This follows more closely the architecture outlined in the original paper's
    supplementary material; the model produces darker images and provides a
    retro styling effect.
    Reference: https://cs.stanford.edu/people/jcjohns/papers/fast-style/fast-style-supp.pdf
    """

    def __init__(self, tanh_multiplier=150):
        super().__init__()
        # Replace the parent's deconv stage with one ending in Tanh.
        self.DeconvBlock = nn.Sequential(
            DeconvLayer(128, 64, 3, 2, 1), nn.ReLU(),
            DeconvLayer(64, 32, 3, 2, 1), nn.ReLU(),
            ConvLayer(32, 3, 9, 1, norm='None'),
            nn.Tanh(),
        )
        self.tanh_multiplier = tanh_multiplier

    def forward(self, x):
        # Tanh outputs lie in [-1, 1]; rescale into image range.
        return super().forward(x) * self.tanh_multiplier
class ConvLayer(nn.Module):
    """Reflection-padded Conv2d followed by optional instance/batch norm.

    norm: 'instance' (default), 'batch', or 'None' for no normalization.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, norm='instance'):
        super().__init__()
        # 'Same'-style padding via reflection to avoid border artifacts.
        pad = kernel_size // 2
        self.reflection_pad = nn.ReflectionPad2d(pad)
        self.conv_layer = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
        self.norm_type = norm
        if norm == 'instance':
            self.norm_layer = nn.InstanceNorm2d(out_channels, affine=True)
        elif norm == 'batch':
            self.norm_layer = nn.BatchNorm2d(out_channels, affine=True)

    def forward(self, x):
        out = self.conv_layer(self.reflection_pad(x))
        return out if self.norm_type == 'None' else self.norm_layer(out)
class ResidualLayer(nn.Module):
    """Residual block: two stride-1 ConvLayers with a skip connection.

    Deep Residual Learning for Image Recognition, https://arxiv.org/abs/1512.03385
    """

    def __init__(self, channels=128, kernel_size=3):
        super().__init__()
        self.conv1 = ConvLayer(channels, channels, kernel_size, stride=1)
        self.relu = nn.ReLU()
        self.conv2 = ConvLayer(channels, channels, kernel_size, stride=1)

    def forward(self, x):
        # conv -> relu -> conv, then add the input back (identity shortcut).
        residual = self.conv2(self.relu(self.conv1(x)))
        return residual + x
class DeconvLayer(nn.Module):
    """ConvTranspose2d upsampling layer with optional instance/batch norm.

    norm: 'instance' (default), 'batch', or 'None' for no normalization.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, output_padding, norm='instance'):
        super().__init__()
        pad = kernel_size // 2
        self.conv_transpose = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, pad, output_padding)
        self.norm_type = norm
        if norm == 'instance':
            self.norm_layer = nn.InstanceNorm2d(out_channels, affine=True)
        elif norm == 'batch':
            self.norm_layer = nn.BatchNorm2d(out_channels, affine=True)

    def forward(self, x):
        out = self.conv_transpose(x)
        return out if self.norm_type == 'None' else self.norm_layer(out)
def gram(tensor):
    """Return the (B, C, C) Gram matrix of a (B, C, H, W) feature tensor.

    Each entry is the inner product between two channel maps, normalized by
    C*H*W.
    """
    batch, channels, height, width = tensor.shape
    feats = tensor.view(batch, channels, height * width)
    return feats.bmm(feats.transpose(1, 2)) / (channels * height * width)
def load_image(path):
    """Read the image at *path* as a BGR array (cv2 default channel order).

    Returns None when the file cannot be read (cv2.imread behavior).
    """
    return cv2.imread(path)
def show(img):
    """Display a BGR image with matplotlib (converted to RGB, scaled to [0, 1])."""
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    rgb = np.array(rgb / 255).clip(0, 1)
    plt.figure(figsize=(10, 5))
    plt.imshow(rgb)
    plt.show()
def saveimg(img, image_path):
    """Clamp *img* to the valid [0, 255] range and write it to *image_path*."""
    cv2.imwrite(image_path, img.clip(0, 255))
def itot(img, max_size=None):
    """Convert an HxWxC image array to a 1xCxHxW float tensor scaled to [0, 255].

    When *max_size* is given, the image is first resized so its longest side
    equals max_size, preserving the aspect ratio.
    """
    # FIX: compare to None with 'is', not '==' (PEP 8 idiom).
    if max_size is None:
        itot_t = transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda x: x.mul(255)),
        ])
    else:
        (H, W, C) = img.shape
        scale = float(max_size) / max(H, W)
        image_size = (int(scale * H), int(scale * W))
        itot_t = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize(image_size),
            transforms.ToTensor(),
            transforms.Lambda(lambda x: x.mul(255)),
        ])
    # Add the leading batch dimension.
    return itot_t(img).unsqueeze(dim=0)
def ttoi(tensor):
    """Convert a 1xCxHxW (or CxHxW) tensor into an HxWxC numpy image array."""
    arr = tensor.squeeze().cpu().numpy()
    # CHW -> HWC channel order for image libraries.
    return arr.transpose(1, 2, 0)
def transfer_color(src, dest):
    """Transfer Color using YIQ colorspace. Useful in preserving colors in style transfer.

    This method assumes inputs of shape [Height, Width, Channel] in BGR Color
    Space.  The luminance of *dest* replaces the luminance of *src*, so the
    result has src's colors with dest's detail.
    """
    (src, dest) = (src.clip(0, 255), dest.clip(0, 255))
    # Resize dest to src's spatial size so the channels line up.
    (H, W, _) = src.shape
    dest = cv2.resize(dest, dsize=(W, H), interpolation=cv2.INTER_CUBIC)
    dest_gray = cv2.cvtColor(dest, cv2.COLOR_BGR2GRAY)
    # NOTE(review): despite the name 'YIQ', the conversion used is YCrCb;
    # also assumes dtypes cv2.cvtColor accepts (uint8/float32) — TODO confirm
    # callers never pass float64 arrays here.
    src_yiq = cv2.cvtColor(src, cv2.COLOR_BGR2YCrCb)
    # Swap in dest's luminance, keep src's chrominance (Cr/Cb).
    src_yiq[(..., 0)] = dest_gray
    return cv2.cvtColor(src_yiq, cv2.COLOR_YCrCb2BGR).clip(0, 255)
def plot_loss_hist(c_loss, s_loss, total_loss, title='Loss History'):
    """Plot content, style, and total loss curves on one figure.

    All three sequences are expected to be sampled at the same interval
    (one point every 500 iterations, per the x-axis label).
    """
    # FIX: '[i for i in range(...)]' is a redundant copy — use list(range()).
    x = list(range(len(total_loss)))
    plt.figure(figsize=[10, 6])
    plt.plot(x, c_loss, label='Content Loss')
    plt.plot(x, s_loss, label='Style Loss')
    plt.plot(x, total_loss, label='Total Loss')
    plt.legend()
    plt.xlabel('Every 500 iterations')
    plt.ylabel('Loss')
    plt.title(title)
    plt.show()
class ImageFolderWithPaths(datasets.ImageFolder):
    """Custom dataset that includes image file paths.

    Extends torchvision.datasets.ImageFolder() so each item is
    (image, label, path).
    Reference: https://discuss.pytorch.org/t/dataloader-filenames-in-each-batch/4212/2
    """

    def __getitem__(self, index):
        # Fetch (image, label) from ImageFolder, then append the file path.
        sample = super().__getitem__(index)
        file_path = self.imgs[index][0]
        return (*sample, file_path)
class VGG19(nn.Module):
    """Frozen VGG-19 feature extractor returning selected relu activations."""

    def __init__(self, vgg_path='models/vgg19-d01eb7cb.pth'):
        super().__init__()
        backbone = models.vgg19(pretrained=False)
        backbone.load_state_dict(torch.load(vgg_path), strict=False)
        self.features = backbone.features
        # Freeze all weights: the network is used only for feature extraction.
        for p in self.features.parameters():
            p.requires_grad = False

    def forward(self, x):
        """Return a dict mapping layer names (e.g. 'relu1_2') to activations."""
        wanted = {'3': 'relu1_2', '8': 'relu2_2', '17': 'relu3_4', '22': 'relu4_2', '26': 'relu4_4', '35': 'relu5_4'}
        out = {}
        for idx, layer in self.features._modules.items():
            x = layer(x)
            if idx in wanted:
                out[wanted[idx]] = x
        return out
class VGG16(nn.Module):
    """Frozen VGG-16 feature extractor returning selected relu activations."""

    def __init__(self, vgg_path='models/vgg16-00b39a1b.pth'):
        super().__init__()
        backbone = models.vgg16(pretrained=False)
        backbone.load_state_dict(torch.load(vgg_path), strict=False)
        self.features = backbone.features
        # Freeze all weights: the network is used only for feature extraction.
        for p in self.features.parameters():
            p.requires_grad = False

    def forward(self, x):
        """Return a dict mapping layer names (e.g. 'relu1_2') to activations."""
        wanted = {'3': 'relu1_2', '8': 'relu2_2', '15': 'relu3_3', '22': 'relu4_3'}
        out = {}
        for idx, layer in self.features._modules.items():
            x = layer(x)
            if idx in wanted:
                out[wanted[idx]] = x
            # No wanted layers exist beyond '22'; stop early to save work.
            if idx == '22':
                break
        return out
def video_transfer(video_path, style_path):
    """End-to-end video style transfer.

    Extracts frames from *video_path*, stylizes them in batches with the
    transformer weights at *style_path*, and reassembles the styled frames
    into a video.
    """
    print('OpenCV {}'.format(cv2.__version__))
    starttime = time.time()
    (H, W, fps) = getInfo(video_path)
    print('Height: {} Width: {} FPS: {}'.format(H, W, fps))
    print('Extracting video frames')
    getFrames(video_path)
    print('Performing style transfer on frames')
    stylize_folder(style_path, FRAME_SAVE_PATH, STYLE_FRAME_SAVE_PATH, batch_size=BATCH_SIZE)
    print('Combining style frames into one video')
    makeVideo(STYLE_FRAME_SAVE_PATH, STYLE_VIDEO_NAME, fps, int(H), int(W))
    print('Elapsed Time: {}'.format(time.time() - starttime))
    # BUG FIX: removed the stray trailing token 'tor' that raised a
    # NameError at the end of every call.
def getInfo(video_path):
    """Return (height, width, fps) of the video at *video_path*.

    Values come from cv2.VideoCapture properties and are floats.
    """
    capture = cv2.VideoCapture(video_path)
    width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
    frame_rate = capture.get(cv2.CAP_PROP_FPS)
    return (height, width, frame_rate)
def getFrames(video_path):
    """Extract every frame of *video_path* and save them, numbered from 1,
    into FRAME_SAVE_PATH/FRAME_CONTENT_FOLDER.

    BUG FIX: the original unconditionally set ``success = True`` after the
    first read, so an unreadable/empty video would still try to write a
    ``None`` frame.  Now the loop trusts the actual read result.
    """
    vidcap = cv2.VideoCapture(video_path)
    (success, image) = vidcap.read()
    count = 1
    while success:
        cv2.imwrite('{}{}{}{}'.format((FRAME_SAVE_PATH + FRAME_CONTENT_FOLDER), FRAME_BASE_FILE_NAME, count, FRAME_BASE_FILE_TYPE), image)
        (success, image) = vidcap.read()
        count += 1
    print('Done extracting all frames')
def makeVideo(frames_path, save_name, fps, height, width):
    """Assemble the numbered .jpg frames in *frames_path* into an MP4V video.

    Frames are ordered by the integer embedded between FRAME_BASE_FILE_NAME
    and FRAME_BASE_FILE_TYPE in each file name.
    """
    prefix_len = len(FRAME_BASE_FILE_NAME)
    suffix_len = len(FRAME_BASE_FILE_TYPE)

    def frame_number(name):
        # e.g. 'frame12.jpg' -> 12
        return int(name[prefix_len:(- suffix_len)])

    frame_files = [f for f in sorted(os.listdir(frames_path), key=frame_number) if f.endswith('.jpg')]
    writer = cv2.VideoWriter(save_name, cv2.VideoWriter_fourcc(*'MP4V'), fps, (width, height))
    for frame_file in frame_files:
        writer.write(cv2.imread(os.path.join(frames_path, frame_file)))
    print('Done writing video')
def webcam(style_transform_path, width=1280, height=720):
    """Live webcam style-transfer demo.

    Loads the transformer network, then grabs frames from camera 0, stylizes
    each one (optionally preserving colors), and shows the result in a window
    until ESC is pressed.
    """
    device = ('cuda' if torch.cuda.is_available() else 'cpu')
    print('Loading Transformer Network')
    model = transformer.TransformerNetwork()
    model.load_state_dict(torch.load(style_transform_path))
    model = model.to(device)
    print('Done Loading Transformer Network')
    cam = cv2.VideoCapture(0)
    cam.set(3, width)   # CAP_PROP_FRAME_WIDTH
    cam.set(4, height)  # CAP_PROP_FRAME_HEIGHT
    with torch.no_grad():
        while True:
            (_, frame) = cam.read()
            frame = cv2.flip(frame, 1)  # mirror for a natural selfie view
            torch.cuda.empty_cache()
            styled = utils.ttoi(model(utils.itot(frame).to(device)).detach())
            if PRESERVE_COLOR:
                styled = utils.transfer_color(frame, styled)
            # imshow expects floats in [0, 1].
            cv2.imshow('Demo webcam', styled / 255)
            if cv2.waitKey(1) == 27:  # ESC
                break
    cam.release()
    cv2.destroyAllWindows()
def pretty(d, indent=0):
    """Recursively print a nested dict, one key per line, tab-indented."""
    pad = '\t' * indent
    for key in d:
        print(pad + str(key))
        value = d[key]
        if isinstance(value, dict):
            pretty(value, indent + 1)
        else:
            print(pad + '\t' + str(value))
def get_record(output_dir):
    """Load sweep records from *output_dir*, reporting how many were found."""
    print('Loading records from:', output_dir)
    loaded = reporting.load_records(output_dir)
    print('Total records:', len(loaded))
    return loaded
def get_results(out_dir, selection_method):
    """Aggregate sweep accuracies for the single (algorithm, dataset) pair in *out_dir*.

    Returns ((tgt_mean, src_mean, tgt_in_mean), (tgt_std, src_std, tgt_in_std)),
    all in percent, computed across trial seeds.
    """
    records = get_record(out_dir)
    # Attach the selection method's sweep accuracy to each (trial, test-env)
    # group; drop groups where no accuracy could be computed.
    grouped_records = reporting.get_grouped_records(records, group_test_envs=True).map((lambda group: {**group, 'sweep_acc': selection_method.sweep_acc(group['records'], return_extra=True)})).filter((lambda g: (g['sweep_acc'] is not None)))
    # The sweep directory is expected to hold exactly one algorithm and one
    # dataset; anything else is a layout error.
    alg_names = Q(records).select('args.algorithm').unique()
    assert (len(alg_names) == 1)
    algorithm = alg_names[0]
    dataset_names = Q(records).select('args.dataset').unique().sorted()
    assert (len(dataset_names) == 1)
    dataset = dataset_names[0]
    # Average the (tgt, src, tgt_in) accuracy tuples within each trial seed.
    trial_averages = grouped_records.filter_equals('algorithm, dataset', (algorithm, dataset)).group('trial_seed').map((lambda trial_seed, group: tuple(map((lambda y: (sum(y) / float(len(y)))), zip(*group.select('sweep_acc'))))))
    (tgt_all, src_all, tgt_in_all) = zip(*trial_averages)
    # Mean/std across trial seeds, scaled to percent.
    (tgt_mean, src_mean, tgt_in_mean) = ((100 * np.mean(list(tgt_all))), (100 * np.mean(list(src_all))), (100 * np.mean(list(tgt_in_all))))
    (tgt_std, src_std, tgt_in_std) = ((100 * np.std(list(tgt_all))), (100 * np.std(list(src_all))), (100 * np.std(list(tgt_in_all))))
    return ((tgt_mean, src_mean, tgt_in_mean), (tgt_std, src_std, tgt_in_std))
def _collect_metrics(output_dir):
    """Run get_results on *output_dir* and pack its means/stds into a flat dict."""
    ((acc_tgt, acc_src, acc_tgt_in), (acc_tgt_std, acc_src_std, acc_tgt_in_std)) = get_results(output_dir, select_method)
    return {
        'acc_tgt': acc_tgt,
        'acc_src': acc_src,
        'acc_tgt_in': acc_tgt_in,
        'acc_diff': (acc_src - acc_tgt),
        'acc_tgt_std': acc_tgt_std,
        'acc_src_std': acc_src_std,
        'acc_tgt_in_std': acc_tgt_in_std,
    }


def get_result(setup):
    """Collect accuracy metrics for every dataset/algorithm of one *setup*.

    Returns {dataset: {algorithm[_lambda_X]: metric-dict}}.  CLIP baselines
    have a single 'base' run; other algorithms have one run per lambda value
    found on disk.

    FIX: the two identical seven-assignment blocks of the original are
    deduplicated into _collect_metrics; the unused lambda_val_array was
    removed.
    """
    result_dict = {}
    for dataset in dataset_all:
        result_dict[dataset] = {}
        sub_result_dict = result_dict[dataset]
        basedir = f'{base_output_dir}/{dataset}/{setup}'
        for (alg_name, alg_name_long) in algorithm_all.items():
            if (alg_name in ['CLIPPretrained', 'CLIPBase']):
                sub_result_dict[alg_name] = _collect_metrics(os.path.join(basedir, f'{alg_name_long}/base'))
            else:
                # Lambda values are encoded in directory names 'lambda_<val>'.
                lambda_str_array = [p.split('_')[-1] for p in glob.glob(os.path.join(basedir, f'{alg_name_long}/*'))]
                for lambda_str in sorted(lambda_str_array, key=float):
                    key = (alg_name + '_lambda_{}'.format(lambda_str))
                    sub_result_dict[key] = _collect_metrics(os.path.join(basedir, f'{alg_name_long}/lambda_{lambda_str}'))
    return result_dict
def plot_result(result_dict, plot_dataset, plot_y='acc_tgt', include=None, exclude=None, plot_std=False):
    """Bar-plot one metric (*plot_y*) for every method of *plot_dataset*.

    include/exclude optionally whitelist/blacklist method keys; plot_std adds
    error bars.  Each bar is annotated with 'mean +/- std'.
    """
    plt.figure()
    sub_result_dict = result_dict[plot_dataset]
    plt_xs = []
    plt_ys = []
    plt_errs = []
    # FIX: use the value already yielded by items() instead of re-indexing
    # the dict (the original bound it to an unused 'v').
    for (k, metrics) in sub_result_dict.items():
        if ((include is not None) and (k not in include)):
            continue
        if ((exclude is not None) and (k in exclude)):
            continue
        plt_xs.append(k.replace('_', '\n'))
        plt_ys.append(metrics[plot_y])
        plt_errs.append(metrics[(plot_y + '_std')])
    if plot_std:
        plt.bar(plt_xs, plt_ys, yerr=plt_errs)
    else:
        plt.bar(plt_xs, plt_ys)
    # Annotate each bar slightly above its top.
    for (plt_x, plt_y, plt_err) in zip(plt_xs, plt_ys, plt_errs):
        plt.text(plt_x, (plt_y + 0.25), '{:.1f} +/- {:.1f}'.format(plt_y, plt_err), color='blue', fontweight='bold')
    plt.xlabel('Method')
    plt.ylabel('Accuracy')
    # NOTE(review): raises if everything was filtered out (np.min of empty) —
    # same as the original.
    plt.ylim([(np.min(plt_ys) - 0.5), (np.max(plt_ys) + 0.5)])
    plt.show()
def pretty(d, indent=0):
    """Recursively print a nested dict with tab indentation, one key per line."""
    for key, value in d.items():
        print('\t' * indent + str(key))
        if isinstance(value, dict):
            # Nested dict: recurse one level deeper.
            pretty(value, indent + 1)
        else:
            print('\t' * (indent + 1) + str(value))
def get_record(output_dir):
    """Load and return the sweep records stored under *output_dir*."""
    print('Loading records from:', output_dir)
    records = reporting.load_records(output_dir)
    record_count = len(records)
    print('Total records:', record_count)
    return records
def get_results_per_domain(out_dir, selection_method, num_envs, env_names=None):
    """Given all records, get averaged results of each setup for each test domain.

    Returns {env_name_or_'env_<i>': 'mean +/- std'} with target accuracies in
    percent, one entry per test environment.
    """
    records = get_record(out_dir)
    # Attach the selection method's sweep accuracy to each group; drop groups
    # where no accuracy could be computed.
    grouped_records = reporting.get_grouped_records(records, group_test_envs=True).map((lambda group: {**group, 'sweep_acc': selection_method.sweep_acc(group['records'], return_extra=True)})).filter((lambda g: (g['sweep_acc'] is not None)))
    # The sweep directory must hold exactly one algorithm and one dataset.
    alg_names = Q(records).select('args.algorithm').unique()
    assert (len(alg_names) == 1)
    algorithm = alg_names[0]
    dataset_names = Q(records).select('args.dataset').unique().sorted()
    assert (len(dataset_names) == 1)
    dataset = dataset_names[0]
    results = {}
    for test_env in range(num_envs):
        # Average the (tgt, src, tgt_in) tuples within each trial seed for
        # this specific held-out environment.
        trial_averages = grouped_records.filter_equals('algorithm, dataset, test_env', (algorithm, dataset, test_env)).group('trial_seed').map((lambda trial_seed, group: tuple(map((lambda y: (sum(y) / float(len(y)))), zip(*group.select('sweep_acc'))))))
        (tgt_all, src_all, tgt_in_all) = zip(*trial_averages)
        (tgt_mean, src_mean, tgt_in_mean) = ((100 * np.mean(list(tgt_all))), (100 * np.mean(list(src_all))), (100 * np.mean(list(tgt_in_all))))
        (tgt_std, src_std, tgt_in_std) = ((100 * np.std(list(tgt_all))), (100 * np.std(list(src_all))), (100 * np.std(list(tgt_in_all))))
        # Key by the human-readable env name when available.
        if (env_names is not None):
            result_key = env_names[test_env]
        else:
            result_key = f'env_{test_env}'
        results.update({result_key: '{:.1f} +/- {:.1f}'.format(tgt_mean, tgt_std)})
    return results
def get_result(setup):
    """Collect per-test-domain accuracy strings for every dataset/algorithm of *setup*.

    Returns {dataset: {algorithm[_lambda_X]: {env_name: 'mean +/- std'}}}.
    CLIP baselines have a single 'base' run; other algorithms have one run per
    lambda value found on disk.

    FIX: removed the unused lambda_val_array computation.
    """
    result_dict = {}
    for dataset in dataset_all:
        result_dict[dataset] = {}
        sub_result_dict = result_dict[dataset]
        basedir = f'{base_output_dir}/{dataset}/{setup}'
        env_names = datasets.get_dataset_class(dataset).ENVIRONMENTS
        num_envs = len(env_names)
        for (alg_name, alg_name_long) in algorithm_all.items():
            if (alg_name in ['CLIPPretrained', 'CLIPBase']):
                sub_result_dict[alg_name] = {}
                subsub_result_dict = sub_result_dict[alg_name]
                output_dir = os.path.join(basedir, f'{alg_name_long}/base')
                subsub_result_dict.update(get_results_per_domain(output_dir, select_method, num_envs, env_names))
            else:
                # Lambda values are encoded in directory names 'lambda_<val>'.
                lambda_str_array = [p.split('_')[-1] for p in glob.glob(os.path.join(basedir, f'{alg_name_long}/*'))]
                for lambda_str in sorted(lambda_str_array, key=float):
                    key = (alg_name + '_lambda_{}'.format(lambda_str))
                    sub_result_dict[key] = {}
                    subsub_result_dict = sub_result_dict[key]
                    output_dir = os.path.join(basedir, f'{alg_name_long}/lambda_{lambda_str}')
                    subsub_result_dict.update(get_results_per_domain(output_dir, select_method, num_envs, env_names))
    return result_dict
def pretty(d, indent=0):
    """Print a nested dict as an indented tree (tabs mark nesting depth)."""
    prefix = '\t' * indent
    for key, value in d.items():
        print(prefix + str(key))
        if isinstance(value, dict):
            pretty(value, indent + 1)
        else:
            print(prefix + '\t' + str(value))
def get_record(output_dir):
    """Return the sweep records under *output_dir*, logging the load."""
    print('Loading records from:', output_dir)
    result = reporting.load_records(output_dir)
    print('Total records:', len(result))
    return result
def get_results(out_dir, selection_method):
    """Aggregate sweep accuracies for the single (algorithm, dataset) pair in *out_dir*.

    Returns ((tgt_mean, src_mean, tgt_in_mean), (tgt_std, src_std, tgt_in_std))
    in percent, averaged/spread across trial seeds.
    """
    records = get_record(out_dir)
    # Attach the selection method's sweep accuracy per group; drop groups
    # where no accuracy could be computed.
    grouped_records = reporting.get_grouped_records(records, group_test_envs=True).map((lambda group: {**group, 'sweep_acc': selection_method.sweep_acc(group['records'], return_extra=True)})).filter((lambda g: (g['sweep_acc'] is not None)))
    # Exactly one algorithm and one dataset are expected in the sweep dir.
    alg_names = Q(records).select('args.algorithm').unique()
    assert (len(alg_names) == 1)
    algorithm = alg_names[0]
    dataset_names = Q(records).select('args.dataset').unique().sorted()
    assert (len(dataset_names) == 1)
    dataset = dataset_names[0]
    # Average the (tgt, src, tgt_in) accuracy tuples within each trial seed.
    trial_averages = grouped_records.filter_equals('algorithm, dataset', (algorithm, dataset)).group('trial_seed').map((lambda trial_seed, group: tuple(map((lambda y: (sum(y) / float(len(y)))), zip(*group.select('sweep_acc'))))))
    (tgt_all, src_all, tgt_in_all) = zip(*trial_averages)
    # Mean/std across trial seeds, scaled to percent.
    (tgt_mean, src_mean, tgt_in_mean) = ((100 * np.mean(list(tgt_all))), (100 * np.mean(list(src_all))), (100 * np.mean(list(tgt_in_all))))
    (tgt_std, src_std, tgt_in_std) = ((100 * np.std(list(tgt_all))), (100 * np.std(list(src_all))), (100 * np.std(list(tgt_in_all))))
    return ((tgt_mean, src_mean, tgt_in_mean), (tgt_std, src_std, tgt_in_std))
def _collect_metrics_verbose(output_dir):
    """Print *output_dir*, run get_results on it, and pack the result flat."""
    print(output_dir)
    ((acc_tgt, acc_src, acc_tgt_in), (acc_tgt_std, acc_src_std, acc_tgt_in_std)) = get_results(output_dir, select_method)
    return {
        'acc_tgt': acc_tgt,
        'acc_src': acc_src,
        'acc_tgt_in': acc_tgt_in,
        'acc_diff': (acc_src - acc_tgt),
        'acc_tgt_std': acc_tgt_std,
        'acc_src_std': acc_src_std,
        'acc_tgt_in_std': acc_tgt_in_std,
    }


def get_result(setup):
    """Collect accuracy metrics for every dataset/algorithm of one *setup*,
    printing each base output directory as it is processed.

    Returns {dataset: {algorithm[_lambda_X]: metric-dict}}.

    FIX: the two near-identical seven-assignment blocks are deduplicated into
    _collect_metrics_verbose; the unused lambda_val_array was removed.
    NOTE(review): the original printed output_dir only for the CLIP branch;
    the helper now prints it for every run, which only affects logging.
    """
    result_dict = {}
    for dataset in dataset_all:
        result_dict[dataset] = {}
        sub_result_dict = result_dict[dataset]
        basedir = f'{base_output_dir}/{dataset}/{setup}'
        for (alg_name, alg_name_long) in algorithm_all.items():
            if (alg_name in ['CLIPPretrained', 'CLIPBase']):
                sub_result_dict[alg_name] = _collect_metrics_verbose(os.path.join(basedir, f'{alg_name_long}/base'))
            else:
                # Lambda values are encoded in directory names 'lambda_<val>'.
                lambda_str_array = [p.split('_')[-1] for p in glob.glob(os.path.join(basedir, f'{alg_name_long}/*'))]
                for lambda_str in sorted(lambda_str_array, key=float):
                    key = (alg_name + '_lambda_{}'.format(lambda_str))
                    sub_result_dict[key] = _collect_metrics_verbose(os.path.join(basedir, f'{alg_name_long}/lambda_{lambda_str}'))
    return result_dict
def plot_result(result_dict, plot_dataset, plot_y='acc_tgt', include=None, exclude=None, plot_std=False):
    """Bar-plot one metric (*plot_y*) for every method of *plot_dataset*.

    include/exclude optionally whitelist/blacklist method keys; plot_std
    toggles error bars.  Each bar is annotated with 'mean +/- std'.
    """
    plt.figure()
    sub_result_dict = result_dict[plot_dataset]
    plt_xs = []
    plt_ys = []
    plt_errs = []
    # FIX: use the value from items() directly instead of re-indexing the
    # dict (the original bound it to an unused 'v').
    for (k, metrics) in sub_result_dict.items():
        if ((include is not None) and (k not in include)):
            continue
        if ((exclude is not None) and (k in exclude)):
            continue
        plt_xs.append(k.replace('_', '\n'))
        plt_ys.append(metrics[plot_y])
        plt_errs.append(metrics[(plot_y + '_std')])
    if plot_std:
        plt.bar(plt_xs, plt_ys, yerr=plt_errs)
    else:
        plt.bar(plt_xs, plt_ys)
    # Annotate each bar slightly above its top.
    for (plt_x, plt_y, plt_err) in zip(plt_xs, plt_ys, plt_errs):
        plt.text(plt_x, (plt_y + 0.25), '{:.1f} +/- {:.1f}'.format(plt_y, plt_err), color='blue', fontweight='bold')
    plt.xlabel('Method')
    plt.ylabel('Accuracy')
    # NOTE(review): raises on an empty selection (np.min of empty) — same as
    # the original.
    plt.ylim([(np.min(plt_ys) - 0.5), (np.max(plt_ys) + 0.5)])
    plt.show()
class AbstractBottleneck(torch.nn.Module):
    """Domain Bottleneck (abstract class).

    Base class for information bottlenecks applied to features z.  Subclasses
    override loss() (and optionally forward()/update() and the properties).
    """

    def __init__(self, feature_dim, num_classes, num_domains, hparams):
        super().__init__()
        self.hparams = hparams

    def forward(self, z):
        # Identity by default: no transformation of the features.
        return z

    def loss(self, z, y, dom_labels):
        # Subclasses must supply the bottleneck objective.
        raise NotImplementedError

    def update(self, z, y, dom_labels):
        # Optional per-step state update; a no-op by default.
        pass

    @property
    def trainable(self):
        """Whether the bottleneck has trainable parameters"""
        return False

    @property
    def is_conditional(self):
        """Whether the bottleneck is conditioned on labels"""
        return False
class DummyBottleneck(AbstractBottleneck):
    """Dummy Bottleneck (without bottleneck): identity features, zero loss."""

    def __init__(self, feature_dim, num_classes, num_domains, hparams):
        super().__init__(feature_dim, num_classes, num_domains, hparams)

    def loss(self, z, y, dom_labels):
        # No compression objective: constant zero loss on z's device,
        # features passed through untouched.
        zero = torch.Tensor([0.0]).to(z.device)
        return (zero, z)

    def update(self, z, y, dom_labels):
        pass

    @property
    def trainable(self):
        return False
class DiscreteEntropyBottleneck(AbstractBottleneck):
    """Entropy Bottleneck (with discretization)
    Introduced by J. Ballé, et al., in "Variational image compression with a scale hyperprior".

    Properties:
        - Minimize H(Z)
        - Require no access to domain labels and task labels
    """

    def __init__(self, feature_dim, num_classes, num_domains, hparams):
        super(DiscreteEntropyBottleneck, self).__init__(feature_dim, num_classes, num_domains, hparams)
        self.bottleneck = EntropyBottleneck(feature_dim)
        # Learnable per-feature log-scale, initialized so exp(scaling) == 10.
        self.scaling = torch.nn.Parameter((torch.ones(feature_dim) * math.log(10)))

    def forward(self, z):
        # Scale, push through the entropy bottleneck (which expects 4-D
        # input, hence the two singleton spatial dims), then unscale.
        z = (z * self.scaling.exp())
        (z_hat, _) = self.bottleneck(z.unsqueeze((- 1)).unsqueeze((- 1)))
        z_hat = (z_hat.squeeze((- 1)).squeeze((- 1)) / self.scaling.exp())
        return z_hat

    def loss(self, z, y, dom_labels):
        """Return (rate loss, quantized features)."""
        z = (z * self.scaling.exp())
        (z_hat, q_z) = self.bottleneck(z.unsqueeze((- 1)).unsqueeze((- 1)))
        # BUG FIX: squeeze only the trailing singleton dims, matching
        # forward(); the original bare .squeeze() would also drop the batch
        # dimension whenever the batch size was 1.
        z_hat = (z_hat.squeeze((- 1)).squeeze((- 1)) / self.scaling.exp())
        # Rate term: expected negative log-likelihood under the learned prior.
        bn_loss = (- torch.log(q_z).sum((- 1)).mean())
        return (bn_loss, z_hat)

    @property
    def trainable(self):
        return True
class AbstractContrastBottleneck(AbstractBottleneck):
    """Contrastive based bottlenecks (abstract class).

    The implementation is based on the supervised contrastive loss (SupCon)
    introduced by P. Khosla, et al., in "Supervised Contrastive Learning".
    """

    def __init__(self, feature_dim, num_classes, num_domains, hparams):
        super().__init__(feature_dim, num_classes, num_domains, hparams)
        # The SupCon module computes both the contrastive loss and the
        # domain bottleneck loss; we use only the latter here.
        self.bn_supcon = SupConLoss(
            feature_dim,
            num_domains,
            temperature=hparams['temperature'],
            is_normalized=hparams['is_normalized'],
            is_project=hparams['is_project'],
        )
        self.is_flipped = hparams['is_flipped']

    def loss(self, z, y, dom_labels):
        # Index [1] selects the bottleneck loss; features pass through as-is.
        bn_loss = self.bn_supcon(z, y, dom_labels, bn_conditional=self.is_conditional, bn_flipped=self.is_flipped)[1]
        return (bn_loss, z)

    @property
    def trainable(self):
        # Trainable exactly when SupCon owns a projection head.
        return self.bn_supcon.is_project
class CADBottleneck(AbstractContrastBottleneck):
    """Contrastive Adversarial Domain (CAD) bottleneck
    Introduced in Sec 4.2.1 in our paper.

    Properties:
        - Minimize I(D;Z)
        - Require access to domain labels but not task labels
    """

    def __init__(self, feature_dim, num_classes, num_domains, hparams):
        # Unconditional variant: inherits everything; is_conditional stays False.
        super().__init__(feature_dim, num_classes, num_domains, hparams)
class CondCADBottleneck(AbstractContrastBottleneck):
    """Conditional Contrastive Adversarial Domain (CAD) bottleneck
    Introduced in Appx C.4 in our paper.

    Properties:
        - Minimize I(D;Z|Y)
        - Require access to both domain labels and task labels
    """

    def __init__(self, feature_dim, num_classes, num_domains, hparams):
        super().__init__(feature_dim, num_classes, num_domains, hparams)

    @property
    def is_conditional(self):
        # Conditioning on task labels is what distinguishes this variant.
        return True
class SupConLoss(nn.Module):
    """Supervised Contrastive (SupCon) loss
    Introduced by P. Khosla, et al., in “Supervised Contrastive Learning“.
    Modified from https://github.com/HobbitLong/SupContrast/blob/8d0963a7dbb1cd28accb067f5144d61f18a77588/losses.py#L11
    """

    def __init__(self, feature_dim, num_domains, temperature=0.07, base_temperature=0.07, is_logsumexp=True, is_normalized=False, is_project=False):
        # feature_dim: dimensionality of input features z
        # num_domains: number of training domains (stored for reference)
        # temperature / base_temperature: softmax temperatures; their ratio scales both losses
        # is_logsumexp: aggregate positives via logsumexp (True) or arithmetic mean (False)
        # is_normalized: L2-normalize features before computing similarities
        # is_project: pass features through a small projection MLP first
        super(SupConLoss, self).__init__()
        self.num_domains = num_domains
        self.temperature = temperature
        self.base_temperature = base_temperature
        self.is_logsumexp = is_logsumexp
        self.is_normalized = is_normalized
        self.is_project = is_project
        if self.is_project:
            # 2-layer projection head mapping features to a 128-d contrastive space.
            self.project = nn.Sequential(nn.Linear(feature_dim, feature_dim), nn.ReLU(inplace=True), nn.Linear(feature_dim, 128))

    def forward(self, z, y, dom_labels, bn_conditional=True, bn_flipped=True):
        """
        Args:
            z: hidden vector of shape [batch_size, z_dim].
            y: ground truth of shape [batch_size].
            dom_labels: ground truth domains of shape [batch_size].
            bn_conditional: if the bottleneck loss conditioned on the label
            bn_flipped: if flip maximize log(p) (False) to minimize -log(1-p) (True) for the bottleneck loss
        Returns:
            SupCon loss and the bottleneck loss.
        """
        device = z.device
        batch_size = z.shape[0]
        y = y.contiguous().view((- 1), 1)
        dom_labels = dom_labels.contiguous().view((- 1), 1)
        # Pairwise masks: same-label, same-domain, and "not self" (drop the diagonal).
        mask_y = torch.eq(y, y.T).to(device)
        mask_d = torch.eq(dom_labels, dom_labels.T).to(device)
        mask_drop = (~ torch.eye(batch_size).bool().to(device))
        mask_y &= mask_drop
        # Positives split by domain: same label & different domain vs same label & same domain.
        mask_y_n_d = (mask_y & (~ mask_d))
        mask_y_d = (mask_y & mask_d)
        (mask_y, mask_drop, mask_y_n_d, mask_y_d) = (mask_y.float(), mask_drop.float(), mask_y_n_d.float(), mask_y_d.float())
        if self.is_project:
            z = self.project(z)
        if self.is_normalized:
            z = F.normalize(z, dim=1)
        # Similarity logits; diagonal zeroed, then shifted by the row max for numerical stability.
        outer = (z @ z.T)
        logits = (outer / self.temperature)
        logits = (logits * mask_drop)
        (logits_max, _) = torch.max(logits, dim=1, keepdim=True)
        logits = (logits - logits_max.detach())
        # log-softmax over all non-self pairs; mask_drop.log() is 0 for kept pairs, -inf for self.
        denominator = torch.logsumexp((logits + mask_drop.log()), dim=1, keepdim=True)
        log_prob = (logits - denominator)
        # Keep only anchors that have at least one positive (same-label) partner.
        mask_valid = (mask_y.sum(1) > 0)
        log_prob = log_prob[mask_valid]
        mask_y = mask_y[mask_valid]
        mask_y_n_d = mask_y_n_d[mask_valid]
        mask_y_d = mask_y_d[mask_valid]
        mask_d = mask_d[mask_valid]
        logits = logits[mask_valid]
        outer = outer[mask_valid]
        batch_size = log_prob.shape[0]
        # Aggregate log-probability over positives per anchor.
        if self.is_logsumexp:
            agg_log_prob_pos = ((torch.logsumexp((log_prob + mask_y.log()), dim=1) / mask_y.sum(1)) * batch_size)
        else:
            agg_log_prob_pos = ((mask_y * log_prob).sum(1) / mask_y.sum(1))
        # Standard SupCon loss (temperature-rescaled negative log-likelihood of positives).
        loss = ((- (self.temperature / self.base_temperature)) * agg_log_prob_pos)
        if (not bn_conditional):
            # Unconditional bottleneck: push probability mass toward (flipped) or away
            # from (unflipped) different-domain pairs.
            if bn_flipped:
                bn_loss = ((- (self.temperature / self.base_temperature)) * torch.logsumexp((log_prob + (~ mask_d).float().log()), dim=1))
            else:
                bn_loss = ((self.temperature / self.base_temperature) * torch.logsumexp((log_prob + mask_d.float().log()), dim=1))
        else:
            # Conditional bottleneck: restrict the softmax to same-label pairs, then
            # contrast same-domain vs different-domain positives.
            if bn_flipped:
                mask_valid = (mask_y_n_d.sum(1) > 0)
            else:
                mask_valid = (mask_y_d.sum(1) > 0)
            log_prob = log_prob[mask_valid]
            mask_y = mask_y[mask_valid]
            mask_y_d = mask_y_d[mask_valid]
            mask_y_n_d = mask_y_n_d[mask_valid]
            outer = outer[mask_valid]
            logits = logits[mask_valid]
            batch_size = log_prob.shape[0]
            # Re-normalize over same-label pairs only.
            denominator = torch.logsumexp((logits + mask_y.log()), dim=1, keepdim=True)
            log_prob_y = (logits - denominator)
            if bn_flipped:
                bn_loss = ((- (self.temperature / self.base_temperature)) * torch.logsumexp((log_prob_y + mask_y_n_d.log()), dim=1))
            else:
                bn_loss = ((self.temperature / self.base_temperature) * torch.logsumexp((log_prob_y + mask_y_d.log()), dim=1))
        # finite_mean ignores non-finite entries (anchors with empty positive sets yield -inf/nan).
        return (finite_mean(loss), finite_mean(bn_loss))
class AbstractCLIPAlgorithm(Algorithm):
    """CLIP based algorithms (abstract class)

    Holds a frozen pretrained CLIP model plus three plug-in components supplied
    by subclasses: `transform` (finetuning head over CLIP features),
    `bottleneck` (information bottleneck), and `classifier` (fit post-hoc via
    `fit_classifier`).
    """

    def __init__(self, feature_dim, num_classes, num_domains, hparams, pretrained, idx2class):
        super(AbstractCLIPAlgorithm, self).__init__(feature_dim, num_classes, num_domains, hparams)
        self.clip_model = pretrained
        self.num_classes = num_classes
        # idx2class: dict mapping class index -> class name (used for text prompts).
        self.idx2class = idx2class
        # The CLIP backbone is frozen; only subclass-added modules are trained.
        if (self.clip_model is not None):
            for param in self.clip_model.parameters():
                param.requires_grad = False
        # Filled in by subclasses / fit_classifier.
        self.transform = None
        self.bottleneck = None
        self.classifier = None
        self.is_debug = hparams['debug']

    def get_clip_label_text_features(self, normalize=True, multiple_prompts=False):
        """Get CLIP features of label text prompts

        Args:
            normalize: whether normalize the output text features
            multiple_prompts: whether apply prompt engineering with multiple prompts
        """
        device = next(self.clip_model.parameters()).device
        class_names = [self.idx2class[idx] for idx in range(len(self.idx2class.items()))]
        if (not multiple_prompts):
            # One prompt per class; LanguageTool grammar-corrects it (e.g. "a" vs "an").
            tool = language_tool_python.LanguageTool('en-US')
            text_inputs = torch.cat([clip.tokenize(tool.correct(f'a picture of a {c}')) for c in class_names]).to(device)
            with torch.no_grad():
                text_features = self.clip_model.encode_text(text_inputs)
                if normalize:
                    text_features /= text_features.norm(dim=(- 1), keepdim=True)
        else:
            # Prompt engineering: average the embedding over all templates per class.
            is_training = self.clip_model.training
            self.clip_model.eval()
            with torch.no_grad():
                text_features = []
                for classname in tqdm(class_names):
                    texts = [template.format(classname) for template in clip_prompt_templates]
                    texts = clip.tokenize(texts).to(device)
                    class_embeddings = self.clip_model.encode_text(texts)
                    if normalize:
                        class_embeddings /= class_embeddings.norm(dim=(- 1), keepdim=True)
                    class_embedding = class_embeddings.mean(dim=0)
                    if normalize:
                        class_embedding /= class_embedding.norm()
                    text_features.append(class_embedding)
                text_features = torch.stack(text_features, dim=0).to(device)
            # Restore the original train/eval mode.
            if is_training:
                self.clip_model.train()
        text_features = text_features.float()
        return text_features

    def get_device(self):
        'Get model device'
        if (self.clip_model is not None):
            device = next(self.clip_model.parameters()).device
        else:
            device = next(self.transform.parameters()).device
        return device

    def get_transformed_feature(self, all_x):
        'Get the transformed feature of a batch samples'
        all_z = self.transform(all_x)
        return all_z

    def preprocess_features(self, loader, return_tensor=False, use_tqdm=False):
        """Get the finetuned features (that can be directly used for training classifier) for a whole dataset

        Args:
            loader: the dataset loader
            return_tensor: whether return features as tensors (True) or numpy aarrays (False)
            use_tqdm: use tqdm to visualize progress
        """
        assert (not self.training), 'Should be in the evaluation mode!!!'
        device = self.get_device()
        if use_tqdm:
            loader = tqdm(loader)
        with torch.no_grad():
            (Z, Y) = ([], [])
            for (x, y) in loader:
                # transform -> bottleneck -> numpy, one batch at a time.
                Z += [self.bottleneck(self.get_transformed_feature(x.to(device))).cpu().numpy()]
                Y += [y.cpu().numpy()]
        if return_tensor:
            return (torch.tensor(np.concatenate(Z)), torch.tensor(np.concatenate(Y)))
        else:
            return (np.concatenate(Z), np.concatenate(Y))

    def loss(self, all_x, all_y, all_d):
        'Compute the loss'
        raise NotImplementedError

    def update(self, minibatches, unlabeled=None):
        'Update the model with a batch'
        raise NotImplementedError

    def fit_classifier(self, clf_train_data, clf_valid_data, prompt_engineer=False, train_clf_hparams=None):
        """Fit classifier
        The classifier types include:
        - 'SVM' or 'Logistic' for sklearn classifiers
        - 'LogisticPT' for pytorch implemented logistic regression, used with large dataset like DomainNet for
          minibatch training
        - 'ZeroShot' for CLIP zero-shot classifier with label prompts, note that it works with pretrained CLIP or CLIP
          finetuned with image-text contrastive loss but not supervised cross-entropy loss

        Args:
            clf_train_data: training data
            clf_valid_data: validation data
            prompt_engineer: whether use multiple prompts with prompt engineering for 'ZeroShot' classifier
            train_clf_hparams: hyperparameter dict for training the pytorch implemented logistic regression
        """
        device = self.get_device()
        clf_type = self.hparams['clf_type']
        assert (clf_type in ['SVM', 'Logistic', 'LogisticPT', 'ZeroShot'])
        use_sklearn = (clf_type in ['SVM', 'Logistic'])
        print('Fitting classifier: {}...'.format(clf_type))
        (clf_train_features, clf_train_labels) = clf_train_data
        (clf_val_features, clf_val_labels) = clf_valid_data
        if use_sklearn:
            # Grid-search C on a fixed train/val split (PredefinedSplit: -1 = train, 0 = val).
            clf_all_features = np.concatenate([clf_train_features, clf_val_features])
            clf_all_labels = np.concatenate([clf_train_labels, clf_val_labels])
            cv_fold = np.concatenate([np.full(clf_train_features.shape[0], (- 1), dtype=np.int8), np.zeros(clf_val_features.shape[0], dtype=np.int8)])
            cv = PredefinedSplit(cv_fold)
            if (clf_type == 'SVM'):
                base_params = {'penalty': 'l2', 'max_iter': 1000, 'verbose': 0}
                base_estimator_class = LinearSVC
            elif (clf_type == 'Logistic'):
                base_params = {'penalty': 'l2', 'max_iter': 1000, 'multi_class': 'multinomial', 'solver': 'lbfgs', 'verbose': 0, 'n_jobs': (- 1), 'warm_start': False}
                base_estimator_class = LogisticRegression
            else:
                raise NotImplementedError
            base_estimator = base_estimator_class(**base_params)
            if self.is_debug:
                # Skip the (slow) grid search in debug mode.
                best_param = {'C': 1.0}
            else:
                c_range = [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]
                param_grid = {'C': c_range}
                clf_cv = GridSearchCV(base_estimator, param_grid, cv=cv, refit=False, scoring='accuracy', n_jobs=(- 1), error_score='raise', verbose=0)
                clf_cv.fit(clf_all_features, clf_all_labels)
                best_param = clf_cv.best_params_
                if (best_param['C'] in [c_range[0], c_range[(- 1)]]):
                    print(f'The best param {best_param} hits the boundary! Please use a larger range!')
            # Refit on the training split only with the selected C.
            clf = base_estimator_class(**best_param, **base_params)
            clf.fit(clf_train_features, clf_train_labels)
            # Wrap the sklearn model as a tensor-in / tensor-out callable.
            if (clf_type == 'Logistic'):
                self.classifier = (lambda z: torch.Tensor(clf.predict_proba(z.cpu().numpy())).to(device))
            else:
                self.classifier = (lambda z: torch.Tensor(clf.decision_function(z.cpu().numpy())).to(device))
        elif (clf_type == 'LogisticPT'):
            # Minibatch logistic regression (PyTorch Lightning) for large datasets.
            precision = 32
            lr = 0.0005
            batch_size = 512
            max_epochs = 500
            l2_reg = 0.0
            if (train_clf_hparams is not None):
                assert isinstance(train_clf_hparams, dict)
                if ('precision' in train_clf_hparams):
                    precision = train_clf_hparams['precision']
                if ('lr' in train_clf_hparams):
                    lr = train_clf_hparams['lr']
                if ('batch_size' in train_clf_hparams):
                    batch_size = train_clf_hparams['batch_size']
                if ('max_epochs' in train_clf_hparams):
                    max_epochs = train_clf_hparams['max_epochs']
                if ('l2_reg' in train_clf_hparams):
                    l2_reg = train_clf_hparams['l2_reg']
            print('Training PyTorch logistic regression hyperparamters:\n\tprecision: {}\n\tlearning rate: {}\n\tl2 regularization: {}\n\tbatch size: {}\n\tmax epochs: {}\n'.format(precision, lr, l2_reg, batch_size, max_epochs))
            dm = SklearnDataModule(clf_train_features, clf_train_labels, x_val=clf_val_features, y_val=clf_val_labels, x_test=None, y_test=None, val_split=0, test_split=0, num_workers=4, shuffle=True, batch_size=batch_size, pin_memory=True, drop_last=False)
            self.classifier = PLLogisticRegression(input_dim=clf_train_features.shape[(- 1)], num_classes=self.num_classes, learning_rate=lr, l2_strength=l2_reg)
            early_stop_callback = EarlyStopping(monitor='val_acc', min_delta=0.0005, patience=3, verbose=True, mode='max')
            trainer = pl.Trainer(gpus=1, precision=precision, auto_lr_find=False, max_epochs=max_epochs, logger=False, checkpoint_callback=False, flush_logs_every_n_steps=50, progress_bar_refresh_rate=50, callbacks=[early_stop_callback])
            trainer.fit(self.classifier, train_dataloader=dm.train_dataloader(), val_dataloaders=dm.val_dataloader())
            trainer.validate(self.classifier, val_dataloaders=dm.val_dataloader())
            self.classifier.to(device)
        elif (clf_type == 'ZeroShot'):
            # Zero-shot: use a linear layer whose weights are the (normalized) label
            # text embeddings and zero bias, so logits are cosine-style similarities.
            if (self.classifier is None):
                self.classifier = PLLogisticRegression(input_dim=clf_train_features.shape[(- 1)], num_classes=self.num_classes)
                self.classifier.to(device)
            text_features = self.get_clip_label_text_features(multiple_prompts=prompt_engineer)
            self.classifier.linear.weight.data.copy_(text_features)
            self.classifier.linear.bias.data.copy_(torch.zeros_like(self.classifier.linear.bias))
        else:
            raise NotImplementedError

    def forward(self, x):
        return self.predict(x)

    def predict(self, x):
        assert (self.classifier is not None), 'Please fit the classifier by calling `fit_classifier` first!'
        z = self.bottleneck(self.get_transformed_feature(x))
        # Zero-shot classification compares normalized features against text embeddings.
        if (self.hparams['clf_type'] == 'ZeroShot'):
            z /= z.norm(dim=(- 1), keepdim=True)
        return self.classifier(z)

    @property
    def trainable(self):
        return True

    def adjust_lr(self, step, max_steps, steps_per_epoch):
        # SupCon-style schedule: optional linear warmup for the first `warm_epochs`
        # epochs, then either cosine annealing or step decay at `lr_decay_epochs`.
        learning_rate = self.hparams['lr']
        warmup_from = (self.hparams['lr'] / 5)
        warm_epochs = 10
        lr_decay_rate = 0.1
        lr_decay_epochs = [25, 40]
        eta_min = (self.hparams['lr'] * (lr_decay_rate ** 3))
        if (self.hparams['warmup'] and (step <= (warm_epochs * steps_per_epoch))):
            if self.hparams['cosine_anneal']:
                # Warm up toward the cosine curve's value at the end of warmup.
                warmup_to = (eta_min + (((learning_rate - eta_min) * (1 + math.cos((((math.pi * warm_epochs) * steps_per_epoch) / max_steps)))) / 2))
            else:
                warmup_to = learning_rate
            p = (step / (warm_epochs * steps_per_epoch))
            lr = (warmup_from + (p * (warmup_to - warmup_from)))
        elif self.hparams['cosine_anneal']:
            p = (step / max_steps)
            lr = (eta_min + (((learning_rate - eta_min) * (1 + math.cos((math.pi * p)))) / 2))
        else:
            # Multiply by lr_decay_rate for each decay epoch already passed.
            decay_steps = np.sum((step > (np.asarray(lr_decay_epochs) * steps_per_epoch)))
            if (decay_steps > 0):
                lr = (learning_rate * (lr_decay_rate ** decay_steps))
            else:
                lr = learning_rate
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr
class CLIPPretrained(AbstractCLIPAlgorithm):
    """Pretrained CLIP model used as-is (frozen, no finetuning)."""

    def __init__(self, feature_dim, num_classes, num_domains, hparams, pretrained, idx2class):
        super().__init__(feature_dim, num_classes, num_domains, hparams, pretrained, idx2class)
        # Features pass straight through: no learned transform and no bottleneck.
        self.transform = lambda x: x
        self.bottleneck = lambda x: x
        self.featurizer = self.clip_model.visual

    def update(self, minibatches, unlabeled=None):
        # Nothing to train; report no losses.
        return {}

    def loss(self, all_x, all_y, all_d):
        return {}

    @property
    def trainable(self):
        return False
class AbstractCLIPBottleneck(AbstractCLIPAlgorithm):
    'CLIP based algorithms with an additional bottleneck (abstract class)'

    def __init__(self, feature_dim, num_classes, num_domains, hparams, pretrained, idx2class, bottleneck_class, use_clip_contrast=False):
        """
        Args:
            feature_dim: dimension of CLIP output features
            num_classes: number of classes
            num_domains: number of domains
            hparams: hyperparameter dict
            pretrained: pretrained CLIP model
            idx2class: the dict mapping from indices to class names, used to get label prompts
            bottleneck_class: bottleneck class
            use_clip_contrast: whether use CLIP text-image contrastive loss
        """
        super(AbstractCLIPBottleneck, self).__init__(feature_dim, num_classes, num_domains, hparams, pretrained, idx2class)
        assert isinstance(feature_dim, int)
        self.use_clip_contrast = use_clip_contrast
        self.bottleneck = bottleneck_class(feature_dim, num_classes, num_domains, hparams)
        # Finetuning head: a stack of residual MLP blocks on top of frozen CLIP features.
        self.transform = torch.nn.Sequential(*[networks.CLIPMLP(feature_dim, feature_dim, mlp_width=hparams['mlp_width'], mlp_depth=hparams['mlp_depth'], mlp_dropout=hparams['mlp_dropout'], add_residual=True, add_norm=hparams['mlp_norm']) for _ in range(hparams['mlp_blocks'])])
        if (not self.use_clip_contrast):
            # Supervised cross-entropy head.
            self.classifier_head = nn.Linear(feature_dim, num_classes, bias=True)
            self.refit_classifier = hparams['refit_classifier']
            params = (list(self.transform.parameters()) + list(self.classifier_head.parameters()))
        else:
            # Text-image contrastive objective; conditional bottlenecks need task
            # labels, which this objective does not provide.
            assert (not self.bottleneck.is_conditional)
            self.clipcon = CLIPConLoss(feature_dim, temperature=hparams['temperature'], learnable_temperature=hparams['learnable_temperature'], is_project=hparams['is_project'], is_symmetric=hparams['is_symmetric'])
            params = (list(self.transform.parameters()) + list(self.clipcon.parameters()))
        if self.bottleneck.trainable:
            params += list(self.bottleneck.parameters())
        # `params` may mix bare nn.Parameter objects and modules; count both kinds.
        num_trainable_params = sum([(sum((p.numel() for p in param_group.parameters())) if (not isinstance(param_group, nn.Parameter)) else param_group.numel()) for param_group in params])
        print('Trainable parameters # : ', num_trainable_params)
        self.optimizer = torch.optim.AdamW(params, lr=self.hparams['lr'], weight_decay=self.hparams['weight_decay'])

    def loss(self, all_x, all_y, all_d):
        """Return a dict of losses; total = task loss + lmbda * bottleneck loss."""
        all_z = self.get_transformed_feature(all_x)
        if (not self.use_clip_contrast):
            (bn_loss, all_z_hat) = self.bottleneck.loss(all_z, all_y, all_d)
            clf_out = self.classifier_head(all_z_hat)
            clf_loss = F.cross_entropy(clf_out, all_y)
            total_loss = (clf_loss + (self.hparams['lmbda'] * bn_loss))
            losses = {'clf_loss': clf_loss, 'bn_loss': bn_loss, 'total_loss': total_loss}
        else:
            # In contrastive mode, `all_y` carries text features, not labels; feed the
            # bottleneck a dummy constant label vector instead.
            text_features = all_y
            all_y = torch.ones(all_z.shape[0]).to(all_z)
            (bn_loss, all_z_hat) = self.bottleneck.loss(all_z, all_y, all_d)
            clipcon_loss = self.clipcon(all_z_hat, text_features)
            total_loss = (clipcon_loss + (self.hparams['lmbda'] * bn_loss))
            losses = {'clipcon_loss': clipcon_loss, 'bn_loss': bn_loss, 'total_loss': total_loss}
        return losses

    def update(self, minibatches, unlabeled=None):
        """One optimizer step over the concatenated per-domain minibatches."""
        device = ('cuda' if minibatches[0][1].is_cuda else 'cpu')
        all_x = torch.cat([x for (x, y) in minibatches])
        all_y = torch.cat([y for (x, y) in minibatches])
        # Domain label i for every sample coming from the i-th minibatch.
        all_d = torch.cat([torch.full((x.shape[0],), i, dtype=torch.int64, device=device) for (i, (x, y)) in enumerate(minibatches)])
        _losses = self.loss(all_x, all_y, all_d)
        self.optimizer.zero_grad()
        _losses['total_loss'].backward()
        self.optimizer.step()
        losses = {k: v.item() for (k, v) in _losses.items()}
        return losses

    def fit_classifier(self, clf_train_data, clf_valid_data, prompt_engineer=False, train_clf_hparams=None):
        # In supervised mode we can reuse the head trained end-to-end; otherwise
        # (or when refitting is requested) fall back to the generic fitting logic.
        if ((not self.use_clip_contrast) and (not self.refit_classifier)):
            self.classifier = self.classifier_head
        else:
            super().fit_classifier(clf_train_data, clf_valid_data, prompt_engineer=prompt_engineer, train_clf_hparams=train_clf_hparams)
class SupCLIPBottleneckBase(AbstractCLIPBottleneck):
    """CLIP finetuned with supervised cross-entropy loss but no bottleneck."""

    def __init__(self, feature_dim, num_classes, num_domains, hparams, pretrained, idx2class):
        # Same as the generic CLIP-bottleneck setup, with a no-op bottleneck.
        super().__init__(feature_dim, num_classes, num_domains, hparams,
                         pretrained, idx2class, DummyBottleneck)
class SupCLIPBottleneckEnt(AbstractCLIPBottleneck):
    """CLIP finetuned with supervised cross-entropy loss and entropy bottleneck."""

    def __init__(self, feature_dim, num_classes, num_domains, hparams, pretrained, idx2class):
        super().__init__(feature_dim, num_classes, num_domains, hparams,
                         pretrained, idx2class, DiscreteEntropyBottleneck)
class SupCLIPBottleneckCAD(AbstractCLIPBottleneck):
    """CLIP finetuned with supervised cross-entropy loss and CAD bottleneck."""

    def __init__(self, feature_dim, num_classes, num_domains, hparams, pretrained, idx2class):
        super().__init__(feature_dim, num_classes, num_domains, hparams,
                         pretrained, idx2class, CADBottleneck)
class SupCLIPBottleneckCondCAD(AbstractCLIPBottleneck):
    """CLIP finetuned with supervised cross-entropy loss and conditional CAD bottleneck."""

    def __init__(self, feature_dim, num_classes, num_domains, hparams, pretrained, idx2class):
        super().__init__(feature_dim, num_classes, num_domains, hparams,
                         pretrained, idx2class, CondCADBottleneck)
class ContrastCLIPBottleneckBase(AbstractCLIPBottleneck):
    """CLIP finetuned with text-image contrastive loss but no bottleneck."""

    def __init__(self, feature_dim, num_classes, num_domains, hparams, pretrained, idx2class):
        super().__init__(feature_dim, num_classes, num_domains, hparams,
                         pretrained, idx2class, DummyBottleneck,
                         use_clip_contrast=True)
class ContrastCLIPBottleneckEnt(AbstractCLIPBottleneck):
    """CLIP finetuned with text-image contrastive loss and entropy bottleneck
    (no need to access to domain labels)."""

    def __init__(self, feature_dim, num_classes, num_domains, hparams, pretrained, idx2class):
        super().__init__(feature_dim, num_classes, num_domains, hparams,
                         pretrained, idx2class, DiscreteEntropyBottleneck,
                         use_clip_contrast=True)
class ContrastCLIPBottleneckCAD(AbstractCLIPBottleneck):
    """CLIP finetuned with text-image contrastive loss and CAD bottleneck
    (require access to domain labels)."""

    def __init__(self, feature_dim, num_classes, num_domains, hparams, pretrained, idx2class):
        super().__init__(feature_dim, num_classes, num_domains, hparams,
                         pretrained, idx2class, CADBottleneck,
                         use_clip_contrast=True)
def local_launcher(commands):
    """Launch commands serially on the local machine."""
    for command in commands:
        # Blocking call; each command finishes before the next starts.
        subprocess.call(command, shell=True)
def dummy_launcher(commands):
    """
    Doesn't run anything; instead, prints each command.
    Useful for testing.
    """
    # Echo every command verbatim without executing it.
    for cmd in commands:
        print(f'Dummy launcher: {cmd}')
def multi_gpu_launcher(commands):
    '''
    Launch commands on the local machine, using all GPUs in parallel.
    '''
    # NOTE(review): this mutates the caller's `commands` list (pop below), and if
    # torch.cuda.device_count() == 0 the while loop below never terminates.
    print('WARNING: using experimental multi_gpu_launcher.')
    n_gpus = torch.cuda.device_count()
    # One slot per GPU holding its currently-running subprocess (or None).
    procs_by_gpu = ([None] * n_gpus)
    while (len(commands) > 0):
        for gpu_idx in range(n_gpus):
            proc = procs_by_gpu[gpu_idx]
            # Slot is free if it never ran anything or its process has exited.
            if ((proc is None) or (proc.poll() is not None)):
                # Pin the next command to this GPU via CUDA_VISIBLE_DEVICES.
                cmd = commands.pop(0)
                new_proc = subprocess.Popen(f'CUDA_VISIBLE_DEVICES={gpu_idx} {cmd}', shell=True)
                procs_by_gpu[gpu_idx] = new_proc
                break
        # Poll once per second.
        time.sleep(1)
    # Wait for the last command on each GPU to finish.
    for p in procs_by_gpu:
        if (p is not None):
            p.wait()
def slurm_launcher(commands):
    '''
    Parallel job launcher for computationnal cluster using SLURM workload manager.
    An example of SBATCH options:
    #!/bin/bash
    #SBATCH --job-name=<job_name>
    #SBATCH --output=<job_name>.out
    #SBATCH --error=<job_name>_error.out
    #SBATCH --ntasks=4
    #SBATCH --cpus-per-task=8
    #SBATCH --gres=gpu:4
    #SBATCH --time=1-00:00:00
    #SBATCH --mem=81Gb
    Note: --cpus-per-task should match the N_WORKERS defined in datasets.py (default 8)
    Note: there should be equal number of --ntasks and --gres
    '''
    if (len(commands) == 0):
        return
    # Cluster-specific knobs (hard-coded for the authors' SLURM setup):
    # choose partition / max concurrent submissions / QOS flags by account tier.
    large_mem = False
    use_qos = 'normal'
    if (use_qos == 'legacy'):
        assert (not large_mem)
        max_proc = 39
        partition = 'p100'
        qos = '--account=legacy --qos=legacy'
    elif (use_qos == 'deadline'):
        max_proc = 16
        if (not large_mem):
            partition = 'p100,t4v2'
        else:
            partition = 't4v2'
        qos = '--account=deadline --qos=deadline'
    else:
        assert (use_qos == 'normal')
        max_proc = 200
        if (not large_mem):
            partition = 'p100,t4v2,rtx6000'
        else:
            partition = 't4v2,rtx6000'
        qos = '--qos=normal'
    # Number of commands bundled into a single sbatch script.
    group_num = 10
    with Pool(processes=max_proc) as pool:
        processes = []
        if (group_num == 1):
            # One sbatch job per command; the command's own --output_dir argument
            # (parsed from the command string) is reused for logs and the run script.
            for command in commands:
                out_dir = command.split('output_dir')[1].split(' ')[1]
                out_path = os.path.join(out_dir, 'out.txt')
                err_path = os.path.join(out_dir, 'err.txt')
                script_path = os.path.join(out_dir, 'run.sh')
                with open(script_path, 'w+') as f:
                    f.write('#!/bin/sh\n')
                    f.write(command)
                # NOTE(review): 500 here is decimal (== 0o764), not octal 0o500 —
                # looks like an intended read/execute-owner mode; confirm.
                os.chmod(script_path, 500)
                process = pool.apply_async(subprocess.run, [f'sbatch -o {out_path} -e {err_path} --gres=gpu:1 --mem=48G -c 8 -p {partition} {qos} {script_path}'], {'shell': True})
                processes.append(process)
                # Small delay so sbatch submissions don't hammer the scheduler.
                time.sleep(0.1)
        else:
            def split(arr, size):
                # Chop `arr` into consecutive chunks of at most `size` elements.
                arrs = []
                while (len(arr) > size):
                    pice = arr[:size]
                    arrs.append(pice)
                    arr = arr[size:]
                arrs.append(arr)
                return arrs
            commands_grouped = split(commands, group_num)
            print('Grouping {} jobs to {} groups, {} jobs each.'.format(len(commands), len(commands_grouped), group_num))
            # One sbatch job per group; commands in a group run sequentially in
            # one shell script, logged under the first command's output_dir.
            for cmd_grp in commands_grouped:
                out_dir_first = cmd_grp[0].split('output_dir')[1].split(' ')[1]
                out_path = os.path.join(out_dir_first, 'out_group.txt')
                err_path = os.path.join(out_dir_first, 'err_group.txt')
                script_path = os.path.join(out_dir_first, 'run_group.sh')
                with open(script_path, 'w+') as f:
                    f.write('#!/bin/sh\n')
                    for cmd in cmd_grp:
                        f.write((cmd + '\n'))
                # See NOTE above about decimal vs octal chmod mode.
                os.chmod(script_path, 500)
                process = pool.apply_async(subprocess.run, [f'sbatch -o {out_path} -e {err_path} --gres=gpu:1 --mem=48G -c 8 -p {partition} {qos} {script_path}'], {'shell': True})
                processes.append(process)
                time.sleep(0.1)
        # Block until every sbatch submission call has completed (this waits for
        # the submission, not for the SLURM jobs themselves).
        for (i, process) in enumerate(processes):
            process.wait()
            print('//////////////////////////////')
            print('//// Completed ', i, ' / ', len(processes), '////')
            print('//////////////////////////////')
def _define_hparam(hparams, hparam_name, default_val, random_val_fn): hparams[hparam_name] = (hparams, hparam_name, default_val, random_val_fn)
def _hparams(algorithm, dataset, random_seed, larger_batch=False):
    """
    Global registry of hyperparams. Each entry is a (default, random) tuple.
    New algorithms / networks / etc. should add entries here.

    Args:
        algorithm: algorithm name (CLIP-based algorithms contain 'CLIP')
        dataset: dataset name
        random_seed: seed for deterministic random-search sampling
        larger_batch: use larger batch-size choices for CLIP algorithms
    """
    SMALL_IMAGES = ['Debug28', 'RotatedMNIST', 'ColoredMNIST']
    hparams = {}

    def _hparam(name, default_val, random_val_fn):
        """Define a hyperparameter. random_val_fn takes a RandomState and
        returns a random hyperparameter value."""
        assert (name not in hparams)
        # Each hparam gets its own RandomState seeded by (random_seed, name).
        random_state = np.random.RandomState(misc.seed_hash(random_seed, name))
        hparams[name] = (default_val, random_val_fn(random_state))

    if 'CLIP' not in algorithm:
        # ---- Standard (non-CLIP) DomainBed-style algorithms ----
        _hparam('data_augmentation', True, lambda r: True)
        _hparam('resnet18', False, lambda r: False)
        _hparam('resnet_pretrained', True, lambda r: True)
        _hparam('resnet_dropout', 0.0, lambda r: r.choice([0.0, 0.1, 0.5]))
        _hparam('class_balanced', False, lambda r: False)
        _hparam('nonlinear_classifier', False, lambda r: bool(r.choice([False, False])))

        # Algorithm-specific hparams.
        if algorithm in ['DANN', 'CDANN']:
            _hparam('lambda', 1.0, lambda r: 10 ** r.uniform(-2, 2))
            _hparam('weight_decay_d', 0.0, lambda r: 10 ** r.uniform(-6, -2))
            _hparam('d_steps_per_g_step', 1, lambda r: int(2 ** r.uniform(0, 3)))
            _hparam('grad_penalty', 0.0, lambda r: 10 ** r.uniform(-2, 1))
            _hparam('beta1', 0.5, lambda r: r.choice([0.0, 0.5]))
            _hparam('mlp_width', 256, lambda r: int(2 ** r.uniform(6, 10)))
            _hparam('mlp_depth', 3, lambda r: int(r.choice([3, 4, 5])))
            _hparam('mlp_dropout', 0.0, lambda r: r.choice([0.0, 0.1, 0.5]))
        elif algorithm == 'Fish':
            _hparam('meta_lr', 0.5, lambda r: r.choice([0.05, 0.1, 0.5]))
        elif algorithm == 'RSC':
            _hparam('rsc_f_drop_factor', (1 / 3), lambda r: r.uniform(0, 0.5))
            _hparam('rsc_b_drop_factor', (1 / 3), lambda r: r.uniform(0, 0.5))
        elif algorithm == 'SagNet':
            _hparam('sag_w_adv', 0.1, lambda r: 10 ** r.uniform(-2, 1))
        elif algorithm == 'IRM':
            _hparam('irm_lambda', 100.0, lambda r: 10 ** r.uniform(-1, 5))
            _hparam('irm_penalty_anneal_iters', 500, lambda r: int(10 ** r.uniform(0, 4)))
        elif algorithm == 'Mixup':
            _hparam('mixup_alpha', 0.2, lambda r: 10 ** r.uniform(-1, -1))
        elif algorithm == 'GroupDRO':
            _hparam('groupdro_eta', 0.01, lambda r: 10 ** r.uniform(-3, -1))
        elif (algorithm == 'MMD') or (algorithm == 'CORAL'):
            _hparam('mmd_gamma', 1.0, lambda r: 10 ** r.uniform(-1, 1))
        elif algorithm == 'MLDG':
            _hparam('mldg_beta', 1.0, lambda r: 10 ** r.uniform(-1, 1))
        elif algorithm == 'MTL':
            _hparam('mtl_ema', 0.99, lambda r: r.choice([0.5, 0.9, 0.99, 1.0]))
        elif algorithm == 'VREx':
            _hparam('vrex_lambda', 10.0, lambda r: 10 ** r.uniform(-1, 5))
            _hparam('vrex_penalty_anneal_iters', 500, lambda r: int(10 ** r.uniform(0, 4)))
        elif algorithm == 'SD':
            _hparam('sd_reg', 0.1, lambda r: 10 ** r.uniform(-5, -1))
        elif algorithm == 'ANDMask':
            _hparam('tau', 1, lambda r: r.uniform(0.5, 1.0))
        elif algorithm == 'IGA':
            _hparam('penalty', 1000, lambda r: 10 ** r.uniform(1, 5))
        elif algorithm == 'SANDMask':
            _hparam('tau', 1.0, lambda r: r.uniform(0.0, 1.0))
            _hparam('k', 10.0, lambda r: int(10 ** r.uniform(-3, 5)))
        elif (algorithm == 'CAD') or (algorithm == 'CondCAD'):
            _hparam('lmbda', 1, lambda r: r.choice([0.0001, 0.001, 0.01, 0.1, 1, 10.0, 100.0]))
            _hparam('temperature', 0.1, lambda r: r.choice([0.05, 0.1]))
            _hparam('is_normalized', False, lambda r: False)
            _hparam('is_project', False, lambda r: False)
            _hparam('is_flipped', True, lambda r: True)

        # Dataset / algorithm dependent optimization hparams.
        if dataset in SMALL_IMAGES:
            _hparam('lr', 0.001, lambda r: 10 ** r.uniform(-4.5, -2.5))
        else:
            _hparam('lr', 5e-05, lambda r: 10 ** r.uniform(-5, -3.5))
        if dataset in SMALL_IMAGES:
            _hparam('weight_decay', 0.0, lambda r: 0.0)
        else:
            _hparam('weight_decay', 0.0, lambda r: 10 ** r.uniform(-6, -2))
        if dataset in SMALL_IMAGES:
            _hparam('batch_size', 64, lambda r: int(2 ** r.uniform(3, 9)))
        elif algorithm == 'ARM':
            _hparam('batch_size', 8, lambda r: 8)
        elif dataset == 'DomainNet':
            _hparam('batch_size', 32, lambda r: int(2 ** r.uniform(3, 5)))
        else:
            _hparam('batch_size', 32, lambda r: int(2 ** r.uniform(3, 5.3)))

        # Separate generator / discriminator optimizers for adversarial methods.
        if (algorithm in ['DANN', 'CDANN']) and (dataset in SMALL_IMAGES):
            _hparam('lr_g', 0.001, lambda r: 10 ** r.uniform(-4.5, -2.5))
        elif algorithm in ['DANN', 'CDANN']:
            _hparam('lr_g', 5e-05, lambda r: 10 ** r.uniform(-5, -3.5))
        if (algorithm in ['DANN', 'CDANN']) and (dataset in SMALL_IMAGES):
            _hparam('lr_d', 0.001, lambda r: 10 ** r.uniform(-4.5, -2.5))
        elif algorithm in ['DANN', 'CDANN']:
            _hparam('lr_d', 5e-05, lambda r: 10 ** r.uniform(-5, -3.5))
        if (algorithm in ['DANN', 'CDANN']) and (dataset in SMALL_IMAGES):
            _hparam('weight_decay_g', 0.0, lambda r: 0.0)
        elif algorithm in ['DANN', 'CDANN']:
            _hparam('weight_decay_g', 0.0, lambda r: 10 ** r.uniform(-6, -2))
    else:
        # ---- CLIP-based algorithms ----
        _hparam('clip_model', 'RN50', lambda r: 'RN50')
        _hparam('data_augmentation', False, lambda r: False)
        _hparam('class_balanced', False, lambda r: False)
        _hparam('clf_type', 'SVM', lambda r: 'SVM')
        _hparam('lr', 0.0003, lambda r: r.choice([0.0001, 0.0003, 0.001, 0.003]))
        if not larger_batch:
            _hparam('batch_size', 64, lambda r: int(r.choice([64, 128, 256])))
        else:
            _hparam('batch_size', 256, lambda r: int(r.choice([128, 256, 512])))
        _hparam('warmup', False, lambda r: bool(r.choice([True, False])))
        _hparam('cosine_anneal', True, lambda r: True)
        _hparam('weight_decay', 1e-05, lambda r: 1e-05)
        _hparam('max_epoch', 50, lambda r: 50)
        _hparam('max_step', 5001, lambda r: 5001)
        _hparam('use_fix_step', False, lambda r: False)
        # Finetuning MLP head on top of frozen CLIP features.
        _hparam('mlp_width', 1024, lambda r: 1024)
        _hparam('mlp_depth', 2, lambda r: 2)
        _hparam('mlp_blocks', 1, lambda r: 1)
        _hparam('mlp_dropout', 0.1, lambda r: r.choice([0.0, 0.1, 0.5]))
        _hparam('mlp_norm', False, lambda r: False)

        # Objective-specific hparams.
        if 'ContrastCLIPBottleneck' in algorithm:
            _hparam('temperature', 0.05, lambda r: 0.05)
            _hparam('learnable_temperature', True, lambda r: True)
            _hparam('is_symmetric', False, lambda r: False)
            _hparam('is_project', False, lambda r: False)
        elif 'SupCLIPBottleneck' in algorithm:
            _hparam('refit_classifier', True, lambda r: True)
        else:
            assert algorithm in ['CLIPZeroShot', 'CLIPPretrained'], algorithm

        # Bottleneck weight.
        if 'BottleneckBase' in algorithm:
            _hparam('lmbda', 0.0, lambda r: 0.0)
        elif 'BottleneckEnt' in algorithm:
            _hparam('lmbda', 1, lambda r: r.choice([0.0001, 0.001, 0.01, 0.1, 1, 10.0, 100.0]))
        elif ('BottleneckCAD' in algorithm) or ('BottleneckCondCAD' in algorithm):
            # BUG FIX: was `... or 'BottleneckCondCAD'` — a truthy non-empty string
            # literal, making this branch always taken once reached, so zero-shot /
            # pretrained algorithms wrongly received an 'lmbda' entry.
            _hparam('lmbda', 0.01, lambda r: r.choice([0.0001, 0.001, 0.01, 0.1, 1]))

        # Bottleneck-internal hparams.
        if 'SupCLIPBottleneck' in algorithm:
            _hparam('temperature', 0.05, lambda r: 0.05)
            _hparam('is_normalized', False, lambda r: False)
            _hparam('is_project', False, lambda r: False)
            _hparam('is_flipped', True, lambda r: True)
        elif 'ContrastCLIPBottleneck' in algorithm:
            _hparam('is_normalized', False, lambda r: False)
            _hparam('is_flipped', True, lambda r: True)
        else:
            assert algorithm in ['CLIPZeroShot', 'CLIPPretrained'], algorithm
    return hparams
def default_hparams(algorithm, dataset, larger_batch=False):
    """Return {name: default_value} for every hparam of this algorithm/dataset."""
    registry = _hparams(algorithm, dataset, 0, larger_batch=larger_batch)
    return {name: default for name, (default, _) in registry.items()}
def random_hparams(algorithm, dataset, seed, larger_batch=False):
    """Return {name: random_value} sampled deterministically from `seed`."""
    registry = _hparams(algorithm, dataset, seed, larger_batch=larger_batch)
    return {name: sampled for name, (_, sampled) in registry.items()}
class _InfiniteSampler(torch.utils.data.Sampler): 'Wraps another Sampler to yield an infinite stream.' def __init__(self, sampler): self.sampler = sampler def __iter__(self): while True: for batch in self.sampler: (yield batch)
class InfiniteDataLoader():
    """Yields batches forever by sampling the dataset with replacement.

    Args:
        dataset: any map-style dataset
        weights: optional per-sample weights for weighted sampling (None = uniform)
        batch_size: samples per batch (incomplete batches are dropped)
        num_workers: DataLoader worker processes
    """

    def __init__(self, dataset, weights, batch_size, num_workers):
        super().__init__()
        # BUG FIX: was `weights == None` (wrong comparison idiom, and elementwise
        # for tensors); also removed a dead reassignment of `weights` that was
        # never used afterwards.
        if weights is not None:
            sampler = torch.utils.data.WeightedRandomSampler(
                weights, replacement=True, num_samples=batch_size)
        else:
            sampler = torch.utils.data.RandomSampler(dataset, replacement=True)
        batch_sampler = torch.utils.data.BatchSampler(
            sampler, batch_size=batch_size, drop_last=True)
        # _InfiniteSampler restarts the batch sampler forever, so workers are
        # spawned once and reused.
        self._infinite_iterator = iter(torch.utils.data.DataLoader(
            dataset, num_workers=num_workers,
            batch_sampler=_InfiniteSampler(batch_sampler)))

    def __iter__(self):
        while True:
            yield next(self._infinite_iterator)

    def __len__(self):
        # Infinite stream: length is deliberately undefined.
        raise ValueError
class FastDataLoader():
    """DataLoader wrapper with slightly improved speed by not respawning worker
    processes at every epoch (workers stay alive behind one infinite iterator,
    while __iter__/__len__ still present exactly one epoch)."""

    def __init__(self, dataset, batch_size, num_workers):
        super().__init__()
        sampler = torch.utils.data.RandomSampler(dataset, replacement=False)
        batch_sampler = torch.utils.data.BatchSampler(
            sampler, batch_size=batch_size, drop_last=False)
        loader = torch.utils.data.DataLoader(
            dataset, num_workers=num_workers,
            batch_sampler=_InfiniteSampler(batch_sampler))
        self._infinite_iterator = iter(loader)
        self._length = len(batch_sampler)

    def __iter__(self):
        # Draw exactly one epoch's worth of batches from the endless iterator.
        for _ in range(len(self)):
            yield next(self._infinite_iterator)

    def __len__(self):
        return self._length
def make_weights_for_balanced_classes(dataset):
    """Per-example sampling weights giving every class equal total weight.

    Each example of class y gets 1 / (count(y) * n_classes), so the
    weights over the whole dataset sum to 1.
    """
    labels = [int(y) for _, y in dataset]
    counts = Counter(labels)
    n_classes = len(counts)
    per_class = {y: 1 / (counts[y] * n_classes) for y in counts}
    weights = torch.zeros(len(dataset))
    for i, y in enumerate(labels):
        weights[i] = per_class[y]
    return weights
def pdb():
    """Drop into the PDB debugger at the call site.

    Restores the real stdout first (undoing any `Tee`/redirection) so the
    debugger prompt is actually visible.
    """
    sys.stdout = sys.__stdout__
    import pdb  # local import; intentionally shadows this function's own name
    print("Launching PDB, enter 'n' to step to parent function.")
    pdb.set_trace()
def seed_hash(*args):
    """Derive a deterministic 31-bit integer from all args, for use as a
    random seed. Hashing goes through str(args), so arguments only need a
    stable repr, not hashability."""
    digest = hashlib.md5(str(args).encode('utf-8')).hexdigest()
    return int(digest, 16) % (2 ** 31)
def print_separator():
    """Print an 80-column horizontal rule to stdout."""
    width = 80
    print('=' * width)
def print_row(row, colwidth=10, latex=False):
    """Print one table row, each cell left-justified and clipped to colwidth.

    Floats are rendered with 5 decimal places. With latex=True, cells are
    joined by ' & ' and a trailing '\\\\' terminator is passed to print as a
    second positional argument (so print's separator precedes it).
    """
    sep = ' & ' if latex else ' '
    end_ = '\\\\' if latex else ''

    def format_val(x):
        text = '{:.5f}'.format(x) if np.issubdtype(type(x), np.floating) else str(x)
        return text.ljust(colwidth)[:colwidth]

    print(sep.join(format_val(x) for x in row), end_)
class _SplitDataset(torch.utils.data.Dataset): 'Used by split_dataset' def __init__(self, underlying_dataset, keys): super(_SplitDataset, self).__init__() self.underlying_dataset = underlying_dataset self.keys = keys def __getitem__(self, key): return self.underlying_dataset[self.keys[key]] def __len__(self): return len(self.keys)
def split_dataset(dataset, n, seed=0):
    """Randomly split `dataset` into two disjoint views of sizes n and
    len(dataset) - n. The same seed always produces the same split."""
    assert n <= len(dataset)
    indices = list(range(len(dataset)))
    np.random.RandomState(seed).shuffle(indices)
    first, second = indices[:n], indices[n:]
    return (_SplitDataset(dataset, first), _SplitDataset(dataset, second))
def random_pairs_of_minibatches(minibatches):
    """Pair up the minibatches in a random cyclic order.

    Each minibatch (x, y) is paired with its successor in a random
    permutation (the last wraps to the first), with both members truncated
    to their common minimum length.
    """
    order = torch.randperm(len(minibatches)).tolist()
    count = len(minibatches)
    pairs = []
    for pos, src in enumerate(order):
        dst = order[(pos + 1) % count]
        xi, yi = minibatches[src][0], minibatches[src][1]
        xj, yj = minibatches[dst][0], minibatches[dst][1]
        keep = min(len(xi), len(xj))
        pairs.append(((xi[:keep], yi[:keep]), (xj[:keep], yj[:keep])))
    return pairs
def accuracy(network, loader, weights, device):
    """Weighted classification accuracy of `network` over `loader`.

    weights: optional 1-D tensor of per-example weights consumed in loader
    order; None means uniform weights. Handles both single-logit (binary,
    threshold at 0) and multi-class (argmax) predictions. Puts the network
    back into train mode before returning.
    """
    network.eval()
    correct = 0.0
    total = 0.0
    offset = 0
    with torch.no_grad():
        for x, y in loader:
            x, y = x.to(device), y.to(device)
            preds = network.predict(x)
            if weights is None:
                batch_weights = torch.ones(len(x))
            else:
                batch_weights = weights[offset:offset + len(x)]
                offset += len(x)
            batch_weights = batch_weights.to(device)
            if preds.size(1) == 1:
                hits = preds.gt(0).eq(y).float() * batch_weights.view(-1, 1)
            else:
                hits = preds.argmax(1).eq(y).float() * batch_weights
            correct += hits.sum().item()
            total += batch_weights.sum().item()
    network.train()
    return correct / total
def loss(network, loader, device):
    """Average per-example 'total_loss' of `network` over `loader`.

    Runs in eval mode with gradients disabled; each batch's loss is
    weighted by its batch size. Restores train mode before returning.
    """
    network.eval()
    loss_sum = 0.0
    example_count = 0
    with torch.no_grad():
        for x, y, d in loader:
            x, y, d = x.to(device), y.to(device), d.to(device)
            batch_loss = network.loss(x, y, d)['total_loss']
            loss_sum += (batch_loss * x.shape[0]).item()
            example_count += x.shape[0]
    network.train()
    return loss_sum / example_count
class Tee():
    """File-like sink that mirrors every write to both the stdout captured
    at construction time and a log file, flushing after each write."""

    def __init__(self, fname, mode='a'):
        self.stdout = sys.stdout
        self.file = open(fname, mode)
        self.encoding = sys.stdout.encoding

    def write(self, message):
        for sink in (self.stdout, self.file):
            sink.write(message)
        self.flush()

    def flush(self):
        self.stdout.flush()
        self.file.flush()
class ParamDict(OrderedDict):
    """Code adapted from https://github.com/Alok/rl_implementations/tree/master/reptile.
    A dictionary where the values are Tensors, meant to represent weights of
    a model. This subclass lets you perform arithmetic on weights directly.

    Arithmetic works value-wise against a scalar Number or key-wise against
    another dict with the same keys.
    """

    def __init__(self, *args, **kwargs):
        # Bug fix: forward keyword arguments with ** — the original used
        # `*kwargs`, which unpacked only the keyword *names* as positional
        # arguments and broke ParamDict(a=...).
        super().__init__(*args, **kwargs)

    def _prototype(self, other, op):
        # Apply `op` value-wise (scalar other) or key-wise (dict other).
        if isinstance(other, Number):
            return ParamDict({k: op(v, other) for (k, v) in self.items()})
        elif isinstance(other, dict):
            return ParamDict({k: op(self[k], other[k]) for k in self})
        else:
            raise NotImplementedError

    def __add__(self, other):
        return self._prototype(other, operator.add)

    def __rmul__(self, other):
        return self._prototype(other, operator.mul)
    __mul__ = __rmul__

    def __neg__(self):
        return ParamDict({k: (-v) for (k, v) in self.items()})

    def __sub__(self, other):
        # self - other; also accepts plain dicts (the original required
        # `other` to support unary minus).
        return self._prototype(other, operator.sub)

    def __rsub__(self, other):
        # Bug fix: reflected subtraction must compute other - self; the
        # original returned self - other (and aliased __sub__ to it, which
        # masked the error for the non-reflected case).
        return self.__neg__().__add__(other)

    def __truediv__(self, other):
        return self._prototype(other, operator.truediv)
def str2bool(v):
    """Parse a boolean-ish string for argparse; real bools pass through.

    Raises argparse.ArgumentTypeError on anything unrecognized.
    """
    if isinstance(v, bool):
        return v
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def make_selector_fn(selector):
    """
    If selector is a function, return selector.
    Otherwise, return a function corresponding to the selector string. Examples
    of valid selector strings and the corresponding functions:
        x       lambda obj: obj['x']
        x.y     lambda obj: obj['x']['y']
        x,y     lambda obj: (obj['x'], obj['y'])
    """
    if isinstance(selector, types.FunctionType):
        return selector
    if not isinstance(selector, str):
        raise TypeError
    if ',' in selector:
        subs = [make_selector_fn(part) for part in selector.split(',')]
        return (lambda obj: tuple(sub(obj) for sub in subs))
    if '.' in selector:
        subs = [make_selector_fn(part) for part in selector.split('.')]

        def chained(obj):
            # Apply each path component in turn, drilling into nested dicts.
            for sub in subs:
                obj = sub(obj)
            return obj
        return chained
    key = selector.strip()
    return (lambda obj: obj[key])
def hashable(obj):
    """Return obj if it is hashable, otherwise a canonical JSON string that
    stands in for it (stable across calls thanks to sort_keys)."""
    try:
        hash(obj)
    except TypeError:
        return json.dumps({'_': obj}, sort_keys=True)
    return obj
class Q(object):
    """Chainable query wrapper around a list of records.

    Supplies LINQ-style helpers (group / map / filter / select / aggregates)
    used by the sweep-analysis code. Operations return new Q objects; the
    wrapped list is only mutated through the private _append.
    """

    def __init__(self, list_):
        super(Q, self).__init__()
        self._list = list_

    def __len__(self):
        return len(self._list)

    def __getitem__(self, key):
        return self._list[key]

    def __eq__(self, other):
        # A Q compares equal to another Q, or to a plain list, holding the
        # same underlying items.
        if isinstance(other, self.__class__):
            return (self._list == other._list)
        else:
            return (self._list == other)

    def __str__(self):
        return str(self._list)

    def __repr__(self):
        return repr(self._list)

    def _append(self, item):
        "Unsafe, be careful you know what you're doing."
        self._list.append(item)

    def group(self, selector):
        """
        Group elements by selector and return a list of (group, group_records)
        tuples.
        """
        selector = make_selector_fn(selector)
        groups = {}
        for x in self._list:
            group = selector(x)
            # Group values may be unhashable (e.g. lists); hashable() maps
            # them to a canonical JSON key while the original value is kept
            # in the result tuple.
            group_key = hashable(group)
            if (group_key not in groups):
                groups[group_key] = (group, Q([]))
            groups[group_key][1]._append(x)
        # Sort by group key for a deterministic output order.
        results = [groups[key] for key in sorted(groups.keys())]
        return Q(results)

    def group_map(self, selector, fn):
        """
        Group elements by selector, apply fn to each group, and return a list
        of the results.
        """
        return self.group(selector).map(fn)

    def map(self, fn):
        """
        map self onto fn. If fn takes multiple args, tuple-unpacking
        is applied.
        """
        if (len(inspect.signature(fn).parameters) > 1):
            return Q([fn(*x) for x in self._list])
        else:
            return Q([fn(x) for x in self._list])

    def select(self, selector):
        # Project each element through a selector string/function.
        selector = make_selector_fn(selector)
        return Q([selector(x) for x in self._list])

    # --- scalar aggregates over the wrapped list ---

    def min(self):
        return min(self._list)

    def max(self):
        return max(self._list)

    def sum(self):
        return sum(self._list)

    def len(self):
        return len(self._list)

    def mean(self):
        # Suppress numpy's "mean of empty slice" RuntimeWarning.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            return float(np.mean(self._list))

    def std(self):
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            return float(np.std(self._list))

    def mean_std(self):
        return (self.mean(), self.std())

    def argmax(self, selector):
        # The element (not its index) maximizing the selector.
        selector = make_selector_fn(selector)
        return max(self._list, key=selector)

    def filter(self, fn):
        return Q([x for x in self._list if fn(x)])

    def filter_equals(self, selector, value):
        'like [x for x in y if x.selector == value]'
        selector = make_selector_fn(selector)
        return self.filter((lambda r: (selector(r) == value)))

    def filter_not_none(self):
        return self.filter((lambda r: (r is not None)))

    def filter_not_nan(self):
        return self.filter((lambda r: (not np.isnan(r))))

    def flatten(self):
        # One level of flattening: Q of iterables -> Q of their items.
        return Q([y for x in self._list for y in x])

    def unique(self):
        # Order-preserving de-duplication; uses hashable() so unhashable
        # items can live in the seen-set.
        result = []
        result_set = set()
        for x in self._list:
            hashable_x = hashable(x)
            if (hashable_x not in result_set):
                result_set.add(hashable_x)
                result.append(x)
        return Q(result)

    def sorted(self, key=None):
        if (key is None):
            key = (lambda x: x)

        def key2(x):
            # Treat NaN as -inf so it sorts first instead of poisoning
            # comparisons.
            x = key(x)
            if (isinstance(x, (np.floating, float)) and np.isnan(x)):
                return float('-inf')
            else:
                return x
        return Q(sorted(self._list, key=key2))
def load_records(path):
    """Load every results.jsonl found in the immediate subdirectories of `path`.

    Subdirectories without a readable results file are silently skipped
    (best-effort sweep loading). Returns a Q of parsed record dicts.
    """
    records = []
    subdirs = list(enumerate(os.listdir(path)))
    for i, subdir in tqdm.tqdm(subdirs, ncols=80, leave=False):
        results_path = os.path.join(path, subdir, 'results.jsonl')
        try:
            with open(results_path, 'r') as f:
                for line in f:
                    # Bug fix: json.loads tolerates a trailing newline; the
                    # old json.loads(line[:-1]) silently truncated the last
                    # character of a final line that lacked one.
                    records.append(json.loads(line))
        except IOError:
            pass
    return Q(records)
def get_grouped_records(records, group_test_envs=True):
    """Group records by (trial_seed, dataset, algorithm[, test_env]).

    With group_test_envs=True, a record listing several test envs appears in
    one group per test env; otherwise records group only by the triple.
    """
    buckets = collections.defaultdict(list)
    if group_test_envs:
        for record in records:
            args = record['args']
            for test_env in args['test_envs']:
                key = (args['trial_seed'], args['dataset'],
                       args['algorithm'], test_env)
                buckets[key].append(record)
        return Q([{'trial_seed': t, 'dataset': d, 'algorithm': a,
                   'test_env': e, 'records': Q(rs)}
                  for (t, d, a, e), rs in buckets.items()])
    for record in records:
        args = record['args']
        key = (args['trial_seed'], args['dataset'], args['algorithm'])
        buckets[key].append(record)
    return Q([{'trial_seed': t, 'dataset': d, 'algorithm': a,
               'records': Q(rs)}
              for (t, d, a), rs in buckets.items()])
def get_test_records(records):
    """Given records with a common test env, get the test records (i.e. the
    records with *only* that single test env and no other test envs)."""
    def is_single_env(r):
        return len(r['args']['test_envs']) == 1
    return records.filter(is_single_env)
class SelectionMethod():
    """Abstract class whose subclasses implement strategies for model
    selection across hparams and timesteps."""

    def __init__(self):
        # Selection methods are namespaces of classmethods; never instantiate.
        raise TypeError

    @classmethod
    def run_acc(self, run_records):
        """
        Given records from a run, return a {val_acc, test_acc} dict representing
        the best val-acc and corresponding test-acc for that run.
        """
        raise NotImplementedError

    @classmethod
    def hparams_accs(self, records):
        """
        Given all records from a single (dataset, algorithm, test env) pair,
        return a list of (run_acc, records) tuples sorted by val_acc, best first.
        """
        by_seed = records.group('args.hparams_seed')
        scored = by_seed.map((lambda _, run_records:
                              (self.run_acc(run_records), run_records)))
        scored = scored.filter((lambda pair: (pair[0] is not None)))
        # Ascending sort by val_acc, then reverse so the best run comes first.
        return scored.sorted(key=(lambda pair: pair[0]['val_acc']))[::-1]

    @classmethod
    def sweep_acc(self, records, return_extra=False):
        """
        Given all records from a single (dataset, algorithm, test env) pair,
        return the test acc of the run with the top val acc (optionally with
        its extra accuracies).
        """
        ranked = self.hparams_accs(records)
        if not len(ranked):
            return None
        best = ranked[0][0]
        if return_extra and ('ext_acc' in best):
            return (best['test_acc'], *best['ext_acc'])
        return best['test_acc']

    @classmethod
    def best_record(self, records):
        """
        Given all records from a single (dataset, algorithm, test env) pair,
        return the best record.
        """
        ranked = self.hparams_accs(records)
        if not len(ranked):
            return None
        assert (len(ranked[0][1]) == 1)
        return ranked[0][1][0]
class OracleSelectionMethod(SelectionMethod):
    """Like Selection method which picks argmax(test_out_acc) across all hparams
    and checkpoints, but instead of taking the argmax over all
    checkpoints, we pick the last checkpoint, i.e. no early stopping."""
    name = 'test-domain validation set (oracle)'

    @classmethod
    def run_acc(self, run_records):
        # Only records evaluated on exactly one test env are usable.
        single_env = run_records.filter(
            (lambda r: (len(r['args']['test_envs']) == 1)))
        if not len(single_env):
            return None
        # Last checkpoint by training step — deliberately no early stopping.
        chosen = single_env.sorted((lambda r: r['step']))[-1]
        return {
            'val_acc': chosen['out_acc_tgt'],
            'test_acc': chosen['in_acc_tgt'],
            'ext_acc': (chosen['out_acc_src'], chosen['in_acc_tgt']),
        }
class IIDAccuracySelectionMethod(SelectionMethod):
    """Picks argmax(mean(env_out_acc for env in train_envs))"""
    name = 'training-domain validation set'

    @classmethod
    def _step_acc(self, record):
        """Given a single record, return a {val_acc, test_acc} dict."""
        test_env = record['args']['test_envs'][0]
        # Collect out-split accuracies of every environment except the test
        # env; environments are numbered contiguously from 0.
        val_accs = []
        i = 0
        while f'env{i}_out_acc' in record:
            if i != test_env:
                val_accs.append(record[f'env{i}_out_acc'])
            i += 1
        return {
            'val_acc': np.mean(val_accs),
            'test_acc': record['in_acc_tgt'],
            'ext_acc': (record['out_acc_src'], record['in_acc_tgt']),
        }

    @classmethod
    def run_acc(self, run_records):
        test_records = get_test_records(run_records)
        if not len(test_records):
            return None
        return test_records.map(self._step_acc).argmax('val_acc')